Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author: Linus Torvalds <torvalds@linux-foundation.org>
Mon, 4 Jun 2012 18:25:31 +0000 (11:25 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Mon, 4 Jun 2012 18:25:31 +0000 (11:25 -0700)
Pull timer updates from Thomas Gleixner:
 "The clocksource driver is pure hardware enablement and the skew option
  is default off, well tested and non dangerous."

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tick: Move skew_tick option into the HIGH_RES_TIMER section
  clocksource: em_sti: Add DT support
  clocksource: em_sti: Emma Mobile STI driver
  clockevents: Make clockevents_config() a global symbol
  tick: Add tick skew boot option

2627 files changed:
.mailmap
Documentation/ABI/testing/sysfs-block-rssd
Documentation/ABI/testing/sysfs-bus-fcoe [new file with mode: 0644]
Documentation/ABI/testing/sysfs-bus-i2c-devices-lm3533 [new file with mode: 0644]
Documentation/ABI/testing/sysfs-bus-rbd
Documentation/ABI/testing/sysfs-class-backlight-driver-lm3533 [new file with mode: 0644]
Documentation/ABI/testing/sysfs-class-led-driver-lm3533 [new file with mode: 0644]
Documentation/ABI/testing/sysfs-class-mtd
Documentation/CodingStyle
Documentation/DocBook/mtdnand.tmpl
Documentation/SubmittingPatches
Documentation/arm/OMAP/DSS
Documentation/arm/SPEAr/overview.txt
Documentation/cgroups/memory.txt
Documentation/cgroups/resource_counter.txt
Documentation/cris/README
Documentation/device-mapper/thin-provisioning.txt
Documentation/devicetree/bindings/arm/fsl.txt
Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/spear-timer.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/spear.txt
Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/snps-dma.txt [new file with mode: 0644]
Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt [new file with mode: 0644]
Documentation/devicetree/bindings/gpio/gpio-mxs.txt [new file with mode: 0644]
Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/i2c-mxs.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/mux.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/samsung-i2c.txt
Documentation/devicetree/bindings/i2c/xiic.txt [new file with mode: 0644]
Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/da9052-i2c.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/tps65910.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/twl6040.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
Documentation/devicetree/bindings/mmc/mmc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/mmci.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/mxs-mmc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt
Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
Documentation/devicetree/bindings/mtd/gpmi-nand.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/mxc-nand.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/fsl-fec.txt
Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/rtc/spear-rtc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/omap-dmic.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/omap-mcpdm.txt [new file with mode: 0644]
Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
Documentation/devicetree/bindings/usb/tegra-usb.txt
Documentation/dma-buf-sharing.txt
Documentation/feature-removal-schedule.txt
Documentation/filesystems/Locking
Documentation/filesystems/ext3.txt
Documentation/filesystems/porting
Documentation/filesystems/proc.txt
Documentation/filesystems/vfs.txt
Documentation/i2c/functionality
Documentation/i2c/i2c-protocol
Documentation/i2c/muxes/gpio-i2cmux [deleted file]
Documentation/i2c/muxes/i2c-mux-gpio [new file with mode: 0644]
Documentation/initrd.txt
Documentation/kbuild/kbuild.txt
Documentation/kbuild/kconfig.txt
Documentation/kernel-parameters.txt
Documentation/leds/ledtrig-transient.txt [new file with mode: 0644]
Documentation/power/charger-manager.txt
Documentation/power/power_supply_class.txt
Documentation/sysctl/fs.txt
Documentation/virtual/kvm/api.txt
Documentation/virtual/kvm/cpuid.txt
Documentation/virtual/kvm/msr.txt
Documentation/vm/pagemap.txt
Documentation/vm/slub.txt
Documentation/vm/transhuge.txt
Documentation/watchdog/watchdog-kernel-api.txt
Documentation/watchdog/watchdog-parameters.txt
Documentation/x86/efi-stub.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/asm/kvm_para.h [new file with mode: 0644]
arch/alpha/include/asm/posix_types.h
arch/alpha/kernel/signal.c
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/Makefile
arch/arm/boot/dts/db8500.dtsi
arch/arm/boot/dts/exynos5250-smdk5250.dts
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/imx23-evk.dts [new file with mode: 0644]
arch/arm/boot/dts/imx23.dtsi [new file with mode: 0644]
arch/arm/boot/dts/imx27-phytec-phycore.dts
arch/arm/boot/dts/imx27.dtsi
arch/arm/boot/dts/imx28-evk.dts [new file with mode: 0644]
arch/arm/boot/dts/imx28.dtsi [new file with mode: 0644]
arch/arm/boot/dts/imx51-babbage.dts
arch/arm/boot/dts/imx51.dtsi
arch/arm/boot/dts/imx53-ard.dts
arch/arm/boot/dts/imx53-evk.dts
arch/arm/boot/dts/imx53-qsb.dts
arch/arm/boot/dts/imx53-smd.dts
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/imx6q-arm2.dts
arch/arm/boot/dts/imx6q-sabrelite.dts
arch/arm/boot/dts/imx6q-sabresd.dts [new file with mode: 0644]
arch/arm/boot/dts/imx6q.dtsi
arch/arm/boot/dts/lpc32xx.dtsi
arch/arm/boot/dts/omap3-beagle.dts
arch/arm/boot/dts/omap4-panda.dts
arch/arm/boot/dts/omap4-sdp.dts
arch/arm/boot/dts/phy3250.dts
arch/arm/boot/dts/snowball.dts
arch/arm/boot/dts/spear1310-evb.dts [new file with mode: 0644]
arch/arm/boot/dts/spear1310.dtsi [new file with mode: 0644]
arch/arm/boot/dts/spear1340-evb.dts [new file with mode: 0644]
arch/arm/boot/dts/spear1340.dtsi [new file with mode: 0644]
arch/arm/boot/dts/spear13xx.dtsi [new file with mode: 0644]
arch/arm/boot/dts/spear300-evb.dts
arch/arm/boot/dts/spear310-evb.dts
arch/arm/boot/dts/spear320-evb.dts
arch/arm/boot/dts/spear3xx.dtsi
arch/arm/boot/dts/spear600-evb.dts
arch/arm/boot/dts/spear600.dtsi
arch/arm/boot/dts/tegra-cardhu.dts
arch/arm/boot/dts/tegra-harmony.dts
arch/arm/boot/dts/tegra-paz00.dts
arch/arm/boot/dts/tegra-seaboard.dts
arch/arm/boot/dts/tegra-trimslice.dts
arch/arm/boot/dts/tegra-ventana.dts
arch/arm/boot/dts/tegra20.dtsi
arch/arm/boot/dts/tegra30.dtsi
arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
arch/arm/boot/dts/vexpress-v2p-ca5s.dts
arch/arm/boot/dts/vexpress-v2p-ca9.dts
arch/arm/common/dmabounce.c
arch/arm/configs/imx_v4_v5_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/mxs_defconfig
arch/arm/configs/prima2_defconfig [new file with mode: 0644]
arch/arm/configs/spear13xx_defconfig [new file with mode: 0644]
arch/arm/configs/spear3xx_defconfig
arch/arm/configs/spear6xx_defconfig
arch/arm/configs/tegra_defconfig
arch/arm/configs/u8500_defconfig
arch/arm/include/asm/device.h
arch/arm/include/asm/dma-contiguous.h [new file with mode: 0644]
arch/arm/include/asm/dma-iommu.h [new file with mode: 0644]
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/hardware/pl080.h
arch/arm/include/asm/io.h
arch/arm/include/asm/kvm_para.h [new file with mode: 0644]
arch/arm/include/asm/mach/arch.h
arch/arm/include/asm/mach/map.h
arch/arm/include/asm/posix_types.h
arch/arm/include/asm/thread_info.h
arch/arm/kernel/entry-common.S
arch/arm/kernel/ptrace.c
arch/arm/kernel/setup.c
arch/arm/kernel/signal.c
arch/arm/kernel/signal.h
arch/arm/kernel/smp.c
arch/arm/kernel/traps.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-at91/include/mach/at_hdmac.h
arch/arm/mach-davinci/board-da830-evm.c
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-davinci/board-dm355-evm.c
arch/arm/mach-davinci/board-dm355-leopard.c
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mach-davinci/board-dm644x-evm.c
arch/arm/mach-davinci/board-dm646x-evm.c
arch/arm/mach-davinci/board-mityomapl138.c
arch/arm/mach-davinci/board-neuros-osd2.c
arch/arm/mach-davinci/board-omapl138-hawk.c
arch/arm/mach-davinci/board-sffsdr.c
arch/arm/mach-davinci/board-tnetv107x-evm.c
arch/arm/mach-davinci/clock.c
arch/arm/mach-davinci/common.c
arch/arm/mach-davinci/cpufreq.c
arch/arm/mach-davinci/dma.c
arch/arm/mach-davinci/include/mach/common.h
arch/arm/mach-davinci/include/mach/debug-macro.S
arch/arm/mach-davinci/include/mach/hardware.h
arch/arm/mach-davinci/include/mach/serial.h
arch/arm/mach-davinci/include/mach/uncompress.h
arch/arm/mach-davinci/pm.c
arch/arm/mach-dove/common.c
arch/arm/mach-dove/dove-db-setup.c
arch/arm/mach-ep93xx/adssphere.c
arch/arm/mach-ep93xx/core.c
arch/arm/mach-ep93xx/crunch.c
arch/arm/mach-ep93xx/edb93xx.c
arch/arm/mach-ep93xx/gesbc9312.c
arch/arm/mach-ep93xx/include/mach/platform.h
arch/arm/mach-ep93xx/micro9.c
arch/arm/mach-ep93xx/simone.c
arch/arm/mach-ep93xx/snappercl15.c
arch/arm/mach-ep93xx/ts72xx.c
arch/arm/mach-ep93xx/vision_ep9307.c
arch/arm/mach-exynos/Kconfig
arch/arm/mach-exynos/Makefile
arch/arm/mach-exynos/Makefile.boot
arch/arm/mach-exynos/clock-exynos4.c
arch/arm/mach-exynos/clock-exynos4.h
arch/arm/mach-exynos/clock-exynos4210.c
arch/arm/mach-exynos/clock-exynos4212.c
arch/arm/mach-exynos/clock-exynos5.c
arch/arm/mach-exynos/common.c
arch/arm/mach-exynos/common.h
arch/arm/mach-exynos/cpuidle.c
arch/arm/mach-exynos/dev-drm.c [new file with mode: 0644]
arch/arm/mach-exynos/dev-sysmmu.c
arch/arm/mach-exynos/dma.c
arch/arm/mach-exynos/include/mach/gpio.h
arch/arm/mach-exynos/include/mach/irqs.h
arch/arm/mach-exynos/include/mach/map.h
arch/arm/mach-exynos/include/mach/pm-core.h
arch/arm/mach-exynos/include/mach/pmu.h
arch/arm/mach-exynos/include/mach/regs-clock.h
arch/arm/mach-exynos/include/mach/regs-pmu.h
arch/arm/mach-exynos/include/mach/regs-sysmmu.h [deleted file]
arch/arm/mach-exynos/include/mach/spi-clocks.h
arch/arm/mach-exynos/include/mach/sysmmu.h
arch/arm/mach-exynos/mach-armlex4210.c
arch/arm/mach-exynos/mach-exynos4-dt.c
arch/arm/mach-exynos/mach-exynos5-dt.c
arch/arm/mach-exynos/mach-nuri.c
arch/arm/mach-exynos/mach-origen.c
arch/arm/mach-exynos/mach-smdk4x12.c
arch/arm/mach-exynos/mach-smdkv310.c
arch/arm/mach-exynos/mach-universal_c210.c
arch/arm/mach-exynos/mct.c
arch/arm/mach-exynos/pm.c
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-exynos/pmu.c
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/Makefile
arch/arm/mach-imx/Makefile.boot
arch/arm/mach-imx/clk-busy.c [new file with mode: 0644]
arch/arm/mach-imx/clk-gate2.c [new file with mode: 0644]
arch/arm/mach-imx/clk-imx1.c [new file with mode: 0644]
arch/arm/mach-imx/clk-imx21.c [new file with mode: 0644]
arch/arm/mach-imx/clk-imx25.c [new file with mode: 0644]
arch/arm/mach-imx/clk-imx27.c [new file with mode: 0644]
arch/arm/mach-imx/clk-imx31.c [new file with mode: 0644]
arch/arm/mach-imx/clk-imx35.c [new file with mode: 0644]
arch/arm/mach-imx/clk-imx51-imx53.c [new file with mode: 0644]
arch/arm/mach-imx/clk-imx6q.c [new file with mode: 0644]
arch/arm/mach-imx/clk-pfd.c [new file with mode: 0644]
arch/arm/mach-imx/clk-pllv1.c [new file with mode: 0644]
arch/arm/mach-imx/clk-pllv2.c [new file with mode: 0644]
arch/arm/mach-imx/clk-pllv3.c [new file with mode: 0644]
arch/arm/mach-imx/clk.h [new file with mode: 0644]
arch/arm/mach-imx/clock-imx1.c [deleted file]
arch/arm/mach-imx/clock-imx21.c [deleted file]
arch/arm/mach-imx/clock-imx25.c [deleted file]
arch/arm/mach-imx/clock-imx27.c [deleted file]
arch/arm/mach-imx/clock-imx31.c [deleted file]
arch/arm/mach-imx/clock-imx35.c [deleted file]
arch/arm/mach-imx/clock-imx6q.c [deleted file]
arch/arm/mach-imx/clock-mx51-mx53.c [deleted file]
arch/arm/mach-imx/cpu-imx5.c
arch/arm/mach-imx/crmregs-imx3.h
arch/arm/mach-imx/imx27-dt.c
arch/arm/mach-imx/imx51-dt.c
arch/arm/mach-imx/imx53-dt.c
arch/arm/mach-imx/lluart.c
arch/arm/mach-imx/mach-cpuimx51sd.c
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-imx/mach-mx51_3ds.c
arch/arm/mach-imx/mach-mx51_babbage.c
arch/arm/mach-imx/mach-mx51_efikamx.c
arch/arm/mach-imx/mach-mx51_efikasb.c
arch/arm/mach-imx/mach-pcm037.c
arch/arm/mach-imx/mach-pcm037_eet.c
arch/arm/mach-imx/mm-imx3.c
arch/arm/mach-imx/mm-imx5.c
arch/arm/mach-imx/pcm037.h
arch/arm/mach-imx/pm-imx3.c
arch/arm/mach-ixp4xx/common.c
arch/arm/mach-ixp4xx/include/mach/gpio.h
arch/arm/mach-ixp4xx/ixdp425-setup.c
arch/arm/mach-kirkwood/board-dreamplug.c
arch/arm/mach-kirkwood/board-dt.c
arch/arm/mach-kirkwood/common.c
arch/arm/mach-kirkwood/common.h
arch/arm/mach-kirkwood/include/mach/bridge-regs.h
arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
arch/arm/mach-kirkwood/pcie.c
arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
arch/arm/mach-kirkwood/t5325-setup.c
arch/arm/mach-kirkwood/tsx1x-common.c
arch/arm/mach-msm/board-halibut.c
arch/arm/mach-msm/board-mahimahi.c
arch/arm/mach-msm/board-msm7x27.c
arch/arm/mach-msm/board-msm7x30.c
arch/arm/mach-msm/board-msm8960.c
arch/arm/mach-msm/board-msm8x60.c
arch/arm/mach-msm/board-qsd8x50.c
arch/arm/mach-msm/board-sapphire.c
arch/arm/mach-msm/board-trout.c
arch/arm/mach-msm/include/mach/board.h
arch/arm/mach-msm/smd_debug.c
arch/arm/mach-mv78xx0/common.c
arch/arm/mach-mxs/Kconfig
arch/arm/mach-mxs/Makefile
arch/arm/mach-mxs/clock-mx23.c [deleted file]
arch/arm/mach-mxs/clock-mx28.c [deleted file]
arch/arm/mach-mxs/clock.c [deleted file]
arch/arm/mach-mxs/devices/Kconfig
arch/arm/mach-mxs/devices/platform-dma.c
arch/arm/mach-mxs/devices/platform-gpio-mxs.c
arch/arm/mach-mxs/devices/platform-mxs-mmc.c
arch/arm/mach-mxs/include/mach/clock.h [deleted file]
arch/arm/mach-mxs/include/mach/common.h
arch/arm/mach-mxs/include/mach/devices-common.h
arch/arm/mach-mxs/include/mach/mmc.h [deleted file]
arch/arm/mach-mxs/mach-mx28evk.c
arch/arm/mach-mxs/mach-mxs.c [new file with mode: 0644]
arch/arm/mach-mxs/mm.c
arch/arm/mach-mxs/regs-clkctrl-mx23.h [deleted file]
arch/arm/mach-mxs/regs-clkctrl-mx28.h [deleted file]
arch/arm/mach-mxs/system.c
arch/arm/mach-mxs/timer.c
arch/arm/mach-nomadik/board-nhk8815.c
arch/arm/mach-omap1/board-ams-delta.c
arch/arm/mach-omap1/board-fsample.c
arch/arm/mach-omap1/board-generic.c
arch/arm/mach-omap1/board-h2.c
arch/arm/mach-omap1/board-h3.c
arch/arm/mach-omap1/board-htcherald.c
arch/arm/mach-omap1/board-innovator.c
arch/arm/mach-omap1/board-nokia770.c
arch/arm/mach-omap1/board-osk.c
arch/arm/mach-omap1/board-palmte.c
arch/arm/mach-omap1/board-palmtt.c
arch/arm/mach-omap1/board-palmz71.c
arch/arm/mach-omap1/board-perseus2.c
arch/arm/mach-omap1/board-sx1.c
arch/arm/mach-omap1/board-voiceblue.c
arch/arm/mach-omap1/common.h
arch/arm/mach-omap1/devices.c
arch/arm/mach-omap1/io.c
arch/arm/mach-omap1/serial.c
arch/arm/mach-omap1/time.c
arch/arm/mach-omap1/timer32k.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/board-2430sdp.c
arch/arm/mach-omap2/board-3430sdp.c
arch/arm/mach-omap2/board-3630sdp.c
arch/arm/mach-omap2/board-4430sdp.c
arch/arm/mach-omap2/board-am3517crane.c
arch/arm/mach-omap2/board-am3517evm.c
arch/arm/mach-omap2/board-apollon.c
arch/arm/mach-omap2/board-cm-t35.c
arch/arm/mach-omap2/board-cm-t3517.c
arch/arm/mach-omap2/board-devkit8000.c
arch/arm/mach-omap2/board-generic.c
arch/arm/mach-omap2/board-h4.c
arch/arm/mach-omap2/board-igep0020.c
arch/arm/mach-omap2/board-ldp.c
arch/arm/mach-omap2/board-n8x0.c
arch/arm/mach-omap2/board-omap3beagle.c
arch/arm/mach-omap2/board-omap3evm.c
arch/arm/mach-omap2/board-omap3logic.c
arch/arm/mach-omap2/board-omap3pandora.c
arch/arm/mach-omap2/board-omap3stalker.c
arch/arm/mach-omap2/board-omap3touchbook.c
arch/arm/mach-omap2/board-omap4panda.c
arch/arm/mach-omap2/board-overo.c
arch/arm/mach-omap2/board-rm680.c
arch/arm/mach-omap2/board-rx51.c
arch/arm/mach-omap2/board-ti8168evm.c
arch/arm/mach-omap2/board-zoom.c
arch/arm/mach-omap2/common.h
arch/arm/mach-omap2/devices.c
arch/arm/mach-omap2/display.c
arch/arm/mach-omap2/dma.c
arch/arm/mach-omap2/dsp.c
arch/arm/mach-omap2/gpmc.c
arch/arm/mach-omap2/hsmmc.c
arch/arm/mach-omap2/id.c
arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/iomap.h
arch/arm/mach-omap2/irq.c
arch/arm/mach-omap2/mux.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/pm.c
arch/arm/mach-omap2/pm24xx.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/pm44xx.c
arch/arm/mach-omap2/powerdomains3xxx_data.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-omap2/usb-musb.c
arch/arm/mach-omap2/voltagedomains3xxx_data.c
arch/arm/mach-orion5x/common.c
arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
arch/arm/mach-orion5x/ts78xx-setup.c
arch/arm/mach-pnx4008/core.c
arch/arm/mach-pnx4008/pm.c
arch/arm/mach-prima2/common.h
arch/arm/mach-prima2/pm.c
arch/arm/mach-prima2/prima2.c
arch/arm/mach-pxa/balloon3.c
arch/arm/mach-pxa/em-x270.c
arch/arm/mach-pxa/palmtx.c
arch/arm/mach-s3c24xx/Kconfig
arch/arm/mach-s3c24xx/Makefile
arch/arm/mach-s3c24xx/clock-s3c2416.c
arch/arm/mach-s3c24xx/clock-s3c2443.c
arch/arm/mach-s3c24xx/common-s3c2443.c
arch/arm/mach-s3c24xx/common.c [new file with mode: 0644]
arch/arm/mach-s3c24xx/dma-s3c2443.c
arch/arm/mach-s3c24xx/include/mach/dma.h
arch/arm/mach-s3c24xx/include/mach/irqs.h
arch/arm/mach-s3c24xx/include/mach/map.h
arch/arm/mach-s3c24xx/irq-pm.c [new file with mode: 0644]
arch/arm/mach-s3c24xx/irq-s3c2416.c
arch/arm/mach-s3c24xx/mach-smdk2416.c
arch/arm/mach-s3c24xx/pm.c [new file with mode: 0644]
arch/arm/mach-s3c24xx/s3c2416.c
arch/arm/mach-s3c24xx/setup-spi.c [new file with mode: 0644]
arch/arm/mach-s3c24xx/sleep.S [new file with mode: 0644]
arch/arm/mach-s3c64xx/common.c
arch/arm/mach-s3c64xx/common.h
arch/arm/mach-s3c64xx/cpuidle.c
arch/arm/mach-s3c64xx/mach-anw6410.c
arch/arm/mach-s3c64xx/mach-crag6410-module.c
arch/arm/mach-s3c64xx/mach-crag6410.c
arch/arm/mach-s3c64xx/mach-hmt.c
arch/arm/mach-s3c64xx/mach-mini6410.c
arch/arm/mach-s3c64xx/mach-ncp.c
arch/arm/mach-s3c64xx/mach-real6410.c
arch/arm/mach-s3c64xx/mach-smartq5.c
arch/arm/mach-s3c64xx/mach-smartq7.c
arch/arm/mach-s3c64xx/mach-smdk6400.c
arch/arm/mach-s3c64xx/mach-smdk6410.c
arch/arm/mach-s3c64xx/pm.c
arch/arm/mach-s5p64x0/mach-smdk6440.c
arch/arm/mach-s5p64x0/mach-smdk6450.c
arch/arm/mach-s5pc100/mach-smdkc100.c
arch/arm/mach-s5pv210/mach-aquila.c
arch/arm/mach-s5pv210/mach-goni.c
arch/arm/mach-s5pv210/mach-smdkv210.c
arch/arm/mach-sa1100/assabet.c
arch/arm/mach-sa1100/badge4.c
arch/arm/mach-sa1100/cerf.c
arch/arm/mach-sa1100/collie.c
arch/arm/mach-sa1100/generic.c
arch/arm/mach-sa1100/generic.h
arch/arm/mach-sa1100/h3100.c
arch/arm/mach-sa1100/h3600.c
arch/arm/mach-sa1100/hackkit.c
arch/arm/mach-sa1100/jornada720.c
arch/arm/mach-sa1100/lart.c
arch/arm/mach-sa1100/nanoengine.c
arch/arm/mach-sa1100/neponset.c
arch/arm/mach-sa1100/pleb.c
arch/arm/mach-sa1100/pm.c
arch/arm/mach-sa1100/shannon.c
arch/arm/mach-sa1100/simpad.c
arch/arm/mach-shmobile/Makefile
arch/arm/mach-shmobile/board-ag5evm.c
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/board-bonito.c
arch/arm/mach-shmobile/board-g3evm.c
arch/arm/mach-shmobile/board-g4evm.c
arch/arm/mach-shmobile/board-kota2.c
arch/arm/mach-shmobile/board-mackerel.c
arch/arm/mach-shmobile/board-marzen.c
arch/arm/mach-shmobile/common.c [new file with mode: 0644]
arch/arm/mach-shmobile/cpuidle.c
arch/arm/mach-shmobile/include/mach/common.h
arch/arm/mach-shmobile/suspend.c
arch/arm/mach-spear13xx/Kconfig [new file with mode: 0644]
arch/arm/mach-spear13xx/Makefile [new file with mode: 0644]
arch/arm/mach-spear13xx/Makefile.boot [new file with mode: 0644]
arch/arm/mach-spear13xx/headsmp.S [new file with mode: 0644]
arch/arm/mach-spear13xx/hotplug.c [new file with mode: 0644]
arch/arm/mach-spear13xx/include/mach/debug-macro.S [new file with mode: 0644]
arch/arm/mach-spear13xx/include/mach/dma.h [new file with mode: 0644]
arch/arm/mach-spear13xx/include/mach/generic.h [new file with mode: 0644]
arch/arm/mach-spear13xx/include/mach/gpio.h [new file with mode: 0644]
arch/arm/mach-spear13xx/include/mach/hardware.h [new file with mode: 0644]
arch/arm/mach-spear13xx/include/mach/irqs.h [new file with mode: 0644]
arch/arm/mach-spear13xx/include/mach/spear.h [new file with mode: 0644]
arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h [new file with mode: 0644]
arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h [new file with mode: 0644]
arch/arm/mach-spear13xx/include/mach/timex.h [new file with mode: 0644]
arch/arm/mach-spear13xx/include/mach/uncompress.h [new file with mode: 0644]
arch/arm/mach-spear13xx/platsmp.c [new file with mode: 0644]
arch/arm/mach-spear13xx/spear1310.c [new file with mode: 0644]
arch/arm/mach-spear13xx/spear1340.c [new file with mode: 0644]
arch/arm/mach-spear13xx/spear13xx.c [new file with mode: 0644]
arch/arm/mach-spear3xx/Makefile
arch/arm/mach-spear3xx/clock.c [deleted file]
arch/arm/mach-spear3xx/include/mach/generic.h
arch/arm/mach-spear3xx/include/mach/irqs.h
arch/arm/mach-spear3xx/include/mach/misc_regs.h
arch/arm/mach-spear3xx/include/mach/spear.h
arch/arm/mach-spear3xx/spear300.c
arch/arm/mach-spear3xx/spear310.c
arch/arm/mach-spear3xx/spear320.c
arch/arm/mach-spear3xx/spear3xx.c
arch/arm/mach-spear6xx/Makefile
arch/arm/mach-spear6xx/clock.c [deleted file]
arch/arm/mach-spear6xx/include/mach/generic.h
arch/arm/mach-spear6xx/include/mach/irqs.h
arch/arm/mach-spear6xx/include/mach/misc_regs.h
arch/arm/mach-spear6xx/include/mach/spear.h
arch/arm/mach-spear6xx/spear6xx.c
arch/arm/mach-tegra/Kconfig
arch/arm/mach-tegra/board-dt-tegra20.c
arch/arm/mach-tegra/board-dt-tegra30.c
arch/arm/mach-tegra/board-harmony.c
arch/arm/mach-tegra/board-paz00.c
arch/arm/mach-tegra/board-seaboard.c
arch/arm/mach-tegra/board-trimslice.c
arch/arm/mach-tegra/board.h
arch/arm/mach-tegra/clock.c
arch/arm/mach-tegra/common.c
arch/arm/mach-tegra/devices.c
arch/arm/mach-tegra/devices.h
arch/arm/mach-tegra/include/mach/tegra-ahb.h [new file with mode: 0644]
arch/arm/mach-tegra/include/mach/uncompress.h
arch/arm/mach-tegra/include/mach/usb_phy.h
arch/arm/mach-tegra/powergate.c
arch/arm/mach-tegra/tegra2_clocks.c
arch/arm/mach-tegra/tegra30_clocks.c
arch/arm/mach-tegra/usb_phy.c
arch/arm/mach-ux500/board-mop500-uib.c
arch/arm/mach-ux500/board-mop500.c
arch/arm/mach-ux500/board-mop500.h
arch/arm/mach-ux500/clock.c
arch/arm/mach-ux500/clock.h
arch/arm/mach-ux500/cpu-db8500.c
arch/arm/mach-ux500/cpu.c
arch/arm/mach-ux500/include/mach/setup.h
arch/arm/mach-vexpress/v2m.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/init.c
arch/arm/mm/mm.h
arch/arm/mm/mmu.c
arch/arm/mm/vmregion.h
arch/arm/plat-mxc/clock.c
arch/arm/plat-mxc/include/mach/clock.h
arch/arm/plat-mxc/include/mach/common.h
arch/arm/plat-mxc/include/mach/debug-macro.S
arch/arm/plat-mxc/include/mach/mx6q.h
arch/arm/plat-mxc/time.c
arch/arm/plat-omap/counter_32k.c
arch/arm/plat-omap/devices.c
arch/arm/plat-omap/dma.c
arch/arm/plat-omap/dmtimer.c
arch/arm/plat-omap/include/plat/common.h
arch/arm/plat-omap/include/plat/cpu.h
arch/arm/plat-omap/include/plat/dma.h
arch/arm/plat-omap/include/plat/dmtimer.h
arch/arm/plat-omap/include/plat/gpmc.h
arch/arm/plat-omap/include/plat/mmc.h
arch/arm/plat-orion/common.c
arch/arm/plat-orion/include/plat/common.h
arch/arm/plat-orion/include/plat/orion_wdt.h [deleted file]
arch/arm/plat-orion/pcie.c
arch/arm/plat-pxa/include/plat/pxa27x_keypad.h
arch/arm/plat-s3c24xx/Makefile
arch/arm/plat-s3c24xx/clock.c [deleted file]
arch/arm/plat-s3c24xx/cpu.c [deleted file]
arch/arm/plat-s3c24xx/dev-uart.c [deleted file]
arch/arm/plat-s3c24xx/irq-pm.c [deleted file]
arch/arm/plat-s3c24xx/pm.c [deleted file]
arch/arm/plat-s3c24xx/sleep.S [deleted file]
arch/arm/plat-s5p/Kconfig [deleted file]
arch/arm/plat-s5p/Makefile [deleted file]
arch/arm/plat-s5p/clock.c [deleted file]
arch/arm/plat-s5p/dev-mfc.c [deleted file]
arch/arm/plat-s5p/dev-uart.c [deleted file]
arch/arm/plat-s5p/irq-eint.c [deleted file]
arch/arm/plat-s5p/irq-gpioint.c [deleted file]
arch/arm/plat-s5p/irq-pm.c [deleted file]
arch/arm/plat-s5p/irq.c [deleted file]
arch/arm/plat-s5p/pm.c [deleted file]
arch/arm/plat-s5p/s5p-time.c [deleted file]
arch/arm/plat-s5p/setup-mipiphy.c [deleted file]
arch/arm/plat-s5p/sleep.S [deleted file]
arch/arm/plat-s5p/sysmmu.c [deleted file]
arch/arm/plat-samsung/Kconfig
arch/arm/plat-samsung/Makefile
arch/arm/plat-samsung/include/plat/cpu.h
arch/arm/plat-samsung/include/plat/devs.h
arch/arm/plat-samsung/include/plat/dma-pl330.h
arch/arm/plat-samsung/include/plat/fb.h
arch/arm/plat-samsung/include/plat/s3c2416.h
arch/arm/plat-samsung/include/plat/s5p-clock.h
arch/arm/plat-samsung/include/plat/sysmmu.h [deleted file]
arch/arm/plat-samsung/s5p-clock.c [new file with mode: 0644]
arch/arm/plat-samsung/s5p-dev-mfc.c [new file with mode: 0644]
arch/arm/plat-samsung/s5p-dev-uart.c [new file with mode: 0644]
arch/arm/plat-samsung/s5p-irq-eint.c [new file with mode: 0644]
arch/arm/plat-samsung/s5p-irq-gpioint.c [new file with mode: 0644]
arch/arm/plat-samsung/s5p-irq-pm.c [new file with mode: 0644]
arch/arm/plat-samsung/s5p-irq.c [new file with mode: 0644]
arch/arm/plat-samsung/s5p-pm.c [new file with mode: 0644]
arch/arm/plat-samsung/s5p-sleep.S [new file with mode: 0644]
arch/arm/plat-samsung/s5p-time.c [new file with mode: 0644]
arch/arm/plat-samsung/setup-mipiphy.c [new file with mode: 0644]
arch/arm/plat-spear/Kconfig
arch/arm/plat-spear/Makefile
arch/arm/plat-spear/clock.c [deleted file]
arch/arm/plat-spear/include/plat/clock.h [deleted file]
arch/arm/plat-spear/restart.c
arch/arm/plat-spear/time.c
arch/avr32/include/asm/kvm_para.h [new file with mode: 0644]
arch/avr32/include/asm/posix_types.h
arch/avr32/kernel/entry-avr32b.S
arch/avr32/kernel/signal.c
arch/blackfin/include/asm/kvm_para.h [new file with mode: 0644]
arch/blackfin/include/asm/posix_types.h
arch/blackfin/include/asm/thread_info.h
arch/blackfin/kernel/signal.c
arch/blackfin/kernel/trace.c
arch/blackfin/mach-bf561/boards/acvilon.c
arch/blackfin/mach-common/entry.S
arch/c6x/include/asm/kvm_para.h [new file with mode: 0644]
arch/c6x/kernel/signal.c
arch/cris/Kconfig
arch/cris/arch-v10/drivers/ds1302.c [deleted file]
arch/cris/arch-v10/drivers/pcf8563.c [deleted file]
arch/cris/arch-v10/kernel/fasttimer.c
arch/cris/arch-v10/kernel/kgdb.c
arch/cris/arch-v10/kernel/signal.c
arch/cris/arch-v10/kernel/time.c
arch/cris/arch-v10/lib/Makefile
arch/cris/arch-v32/drivers/cryptocop.c
arch/cris/arch-v32/kernel/ptrace.c
arch/cris/arch-v32/kernel/signal.c
arch/cris/arch-v32/kernel/time.c
arch/cris/include/arch-v32/arch/cache.h
arch/cris/include/asm/Kbuild
arch/cris/include/asm/posix_types.h
arch/cris/include/asm/rtc.h [deleted file]
arch/cris/kernel/ptrace.c
arch/cris/kernel/time.c
arch/cris/kernel/vmlinux.lds.S
arch/cris/mm/fault.c
arch/frv/include/asm/kvm_para.h [new file with mode: 0644]
arch/frv/include/asm/posix_types.h
arch/frv/include/asm/thread_info.h
arch/frv/kernel/entry.S
arch/frv/kernel/signal.c
arch/h8300/include/asm/kvm_para.h [new file with mode: 0644]
arch/h8300/include/asm/posix_types.h
arch/h8300/kernel/signal.c
arch/hexagon/include/asm/kvm_para.h [new file with mode: 0644]
arch/hexagon/kernel/signal.c
arch/ia64/include/asm/kvm_host.h
arch/ia64/include/asm/kvm_para.h
arch/ia64/include/asm/posix_types.h
arch/ia64/include/asm/thread_info.h
arch/ia64/kernel/perfmon.c
arch/ia64/kernel/process.c
arch/ia64/kernel/signal.c
arch/ia64/kernel/sys_ia64.c
arch/ia64/kvm/kvm-ia64.c
arch/m32r/include/asm/posix_types.h
arch/m32r/kernel/signal.c
arch/m68k/include/asm/kvm_para.h [new file with mode: 0644]
arch/m68k/include/asm/posix_types.h
arch/m68k/kernel/signal.c
arch/microblaze/Kconfig
arch/microblaze/include/asm/kvm_para.h [new file with mode: 0644]
arch/microblaze/include/asm/thread_info.h
arch/microblaze/kernel/entry.S
arch/microblaze/kernel/mcount.S
arch/microblaze/kernel/process.c
arch/microblaze/kernel/signal.c
arch/microblaze/mm/fault.c
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/alchemy/devboards/db1200.c
arch/mips/alchemy/devboards/db1300.c
arch/mips/alchemy/devboards/db1550.c
arch/mips/ath79/Kconfig
arch/mips/ath79/Makefile
arch/mips/ath79/clock.c
arch/mips/ath79/common.c
arch/mips/ath79/dev-common.c
arch/mips/ath79/dev-gpio-buttons.c
arch/mips/ath79/dev-leds-gpio.c
arch/mips/ath79/dev-wmac.c
arch/mips/ath79/early_printk.c
arch/mips/ath79/gpio.c
arch/mips/ath79/irq.c
arch/mips/ath79/mach-db120.c [new file with mode: 0644]
arch/mips/ath79/mach-pb44.c
arch/mips/ath79/mach-ubnt-xm.c
arch/mips/ath79/machtypes.h
arch/mips/ath79/pci.c [new file with mode: 0644]
arch/mips/ath79/pci.h [new file with mode: 0644]
arch/mips/ath79/setup.c
arch/mips/bcm63xx/boards/Makefile
arch/mips/cavium-octeon/setup.c
arch/mips/cavium-octeon/smp.c
arch/mips/fw/arc/Makefile
arch/mips/include/asm/clkdev.h [new file with mode: 0644]
arch/mips/include/asm/kvm_para.h [new file with mode: 0644]
arch/mips/include/asm/mach-ath79/ar71xx_regs.h
arch/mips/include/asm/mach-ath79/ath79.h
arch/mips/include/asm/mach-ath79/irq.h
arch/mips/include/asm/mach-ath79/pci-ath724x.h [deleted file]
arch/mips/include/asm/mach-ath79/pci.h [new file with mode: 0644]
arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h [new file with mode: 0644]
arch/mips/include/asm/mach-lantiq/falcon/irq.h [new file with mode: 0644]
arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h [new file with mode: 0644]
arch/mips/include/asm/mach-lantiq/gpio.h [new file with mode: 0644]
arch/mips/include/asm/mach-lantiq/lantiq.h
arch/mips/include/asm/mach-lantiq/lantiq_platform.h
arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
arch/mips/include/asm/mips-boards/generic.h
arch/mips/include/asm/module.h
arch/mips/include/asm/octeon/cvmx-pcieep-defs.h [deleted file]
arch/mips/include/asm/pci.h
arch/mips/include/asm/posix_types.h
arch/mips/include/asm/prom.h
arch/mips/include/asm/setup.h
arch/mips/include/asm/sparsemem.h
arch/mips/include/asm/stat.h
arch/mips/include/asm/termios.h
arch/mips/include/asm/traps.h
arch/mips/include/asm/uasm.h
arch/mips/jz4740/Makefile
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/kernel/proc.c
arch/mips/kernel/prom.c
arch/mips/kernel/setup.c
arch/mips/kernel/signal-common.h
arch/mips/kernel/signal.c
arch/mips/kernel/signal32.c
arch/mips/kernel/signal_n32.c
arch/mips/kernel/smp.c
arch/mips/kernel/traps.c
arch/mips/lantiq/Kconfig
arch/mips/lantiq/Makefile
arch/mips/lantiq/Platform
arch/mips/lantiq/clk.c
arch/mips/lantiq/clk.h
arch/mips/lantiq/devices.c [deleted file]
arch/mips/lantiq/devices.h [deleted file]
arch/mips/lantiq/dts/Makefile [new file with mode: 0644]
arch/mips/lantiq/dts/danube.dtsi [new file with mode: 0644]
arch/mips/lantiq/dts/easy50712.dts [new file with mode: 0644]
arch/mips/lantiq/early_printk.c
arch/mips/lantiq/falcon/Makefile [new file with mode: 0644]
arch/mips/lantiq/falcon/prom.c [new file with mode: 0644]
arch/mips/lantiq/falcon/reset.c [new file with mode: 0644]
arch/mips/lantiq/falcon/sysctrl.c [new file with mode: 0644]
arch/mips/lantiq/irq.c
arch/mips/lantiq/machtypes.h [deleted file]
arch/mips/lantiq/prom.c
arch/mips/lantiq/prom.h
arch/mips/lantiq/setup.c [deleted file]
arch/mips/lantiq/xway/Kconfig [deleted file]
arch/mips/lantiq/xway/Makefile
arch/mips/lantiq/xway/clk-ase.c [deleted file]
arch/mips/lantiq/xway/clk-xway.c [deleted file]
arch/mips/lantiq/xway/clk.c [new file with mode: 0644]
arch/mips/lantiq/xway/devices.c [deleted file]
arch/mips/lantiq/xway/devices.h [deleted file]
arch/mips/lantiq/xway/dma.c
arch/mips/lantiq/xway/ebu.c [deleted file]
arch/mips/lantiq/xway/gpio.c
arch/mips/lantiq/xway/gpio_ebu.c [deleted file]
arch/mips/lantiq/xway/gpio_stp.c [deleted file]
arch/mips/lantiq/xway/mach-easy50601.c [deleted file]
arch/mips/lantiq/xway/mach-easy50712.c [deleted file]
arch/mips/lantiq/xway/pmu.c [deleted file]
arch/mips/lantiq/xway/prom-ase.c [deleted file]
arch/mips/lantiq/xway/prom-xway.c [deleted file]
arch/mips/lantiq/xway/prom.c [new file with mode: 0644]
arch/mips/lantiq/xway/reset.c
arch/mips/lantiq/xway/setup-ase.c [deleted file]
arch/mips/lantiq/xway/setup-xway.c [deleted file]
arch/mips/lantiq/xway/sysctrl.c [new file with mode: 0644]
arch/mips/mm/c-octeon.c
arch/mips/mm/c-r4k.c
arch/mips/oprofile/Makefile
arch/mips/oprofile/op_model_mipsxx.c
arch/mips/pci/Makefile
arch/mips/pci/fixup-lantiq.c [new file with mode: 0644]
arch/mips/pci/ops-loongson2.c
arch/mips/pci/pci-ar71xx.c [new file with mode: 0644]
arch/mips/pci/pci-ar724x.c [new file with mode: 0644]
arch/mips/pci/pci-ath724x.c [deleted file]
arch/mips/pci/pci-lantiq.c
arch/mips/pci/pci.c
arch/mips/pmc-sierra/yosemite/Makefile
arch/mips/pmc-sierra/yosemite/setup.c
arch/mips/pnx833x/common/platform.c
arch/mips/powertv/Makefile
arch/mips/powertv/asic/Makefile
arch/mips/powertv/pci/Makefile
arch/mips/rb532/devices.c
arch/mips/sni/setup.c
arch/mn10300/include/asm/kvm_para.h [new file with mode: 0644]
arch/mn10300/include/asm/posix_types.h
arch/mn10300/kernel/signal.c
arch/openrisc/Kconfig
arch/openrisc/include/asm/Kbuild
arch/openrisc/include/asm/kvm_para.h [new file with mode: 0644]
arch/openrisc/include/asm/uaccess.h
arch/openrisc/kernel/signal.c
arch/openrisc/lib/string.S
arch/parisc/Kconfig
arch/parisc/include/asm/kvm_para.h [new file with mode: 0644]
arch/parisc/include/asm/posix_types.h
arch/parisc/include/asm/smp.h
arch/parisc/include/asm/stat.h
arch/parisc/include/asm/thread_info.h
arch/parisc/include/asm/uaccess.h
arch/parisc/kernel/entry.S
arch/parisc/kernel/parisc_ksyms.c
arch/parisc/kernel/signal.c
arch/parisc/kernel/signal32.c
arch/parisc/kernel/vmlinux.lds.S
arch/parisc/lib/lusercopy.S
arch/powerpc/Kconfig
arch/powerpc/boot/dts/mpc8569mds.dts
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/dbell.h
arch/powerpc/include/asm/hvcall.h
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/include/asm/kvm.h
arch/powerpc/include/asm/kvm_asm.h
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/include/asm/kvm_book3s_asm.h
arch/powerpc/include/asm/kvm_booke.h
arch/powerpc/include/asm/kvm_booke_hv_asm.h [new file with mode: 0644]
arch/powerpc/include/asm/kvm_e500.h [deleted file]
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/kvm_para.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/include/asm/mmu-book3e.h
arch/powerpc/include/asm/posix_types.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/reg_booke.h
arch/powerpc/include/asm/stat.h
arch/powerpc/include/asm/switch_to.h
arch/powerpc/include/asm/thread_info.h
arch/powerpc/include/asm/time.h
arch/powerpc/include/asm/uaccess.h
arch/powerpc/include/asm/word-at-a-time.h [new file with mode: 0644]
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/cpu_setup_fsl_booke.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/head_44x.S
arch/powerpc/kernel/head_booke.h
arch/powerpc/kernel/head_fsl_booke.S
arch/powerpc/kernel/idle_power7.S
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/signal.c
arch/powerpc/kernel/signal.h
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/time.c
arch/powerpc/kvm/44x.c
arch/powerpc/kvm/44x_emulate.c
arch/powerpc/kvm/Kconfig
arch/powerpc/kvm/Makefile
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_64_slb.S
arch/powerpc/kvm/book3s_64_vio.c [new file with mode: 0644]
arch/powerpc/kvm/book3s_64_vio_hv.c
arch/powerpc/kvm/book3s_emulate.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_interrupts.S
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/book3s_pr_papr.c
arch/powerpc/kvm/book3s_segment.S
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/booke.h
arch/powerpc/kvm/booke_emulate.c
arch/powerpc/kvm/booke_interrupts.S
arch/powerpc/kvm/bookehv_interrupts.S [new file with mode: 0644]
arch/powerpc/kvm/e500.c
arch/powerpc/kvm/e500.h [new file with mode: 0644]
arch/powerpc/kvm/e500_emulate.c
arch/powerpc/kvm/e500_tlb.c
arch/powerpc/kvm/e500_tlb.h [deleted file]
arch/powerpc/kvm/e500mc.c [new file with mode: 0644]
arch/powerpc/kvm/emulate.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/kvm/timing.h
arch/powerpc/lib/string.S
arch/powerpc/mm/mmu_context_nohash.c
arch/powerpc/platforms/cell/spufs/inode.c
arch/s390/Kconfig
arch/s390/hypfs/inode.c
arch/s390/include/asm/bitops.h
arch/s390/include/asm/cio.h
arch/s390/include/asm/cmpxchg.h
arch/s390/include/asm/cputime.h
arch/s390/include/asm/ctl_reg.h
arch/s390/include/asm/current.h
arch/s390/include/asm/elf.h
arch/s390/include/asm/futex.h
arch/s390/include/asm/idals.h
arch/s390/include/asm/io.h
arch/s390/include/asm/irq.h
arch/s390/include/asm/kexec.h
arch/s390/include/asm/kmap_types.h
arch/s390/include/asm/kvm.h
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/kvm_para.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/module.h
arch/s390/include/asm/os_info.h
arch/s390/include/asm/percpu.h
arch/s390/include/asm/pgalloc.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/posix_types.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/rwsem.h
arch/s390/include/asm/sclp.h
arch/s390/include/asm/setup.h
arch/s390/include/asm/sfp-util.h
arch/s390/include/asm/string.h
arch/s390/include/asm/thread_info.h
arch/s390/include/asm/timer.h
arch/s390/include/asm/tlb.h
arch/s390/include/asm/tlbflush.h
arch/s390/include/asm/types.h
arch/s390/include/asm/uaccess.h
arch/s390/include/asm/vdso.h
arch/s390/kernel/base.S
arch/s390/kernel/compat_signal.c
arch/s390/kernel/early.c
arch/s390/kernel/entry.h
arch/s390/kernel/head_kdump.S
arch/s390/kernel/ipl.c
arch/s390/kernel/irq.c
arch/s390/kernel/machine_kexec.c
arch/s390/kernel/os_info.c
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/setup.c
arch/s390/kernel/signal.c
arch/s390/kernel/smp.c
arch/s390/kernel/sysinfo.c
arch/s390/kvm/diag.c
arch/s390/kvm/intercept.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/kvm/priv.c
arch/s390/lib/uaccess_mvcos.c
arch/s390/lib/uaccess_std.c
arch/s390/mm/maccess.c
arch/s390/mm/vmem.c
arch/s390/oprofile/hwsampler.c
arch/score/include/asm/kvm_para.h [new file with mode: 0644]
arch/score/kernel/signal.c
arch/sh/boards/mach-migor/setup.c
arch/sh/include/asm/kvm_para.h [new file with mode: 0644]
arch/sh/include/asm/posix_types_32.h
arch/sh/include/asm/posix_types_64.h
arch/sh/include/asm/thread_info.h
arch/sh/kernel/signal_32.c
arch/sh/kernel/signal_64.c
arch/sh/kernel/smp.c
arch/sparc/Kconfig
arch/sparc/include/asm/Kbuild
arch/sparc/include/asm/asi.h
arch/sparc/include/asm/asmmacro.h
arch/sparc/include/asm/dma-mapping.h
arch/sparc/include/asm/kvm_para.h [new file with mode: 0644]
arch/sparc/include/asm/leon.h
arch/sparc/include/asm/leon_amba.h
arch/sparc/include/asm/pgtsrmmu.h
arch/sparc/include/asm/posix_types.h
arch/sparc/include/asm/psr.h
arch/sparc/include/asm/sections.h
arch/sparc/include/asm/thread_info_32.h
arch/sparc/include/asm/thread_info_64.h
arch/sparc/include/asm/uaccess_32.h
arch/sparc/include/asm/uaccess_64.h
arch/sparc/kernel/Makefile
arch/sparc/kernel/cpu.c
arch/sparc/kernel/entry.S
arch/sparc/kernel/etrap_32.S
arch/sparc/kernel/head_32.S
arch/sparc/kernel/ioport.c
arch/sparc/kernel/irq_32.c
arch/sparc/kernel/kernel.h
arch/sparc/kernel/leon_kernel.c
arch/sparc/kernel/leon_pmc.c
arch/sparc/kernel/leon_smp.c
arch/sparc/kernel/process_32.c
arch/sparc/kernel/prom_common.c
arch/sparc/kernel/rtrap_32.S
arch/sparc/kernel/setup_32.c
arch/sparc/kernel/signal32.c
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/signal_64.c
arch/sparc/kernel/sys_sparc_64.c
arch/sparc/kernel/trampoline_32.S
arch/sparc/kernel/traps_64.c
arch/sparc/kernel/vmlinux.lds.S
arch/sparc/kernel/wof.S
arch/sparc/kernel/wuf.S
arch/sparc/lib/Makefile
arch/sparc/lib/ksyms.c
arch/sparc/lib/strlen_user_32.S [deleted file]
arch/sparc/lib/strlen_user_64.S [deleted file]
arch/sparc/math-emu/math_64.c
arch/sparc/mm/Makefile
arch/sparc/mm/leon_mm.c
arch/sparc/mm/srmmu.c
arch/sparc/mm/srmmu_access.S [new file with mode: 0644]
arch/tile/Kconfig
arch/tile/Makefile
arch/tile/include/arch/spr_def_32.h
arch/tile/include/arch/spr_def_64.h
arch/tile/include/asm/Kbuild
arch/tile/include/asm/atomic_32.h
arch/tile/include/asm/bitops.h
arch/tile/include/asm/byteorder.h
arch/tile/include/asm/cachectl.h [new file with mode: 0644]
arch/tile/include/asm/compat.h
arch/tile/include/asm/elf.h
arch/tile/include/asm/futex.h
arch/tile/include/asm/hardwall.h
arch/tile/include/asm/hugetlb.h
arch/tile/include/asm/irqflags.h
arch/tile/include/asm/kexec.h
arch/tile/include/asm/kvm_para.h [new file with mode: 0644]
arch/tile/include/asm/mmu.h
arch/tile/include/asm/mmu_context.h
arch/tile/include/asm/module.h [new file with mode: 0644]
arch/tile/include/asm/page.h
arch/tile/include/asm/pgalloc.h
arch/tile/include/asm/pgtable.h
arch/tile/include/asm/pgtable_32.h
arch/tile/include/asm/pgtable_64.h
arch/tile/include/asm/processor.h
arch/tile/include/asm/setup.h
arch/tile/include/asm/syscalls.h
arch/tile/include/asm/thread_info.h
arch/tile/include/asm/tlbflush.h
arch/tile/include/asm/uaccess.h
arch/tile/include/asm/unistd.h
arch/tile/include/hv/drv_xgbe_intf.h
arch/tile/include/hv/hypervisor.h
arch/tile/kernel/Makefile
arch/tile/kernel/compat_signal.c
arch/tile/kernel/entry.S
arch/tile/kernel/hardwall.c
arch/tile/kernel/head_32.S
arch/tile/kernel/head_64.S
arch/tile/kernel/hvglue.lds
arch/tile/kernel/intvec_64.S
arch/tile/kernel/machine_kexec.c
arch/tile/kernel/module.c
arch/tile/kernel/proc.c
arch/tile/kernel/process.c
arch/tile/kernel/relocate_kernel.S [deleted file]
arch/tile/kernel/relocate_kernel_32.S [new file with mode: 0644]
arch/tile/kernel/relocate_kernel_64.S [new file with mode: 0644]
arch/tile/kernel/setup.c
arch/tile/kernel/signal.c
arch/tile/kernel/single_step.c
arch/tile/kernel/smp.c
arch/tile/kernel/sys.c
arch/tile/kernel/sysfs.c
arch/tile/kernel/tlb.c
arch/tile/kernel/traps.c
arch/tile/lib/atomic_32.c
arch/tile/lib/exports.c
arch/tile/lib/memchr_64.c
arch/tile/lib/memcpy_64.c
arch/tile/lib/memcpy_tile64.c
arch/tile/lib/strchr_64.c
arch/tile/lib/string-endian.h [new file with mode: 0644]
arch/tile/lib/strlen_64.c
arch/tile/lib/usercopy_32.S
arch/tile/lib/usercopy_64.S
arch/tile/mm/fault.c
arch/tile/mm/homecache.c
arch/tile/mm/hugetlbpage.c
arch/tile/mm/init.c
arch/tile/mm/migrate.h
arch/tile/mm/migrate_32.S
arch/tile/mm/migrate_64.S
arch/tile/mm/pgtable.c
arch/um/Makefile
arch/um/include/asm/kvm_para.h [new file with mode: 0644]
arch/um/include/shared/frame_kern.h
arch/um/kernel/process.c
arch/um/kernel/reboot.c
arch/um/kernel/signal.c
arch/um/kernel/trap.c
arch/unicore32/include/asm/kvm_para.h [new file with mode: 0644]
arch/unicore32/kernel/signal.c
arch/x86/Kbuild
arch/x86/Kconfig
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/eboot.h
arch/x86/ia32/ia32_signal.c
arch/x86/include/asm/acpi.h
arch/x86/include/asm/bitops.h
arch/x86/include/asm/dma-contiguous.h [new file with mode: 0644]
arch/x86/include/asm/dma-mapping.h
arch/x86/include/asm/ftrace.h
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/kvm_para.h
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/posix_types_32.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/pvclock-abi.h
arch/x86/include/asm/realmode.h [new file with mode: 0644]
arch/x86/include/asm/sighandling.h
arch/x86/include/asm/sta2x11.h [new file with mode: 0644]
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/trampoline.h [deleted file]
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uaccess_32.h
arch/x86/include/asm/uaccess_64.h
arch/x86/include/asm/word-at-a-time.h
arch/x86/include/asm/xen/events.h
arch/x86/include/asm/xen/page.h
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/Makefile
arch/x86/kernel/acpi/realmode/.gitignore [deleted file]
arch/x86/kernel/acpi/realmode/Makefile [deleted file]
arch/x86/kernel/acpi/realmode/bioscall.S [deleted file]
arch/x86/kernel/acpi/realmode/copy.S [deleted file]
arch/x86/kernel/acpi/realmode/regs.c [deleted file]
arch/x86/kernel/acpi/realmode/video-bios.c [deleted file]
arch/x86/kernel/acpi/realmode/video-mode.c [deleted file]
arch/x86/kernel/acpi/realmode/video-vesa.c [deleted file]
arch/x86/kernel/acpi/realmode/video-vga.c [deleted file]
arch/x86/kernel/acpi/realmode/wakemain.c [deleted file]
arch/x86/kernel/acpi/realmode/wakeup.S [deleted file]
arch/x86/kernel/acpi/realmode/wakeup.h [deleted file]
arch/x86/kernel/acpi/realmode/wakeup.lds.S [deleted file]
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/acpi/sleep.h
arch/x86/kernel/acpi/wakeup_rm.S [deleted file]
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mcheck/mce-apei.c
arch/x86/kernel/cpu/mcheck/mce-severity.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mtrr/cleanup.c
arch/x86/kernel/e820.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/ftrace.c
arch/x86/kernel/head32.c
arch/x86/kernel/head64.c
arch/x86/kernel/head_32.S
arch/x86/kernel/head_64.S
arch/x86/kernel/hpet.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/nmi.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/pci-nommu.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/reboot.c
arch/x86/kernel/reboot_32.S [deleted file]
arch/x86/kernel/setup.c
arch/x86/kernel/signal.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tboot.c
arch/x86/kernel/trampoline.c [deleted file]
arch/x86/kernel/trampoline_32.S [deleted file]
arch/x86/kernel/trampoline_64.S [deleted file]
arch/x86/kernel/traps.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kvm/Kconfig
arch/x86/kvm/cpuid.c
arch/x86/kvm/emulate.c
arch/x86/kvm/i8254.c
arch/x86/kvm/i8254.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu_audit.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/lib/usercopy.c
arch/x86/lib/usercopy_32.c
arch/x86/lib/usercopy_64.c
arch/x86/mm/init.c
arch/x86/mm/numa.c
arch/x86/mm/numa_emulation.c
arch/x86/mm/pat.c
arch/x86/mm/srat.c
arch/x86/pci/xen.c
arch/x86/realmode/Makefile [new file with mode: 0644]
arch/x86/realmode/init.c [new file with mode: 0644]
arch/x86/realmode/rm/.gitignore [new file with mode: 0644]
arch/x86/realmode/rm/Makefile [new file with mode: 0644]
arch/x86/realmode/rm/bioscall.S [new file with mode: 0644]
arch/x86/realmode/rm/copy.S [new file with mode: 0644]
arch/x86/realmode/rm/header.S [new file with mode: 0644]
arch/x86/realmode/rm/realmode.h [new file with mode: 0644]
arch/x86/realmode/rm/realmode.lds.S [new file with mode: 0644]
arch/x86/realmode/rm/reboot_32.S [new file with mode: 0644]
arch/x86/realmode/rm/regs.c [new file with mode: 0644]
arch/x86/realmode/rm/stack.S [new file with mode: 0644]
arch/x86/realmode/rm/trampoline_32.S [new file with mode: 0644]
arch/x86/realmode/rm/trampoline_64.S [new file with mode: 0644]
arch/x86/realmode/rm/trampoline_common.S [new file with mode: 0644]
arch/x86/realmode/rm/video-bios.c [new file with mode: 0644]
arch/x86/realmode/rm/video-mode.c [new file with mode: 0644]
arch/x86/realmode/rm/video-vesa.c [new file with mode: 0644]
arch/x86/realmode/rm/video-vga.c [new file with mode: 0644]
arch/x86/realmode/rm/wakemain.c [new file with mode: 0644]
arch/x86/realmode/rm/wakeup.h [new file with mode: 0644]
arch/x86/realmode/rm/wakeup_asm.S [new file with mode: 0644]
arch/x86/realmode/rmpiggy.S [new file with mode: 0644]
arch/x86/syscalls/syscall_32.tbl
arch/x86/syscalls/syscall_64.tbl
arch/x86/tools/relocs.c
arch/x86/um/signal.c
arch/x86/xen/debugfs.c
arch/x86/xen/debugfs.h
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/x86/xen/smp.c
arch/x86/xen/smp.h [new file with mode: 0644]
arch/x86/xen/spinlock.c
arch/x86/xen/xen-ops.h
arch/xtensa/include/asm/kvm_para.h [new file with mode: 0644]
arch/xtensa/kernel/signal.c
block/Kconfig.iosched
block/blk-cgroup.c
block/blk-cgroup.h
block/blk-core.c
block/blk-ioc.c
block/blk-sysfs.c
block/blk-throttle.c
block/blk.h
block/cfq-iosched.c
block/cfq.h [deleted file]
block/deadline-iosched.c
block/elevator.c
block/noop-iosched.c
drivers/Makefile
drivers/acpi/bgrt.c
drivers/acpi/bus.c
drivers/acpi/power.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/amba/Makefile
drivers/amba/tegra-ahb.c [new file with mode: 0644]
drivers/ata/sata_mv.c
drivers/atm/solos-pci.c
drivers/base/Kconfig
drivers/base/Makefile
drivers/base/dma-buf.c
drivers/base/dma-coherent.c
drivers/base/dma-contiguous.c [new file with mode: 0644]
drivers/base/node.c
drivers/base/regmap/regmap-i2c.c
drivers/base/soc.c
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_proc.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_req.h
drivers/block/drbd/drbd_worker.c
drivers/block/floppy.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/block/rbd.c
drivers/block/xen-blkfront.c
drivers/clk/Kconfig
drivers/clk/Makefile
drivers/clk/clk-divider.c
drivers/clk/clk-fixed-factor.c [new file with mode: 0644]
drivers/clk/clk-fixed-rate.c
drivers/clk/clk-gate.c
drivers/clk/clk-mux.c
drivers/clk/clk.c
drivers/clk/mxs/Makefile [new file with mode: 0644]
drivers/clk/mxs/clk-div.c [new file with mode: 0644]
drivers/clk/mxs/clk-frac.c [new file with mode: 0644]
drivers/clk/mxs/clk-imx23.c [new file with mode: 0644]
drivers/clk/mxs/clk-imx28.c [new file with mode: 0644]
drivers/clk/mxs/clk-pll.c [new file with mode: 0644]
drivers/clk/mxs/clk-ref.c [new file with mode: 0644]
drivers/clk/mxs/clk.c [new file with mode: 0644]
drivers/clk/mxs/clk.h [new file with mode: 0644]
drivers/clk/spear/Makefile [new file with mode: 0644]
drivers/clk/spear/clk-aux-synth.c [new file with mode: 0644]
drivers/clk/spear/clk-frac-synth.c [new file with mode: 0644]
drivers/clk/spear/clk-gpt-synth.c [new file with mode: 0644]
drivers/clk/spear/clk-vco-pll.c [new file with mode: 0644]
drivers/clk/spear/clk.c [new file with mode: 0644]
drivers/clk/spear/clk.h [new file with mode: 0644]
drivers/clk/spear/spear1310_clock.c [new file with mode: 0644]
drivers/clk/spear/spear1340_clock.c [new file with mode: 0644]
drivers/clk/spear/spear3xx_clock.c [new file with mode: 0644]
drivers/clk/spear/spear6xx_clock.c [new file with mode: 0644]
drivers/crypto/mv_cesa.c
drivers/dma/Kconfig
drivers/dma/amba-pl08x.c
drivers/dma/at_hdmac.c
drivers/dma/at_hdmac_regs.h
drivers/dma/coh901318.c
drivers/dma/coh901318_lli.c
drivers/dma/dw_dmac.c
drivers/dma/ep93xx_dma.c
drivers/dma/imx-dma.c
drivers/dma/imx-sdma.c
drivers/dma/intel_mid_dma.c
drivers/dma/ipu/ipu_idmac.c
drivers/dma/mv_xor.c
drivers/dma/mv_xor.h
drivers/dma/mxs-dma.c
drivers/dma/pch_dma.c
drivers/dma/pl330.c
drivers/dma/ste_dma40.c
drivers/edac/amd64_edac.c
drivers/edac/amd76x_edac.c
drivers/edac/cell_edac.c
drivers/edac/cpc925_edac.c
drivers/edac/e752x_edac.c
drivers/edac/e7xxx_edac.c
drivers/edac/edac_core.h
drivers/edac/edac_device.c
drivers/edac/edac_mc.c
drivers/edac/edac_mc_sysfs.c
drivers/edac/edac_module.h
drivers/edac/edac_pci.c
drivers/edac/i3000_edac.c
drivers/edac/i3200_edac.c
drivers/edac/i5000_edac.c
drivers/edac/i5100_edac.c
drivers/edac/i5400_edac.c
drivers/edac/i7300_edac.c
drivers/edac/i7core_edac.c
drivers/edac/i82443bxgx_edac.c
drivers/edac/i82860_edac.c
drivers/edac/i82875p_edac.c
drivers/edac/i82975x_edac.c
drivers/edac/mce_amd.h
drivers/edac/mpc85xx_edac.c
drivers/edac/mv64x60_edac.c
drivers/edac/pasemi_edac.c
drivers/edac/ppc4xx_edac.c
drivers/edac/r82600_edac.c
drivers/edac/sb_edac.c
drivers/edac/tile_edac.c
drivers/edac/x38_edac.c
drivers/gpio/Kconfig
drivers/gpio/Makefile
drivers/gpio/gpio-ich.c [new file with mode: 0644]
drivers/gpio/gpio-mm-lantiq.c [new file with mode: 0644]
drivers/gpio/gpio-mxs.c
drivers/gpio/gpio-samsung.c
drivers/gpio/gpio-sch.c
drivers/gpio/gpio-sta2x11.c [new file with mode: 0644]
drivers/gpio/gpio-stp-xway.c [new file with mode: 0644]
drivers/gpio/gpio-tps65910.c
drivers/gpio/gpio-wm831x.c
drivers/gpu/drm/cirrus/cirrus_drv.c
drivers/gpu/drm/cirrus/cirrus_drv.h
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sdvo_regs.h
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/mgag200/mgag200_drv.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_prime.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_prime.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/rv770d.h
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/udl/udl_gem.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
drivers/hwmon/Kconfig
drivers/hwmon/sch5627.c
drivers/hwmon/sch5636.c
drivers/hwmon/sch56xx-common.c
drivers/hwmon/sch56xx-common.h
drivers/i2c/Kconfig
drivers/i2c/algos/i2c-algo-bit.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/Makefile
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-designware-core.h
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-gpio.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-ixp2000.c [deleted file]
drivers/i2c/busses/i2c-mpc.c
drivers/i2c/busses/i2c-mxs.c
drivers/i2c/busses/i2c-nuc900.c
drivers/i2c/busses/i2c-ocores.c
drivers/i2c/busses/i2c-pca-platform.c
drivers/i2c/busses/i2c-pxa.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/busses/i2c-sh_mobile.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/busses/i2c-versatile.c
drivers/i2c/busses/i2c-xiic.c
drivers/i2c/i2c-core.c
drivers/i2c/i2c-dev.c
drivers/i2c/i2c-mux.c
drivers/i2c/muxes/Kconfig
drivers/i2c/muxes/Makefile
drivers/i2c/muxes/gpio-i2cmux.c [deleted file]
drivers/i2c/muxes/i2c-mux-gpio.c [new file with mode: 0644]
drivers/i2c/muxes/i2c-mux-pca9541.c [new file with mode: 0644]
drivers/i2c/muxes/i2c-mux-pca954x.c [new file with mode: 0644]
drivers/i2c/muxes/pca9541.c [deleted file]
drivers/i2c/muxes/pca954x.c [deleted file]
drivers/input/joystick/as5011.c
drivers/input/keyboard/pxa27x_keypad.c
drivers/input/misc/wm831x-on.c
drivers/input/touchscreen/wm831x-ts.c
drivers/iommu/Kconfig
drivers/iommu/Makefile
drivers/iommu/amd_iommu.c
drivers/iommu/exynos-iommu.c [new file with mode: 0644]
drivers/iommu/intel-iommu.c
drivers/iommu/iommu.c
drivers/iommu/omap-iommu.c
drivers/iommu/tegra-gart.c
drivers/iommu/tegra-smmu.c
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/led-class.c
drivers/leds/leds-da9052.c [new file with mode: 0644]
drivers/leds/leds-lm3530.c
drivers/leds/leds-lm3533.c [new file with mode: 0644]
drivers/leds/leds-lp5521.c
drivers/leds/leds-mc13783.c
drivers/leds/leds-pca955x.c
drivers/leds/ledtrig-backlight.c
drivers/leds/ledtrig-gpio.c
drivers/leds/ledtrig-heartbeat.c
drivers/leds/ledtrig-timer.c
drivers/leds/ledtrig-transient.c [new file with mode: 0644]
drivers/md/dm-mpath.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin-metadata.h
drivers/md/dm-thin.c
drivers/md/persistent-data/dm-transaction-manager.c
drivers/media/video/mx3_camera.c
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptctl.c
drivers/mfd/Kconfig
drivers/mfd/Makefile
drivers/mfd/ab8500-core.c
drivers/mfd/ab8500-debugfs.c
drivers/mfd/ab8500-gpadc.c
drivers/mfd/ab8500-i2c.c [deleted file]
drivers/mfd/ab8500-sysctrl.c
drivers/mfd/anatop-mfd.c
drivers/mfd/asic3.c
drivers/mfd/cs5535-mfd.c
drivers/mfd/da9052-core.c
drivers/mfd/da9052-i2c.c
drivers/mfd/da9052-spi.c
drivers/mfd/db8500-prcmu.c
drivers/mfd/intel_msic.c
drivers/mfd/janz-cmodio.c
drivers/mfd/lm3533-core.c [new file with mode: 0644]
drivers/mfd/lm3533-ctrlbank.c [new file with mode: 0644]
drivers/mfd/lpc_ich.c [new file with mode: 0644]
drivers/mfd/lpc_sch.c
drivers/mfd/max77693-irq.c [new file with mode: 0644]
drivers/mfd/max77693.c [new file with mode: 0644]
drivers/mfd/mc13xxx-core.c
drivers/mfd/mc13xxx-i2c.c [new file with mode: 0644]
drivers/mfd/mc13xxx-spi.c [new file with mode: 0644]
drivers/mfd/mc13xxx.h [new file with mode: 0644]
drivers/mfd/pcf50633-core.c
drivers/mfd/rc5t583.c
drivers/mfd/rdc321x-southbridge.c
drivers/mfd/s5m-core.c
drivers/mfd/sta2x11-mfd.c [new file with mode: 0644]
drivers/mfd/stmpe-spi.c
drivers/mfd/tps65090.c
drivers/mfd/tps65217.c
drivers/mfd/tps65910-irq.c
drivers/mfd/tps65910.c
drivers/mfd/twl4030-irq.c
drivers/mfd/twl6040-core.c
drivers/mfd/twl6040-irq.c
drivers/mfd/vx855.c
drivers/mfd/wm831x-auxadc.c
drivers/mfd/wm831x-core.c
drivers/mfd/wm831x-irq.c
drivers/mfd/wm8350-core.c
drivers/mfd/wm8350-i2c.c
drivers/mfd/wm8400-core.c
drivers/mfd/wm8994-core.c
drivers/mfd/wm8994-regmap.c
drivers/misc/ab8500-pwm.c
drivers/mmc/card/block.c
drivers/mmc/card/queue.c
drivers/mmc/core/bus.c
drivers/mmc/core/cd-gpio.c
drivers/mmc/core/core.c
drivers/mmc/core/mmc.c
drivers/mmc/core/sdio.c
drivers/mmc/core/sdio_irq.c
drivers/mmc/host/Kconfig
drivers/mmc/host/Makefile
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/davinci_mmc.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/imxmmc.c [deleted file]
drivers/mmc/host/imxmmc.h [deleted file]
drivers/mmc/host/mmci.c
drivers/mmc/host/mvsdio.c
drivers/mmc/host/mxcmmc.c
drivers/mmc/host/mxs-mmc.c
drivers/mmc/host/omap.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-pltfm.c
drivers/mmc/host/sdhci-spear.c
drivers/mmc/host/sdhci-tegra.c
drivers/mmc/host/sdhci.c
drivers/mtd/Kconfig
drivers/mtd/bcm63xxpart.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/cmdlinepart.c
drivers/mtd/devices/block2mtd.c
drivers/mtd/devices/docg3.c
drivers/mtd/devices/m25p80.c
drivers/mtd/devices/spear_smi.c
drivers/mtd/lpddr/qinfo_probe.c
drivers/mtd/maps/Kconfig
drivers/mtd/maps/intel_vr_nor.c
drivers/mtd/maps/lantiq-flash.c
drivers/mtd/maps/pci.c
drivers/mtd/maps/scb2_flash.c
drivers/mtd/maps/wr_sbc82xx_flash.c
drivers/mtd/mtdcore.c
drivers/mtd/mtdpart.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/alauda.c
drivers/mtd/nand/atmel_nand.c
drivers/mtd/nand/au1550nd.c
drivers/mtd/nand/bcm_umi_bch.c
drivers/mtd/nand/bcm_umi_nand.c
drivers/mtd/nand/bf5xx_nand.c
drivers/mtd/nand/cafe_nand.c
drivers/mtd/nand/cs553x_nand.c
drivers/mtd/nand/denali.c
drivers/mtd/nand/docg4.c
drivers/mtd/nand/fsl_elbc_nand.c
drivers/mtd/nand/fsl_ifc_nand.c
drivers/mtd/nand/fsmc_nand.c
drivers/mtd/nand/gpmi-nand/bch-regs.h
drivers/mtd/nand/gpmi-nand/gpmi-lib.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.h
drivers/mtd/nand/h1910.c
drivers/mtd/nand/jz4740_nand.c
drivers/mtd/nand/mpc5121_nfc.c
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_bbt.c
drivers/mtd/nand/nand_ids.c
drivers/mtd/nand/nandsim.c
drivers/mtd/nand/omap2.c
drivers/mtd/nand/orion_nand.c
drivers/mtd/nand/pasemi_nand.c
drivers/mtd/nand/plat_nand.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/mtd/nand/r852.c
drivers/mtd/nand/sh_flctl.c
drivers/mtd/nand/sm_common.c
drivers/mtd/onenand/onenand_base.c
drivers/net/cris/eth_v10.c
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/freescale/fec_mpc52xx.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/profile.c
drivers/net/ethernet/rdc/r6040.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/realtek/8139too.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/ti/Kconfig
drivers/net/usb/asix.c
drivers/net/usb/mcs7830.c
drivers/net/usb/qmi_wwan.c
drivers/net/virtio_net.c
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/Makefile
drivers/net/wireless/iwlwifi/iwl-2000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
drivers/net/wireless/iwlwifi/iwl-agn-sta.c
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-phy-db.c [deleted file]
drivers/net/wireless/iwlwifi/iwl-phy-db.h [deleted file]
drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
drivers/net/wireless/ti/wl1251/sdio.c
drivers/net/wireless/ti/wl1251/spi.c
drivers/net/wireless/ti/wlcore/acx.c
drivers/net/wireless/ti/wlcore/acx.h
drivers/net/wireless/ti/wlcore/rx.c
drivers/net/xen-netback/netback.c
drivers/nfc/pn544_hci.c
drivers/of/of_i2c.c
drivers/of/of_pci_irq.c
drivers/pci/pci.c
drivers/pinctrl/pinctrl-nomadik.c
drivers/pinctrl/spear/Kconfig
drivers/pinctrl/spear/Makefile
drivers/pinctrl/spear/pinctrl-spear.h
drivers/pinctrl/spear/pinctrl-spear1310.c [new file with mode: 0644]
drivers/pinctrl/spear/pinctrl-spear1340.c [new file with mode: 0644]
drivers/pinctrl/spear/pinctrl-spear3xx.c
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/apple-gmux.c
drivers/platform/x86/dell-laptop.c
drivers/platform/x86/fujitsu-tablet.c
drivers/platform/x86/hdaps.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/toshiba_acpi.c
drivers/platform/x86/xo1-rfkill.c
drivers/power/Kconfig
drivers/power/ab8500_btemp.c
drivers/power/ab8500_charger.c
drivers/power/ab8500_fg.c
drivers/power/charger-manager.c
drivers/power/ds2781_battery.c
drivers/power/isp1704_charger.c
drivers/power/max17042_battery.c
drivers/power/power_supply_sysfs.c
drivers/power/sbs-battery.c
drivers/power/smb347-charger.c
drivers/power/wm831x_power.c
drivers/rapidio/Kconfig
drivers/rapidio/devices/Makefile
drivers/rapidio/devices/tsi721.c
drivers/rapidio/devices/tsi721.h
drivers/rapidio/devices/tsi721_dma.c [new file with mode: 0644]
drivers/rapidio/rio.c
drivers/regulator/anatop-regulator.c
drivers/regulator/tps65910-regulator.c
drivers/regulator/wm831x-dcdc.c
drivers/regulator/wm831x-isink.c
drivers/regulator/wm831x-ldo.c
drivers/remoteproc/remoteproc_core.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/rtc-cmos.c
drivers/rtc/rtc-ds1307.c
drivers/rtc/rtc-ep93xx.c
drivers/rtc/rtc-imxdi.c
drivers/rtc/rtc-lpc32xx.c
drivers/rtc/rtc-m41t93.c
drivers/rtc/rtc-pcf8563.c
drivers/rtc/rtc-pl031.c
drivers/rtc/rtc-s3c.c
drivers/rtc/rtc-spear.c
drivers/rtc/rtc-tegra.c
drivers/rtc/rtc-wm831x.c
drivers/s390/block/dasd_int.h
drivers/s390/char/sclp_cmd.c
drivers/s390/char/sclp_sdias.c
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/bfa/bfad_attr.c
drivers/scsi/bfa/bfad_im.c
drivers/scsi/bfa/bfad_im.h
drivers/scsi/bnx2fc/bnx2fc.h
drivers/scsi/bnx2fc/bnx2fc_els.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/bnx2fc/bnx2fc_tgt.c
drivers/scsi/fcoe/Makefile
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe.h
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/fcoe/fcoe_sysfs.c [new file with mode: 0644]
drivers/scsi/fcoe/fcoe_transport.c
drivers/scsi/qla2xxx/Kconfig
drivers/scsi/qla2xxx/Makefile
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_nx.h
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c [new file with mode: 0644]
drivers/scsi/qla2xxx/qla_target.h [new file with mode: 0644]
drivers/scsi/qla2xxx/tcm_qla2xxx.c [new file with mode: 0644]
drivers/scsi/qla2xxx/tcm_qla2xxx.h [new file with mode: 0644]
drivers/scsi/qla4xxx/ql4_attr.c
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_fw.h
drivers/scsi/qla4xxx/ql4_glbl.h
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_nx.c
drivers/scsi/qla4xxx/ql4_nx.h
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_pm.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_wait_scan.c
drivers/scsi/ufs/ufshcd.c
drivers/spi/Kconfig
drivers/spi/spi-imx.c
drivers/spi/spi-orion.c
drivers/staging/android/ashmem.c
drivers/tty/amiserial.c
drivers/tty/cyclades.c
drivers/tty/n_r3964.c
drivers/tty/pty.c
drivers/tty/serial/crisv10.c
drivers/tty/serial/imx.c
drivers/tty/serial/lantiq.c
drivers/tty/serial/sb1250-duart.c
drivers/tty/serial/zs.c
drivers/tty/synclink.c
drivers/tty/synclink_gt.c
drivers/tty/synclinkmp.c
drivers/tty/tty_io.c
drivers/tty/tty_ldisc.c
drivers/tty/tty_mutex.c
drivers/tty/tty_port.c
drivers/usb/host/ehci-mxc.c
drivers/usb/host/ehci-orion.c
drivers/usb/host/ehci-tegra.c
drivers/video/Kconfig
drivers/video/Makefile
drivers/video/auo_k1900fb.c [new file with mode: 0644]
drivers/video/auo_k1901fb.c [new file with mode: 0644]
drivers/video/auo_k190x.c [new file with mode: 0644]
drivers/video/auo_k190x.h [new file with mode: 0644]
drivers/video/backlight/Kconfig
drivers/video/backlight/Makefile
drivers/video/backlight/adp5520_bl.c
drivers/video/backlight/adp8860_bl.c
drivers/video/backlight/adp8870_bl.c
drivers/video/backlight/ams369fg06.c
drivers/video/backlight/apple_bl.c
drivers/video/backlight/backlight.c
drivers/video/backlight/corgi_lcd.c
drivers/video/backlight/cr_bllcd.c
drivers/video/backlight/da903x_bl.c
drivers/video/backlight/generic_bl.c
drivers/video/backlight/ili9320.c
drivers/video/backlight/jornada720_bl.c
drivers/video/backlight/jornada720_lcd.c
drivers/video/backlight/l4f00242t03.c
drivers/video/backlight/lcd.c
drivers/video/backlight/ld9040.c
drivers/video/backlight/lm3533_bl.c [new file with mode: 0644]
drivers/video/backlight/lms283gf05.c
drivers/video/backlight/ltv350qv.c
drivers/video/backlight/omap1_bl.c
drivers/video/backlight/pcf50633-backlight.c
drivers/video/backlight/progear_bl.c
drivers/video/backlight/s6e63m0.c
drivers/video/backlight/tdo24m.c
drivers/video/backlight/tosa_bl.c
drivers/video/backlight/tosa_lcd.c
drivers/video/backlight/wm831x_bl.c
drivers/video/bfin_adv7393fb.c
drivers/video/cobalt_lcdfb.c
drivers/video/ep93xx-fb.c
drivers/video/exynos/exynos_dp_core.c
drivers/video/exynos/exynos_dp_core.h
drivers/video/exynos/exynos_dp_reg.c
drivers/video/exynos/exynos_dp_reg.h
drivers/video/exynos/exynos_mipi_dsi.c
drivers/video/exynos/exynos_mipi_dsi_common.c
drivers/video/exynos/s6e8ax0.c
drivers/video/fb_defio.c
drivers/video/fbmem.c
drivers/video/fbsysfs.c
drivers/video/fsl-diu-fb.c
drivers/video/imxfb.c
drivers/video/intelfb/intelfbdrv.c
drivers/video/matrox/matroxfb_maven.c
drivers/video/mb862xx/mb862xx-i2c.c
drivers/video/mb862xx/mb862xxfbdrv.c
drivers/video/mbx/mbxfb.c
drivers/video/mxsfb.c
drivers/video/omap/Kconfig
drivers/video/omap2/displays/panel-acx565akm.c
drivers/video/omap2/displays/panel-generic-dpi.c
drivers/video/omap2/displays/panel-n8x0.c
drivers/video/omap2/displays/panel-taal.c
drivers/video/omap2/displays/panel-tfp410.c
drivers/video/omap2/displays/panel-tpo-td043mtea1.c
drivers/video/omap2/dss/Kconfig
drivers/video/omap2/dss/apply.c
drivers/video/omap2/dss/core.c
drivers/video/omap2/dss/dispc.c
drivers/video/omap2/dss/dispc.h
drivers/video/omap2/dss/display.c
drivers/video/omap2/dss/dpi.c
drivers/video/omap2/dss/dsi.c
drivers/video/omap2/dss/dss.c
drivers/video/omap2/dss/dss.h
drivers/video/omap2/dss/dss_features.c
drivers/video/omap2/dss/dss_features.h
drivers/video/omap2/dss/hdmi.c
drivers/video/omap2/dss/hdmi_panel.c
drivers/video/omap2/dss/manager.c
drivers/video/omap2/dss/overlay.c
drivers/video/omap2/dss/rfbi.c
drivers/video/omap2/dss/sdi.c
drivers/video/omap2/dss/ti_hdmi.h
drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
drivers/video/omap2/dss/venc.c
drivers/video/omap2/omapfb/omapfb-ioctl.c
drivers/video/omap2/omapfb/omapfb-main.c
drivers/video/omap2/omapfb/omapfb.h
drivers/video/omap2/vrfb.c
drivers/video/pxa3xx-gcu.c
drivers/video/s3c-fb.c
drivers/video/sh_mobile_hdmi.c
drivers/video/sis/init.h
drivers/video/sis/sis_main.c
drivers/video/skeletonfb.c
drivers/video/smscufx.c
drivers/video/udlfb.c
drivers/video/via/viafbdev.c
drivers/w1/masters/mxc_w1.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/da9052_wdt.c [new file with mode: 0644]
drivers/watchdog/iTCO_vendor.h
drivers/watchdog/iTCO_vendor_support.c
drivers/watchdog/iTCO_wdt.c
drivers/watchdog/imx2_wdt.c
drivers/watchdog/lantiq_wdt.c
drivers/watchdog/orion_wdt.c
drivers/watchdog/sp805_wdt.c
drivers/watchdog/via_wdt.c
drivers/watchdog/watchdog_core.c
drivers/watchdog/watchdog_core.h [new file with mode: 0644]
drivers/watchdog/watchdog_dev.c
drivers/watchdog/watchdog_dev.h [deleted file]
drivers/xen/Makefile
drivers/xen/acpi.c [new file with mode: 0644]
drivers/xen/events.c
drivers/xen/grant-table.c
drivers/xen/xen-acpi-processor.c
drivers/xen/xen-selfballoon.c
drivers/xen/xenbus/xenbus_comms.c
drivers/xen/xenbus/xenbus_comms.h
drivers/xen/xenbus/xenbus_dev_backend.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/affs/affs.h
fs/affs/inode.c
fs/afs/inode.c
fs/aio.c
fs/attr.c
fs/autofs4/inode.c
fs/bad_inode.c
fs/bfs/inode.c
fs/binfmt_elf.c
fs/binfmt_flat.c
fs/binfmt_misc.c
fs/bio.c
fs/block_dev.c
fs/btrfs/acl.c
fs/btrfs/backref.c
fs/btrfs/backref.h
fs/btrfs/btrfs_inode.h
fs/btrfs/check-integrity.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/delayed-ref.c
fs/btrfs/delayed-ref.h
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/export.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ioctl.h
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h
fs/btrfs/print-tree.c
fs/btrfs/reada.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/ulist.c
fs/btrfs/ulist.h
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/btrfs/xattr.c
fs/buffer.c
fs/ceph/export.c
fs/ceph/file.c
fs/ceph/ioctl.c
fs/ceph/ioctl.h
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/snap.c
fs/ceph/xattr.c
fs/cifs/Kconfig
fs/cifs/Makefile
fs/cifs/README
fs/cifs/cifs_debug.c
fs/cifs/cifs_debug.h
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/ioctl.c
fs/cifs/misc.c
fs/cifs/readdir.c
fs/cifs/smb1ops.c [new file with mode: 0644]
fs/cifs/smb2ops.c [new file with mode: 0644]
fs/cifs/transport.c
fs/coda/inode.c
fs/compat.c
fs/dcache.c
fs/debugfs/file.c
fs/direct-io.c
fs/ecryptfs/inode.c
fs/ecryptfs/super.c
fs/eventfd.c
fs/eventpoll.c
fs/exec.c
fs/exofs/Kbuild
fs/exofs/exofs.h
fs/exofs/inode.c
fs/exofs/super.c
fs/exofs/sys.c [new file with mode: 0644]
fs/exportfs/expfs.c
fs/ext2/balloc.c
fs/ext2/ialloc.c
fs/ext2/inode.c
fs/ext2/super.c
fs/ext2/xattr.c
fs/ext3/dir.c
fs/ext3/ext3.h
fs/ext3/hash.c
fs/ext3/ialloc.c
fs/ext3/inode.c
fs/ext3/super.c
fs/ext4/Kconfig
fs/ext4/balloc.c
fs/ext4/bitmap.c
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/ext4_extents.h
fs/ext4/ext4_jbd2.c
fs/ext4/ext4_jbd2.h
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mballoc.c
fs/ext4/mmp.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/ext4/xattr.h
fs/fat/dir.c
fs/fat/fat.h
fs/fat/fatent.c
fs/fat/inode.c
fs/fcntl.c
fs/file_table.c
fs/freevxfs/vxfs_inode.c
fs/fs-writeback.c
fs/fuse/file.c
fs/fuse/inode.c
fs/gfs2/export.c
fs/gfs2/super.c
fs/hfs/inode.c
fs/hfsplus/super.c
fs/hostfs/hostfs_kern.c
fs/hpfs/alloc.c
fs/hpfs/anode.c
fs/hpfs/buffer.c
fs/hpfs/dir.c
fs/hpfs/dnode.c
fs/hpfs/ea.c
fs/hpfs/hpfs.h
fs/hpfs/hpfs_fn.h
fs/hpfs/inode.c
fs/hpfs/map.c
fs/hpfs/namei.c
fs/hpfs/super.c
fs/hppfs/hppfs.c
fs/hugetlbfs/inode.c
fs/inode.c
fs/internal.h
fs/ioprio.c
fs/isofs/export.c
fs/jbd/checkpoint.c
fs/jbd/commit.c
fs/jbd/journal.c
fs/jbd/transaction.c
fs/jbd2/Kconfig
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/recovery.c
fs/jbd2/revoke.c
fs/jbd2/transaction.c
fs/jffs2/fs.c
fs/jffs2/jffs2_fs_sb.h
fs/jffs2/nodemgmt.c
fs/jffs2/os-linux.h
fs/jffs2/readinode.c
fs/jffs2/super.c
fs/jffs2/wbuf.c
fs/jffs2/xattr.c
fs/jffs2/xattr.h
fs/jfs/inode.c
fs/lockd/clntlock.c
fs/lockd/svc.c
fs/locks.c
fs/logfs/readwrite.c
fs/minix/inode.c
fs/namei.c
fs/namespace.c
fs/ncpfs/file.c
fs/ncpfs/inode.c
fs/ncpfs/ncp_fs_sb.h
fs/nfs/Kconfig
fs/nfs/Makefile
fs/nfs/blocklayout/blocklayout.c
fs/nfs/blocklayout/blocklayoutdev.c
fs/nfs/callback.c
fs/nfs/client.c
fs/nfs/delegation.c
fs/nfs/delegation.h
fs/nfs/dir.c
fs/nfs/direct.c
fs/nfs/file.c
fs/nfs/fscache.c
fs/nfs/fscache.h
fs/nfs/getroot.c
fs/nfs/idmap.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/namespace.c
fs/nfs/netns.h
fs/nfs/nfs2xdr.c
fs/nfs/nfs3proc.c
fs/nfs/nfs3xdr.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4filelayout.c
fs/nfs/nfs4filelayout.h
fs/nfs/nfs4filelayoutdev.c
fs/nfs/nfs4namespace.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4renewd.c
fs/nfs/nfs4state.c
fs/nfs/nfs4xdr.c
fs/nfs/objlayout/objio_osd.c
fs/nfs/objlayout/objlayout.c
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/proc.c
fs/nfs/read.c
fs/nfs/super.c
fs/nfs/write.c
fs/nfsd/auth.c
fs/nfsd/export.c
fs/nfsd/fault_inject.c
fs/nfsd/idmap.h
fs/nfsd/netns.h
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4idmap.c
fs/nfsd/nfs4recover.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfsctl.c
fs/nfsd/nfsfh.c
fs/nfsd/nfssvc.c
fs/nfsd/state.h
fs/nfsd/vfs.c
fs/nfsd/xdr4.h
fs/nilfs2/file.c
fs/nilfs2/inode.c
fs/nilfs2/ioctl.c
fs/nilfs2/namei.c
fs/nls/Kconfig
fs/nls/Makefile
fs/nls/mac-celtic.c [new file with mode: 0644]
fs/nls/mac-centeuro.c [new file with mode: 0644]
fs/nls/mac-croatian.c [new file with mode: 0644]
fs/nls/mac-cyrillic.c [new file with mode: 0644]
fs/nls/mac-gaelic.c [new file with mode: 0644]
fs/nls/mac-greek.c [new file with mode: 0644]
fs/nls/mac-iceland.c [new file with mode: 0644]
fs/nls/mac-inuit.c [new file with mode: 0644]
fs/nls/mac-roman.c [new file with mode: 0644]
fs/nls/mac-romanian.c [new file with mode: 0644]
fs/nls/mac-turkish.c [new file with mode: 0644]
fs/notify/fsnotify.c
fs/ntfs/file.c
fs/ntfs/inode.c
fs/ocfs2/blockcheck.c
fs/ocfs2/dlm/dlmast.c
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlmfs/dlmfs.c
fs/ocfs2/export.c
fs/ocfs2/inode.c
fs/ocfs2/ioctl.c
fs/ocfs2/move_extents.c
fs/ocfs2/namei.c
fs/ocfs2/symlink.c
fs/ocfs2/symlink.h
fs/omfs/inode.c
fs/open.c
fs/pipe.c
fs/pnode.c
fs/proc/array.c
fs/proc/base.c
fs/proc/inode.c
fs/proc/internal.h
fs/proc/task_mmu.c
fs/proc/task_nommu.c
fs/proc_namespace.c
fs/pstore/inode.c
fs/quota/dquot.c
fs/read_write.c
fs/readdir.c
fs/reiserfs/inode.c
fs/reiserfs/journal.c
fs/reiserfs/reiserfs.h
fs/reiserfs/resize.c
fs/reiserfs/super.c
fs/select.c
fs/signalfd.c
fs/splice.c
fs/statfs.c
fs/sync.c
fs/sysfs/inode.c
fs/sysv/inode.c
fs/ubifs/dir.c
fs/ubifs/super.c
fs/udf/inode.c
fs/udf/namei.c
fs/ufs/inode.c
fs/utimes.c
fs/xattr.c
fs/xfs/kmem.c
fs/xfs/kmem.h
fs/xfs/xfs_export.c
fs/xfs/xfs_file.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_super.c
fs/xfs/xfs_trans.c
fs/xfs/xfs_trans.h
include/asm-generic/Kbuild
include/asm-generic/bitsperlong.h
include/asm-generic/dma-coherent.h
include/asm-generic/dma-contiguous.h [new file with mode: 0644]
include/asm-generic/kvm_para.h [new file with mode: 0644]
include/asm-generic/pgtable.h
include/asm-generic/posix_types.h
include/asm-generic/word-at-a-time.h [new file with mode: 0644]
include/drm/drm_mem_util.h
include/linux/Kbuild
include/linux/amba/pl08x.h
include/linux/apple_bl.h
include/linux/bio.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/bootmem.h
include/linux/bug.h
include/linux/ceph/auth.h
include/linux/ceph/ceph_fs.h
include/linux/ceph/decode.h
include/linux/ceph/messenger.h
include/linux/ceph/osd_client.h
include/linux/ceph/osdmap.h
include/linux/clk-private.h
include/linux/clk-provider.h
include/linux/clk.h
include/linux/compat.h
include/linux/cpu.h
include/linux/cred.h
include/linux/crush/crush.h
include/linux/crush/mapper.h
include/linux/debugfs.h
include/linux/device.h
include/linux/dma-buf.h
include/linux/dma-contiguous.h [new file with mode: 0644]
include/linux/dmaengine.h
include/linux/drbd.h
include/linux/drbd_limits.h
include/linux/drbd_nl.h
include/linux/edac.h
include/linux/elevator.h
include/linux/errno.h
include/linux/eventfd.h
include/linux/exportfs.h
include/linux/fb.h
include/linux/fs.h
include/linux/fsl/mxs-dma.h
include/linux/fsnotify_backend.h
include/linux/genetlink.h
include/linux/gfp.h
include/linux/gpio-i2cmux.h [deleted file]
include/linux/huge_mm.h
include/linux/hugetlb.h
include/linux/i2c-mux-gpio.h [new file with mode: 0644]
include/linux/i2c-mux.h
include/linux/i2c.h
include/linux/interrupt.h
include/linux/iocontext.h
include/linux/iommu.h
include/linux/ioprio.h
include/linux/ipc_namespace.h
include/linux/jbd.h
include/linux/jbd2.h
include/linux/jbd_common.h
include/linux/kallsyms.h
include/linux/kcmp.h [new file with mode: 0644]
include/linux/kernel-page-flags.h
include/linux/kernel.h
include/linux/kexec.h
include/linux/key.h
include/linux/kmod.h
include/linux/kvm.h
include/linux/kvm_host.h
include/linux/lcd.h
include/linux/led-lm3530.h
include/linux/leds.h
include/linux/lglock.h
include/linux/lockd/bind.h
include/linux/memcontrol.h
include/linux/mempolicy.h
include/linux/mfd/abx500/ab8500.h
include/linux/mfd/anatop.h
include/linux/mfd/asic3.h
include/linux/mfd/da9052/da9052.h
include/linux/mfd/lm3533.h [new file with mode: 0644]
include/linux/mfd/lpc_ich.h [new file with mode: 0644]
include/linux/mfd/max77693-private.h [new file with mode: 0644]
include/linux/mfd/max77693.h [new file with mode: 0644]
include/linux/mfd/sta2x11-mfd.h [new file with mode: 0644]
include/linux/mfd/stmpe.h
include/linux/mfd/tps65910.h
include/linux/mfd/twl6040.h
include/linux/mfd/wm831x/core.h
include/linux/mfd/wm8350/core.h
include/linux/mfd/wm8400-private.h
include/linux/mfd/wm8994/core.h
include/linux/mfd/wm8994/registers.h
include/linux/mlx4/device.h
include/linux/mm.h
include/linux/mm_inline.h
include/linux/mm_types.h
include/linux/mmc/card.h
include/linux/mmc/dw_mmc.h
include/linux/mmc/host.h
include/linux/mmc/mmc.h
include/linux/mmc/mxs-mmc.h [new file with mode: 0644]
include/linux/mmdebug.h
include/linux/mmzone.h
include/linux/msdos_fs.h
include/linux/mtd/gpmi-nand.h
include/linux/mtd/mtd.h
include/linux/mtd/nand.h
include/linux/mv643xx_eth.h
include/linux/net.h
include/linux/netdevice.h
include/linux/nfs4.h
include/linux/nfs_fs.h
include/linux/nfs_fs_sb.h
include/linux/nfs_page.h
include/linux/nfs_xdr.h
include/linux/nfsd/export.h
include/linux/of_i2c.h
include/linux/of_pci.h
include/linux/oom.h
include/linux/page-isolation.h
include/linux/pagemap.h
include/linux/pci.h
include/linux/pci_ids.h
include/linux/power/charger-manager.h
include/linux/power/max17042_battery.h
include/linux/power_supply.h
include/linux/prctl.h
include/linux/res_counter.h
include/linux/rio.h
include/linux/rio_drv.h
include/linux/rmap.h
include/linux/rtc.h
include/linux/rtc/ds1307.h [new file with mode: 0644]
include/linux/sched.h
include/linux/security.h
include/linux/signal.h
include/linux/skbuff.h
include/linux/slab.h
include/linux/spi/orion_spi.h [deleted file]
include/linux/stmp_device.h [new file with mode: 0644]
include/linux/sunrpc/svc.h
include/linux/sunrpc/svcauth.h
include/linux/sunrpc/svcauth_gss.h
include/linux/swap.h
include/linux/syscalls.h
include/linux/task_work.h [new file with mode: 0644]
include/linux/thread_info.h
include/linux/tracehook.h
include/linux/tty.h
include/linux/types.h
include/linux/watchdog.h
include/linux/writeback.h
include/net/cipso_ipv4.h
include/net/dst.h
include/net/sock.h
include/scsi/fcoe_sysfs.h [new file with mode: 0644]
include/scsi/libfcoe.h
include/trace/events/jbd.h
include/trace/events/vmscan.h
include/trace/events/writeback.h
include/video/auo_k190xfb.h [new file with mode: 0644]
include/video/exynos_dp.h
include/video/exynos_mipi_dsim.h
include/video/omapdss.h
include/video/sh_mobile_hdmi.h
include/xen/acpi.h [new file with mode: 0644]
include/xen/events.h
include/xen/grant_table.h
include/xen/xenbus_dev.h
init/Kconfig
init/do_mounts.c
init/do_mounts_initrd.c
init/do_mounts_md.c
init/do_mounts_rd.c
init/initramfs.c
ipc/mq_sysctl.c
ipc/mqueue.c
ipc/shm.c
kernel/Makefile
kernel/cgroup.c
kernel/cpu.c
kernel/cpu_pm.c
kernel/cred.c
kernel/exit.c
kernel/fork.c
kernel/irq/manage.c
kernel/kallsyms.c
kernel/kcmp.c [new file with mode: 0644]
kernel/kmod.c
kernel/lglock.c [new file with mode: 0644]
kernel/pid_namespace.c
kernel/res_counter.c
kernel/resource.c
kernel/signal.c
kernel/sys.c
kernel/sys_ni.c
kernel/task_work.c [new file with mode: 0644]
kernel/trace/ring_buffer.c
kernel/watchdog.c
lib/Kconfig
lib/Makefile
lib/bitmap.c
lib/dma-debug.c
lib/dynamic_queue_limits.c
lib/list_debug.c
lib/radix-tree.c
lib/spinlock_debug.c
lib/stmp_device.c [new file with mode: 0644]
lib/string_helpers.c
lib/strncpy_from_user.c
lib/strnlen_user.c [new file with mode: 0644]
lib/swiotlb.c
lib/test-kstrtox.c
lib/vsprintf.c
mm/Kconfig
mm/Makefile
mm/bootmem.c
mm/cleancache.c
mm/compaction.c
mm/filemap.c
mm/filemap_xip.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/madvise.c
mm/memblock.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/mmap.c
mm/mmzone.c
mm/mremap.c
mm/nobootmem.c
mm/nommu.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/page_isolation.c
mm/pgtable-generic.c
mm/process_vm_access.c
mm/readahead.c
mm/rmap.c
mm/shmem.c
mm/slub.c
mm/sparse.c
mm/swap.c
mm/swapfile.c
mm/thrash.c [deleted file]
mm/truncate.c
mm/util.c
mm/vmalloc.c
mm/vmscan.c
mm/vmstat.c
net/bluetooth/rfcomm/tty.c
net/ceph/auth_none.c
net/ceph/auth_x.c
net/ceph/crush/crush.c
net/ceph/crush/mapper.c
net/ceph/messenger.c
net/ceph/osd_client.c
net/ceph/osdmap.c
net/core/drop_monitor.c
net/core/sock.c
net/ipv4/esp4.c
net/ipv4/inet_connection_sock.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_memcontrol.c
net/ipv6/esp6.c
net/ipv6/ip6_output.c
net/ipv6/tcp_ipv6.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_netlink.c
net/mac80211/mlme.c
net/mac80211/tx.c
net/mac80211/util.c
net/netlink/genetlink.c
net/rds/ib.h
net/sched/sch_atm.c
net/sunrpc/auth_gss/gss_krb5_wrap.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/clnt.c
net/sunrpc/rpc_pipe.c
net/sunrpc/rpcb_clnt.c
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcauth_unix.c
net/sunrpc/xprt.c
net/wanrouter/Kconfig
net/xfrm/xfrm_policy.c
scripts/checkpatch.pl
scripts/coccinelle/misc/ifaddr.cocci [new file with mode: 0644]
scripts/coccinelle/misc/noderef.cocci [new file with mode: 0644]
scripts/config
scripts/kconfig/conf.c
scripts/link-vmlinux.sh [new file with mode: 0644]
scripts/package/builddeb
security/apparmor/lsm.c
security/capability.c
security/commoncap.c
security/keys/compat.c
security/keys/internal.h
security/keys/keyctl.c
security/keys/process_keys.c
security/keys/request_key.c
security/security.c
security/selinux/hooks.c
security/selinux/selinuxfs.c
security/smack/smack_lsm.c
sound/core/pcm_lib.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/pci/rme9652/hdspm.c
sound/soc/fsl/imx-ssi.c
sound/soc/kirkwood/kirkwood-i2s.c
sound/soc/kirkwood/kirkwood.h
sound/soc/omap/Kconfig
sound/soc/omap/Makefile
sound/soc/omap/mcbsp.c
sound/soc/omap/mcbsp.h
sound/soc/omap/omap-abe-twl6040.c
sound/soc/omap/omap-dmic.c
sound/soc/omap/omap-hdmi-card.c [new file with mode: 0644]
sound/soc/omap/omap-hdmi.c
sound/soc/omap/omap-hdmi.h
sound/soc/omap/omap-mcbsp.c
sound/soc/omap/omap-mcpdm.c
sound/soc/omap/omap4-hdmi-card.c [deleted file]
sound/soc/sh/fsi.c
sound/usb/pcm.c
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/parse-filter.c
tools/perf/Documentation/perfconfig.example
tools/perf/Makefile
tools/perf/builtin-annotate.c
tools/perf/builtin-evlist.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-top.c
tools/perf/perf.h
tools/perf/ui/browser.c
tools/perf/ui/browser.h
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/setup.c
tools/perf/util/config.c
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/parse-events.c
tools/perf/util/thread_map.c
tools/testing/selftests/Makefile
tools/testing/selftests/kcmp/Makefile [new file with mode: 0644]
tools/testing/selftests/kcmp/kcmp_test.c [new file with mode: 0644]
tools/testing/selftests/mqueue/.gitignore [new file with mode: 0644]
tools/testing/selftests/mqueue/Makefile [new file with mode: 0644]
tools/testing/selftests/mqueue/mq_open_tests.c [new file with mode: 0644]
tools/testing/selftests/mqueue/mq_perf_tests.c [new file with mode: 0644]
tools/vm/page-types.c
usr/Kconfig
virt/kvm/Kconfig
virt/kvm/ioapic.c
virt/kvm/ioapic.h
virt/kvm/irq_comm.c
virt/kvm/kvm_main.c

index 9b0d0267a3c3f1ea75a674fe858fac2165a8b683..2909c33bc54e231057fe06852d249d52f4439b15 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -113,3 +113,5 @@ Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
 Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Yusuke Goda <goda.yusuke@renesas.com>
+Gustavo Padovan <gustavo@las.ic.unicamp.br>
+Gustavo Padovan <padovan@profusion.mobi>
index d535757799feda3d447c2ddbbed201794cea9279..679ce354312281846e4ef2decee0689b0775c985 100644 (file)
@@ -6,13 +6,21 @@ Description:    This is a read-only file. Dumps below driver information and
                 hardware registers.
                     - S ACTive
                     - Command Issue
-                    - Allocated
                     - Completed
                     - PORT IRQ STAT
                     - HOST IRQ STAT
+                    - Allocated
+                    - Commands in Q
 
 What:           /sys/block/rssd*/status
 Date:           April 2012
 KernelVersion:  3.4
 Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:   This is a read-only file. Indicates the status of the device.
+Description:    This is a read-only file. Indicates the status of the device.
+
+What:           /sys/block/rssd*/flags
+Date:           May 2012
+KernelVersion:  3.5
+Contact:        Asai Thambi S P <asamymuthupa@micron.com>
+Description:    This is a read-only file. Dumps the flags in port and driver
+                data structure
diff --git a/Documentation/ABI/testing/sysfs-bus-fcoe b/Documentation/ABI/testing/sysfs-bus-fcoe
new file mode 100644 (file)
index 0000000..469d09c
--- /dev/null
@@ -0,0 +1,77 @@
+What:          /sys/bus/fcoe/ctlr_X
+Date:          March 2012
+KernelVersion: TBD
+Contact:       Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
+Description:   'FCoE Controller' instances on the fcoe bus
+Attributes:
+
+       fcf_dev_loss_tmo: Device loss timeout period (see below). Changing
+                         this value will change the dev_loss_tmo for all
+                         FCFs discovered by this controller.
+
+       lesb_link_fail:   Link Error Status Block (LESB) link failure count.
+
+       lesb_vlink_fail:  Link Error Status Block (LESB) virtual link
+                         failure count.
+
+       lesb_miss_fka:    Link Error Status Block (LESB) missed FCoE
+                         Initialization Protocol (FIP) Keep-Alives (FKA).
+
+       lesb_symb_err:    Link Error Status Block (LESB) symbolic error count.
+
+       lesb_err_block:   Link Error Status Block (LESB) block error count.
+
+       lesb_fcs_error:   Link Error Status Block (LESB) Fibre Channel
+                         Services error count.
+
+Notes: ctlr_X (global increment starting at 0)
+
+What:          /sys/bus/fcoe/fcf_X
+Date:          March 2012
+KernelVersion: TBD
+Contact:       Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
+Description:   'FCoE FCF' instances on the fcoe bus. A FCF is a Fibre Channel
+               Forwarder, which is a FCoE switch that can accept FCoE
+               (Ethernet) packets, unpack them, and forward the embedded
+               Fibre Channel frames into a FC fabric. It can also take
+               outbound FC frames and pack them in Ethernet packets to
+               be sent to their destination on the Ethernet segment.
+Attributes:
+
+       fabric_name: Identifies the fabric that the FCF services.
+
+       switch_name: Identifies the FCF.
+
+       priority:    The switch's priority amongst other FCFs on the same
+                    fabric.
+
+       selected:    1 indicates that the switch has been selected for use;
+                    0 indicates that the switch will not be used.
+
+       fc_map:      The Fibre Channel MAP
+
+       vfid:        The Virtual Fabric ID
+
+       mac:         The FCF's MAC address
+
+       fka_peroid:  The FIP Keep-Alive period
+
+       fabric_state: The internal kernel state
+                     "Unknown" - Initialization value
+                     "Disconnected" - No link to the FCF/fabric
+                     "Connected" - Host is connected to the FCF
+                     "Deleted" - FCF is being removed from the system
+
+       dev_loss_tmo: The device loss timeout period for this FCF.
+
+Notes: A device loss infrastructure similar to the FC Transport's
+       is present in fcoe_sysfs. It is nice to have so that a
+       link flapping adapter doesn't continually advance the count
+       used to identify the discovered FCF. FCFs will exist in a
+       "Disconnected" state until either the timer expires and the
+       FCF becomes "Deleted" or the FCF is rediscovered and becomes
+       "Connected."
+
+
+Users: The first user of this interface will be the fcoeadm application,
+       which is commonly packaged in the fcoe-utils package.
diff --git a/Documentation/ABI/testing/sysfs-bus-i2c-devices-lm3533 b/Documentation/ABI/testing/sysfs-bus-i2c-devices-lm3533
new file mode 100644 (file)
index 0000000..1b62230
--- /dev/null
@@ -0,0 +1,15 @@
+What:          /sys/bus/i2c/devices/.../output_hvled[n]
+Date:          April 2012
+KernelVersion: 3.5
+Contact:       Johan Hovold <jhovold@gmail.com>
+Description:
+               Set the controlling backlight device for high-voltage current
+               sink HVLED[n] (n = 1, 2) (0, 1).
+
+What:          /sys/bus/i2c/devices/.../output_lvled[n]
+Date:          April 2012
+KernelVersion: 3.5
+Contact:       Johan Hovold <jhovold@gmail.com>
+Description:
+               Set the controlling led device for low-voltage current sink
+               LVLED[n] (n = 1..5) (0..3).
index dbedafb095e24d3d3a8e2d93b6cbd727268d1754..bcd88eb7ebcd240abcdbe0ef34f4a517f0f404bd 100644 (file)
@@ -65,11 +65,11 @@ snap_*
 Entries under /sys/bus/rbd/devices/<dev-id>/snap_<snap-name>
 -------------------------------------------------------------
 
-id
+snap_id
 
        The rados internal snapshot id assigned for this snapshot
 
-size
+snap_size
 
        The size of the image when this snapshot was taken.
 
diff --git a/Documentation/ABI/testing/sysfs-class-backlight-driver-lm3533 b/Documentation/ABI/testing/sysfs-class-backlight-driver-lm3533
new file mode 100644 (file)
index 0000000..77cf7ac
--- /dev/null
@@ -0,0 +1,48 @@
+What:          /sys/class/backlight/<backlight>/als_channel
+Date:          May 2012
+KernelVersion: 3.5
+Contact:       Johan Hovold <jhovold@gmail.com>
+Description:
+               Get the ALS output channel used as input in
+               ALS-current-control mode (0, 1), where
+
+               0 - out_current0 (backlight 0)
+               1 - out_current1 (backlight 1)
+
+What:          /sys/class/backlight/<backlight>/als_en
+Date:          May 2012
+KernelVersion: 3.5
+Contact:       Johan Hovold <jhovold@gmail.com>
+Description:
+               Enable ALS-current-control mode (0, 1).
+
+What:          /sys/class/backlight/<backlight>/id
+Date:          April 2012
+KernelVersion: 3.5
+Contact:       Johan Hovold <jhovold@gmail.com>
+Description:
+               Get the id of this backlight (0, 1).
+
+What:          /sys/class/backlight/<backlight>/linear
+Date:          April 2012
+KernelVersion: 3.5
+Contact:       Johan Hovold <jhovold@gmail.com>
+Description:
+               Set the brightness-mapping mode (0, 1), where
+
+               0 - exponential mode
+               1 - linear mode
+
+What:          /sys/class/backlight/<backlight>/pwm
+Date:          April 2012
+KernelVersion: 3.5
+Contact:       Johan Hovold <jhovold@gmail.com>
+Description:
+               Set the PWM-input control mask (5 bits), where
+
+               bit 5 - PWM-input enabled in Zone 4
+               bit 4 - PWM-input enabled in Zone 3
+               bit 3 - PWM-input enabled in Zone 2
+               bit 2 - PWM-input enabled in Zone 1
+               bit 1 - PWM-input enabled in Zone 0
+               bit 0 - PWM-input enabled
diff --git a/Documentation/ABI/testing/sysfs-class-led-driver-lm3533 b/Documentation/ABI/testing/sysfs-class-led-driver-lm3533
new file mode 100644 (file)
index 0000000..620ebb3
--- /dev/null
@@ -0,0 +1,65 @@
+What:          /sys/class/leds/<led>/als_channel
+Date:          May 2012
+KernelVersion: 3.5
+Contact:       Johan Hovold <jhovold@gmail.com>
+Description:
+               Set the ALS output channel to use as input in
+               ALS-current-control mode (1, 2), where
+
+               1 - out_current1
+               2 - out_current2
+
+What:          /sys/class/leds/<led>/als_en
+Date:          May 2012
+KernelVersion: 3.5
+Contact:       Johan Hovold <jhovold@gmail.com>
+Description:
+               Enable ALS-current-control mode (0, 1).
+
+What:          /sys/class/leds/<led>/falltime
+What:          /sys/class/leds/<led>/risetime
+Date:          April 2012
+KernelVersion: 3.5
+Contact:       Johan Hovold <jhovold@gmail.com>
+Description:
+               Set the pattern generator fall and rise times (0..7), where
+
+               0 - 2048 us
+               1 - 262 ms
+               2 - 524 ms
+               3 - 1.049 s
+               4 - 2.097 s
+               5 - 4.194 s
+               6 - 8.389 s
+               7 - 16.78 s
+
+What:          /sys/class/leds/<led>/id
+Date:          April 2012
+KernelVersion: 3.5
+Contact:       Johan Hovold <jhovold@gmail.com>
+Description:
+               Get the id of this led (0..3).
+
+What:          /sys/class/leds/<led>/linear
+Date:          April 2012
+KernelVersion: 3.5
+Contact:       Johan Hovold <jhovold@gmail.com>
+Description:
+               Set the brightness-mapping mode (0, 1), where
+
+               0 - exponential mode
+               1 - linear mode
+
+What:          /sys/class/leds/<led>/pwm
+Date:          April 2012
+KernelVersion: 3.5
+Contact:       Johan Hovold <jhovold@gmail.com>
+Description:
+               Set the PWM-input control mask (5 bits), where
+
+               bit 5 - PWM-input enabled in Zone 4
+               bit 4 - PWM-input enabled in Zone 3
+               bit 3 - PWM-input enabled in Zone 2
+               bit 2 - PWM-input enabled in Zone 1
+               bit 1 - PWM-input enabled in Zone 0
+               bit 0 - PWM-input enabled
index 4d55a18889813be097af27112b9e647031bf8fff..db1ad7e34fc3a00e14be0c1045564fe6c98bd37e 100644 (file)
@@ -123,3 +123,54 @@ Description:
                half page, or a quarter page).
 
                In the case of ECC NOR, it is the ECC block size.
+
+What:          /sys/class/mtd/mtdX/ecc_strength
+Date:          April 2012
+KernelVersion: 3.4
+Contact:       linux-mtd@lists.infradead.org
+Description:
+               Maximum number of bit errors that the device is capable of
+               correcting within each region covering an ecc step.  This will
+               always be a non-negative integer.  Note that some devices will
+               have multiple ecc steps within each writesize region.
+
+               In the case of devices lacking any ECC capability, it is 0.
+
+What:          /sys/class/mtd/mtdX/bitflip_threshold
+Date:          April 2012
+KernelVersion: 3.4
+Contact:       linux-mtd@lists.infradead.org
+Description:
+               This allows the user to examine and adjust the criteria by which
+               mtd returns -EUCLEAN from mtd_read().  If the maximum number of
+               bit errors that were corrected on any single region comprising
+               an ecc step (as reported by the driver) equals or exceeds this
+               value, -EUCLEAN is returned.  Otherwise, absent an error, 0 is
+               returned.  Higher layers (e.g., UBI) use this return code as an
+               indication that an erase block may be degrading and should be
+               scrutinized as a candidate for being marked as bad.
+
+               The initial value may be specified by the flash device driver.
+               If not, then the default value is ecc_strength.
+
+               The introduction of this feature brings a subtle change to the
+               meaning of the -EUCLEAN return code.  Previously, it was
+               interpreted to mean simply "one or more bit errors were
+               corrected".  Its new interpretation can be phrased as "a
+               dangerously high number of bit errors were corrected on one or
+               more regions comprising an ecc step".  The precise definition of
+               "dangerously high" can be adjusted by the user with
+               bitflip_threshold.  Users are discouraged from doing this,
+               however, unless they know what they are doing and have intimate
+               knowledge of the properties of their device.  Broadly speaking,
+               bitflip_threshold should be low enough to detect genuine erase
+               block degradation, but high enough to avoid the consequences of
+               a persistent return value of -EUCLEAN on devices where sticky
+               bitflips occur.  Note that if bitflip_threshold exceeds
+               ecc_strength, -EUCLEAN is never returned by mtd_read().
+               Conversely, if bitflip_threshold is zero, -EUCLEAN is always
+               returned, absent a hard error.
+
+               This is generally applicable only to NAND flash devices with ECC
+               capability.  It is ignored on devices lacking ECC capability;
+               i.e., devices for which ecc_strength is zero.
index c58b236bbe0467938e601e498008d4856bbbce52..cb9258b8fd35b25b8ac750b18b4237204213fbd4 100644 (file)
@@ -671,8 +671,9 @@ ones already enabled by DEBUG.
                Chapter 14: Allocating memory
 
 The kernel provides the following general purpose memory allocators:
-kmalloc(), kzalloc(), kcalloc(), vmalloc(), and vzalloc().  Please refer to
-the API documentation for further information about them.
+kmalloc(), kzalloc(), kmalloc_array(), kcalloc(), vmalloc(), and
+vzalloc().  Please refer to the API documentation for further information
+about them.
 
 The preferred form for passing a size of a struct is the following:
 
@@ -686,6 +687,17 @@ Casting the return value which is a void pointer is redundant. The conversion
 from void pointer to any other pointer type is guaranteed by the C programming
 language.
 
+The preferred form for allocating an array is the following:
+
+       p = kmalloc_array(n, sizeof(...), ...);
+
+The preferred form for allocating a zeroed array is the following:
+
+       p = kcalloc(n, sizeof(...), ...);
+
+Both forms check for overflow on the allocation size n * sizeof(...),
+and return NULL if that occurred.
+
 
                Chapter 15: The inline disease
 
index 0c674be0d3c6de3d05e1903f6fac4beecff51133..e0aedb7a782718c445d48f8f42ee9e14a3d5a33a 100644 (file)
@@ -1119,8 +1119,6 @@ in this page</entry>
                These constants are defined in nand.h. They are ored together to describe
                the chip functionality.
                <programlisting>
-/* Chip can not auto increment pages */
-#define NAND_NO_AUTOINCR       0x00000001
 /* Buswitdh is 16 bit */
 #define NAND_BUSWIDTH_16       0x00000002
 /* Device supports partial programming without padding */
index 4468ce24427cb011e7991d8f1ae2560764b170b8..c379a2a6949f1c1cac04fb6f185c633512f37061 100644 (file)
@@ -150,7 +150,8 @@ be able to justify all violations that remain in your patch.
 
 Look through the MAINTAINERS file and the source code, and determine
 if your change applies to a specific subsystem of the kernel, with
-an assigned maintainer.  If so, e-mail that person.
+an assigned maintainer.  If so, e-mail that person.  The script
+scripts/get_maintainer.pl can be very useful at this step.
 
 If no maintainer is listed, or the maintainer does not respond, send
 your patch to the primary Linux kernel developer's mailing list,
index 888ae7b83ae4783da38b4db2f69b94d45ba193c8..a564ceea9e98cc9f5f423f0e80f54c85fe7b0ef3 100644 (file)
@@ -47,6 +47,51 @@ flexible way to enable non-common multi-display configuration. In addition to
 modelling the hardware overlays, omapdss supports virtual overlays and overlay
 managers. These can be used when updating a display with CPU or system DMA.
 
+omapdss driver support for audio
+--------------------------------
+There exist several display technologies and standards that support audio as
+well. Hence, it is relevant to update the DSS device driver to provide an audio
+interface that may be used by an audio driver or any other driver interested in
+the functionality.
+
+The audio_enable function is intended to prepare the relevant
+IP for playback (e.g., enabling an audio FIFO, taking in/out of reset
+some IP, enabling companion chips, etc). It is intended to be called before
+audio_start. The audio_disable function performs the reverse operation and is
+intended to be called after audio_stop.
+
+While a given DSS device driver may support audio, it is possible that for
+certain configurations audio is not supported (e.g., an HDMI display using a
+VESA video timing). The audio_supported function is intended to query whether
+the current configuration of the display supports audio.
+
+The audio_config function is intended to configure all the relevant audio
+parameters of the display. In order to make the function independent of any
+specific DSS device driver, a struct omap_dss_audio is defined. Its purpose
+is to contain all the required parameters for audio configuration. At the
+moment, such structure contains pointers to IEC-60958 channel status word
+and CEA-861 audio infoframe structures. This should be enough to support
+HDMI and DisplayPort, as both are based on CEA-861 and IEC-60958.
+
+The audio_enable/disable, audio_config and audio_supported functions could be
+implemented as functions that may sleep. Hence, they should not be called
+while holding a spinlock or a readlock.
+
+The audio_start/audio_stop function is intended to effectively start/stop audio
+playback after the configuration has taken place. These functions are designed
+to be used in an atomic context. Hence, audio_start should return quickly and be
+called only after all the needed resources for audio playback (audio FIFOs,
+DMA channels, companion chips, etc) have been enabled to begin data transfers.
+audio_stop is designed to only stop the audio transfers. The resources used
+for playback are released using audio_disable.
+
+The enum omap_dss_audio_state may be used to help the implementations of
+the interface to keep track of the audio state. The initial state is _DISABLED;
+then, the state transitions to _CONFIGURED, and then, when it is ready to
+play audio, to _ENABLED. The state _PLAYING is used when the audio is being
+rendered.
+
+
 Panel and controller drivers
 ----------------------------
 
@@ -156,6 +201,7 @@ timings             Display timings (pixclock,xres/hfp/hbp/hsw,yres/vfp/vbp/vsw)
                "pal" and "ntsc"
 panel_name
 tear_elim      Tearing elimination 0=off, 1=on
+output_type    Output type (video encoder only): "composite" or "svideo"
 
 There are also some debugfs files at <debugfs>/omapdss/ which show information
 about clocks and registers.
index 28a9af953b9dfdccb5e2c95bb21917e490d08b9a..57aae7765c74e7a7ed60b51bd243ee94193b7c80 100644 (file)
@@ -8,9 +8,8 @@ Introduction
   weblink : http://www.st.com/spear
 
   The ST Microelectronics SPEAr range of ARM9/CortexA9 System-on-Chip CPUs are
-  supported by the 'spear' platform of ARM Linux. Currently SPEAr300,
-  SPEAr310, SPEAr320 and SPEAr600 SOCs are supported. Support for the SPEAr13XX
-  series is in progress.
+  supported by the 'spear' platform of ARM Linux. Currently SPEAr1310,
+  SPEAr1340, SPEAr300, SPEAr310, SPEAr320 and SPEAr600 SOCs are supported.
 
   Hierarchy in SPEAr is as follows:
 
@@ -26,33 +25,36 @@ Introduction
                - SPEAr600 (SOC)
                        - SPEAr600 Evaluation Board
        - SPEAr13XX (13XX SOC series, based on ARM CORTEXA9)
-               - SPEAr1300 (SOC)
+               - SPEAr1310 (SOC)
+                       - SPEAr1310 Evaluation Board
+               - SPEAr1340 (SOC)
+                       - SPEAr1340 Evaluation Board
 
   Configuration
   -------------
 
   A generic configuration is provided for each machine, and can be used as the
   default by
-       make spear600_defconfig
-       make spear300_defconfig
-       make spear310_defconfig
-       make spear320_defconfig
+       make spear13xx_defconfig
+       make spear3xx_defconfig
+       make spear6xx_defconfig
 
   Layout
   ------
 
-  The common files for multiple machine families (SPEAr3XX, SPEAr6XX and
-  SPEAr13XX) are located in the platform code contained in arch/arm/plat-spear
+  The common files for multiple machine families (SPEAr3xx, SPEAr6xx and
+  SPEAr13xx) are located in the platform code contained in arch/arm/plat-spear
   with headers in plat/.
 
   Each machine series have a directory with name arch/arm/mach-spear followed by
   series name. Like mach-spear3xx, mach-spear6xx and mach-spear13xx.
 
-  Common file for machines of spear3xx family is mach-spear3xx/spear3xx.c and for
-  spear6xx is mach-spear6xx/spear6xx.c. mach-spear* also contain soc/machine
-  specific files, like spear300.c, spear310.c, spear320.c and spear600.c.
-  mach-spear* doesn't contains board specific files as they fully support
-  Flattened Device Tree.
+  Common file for machines of spear3xx family is mach-spear3xx/spear3xx.c, for
+  spear6xx is mach-spear6xx/spear6xx.c and for spear13xx family is
+  mach-spear13xx/spear13xx.c. mach-spear* also contain soc/machine specific
+  files, like spear1310.c, spear1340.c, spear300.c, spear310.c, spear320.c and
+  spear600.c.  mach-spear* doesn't contain board specific files as they fully
+  support Flattened Device Tree.
 
 
   Document Author
index 9b1067afb2245f702d962ca4a93c241af6641a02..dd88540bb995e88dab868e1f42cbd218b888d7bd 100644 (file)
@@ -184,12 +184,14 @@ behind this approach is that a cgroup that aggressively uses a shared
 page will eventually get charged for it (once it is uncharged from
 the cgroup that brought it in -- this will happen on memory pressure).
 
+But see section 8.2: when moving a task to another cgroup, its pages may
+be recharged to the new cgroup, if move_charge_at_immigrate has been chosen.
+
 Exception: If CONFIG_CGROUP_CGROUP_MEM_RES_CTLR_SWAP is not used.
 When you do swapoff and make swapped-out pages of shmem(tmpfs) to
 be backed into memory in force, charges for pages are accounted against the
 caller of swapoff rather than the users of shmem.
 
-
 2.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP)
 
 Swap Extension allows you to record charge for swap. A swapped-in page is
@@ -374,14 +376,15 @@ cgroup might have some charge associated with it, even though all
 tasks have migrated away from it. (because we charge against pages, not
 against tasks.)
 
-Such charges are freed or moved to their parent. At moving, both of RSS
-and CACHES are moved to parent.
-rmdir() may return -EBUSY if freeing/moving fails. See 5.1 also.
+We move the stats to root (if use_hierarchy==0) or parent (if
+use_hierarchy==1), and no change on the charge except uncharging
+from the child.
 
 Charges recorded in swap information is not updated at removal of cgroup.
 Recorded information is discarded and a cgroup which uses swap (swapcache)
 will be charged as a new owner of it.
 
+About use_hierarchy, see Section 6.
 
 5. Misc. interfaces.
 
@@ -394,13 +397,15 @@ will be charged as a new owner of it.
 
   Almost all pages tracked by this memory cgroup will be unmapped and freed.
   Some pages cannot be freed because they are locked or in-use. Such pages are
-  moved to parent and this cgroup will be empty. This may return -EBUSY if
-  VM is too busy to free/move all pages immediately.
+  moved to parent(if use_hierarchy==1) or root (if use_hierarchy==0) and this
+  cgroup will be empty.
 
   Typical use case of this interface is that calling this before rmdir().
   Because rmdir() moves all pages to parent, some out-of-use page caches can be
   moved to the parent. If you want to avoid that, force_empty will be useful.
 
+  About use_hierarchy, see Section 6.
+
 5.2 stat file
 
 memory.stat file includes following statistics
@@ -430,17 +435,10 @@ hierarchical_memory_limit - # of bytes of memory limit with regard to hierarchy
 hierarchical_memsw_limit - # of bytes of memory+swap limit with regard to
                        hierarchy under which memory cgroup is.
 
-total_cache            - sum of all children's "cache"
-total_rss              - sum of all children's "rss"
-total_mapped_file      - sum of all children's "cache"
-total_pgpgin           - sum of all children's "pgpgin"
-total_pgpgout          - sum of all children's "pgpgout"
-total_swap             - sum of all children's "swap"
-total_inactive_anon    - sum of all children's "inactive_anon"
-total_active_anon      - sum of all children's "active_anon"
-total_inactive_file    - sum of all children's "inactive_file"
-total_active_file      - sum of all children's "active_file"
-total_unevictable      - sum of all children's "unevictable"
+total_<counter>                - # hierarchical version of <counter>, which in
+                       addition to the cgroup's own value includes the
+                       sum of all hierarchical children's values of
+                       <counter>, i.e. total_cache
 
 # The following additional stats are dependent on CONFIG_DEBUG_VM.
 
@@ -622,8 +620,7 @@ memory cgroup.
   bit | what type of charges would be moved ?
  -----+------------------------------------------------------------------------
    0  | A charge of an anonymous page(or swap of it) used by the target task.
-      | Those pages and swaps must be used only by the target task. You must
-      | enable Swap Extension(see 2.4) to enable move of swap charges.
+      | You must enable Swap Extension(see 2.4) to enable move of swap charges.
  -----+------------------------------------------------------------------------
    1  | A charge of file pages(normal file, tmpfs file(e.g. ipc shared memory)
       | and swaps of tmpfs file) mmapped by the target task. Unlike the case of
@@ -636,8 +633,6 @@ memory cgroup.
 
 8.3 TODO
 
-- Implement madvise(2) to let users decide the vma to be moved or not to be
-  moved.
 - All of moving charge operations are done under cgroup_mutex. It's not good
   behavior to hold the mutex too long, so we may need some trick.
 
index f3c4ec3626a280bfd1b7f8811c9ea2cc3502f213..0c4a344e78fa4c32693bf231240597bee5a8fe2b 100644 (file)
@@ -92,6 +92,14 @@ to work with it.
 
        The _locked routines imply that the res_counter->lock is taken.
 
+ f. void res_counter_uncharge_until
+               (struct res_counter *rc, struct res_counter *top,
+                unsigned long val)
+
+       Almost the same as res_counter_uncharge() but propagation of uncharge
+       stops when rc == top. This is useful when killing a res_counter in a
+       child cgroup.
+
  2.1 Other accounting routines
 
     There are more routines that may help you with common needs, like
index d9b086869a6054d9687ec51070cfceb51fab6a50..8dbdb1a44429aedc90e770dfba266f8f57e97ce9 100644 (file)
@@ -1,38 +1,34 @@
-Linux 2.4 on the CRIS architecture
-==================================
-$Id: README,v 1.7 2001/04/19 12:38:32 bjornw Exp $
+Linux on the CRIS architecture
+==============================
 
-This is a port of Linux 2.4 to Axis Communications ETRAX 100LX embedded 
-network CPU. For more information about CRIS and ETRAX please see further
-below.
+This is a port of Linux to Axis Communications ETRAX 100LX,
+ETRAX FS and ARTPEC-3 embedded network CPUs.
+
+For more information about CRIS and ETRAX please see further below.
 
 In order to compile this you need a version of gcc with support for the
-ETRAX chip family. Please see this link for more information on how to 
+ETRAX chip family. Please see this link for more information on how to
 download the compiler and other tools useful when building and booting
 software for the ETRAX platform:
 
-http://developer.axis.com/doc/software/devboard_lx/install-howto.html
-
-<more specific information should come in this document later>
+http://developer.axis.com/wiki/doku.php?id=axis:install-howto-2_20
 
 What is CRIS ?
 --------------
 
 CRIS is an acronym for 'Code Reduced Instruction Set'. It is the CPU
 architecture in Axis Communication AB's range of embedded network CPU's,
-called ETRAX. The latest CPU is called ETRAX 100LX, where LX stands for
-'Linux' because the chip was designed to be a good host for the Linux
-operating system.
+called ETRAX.
 
 The ETRAX 100LX chip
 --------------------
 
-For reference, please see the press-release:
+For reference, please see the following link:
 
-http://www.axis.com/news/us/001101_etrax.htm
+http://www.axis.com/products/dev_etrax_100lx/index.htm
 
-The ETRAX 100LX is a 100 MIPS processor with 8kB cache, MMU, and a very broad 
-range of  built-in interfaces, all with modern scatter/gather DMA.
+The ETRAX 100LX is a 100 MIPS processor with 8kB cache, MMU, and a very broad
+range of built-in interfaces, all with modern scatter/gather DMA.
 
 Memory interfaces:
 
@@ -51,20 +47,28 @@ I/O interfaces:
        * SCSI
        * two parallel-ports
        * two generic 8-bit ports
-       
-       (not all interfaces are available at the same time due to chip pin 
+
+       (not all interfaces are available at the same time due to chip pin
          multiplexing)
 
-The previous version of the ETRAX, the ETRAX 100, sits in almost all of
-Axis shipping thin-servers like the Axis 2100 web camera or the ETRAX 100
-developer-board. It lacks an MMU so the Linux we run on that is a version
-of uClinux (Linux 2.0 without MM-support) ported to the CRIS architecture.
-The new Linux 2.4 port has full MM and needs a CPU with an MMU, so it will
-not run on the ETRAX 100.
+ETRAX 100LX is CRISv10 architecture.
+
+
+The ETRAX FS and ARTPEC-3 chips
+-------------------------------
 
-A version of the Axis developer-board with ETRAX 100LX (running Linux
-2.4) is now available. For more information please see developer.axis.com.
+The ETRAX FS is a 200MHz 32-bit RISC processor with on-chip 16kB
+I-cache and 16kB D-cache and with a wide range of device interfaces
+including multiple high speed serial ports and an integrated USB 1.1 PHY.
 
+The ARTPEC-3 is a variant of the ETRAX FS with additional IO-units
+used by the Axis Communications network cameras.
+
+See below link for more information:
+
+http://www.axis.com/products/dev_etrax_fs/index.htm
+
+ETRAX FS and ARTPEC-3 are both CRISv32 architectures.
 
 Bootlog
 -------
@@ -182,10 +186,6 @@ SwapFree:            0 kB
 -rwxr-xr-x  1 342      100         16252  Jan 01 00:00 telnetd
 
 
-(All programs are statically linked to the libc at this point - we have not ported the
- shared libraries yet)
-
-
 
 
 
index 3370bc4d7b9885b45040b359d858f4112418b74f..f5cfc62b7ad3fa2bfbb50fb3f33339e1d402a575 100644 (file)
@@ -287,6 +287,17 @@ iii) Messages
        the current transaction id is when you change it with this
        compare-and-swap message.
 
+    reserve_metadata_snap
+
+        Reserve a copy of the data mapping btree for use by userland.
+        This allows userland to inspect the mappings as they were when
+        this message was executed.  Use the pool's status command to
+        get the root block associated with the metadata snapshot.
+
+    release_metadata_snap
+
+        Release a previously reserved copy of the data mapping btree.
+
 'thin' target
 -------------
 
index bfbc771a65f8937124089c238fe6ac0dcaa96142..ac9e7516756e62a3817d1b9ac8338c1f47a45440 100644 (file)
@@ -1,6 +1,14 @@
 Freescale i.MX Platforms Device Tree Bindings
 -----------------------------------------------
 
+i.MX23 Evaluation Kit
+Required root node properties:
+    - compatible = "fsl,imx23-evk", "fsl,imx23";
+
+i.MX28 Evaluation Kit
+Required root node properties:
+    - compatible = "fsl,imx28-evk", "fsl,imx28";
+
 i.MX51 Babbage Board
 Required root node properties:
     - compatible = "fsl,imx51-babbage", "fsl,imx51";
@@ -29,6 +37,10 @@ i.MX6 Quad SABRE Lite Board
 Required root node properties:
     - compatible = "fsl,imx6q-sabrelite", "fsl,imx6q";
 
+i.MX6 Quad SABRE Smart Device Board
+Required root node properties:
+    - compatible = "fsl,imx6q-sabresd", "fsl,imx6q";
+
 Generic i.MX boards
 -------------------
 
diff --git a/Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt b/Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt
new file mode 100644 (file)
index 0000000..f2f2171
--- /dev/null
@@ -0,0 +1,52 @@
+* Samsung Exynos Interrupt Combiner Controller
+
+Samsung's Exynos4 architecture includes an interrupt combiner controller which
+can combine interrupt sources as a group and provide a single interrupt request
+for the group. The interrupt request from each group is connected to a parent
+interrupt controller, such as the GIC in case of Exynos4210.
+
+The interrupt combiner controller consists of multiple combiners. Up to eight
+interrupt sources can be connected to a combiner. The combiner outputs one
+combined interrupt for its eight interrupt sources. The combined interrupt
+is usually connected to a parent interrupt controller.
+
+A single node in the device tree is used to describe the interrupt combiner
+controller module (which includes multiple combiners). A combiner in the
+interrupt controller module shares config/control registers with other
+combiners. For example, a 32-bit interrupt enable/disable config register
+can accommodate up to 4 interrupt combiners (with each combiner supporting
+up to 8 interrupt sources).
+
+Required properties:
+- compatible: should be "samsung,exynos4210-combiner".
+- interrupt-controller: Identifies the node as an interrupt controller.
+- #interrupt-cells: should be <2>. The meaning of the cells are
+       * First Cell: Combiner Group Number.
+       * Second Cell: Interrupt number within the group.
+- reg: Base address and size of interrupt combiner registers.
+- interrupts: The list of interrupts generated by the combiners which are then
+    connected to a parent interrupt controller. The format of the interrupt
+    specifier depends on the parent interrupt controller.
+
+Optional properties:
+- samsung,combiner-nr: The number of interrupt combiners supported. If this
+  property is not specified, the default number of combiners is assumed
+  to be 16.
+- interrupt-parent: pHandle of the parent interrupt controller, if not
+  inherited from the parent node.
+
+
+Example:
+
+       The following is an example from the Exynos4210 SoC dtsi file.
+
+       combiner:interrupt-controller@10440000 {
+               compatible = "samsung,exynos4210-combiner";
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               reg = <0x10440000 0x1000>;
+               interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
+                            <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
+                            <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
+                            <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>;
+       };
diff --git a/Documentation/devicetree/bindings/arm/spear-timer.txt b/Documentation/devicetree/bindings/arm/spear-timer.txt
new file mode 100644 (file)
index 0000000..c001722
--- /dev/null
@@ -0,0 +1,18 @@
+* SPEAr ARM Timer
+
+** Timer node required properties:
+
+- compatible : Should be:
+       "st,spear-timer"
+- reg: Address range of the timer registers
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupt: Should contain the timer interrupt number
+
+Example:
+
+       timer@f0000000 {
+               compatible = "st,spear-timer";
+               reg = <0xf0000000 0x400>;
+               interrupts = <2>;
+       };
index aa5f355cc94726e4b497d5dea6792f9fd464d5af..0d42949df6c29aaee124655ec3a96f5224ce06c4 100644 (file)
@@ -2,25 +2,25 @@ ST SPEAr Platforms Device Tree Bindings
 ---------------------------------------
 
 Boards with the ST SPEAr600 SoC shall have the following properties:
-
 Required root node property:
-
 compatible = "st,spear600";
 
 Boards with the ST SPEAr300 SoC shall have the following properties:
-
 Required root node property:
-
 compatible = "st,spear300";
 
 Boards with the ST SPEAr310 SoC shall have the following properties:
-
 Required root node property:
-
 compatible = "st,spear310";
 
 Boards with the ST SPEAr320 SoC shall have the following properties:
+Required root node property:
+compatible = "st,spear320";
 
+Boards with the ST SPEAr1310 SoC shall have the following properties:
 Required root node property:
+compatible = "st,spear1310";
 
-compatible = "st,spear320";
+Boards with the ST SPEAr1340 SoC shall have the following properties:
+Required root node property:
+compatible = "st,spear1340";
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
new file mode 100644 (file)
index 0000000..234406d
--- /dev/null
@@ -0,0 +1,11 @@
+NVIDIA Tegra AHB
+
+Required properties:
+- compatible : "nvidia,tegra20-ahb" or "nvidia,tegra30-ahb"
+- reg : Should contain 1 register range (address and length)
+
+Example:
+       ahb: ahb@6000c004 {
+               compatible = "nvidia,tegra20-ahb";
+               reg = <0x6000c004 0x10c>; /* AHB Arbitration + Gizmo Controller */
+       };
diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
new file mode 100644 (file)
index 0000000..ded0398
--- /dev/null
@@ -0,0 +1,19 @@
+* Freescale MXS DMA
+
+Required properties:
+- compatible : Should be "fsl,<chip>-dma-apbh" or "fsl,<chip>-dma-apbx"
+- reg : Should contain registers location and length
+
+Supported chips:
+imx23, imx28.
+
+Examples:
+dma-apbh@80004000 {
+       compatible = "fsl,imx28-dma-apbh";
+       reg = <0x80004000 2000>;
+};
+
+dma-apbx@80024000 {
+       compatible = "fsl,imx28-dma-apbx";
+       reg = <0x80024000 2000>;
+};
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
new file mode 100644 (file)
index 0000000..c0d85db
--- /dev/null
@@ -0,0 +1,17 @@
+* Synopsys Designware DMA Controller
+
+Required properties:
+- compatible: "snps,dma-spear1340"
+- reg: Address range of the DMAC registers
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupt: Should contain the DMAC interrupt number
+
+Example:
+
+       dma@fc000000 {
+               compatible = "snps,dma-spear1340";
+               reg = <0xfc000000 0x1000>;
+               interrupt-parent = <&vic1>;
+               interrupts = <12>;
+       };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt b/Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt
new file mode 100644 (file)
index 0000000..f93d514
--- /dev/null
@@ -0,0 +1,38 @@
+Lantiq SoC External Bus memory mapped GPIO controller
+
+By attaching hardware latches to the EBU it is possible to create output
+only gpios. This driver configures a special memory address, which when
+written to, outputs 16 bits to the latches.
+
+The node describing the memory mapped GPIOs needs to be a child of the node
+describing the "lantiq,localbus".
+
+Required properties:
+- compatible : Should be "lantiq,gpio-mm-lantiq"
+- reg : Address and length of the register set for the device
+- #gpio-cells : Should be two.  The first cell is the pin number and
+  the second cell is used to specify optional parameters (currently
+  unused).
+- gpio-controller : Marks the device node as a gpio controller.
+
+Optional properties:
+- lantiq,shadow : The default value that we shall assume as already set on the
+  shift register cascade.
+
+Example:
+
+localbus@0 {
+       #address-cells = <2>;
+       #size-cells = <1>;
+       ranges = <0 0 0x0 0x3ffffff /* addrsel0 */
+               1 0 0x4000000 0x4000010>; /* addsel1 */
+       compatible = "lantiq,localbus", "simple-bus";
+
+       gpio_mm0: gpio@4000000 {
+               compatible = "lantiq,gpio-mm";
+               reg = <1 0x0 0x10>;
+               gpio-controller;
+               #gpio-cells = <2>;
+               lantiq,shadow = <0x77f>
+       };
+}
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mxs.txt b/Documentation/devicetree/bindings/gpio/gpio-mxs.txt
new file mode 100644 (file)
index 0000000..0c35673
--- /dev/null
@@ -0,0 +1,87 @@
+* Freescale MXS GPIO controller
+
+The Freescale MXS GPIO controller is part of MXS PIN controller.  The
+GPIOs are organized in port/bank.  Each port consists of 32 GPIOs.
+
+As the GPIO controller is embedded in the PIN controller and all the
+GPIO ports share the same IO space with PIN controller, the GPIO node
+will be represented as sub-nodes of MXS pinctrl node.
+
+Required properties for GPIO node:
+- compatible : Should be "fsl,<soc>-gpio".  The supported SoCs include
+  imx23 and imx28.
+- interrupts : Should be the port interrupt shared by all 32 pins.
+- gpio-controller : Marks the device node as a gpio controller.
+- #gpio-cells : Should be two.  The first cell is the pin number and
+  the second cell is used to specify optional parameters (currently
+  unused).
+- interrupt-controller: Marks the device node as an interrupt controller.
+- #interrupt-cells : Should be 2.  The first cell is the GPIO number.
+  The second cell bits[3:0] is used to specify trigger type and level flags:
+      1 = low-to-high edge triggered.
+      2 = high-to-low edge triggered.
+      4 = active high level-sensitive.
+      8 = active low level-sensitive.
+
+Note: Each GPIO port should have an alias correctly numbered in "aliases"
+node.
+
+Examples:
+
+aliases {
+       gpio0 = &gpio0;
+       gpio1 = &gpio1;
+       gpio2 = &gpio2;
+       gpio3 = &gpio3;
+       gpio4 = &gpio4;
+};
+
+pinctrl@80018000 {
+       compatible = "fsl,imx28-pinctrl", "simple-bus";
+       reg = <0x80018000 2000>;
+
+       gpio0: gpio@0 {
+               compatible = "fsl,imx28-gpio";
+               interrupts = <127>;
+               gpio-controller;
+               #gpio-cells = <2>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpio1: gpio@1 {
+               compatible = "fsl,imx28-gpio";
+               interrupts = <126>;
+               gpio-controller;
+               #gpio-cells = <2>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpio2: gpio@2 {
+               compatible = "fsl,imx28-gpio";
+               interrupts = <125>;
+               gpio-controller;
+               #gpio-cells = <2>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpio3: gpio@3 {
+               compatible = "fsl,imx28-gpio";
+               interrupts = <124>;
+               gpio-controller;
+               #gpio-cells = <2>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpio4: gpio@4 {
+               compatible = "fsl,imx28-gpio";
+               interrupts = <123>;
+               gpio-controller;
+               #gpio-cells = <2>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt b/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt
new file mode 100644 (file)
index 0000000..854de13
--- /dev/null
@@ -0,0 +1,42 @@
+Lantiq SoC Serial To Parallel (STP) GPIO controller
+
+The Serial To Parallel (STP) is found on MIPS based Lantiq socs. It is a
+peripheral controller used to drive external shift register cascades. At most
+3 groups of 8 bits can be driven. The hardware is able to allow the DSL modem
+to drive the 2 LSBs of the cascade automatically.
+
+
+Required properties:
+- compatible : Should be "lantiq,gpio-stp-xway"
+- reg : Address and length of the register set for the device
+- #gpio-cells : Should be two.  The first cell is the pin number and
+  the second cell is used to specify optional parameters (currently
+  unused).
+- gpio-controller : Marks the device node as a gpio controller.
+
+Optional properties:
+- lantiq,shadow : The default value that we shall assume as already set on the
+  shift register cascade.
+- lantiq,groups : Set the 3 bit mask to select which of the 3 groups are enabled
+  in the shift register cascade.
+- lantiq,dsl : The dsl core can control the 2 LSBs of the gpio cascade. This 2 bit
+  property can enable this feature.
+- lantiq,phy1 : The gphy1 core can control 3 bits of the gpio cascade.
+- lantiq,phy2 : The gphy2 core can control 3 bits of the gpio cascade.
+- lantiq,rising : use rising instead of falling edge for the shift register
+
+Example:
+
+gpio1: stp@E100BB0 {
+       compatible = "lantiq,gpio-stp-xway";
+       reg = <0xE100BB0 0x40>;
+       #gpio-cells = <2>;
+       gpio-controller;
+
+       lantiq,shadow = <0xffff>;
+       lantiq,groups = <0x7>;
+       lantiq,dsl = <0x3>;
+       lantiq,phy1 = <0x7>;
+       lantiq,phy2 = <0x7>;
+       /* lantiq,rising; */
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mxs.txt b/Documentation/devicetree/bindings/i2c/i2c-mxs.txt
new file mode 100644 (file)
index 0000000..1bfc02d
--- /dev/null
@@ -0,0 +1,16 @@
+* Freescale MXS Inter IC (I2C) Controller
+
+Required properties:
+- compatible: Should be "fsl,<chip>-i2c"
+- reg: Should contain registers location and length
+- interrupts: Should contain ERROR and DMA interrupts
+
+Examples:
+
+i2c0: i2c@80058000 {
+       #address-cells = <1>;
+       #size-cells = <0>;
+       compatible = "fsl,imx28-i2c";
+       reg = <0x80058000 2000>;
+       interrupts = <111 68>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/mux.txt b/Documentation/devicetree/bindings/i2c/mux.txt
new file mode 100644 (file)
index 0000000..af84cce
--- /dev/null
@@ -0,0 +1,60 @@
+Common i2c bus multiplexer/switch properties.
+
+An i2c bus multiplexer/switch will have several child busses that are
+numbered uniquely in a device dependent manner.  The nodes for an i2c bus
+multiplexer/switch will have one child node for each child
+bus.
+
+Required properties:
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Required properties for child nodes:
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg : The sub-bus number.
+
+Optional properties for child nodes:
+- Other properties specific to the multiplexer/switch hardware.
+- Child nodes conforming to i2c bus binding
+
+
+Example :
+
+       /*
+          An NXP pca9548 8 channel I2C multiplexer at address 0x70
+          with two NXP pca8574 GPIO expanders attached, one each to
+          ports 3 and 4.
+        */
+
+       mux@70 {
+               compatible = "nxp,pca9548";
+               reg = <0x70>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               i2c@3 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <3>;
+
+                       gpio1: gpio@38 {
+                               compatible = "nxp,pca8574";
+                               reg = <0x38>;
+                               #gpio-cells = <2>;
+                               gpio-controller;
+                       };
+               };
+               i2c@4 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <4>;
+
+                       gpio2: gpio@38 {
+                               compatible = "nxp,pca8574";
+                               reg = <0x38>;
+                               #gpio-cells = <2>;
+                               gpio-controller;
+                       };
+               };
+       };
index 38832c712919aa25fe4e2ba2d2368708e32953f6..b6cb5a12c672e9eb430534b3bb15dbb25ca83871 100644 (file)
@@ -6,14 +6,18 @@ Required properties:
   - compatible: value should be either of the following.
       (a) "samsung, s3c2410-i2c", for i2c compatible with s3c2410 i2c.
       (b) "samsung, s3c2440-i2c", for i2c compatible with s3c2440 i2c.
+      (c) "samsung, s3c2440-hdmiphy-i2c", for s3c2440-like i2c used
+          inside HDMIPHY block found on several samsung SoCs
   - reg: physical base address of the controller and length of memory mapped
     region.
   - interrupts: interrupt number to the cpu.
   - samsung,i2c-sda-delay: Delay (in ns) applied to data line (SDA) edges.
-  - gpios: The order of the gpios should be the following: <SDA, SCL>.
-    The gpio specifier depends on the gpio controller.
 
 Optional properties:
+  - gpios: The order of the gpios should be the following: <SDA, SCL>.
+    The gpio specifier depends on the gpio controller. Required in all
+    cases except for "samsung,s3c2440-hdmiphy-i2c" whose input/output
+    lines are permanently wired to the respective client
   - samsung,i2c-slave-addr: Slave address in multi-master enviroment. If not
     specified, default value is 0.
   - samsung,i2c-max-bus-freq: Desired frequency in Hz of the bus. If not
diff --git a/Documentation/devicetree/bindings/i2c/xiic.txt b/Documentation/devicetree/bindings/i2c/xiic.txt
new file mode 100644 (file)
index 0000000..ceabbe9
--- /dev/null
@@ -0,0 +1,22 @@
+Xilinx IIC controller:
+
+Required properties:
+- compatible : Must be "xlnx,xps-iic-2.00.a"
+- reg : IIC register location and length
+- interrupts : IIC controller interrupt
+- #address-cells = <1>
+- #size-cells = <0>
+
+Optional properties:
+- Child nodes conforming to i2c bus binding
+
+Example:
+
+       axi_iic_0: i2c@40800000 {
+               compatible = "xlnx,xps-iic-2.00.a";
+               interrupts = < 1 2 >;
+               reg = < 0x40800000 0x10000 >;
+
+               #size-cells = <0>;
+               #address-cells = <1>;
+       };
diff --git a/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt b/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt
new file mode 100644 (file)
index 0000000..099d936
--- /dev/null
@@ -0,0 +1,14 @@
+NVIDIA Tegra 20 GART
+
+Required properties:
+- compatible: "nvidia,tegra20-gart"
+- reg: Two pairs of cells specifying the physical address and size of
+  the memory controller registers and the GART aperture respectively.
+
+Example:
+
+       gart {
+               compatible = "nvidia,tegra20-gart";
+               reg = <0x7000f024 0x00000018    /* controller registers */
+                      0x58000000 0x02000000>;  /* GART aperture */
+       };
diff --git a/Documentation/devicetree/bindings/mfd/da9052-i2c.txt b/Documentation/devicetree/bindings/mfd/da9052-i2c.txt
new file mode 100644 (file)
index 0000000..1857f4a
--- /dev/null
@@ -0,0 +1,60 @@
+* Dialog DA9052/53 Power Management Integrated Circuit (PMIC)
+
+Required properties:
+- compatible : Should be "dlg,da9052", "dlg,da9053-aa",
+                        "dlg,da9053-ab", or "dlg,da9053-bb"
+
+Sub-nodes:
+- regulators : Contain the regulator nodes. The DA9052/53 regulators are
+  bound using their names as listed below:
+
+    buck0     : regulator BUCK0
+    buck1     : regulator BUCK1
+    buck2     : regulator BUCK2
+    buck3     : regulator BUCK3
+    ldo4      : regulator LDO4
+    ldo5      : regulator LDO5
+    ldo6      : regulator LDO6
+    ldo7      : regulator LDO7
+    ldo8      : regulator LDO8
+    ldo9      : regulator LDO9
+    ldo10     : regulator LDO10
+    ldo11     : regulator LDO11
+    ldo12     : regulator LDO12
+    ldo13     : regulator LDO13
+
+  The bindings details of individual regulator device can be found in:
+  Documentation/devicetree/bindings/regulator/regulator.txt
+
+Examples:
+
+i2c@63fc8000 { /* I2C1 */
+       status = "okay";
+
+       pmic: dialog@48 {
+               compatible = "dlg,da9053-aa";
+               reg = <0x48>;
+
+               regulators {
+                       buck0 {
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <2075000>;
+                       };
+
+                       buck1 {
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <2075000>;
+                       };
+
+                       buck2 {
+                               regulator-min-microvolt = <925000>;
+                               regulator-max-microvolt = <2500000>;
+                       };
+
+                       buck3 {
+                               regulator-min-microvolt = <925000>;
+                               regulator-max-microvolt = <2500000>;
+                       };
+               };
+       };
+};
diff --git a/Documentation/devicetree/bindings/mfd/tps65910.txt b/Documentation/devicetree/bindings/mfd/tps65910.txt
new file mode 100644 (file)
index 0000000..645f5ea
--- /dev/null
@@ -0,0 +1,133 @@
+TPS65910 Power Management Integrated Circuit
+
+Required properties:
+- compatible: "ti,tps65910" or "ti,tps65911"
+- reg: I2C slave address
+- interrupts: the interrupt outputs of the controller
+- #gpio-cells: number of cells to describe a GPIO, this should be 2.
+  The first cell is the GPIO number.
+  The second cell is used to specify additional options <unused>.
+- gpio-controller: mark the device as a GPIO controller
+- #interrupt-cells: the number of cells to describe an IRQ, this should be 2.
+  The first cell is the IRQ number.
+  The second cell is the flags, encoded as the trigger masks from
+  Documentation/devicetree/bindings/interrupts.txt
+- regulators: This is the list of child nodes that specify the regulator
+  initialization data for defined regulators. Not all regulators for the given
+  device need to be present. The definition for each of these nodes is defined
+  using the standard binding for regulators found at
+  Documentation/devicetree/bindings/regulator/regulator.txt.
+
+  The valid names for regulators are:
+  tps65910: vrtc, vio, vdd1, vdd2, vdd3, vdig1, vdig2, vpll, vdac, vaux1,
+            vaux2, vaux33, vmmc
+  tps65911: vrtc, vio, vdd1, vdd3, vddctrl, ldo1, ldo2, ldo3, ldo4, ldo5,
+            ldo6, ldo7, ldo8
+
+Optional properties:
+- ti,vmbch-threshold: (tps65911) main battery charged threshold
+  comparator. (see VMBCH_VSEL in TPS65910 datasheet)
+- ti,vmbch2-threshold: (tps65911) main battery discharged threshold
+  comparator. (see VMBCH_VSEL in TPS65910 datasheet)
+- ti,en-gpio-sleep: enable sleep control for gpios
+  There should be 9 entries here, one for each gpio.
+
+Regulator Optional properties:
+- ti,regulator-ext-sleep-control: enable external sleep
+  control through external inputs [0 (not enabled), 1 (EN1), 2 (EN2) or 4(EN3)]
+  If this property is not defined, it defaults to 0 (not enabled).
+
+Example:
+
+       pmu: tps65910@d2 {
+               compatible = "ti,tps65910";
+               reg = <0xd2>;
+               interrupt-parent = <&intc>;
+               interrupts = < 0 118 0x04 >;
+
+               #gpio-cells = <2>;
+               gpio-controller;
+
+               #interrupt-cells = <2>;
+               interrupt-controller;
+
+               ti,vmbch-threshold = 0;
+               ti,vmbch2-threshold = 0;
+
+               ti,en-gpio-sleep = <0 0 1 0 0 0 0 0 0>;
+
+               regulators {
+                       vdd1_reg: vdd1 {
+                               regulator-min-microvolt = < 600000>;
+                               regulator-max-microvolt = <1500000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                               ti,regulator-ext-sleep-control = <0>;
+                       };
+                       vdd2_reg: vdd2 {
+                               regulator-min-microvolt = < 600000>;
+                               regulator-max-microvolt = <1500000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                               ti,regulator-ext-sleep-control = <4>;
+                       };
+                       vddctrl_reg: vddctrl {
+                               regulator-min-microvolt = < 600000>;
+                               regulator-max-microvolt = <1400000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                               ti,regulator-ext-sleep-control = <0>;
+                       };
+                       vio_reg: vio {
+                               regulator-min-microvolt = <1500000>;
+                               regulator-max-microvolt = <1800000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                               ti,regulator-ext-sleep-control = <1>;
+                       };
+                       ldo1_reg: ldo1 {
+                               regulator-min-microvolt = <1000000>;
+                               regulator-max-microvolt = <3300000>;
+                               ti,regulator-ext-sleep-control = <0>;
+                       };
+                       ldo2_reg: ldo2 {
+                               regulator-min-microvolt = <1050000>;
+                               regulator-max-microvolt = <1050000>;
+                               ti,regulator-ext-sleep-control = <0>;
+                       };
+                       ldo3_reg: ldo3 {
+                               regulator-min-microvolt = <1000000>;
+                               regulator-max-microvolt = <3300000>;
+                               ti,regulator-ext-sleep-control = <0>;
+                       };
+                       ldo4_reg: ldo4 {
+                               regulator-min-microvolt = <1000000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                               ti,regulator-ext-sleep-control = <0>;
+                       };
+                       ldo5_reg: ldo5 {
+                               regulator-min-microvolt = <1000000>;
+                               regulator-max-microvolt = <3300000>;
+                               ti,regulator-ext-sleep-control = <0>;
+                       };
+                       ldo6_reg: ldo6 {
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <1200000>;
+                               ti,regulator-ext-sleep-control = <0>;
+                       };
+                       ldo7_reg: ldo7 {
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <1200000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                               ti,regulator-ext-sleep-control = <1>;
+                       };
+                       ldo8_reg: ldo8 {
+                               regulator-min-microvolt = <1000000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                               ti,regulator-ext-sleep-control = <1>;
+                       };
+               };
+       };
diff --git a/Documentation/devicetree/bindings/mfd/twl6040.txt b/Documentation/devicetree/bindings/mfd/twl6040.txt
new file mode 100644 (file)
index 0000000..bc67c6f
--- /dev/null
@@ -0,0 +1,62 @@
+Texas Instruments TWL6040 family
+
+The TWL6040s are 8-channel high quality low-power audio codecs providing audio
+and vibra functionality on OMAP4+ platforms.
+They are connected to the host processor via i2c for commands, McPDM for audio
+data and commands.
+
+Required properties:
+- compatible : Must be "ti,twl6040";
+- reg: must be 0x4b for i2c address
+- interrupts: twl6040 has one interrupt line connected to the main SoC
+- interrupt-parent: The parent interrupt controller
+- twl6040,audpwron-gpio: Power on GPIO line for the twl6040
+
+- vio-supply: Regulator for the twl6040 VIO supply
+- v2v1-supply: Regulator for the twl6040 V2V1 supply
+
+Optional properties, nodes:
+- enable-active-high: To power on the twl6040 during boot.
+
+Vibra functionality
+Required properties:
+- vddvibl-supply: Regulator for the left vibra motor
+- vddvibr-supply: Regulator for the right vibra motor
+- vibra { }: Configuration section for vibra parameters containing the following
+            properties:
+- ti,vibldrv-res: Resistance parameter for left driver
+- ti,vibrdrv-res: Resistance parameter for right driver
+- ti,viblmotor-res: Resistance parameter for left motor
+- ti,vibrmotor-res: Resistance parameter for right motor
+
+Optional properties within vibra { } section:
+- vddvibl_uV: If the vddvibl default voltage needs to be changed
+- vddvibr_uV: If the vddvibr default voltage needs to be changed
+
+Example:
+&i2c1 {
+       twl6040: twl@4b {
+               compatible = "ti,twl6040";
+               reg = <0x4b>;
+
+               interrupts = <0 119 4>;
+               interrupt-parent = <&gic>;
+               twl6040,audpwron-gpio = <&gpio4 31 0>;
+
+               vio-supply = <&v1v8>;
+               v2v1-supply = <&v2v1>;
+               enable-active-high;
+
+               /* regulators for vibra motor */
+               vddvibl-supply = <&vbat>;
+               vddvibr-supply = <&vbat>;
+
+               vibra {
+                       /* Vibra driver, motor resistance parameters */
+                       ti,vibldrv-res = <8>;
+                       ti,vibrdrv-res = <3>;
+                       ti,viblmotor-res = <10>;
+                       ti,vibrmotor-res = <10>;
+               };
+       };
+};
index 64bcb8be973c0574ee0b835b2698f7d7501d8ff3..0d93b4b0e0e3733f77d0b668f166f2a3ba983555 100644 (file)
@@ -11,9 +11,11 @@ Required properties:
   - interrupt-parent : interrupt source phandle.
   - clock-frequency : specifies eSDHC base clock frequency.
   - sdhci,wp-inverted : (optional) specifies that eSDHC controller
-    reports inverted write-protect state;
+    reports inverted write-protect state; New devices should use
+    the generic "wp-inverted" property.
   - sdhci,1-bit-only : (optional) specifies that a controller can
-    only handle 1-bit data transfers.
+    only handle 1-bit data transfers. New devices should use the
+    generic "bus-width = <1>" property.
   - sdhci,auto-cmd12: (optional) specifies that a controller can
     only handle auto CMD12.
 
index ab22fe6e73abf00da7bd362981ce2331db284f2b..c7e404b3ef0515b5527583cae877ab48c5d69595 100644 (file)
@@ -9,7 +9,7 @@ Required properties:
 - interrupts : Should contain eSDHC interrupt
 
 Optional properties:
-- fsl,card-wired : Indicate the card is wired to host permanently
+- non-removable : Indicate the card is wired to host permanently
 - fsl,cd-internal : Indicate to use controller internal card detection
 - fsl,wp-internal : Indicate to use controller internal write protection
 - cd-gpios : Specify GPIOs for card detection
index 89a0084df2f76e625a2cdfcccf4e6c0f4281bc12..d64aea5a42032414fa99d1fa43fb0dc20114c254 100644 (file)
@@ -10,7 +10,8 @@ Required properties:
 
 Optional properties:
 - gpios : may specify GPIOs in this order: Card-Detect GPIO,
-  Write-Protect GPIO.
+  Write-Protect GPIO. Note that this does not follow the
+  binding from mmc.txt, for historic reasons.
 - interrupts : the interrupt of a card detect interrupt.
 - interrupt-parent : the phandle for the interrupt controller that
   services interrupts for this device.
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
new file mode 100644 (file)
index 0000000..6e70dcd
--- /dev/null
@@ -0,0 +1,27 @@
+These properties are common to multiple MMC host controllers. Any host
+that requires the respective functionality should implement them using
+these definitions.
+
+Required properties:
+- bus-width: Number of data lines, can be <1>, <4>, or <8>
+
+Optional properties:
+- cd-gpios : Specify GPIOs for card detection, see gpio binding
+- wp-gpios : Specify GPIOs for write protection, see gpio binding
+- cd-inverted: when present, polarity on the cd gpio line is inverted
+- wp-inverted: when present, polarity on the wp gpio line is inverted
+- non-removable: non-removable slot (like eMMC)
+- max-frequency: maximum operating clock frequency
+
+Example:
+
+sdhci@ab000000 {
+       compatible = "sdhci";
+       reg = <0xab000000 0x200>;
+       interrupts = <23>;
+       bus-width = <4>;
+       cd-gpios = <&gpio 69 0>;
+       cd-inverted;
+       wp-gpios = <&gpio 70 0>;
+       max-frequency = <50000000>;
+}
diff --git a/Documentation/devicetree/bindings/mmc/mmci.txt b/Documentation/devicetree/bindings/mmc/mmci.txt
new file mode 100644 (file)
index 0000000..14a81d5
--- /dev/null
@@ -0,0 +1,19 @@
+* ARM PrimeCell MultiMedia Card Interface (MMCI) PL180/1
+
+The ARM PrimeCell MMCI PL180 and PL181 provide an interface for
+reading and writing to MultiMedia and SD cards alike.
+
+Required properties:
+- compatible             : contains "arm,pl18x", "arm,primecell".
+- reg                    : contains pl18x registers and length.
+- interrupts             : contains the device IRQ(s).
+- arm,primecell-periphid : contains the PrimeCell Peripheral ID.
+
+Optional properties:
+- wp-gpios               : contains any write protect (ro) gpios
+- cd-gpios               : contains any card detection gpios
+- cd-inverted            : indicates whether the cd gpio is inverted
+- max-frequency          : contains the maximum operating frequency
+- bus-width              : number of data lines, can be <1>, <4>, or <8>
+- mmc-cap-mmc-highspeed  : indicates whether MMC is high speed capable
+- mmc-cap-sd-highspeed   : indicates whether SD is high speed capable
diff --git a/Documentation/devicetree/bindings/mmc/mxs-mmc.txt b/Documentation/devicetree/bindings/mmc/mxs-mmc.txt
new file mode 100644 (file)
index 0000000..14d870a
--- /dev/null
@@ -0,0 +1,25 @@
+* Freescale MXS MMC controller
+
+The Freescale MXS Synchronous Serial Ports (SSP) can act as a MMC controller
+to support MMC, SD, and SDIO types of memory cards.
+
+Required properties:
+- compatible: Should be "fsl,<chip>-mmc".  The supported chips include
+  imx23 and imx28.
+- reg: Should contain registers location and length
+- interrupts: Should contain ERROR and DMA interrupts
+- fsl,ssp-dma-channel: APBH DMA channel for the SSP
+- bus-width: Number of data lines, can be <1>, <4>, or <8>
+
+Optional properties:
+- wp-gpios: Specify GPIOs for write protection
+
+Examples:
+
+ssp0: ssp@80010000 {
+       compatible = "fsl,imx28-mmc";
+       reg = <0x80010000 2000>;
+       interrupts = <96 82>;
+       fsl,ssp-dma-channel = <0>;
+       bus-width = <8>;
+};
index 7e51154679a6f17d47ce198b77dd2eeade185dc6..f77c3031607fbea26a250e3f7fcdd6373089dfff 100644 (file)
@@ -7,12 +7,12 @@ Required properties:
 - compatible : Should be "nvidia,<chip>-sdhci"
 - reg : Should contain SD/MMC registers location and length
 - interrupts : Should contain SD/MMC interrupt
+- bus-width : Number of data lines, can be <1>, <4>, or <8>
 
 Optional properties:
 - cd-gpios : Specify GPIOs for card detection
 - wp-gpios : Specify GPIOs for write protection
 - power-gpios : Specify GPIOs for power control
-- support-8bit : Boolean, indicates if 8-bit mode should be used.
 
 Example:
 
@@ -23,5 +23,5 @@ sdhci@c8000200 {
        cd-gpios = <&gpio 69 0>; /* gpio PI5 */
        wp-gpios = <&gpio 57 0>; /* gpio PH1 */
        power-gpios = <&gpio 155 0>; /* gpio PT3 */
-       support-8bit;
+       bus-width = <8>;
 };
index dbd4368ab8cc223bb3fc63147da349b25ed82db8..8a53958c9a9f50e71073daa1b8aa0650308ea13c 100644 (file)
@@ -15,7 +15,7 @@ Optional properties:
 ti,dual-volt: boolean, supports dual voltage cards
 <supply-name>-supply: phandle to the regulator device tree node
 "supply-name" examples are "vmmc", "vmmc_aux" etc
-ti,bus-width: Number of data lines, default assumed is 1 if the property is missing.
+bus-width: Number of data lines, default assumed is 1 if the property is missing.
 cd-gpios: GPIOs for card detection
 wp-gpios: GPIOs for write protection
 ti,non-removable: non-removable slot (like eMMC)
@@ -27,7 +27,7 @@ Example:
                reg = <0x4809c000 0x400>;
                ti,hwmods = "mmc1";
                ti,dual-volt;
-               ti,bus-width = <4>;
+               bus-width = <4>;
                vmmc-supply = <&vmmc>; /* phandle to regulator node */
                ti,non-removable;
        };
diff --git a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
new file mode 100644 (file)
index 0000000..1a5bbd3
--- /dev/null
@@ -0,0 +1,33 @@
+* Freescale General-Purpose Media Interface (GPMI)
+
+The GPMI nand controller provides an interface to control the
+NAND flash chips. We support only one NAND chip now.
+
+Required properties:
+  - compatible : should be "fsl,<chip>-gpmi-nand"
+  - reg : should contain registers location and length for gpmi and bch.
+  - reg-names: Should contain the reg names "gpmi-nand" and "bch"
+  - interrupts : The first is the DMA interrupt number for GPMI.
+                 The second is the BCH interrupt number.
+  - interrupt-names : The interrupt names "gpmi-dma", "bch";
+  - fsl,gpmi-dma-channel : Should contain the dma channel it uses.
+
+The device tree may optionally contain sub-nodes describing partitions of the
+address space. See partition.txt for more detail.
+
+Examples:
+
+gpmi-nand@8000c000 {
+       compatible = "fsl,imx28-gpmi-nand";
+       #address-cells = <1>;
+       #size-cells = <1>;
+       reg = <0x8000c000 2000>, <0x8000a000 2000>;
+       reg-names = "gpmi-nand", "bch";
+       interrupts = <88>, <41>;
+       interrupt-names = "gpmi-dma", "bch";
+       fsl,gpmi-dma-channel = <4>;
+
+       partition@0 {
+       ...
+       };
+};
diff --git a/Documentation/devicetree/bindings/mtd/mxc-nand.txt b/Documentation/devicetree/bindings/mtd/mxc-nand.txt
new file mode 100644 (file)
index 0000000..b5833d1
--- /dev/null
@@ -0,0 +1,19 @@
+* Freescale's mxc_nand
+
+Required properties:
+- compatible: "fsl,imxXX-nand"
+- reg: address range of the nfc block
+- interrupts: irq to be used
+- nand-bus-width: see nand.txt
+- nand-ecc-mode: see nand.txt
+- nand-on-flash-bbt: see nand.txt
+
+Example:
+
+       nand@d8000000 {
+               compatible = "fsl,imx27-nand";
+               reg = <0xd8000000 0x1000>;
+               interrupts = <29>;
+               nand-bus-width = <8>;
+               nand-ecc-mode = "hw";
+       };
index de439517dff08b00d73b10493e93a1f7dfb527e6..7ab9e1a2d8bec19fac2283b5703fae60d2858998 100644 (file)
@@ -14,7 +14,7 @@ Optional properties:
 
 Example:
 
-fec@83fec000 {
+ethernet@83fec000 {
        compatible = "fsl,imx51-fec", "fsl,imx27-fec";
        reg = <0x83fec000 0x4000>;
        interrupts = <87>;
index 3664d37e67994194d3a442610cd8c589f952f88d..b4480d5c3aca93a99721829c604c5a823c28bc82 100644 (file)
@@ -4,6 +4,8 @@ Required properties:
 - compatible   : "st,spear300-pinmux"
                : "st,spear310-pinmux"
                : "st,spear320-pinmux"
+               : "st,spear1310-pinmux"
+               : "st,spear1340-pinmux"
 - reg          : Address range of the pinctrl registers
 - st,pinmux-mode: Mandatory for SPEAr300 and SPEAr320 and invalid for others.
        - Its values for SPEAr300:
@@ -89,6 +91,37 @@ For SPEAr320 machines:
        "rmii0_1_grp", "i2c1_8_9_grp", "i2c1_98_99_grp", "i2c2_0_1_grp",
        "i2c2_2_3_grp", "i2c2_19_20_grp", "i2c2_75_76_grp", "i2c2_96_97_grp"
 
+For SPEAr1310 machines:
+       "i2c0_grp", "ssp0_grp", "ssp0_cs0_grp", "ssp0_cs1_2_grp", "i2s0_grp",
+       "i2s1_grp", "clcd_grp", "clcd_high_res_grp", "arm_gpio_grp",
+       "smi_2_chips_grp", "smi_4_chips_grp", "gmii_grp", "rgmii_grp",
+       "smii_0_1_2_grp", "ras_mii_txclk_grp", "nand_8bit_grp",
+       "nand_16bit_grp", "nand_4_chips_grp", "keyboard_6x6_grp",
+       "keyboard_rowcol6_8_grp", "uart0_grp", "uart0_modem_grp",
+       "gpt0_tmr0_grp", "gpt0_tmr1_grp", "gpt1_tmr0_grp", "gpt1_tmr1_grp",
+       "sdhci_grp", "cf_grp", "xd_grp", "touch_xy_grp",
+       "uart1_disable_i2c_grp", "uart1_disable_sd_grp", "uart2_3_grp",
+       "uart4_grp", "uart5_grp", "rs485_0_1_tdm_0_1_grp", "i2c_1_2_grp",
+       "i2c3_dis_smi_clcd_grp", "i2c3_dis_sd_i2s0_grp", "i2c_4_5_dis_smi_grp",
+       "i2c4_dis_sd_grp", "i2c5_dis_sd_grp", "i2c_6_7_dis_kbd_grp",
+       "i2c6_dis_sd_grp", "i2c7_dis_sd_grp", "can0_dis_nor_grp",
+       "can0_dis_sd_grp", "can1_dis_sd_grp", "can1_dis_kbd_grp", "pcie0_grp",
+       "pcie1_grp", "pcie2_grp", "sata0_grp", "sata1_grp", "sata2_grp",
+       "ssp1_dis_kbd_grp", "ssp1_dis_sd_grp", "gpt64_grp"
+
+For SPEAr1340 machines:
+       "pads_as_gpio_grp", "fsmc_8bit_grp", "fsmc_16bit_grp", "fsmc_pnor_grp",
+       "keyboard_row_col_grp", "keyboard_col5_grp", "spdif_in_grp",
+       "spdif_out_grp", "gpt_0_1_grp", "pwm0_grp", "pwm1_grp", "pwm2_grp",
+       "pwm3_grp", "vip_mux_grp", "vip_mux_cam0_grp", "vip_mux_cam1_grp",
+       "vip_mux_cam2_grp", "vip_mux_cam3_grp", "cam0_grp", "cam1_grp",
+       "cam2_grp", "cam3_grp", "smi_grp", "ssp0_grp", "ssp0_cs1_grp",
+       "ssp0_cs2_grp", "ssp0_cs3_grp", "uart0_grp", "uart0_enh_grp",
+       "uart1_grp", "i2s_in_grp", "i2s_out_grp", "gmii_grp", "rgmii_grp",
+       "rmii_grp", "sgmii_grp", "i2c0_grp", "i2c1_grp", "cec0_grp", "cec1_grp",
+       "sdhci_grp", "cf_grp", "xd_grp", "clcd_grp", "arm_trace_grp",
+       "miphy_dbg_grp", "pcie_grp", "sata_grp"
+
 Valid values for function names are:
 For All SPEAr3xx machines:
        "firda", "i2c0", "ssp_cs", "ssp0", "mii0", "gpio0", "uart0_ext",
@@ -106,3 +139,17 @@ For SPEAr320 machines:
        "uart2", "uart3", "uart4", "uart5", "uart6", "rs485", "touchscreen",
        "can0", "can1", "pwm0_1", "pwm2", "pwm3", "ssp1", "ssp2", "mii2",
        "mii0_1", "i2c1", "i2c2"
+
+
+For SPEAr1310 machines:
+       "i2c0", "ssp0", "i2s0", "i2s1", "clcd", "arm_gpio", "smi", "gmii",
+       "rgmii", "smii_0_1_2", "ras_mii_txclk", "nand", "keyboard", "uart0",
+       "gpt0", "gpt1", "sdhci", "cf", "xd", "touchscreen", "uart1", "uart2_3",
+       "uart4", "uart5", "rs485_0_1_tdm_0_1", "i2c_1_2", "i2c3_i2s1",
+       "i2c_4_5", "i2c_6_7", "can0", "can1", "pci", "sata", "ssp1", "gpt64"
+
+For SPEAr1340 machines:
+       "pads_as_gpio", "fsmc", "keyboard", "spdif_in", "spdif_out", "gpt_0_1",
+       "pwm", "vip", "cam0", "cam1", "cam2", "cam3", "smi", "ssp0", "uart0",
+       "uart1", "i2s", "gmac", "i2c0", "i2c1", "cec0", "cec1", "sdhci", "cf",
+       "xd", "clcd", "arm_trace", "miphy_dbg", "pcie", "sata"
diff --git a/Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt b/Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt
new file mode 100644 (file)
index 0000000..a87a1e9
--- /dev/null
@@ -0,0 +1,15 @@
+* NXP LPC32xx SoC Real Time Clock controller
+
+Required properties:
+- compatible: must be "nxp,lpc3220-rtc"
+- reg: physical base address of the controller and length of memory mapped
+  region.
+- interrupts: The RTC interrupt
+
+Example:
+
+       rtc@40024000 {
+               compatible = "nxp,lpc3220-rtc";
+               reg = <0x40024000 0x1000>;
+               interrupts = <52 0>;
+       };
diff --git a/Documentation/devicetree/bindings/rtc/spear-rtc.txt b/Documentation/devicetree/bindings/rtc/spear-rtc.txt
new file mode 100644 (file)
index 0000000..ca67ac6
--- /dev/null
@@ -0,0 +1,17 @@
+* SPEAr RTC
+
+Required properties:
+- compatible : "st,spear600-rtc"
+- reg : Address range of the rtc registers
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupt: Should contain the rtc interrupt number
+
+Example:
+
+       rtc@fc000000 {
+               compatible = "st,spear600-rtc";
+               reg = <0xfc000000 0x1000>;
+               interrupt-parent = <&vic1>;
+               interrupts = <12>;
+       };
diff --git a/Documentation/devicetree/bindings/sound/omap-dmic.txt b/Documentation/devicetree/bindings/sound/omap-dmic.txt
new file mode 100644 (file)
index 0000000..fd8105f
--- /dev/null
@@ -0,0 +1,21 @@
+* Texas Instruments OMAP4+ Digital Microphone Module
+
+Required properties:
+- compatible: "ti,omap4-dmic"
+- reg: Register location and size as an array:
+       <MPU access base address, size>,
+       <L3 interconnect address, size>;
+- interrupts: Interrupt number for DMIC
+- interrupt-parent: The parent interrupt controller
+- ti,hwmods: Name of the hwmod associated with OMAP dmic IP
+
+Example:
+
+dmic: dmic@4012e000 {
+       compatible = "ti,omap4-dmic";
+       reg = <0x4012e000 0x7f>, /* MPU private access */
+             <0x4902e000 0x7f>; /* L3 Interconnect */
+       interrupts = <0 114 0x4>;
+       interrupt-parent = <&gic>;
+       ti,hwmods = "dmic";
+};
diff --git a/Documentation/devicetree/bindings/sound/omap-mcpdm.txt b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
new file mode 100644 (file)
index 0000000..0741dff
--- /dev/null
@@ -0,0 +1,21 @@
+* Texas Instruments OMAP4+ McPDM
+
+Required properties:
+- compatible: "ti,omap4-mcpdm"
+- reg: Register location and size as an array:
+       <MPU access base address, size>,
+       <L3 interconnect address, size>;
+- interrupts: Interrupt number for McPDM
+- interrupt-parent: The parent interrupt controller
+- ti,hwmods: Name of the hwmod associated to the McPDM
+
+Example:
+
+mcpdm: mcpdm@40132000 {
+       compatible = "ti,omap4-mcpdm";
+       reg = <0x40132000 0x7f>, /* MPU private access */
+             <0x49032000 0x7f>; /* L3 Interconnect */
+       interrupts = <0 112 0x4>;
+       interrupt-parent = <&gic>;
+       ti,hwmods = "mcpdm";
+};
index a9c0406280e8e444ccc988d7b331827e21a6a9ce..b462d0c548237fae605cfe63a689cc216f8f945e 100644 (file)
@@ -11,7 +11,7 @@ Optional properties:
 
 Example:
 
-uart@73fbc000 {
+serial@73fbc000 {
        compatible = "fsl,imx51-uart", "fsl,imx21-uart";
        reg = <0x73fbc000 0x4000>;
        interrupts = <31>;
index 007005ddbe12ddbf5780061a94c243cc915bbcfe..e9b005dc762538073f553cf6a0af0f964017e7e0 100644 (file)
@@ -12,6 +12,9 @@ Required properties :
  - nvidia,vbus-gpio : If present, specifies a gpio that needs to be
    activated for the bus to be powered.
 
+Required properties for phy_type == ulpi:
+  - nvidia,phy-reset-gpio : The GPIO used to reset the PHY.
+
 Optional properties:
   - dr_mode : dual role mode. Indicates the working mode for
    nvidia,tegra20-ehci compatible controllers.  Can be "host", "peripheral",
index 3bbd5c51605a39e7726088b792c6ba46385e0152..ad86fb86c9a0252ba7c51df8d5ddcf91d71eb6ed 100644 (file)
@@ -29,13 +29,6 @@ The buffer-user
    in memory, mapped into its own address space, so it can access the same area
    of memory.
 
-*IMPORTANT*: [see https://lkml.org/lkml/2011/12/20/211 for more details]
-For this first version, A buffer shared using the dma_buf sharing API:
-- *may* be exported to user space using "mmap" *ONLY* by exporter, outside of
-  this framework.
-- with this new iteration of the dma-buf api cpu access from the kernel has been
-  enable, see below for the details.
-
 dma-buf operations for device dma only
 --------------------------------------
 
@@ -300,6 +293,17 @@ Access to a dma_buf from the kernel context involves three steps:
    Note that these calls need to always succeed. The exporter needs to complete
    any preparations that might fail in begin_cpu_access.
 
+   For some cases the overhead of kmap can be too high, a vmap interface
+   is introduced. This interface should be used very carefully, as vmalloc
+   space is a limited resource on many architectures.
+
+   Interfaces:
+      void *dma_buf_vmap(struct dma_buf *dmabuf)
+      void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+
+   The vmap call can fail if there is no vmap support in the exporter, or if it
+   runs out of vmalloc space. Fallback to kmap should be implemented.
+
 3. Finish access
 
    When the importer is done accessing the range specified in begin_cpu_access,
@@ -313,6 +317,83 @@ Access to a dma_buf from the kernel context involves three steps:
                                  enum dma_data_direction dir);
 
 
+Direct Userspace Access/mmap Support
+------------------------------------
+
+Being able to mmap an exported dma-buf buffer object has 2 main use-cases:
+- CPU fallback processing in a pipeline and
+- supporting existing mmap interfaces in importers.
+
+1. CPU fallback processing in a pipeline
+
+   In many processing pipelines it is sometimes required that the cpu can access
+   the data in a dma-buf (e.g. for thumbnail creation, snapshots, ...). To avoid
+   the need to handle this specially in userspace frameworks for buffer sharing
+   it's ideal if the dma_buf fd itself can be used to access the backing storage
+   from userspace using mmap.
+
+   Furthermore Android's ION framework already supports this (and is otherwise
+   rather similar to dma-buf from a userspace consumer side with using fds as
+   handles, too). So it's beneficial to support this in a similar fashion on
+   dma-buf to have a good transition path for existing Android userspace.
+
+   No special interfaces, userspace simply calls mmap on the dma-buf fd.
+
+2. Supporting existing mmap interfaces in exporters
+
+   Similar to the motivation for kernel cpu access it is again important that
+   the userspace code of a given importing subsystem can use the same interfaces
+   with an imported dma-buf buffer object as with a native buffer object. This is
+   especially important for drm where the userspace part of contemporary OpenGL,
+   X, and other drivers is huge, and reworking them to use a different way to
+   mmap a buffer rather invasive.
+
+   The assumption in the current dma-buf interfaces is that redirecting the
+   initial mmap is all that's needed. A survey of some of the existing
+   subsystems shows that no driver seems to do any nefarious thing like syncing
+   up with outstanding asynchronous processing on the device or allocating
+   special resources at fault time. So hopefully this is good enough, since
+   adding interfaces to intercept pagefaults and allow pte shootdowns would
+   increase the complexity quite a bit.
+
+   Interface:
+      int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
+                      unsigned long);
+
+   If the importing subsystem simply provides a special-purpose mmap call to set
+   up a mapping in userspace, calling do_mmap with dma_buf->file will equally
+   achieve that for a dma-buf object.
+
+3. Implementation notes for exporters
+
+   Because dma-buf buffers have invariant size over their lifetime, the dma-buf
+   core checks whether a vma is too large and rejects such mappings. The
+   exporter hence does not need to duplicate this check.
+
+   Because existing importing subsystems might presume coherent mappings for
+   userspace, the exporter needs to set up a coherent mapping. If that's not
+   possible, it needs to fake coherency by manually shooting down ptes when
+   leaving the cpu domain and flushing caches at fault time. Note that all the
+   dma_buf files share the same anon inode, hence the exporter needs to replace
+   the dma_buf file stored in vma->vm_file with its own if pte shootdown is
+   required. This is because the kernel uses the underlying inode's address_space
+   for vma tracking (and hence pte tracking at shootdown time with
+   unmap_mapping_range).
+
+   If the above shootdown dance turns out to be too expensive in certain
+   scenarios, we can extend dma-buf with a more explicit cache tracking scheme
+   for userspace mappings. But the current assumption is that using mmap is
+   always a slower path, so some inefficiencies should be acceptable.
+
+   Exporters that shoot down mappings (for any reasons) shall not do any
+   synchronization at fault time with outstanding device operations.
+   Synchronization is an orthogonal issue to sharing the backing storage of a
+   buffer and hence should not be handled by dma-buf itself. This is explicitly
+   mentioned here because many people seem to want something like this, but if
+   different exporters handle this differently, buffer sharing can fail in
+   interesting ways depending upon the exporter (if userspace starts depending
+   upon this implicit synchronization).
+
 Miscellaneous notes
 -------------------
 
@@ -336,6 +417,20 @@ Miscellaneous notes
   the exporting driver to create a dmabuf fd must provide a way to let
   userspace control setting of O_CLOEXEC flag passed in to dma_buf_fd().
 
+- If an exporter needs to manually flush caches and hence needs to fake
+  coherency for mmap support, it needs to be able to zap all the ptes pointing
+  at the backing storage. Now linux mm needs a struct address_space associated
+  with the struct file stored in vma->vm_file to do that with the function
+  unmap_mapping_range. But the dma_buf framework only backs every dma_buf fd
+  with the anon_file struct file, i.e. all dma_bufs share the same file.
+
+  Hence exporters need to setup their own file (and address_space) association
+  by setting vma->vm_file and adjusting vma->vm_pgoff in the dma_buf mmap
+  callback. In the specific case of a gem driver the exporter could use the
+  shmem file already provided by gem (and set vm_pgoff = 0). Exporters can then
+  zap ptes by unmapping the corresponding range of the struct address_space
+  associated with their own file.
+
 References:
 [1] struct dma_buf_ops in include/linux/dma-buf.h
 [2] All interfaces mentioned above defined in include/linux/dma-buf.h
index 50d82ae09e2a685bf54146123c1d02dbfc3ed886..56000b33340bbe33f0b141934c756470d32ecbbd 100644 (file)
@@ -588,3 +588,27 @@ Why:       Remount currently allows changing bound subsystems and
        replaced with conventional fsnotify.
 
 ----------------------------
+
+What:  KVM debugfs statistics
+When:  2013
+Why:   KVM tracepoints provide mostly equivalent information in a much more
+        flexible fashion.
+
+----------------------------
+
+What:  at91-mci driver ("CONFIG_MMC_AT91")
+When:  3.7
+Why:   There are two mci drivers: at91-mci and atmel-mci. The PDC support
+       was added to atmel-mci as a first step to support more chips.
+       Then at91-mci was kept only for old IP versions (on at91rm9200 and
+       at91sam9261). The support of these IP versions has just been added
+       to atmel-mci, so atmel-mci can be used for all chips.
+Who:   Ludovic Desroches <ludovic.desroches@atmel.com>
+
+----------------------------
+
+What:  net/wanrouter/
+When:  June 2013
+Why:   Unsupported/unmaintained/unused since 2.6
+
+----------------------------
index 4fca82e5276e71726f81285ba0ffef867e917618..8e2da1e06e3b2371eb82ef07105e63ad97d224b6 100644 (file)
@@ -60,8 +60,8 @@ ata *);
        ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
        ssize_t (*listxattr) (struct dentry *, char *, size_t);
        int (*removexattr) (struct dentry *, const char *);
-       void (*truncate_range)(struct inode *, loff_t, loff_t);
        int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
+       void (*update_time)(struct inode *, struct timespec *, int);
 
 locking rules:
        all may block
@@ -87,8 +87,9 @@ setxattr:     yes
 getxattr:      no
 listxattr:     no
 removexattr:   yes
-truncate_range:        yes
 fiemap:                no
+update_time:   no
+
        Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
 victim.
        cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
index b100adc38adb9af3b03d831afa26f9318b3e2855..293855e950000ce53223d137b094d266d7911ad3 100644 (file)
@@ -59,9 +59,9 @@ commit=nrsec  (*)     Ext3 can be told to sync all its data and metadata
                        Setting it to very large values will improve
                        performance.
 
-barrier=<0(*)|1>       This enables/disables the use of write barriers in
-barrier                        the jbd code.  barrier=0 disables, barrier=1 enables.
-nobarrier      (*)     This also requires an IO stack which can support
+barrier=<0|1(*)>       This enables/disables the use of write barriers in
+barrier        (*)             the jbd code.  barrier=0 disables, barrier=1 enables.
+nobarrier              This also requires an IO stack which can support
                        barriers, and if jbd gets an error on a barrier
                        write, it will disable again with a warning.
                        Write barriers enforce proper on-disk ordering
index 74acd9618819b0adc61d13bfe82fee438f602c1f..8c91d1057d9a141f77c0c64614eb17024d45256f 100644 (file)
@@ -297,7 +297,8 @@ in the beginning of ->setattr unconditionally.
 be used instead.  It gets called whenever the inode is evicted, whether it has
 remaining links or not.  Caller does *not* evict the pagecache or inode-associated
 metadata buffers; getting rid of those is responsibility of method, as it had
-been for ->delete_inode().
+been for ->delete_inode(). Caller makes sure async writeback cannot be running
+for the inode while (or after) ->evict_inode() is called.
 
        ->drop_inode() returns int now; it's called on final iput() with
 inode->i_lock held and it returns true if filesystems wants the inode to be
@@ -306,14 +307,11 @@ updated appropriately.  generic_delete_inode() is also alive and it consists
 simply of return 1.  Note that all actual eviction work is done by caller after
 ->drop_inode() returns.
 
-       clear_inode() is gone; use end_writeback() instead.  As before, it must
-be called exactly once on each call of ->evict_inode() (as it used to be for
-each call of ->delete_inode()).  Unlike before, if you are using inode-associated
-metadata buffers (i.e. mark_buffer_dirty_inode()), it's your responsibility to
-call invalidate_inode_buffers() before end_writeback().
-       No async writeback (and thus no calls of ->write_inode()) will happen
-after end_writeback() returns, so actions that should not overlap with ->write_inode()
-(e.g. freeing on-disk inode if i_nlink is 0) ought to be done after that call.
+       As before, clear_inode() must be called exactly once on each call of
+->evict_inode() (as it used to be for each call of ->delete_inode()).  Unlike
+before, if you are using inode-associated metadata buffers (i.e.
+mark_buffer_dirty_inode()), it's your responsibility to call
+invalidate_inode_buffers() before clear_inode().
 
        NOTE: checking i_nlink in the beginning of ->write_inode() and bailing out
 if it's zero is not *and* *never* *had* *been* enough.  Final unlink() and iput()
index ef088e55ab2ede67783855ecac4e59942b084bf2..fb0a6aeb936c86237fe19bcdf630339fc43ad348 100644 (file)
@@ -40,6 +40,7 @@ Table of Contents
   3.4  /proc/<pid>/coredump_filter - Core dump filtering settings
   3.5  /proc/<pid>/mountinfo - Information about mounts
   3.6  /proc/<pid>/comm  & /proc/<pid>/task/<tid>/comm
+  3.7   /proc/<pid>/task/<tid>/children - Information about task children
 
   4    Configuring procfs
   4.1  Mount options
@@ -310,6 +311,11 @@ Table 1-4: Contents of the stat files (as of 2.6.30-rc7)
   start_data    address above which program data+bss is placed
   end_data      address below which program data+bss is placed
   start_brk     address above which program heap can be expanded with brk()
+  arg_start     address above which program command line is placed
+  arg_end       address below which program command line is placed
+  env_start     address above which program environment is placed
+  env_end       address below which program environment is placed
+  exit_code     the thread's exit_code in the form reported by the waitpid system call
 ..............................................................................
 
 The /proc/PID/maps file containing the currently mapped memory regions and
@@ -743,6 +749,7 @@ Committed_AS:   100056 kB
 VmallocTotal:   112216 kB
 VmallocUsed:       428 kB
 VmallocChunk:   111088 kB
+AnonHugePages:   49152 kB
 
     MemTotal: Total usable ram (i.e. physical ram minus a few reserved
               bits and the kernel binary code)
@@ -776,6 +783,7 @@ VmallocChunk:   111088 kB
        Dirty: Memory which is waiting to get written back to the disk
    Writeback: Memory which is actively being written back to the disk
    AnonPages: Non-file backed pages mapped into userspace page tables
+AnonHugePages: Non-file backed huge pages mapped into userspace page tables
       Mapped: files which have been mmaped, such as libraries
         Slab: in-kernel data structures cache
 SReclaimable: Part of Slab, that might be reclaimed, such as caches
@@ -1576,6 +1584,23 @@ then the kernel's TASK_COMM_LEN (currently 16 chars) will result in a truncated
 comm value.
 
 
+3.7    /proc/<pid>/task/<tid>/children - Information about task children
+-------------------------------------------------------------------------
+This file provides a fast way to retrieve first level children pids
+of a task pointed by <pid>/<tid> pair. The format is a space separated
+stream of pids.
+
+Note the "first level" here -- if a child has own children they will
+not be listed here, one needs to read /proc/<children-pid>/task/<tid>/children
+to obtain the descendants.
+
+Since this interface is intended to be fast and cheap it doesn't
+guarantee to provide precise results and some children might be
+skipped, especially if they've exited right after we printed their
+pids, so one needs to either stop or freeze processes being inspected
+if precise results are needed.
+
+
 ------------------------------------------------------------------------------
 Configuring procfs
 ------------------------------------------------------------------------------
index 0d0492028082c0ecda1a0931cc5100765624a80a..efd23f4817044ac9d55932bd9476d309a02918dc 100644 (file)
@@ -363,7 +363,7 @@ struct inode_operations {
        ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
        ssize_t (*listxattr) (struct dentry *, char *, size_t);
        int (*removexattr) (struct dentry *, const char *);
-       void (*truncate_range)(struct inode *, loff_t, loff_t);
+       void (*update_time)(struct inode *, struct timespec *, int);
 };
 
 Again, all methods are called without any locks being held, unless
@@ -472,9 +472,9 @@ otherwise noted.
   removexattr: called by the VFS to remove an extended attribute from
        a file. This method is called by removexattr(2) system call.
 
-  truncate_range: a method provided by the underlying filesystem to truncate a
-       range of blocks , i.e. punch a hole somewhere in a file.
-
+  update_time: called by the VFS to update a specific time or the i_version of
+       an inode.  If this is not defined the VFS will update the inode itself
+       and call mark_inode_dirty_sync.
 
 The Address Space Object
 ========================
@@ -760,7 +760,7 @@ struct file_operations
 ----------------------
 
 This describes how the VFS can manipulate an open file. As of kernel
-2.6.22, the following members are defined:
+3.5, the following members are defined:
 
 struct file_operations {
        struct module *owner;
@@ -790,6 +790,8 @@ struct file_operations {
        int (*flock) (struct file *, int, struct file_lock *);
        ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, size_t, unsigned int);
        ssize_t (*splice_read)(struct file *, struct pipe_inode_info *, size_t, unsigned int);
+       int (*setlease)(struct file *, long arg, struct file_lock **);
+       long (*fallocate)(struct file *, int mode, loff_t offset, loff_t len);
 };
 
 Again, all methods are called without any locks being held, unless
@@ -858,6 +860,11 @@ otherwise noted.
   splice_read: called by the VFS to splice data from file to a pipe. This
               method is used by the splice(2) system call
 
+  setlease: called by the VFS to set or release a file lock lease.
+           setlease has the file_lock_lock held and must not sleep.
+
+  fallocate: called by the VFS to preallocate blocks or punch a hole.
+
 Note that the file operations are implemented by the specific
 filesystem in which the inode resides. When opening a device node
 (character or block special) most filesystems will call special
index 42c17c1fb3cdf74e25a12e11d4416dd41e5f1f9a..b0ff2ab596ce56d4082850d19cc446a6d0c2782e 100644 (file)
@@ -18,9 +18,9 @@ For the most up-to-date list of functionality constants, please check
                                   adapters typically can not do these)
   I2C_FUNC_10BIT_ADDR             Handles the 10-bit address extensions
   I2C_FUNC_PROTOCOL_MANGLING      Knows about the I2C_M_IGNORE_NAK,
-                                  I2C_M_REV_DIR_ADDR, I2C_M_NOSTART and
-                                  I2C_M_NO_RD_ACK flags (which modify the
-                                  I2C protocol!)
+                                  I2C_M_REV_DIR_ADDR and I2C_M_NO_RD_ACK
+                                  flags (which modify the I2C protocol!)
+  I2C_FUNC_NOSTART                Can skip repeated start sequence
   I2C_FUNC_SMBUS_QUICK            Handles the SMBus write_quick command
   I2C_FUNC_SMBUS_READ_BYTE        Handles the SMBus read_byte command
   I2C_FUNC_SMBUS_WRITE_BYTE       Handles the SMBus write_byte command
@@ -50,6 +50,9 @@ A few combinations of the above flags are also defined for your convenience:
                                   emulated by a real I2C adapter (using
                                   the transparent emulation layer)
 
+In kernel versions prior to 3.5 I2C_FUNC_NOSTART was implemented as
+part of I2C_FUNC_PROTOCOL_MANGLING.
+
 
 ADAPTER IMPLEMENTATION
 ----------------------
index 10518dd588146f6d57c1e3c9c912bb525bc86425..0b3e62d1f77a1853765338afd9ed9d6ea56902d2 100644 (file)
@@ -49,7 +49,9 @@ a byte read, followed by a byte write:
 Modified transactions
 =====================
 
-We have found some I2C devices that needs the following modifications:
+The following modifications to the I2C protocol can also be generated,
+with the exception of I2C_M_NOSTART, these are usually only needed to
+work around device issues:
 
   Flag I2C_M_NOSTART: 
     In a combined transaction, no 'S Addr Wr/Rd [A]' is generated at some
@@ -60,6 +62,11 @@ We have found some I2C devices that needs the following modifications:
     we do not generate Addr, but we do generate the startbit S. This will
     probably confuse all other clients on your bus, so don't try this.
 
+    This is often used to gather transmits from multiple data buffers in
+    system memory into something that appears as a single transfer to the
+    I2C device but may also be used between direction changes by some
+    rare devices.
+
   Flags I2C_M_REV_DIR_ADDR
     This toggles the Rd/Wr flag. That is, if you want to do a write, but
     need to emit an Rd instead of a Wr, or vice versa, you set this
diff --git a/Documentation/i2c/muxes/gpio-i2cmux b/Documentation/i2c/muxes/gpio-i2cmux
deleted file mode 100644 (file)
index 811cd78..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-Kernel driver gpio-i2cmux
-
-Author: Peter Korsgaard <peter.korsgaard@barco.com>
-
-Description
------------
-
-gpio-i2cmux is an i2c mux driver providing access to I2C bus segments
-from a master I2C bus and a hardware MUX controlled through GPIO pins.
-
-E.G.:
-
-  ----------              ----------  Bus segment 1   - - - - -
- |          | SCL/SDA    |          |-------------- |           |
- |          |------------|          |
- |          |            |          | Bus segment 2 |           |
- |  Linux   | GPIO 1..N  |   MUX    |---------------   Devices
- |          |------------|          |               |           |
- |          |            |          | Bus segment M
- |          |            |          |---------------|           |
-  ----------              ----------                  - - - - -
-
-SCL/SDA of the master I2C bus is multiplexed to bus segment 1..M
-according to the settings of the GPIO pins 1..N.
-
-Usage
------
-
-gpio-i2cmux uses the platform bus, so you need to provide a struct
-platform_device with the platform_data pointing to a struct
-gpio_i2cmux_platform_data with the I2C adapter number of the master
-bus, the number of bus segments to create and the GPIO pins used
-to control it. See include/linux/gpio-i2cmux.h for details.
-
-E.G. something like this for a MUX providing 4 bus segments
-controlled through 3 GPIO pins:
-
-#include <linux/gpio-i2cmux.h>
-#include <linux/platform_device.h>
-
-static const unsigned myboard_gpiomux_gpios[] = {
-       AT91_PIN_PC26, AT91_PIN_PC25, AT91_PIN_PC24
-};
-
-static const unsigned myboard_gpiomux_values[] = {
-       0, 1, 2, 3
-};
-
-static struct gpio_i2cmux_platform_data myboard_i2cmux_data = {
-       .parent         = 1,
-       .base_nr        = 2, /* optional */
-       .values         = myboard_gpiomux_values,
-       .n_values       = ARRAY_SIZE(myboard_gpiomux_values),
-       .gpios          = myboard_gpiomux_gpios,
-       .n_gpios        = ARRAY_SIZE(myboard_gpiomux_gpios),
-       .idle           = 4, /* optional */
-};
-
-static struct platform_device myboard_i2cmux = {
-       .name           = "gpio-i2cmux",
-       .id             = 0,
-       .dev            = {
-               .platform_data  = &myboard_i2cmux_data,
-       },
-};
diff --git a/Documentation/i2c/muxes/i2c-mux-gpio b/Documentation/i2c/muxes/i2c-mux-gpio
new file mode 100644 (file)
index 0000000..bd9b229
--- /dev/null
@@ -0,0 +1,65 @@
+Kernel driver i2c-gpio-mux
+
+Author: Peter Korsgaard <peter.korsgaard@barco.com>
+
+Description
+-----------
+
+i2c-gpio-mux is an i2c mux driver providing access to I2C bus segments
+from a master I2C bus and a hardware MUX controlled through GPIO pins.
+
+E.G.:
+
+  ----------              ----------  Bus segment 1   - - - - -
+ |          | SCL/SDA    |          |-------------- |           |
+ |          |------------|          |
+ |          |            |          | Bus segment 2 |           |
+ |  Linux   | GPIO 1..N  |   MUX    |---------------   Devices
+ |          |------------|          |               |           |
+ |          |            |          | Bus segment M
+ |          |            |          |---------------|           |
+  ----------              ----------                  - - - - -
+
+SCL/SDA of the master I2C bus is multiplexed to bus segment 1..M
+according to the settings of the GPIO pins 1..N.
+
+Usage
+-----
+
+i2c-gpio-mux uses the platform bus, so you need to provide a struct
+platform_device with the platform_data pointing to a struct
+gpio_i2cmux_platform_data with the I2C adapter number of the master
+bus, the number of bus segments to create and the GPIO pins used
+to control it. See include/linux/i2c-gpio-mux.h for details.
+
+E.G. something like this for a MUX providing 4 bus segments
+controlled through 3 GPIO pins:
+
+#include <linux/i2c-gpio-mux.h>
+#include <linux/platform_device.h>
+
+static const unsigned myboard_gpiomux_gpios[] = {
+       AT91_PIN_PC26, AT91_PIN_PC25, AT91_PIN_PC24
+};
+
+static const unsigned myboard_gpiomux_values[] = {
+       0, 1, 2, 3
+};
+
+static struct gpio_i2cmux_platform_data myboard_i2cmux_data = {
+       .parent         = 1,
+       .base_nr        = 2, /* optional */
+       .values         = myboard_gpiomux_values,
+       .n_values       = ARRAY_SIZE(myboard_gpiomux_values),
+       .gpios          = myboard_gpiomux_gpios,
+       .n_gpios        = ARRAY_SIZE(myboard_gpiomux_gpios),
+       .idle           = 4, /* optional */
+};
+
+static struct platform_device myboard_i2cmux = {
+       .name           = "i2c-gpio-mux",
+       .id             = 0,
+       .dev            = {
+               .platform_data  = &myboard_i2cmux_data,
+       },
+};
index 1ba84f3584e3022e952647ce7420894f01d8349e..4e1839ccb555e32c7fc3915dd4a76a0f3664b26f 100644 (file)
@@ -362,5 +362,5 @@ Resources
     http://www.almesberger.net/cv/papers/ols2k-9.ps.gz
 [2] newlib package (experimental), with initrd example
     http://sources.redhat.com/newlib/
-[3] Brouwer, Andries; "util-linux: Miscellaneous utilities for Linux"
-    ftp://ftp.win.tue.nl/pub/linux-local/utils/util-linux/
+[3] util-linux: Miscellaneous utilities for Linux
+    http://www.kernel.org/pub/linux/utils/util-linux/
index 68e32bb6bd807df797dd52204af2b973d021e0a6..6466704d47b5a5d0cf8e139051caa23b0ebc1503 100644 (file)
@@ -50,6 +50,10 @@ LDFLAGS_MODULE
 --------------------------------------------------
 Additional options used for $(LD) when linking modules.
 
+LDFLAGS_vmlinux
+--------------------------------------------------
+Additional options passed to final link of vmlinux.
+
 KBUILD_VERBOSE
 --------------------------------------------------
 Set the kbuild verbosity. Can be assigned same values as "V=...".
@@ -214,3 +218,18 @@ KBUILD_BUILD_USER, KBUILD_BUILD_HOST
 These two variables allow to override the user@host string displayed during
 boot and in /proc/version. The default value is the output of the commands
 whoami and host, respectively.
+
+KBUILD_LDS
+--------------------------------------------------
+The linker script with full path. Assigned by the top-level Makefile.
+
+KBUILD_VMLINUX_INIT
+--------------------------------------------------
+All object files for the init (first) part of vmlinux.
+Files specified with KBUILD_VMLINUX_INIT are linked first.
+
+KBUILD_VMLINUX_MAIN
+--------------------------------------------------
+All object files for the main part of vmlinux.
+KBUILD_VMLINUX_INIT and KBUILD_VMLINUX_MAIN together specify
+all the object files used to link vmlinux.
index 9d5f2a90dca96b600c1ee1bd107620c6dd1e4523..a09f1a6a830c0fc59c65c3880c6bf3f3129a9591 100644 (file)
@@ -53,15 +53,15 @@ KCONFIG_ALLCONFIG
 --------------------------------------------------
 (partially based on lkml email from/by Rob Landley, re: miniconfig)
 --------------------------------------------------
-The allyesconfig/allmodconfig/allnoconfig/randconfig variants can
-also use the environment variable KCONFIG_ALLCONFIG as a flag or a
-filename that contains config symbols that the user requires to be
-set to a specific value.  If KCONFIG_ALLCONFIG is used without a
-filename, "make *config" checks for a file named
-"all{yes/mod/no/def/random}.config" (corresponding to the *config command
-that was used) for symbol values that are to be forced.  If this file
-is not found, it checks for a file named "all.config" to contain forced
-values.
+The allyesconfig/allmodconfig/allnoconfig/randconfig variants can also
+use the environment variable KCONFIG_ALLCONFIG as a flag or a filename
+that contains config symbols that the user requires to be set to a
+specific value.  If KCONFIG_ALLCONFIG is used without a filename where
+KCONFIG_ALLCONFIG == "" or KCONFIG_ALLCONFIG == "1", "make *config"
+checks for a file named "all{yes/mod/no/def/random}.config"
+(corresponding to the *config command that was used) for symbol values
+that are to be forced.  If this file is not found, it checks for a
+file named "all.config" to contain forced values.
 
 This enables you to create "miniature" config (miniconfig) or custom
 config files containing just the config symbols that you are interested
index ea38cd1f0abaaf653934e5ccc1f312fe11534361..a92c5ebf373e2bf4bea68072b58fbc0471ad9c13 100644 (file)
@@ -335,6 +335,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                          requirements as needed. This option
                                          does not override iommu=pt
 
+       amd_iommu_dump= [HW,X86-64]
+                       Enable AMD IOMMU driver option to dump the ACPI table
+                       for AMD IOMMU. With this option enabled, AMD IOMMU
+                       driver will print ACPI tables for AMD IOMMU during
+                       IOMMU initialization.
+
        amijoy.map=     [HW,JOY] Amiga joystick support
                        Map of devices attached to JOY0DAT and JOY1DAT
                        Format: <a>,<b>
@@ -397,8 +403,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        atkbd.softrepeat= [HW]
                        Use software keyboard repeat
 
-       autotest        [IA-64]
-
        baycom_epp=     [HW,AX25]
                        Format: <io>,<mode>
 
@@ -508,6 +512,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Also note the kernel might malfunction if you disable
                        some critical bits.
 
+       cma=nn[MG]      [ARM,KNL]
+                       Sets the size of kernel global memory area for contiguous
+                       memory allocations. For more information, see
+                       include/linux/dma-contiguous.h
+
        cmo_free_hint=  [PPC] Format: { yes | no }
                        Specify whether pages are marked as being inactive
                        when they are freed.  This is used in CMO environments
@@ -515,6 +524,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        a hypervisor.
                        Default: yes
 
+       coherent_pool=nn[KMG]   [ARM,KNL]
+                       Sets the size of memory pool for coherent, atomic dma
+                       allocations if Contiguous Memory Allocator (CMA) is used.
+
        code_bytes      [X86] How many bytes of object code to print
                        in an oops report.
                        Range: 0 - 8192
@@ -1444,8 +1457,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        devices can be requested on-demand with the
                        /dev/loop-control interface.
 
-       mcatest=        [IA-64]
-
        mce             [X86-32] Machine Check Exception
 
        mce=option      [X86-64] See Documentation/x86/x86_64/boot-options.txt
diff --git a/Documentation/leds/ledtrig-transient.txt b/Documentation/leds/ledtrig-transient.txt
new file mode 100644 (file)
index 0000000..3bd38b4
--- /dev/null
@@ -0,0 +1,152 @@
+LED Transient Trigger
+=====================
+
+The leds timer trigger does not currently have an interface to activate
+a one shot timer. The current support allows for setting two timers, one for
+specifying how long a state to be on, and the second for how long the state
+to be off. The delay_on value specifies the time period an LED should stay
+in on state, followed by a delay_off value that specifies how long the LED
+should stay in off state. The on and off cycle repeats until the trigger
+gets deactivated. There is no provision for one time activation to implement
+features that require an on or off state to be held just once and then stay in
+the original state forever.
+
+Without one shot timer interface, user space can still use timer trigger to
+set a timer to hold a state, however when user space application crashes or
+goes away without deactivating the timer, the hardware will be left in that
+state permanently.
+
+As a specific example of this use-case, let's look at vibrate feature on
+phones. Vibrate function on phones is implemented using PWM pins on SoC or
+PMIC. There is a need to activate one shot timer to control the vibrate
+feature, to prevent user space crashes leaving the phone in vibrate mode
+permanently causing the battery to drain.
+
+Transient trigger addresses the need for one shot timer activation. The
+transient trigger can be enabled and disabled just like the other leds
+triggers.
+
+When an led class device driver registers itself, it can specify all leds
+triggers it supports and a default trigger. During registration, activation
+routine for the default trigger gets called. During registration of an led
+class device, the LED state does not change.
+
+When the driver unregisters, deactivation routine for the currently active
+trigger will be called, and LED state is changed to LED_OFF.
+
+Driver suspend changes the LED state to LED_OFF and resume doesn't change
+the state. Please note that there is no explicit interaction between the
+suspend and resume actions and the currently enabled trigger. LED state
+changes are suspended while the driver is in suspend state. Any timers
+that are active at the time driver gets suspended, continue to run, without
+being able to actually change the LED state. Once driver is resumed, triggers
+start functioning again.
+
+LED state changes are controlled using brightness which is a common led
+class device property. When brightness is set to 0 from user space via
+echo 0 > brightness, it will result in deactivating the current trigger.
+
+Transient trigger uses standard register and unregister interfaces. During
+trigger registration, for each led class device that specifies this trigger
+as its default trigger, trigger activation routine will get called. During
+registration, the LED state does not change, unless there is another trigger
+active, in which case LED state changes to LED_OFF.
+
+During trigger unregistration, LED state gets changed to LED_OFF.
+
+Transient trigger activation routine doesn't change the LED state. It
+creates its properties and does its initialization. Transient trigger
+deactivation routine, will cancel any timer that is active before it cleans
+up and removes the properties it created. It will restore the LED state to
+non-transient state. When driver gets suspended, irrespective of the transient
+state, the LED state changes to LED_OFF.
+
+Transient trigger can be enabled and disabled from user space on led class
+devices, that support this trigger as shown below:
+
+echo transient > trigger
+echo none > trigger
+
+NOTE: Add a new property trigger state to control the state.
+
+This trigger exports three properties, activate, state, and duration. When
+transient trigger is activated these properties are set to default values.
+
+- duration allows setting timer value in msecs. The initial value is 0.
+- activate allows activating and deactivating the timer specified by
+  duration as needed. The initial and default value is 0.  This will allow
+  duration to be set after trigger activation.
+- state allows user to specify a transient state to be held for the specified
+  duration.
+
+       activate - one shot timer activate mechanism.
+               1 when activated, 0 when deactivated.
+               default value is zero when transient trigger is enabled,
+               to allow duration to be set.
+
+               activate state indicates a timer with a value of specified
+               duration running.
+               deactivated state indicates that there is no active timer
+               running.
+
+       duration - one shot timer value. When activate is set, duration value
+               is used to start a timer that runs once. This value doesn't
+               get changed by the trigger unless user does a set via
+               echo new_value > duration
+
+       state - transient state to be held. It has two values 0 or 1. 0 maps
+               to LED_OFF and 1 maps to LED_FULL. The specified state is
+               held for the duration of the one shot timer and then the
+               state gets changed to the non-transient state which is the
+               inverse of transient state.
+               If state = LED_FULL, when the timer runs out the state will
+               go back to LED_OFF.
+               If state = LED_OFF, when the timer runs out the state will
+               go back to LED_FULL.
+               Please note that current LED state is not checked prior to
+               changing the state to the specified state.
+               Driver could map these values to inverted depending on the
+               default states it defines for the LED in its brightness_set()
+               interface which is called from the led brightness_set()
+               interfaces to control the LED state.
+
+When timer expires activate goes back to deactivated state, duration is left
+at the set value to be used when activate is set at a future time. This will
+allow user app to set the time once and activate it to run it once for the
+specified value as needed. When timer expires, state is restored to the
+non-transient state which is the inverse of the transient state.
+
+       echo 1 > activate - starts timer = duration when duration is not 0.
+       echo 0 > activate - cancels currently running timer.
+       echo n > duration - stores timer value to be used upon next
+                            activate. Currently active timer if
+                            any, continues to run for the specified time.
+       echo 0 > duration - stores timer value to be used upon next
+                            activate. Currently active timer if any,
+                            continues to run for the specified time.
+       echo 1 > state    - stores desired transient state LED_FULL to be
+                           held for the specified duration.
+       echo 0 > state    - stores desired transient state LED_OFF to be
+                           held for the specified duration.
+
+What is not supported:
+======================
+- Timer activation is one shot and extending and/or shortening the timer
+  is not supported.
+
+Example use-case 1:
+       echo transient > trigger
+       echo n > duration
+       echo 1 > state
+repeat the following step as needed:
+       echo 1 > activate - start timer = duration to run once
+       echo 1 > activate - start timer = duration to run once
+       echo none > trigger
+
+This trigger is intended to be used for the following example use cases:
+ - Control of vibrate (phones, tablets etc.) hardware by user space app.
+ - Use of LED by user space app as activity indicator.
+ - Use of LED by user space app as a kind of watchdog indicator -- as
+       long as the app is alive, it can keep the LED illuminated, if it dies
+       the LED will be extinguished automatically.
+ - Use by any user space app that needs a transient GPIO output.
index fdcca991df3067100330da55136d25b240fde3f5..b4f7f4b23f648e3e129bf3b8d31b68e17e004ee2 100644 (file)
@@ -44,6 +44,16 @@ Charger Manager supports the following:
        Normally, the platform will need to resume and suspend some devices
        that are used by Charger Manager.
 
+* Support for premature full-battery event handling
+       If the battery voltage drops by "fullbatt_vchkdrop_uV" after
+       "fullbatt_vchkdrop_ms" from the full-battery event, the framework
+       restarts charging. This check is also performed while suspended by
+       setting wakeup time accordingly and using suspend_again.
+
+* Support for uevent-notify
+       With the charger-related events, the device sends
+       notification to users with UEVENT.
+
 2. Global Charger-Manager Data related with suspend_again
 ========================================================
 In order to setup Charger Manager with suspend-again feature
@@ -55,7 +65,7 @@ if there are multiple batteries. If there are multiple batteries, the
 multiple instances of Charger Manager share the same charger_global_desc
 and it will manage in-suspend monitoring for all instances of Charger Manager.
 
-The user needs to provide all the two entries properly in order to activate
+The user needs to provide all three entries properly in order to activate
 in-suspend monitoring:
 
 struct charger_global_desc {
@@ -74,6 +84,11 @@ bool (*rtc_only_wakeup)(void);
        same struct. If there is any other wakeup source triggered the
        wakeup, it should return false. If the "rtc" is the only wakeup
        reason, it should return true.
+
+bool assume_timer_stops_in_suspend;
+       : if true, Charger Manager assumes that
+       the timer (CM uses jiffies as timer) stops during suspend. Then, CM
+       assumes that the suspend-duration is same as the alarm length.
 };
 
 3. How to setup suspend_again
@@ -111,6 +126,16 @@ enum polling_modes polling_mode;
          CM_POLL_CHARGING_ONLY: poll this battery if and only if the
                                 battery is being charged.
 
+unsigned int fullbatt_vchkdrop_ms;
+unsigned int fullbatt_vchkdrop_uV;
+       : If both have non-zero values, Charger Manager will check the
+       battery voltage drop fullbatt_vchkdrop_ms after the battery is fully
+       charged. If the voltage drop is over fullbatt_vchkdrop_uV, Charger
+       Manager will try to recharge the battery by disabling and enabling
+       chargers. Recharge with voltage drop condition only (without delay
+       condition) is needed to be implemented with hardware interrupts from
+       fuel gauges or charger devices/chips.
+
 unsigned int fullbatt_uV;
        : If specified with a non-zero value, Charger Manager assumes
        that the battery is full (capacity = 100) if the battery is not being
@@ -122,6 +147,8 @@ unsigned int polling_interval_ms;
        this battery every polling_interval_ms or more frequently.
 
 enum data_source battery_present;
+       : CM_BATTERY_PRESENT: assume that the battery exists.
+       CM_NO_BATTERY: assume that the battery does not exist.
        CM_FUEL_GAUGE: get battery presence information from fuel gauge.
        CM_CHARGER_STAT: get battery presence from chargers.
 
@@ -151,7 +178,17 @@ bool measure_battery_temp;
        the value of measure_battery_temp.
 };
 
-5. Other Considerations
+5. Notify Charger-Manager of charger events: cm_notify_event()
+=========================================================
+If there is a charger event that needs to be reported to
+Charger Manager, a charger device driver that triggers the event can call
+cm_notify_event(psy, type, msg) to notify the corresponding Charger Manager.
+In the function, psy is the charger driver's power_supply pointer, which is
+associated with Charger-Manager. The parameter "type"
+is the same as irq's type (enum cm_event_types). The event message "msg" is
+optional and is effective only if the event type is "UNDESCRIBED" or "OTHERS".
+
+6. Other Considerations
 =======================
 
 At the charger/battery-related events such as battery-pulled-out,
index 9f16c5178b662b8f9ec67f3dd7eafd6f4c89e39a..211831d4095fef63eacb13a2dbde8d647b5499a6 100644 (file)
@@ -84,6 +84,8 @@ are already charged or discharging, 'n/a' can be displayed (or
 HEALTH - represents health of the battery, values corresponds to
 POWER_SUPPLY_HEALTH_*, defined in battery.h.
 
+VOLTAGE_OCV - open circuit voltage of the battery.
+
 VOLTAGE_MAX_DESIGN, VOLTAGE_MIN_DESIGN - design values for maximal and
 minimal power supply voltages. Maximal/minimal means values of voltages
 when battery considered "full"/"empty" at normal conditions. Yes, there is
index 88fd7f5c8dcd61307171b3af852541d06a984380..13d6166d7a2798fbd54b39a90b533ad5ddebe9eb 100644 (file)
@@ -225,6 +225,13 @@ a queue must be less or equal then msg_max.
 maximum  message size value (it is every  message queue's attribute set during
 its creation).
 
+/proc/sys/fs/mqueue/msg_default is  a read/write  file for setting/getting the
+default number of messages in a queue value if attr parameter of mq_open(2) is
+NULL. If it exceeds msg_max, the default value is initialized to msg_max.
+
+/proc/sys/fs/mqueue/msgsize_default is a read/write file for setting/getting
+the default message size value if attr parameter of mq_open(2) is NULL. If it
+exceeds msgsize_max, the default value is initialized to msgsize_max.
 
 4. /proc/sys/fs/epoll - Configuration options for the epoll interface
 --------------------------------------------------------
index 6386f8c0482eaa578f68dfe850e559691e9de95d..930126698a0f5b0f6e2c458b53604d581b201063 100644 (file)
@@ -2,6 +2,7 @@ The Definitive KVM (Kernel-based Virtual Machine) API Documentation
 ===================================================================
 
 1. General description
+----------------------
 
 The kvm API is a set of ioctls that are issued to control various aspects
 of a virtual machine.  The ioctls belong to three classes
@@ -23,7 +24,9 @@ of a virtual machine.  The ioctls belong to three classes
    Only run vcpu ioctls from the same thread that was used to create the
    vcpu.
 
+
 2. File descriptors
+-------------------
 
 The kvm API is centered around file descriptors.  An initial
 open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
@@ -41,7 +44,9 @@ not cause harm to the host, their actual behavior is not guaranteed by
 the API.  The only supported use is one virtual machine per process,
 and one vcpu per thread.
 
+
 3. Extensions
+-------------
 
 As of Linux 2.6.22, the KVM ABI has been stabilized: no backward
 incompatible change are allowed.  However, there is an extension
@@ -53,7 +58,9 @@ Instead, kvm defines extension identifiers and a facility to query
 whether a particular extension identifier is available.  If it is, a
 set of ioctls is available for application use.
 
+
 4. API description
+------------------
 
 This section describes ioctls that can be used to control kvm guests.
 For each ioctl, the following information is provided along with a
@@ -75,6 +82,7 @@ description:
   Returns: the return value.  General error numbers (EBADF, ENOMEM, EINVAL)
       are not detailed, but errors with specific meanings are.
 
+
 4.1 KVM_GET_API_VERSION
 
 Capability: basic
@@ -90,6 +98,7 @@ supported.  Applications should refuse to run if KVM_GET_API_VERSION
 returns a value other than 12.  If this check passes, all ioctls
 described as 'basic' will be available.
 
+
 4.2 KVM_CREATE_VM
 
 Capability: basic
@@ -109,6 +118,7 @@ In order to create user controlled virtual machines on S390, check
 KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as
 privileged user (CAP_SYS_ADMIN).
 
+
 4.3 KVM_GET_MSR_INDEX_LIST
 
 Capability: basic
@@ -135,6 +145,7 @@ Note: if kvm indicates supports MCE (KVM_CAP_MCE), then the MCE bank MSRs are
 not returned in the MSR list, as different vcpus can have a different number
 of banks, as set via the KVM_X86_SETUP_MCE ioctl.
 
+
 4.4 KVM_CHECK_EXTENSION
 
 Capability: basic
@@ -149,6 +160,7 @@ receives an integer that describes the extension availability.
 Generally 0 means no and 1 means yes, but some extensions may report
 additional information in the integer return value.
 
+
 4.5 KVM_GET_VCPU_MMAP_SIZE
 
 Capability: basic
@@ -161,6 +173,7 @@ The KVM_RUN ioctl (cf.) communicates with userspace via a shared
 memory region.  This ioctl returns the size of that region.  See the
 KVM_RUN documentation for details.
 
+
 4.6 KVM_SET_MEMORY_REGION
 
 Capability: basic
@@ -171,6 +184,7 @@ Returns: 0 on success, -1 on error
 
 This ioctl is obsolete and has been removed.
 
+
 4.7 KVM_CREATE_VCPU
 
 Capability: basic
@@ -223,6 +237,7 @@ machines, the resulting vcpu fd can be memory mapped at page offset
 KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of the virtual
 cpu's hardware control block.
 
+
 4.8 KVM_GET_DIRTY_LOG (vm ioctl)
 
 Capability: basic
@@ -246,6 +261,7 @@ since the last call to this ioctl.  Bit 0 is the first page in the
 memory slot.  Ensure the entire structure is cleared to avoid padding
 issues.
 
+
 4.9 KVM_SET_MEMORY_ALIAS
 
 Capability: basic
@@ -256,6 +272,7 @@ Returns: 0 (success), -1 (error)
 
 This ioctl is obsolete and has been removed.
 
+
 4.10 KVM_RUN
 
 Capability: basic
@@ -272,6 +289,7 @@ obtained by mmap()ing the vcpu fd at offset 0, with the size given by
 KVM_GET_VCPU_MMAP_SIZE.  The parameter block is formatted as a 'struct
 kvm_run' (see below).
 
+
 4.11 KVM_GET_REGS
 
 Capability: basic
@@ -292,6 +310,7 @@ struct kvm_regs {
        __u64 rip, rflags;
 };
 
+
 4.12 KVM_SET_REGS
 
 Capability: basic
@@ -304,6 +323,7 @@ Writes the general purpose registers into the vcpu.
 
 See KVM_GET_REGS for the data structure.
 
+
 4.13 KVM_GET_SREGS
 
 Capability: basic
@@ -331,6 +351,7 @@ interrupt_bitmap is a bitmap of pending external interrupts.  At most
 one bit may be set.  This interrupt has been acknowledged by the APIC
 but not yet injected into the cpu core.
 
+
 4.14 KVM_SET_SREGS
 
 Capability: basic
@@ -342,6 +363,7 @@ Returns: 0 on success, -1 on error
 Writes special registers into the vcpu.  See KVM_GET_SREGS for the
 data structures.
 
+
 4.15 KVM_TRANSLATE
 
 Capability: basic
@@ -365,6 +387,7 @@ struct kvm_translation {
        __u8  pad[5];
 };
 
+
 4.16 KVM_INTERRUPT
 
 Capability: basic
@@ -413,6 +436,7 @@ c) KVM_INTERRUPT_SET_LEVEL
 Note that any value for 'irq' other than the ones stated above is invalid
 and incurs unexpected behavior.
 
+
 4.17 KVM_DEBUG_GUEST
 
 Capability: basic
@@ -423,6 +447,7 @@ Returns: -1 on error
 
 Support for this has been removed.  Use KVM_SET_GUEST_DEBUG instead.
 
+
 4.18 KVM_GET_MSRS
 
 Capability: basic
@@ -451,6 +476,7 @@ Application code should set the 'nmsrs' member (which indicates the
 size of the entries array) and the 'index' member of each array entry.
 kvm will fill in the 'data' member.
 
+
 4.19 KVM_SET_MSRS
 
 Capability: basic
@@ -466,6 +492,7 @@ Application code should set the 'nmsrs' member (which indicates the
 size of the entries array), and the 'index' and 'data' members of each
 array entry.
 
+
 4.20 KVM_SET_CPUID
 
 Capability: basic
@@ -494,6 +521,7 @@ struct kvm_cpuid {
        struct kvm_cpuid_entry entries[0];
 };
 
+
 4.21 KVM_SET_SIGNAL_MASK
 
 Capability: basic
@@ -516,6 +544,7 @@ struct kvm_signal_mask {
        __u8  sigset[0];
 };
 
+
 4.22 KVM_GET_FPU
 
 Capability: basic
@@ -541,6 +570,7 @@ struct kvm_fpu {
        __u32 pad2;
 };
 
+
 4.23 KVM_SET_FPU
 
 Capability: basic
@@ -566,6 +596,7 @@ struct kvm_fpu {
        __u32 pad2;
 };
 
+
 4.24 KVM_CREATE_IRQCHIP
 
 Capability: KVM_CAP_IRQCHIP
@@ -579,6 +610,7 @@ ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
 local APIC.  IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
 only go to the IOAPIC.  On ia64, a IOSAPIC is created.
 
+
 4.25 KVM_IRQ_LINE
 
 Capability: KVM_CAP_IRQCHIP
@@ -600,6 +632,7 @@ struct kvm_irq_level {
        __u32 level;           /* 0 or 1 */
 };
 
+
 4.26 KVM_GET_IRQCHIP
 
 Capability: KVM_CAP_IRQCHIP
@@ -621,6 +654,7 @@ struct kvm_irqchip {
        } chip;
 };
 
+
 4.27 KVM_SET_IRQCHIP
 
 Capability: KVM_CAP_IRQCHIP
@@ -642,6 +676,7 @@ struct kvm_irqchip {
        } chip;
 };
 
+
 4.28 KVM_XEN_HVM_CONFIG
 
 Capability: KVM_CAP_XEN_HVM
@@ -666,6 +701,7 @@ struct kvm_xen_hvm_config {
        __u8 pad2[30];
 };
 
+
 4.29 KVM_GET_CLOCK
 
 Capability: KVM_CAP_ADJUST_CLOCK
@@ -684,6 +720,7 @@ struct kvm_clock_data {
        __u32 pad[9];
 };
 
+
 4.30 KVM_SET_CLOCK
 
 Capability: KVM_CAP_ADJUST_CLOCK
@@ -702,6 +739,7 @@ struct kvm_clock_data {
        __u32 pad[9];
 };
 
+
 4.31 KVM_GET_VCPU_EVENTS
 
 Capability: KVM_CAP_VCPU_EVENTS
@@ -741,6 +779,7 @@ struct kvm_vcpu_events {
 KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
 interrupt.shadow contains a valid state. Otherwise, this field is undefined.
 
+
 4.32 KVM_SET_VCPU_EVENTS
 
 Capability: KVM_CAP_VCPU_EVENTS
@@ -767,6 +806,7 @@ If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
 the flags field to signal that interrupt.shadow contains a valid state and
 shall be written into the VCPU.
 
+
 4.33 KVM_GET_DEBUGREGS
 
 Capability: KVM_CAP_DEBUGREGS
@@ -785,6 +825,7 @@ struct kvm_debugregs {
        __u64 reserved[9];
 };
 
+
 4.34 KVM_SET_DEBUGREGS
 
 Capability: KVM_CAP_DEBUGREGS
@@ -798,6 +839,7 @@ Writes debug registers into the vcpu.
 See KVM_GET_DEBUGREGS for the data structure. The flags field is unused
 yet and must be cleared on entry.
 
+
 4.35 KVM_SET_USER_MEMORY_REGION
 
 Capability: KVM_CAP_USER_MEM
@@ -844,6 +886,7 @@ It is recommended to use this API instead of the KVM_SET_MEMORY_REGION ioctl.
 The KVM_SET_MEMORY_REGION does not allow fine grained control over memory
 allocation and is deprecated.
 
+
 4.36 KVM_SET_TSS_ADDR
 
 Capability: KVM_CAP_SET_TSS_ADDR
@@ -862,6 +905,7 @@ This ioctl is required on Intel-based hosts.  This is needed on Intel hardware
 because of a quirk in the virtualization implementation (see the internals
 documentation when it pops into existence).
 
+
 4.37 KVM_ENABLE_CAP
 
 Capability: KVM_CAP_ENABLE_CAP
@@ -897,6 +941,7 @@ function properly, this is the place to put them.
        __u8  pad[64];
 };
 
+
 4.38 KVM_GET_MP_STATE
 
 Capability: KVM_CAP_MP_STATE
@@ -927,6 +972,7 @@ Possible values are:
 This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
 irqchip, the multiprocessing state must be maintained by userspace.
 
+
 4.39 KVM_SET_MP_STATE
 
 Capability: KVM_CAP_MP_STATE
@@ -941,6 +987,7 @@ arguments.
 This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
 irqchip, the multiprocessing state must be maintained by userspace.
 
+
 4.40 KVM_SET_IDENTITY_MAP_ADDR
 
 Capability: KVM_CAP_SET_IDENTITY_MAP_ADDR
@@ -959,6 +1006,7 @@ This ioctl is required on Intel-based hosts.  This is needed on Intel hardware
 because of a quirk in the virtualization implementation (see the internals
 documentation when it pops into existence).
 
+
 4.41 KVM_SET_BOOT_CPU_ID
 
 Capability: KVM_CAP_SET_BOOT_CPU_ID
@@ -971,6 +1019,7 @@ Define which vcpu is the Bootstrap Processor (BSP).  Values are the same
 as the vcpu id in KVM_CREATE_VCPU.  If this ioctl is not called, the default
 is vcpu 0.
 
+
 4.42 KVM_GET_XSAVE
 
 Capability: KVM_CAP_XSAVE
@@ -985,6 +1034,7 @@ struct kvm_xsave {
 
 This ioctl would copy current vcpu's xsave struct to the userspace.
 
+
 4.43 KVM_SET_XSAVE
 
 Capability: KVM_CAP_XSAVE
@@ -999,6 +1049,7 @@ struct kvm_xsave {
 
 This ioctl would copy userspace's xsave struct to the kernel.
 
+
 4.44 KVM_GET_XCRS
 
 Capability: KVM_CAP_XCRS
@@ -1022,6 +1073,7 @@ struct kvm_xcrs {
 
 This ioctl would copy current vcpu's xcrs to the userspace.
 
+
 4.45 KVM_SET_XCRS
 
 Capability: KVM_CAP_XCRS
@@ -1045,6 +1097,7 @@ struct kvm_xcrs {
 
 This ioctl would set vcpu's xcr to the value userspace specified.
 
+
 4.46 KVM_GET_SUPPORTED_CPUID
 
 Capability: KVM_CAP_EXT_CPUID
@@ -1119,6 +1172,7 @@ support.  Instead it is reported via
 if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the
 feature in userspace, then you can enable the feature for KVM_SET_CPUID2.
 
+
 4.47 KVM_PPC_GET_PVINFO
 
 Capability: KVM_CAP_PPC_GET_PVINFO
@@ -1142,6 +1196,7 @@ of 4 instructions that make up a hypercall.
 If any additional field gets added to this structure later on, a bit for that
 additional piece of information will be set in the flags bitmap.
 
+
 4.48 KVM_ASSIGN_PCI_DEVICE
 
 Capability: KVM_CAP_DEVICE_ASSIGNMENT
@@ -1185,6 +1240,7 @@ Only PCI header type 0 devices with PCI BAR resources are supported by
 device assignment.  The user requesting this ioctl must have read/write
 access to the PCI sysfs resource files associated with the device.
 
+
 4.49 KVM_DEASSIGN_PCI_DEVICE
 
 Capability: KVM_CAP_DEVICE_DEASSIGNMENT
@@ -1198,6 +1254,7 @@ Ends PCI device assignment, releasing all associated resources.
 See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is
 used in kvm_assigned_pci_dev to identify the device.
 
+
 4.50 KVM_ASSIGN_DEV_IRQ
 
 Capability: KVM_CAP_ASSIGN_DEV_IRQ
@@ -1231,6 +1288,7 @@ The following flags are defined:
 It is not valid to specify multiple types per host or guest IRQ. However, the
 IRQ type of host and guest can differ or can even be null.
 
+
 4.51 KVM_DEASSIGN_DEV_IRQ
 
 Capability: KVM_CAP_ASSIGN_DEV_IRQ
@@ -1245,6 +1303,7 @@ See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
 by assigned_dev_id, flags must correspond to the IRQ type specified on
 KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
 
+
 4.52 KVM_SET_GSI_ROUTING
 
 Capability: KVM_CAP_IRQ_ROUTING
@@ -1293,6 +1352,7 @@ struct kvm_irq_routing_msi {
        __u32 pad;
 };
 
+
 4.53 KVM_ASSIGN_SET_MSIX_NR
 
 Capability: KVM_CAP_DEVICE_MSIX
@@ -1314,6 +1374,7 @@ struct kvm_assigned_msix_nr {
 
 #define KVM_MAX_MSIX_PER_DEV           256
 
+
 4.54 KVM_ASSIGN_SET_MSIX_ENTRY
 
 Capability: KVM_CAP_DEVICE_MSIX
@@ -1332,7 +1393,8 @@ struct kvm_assigned_msix_entry {
        __u16 padding[3];
 };
 
-4.54 KVM_SET_TSC_KHZ
+
+4.55 KVM_SET_TSC_KHZ
 
 Capability: KVM_CAP_TSC_CONTROL
 Architectures: x86
@@ -1343,7 +1405,8 @@ Returns: 0 on success, -1 on error
 Specifies the tsc frequency for the virtual machine. The unit of the
 frequency is KHz.
 
-4.55 KVM_GET_TSC_KHZ
+
+4.56 KVM_GET_TSC_KHZ
 
 Capability: KVM_CAP_GET_TSC_KHZ
 Architectures: x86
@@ -1355,7 +1418,8 @@ Returns the tsc frequency of the guest. The unit of the return value is
 KHz. If the host has unstable tsc this ioctl returns -EIO instead as an
 error.
 
-4.56 KVM_GET_LAPIC
+
+4.57 KVM_GET_LAPIC
 
 Capability: KVM_CAP_IRQCHIP
 Architectures: x86
@@ -1371,7 +1435,8 @@ struct kvm_lapic_state {
 Reads the Local APIC registers and copies them into the input argument.  The
 data format and layout are the same as documented in the architecture manual.
 
-4.57 KVM_SET_LAPIC
+
+4.58 KVM_SET_LAPIC
 
 Capability: KVM_CAP_IRQCHIP
 Architectures: x86
@@ -1387,7 +1452,8 @@ struct kvm_lapic_state {
 Copies the input argument into the Local APIC registers.  The data format
 and layout are the same as documented in the architecture manual.
 
-4.58 KVM_IOEVENTFD
+
+4.59 KVM_IOEVENTFD
 
 Capability: KVM_CAP_IOEVENTFD
 Architectures: all
@@ -1417,7 +1483,8 @@ The following flags are defined:
 If datamatch flag is set, the event will be signaled only if the written value
 to the registered address is equal to datamatch in struct kvm_ioeventfd.
 
-4.59 KVM_DIRTY_TLB
+
+4.60 KVM_DIRTY_TLB
 
 Capability: KVM_CAP_SW_TLB
 Architectures: ppc
@@ -1449,7 +1516,8 @@ The "num_dirty" field is a performance hint for KVM to determine whether it
 should skip processing the bitmap and just invalidate everything.  It must
 be set to the number of set bits in the bitmap.
 
-4.60 KVM_ASSIGN_SET_INTX_MASK
+
+4.61 KVM_ASSIGN_SET_INTX_MASK
 
 Capability: KVM_CAP_PCI_2_3
 Architectures: x86
@@ -1482,6 +1550,7 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
 by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
 evaluated.
 
+
 4.62 KVM_CREATE_SPAPR_TCE
 
 Capability: KVM_CAP_SPAPR_TCE
@@ -1517,6 +1586,7 @@ the entries written by kernel-handled H_PUT_TCE calls, and also lets
 userspace update the TCE table directly which is useful in some
 circumstances.
 
+
 4.63 KVM_ALLOCATE_RMA
 
 Capability: KVM_CAP_PPC_RMA
@@ -1549,6 +1619,7 @@ is supported; 2 if the processor requires all virtual machines to have
 an RMA, or 1 if the processor can use an RMA but doesn't require it,
 because it supports the Virtual RMA (VRMA) facility.
 
+
 4.64 KVM_NMI
 
 Capability: KVM_CAP_USER_NMI
@@ -1574,6 +1645,7 @@ following algorithm:
 Some guests configure the LINT1 NMI input to cause a panic, aiding in
 debugging.
 
+
 4.65 KVM_S390_UCAS_MAP
 
 Capability: KVM_CAP_S390_UCONTROL
@@ -1593,6 +1665,7 @@ This ioctl maps the memory at "user_addr" with the length "length" to
 the vcpu's address space starting at "vcpu_addr". All parameters need to
 be aligned by 1 megabyte.
 
+
 4.66 KVM_S390_UCAS_UNMAP
 
 Capability: KVM_CAP_S390_UCONTROL
@@ -1612,6 +1685,7 @@ This ioctl unmaps the memory in the vcpu's address space starting at
 "vcpu_addr" with the length "length". The field "user_addr" is ignored.
 All parameters need to be aligned by 1 megabyte.
 
+
 4.67 KVM_S390_VCPU_FAULT
 
 Capability: KVM_CAP_S390_UCONTROL
@@ -1628,6 +1702,7 @@ table upfront. This is useful to handle validity intercepts for user
 controlled virtual machines to fault in the virtual cpu's lowcore pages
 prior to calling the KVM_RUN ioctl.
 
+
 4.68 KVM_SET_ONE_REG
 
 Capability: KVM_CAP_ONE_REG
@@ -1653,6 +1728,7 @@ registers, find a list below:
         |                       |
   PPC   | KVM_REG_PPC_HIOR      | 64
 
+
 4.69 KVM_GET_ONE_REG
 
 Capability: KVM_CAP_ONE_REG
@@ -1669,7 +1745,193 @@ at the memory location pointed to by "addr".
 The list of registers accessible using this interface is identical to the
 list in 4.68.
 
+
+4.70 KVM_KVMCLOCK_CTRL
+
+Capability: KVM_CAP_KVMCLOCK_CTRL
+Architectures: Any that implement pvclocks (currently x86 only)
+Type: vcpu ioctl
+Parameters: None
+Returns: 0 on success, -1 on error
+
+This signals to the host kernel that the specified guest is being paused by
+userspace.  The host will set a flag in the pvclock structure that is checked
+from the soft lockup watchdog.  The flag is part of the pvclock structure that
+is shared between guest and host, specifically the second bit of the flags
+field of the pvclock_vcpu_time_info structure.  It will be set exclusively by
+the host and read/cleared exclusively by the guest.  The guest operation of
+checking and clearing the flag must be an atomic operation, so
+load-link/store-conditional, or equivalent must be used.  There are two cases
+where the guest will clear the flag: when the soft lockup watchdog timer resets
+itself or when a soft lockup is detected.  This ioctl can be called any time
+after pausing the vcpu, but before it is resumed.
+
+
+4.71 KVM_SIGNAL_MSI
+
+Capability: KVM_CAP_SIGNAL_MSI
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_msi (in)
+Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
+
+Directly inject an MSI message. Only valid with in-kernel irqchip that handles
+MSI messages.
+
+struct kvm_msi {
+       __u32 address_lo;
+       __u32 address_hi;
+       __u32 data;
+       __u32 flags;
+       __u8  pad[16];
+};
+
+No flags are defined so far. The corresponding field must be 0.
+
+
+4.71 KVM_CREATE_PIT2
+
+Capability: KVM_CAP_PIT2
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_pit_config (in)
+Returns: 0 on success, -1 on error
+
+Creates an in-kernel device model for the i8254 PIT. This call is only valid
+after enabling in-kernel irqchip support via KVM_CREATE_IRQCHIP. The following
+parameters have to be passed:
+
+struct kvm_pit_config {
+       __u32 flags;
+       __u32 pad[15];
+};
+
+Valid flags are:
+
+#define KVM_PIT_SPEAKER_DUMMY     1 /* emulate speaker port stub */
+
+PIT timer interrupts may use a per-VM kernel thread for injection. If it
+exists, this thread will have a name of the following pattern:
+
+kvm-pit/<owner-process-pid>
+
+When running a guest with elevated priorities, the scheduling parameters of
+this thread may have to be adjusted accordingly.
+
+This IOCTL replaces the obsolete KVM_CREATE_PIT.
+
+
+4.72 KVM_GET_PIT2
+
+Capability: KVM_CAP_PIT_STATE2
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_pit_state2 (out)
+Returns: 0 on success, -1 on error
+
+Retrieves the state of the in-kernel PIT model. Only valid after
+KVM_CREATE_PIT2. The state is returned in the following structure:
+
+struct kvm_pit_state2 {
+       struct kvm_pit_channel_state channels[3];
+       __u32 flags;
+       __u32 reserved[9];
+};
+
+Valid flags are:
+
+/* disable PIT in HPET legacy mode */
+#define KVM_PIT_FLAGS_HPET_LEGACY  0x00000001
+
+This IOCTL replaces the obsolete KVM_GET_PIT.
+
+
+4.73 KVM_SET_PIT2
+
+Capability: KVM_CAP_PIT_STATE2
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_pit_state2 (in)
+Returns: 0 on success, -1 on error
+
+Sets the state of the in-kernel PIT model. Only valid after KVM_CREATE_PIT2.
+See KVM_GET_PIT2 for details on struct kvm_pit_state2.
+
+This IOCTL replaces the obsolete KVM_SET_PIT.
+
+
+4.74 KVM_PPC_GET_SMMU_INFO
+
+Capability: KVM_CAP_PPC_GET_SMMU_INFO
+Architectures: powerpc
+Type: vm ioctl
+Parameters: None
+Returns: 0 on success, -1 on error
+
+This populates and returns a structure describing the features of
+the "Server" class MMU emulation supported by KVM.
+This can in turn be used by userspace to generate the appropriate
+device-tree properties for the guest operating system.
+
+The structure contains some global information, followed by an
+array of supported segment page sizes:
+
+      struct kvm_ppc_smmu_info {
+            __u64 flags;
+            __u32 slb_size;
+            __u32 pad;
+            struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
+      };
+
+The supported flags are:
+
+    - KVM_PPC_PAGE_SIZES_REAL:
+        When that flag is set, guest page sizes must "fit" the backing
+        store page sizes. When not set, any page size in the list can
+        be used regardless of how they are backed by userspace.
+
+    - KVM_PPC_1T_SEGMENTS
+        The emulated MMU supports 1T segments in addition to the
+        standard 256M ones.
+
+The "slb_size" field indicates how many SLB entries are supported
+
+The "sps" array contains 8 entries indicating the supported base
+page sizes for a segment in increasing order. Each entry is defined
+as follows:
+
+   struct kvm_ppc_one_seg_page_size {
+       __u32 page_shift;       /* Base page shift of segment (or 0) */
+       __u32 slb_enc;          /* SLB encoding for BookS */
+       struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
+   };
+
+An entry with a "page_shift" of 0 is unused. Because the array is
+organized in increasing order, a lookup can stop when encountering
+such an entry.
+
+The "slb_enc" field provides the encoding to use in the SLB for the
+page size. The bits are in positions such that the value can directly
+be OR'ed into the "vsid" argument of the slbmte instruction.
+
+The "enc" array is a list which for each of those segment base page
+size provides the list of supported actual page sizes (which can be
+only larger or equal to the base page size), along with the
+corresponding encoding in the hash PTE. Similarly, the array is
+8 entries sorted by increasing sizes and an entry with a "0" shift
+is an empty entry and a terminator:
+
+   struct kvm_ppc_one_page_size {
+       __u32 page_shift;       /* Page shift (or 0) */
+       __u32 pte_enc;          /* Encoding in the HPTE (>>12) */
+   };
+
+The "pte_enc" field provides a value that can be OR'ed into the hash
+PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
+into the hash PTE second double word).
+
 5. The kvm_run structure
+------------------------
 
 Application code obtains a pointer to the kvm_run structure by
 mmap()ing a vcpu fd.  From that point, application code can control
@@ -1910,7 +2172,9 @@ and usually define the validity of a groups of registers. (e.g. one bit
 
 };
 
+
 6. Capabilities that can be enabled
+-----------------------------------
 
 There are certain capabilities that change the behavior of the virtual CPU when
 enabled. To enable them, please see section 4.37. Below you can find a list of
@@ -1926,6 +2190,7 @@ The following information is provided along with the description:
   Returns: the return value.  General error numbers (EBADF, ENOMEM, EINVAL)
       are not detailed, but errors with specific meanings are.
 
+
 6.1 KVM_CAP_PPC_OSI
 
 Architectures: ppc
@@ -1939,6 +2204,7 @@ between the guest and the host.
 
 When this capability is enabled, KVM_EXIT_OSI can occur.
 
+
 6.2 KVM_CAP_PPC_PAPR
 
 Architectures: ppc
@@ -1957,6 +2223,7 @@ HTAB invisible to the guest.
 
 When this capability is enabled, KVM_EXIT_PAPR_HCALL can occur.
 
+
 6.3 KVM_CAP_SW_TLB
 
 Architectures: ppc
index 882068538c9c95fc08fd5dec62ac008a5bda1cc3..83afe65d4966d0664186e2163083510da5aba012 100644 (file)
@@ -10,11 +10,15 @@ a guest.
 KVM cpuid functions are:
 
 function: KVM_CPUID_SIGNATURE (0x40000000)
-returns : eax = 0,
+returns : eax = 0x40000001,
           ebx = 0x4b4d564b,
           ecx = 0x564b4d56,
           edx = 0x4d.
 Note that this value in ebx, ecx and edx corresponds to the string "KVMKVMKVM".
+The value in eax corresponds to the maximum cpuid function present in this leaf,
+and will be updated if more functions are added in the future.
+Note also that old hosts set eax value to 0x0. This should
+be interpreted as if the value was 0x40000001.
 This function queries the presence of KVM cpuid leafs.
 
 
index 50317809113dd6838cc6f5145b26c2b6cb657d99..96b41bd975233502e9a31fc1906f457e1a4d259e 100644 (file)
@@ -108,6 +108,10 @@ MSR_KVM_SYSTEM_TIME_NEW:  0x4b564d01
                            |              | time measures taken across
                     0      |      24      | multiple cpus are guaranteed to
                            |              | be monotonic
+               -------------------------------------------------------------
+                           |              | guest vcpu has been paused by
+                    1      |     N/A      | the host
+                           |              | See 4.70 in api.txt
                -------------------------------------------------------------
 
        Availability of this MSR must be checked via bit 3 in 0x40000001 cpuid
index 4600cbe3d6beabc7e9fb77c147951bcda70e6b46..7587493c67f11e809861b0e592c5d8458ab75030 100644 (file)
@@ -16,7 +16,7 @@ There are three components to pagemap:
     * Bits 0-4   swap type if swapped
     * Bits 5-54  swap offset if swapped
     * Bits 55-60 page shift (page size = 1<<page shift)
-    * Bit  61    reserved for future use
+    * Bit  61    page is file-page or shared-anon
     * Bit  62    page swapped
     * Bit  63    page present
 
index 6752870c4970d73721c48a041b15aa36af929ab5..b0c6d1bbb43444096fe31b4281d60456febfb704 100644 (file)
@@ -17,7 +17,7 @@ data and perform operation on the slabs. By default slabinfo only lists
 slabs that have data in them. See "slabinfo -h" for more options when
 running the command. slabinfo can be compiled with
 
-gcc -o slabinfo tools/slub/slabinfo.c
+gcc -o slabinfo tools/vm/slabinfo.c
 
 Some of the modes of operation of slabinfo require that slub debugging
 be enabled on the command line. F.e. no tracking information will be
index 29bdf62aac09bc9bfb06789c40fa18b55d7b6752..f734bb2a78dc797aa62a2d2b0e1e7cc3390fef87 100644 (file)
@@ -166,6 +166,68 @@ behavior. So to make them effective you need to restart any
 application that could have been using hugepages. This also applies to
 the regions registered in khugepaged.
 
+== Monitoring usage ==
+
+The number of transparent huge pages currently used by the system is
+available by reading the AnonHugePages field in /proc/meminfo. To
+identify what applications are using transparent huge pages, it is
+necessary to read /proc/PID/smaps and count the AnonHugePages fields
+for each mapping. Note that reading the smaps file is expensive and
+reading it frequently will incur overhead.
+
+There are a number of counters in /proc/vmstat that may be used to
+monitor how successfully the system is providing huge pages for use.
+
+thp_fault_alloc is incremented every time a huge page is successfully
+       allocated to handle a page fault. This applies to both the
+       first time a page is faulted and for COW faults.
+
+thp_collapse_alloc is incremented by khugepaged when it has found
+       a range of pages to collapse into one huge page and has
+       successfully allocated a new huge page to store the data.
+
+thp_fault_fallback is incremented if a page fault fails to allocate
+       a huge page and instead falls back to using small pages.
+
+thp_collapse_alloc_failed is incremented if khugepaged found a range
+       of pages that should be collapsed into one huge page but failed
+       the allocation.
+
+thp_split is incremented every time a huge page is split into base
+       pages. This can happen for a variety of reasons but a common
+       reason is that a huge page is old and is being reclaimed.
+
+As the system ages, allocating huge pages may be expensive as the
+system uses memory compaction to copy data around memory to free a
+huge page for use. There are some counters in /proc/vmstat to help
+monitor this overhead.
+
+compact_stall is incremented every time a process stalls to run
+       memory compaction so that a huge page is free for use.
+
+compact_success is incremented if the system compacted memory and
+       freed a huge page for use.
+
+compact_fail is incremented if the system tries to compact memory
+       but failed.
+
+compact_pages_moved is incremented each time a page is moved. If
+       this value is increasing rapidly, it implies that the system
+       is copying a lot of data to satisfy the huge page allocation.
+       It is possible that the cost of copying exceeds any savings
+       from reduced TLB misses.
+
+compact_pagemigrate_failed is incremented when the underlying mechanism
+       for moving a page failed.
+
+compact_blocks_moved is incremented each time memory compaction examines
+       a huge page aligned range of pages.
+
+It is possible to establish how long the stalls were using the function
+tracer to record how long was spent in __alloc_pages_nodemask and
+using the mm_page_alloc tracepoint to identify which allocations were
+for huge pages.
+
 == get_user_pages and follow_page ==
 
 get_user_pages and follow_page if run on a hugepage, will return the
index 25fe4304f2fcb7b21b4065de12cfa5af4ad9f00b..086638f6c82d2e37d92e2aefa94243c5b1b74b13 100644 (file)
@@ -1,6 +1,6 @@
 The Linux WatchDog Timer Driver Core kernel API.
 ===============================================
-Last reviewed: 16-Mar-2012
+Last reviewed: 22-May-2012
 
 Wim Van Sebroeck <wim@iguana.be>
 
@@ -39,6 +39,10 @@ watchdog_device structure.
 The watchdog device structure looks like this:
 
 struct watchdog_device {
+       int id;
+       struct cdev cdev;
+       struct device *dev;
+       struct device *parent;
        const struct watchdog_info *info;
        const struct watchdog_ops *ops;
        unsigned int bootstatus;
@@ -46,10 +50,20 @@ struct watchdog_device {
        unsigned int min_timeout;
        unsigned int max_timeout;
        void *driver_data;
+       struct mutex lock;
        unsigned long status;
 };
 
 It contains following fields:
+* id: set by watchdog_register_device, id 0 is special. It has both a
+  /dev/watchdog0 cdev (dynamic major, minor 0) as well as the old
+  /dev/watchdog miscdev. The id is set automatically when calling
+  watchdog_register_device.
+* cdev: cdev for the dynamic /dev/watchdog<id> device nodes. This
+  field is also populated by watchdog_register_device.
+* dev: device under the watchdog class (created by watchdog_register_device).
+* parent: set this to the parent device (or NULL) before calling
+  watchdog_register_device.
 * info: a pointer to a watchdog_info structure. This structure gives some
   additional information about the watchdog timer itself. (Like its unique name)
 * ops: a pointer to the list of watchdog operations that the watchdog supports.
@@ -61,6 +75,7 @@ It contains following fields:
 * driver_data: a pointer to the drivers private data of a watchdog device.
   This data should only be accessed via the watchdog_set_drvdata and
   watchdog_get_drvdata routines.
+* lock: Mutex for WatchDog Timer Driver Core internal use only.
 * status: this field contains a number of status bits that give extra
   information about the status of the device (Like: is the watchdog timer
   running/active, is the nowayout bit set, is the device opened via
@@ -78,6 +93,8 @@ struct watchdog_ops {
        unsigned int (*status)(struct watchdog_device *);
        int (*set_timeout)(struct watchdog_device *, unsigned int);
        unsigned int (*get_timeleft)(struct watchdog_device *);
+       void (*ref)(struct watchdog_device *);
+       void (*unref)(struct watchdog_device *);
        long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
 };
 
@@ -85,6 +102,21 @@ It is important that you first define the module owner of the watchdog timer
 driver's operations. This module owner will be used to lock the module when
 the watchdog is active. (This to avoid a system crash when you unload the
 module and /dev/watchdog is still open).
+
+If the watchdog_device struct is dynamically allocated, just locking the module
+is not enough and a driver also needs to define the ref and unref operations to
+ensure the structure holding the watchdog_device does not go away.
+
+The simplest (and usually sufficient) implementation of this is to:
+1) Add a kref struct to the same structure which is holding the watchdog_device
+2) Define a release callback for the kref which frees the struct holding both
+3) Call kref_init on this kref *before* calling watchdog_register_device()
+4) Define a ref operation calling kref_get on this kref
+5) Define a unref operation calling kref_put on this kref
+6) When it is time to cleanup:
+ * Do not kfree() the struct holding both, the last kref_put will do this!
+ * *After* calling watchdog_unregister_device() call kref_put on the kref
+
 Some operations are mandatory and some are optional. The mandatory operations
 are:
 * start: this is a pointer to the routine that starts the watchdog timer
@@ -125,6 +157,10 @@ they are supported. These optional routines/operations are:
   (Note: the WDIOF_SETTIMEOUT needs to be set in the options field of the
   watchdog's info structure).
 * get_timeleft: this routines returns the time that's left before a reset.
+* ref: the operation that calls kref_get on the kref of a dynamically
+  allocated watchdog_device struct.
+* unref: the operation that calls kref_put on the kref of a dynamically
+  allocated watchdog_device struct.
 * ioctl: if this routine is present then it will be called first before we do
   our own internal ioctl call handling. This routine should return -ENOIOCTLCMD
   if a command is not supported. The parameters that are passed to the ioctl
@@ -144,6 +180,11 @@ bit-operations. The status bits that are defined are:
   (This bit should only be used by the WatchDog Timer Driver Core).
 * WDOG_NO_WAY_OUT: this bit stores the nowayout setting for the watchdog.
   If this bit is set then the watchdog timer will not be able to stop.
+* WDOG_UNREGISTERED: this bit gets set by the WatchDog Timer Driver Core
+  after calling watchdog_unregister_device, and then checked before calling
+  any watchdog_ops, so that you can be sure that no operations (other then
+  unref) will get called after unregister, even if userspace still holds a
+  reference to /dev/watchdog
 
   To set the WDOG_NO_WAY_OUT status bit (before registering your watchdog
   timer device) you can either:
index 17ddd822b4563c2cdf80ba80f422db5b4b35e002..04fddbacdbde74a03ae18ea6d91e5c0f452c0b44 100644 (file)
@@ -78,6 +78,11 @@ wd0_timeout: Default watchdog0 timeout in 1/10secs
 wd1_timeout: Default watchdog1 timeout in 1/10secs
 wd2_timeout: Default watchdog2 timeout in 1/10secs
 -------------------------------------------------
+da9052wdt:
+timeout: Watchdog timeout in seconds. 2<= timeout <=131, default=2.048s
+nowayout: Watchdog cannot be stopped once started
+       (default=kernel config parameter)
+-------------------------------------------------
 davinci_wdt:
 heartbeat: Watchdog heartbeat period in seconds from 1 to 600, default 60
 -------------------------------------------------
diff --git a/Documentation/x86/efi-stub.txt b/Documentation/x86/efi-stub.txt
new file mode 100644 (file)
index 0000000..44e6bb6
--- /dev/null
@@ -0,0 +1,65 @@
+                         The EFI Boot Stub
+                    ---------------------------
+
+On the x86 platform, a bzImage can masquerade as a PE/COFF image,
+thereby convincing EFI firmware loaders to load it as an EFI
+executable. The code that modifies the bzImage header, along with the
+EFI-specific entry point that the firmware loader jumps to are
+collectively known as the "EFI boot stub", and live in
+arch/x86/boot/header.S and arch/x86/boot/compressed/eboot.c,
+respectively.
+
+By using the EFI boot stub it's possible to boot a Linux kernel
+without the use of a conventional EFI boot loader, such as grub or
+elilo. Since the EFI boot stub performs the jobs of a boot loader, in
+a certain sense it *IS* the boot loader.
+
+The EFI boot stub is enabled with the CONFIG_EFI_STUB kernel option.
+
+
+**** How to install bzImage.efi
+
+The bzImage located in arch/x86/boot/bzImage must be copied to the EFI
+System Partition (ESP) and renamed with the extension ".efi". Without
+the extension the EFI firmware loader will refuse to execute it. It's
+not possible to execute bzImage.efi from the usual Linux file systems
+because EFI firmware doesn't have support for them.
+
+
+**** Passing kernel parameters from the EFI shell
+
+Arguments to the kernel can be passed after bzImage.efi, e.g.
+
+       fs0:> bzImage.efi console=ttyS0 root=/dev/sda4
+
+
+**** The "initrd=" option
+
+Like most boot loaders, the EFI stub allows the user to specify
+multiple initrd files using the "initrd=" option. This is the only EFI
+stub-specific command line parameter, everything else is passed to the
+kernel when it boots.
+
+The path to the initrd file must be an absolute path from the
+beginning of the ESP, relative path names do not work. Also, the path
+is an EFI-style path and directory elements must be separated with
+backslashes (\). For example, given the following directory layout,
+
+fs0:>
+       Kernels\
+                       bzImage.efi
+                       initrd-large.img
+
+       Ramdisks\
+                       initrd-small.img
+                       initrd-medium.img
+
+to boot with the initrd-large.img file if the current working
+directory is fs0:\Kernels, the following command must be used,
+
+       fs0:\Kernels> bzImage.efi initrd=\Kernels\initrd-large.img
+
+Notice how bzImage.efi can be specified with a relative path. That's
+because the image we're executing is interpreted by the EFI shell,
+which understands relative paths, whereas the rest of the command line
+is passed to bzImage.efi.
index 150a29f3cd334abcc1ba18618931d6391af09713..55f0fda602ecc69d5242ca5181c002ab2a8dd983 100644 (file)
@@ -1905,6 +1905,16 @@ F:       Documentation/filesystems/coda.txt
 F:     fs/coda/
 F:     include/linux/coda*.h
 
+COMMON CLK FRAMEWORK
+M:     Mike Turquette <mturquette@ti.com>
+M:     Mike Turquette <mturquette@linaro.org>
+L:     linux-arm-kernel@lists.infradead.org (same as CLK API & CLKDEV)
+T:     git git://git.linaro.org/people/mturquette/linux.git
+S:     Maintained
+F:     drivers/clk/clk.c
+F:     drivers/clk/clk-*
+F:     include/linux/clk-pr*
+
 COMMON INTERNET FILE SYSTEM (CIFS)
 M:     Steve French <sfrench@samba.org>
 L:     linux-cifs@vger.kernel.org
@@ -2808,6 +2818,12 @@ F:       Documentation/firmware_class/
 F:     drivers/base/firmware*.c
 F:     include/linux/firmware.h
 
+FLOPPY DRIVER
+M:     Jiri Kosina <jkosina@suse.cz>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
+S:     Odd fixes
+F:     drivers/block/floppy.c
+
 FPU EMULATOR
 M:     Bill Metzenthen <billm@melbpc.org.au>
 W:     http://floatingpoint.sourceforge.net/emulator/index.html
@@ -2978,9 +2994,9 @@ GENERIC GPIO I2C MULTIPLEXER DRIVER
 M:     Peter Korsgaard <peter.korsgaard@barco.com>
 L:     linux-i2c@vger.kernel.org
 S:     Supported
-F:     drivers/i2c/muxes/gpio-i2cmux.c
-F:     include/linux/gpio-i2cmux.h
-F:     Documentation/i2c/muxes/gpio-i2cmux
+F:     drivers/i2c/muxes/i2c-mux-gpio.c
+F:     include/linux/i2c-mux-gpio.h
+F:     Documentation/i2c/muxes/i2c-mux-gpio
 
 GENERIC HDLC (WAN) DRIVERS
 M:     Krzysztof Halasa <khc@pm.waw.pl>
@@ -3222,10 +3238,8 @@ F:       include/linux/clockchips.h
 F:     include/linux/hrtimer.h
 
 HIGH-SPEED SCC DRIVER FOR AX.25
-M:     Klaus Kudielka <klaus.kudielka@ieee.org>
 L:     linux-hams@vger.kernel.org
-W:     http://www.nt.tuwien.ac.at/~kkudielk/Linux/
-S:     Maintained
+S:     Orphan
 F:     drivers/net/hamradio/dmascc.c
 F:     drivers/net/hamradio/scc.c
 
@@ -3372,6 +3386,12 @@ W:       http://www.developer.ibm.com/welcome/netfinity/serveraid.html
 S:     Supported
 F:     drivers/scsi/ips.*
 
+ICH LPC AND GPIO DRIVER
+M:     Peter Tyser <ptyser@xes-inc.com>
+S:     Maintained
+F:     drivers/mfd/lpc_ich.c
+F:     drivers/gpio/gpio-ich.c
+
 IDE SUBSYSTEM
 M:     "David S. Miller" <davem@davemloft.net>
 L:     linux-ide@vger.kernel.org
@@ -4495,12 +4515,6 @@ L:       linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     drivers/mmc/host/imxmmc.*
 
-MOUSE AND MISC DEVICES [GENERAL]
-M:     Alessandro Rubini <rubini@ipvvis.unipv.it>
-S:     Maintained
-F:     drivers/input/mouse/
-F:     include/linux/gpio_mouse.h
-
 MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
 M:     Jiri Slaby <jirislaby@gmail.com>
 S:     Maintained
@@ -5138,7 +5152,7 @@ PCA9541 I2C BUS MASTER SELECTOR DRIVER
 M:     Guenter Roeck <guenter.roeck@ericsson.com>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
-F:     drivers/i2c/muxes/pca9541.c
+F:     drivers/i2c/muxes/i2c-mux-pca9541.c
 
 PCA9564/PCA9665 I2C BUS DRIVER
 M:     Wolfram Sang <w.sang@pengutronix.de>
@@ -5323,7 +5337,7 @@ M:        David Woodhouse <dwmw2@infradead.org>
 T:     git git://git.infradead.org/battery-2.6.git
 S:     Maintained
 F:     include/linux/power_supply.h
-F:     drivers/power/power_supply*
+F:     drivers/power/
 
 PNP SUPPORT
 M:     Adam Belay <abelay@mit.edu>
@@ -6340,14 +6354,25 @@ F:      include/linux/compiler.h
 
 SPEAR PLATFORM SUPPORT
 M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Shiraz Hashim <shiraz.hashim@st.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
 S:     Maintained
 F:     arch/arm/plat-spear/
 
+SPEAR13XX MACHINE SUPPORT
+M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Shiraz Hashim <shiraz.hashim@st.com>
+L:     spear-devel@list.st.com
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+W:     http://www.st.com/spear
+S:     Maintained
+F:     arch/arm/mach-spear13xx/
+
 SPEAR3XX MACHINE SUPPORT
 M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Shiraz Hashim <shiraz.hashim@st.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
@@ -6356,6 +6381,8 @@ F:        arch/arm/mach-spear3xx/
 
 SPEAR6XX MACHINE SUPPORT
 M:     Rajeev Kumar <rajeev-dlh.kumar@st.com>
+M:     Shiraz Hashim <shiraz.hashim@st.com>
+M:     Viresh Kumar <viresh.kumar@st.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
@@ -6368,9 +6395,7 @@ L:        spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
 S:     Maintained
-F:     arch/arm/mach-spear*/clock.c
-F:     arch/arm/plat-spear/clock.c
-F:     arch/arm/plat-spear/include/plat/clock.h
+F:     drivers/clk/spear/
 
 SPI SUBSYSTEM
 M:     Grant Likely <grant.likely@secretlab.ca>
@@ -6632,7 +6657,7 @@ F:        include/linux/taskstats*
 F:     kernel/taskstats.c
 
 TC CLASSIFIER
-M:     Jamal Hadi Salim <hadi@cyberus.ca>
+M:     Jamal Hadi Salim <jhs@mojatatu.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     include/linux/pkt_cls.h
index b62c1e09444a981ed738581de1318aad802a2840..0d718ede9ea53949227b5beb56dcef17cb756a98 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
-PATCHLEVEL = 4
+PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
@@ -231,10 +231,6 @@ endif
 # Where to locate arch specific headers
 hdr-arch  := $(SRCARCH)
 
-ifeq ($(ARCH),m68knommu)
-       hdr-arch  := m68k
-endif
-
 KCONFIG_CONFIG ?= .config
 export KCONFIG_CONFIG
 
@@ -341,7 +337,6 @@ AWK         = awk
 GENKSYMS       = scripts/genksyms/genksyms
 INSTALLKERNEL  := installkernel
 DEPMOD         = /sbin/depmod
-KALLSYMS       = scripts/kallsyms
 PERL           = perl
 CHECK          = sparse
 
@@ -739,197 +734,21 @@ libs-y1          := $(patsubst %/, %/lib.a, $(libs-y))
 libs-y2                := $(patsubst %/, %/built-in.o, $(libs-y))
 libs-y         := $(libs-y1) $(libs-y2)
 
-# Build vmlinux
-# ---------------------------------------------------------------------------
-# vmlinux is built from the objects selected by $(vmlinux-init) and
-# $(vmlinux-main). Most are built-in.o files from top-level directories
-# in the kernel tree, others are specified in arch/$(ARCH)/Makefile.
-# Ordering when linking is important, and $(vmlinux-init) must be first.
-#
-# vmlinux
-#   ^
-#   |
-#   +-< $(vmlinux-init)
-#   |   +--< init/version.o + more
-#   |
-#   +--< $(vmlinux-main)
-#   |    +--< driver/built-in.o mm/built-in.o + more
-#   |
-#   +-< kallsyms.o (see description in CONFIG_KALLSYMS section)
-#
-# vmlinux version (uname -v) cannot be updated during normal
-# descending-into-subdirs phase since we do not yet know if we need to
-# update vmlinux.
-# Therefore this step is delayed until just before final link of vmlinux -
-# except in the kallsyms case where it is done just before adding the
-# symbols to the kernel.
-#
-# System.map is generated to document addresses of all kernel symbols
-
-vmlinux-init := $(head-y) $(init-y)
-vmlinux-main := $(core-y) $(libs-y) $(drivers-y) $(net-y)
-vmlinux-all  := $(vmlinux-init) $(vmlinux-main)
-vmlinux-lds  := arch/$(SRCARCH)/kernel/vmlinux.lds
-export KBUILD_VMLINUX_OBJS := $(vmlinux-all)
-
-# Rule to link vmlinux - also used during CONFIG_KALLSYMS
-# May be overridden by arch/$(ARCH)/Makefile
-quiet_cmd_vmlinux__ ?= LD      $@
-      cmd_vmlinux__ ?= $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) -o $@ \
-      -T $(vmlinux-lds) $(vmlinux-init)                          \
-      --start-group $(vmlinux-main) --end-group                  \
-      $(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o FORCE ,$^)
-
-# Generate new vmlinux version
-quiet_cmd_vmlinux_version = GEN     .version
-      cmd_vmlinux_version = set -e;                     \
-       if [ ! -r .version ]; then                      \
-         rm -f .version;                               \
-         echo 1 >.version;                             \
-       else                                            \
-         mv .version .old_version;                     \
-         expr 0$$(cat .old_version) + 1 >.version;     \
-       fi;                                             \
-       $(MAKE) $(build)=init
-
-# Generate System.map
-quiet_cmd_sysmap = SYSMAP
-      cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap
-
-# Sort exception table at build time
-quiet_cmd_sortextable = SORTEX
-      cmd_sortextable = $(objtree)/scripts/sortextable
-
-# Link of vmlinux
-# If CONFIG_KALLSYMS is set .version is already updated
-# Generate System.map and verify that the content is consistent
-# Use + in front of the vmlinux_version rule to silent warning with make -j2
-# First command is ':' to allow us to use + in front of the rule
-define rule_vmlinux__
-       :
-       $(if $(CONFIG_KALLSYMS),,+$(call cmd,vmlinux_version))
-
-       $(call cmd,vmlinux__)
-       $(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd
-
-       $(if $(CONFIG_BUILDTIME_EXTABLE_SORT),                          \
-         $(Q)$(if $($(quiet)cmd_sortextable),                          \
-           echo '  $($(quiet)cmd_sortextable)  vmlinux' &&)            \
-         $(cmd_sortextable)  vmlinux)
-
-
-       $(Q)$(if $($(quiet)cmd_sysmap),                                      \
-         echo '  $($(quiet)cmd_sysmap)  System.map' &&)                     \
-       $(cmd_sysmap) $@ System.map;                                         \
-       if [ $$? -ne 0 ]; then                                               \
-               rm -f $@;                                                    \
-               /bin/false;                                                  \
-       fi;
-       $(verify_kallsyms)
-endef
-
-
-ifdef CONFIG_KALLSYMS
-# Generate section listing all symbols and add it into vmlinux $(kallsyms.o)
-# It's a three stage process:
-# o .tmp_vmlinux1 has all symbols and sections, but __kallsyms is
-#   empty
-#   Running kallsyms on that gives us .tmp_kallsyms1.o with
-#   the right size - vmlinux version (uname -v) is updated during this step
-# o .tmp_vmlinux2 now has a __kallsyms section of the right size,
-#   but due to the added section, some addresses have shifted.
-#   From here, we generate a correct .tmp_kallsyms2.o
-# o The correct .tmp_kallsyms2.o is linked into the final vmlinux.
-# o Verify that the System.map from vmlinux matches the map from
-#   .tmp_vmlinux2, just in case we did not generate kallsyms correctly.
-# o If 'make KALLSYMS_EXTRA_PASS=1" was used, do an extra pass using
-#   .tmp_vmlinux3 and .tmp_kallsyms3.o.  This is only meant as a
-#   temporary bypass to allow the kernel to be built while the
-#   maintainers work out what went wrong with kallsyms.
-
-last_kallsyms := 2
-
-ifdef KALLSYMS_EXTRA_PASS
-ifneq ($(KALLSYMS_EXTRA_PASS),0)
-last_kallsyms := 3
-endif
-endif
-
-kallsyms.o := .tmp_kallsyms$(last_kallsyms).o
-
-define verify_kallsyms
-       $(Q)$(if $($(quiet)cmd_sysmap),                                      \
-         echo '  $($(quiet)cmd_sysmap)  .tmp_System.map' &&)                \
-         $(cmd_sysmap) .tmp_vmlinux$(last_kallsyms) .tmp_System.map
-       $(Q)cmp -s System.map .tmp_System.map ||                             \
-               (echo Inconsistent kallsyms data;                            \
-                echo This is a bug - please report about it;                \
-                echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround;      \
-                rm .tmp_kallsyms* ; /bin/false )
-endef
-
-# Update vmlinux version before link
-# Use + in front of this rule to silent warning about make -j1
-# First command is ':' to allow us to use + in front of this rule
-cmd_ksym_ld = $(cmd_vmlinux__)
-define rule_ksym_ld
-       : 
-       +$(call cmd,vmlinux_version)
-       $(call cmd,vmlinux__)
-       $(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd
-endef
-
-# Generate .S file with all kernel symbols
-quiet_cmd_kallsyms = KSYM    $@
-      cmd_kallsyms = $(NM) -n $< | $(KALLSYMS) \
-                     $(if $(CONFIG_KALLSYMS_ALL),--all-symbols) > $@
+# Externally visible symbols (used by link-vmlinux.sh)
+export KBUILD_VMLINUX_INIT := $(head-y) $(init-y)
+export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y) $(drivers-y) $(net-y)
+export KBUILD_LDS          := arch/$(SRCARCH)/kernel/vmlinux.lds
+export LDFLAGS_vmlinux
 
-.tmp_kallsyms1.o .tmp_kallsyms2.o .tmp_kallsyms3.o: %.o: %.S scripts FORCE
-       $(call if_changed_dep,as_o_S)
+vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN)
 
-.tmp_kallsyms%.S: .tmp_vmlinux% $(KALLSYMS)
-       $(call cmd,kallsyms)
+# Final link of vmlinux
+      cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
+quiet_cmd_link-vmlinux = LINK    $@
 
-# .tmp_vmlinux1 must be complete except kallsyms, so update vmlinux version
-.tmp_vmlinux1: $(vmlinux-lds) $(vmlinux-all) FORCE
-       $(call if_changed_rule,ksym_ld)
-
-.tmp_vmlinux2: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms1.o FORCE
-       $(call if_changed,vmlinux__)
-
-.tmp_vmlinux3: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms2.o FORCE
-       $(call if_changed,vmlinux__)
-
-# Needs to visit scripts/ before $(KALLSYMS) can be used.
-$(KALLSYMS): scripts ;
-
-# Generate some data for debugging strange kallsyms problems
-debug_kallsyms: .tmp_map$(last_kallsyms)
-
-.tmp_map%: .tmp_vmlinux% FORCE
-       ($(OBJDUMP) -h $< | $(AWK) '/^ +[0-9]/{print $$4 " 0 " $$2}'; $(NM) $<) | sort > $@
-
-.tmp_map3: .tmp_map2
-
-.tmp_map2: .tmp_map1
-
-endif # ifdef CONFIG_KALLSYMS
-
-# Do modpost on a prelinked vmlinux. The finally linked vmlinux has
-# relevant sections renamed as per the linker script.
-quiet_cmd_vmlinux-modpost = LD      $@
-      cmd_vmlinux-modpost = $(LD) $(LDFLAGS) -r -o $@                          \
-        $(vmlinux-init) --start-group $(vmlinux-main) --end-group             \
-        $(filter-out $(vmlinux-init) $(vmlinux-main) FORCE ,$^)
-define rule_vmlinux-modpost
-       :
-       +$(call cmd,vmlinux-modpost)
-       $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost $@
-       $(Q)echo 'cmd_$@ := $(cmd_vmlinux-modpost)' > $(dot-target).cmd
-endef
-
-# vmlinux image - including updated kernel symbols
-vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
+# Include targets which we want to
+# execute if the rest of the kernel build went well.
+vmlinux: scripts/link-vmlinux.sh $(vmlinux-deps) FORCE
 ifdef CONFIG_HEADERS_CHECK
        $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
 endif
@@ -939,22 +758,11 @@ endif
 ifdef CONFIG_BUILD_DOCSRC
        $(Q)$(MAKE) $(build)=Documentation
 endif
-       $(call vmlinux-modpost)
-       $(call if_changed_rule,vmlinux__)
-       $(Q)rm -f .old_version
-
-# build vmlinux.o first to catch section mismatch errors early
-ifdef CONFIG_KALLSYMS
-.tmp_vmlinux1: vmlinux.o
-endif
-
-modpost-init := $(filter-out init/built-in.o, $(vmlinux-init))
-vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
-       $(call if_changed_rule,vmlinux-modpost)
+       +$(call if_changed,link-vmlinux)
 
 # The actual objects are generated when descending, 
 # make sure no implicit rule kicks in
-$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+$(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 
 # Handle descending into subdirectories listed in $(vmlinux-dirs)
 # Preset locale variables to speed up the build process. Limit locale
@@ -1181,8 +989,6 @@ endif # CONFIG_MODULES
 
 # Directories & files removed with 'make clean'
 CLEAN_DIRS  += $(MODVERDIR)
-CLEAN_FILES += vmlinux System.map \
-                .tmp_kallsyms* .tmp_version .tmp_vmlinux* .tmp_System.map
 
 # Directories & files removed with 'make mrproper'
 MRPROPER_DIRS  += include/config usr/include include/generated          \
@@ -1428,6 +1234,7 @@ scripts: ;
 endif # KBUILD_EXTMOD
 
 clean: $(clean-dirs)
+       $(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
        $(call cmd,rmdirs)
        $(call cmd,rmfiles)
        @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
@@ -1568,14 +1375,6 @@ quiet_cmd_depmod = DEPMOD  $(KERNELRELEASE)
 cmd_crmodverdir = $(Q)mkdir -p $(MODVERDIR) \
                   $(if $(KBUILD_MODULES),; rm -f $(MODVERDIR)/*)
 
-a_flags = -Wp,-MD,$(depfile) $(KBUILD_AFLAGS) $(AFLAGS_KERNEL) \
-         $(KBUILD_AFLAGS_KERNEL)                              \
-         $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(KBUILD_CPPFLAGS) \
-         $(modkern_aflags) $(EXTRA_AFLAGS) $(AFLAGS_$(basetarget).o)
-
-quiet_cmd_as_o_S = AS      $@
-cmd_as_o_S       = $(CC) $(a_flags) -c -o $@ $<
-
 # read all saved command lines
 
 targets := $(wildcard $(sort $(targets)))
index e9a910876cda4a75d1112d356bf4a8ada992e832..8c3d957fa8e2f6b7794223791136c75d6a66b412 100644 (file)
@@ -159,6 +159,9 @@ config HAVE_ARCH_TRACEHOOK
 config HAVE_DMA_ATTRS
        bool
 
+config HAVE_DMA_CONTIGUOUS
+       bool
+
 config USE_GENERIC_SMP_HELPERS
        bool
 
diff --git a/arch/alpha/include/asm/kvm_para.h b/arch/alpha/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index 24779fc95994efb5c4d69e4d507f3cba581570a4..5a8a48320efe9f5c577f9cc8363a4de2a6725559 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned int   __kernel_ino_t;
 #define __kernel_ino_t __kernel_ino_t
 
-typedef unsigned int   __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned long  __kernel_sigset_t;      /* at least 32 bits */
 
 #include <asm-generic/posix_types.h>
index 10ab2d74ecbbede2764c8bf8cbcca14f19f418a0..a8c97d42ec8eaef9215c32a40f743bd94505bc55 100644 (file)
@@ -226,7 +226,6 @@ do_sigreturn(struct sigcontext __user *sc, struct pt_regs *regs,
        if (__get_user(set.sig[0], &sc->sc_mask))
                goto give_sigsegv;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(sc, regs, sw))
@@ -261,7 +260,6 @@ do_rt_sigreturn(struct rt_sigframe __user *frame, struct pt_regs *regs,
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto give_sigsegv;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw))
@@ -468,12 +466,9 @@ static inline void
 handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
              struct pt_regs * regs, struct switch_stack *sw)
 {
-       sigset_t *oldset = &current->blocked;
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-
        if (ka->sa.sa_flags & SA_SIGINFO)
                ret = setup_rt_frame(sig, ka, info, oldset, regs, sw);
        else
@@ -483,12 +478,7 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
                force_sigsegv(sig, current);
                return;
        }
-       block_sigmask(ka, sig);
-       /* A signal was successfully delivered, and the
-          saved sigmask was stored on the signal frame,
-          and will be restored by sigreturn.  So we can
-          simply clear the restore sigmask flag.  */
-       clear_thread_flag(TIF_RESTORE_SIGMASK);
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 static inline void
@@ -572,9 +562,7 @@ do_signal(struct pt_regs * regs, struct switch_stack * sw,
        }
 
        /* If there's no signal to deliver, we just restore the saved mask.  */
-       if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
-               set_current_blocked(&current->saved_sigmask);
-
+       restore_saved_sigmask();
        if (single_stepping)
                ptrace_set_bpt(current);        /* re-set breakpoint */
 }
@@ -590,7 +578,5 @@ do_notify_resume(struct pt_regs *regs, struct switch_stack *sw,
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index 5458aa9db0674195afad6924426510a3808e1666..b649c5904a4ff3c993732067411e7723fc4d4d19 100644 (file)
@@ -5,6 +5,9 @@ config ARM
        select HAVE_AOUT
        select HAVE_DMA_API_DEBUG
        select HAVE_IDE if PCI || ISA || PCMCIA
+       select HAVE_DMA_ATTRS
+       select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+       select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
        select HAVE_MEMBLOCK
        select RTC_LIB
        select SYS_SUPPORTS_APM_EMULATION
@@ -54,6 +57,14 @@ config ARM
 config ARM_HAS_SG_CHAIN
        bool
 
+config NEED_SG_DMA_LENGTH
+       bool
+
+config ARM_DMA_USE_IOMMU
+       select NEED_SG_DMA_LENGTH
+       select ARM_HAS_SG_CHAIN
+       bool
+
 config HAVE_PWM
        bool
 
@@ -445,8 +456,10 @@ config ARCH_MXS
        select ARCH_REQUIRE_GPIOLIB
        select CLKDEV_LOOKUP
        select CLKSRC_MMIO
+       select COMMON_CLK
        select HAVE_CLK_PREPARE
        select PINCTRL
+       select USE_OF
        help
          Support for Freescale MXS-based family of processors
 
@@ -512,7 +525,7 @@ config ARCH_IXP4XX
        select ARCH_HAS_DMA_SET_COHERENT_MASK
        select CLKSRC_MMIO
        select CPU_XSCALE
-       select GENERIC_GPIO
+       select ARCH_REQUIRE_GPIOLIB
        select GENERIC_CLOCKEVENTS
        select MIGHT_HAVE_PCI
        select NEED_MACH_IO_H
@@ -936,6 +949,7 @@ config PLAT_SPEAR
        select ARM_AMBA
        select ARCH_REQUIRE_GPIOLIB
        select CLKDEV_LOOKUP
+       select COMMON_CLK
        select CLKSRC_MMIO
        select GENERIC_CLOCKEVENTS
        select HAVE_CLK
@@ -1040,7 +1054,6 @@ source "arch/arm/mach-sa1100/Kconfig"
 
 source "arch/arm/plat-samsung/Kconfig"
 source "arch/arm/plat-s3c24xx/Kconfig"
-source "arch/arm/plat-s5p/Kconfig"
 
 source "arch/arm/plat-spear/Kconfig"
 
@@ -1091,6 +1104,7 @@ config PLAT_ORION
        bool
        select CLKSRC_MMIO
        select GENERIC_IRQ_CHIP
+       select COMMON_CLK
 
 config PLAT_PXA
        bool
index 85348a09d655afc220fc55f71810b07830ea8936..01a134141216a1a02f5fec43defa8768ad72fa26 100644 (file)
@@ -103,6 +103,35 @@ choice
                  Say Y here if you want the debug print routines to direct
                  their output to the second serial port on these devices.
 
+       config DEBUG_DAVINCI_DA8XX_UART1
+               bool "Kernel low-level debugging on DaVinci DA8XX using UART1"
+               depends on ARCH_DAVINCI_DA8XX
+               help
+                 Say Y here if you want the debug print routines to direct
+                 their output to UART1 serial port on DaVinci DA8XX devices.
+
+       config DEBUG_DAVINCI_DA8XX_UART2
+               bool "Kernel low-level debugging on DaVinci DA8XX using UART2"
+               depends on ARCH_DAVINCI_DA8XX
+               help
+                 Say Y here if you want the debug print routines to direct
+                 their output to UART2 serial port on DaVinci DA8XX devices.
+
+       config DEBUG_DAVINCI_DMx_UART0
+               bool "Kernel low-level debugging on DaVinci DMx using UART0"
+               depends on ARCH_DAVINCI_DMx
+               help
+                 Say Y here if you want the debug print routines to direct
+                 their output to UART0 serial port on DaVinci DMx devices.
+
+       config DEBUG_DAVINCI_TNETV107X_UART1
+               bool "Kernel low-level debugging on DaVinci TNETV107x using UART1"
+               depends on ARCH_DAVINCI_TNETV107X
+               help
+                 Say Y here if you want the debug print routines to direct
+                 their output to UART1 serial port on DaVinci TNETV107X
+                 devices.
+
        config DEBUG_DC21285_PORT
                bool "Kernel low-level debugging messages via footbridge serial port"
                depends on FOOTBRIDGE
@@ -180,6 +209,14 @@ choice
                  Say Y here if you want kernel low-level debugging support
                  on i.MX50 or i.MX53.
 
+       config DEBUG_IMX6Q_UART2
+               bool "i.MX6Q Debug UART2"
+               depends on SOC_IMX6Q
+               help
+                 Say Y here if you want kernel low-level debugging support
+                 on i.MX6Q UART2. This is correct for e.g. the SabreLite
+                  board.
+
        config DEBUG_IMX6Q_UART4
                bool "i.MX6Q Debug UART4"
                depends on SOC_IMX6Q
index 157900da8782aee9f6bf63db1c16db67e7c3053f..0298b00fe2413a1964cd899b9834136c3166136a 100644 (file)
@@ -160,9 +160,7 @@ machine-$(CONFIG_ARCH_MXS)          := mxs
 machine-$(CONFIG_ARCH_NETX)            := netx
 machine-$(CONFIG_ARCH_NOMADIK)         := nomadik
 machine-$(CONFIG_ARCH_OMAP1)           := omap1
-machine-$(CONFIG_ARCH_OMAP2)           := omap2
-machine-$(CONFIG_ARCH_OMAP3)           := omap2
-machine-$(CONFIG_ARCH_OMAP4)           := omap2
+machine-$(CONFIG_ARCH_OMAP2PLUS)       := omap2
 machine-$(CONFIG_ARCH_ORION5X)         := orion5x
 machine-$(CONFIG_ARCH_PICOXCELL)       := picoxcell
 machine-$(CONFIG_ARCH_PNX4008)         := pnx4008
@@ -188,6 +186,8 @@ machine-$(CONFIG_ARCH_VEXPRESS)             := vexpress
 machine-$(CONFIG_ARCH_VT8500)          := vt8500
 machine-$(CONFIG_ARCH_W90X900)         := w90x900
 machine-$(CONFIG_FOOTBRIDGE)           := footbridge
+machine-$(CONFIG_MACH_SPEAR1310)       := spear13xx
+machine-$(CONFIG_MACH_SPEAR1340)       := spear13xx
 machine-$(CONFIG_MACH_SPEAR300)                := spear3xx
 machine-$(CONFIG_MACH_SPEAR310)                := spear3xx
 machine-$(CONFIG_MACH_SPEAR320)                := spear3xx
@@ -205,7 +205,7 @@ plat-$(CONFIG_PLAT_NOMADIK) := nomadik
 plat-$(CONFIG_PLAT_ORION)      := orion
 plat-$(CONFIG_PLAT_PXA)                := pxa
 plat-$(CONFIG_PLAT_S3C24XX)    := s3c24xx samsung
-plat-$(CONFIG_PLAT_S5P)                := s5p samsung
+plat-$(CONFIG_PLAT_S5P)                := samsung
 plat-$(CONFIG_PLAT_SPEAR)      := spear
 plat-$(CONFIG_PLAT_VERSATILE)  := versatile
 
index 881bc398784483169535a156158a4b2598beb23a..4ad5160018cb522922201b3dfebf9c0462b6ef85 100644 (file)
@@ -58,6 +58,8 @@
                                "st,nomadik-gpio";
                        reg =  <0x8012e000 0x80>;
                        interrupts = <0 119 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
@@ -69,6 +71,8 @@
                                "st,nomadik-gpio";
                        reg =  <0x8012e080 0x80>;
                        interrupts = <0 120 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
@@ -80,6 +84,8 @@
                                "st,nomadik-gpio";
                        reg =  <0x8000e000 0x80>;
                        interrupts = <0 121 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
@@ -91,6 +97,8 @@
                                "st,nomadik-gpio";
                        reg =  <0x8000e080 0x80>;
                        interrupts = <0 122 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
                                "st,nomadik-gpio";
                        reg =  <0x8000e100 0x80>;
                        interrupts = <0 123 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
                                "st,nomadik-gpio";
                        reg =  <0x8000e180 0x80>;
                        interrupts = <0 124 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
                                "st,nomadik-gpio";
                        reg =  <0x8011e000 0x80>;
                        interrupts = <0 125 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
                                "st,nomadik-gpio";
                        reg =  <0x8011e080 0x80>;
                        interrupts = <0 126 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
                                "st,nomadik-gpio";
                        reg =  <0xa03fe000 0x80>;
                        interrupts = <0 127 0x4>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        supports-sleepmode;
                        gpio-controller;
                        #gpio-cells = <2>;
                        gpio-bank = <8>;
                };
 
+               pinctrl {
+                       compatible = "stericsson,nmk_pinctrl";
+               };
+
                usb@a03e0000 {
                        compatible = "stericsson,db8500-musb",
                                "mentor,musb";
                prcmu@80157000 {
                        compatible = "stericsson,db8500-prcmu";
                        reg = <0x80157000 0x1000>;
-                       interrupts = <46 47>;
+                       interrupts = <0 47 0x4>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
 
-                               prcmu-timer-4@80157450 {
+                       prcmu-timer-4@80157450 {
                                compatible = "stericsson,db8500-prcmu-timer-4";
                                reg = <0x80157450 0xC>;
                        };
 
+                       db8500-prcmu-regulators {
+                               compatible = "stericsson,db8500-prcmu-regulator";
+
+                               // DB8500_REGULATOR_VAPE
+                               db8500_vape_reg: db8500_vape {
+                                       regulator-name = "db8500-vape";
+                                       regulator-always-on;
+                               };
+
+                               // DB8500_REGULATOR_VARM
+                               db8500_varm_reg: db8500_varm {
+                                       regulator-name = "db8500-varm";
+                               };
+
+                               // DB8500_REGULATOR_VMODEM
+                               db8500_vmodem_reg: db8500_vmodem {
+                                       regulator-name = "db8500-vmodem";
+                               };
+
+                               // DB8500_REGULATOR_VPLL
+                               db8500_vpll_reg: db8500_vpll {
+                                       regulator-name = "db8500-vpll";
+                               };
+
+                               // DB8500_REGULATOR_VSMPS1
+                               db8500_vsmps1_reg: db8500_vsmps1 {
+                                       regulator-name = "db8500-vsmps1";
+                               };
+
+                               // DB8500_REGULATOR_VSMPS2
+                               db8500_vsmps2_reg: db8500_vsmps2 {
+                                       regulator-name = "db8500-vsmps2";
+                               };
+
+                               // DB8500_REGULATOR_VSMPS3
+                               db8500_vsmps3_reg: db8500_vsmps3 {
+                                       regulator-name = "db8500-vsmps3";
+                               };
+
+                               // DB8500_REGULATOR_VRF1
+                               db8500_vrf1_reg: db8500_vrf1 {
+                                       regulator-name = "db8500-vrf1";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_SVAMMDSP
+                               db8500_sva_mmdsp_reg: db8500_sva_mmdsp {
+                                       regulator-name = "db8500-sva-mmdsp";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_SVAMMDSPRET
+                               db8500_sva_mmdsp_ret_reg: db8500_sva_mmdsp_ret {
+                                       regulator-name = "db8500-sva-mmdsp-ret";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_SVAPIPE
+                               db8500_sva_pipe_reg: db8500_sva_pipe {
+                                       regulator-name = "db8500_sva_pipe";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_SIAMMDSP
+                               db8500_sia_mmdsp_reg: db8500_sia_mmdsp {
+                                       regulator-name = "db8500_sia_mmdsp";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_SIAMMDSPRET
+                               db8500_sia_mmdsp_ret_reg: db8500_sia_mmdsp_ret {
+                                       regulator-name = "db8500-sia-mmdsp-ret";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_SIAPIPE
+                               db8500_sia_pipe_reg: db8500_sia_pipe {
+                                       regulator-name = "db8500-sia-pipe";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_SGA
+                               db8500_sga_reg: db8500_sga {
+                                       regulator-name = "db8500-sga";
+                                       vin-supply = <&db8500_vape_reg>;
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_B2R2_MCDE
+                               db8500_b2r2_mcde_reg: db8500_b2r2_mcde {
+                                       regulator-name = "db8500-b2r2-mcde";
+                                       vin-supply = <&db8500_vape_reg>;
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_ESRAM12
+                               db8500_esram12_reg: db8500_esram12 {
+                                       regulator-name = "db8500-esram12";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_ESRAM12RET
+                               db8500_esram12_ret_reg: db8500_esram12_ret {
+                                       regulator-name = "db8500-esram12-ret";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_ESRAM34
+                               db8500_esram34_reg: db8500_esram34 {
+                                       regulator-name = "db8500-esram34";
+                               };
+
+                               // DB8500_REGULATOR_SWITCH_ESRAM34RET
+                               db8500_esram34_ret_reg: db8500_esram34_ret {
+                                       regulator-name = "db8500-esram34-ret";
+                               };
+                       };
+
                        ab8500@5 {
                                compatible = "stericsson,ab8500";
                                reg = <5>; /* mailbox 5 is i2c */
                                interrupts = <0 40 0x4>;
+
+                               ab8500-regulators {
+                                       compatible = "stericsson,ab8500-regulator";
+
+                                       // supplies to the display/camera
+                                       ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
+                                               regulator-name = "V-DISPLAY";
+                                               regulator-min-microvolt = <2500000>;
+                                               regulator-max-microvolt = <2900000>;
+                                               regulator-boot-on;
+                                               /* BUG: If turned off MMC will be affected. */
+                                               regulator-always-on;
+                                       };
+
+                                       // supplies to the on-board eMMC
+                                       ab8500_ldo_aux2_reg: ab8500_ldo_aux2 {
+                                               regulator-name = "V-eMMC1";
+                                               regulator-min-microvolt = <1100000>;
+                                               regulator-max-microvolt = <3300000>;
+                                       };
+
+                                       // supply for VAUX3; SDcard slots
+                                       ab8500_ldo_aux3_reg: ab8500_ldo_aux3 {
+                                               regulator-name = "V-MMC-SD";
+                                               regulator-min-microvolt = <1100000>;
+                                               regulator-max-microvolt = <3300000>;
+                                       };
+
+                                       // supply for v-intcore12; VINTCORE12 LDO
+                                       ab8500_ldo_initcore_reg: ab8500_ldo_initcore {
+                                               regulator-name = "V-INTCORE";
+                                       };
+
+                                       // supply for tvout; gpadc; TVOUT LDO
+                                       ab8500_ldo_tvout_reg: ab8500_ldo_tvout {
+                                               regulator-name = "V-TVOUT";
+                                       };
+
+                                       // supply for ab8500-usb; USB LDO
+                                       ab8500_ldo_usb_reg: ab8500_ldo_usb {
+                                               regulator-name = "dummy";
+                                       };
+
+                                       // supply for ab8500-vaudio; VAUDIO LDO
+                                       ab8500_ldo_audio_reg: ab8500_ldo_audio {
+                                               regulator-name = "V-AUD";
+                                       };
+
+                                       // supply for v-anamic1 VAMic1-LDO
+                                       ab8500_ldo_anamic1_reg: ab8500_ldo_anamic1 {
+                                               regulator-name = "V-AMIC1";
+                                       };
+
+                                       // supply for v-amic2; VAMIC2 LDO; reuse constants for AMIC1
+                                       ab8500_ldo_amamic2_reg: ab8500_ldo_amamic2 {
+                                               regulator-name = "V-AMIC2";
+                                       };
+
+                                       // supply for v-dmic; VDMIC LDO
+                                       ab8500_ldo_dmic_reg: ab8500_ldo_dmic {
+                                               regulator-name = "V-DMIC";
+                                       };
+
+                                       // supply for U8500 CSI/DSI; VANA LDO
+                                       ab8500_ldo_ana_reg: ab8500_ldo_ana {
+                                               regulator-name = "V-CSI/DSI";
+                                       };
+                               };
                        };
                };
 
                        status = "disabled";
 
                        // Add one of these for each child device
-                       cs-gpios = <&gpio0 31 &gpio4 14 &gpio4 16 &gpio6 22 &gpio7 0>;
+                       cs-gpios = <&gpio0 31 0x4 &gpio4 14 0x4 &gpio4 16 0x4
+                                   &gpio6 22 0x4 &gpio7 0 0x4>;
 
                };
 
index 399d17b231d21fdb66546f69effbecda06abd463..49945cc1bc7df3795b71812c67f98c3ec88e5e72 100644 (file)
        chosen {
                bootargs = "root=/dev/ram0 rw ramdisk=8192 console=ttySAC1,115200";
        };
+
+       i2c@12C60000 {
+               samsung,i2c-sda-delay = <100>;
+               samsung,i2c-max-bus-freq = <20000>;
+               gpios = <&gpb3 0 2 3 0>,
+                       <&gpb3 1 2 3 0>;
+
+               eeprom@50 {
+                       compatible = "samsung,s524ad0xd1";
+                       reg = <0x50>;
+               };
+       };
+
+       i2c@12C70000 {
+               samsung,i2c-sda-delay = <100>;
+               samsung,i2c-max-bus-freq = <20000>;
+               gpios = <&gpb3 2 2 3 0>,
+                       <&gpb3 3 2 3 0>;
+
+               eeprom@51 {
+                       compatible = "samsung,s524ad0xd1";
+                       reg = <0x51>;
+               };
+       };
+
+       i2c@12C80000 {
+               status = "disabled";
+       };
+
+       i2c@12C90000 {
+               status = "disabled";
+       };
+
+       i2c@12CA0000 {
+               status = "disabled";
+       };
+
+       i2c@12CB0000 {
+               status = "disabled";
+       };
+
+       i2c@12CC0000 {
+               status = "disabled";
+       };
+
+       i2c@12CD0000 {
+               status = "disabled";
+       };
 };
index dfc433599436ca46b97bc93b758daab35bdfe5c8..4272b2949228ba2e4ea9d7ebb7bc2285fd9bb19b 100644 (file)
        compatible = "samsung,exynos5250";
        interrupt-parent = <&gic>;
 
-       gic:interrupt-controller@10490000 {
+       gic:interrupt-controller@10481000 {
                compatible = "arm,cortex-a9-gic";
                #interrupt-cells = <3>;
                interrupt-controller;
-               reg = <0x10490000 0x1000>, <0x10480000 0x100>;
+               reg = <0x10481000 0x1000>, <0x10482000 0x2000>;
+       };
+
+       combiner:interrupt-controller@10440000 {
+               compatible = "samsung,exynos4210-combiner";
+               #interrupt-cells = <2>;
+               interrupt-controller;
+               samsung,combiner-nr = <32>;
+               reg = <0x10440000 0x1000>;
+               interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
+                            <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
+                            <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
+                            <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>,
+                            <0 16 0>, <0 17 0>, <0 18 0>, <0 19 0>,
+                            <0 20 0>, <0 21 0>, <0 22 0>, <0 23 0>,
+                            <0 24 0>, <0 25 0>, <0 26 0>, <0 27 0>,
+                            <0 28 0>, <0 29 0>, <0 30 0>, <0 31 0>;
        };
 
        watchdog {
                interrupts = <0 43 0>, <0 44 0>;
        };
 
-       sdhci@12200000 {
-               compatible = "samsung,exynos4210-sdhci";
-               reg = <0x12200000 0x100>;
-               interrupts = <0 75 0>;
-       };
-
-       sdhci@12210000 {
-               compatible = "samsung,exynos4210-sdhci";
-               reg = <0x12210000 0x100>;
-               interrupts = <0 76 0>;
-       };
-
-       sdhci@12220000 {
-               compatible = "samsung,exynos4210-sdhci";
-               reg = <0x12220000 0x100>;
-               interrupts = <0 77 0>;
-       };
-
-       sdhci@12230000 {
-               compatible = "samsung,exynos4210-sdhci";
-               reg = <0x12230000 0x100>;
-               interrupts = <0 78 0>;
-       };
-
        serial@12C00000 {
                compatible = "samsung,exynos4210-uart";
                reg = <0x12C00000 0x100>;
                compatible = "samsung,s3c2440-i2c";
                reg = <0x12C60000 0x100>;
                interrupts = <0 56 0>;
+               #address-cells = <1>;
+               #size-cells = <0>;
        };
 
        i2c@12C70000 {
                compatible = "samsung,s3c2440-i2c";
                reg = <0x12C70000 0x100>;
                interrupts = <0 57 0>;
+               #address-cells = <1>;
+               #size-cells = <0>;
        };
 
        i2c@12C80000 {
                compatible = "samsung,s3c2440-i2c";
                reg = <0x12C80000 0x100>;
                interrupts = <0 58 0>;
+               #address-cells = <1>;
+               #size-cells = <0>;
        };
 
        i2c@12C90000 {
                compatible = "samsung,s3c2440-i2c";
                reg = <0x12C90000 0x100>;
                interrupts = <0 59 0>;
+               #address-cells = <1>;
+               #size-cells = <0>;
        };
 
        i2c@12CA0000 {
                compatible = "samsung,s3c2440-i2c";
                reg = <0x12CA0000 0x100>;
                interrupts = <0 60 0>;
+               #address-cells = <1>;
+               #size-cells = <0>;
        };
 
        i2c@12CB0000 {
                compatible = "samsung,s3c2440-i2c";
                reg = <0x12CB0000 0x100>;
                interrupts = <0 61 0>;
+               #address-cells = <1>;
+               #size-cells = <0>;
        };
 
        i2c@12CC0000 {
                compatible = "samsung,s3c2440-i2c";
                reg = <0x12CC0000 0x100>;
                interrupts = <0 62 0>;
+               #address-cells = <1>;
+               #size-cells = <0>;
        };
 
        i2c@12CD0000 {
                compatible = "samsung,s3c2440-i2c";
                reg = <0x12CD0000 0x100>;
                interrupts = <0 63 0>;
+               #address-cells = <1>;
+               #size-cells = <0>;
        };
 
        amba {
                        interrupts = <0 35 0>;
                };
 
-               mdma0: pdma@10800000 {
+               mdma0: mdma@10800000 {
                        compatible = "arm,pl330", "arm,primecell";
                        reg = <0x10800000 0x1000>;
                        interrupts = <0 33 0>;
                };
 
-               mdma1: pdma@11C10000 {
+               mdma1: mdma@11C10000 {
                        compatible = "arm,pl330", "arm,primecell";
                        reg = <0x11C10000 0x1000>;
                        interrupts = <0 124 0>;
                        #gpio-cells = <4>;
                };
 
+               gpc4: gpio-controller@114002E0 {
+                       compatible = "samsung,exynos4-gpio";
+                       reg = <0x114002E0 0x20>;
+                       #gpio-cells = <4>;
+               };
+
                gpd0: gpio-controller@11400160 {
                        compatible = "samsung,exynos4-gpio";
                        reg = <0x11400160 0x20>;
 
                gpv2: gpio-controller@10D10040 {
                        compatible = "samsung,exynos4-gpio";
-                       reg = <0x10D10040 0x20>;
+                       reg = <0x10D10060 0x20>;
                        #gpio-cells = <4>;
                };
 
                gpv3: gpio-controller@10D10060 {
                        compatible = "samsung,exynos4-gpio";
-                       reg = <0x10D10060 0x20>;
+                       reg = <0x10D10080 0x20>;
                        #gpio-cells = <4>;
                };
 
                gpv4: gpio-controller@10D10080 {
                        compatible = "samsung,exynos4-gpio";
-                       reg = <0x10D10080 0x20>;
+                       reg = <0x10D100C0 0x20>;
                        #gpio-cells = <4>;
                };
 
diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts
new file mode 100644 (file)
index 0000000..70bffa9
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx23.dtsi"
+
+/ {
+       model = "Freescale i.MX23 Evaluation Kit";
+       compatible = "fsl,imx23-evk", "fsl,imx23";
+
+       /* 128 MiB (0x08000000) of DRAM at the EMI base address */
+       memory {
+               reg = <0x40000000 0x08000000>;
+       };
+
+       apb@80000000 {
+               apbh@80000000 {
+                       /*
+                        * SSP0 in MMC mode: 8-bit bus, pin groups from
+                        * imx23.dtsi plus a pull-up fixup for two pads.
+                        */
+                       ssp0: ssp@80010000 {
+                               compatible = "fsl,imx23-mmc";
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&mmc0_8bit_pins_a &mmc0_pins_fixup>;
+                               bus-width = <8>;
+                               /* card write-protect sense on GPIO bank 1, pin 30 */
+                               wp-gpios = <&gpio1 30 0>;
+                               status = "okay";
+                       };
+               };
+
+               apbx@80040000 {
+                       /* debug UART (PL011), enabled with its pad group */
+                       duart: serial@80070000 {
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&duart_pins_a>;
+                               status = "okay";
+                       };
+               };
+       };
+};
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
new file mode 100644 (file)
index 0000000..8c5f999
--- /dev/null
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+       interrupt-parent = <&icoll>;
+
+       aliases {
+               gpio0 = &gpio0;
+               gpio1 = &gpio1;
+               gpio2 = &gpio2;
+       };
+
+       cpus {
+               cpu@0 {
+                       compatible = "arm,arm926ejs";
+               };
+       };
+
+       apb@80000000 {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               reg = <0x80000000 0x80000>;
+               ranges;
+
+               apbh@80000000 {
+                       compatible = "simple-bus";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       reg = <0x80000000 0x40000>;
+                       ranges;
+
+                       icoll: interrupt-controller@80000000 {
+                               compatible = "fsl,imx23-icoll", "fsl,mxs-icoll";
+                               interrupt-controller;
+                               #interrupt-cells = <1>;
+                               reg = <0x80000000 0x2000>;
+                       };
+
+                       /*
+                        * All peripheral apertures below are 8 KiB; the size
+                        * cell must be hex 0x2000, not decimal 2000 (0x7d0),
+                        * which would under-size every mapping.
+                        */
+                       dma-apbh@80004000 {
+                               compatible = "fsl,imx23-dma-apbh";
+                               reg = <0x80004000 0x2000>;
+                       };
+
+                       ecc@80008000 {
+                               reg = <0x80008000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       bch@8000a000 {
+                               reg = <0x8000a000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       gpmi@8000c000 {
+                               reg = <0x8000c000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       ssp0: ssp@80010000 {
+                               reg = <0x80010000 0x2000>;
+                               interrupts = <15 14>;
+                               fsl,ssp-dma-channel = <1>;
+                               status = "disabled";
+                       };
+
+                       etm@80014000 {
+                               reg = <0x80014000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       pinctrl@80018000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "fsl,imx23-pinctrl", "simple-bus";
+                               reg = <0x80018000 0x2000>;
+
+                               gpio0: gpio@0 {
+                                       compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
+                                       interrupts = <16>;
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       interrupt-controller;
+                                       #interrupt-cells = <2>;
+                               };
+
+                               gpio1: gpio@1 {
+                                       compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
+                                       interrupts = <17>;
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       interrupt-controller;
+                                       #interrupt-cells = <2>;
+                               };
+
+                               gpio2: gpio@2 {
+                                       compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
+                                       interrupts = <18>;
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       interrupt-controller;
+                                       #interrupt-cells = <2>;
+                               };
+
+                               duart_pins_a: duart@0 {
+                                       reg = <0>;
+                                       fsl,pinmux-ids = <0x11a2 0x11b2>;
+                                       fsl,drive-strength = <0>;
+                                       fsl,voltage = <1>;
+                                       fsl,pull-up = <0>;
+                               };
+
+                               mmc0_8bit_pins_a: mmc0-8bit@0 {
+                                       reg = <0>;
+                                       fsl,pinmux-ids = <0x2020 0x2030 0x2040
+                                               0x2050 0x0082 0x0092 0x00a2
+                                               0x00b2 0x2000 0x2010 0x2060>;
+                                       fsl,drive-strength = <1>;
+                                       fsl,voltage = <1>;
+                                       fsl,pull-up = <1>;
+                               };
+
+                               mmc0_pins_fixup: mmc0-pins-fixup {
+                                       fsl,pinmux-ids = <0x2010 0x2060>;
+                                       fsl,pull-up = <0>;
+                               };
+                       };
+
+                       digctl@8001c000 {
+                               reg = <0x8001c000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       emi@80020000 {
+                               reg = <0x80020000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       dma-apbx@80024000 {
+                               compatible = "fsl,imx23-dma-apbx";
+                               reg = <0x80024000 0x2000>;
+                       };
+
+                       dcp@80028000 {
+                               reg = <0x80028000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       pxp@8002a000 {
+                               reg = <0x8002a000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       ocotp@8002c000 {
+                               reg = <0x8002c000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       axi-ahb@8002e000 {
+                               reg = <0x8002e000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       lcdif@80030000 {
+                               reg = <0x80030000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       ssp1: ssp@80034000 {
+                               reg = <0x80034000 0x2000>;
+                               interrupts = <2 20>;
+                               fsl,ssp-dma-channel = <2>;
+                               status = "disabled";
+                       };
+
+                       tvenc@80038000 {
+                               reg = <0x80038000 0x2000>;
+                               status = "disabled";
+                       };
+               };
+
+               apbx@80040000 {
+                       compatible = "simple-bus";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       reg = <0x80040000 0x40000>;
+                       ranges;
+
+                       clkctl@80040000 {
+                               reg = <0x80040000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       saif0: saif@80042000 {
+                               reg = <0x80042000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       power@80044000 {
+                               reg = <0x80044000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       saif1: saif@80046000 {
+                               reg = <0x80046000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       audio-out@80048000 {
+                               reg = <0x80048000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       audio-in@8004c000 {
+                               reg = <0x8004c000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       lradc@80050000 {
+                               reg = <0x80050000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       spdif@80054000 {
+                               reg = <0x80054000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       i2c@80058000 {
+                               reg = <0x80058000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       rtc@8005c000 {
+                               reg = <0x8005c000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       pwm@80064000 {
+                               reg = <0x80064000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       timrot@80068000 {
+                               reg = <0x80068000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       auart0: serial@8006c000 {
+                               reg = <0x8006c000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       auart1: serial@8006e000 {
+                               reg = <0x8006e000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       duart: serial@80070000 {
+                               compatible = "arm,pl011", "arm,primecell";
+                               reg = <0x80070000 0x2000>;
+                               interrupts = <0>;
+                               status = "disabled";
+                       };
+
+                       usbphy@8007c000 {
+                               reg = <0x8007c000 0x2000>;
+                               status = "disabled";
+                       };
+               };
+       };
+
+       ahb@80080000 {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               reg = <0x80080000 0x80000>;
+               ranges;
+
+               usbctrl@80080000 {
+                       reg = <0x80080000 0x10000>;
+                       status = "disabled";
+               };
+       };
+};
index a51a08fc2af98ac4631b7991bdb62edbabe80f3a..2b0ff60247a41468a6d3cb5886b1bba25f7e1506 100644 (file)
                                status = "okay";
                        };
 
-                       uart@1000a000 {
+                       serial@1000a000 {
                                fsl,uart-has-rtscts;
                                status = "okay";
                        };
 
-                       uart@1000b000 {
+                       serial@1000b000 {
                                fsl,uart-has-rtscts;
                                status = "okay";
                        };
 
-                       uart@1000c000 {
+                       serial@1000c000 {
                                fsl,uart-has-rtscts;
                                status = "okay";
                        };
 
-                       fec@1002b000 {
+                       ethernet@1002b000 {
                                status = "okay";
                        };
 
index bc5e7d5ddd548c01f926880e32705ec84009ee9a..386c769c38d179dcb090bba33af5aeb6ec2f0a0a 100644 (file)
                                status = "disabled";
                        };
 
-                       uart1: uart@1000a000 {
+                       uart1: serial@1000a000 {
                                compatible = "fsl,imx27-uart", "fsl,imx21-uart";
                                reg = <0x1000a000 0x1000>;
                                interrupts = <20>;
                                status = "disabled";
                        };
 
-                       uart2: uart@1000b000 {
+                       uart2: serial@1000b000 {
                                compatible = "fsl,imx27-uart", "fsl,imx21-uart";
                                reg = <0x1000b000 0x1000>;
                                interrupts = <19>;
                                status = "disabled";
                        };
 
-                       uart3: uart@1000c000 {
+                       uart3: serial@1000c000 {
                                compatible = "fsl,imx27-uart", "fsl,imx21-uart";
                                reg = <0x1000c000 0x1000>;
                                interrupts = <18>;
                                status = "disabled";
                        };
 
-                       uart4: uart@1000d000 {
+                       uart4: serial@1000d000 {
                                compatible = "fsl,imx27-uart", "fsl,imx21-uart";
                                reg = <0x1000d000 0x1000>;
                                interrupts = <17>;
                                status = "disabled";
                        };
 
-                       uart5: uart@1001b000 {
+                       uart5: serial@1001b000 {
                                compatible = "fsl,imx27-uart", "fsl,imx21-uart";
                                reg = <0x1001b000 0x1000>;
                                interrupts = <49>;
                                status = "disabled";
                        };
 
-                       uart6: uart@1001c000 {
+                       uart6: serial@1001c000 {
                                compatible = "fsl,imx27-uart", "fsl,imx21-uart";
                                reg = <0x1001c000 0x1000>;
                                interrupts = <48>;
                                status = "disabled";
                        };
 
-                       fec: fec@1002b000 {
+                       fec: ethernet@1002b000 {
                                compatible = "fsl,imx27-fec";
                                reg = <0x1002b000 0x4000>;
                                interrupts = <50>;
                                status = "disabled";
                        };
                };
+               nand@d8000000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+
+                       compatible = "fsl,imx27-nand";
+                       reg = <0xd8000000 0x1000>;
+                       interrupts = <29>;
+                       status = "disabled";
+               };
        };
 };
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
new file mode 100644 (file)
index 0000000..ee520a5
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx28.dtsi"
+
+/ {
+       model = "Freescale i.MX28 Evaluation Kit";
+       compatible = "fsl,imx28-evk", "fsl,imx28";
+
+       /* 128 MiB (0x08000000) of DRAM at the EMI base address */
+       memory {
+               reg = <0x40000000 0x08000000>;
+       };
+
+       apb@80000000 {
+               apbh@80000000 {
+                       /* SSP0 in MMC mode: 8-bit bus with card-detect/clock pad fixups */
+                       ssp0: ssp@80010000 {
+                               compatible = "fsl,imx28-mmc";
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&mmc0_8bit_pins_a
+                                       &mmc0_cd_cfg &mmc0_sck_cfg>;
+                               bus-width = <8>;
+                               /* card write-protect sense on GPIO bank 2, pin 12 */
+                               wp-gpios = <&gpio2 12 0>;
+                               status = "okay";
+                       };
+
+                       /* second MMC slot on SSP1; no pinctrl group given here */
+                       ssp1: ssp@80012000 {
+                               compatible = "fsl,imx28-mmc";
+                               bus-width = <8>;
+                               wp-gpios = <&gpio0 28 0>;
+                               status = "okay";
+                       };
+               };
+
+               apbx@80040000 {
+                       saif0: saif@80042000 {
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&saif0_pins_a>;
+                               status = "okay";
+                       };
+
+                       /* saif1 is clocked from saif0 (fsl,saif-master) */
+                       saif1: saif@80046000 {
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&saif1_pins_a>;
+                               fsl,saif-master = <&saif0>;
+                               status = "okay";
+                       };
+
+                       i2c0: i2c@80058000 {
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&i2c0_pins_a>;
+                               status = "okay";
+
+                               /* SGTL5000 audio codec at I2C address 0x0a,
+                                  powered from the fixed 3.3 V rail below */
+                               sgtl5000: codec@0a {
+                                       compatible = "fsl,sgtl5000";
+                                       reg = <0x0a>;
+                                       VDDA-supply = <&reg_3p3v>;
+                                       VDDIO-supply = <&reg_3p3v>;
+
+                               };
+                       };
+
+                       /* debug UART, enabled with its pad group */
+                       duart: serial@80074000 {
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&duart_pins_a>;
+                               status = "okay";
+                       };
+               };
+       };
+
+       ahb@80080000 {
+               /* both on-board Ethernet MACs use an RMII PHY interface */
+               mac0: ethernet@800f0000 {
+                       phy-mode = "rmii";
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&mac0_pins_a>;
+                       status = "okay";
+               };
+
+               mac1: ethernet@800f4000 {
+                       phy-mode = "rmii";
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&mac1_pins_a>;
+                       status = "okay";
+               };
+       };
+
+       regulators {
+               compatible = "simple-bus";
+
+               /* always-on fixed 3.3 V supply feeding the codec */
+               reg_3p3v: 3p3v {
+                       compatible = "regulator-fixed";
+                       regulator-name = "3P3V";
+                       regulator-min-microvolt = <3300000>;
+                       regulator-max-microvolt = <3300000>;
+                       regulator-always-on;
+               };
+       };
+
+       /* machine-level sound card binding SAIF0/SAIF1 to the SGTL5000 */
+       sound {
+               compatible = "fsl,imx28-evk-sgtl5000",
+                            "fsl,mxs-audio-sgtl5000";
+               model = "imx28-evk-sgtl5000";
+               saif-controllers = <&saif0 &saif1>;
+               audio-codec = <&sgtl5000>;
+       };
+};
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
new file mode 100644 (file)
index 0000000..4634cb8
--- /dev/null
@@ -0,0 +1,497 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+       interrupt-parent = <&icoll>;
+
+       aliases {
+               gpio0 = &gpio0;
+               gpio1 = &gpio1;
+               gpio2 = &gpio2;
+               gpio3 = &gpio3;
+               gpio4 = &gpio4;
+               saif0 = &saif0;
+               saif1 = &saif1;
+       };
+
+       cpus {
+               cpu@0 {
+                       compatible = "arm,arm926ejs";
+               };
+       };
+
+       apb@80000000 {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               reg = <0x80000000 0x80000>;
+               ranges;
+
+               apbh@80000000 {
+                       compatible = "simple-bus";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       reg = <0x80000000 0x3c900>;
+                       ranges;
+
+                       icoll: interrupt-controller@80000000 {
+                               compatible = "fsl,imx28-icoll", "fsl,mxs-icoll";
+                               interrupt-controller;
+                               #interrupt-cells = <1>;
+                               reg = <0x80000000 0x2000>;
+                       };
+
+                       hsadc@80002000 {
+                               reg = <0x80002000 2000>;
+                               interrupts = <13 87>;
+                               status = "disabled";
+                       };
+
+                       dma-apbh@80004000 {
+                               compatible = "fsl,imx28-dma-apbh";
+                               reg = <0x80004000 2000>;
+                       };
+
+                       perfmon@80006000 {
+                               reg = <0x80006000 800>;
+                               interrupts = <27>;
+                               status = "disabled";
+                       };
+
+                       bch@8000a000 {
+                               reg = <0x8000a000 2000>;
+                               interrupts = <41>;
+                               status = "disabled";
+                       };
+
+                       gpmi@8000c000 {
+                               reg = <0x8000c000 2000>;
+                               interrupts = <42 88>;
+                               status = "disabled";
+                       };
+
+                       ssp0: ssp@80010000 {
+                               reg = <0x80010000 2000>;
+                               interrupts = <96 82>;
+                               fsl,ssp-dma-channel = <0>;
+                               status = "disabled";
+                       };
+
+                       ssp1: ssp@80012000 {
+                               reg = <0x80012000 2000>;
+                               interrupts = <97 83>;
+                               fsl,ssp-dma-channel = <1>;
+                               status = "disabled";
+                       };
+
+                       ssp2: ssp@80014000 {
+                               reg = <0x80014000 2000>;
+                               interrupts = <98 84>;
+                               fsl,ssp-dma-channel = <2>;
+                               status = "disabled";
+                       };
+
+                       ssp3: ssp@80016000 {
+                               reg = <0x80016000 2000>;
+                               interrupts = <99 85>;
+                               fsl,ssp-dma-channel = <3>;
+                               status = "disabled";
+                       };
+
+                       pinctrl@80018000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "fsl,imx28-pinctrl", "simple-bus";
+                               reg = <0x80018000 2000>;
+
+                               gpio0: gpio@0 {
+                                       compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+                                       interrupts = <127>;
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       interrupt-controller;
+                                       #interrupt-cells = <2>;
+                               };
+
+                               gpio1: gpio@1 {
+                                       compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+                                       interrupts = <126>;
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       interrupt-controller;
+                                       #interrupt-cells = <2>;
+                               };
+
+                               gpio2: gpio@2 {
+                                       compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+                                       interrupts = <125>;
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       interrupt-controller;
+                                       #interrupt-cells = <2>;
+                               };
+
+                               gpio3: gpio@3 {
+                                       compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+                                       interrupts = <124>;
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       interrupt-controller;
+                                       #interrupt-cells = <2>;
+                               };
+
+                               gpio4: gpio@4 {
+                                       compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+                                       interrupts = <123>;
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       interrupt-controller;
+                                       #interrupt-cells = <2>;
+                               };
+
+                               duart_pins_a: duart@0 {
+                                       reg = <0>;
+                                       fsl,pinmux-ids = <0x3102 0x3112>;
+                                       fsl,drive-strength = <0>;
+                                       fsl,voltage = <1>;
+                                       fsl,pull-up = <0>;
+                               };
+
+                               mac0_pins_a: mac0@0 {
+                                       reg = <0>;
+                                       fsl,pinmux-ids = <0x4000 0x4010 0x4020
+                                               0x4030 0x4040 0x4060 0x4070
+                                               0x4080 0x4100>;
+                                       fsl,drive-strength = <1>;
+                                       fsl,voltage = <1>;
+                                       fsl,pull-up = <1>;
+                               };
+
+                               mac1_pins_a: mac1@0 {
+                                       reg = <0>;
+                                       fsl,pinmux-ids = <0x40f1 0x4091 0x40a1
+                                               0x40e1 0x40b1 0x40c1>;
+                                       fsl,drive-strength = <1>;
+                                       fsl,voltage = <1>;
+                                       fsl,pull-up = <1>;
+                               };
+
+                               mmc0_8bit_pins_a: mmc0-8bit@0 {
+                                       reg = <0>;
+                                       fsl,pinmux-ids = <0x2000 0x2010 0x2020
+                                               0x2030 0x2040 0x2050 0x2060
+                                               0x2070 0x2080 0x2090 0x20a0>;
+                                       fsl,drive-strength = <1>;
+                                       fsl,voltage = <1>;
+                                       fsl,pull-up = <1>;
+                               };
+
+                               mmc0_cd_cfg: mmc0-cd-cfg {
+                                       fsl,pinmux-ids = <0x2090>;
+                                       fsl,pull-up = <0>;
+                               };
+
+                               mmc0_sck_cfg: mmc0-sck-cfg {
+                                       fsl,pinmux-ids = <0x20a0>;
+                                       fsl,drive-strength = <2>;
+                                       fsl,pull-up = <0>;
+                               };
+
+                               i2c0_pins_a: i2c0@0 {
+                                       reg = <0>;
+                                       fsl,pinmux-ids = <0x3180 0x3190>;
+                                       fsl,drive-strength = <1>;
+                                       fsl,voltage = <1>;
+                                       fsl,pull-up = <1>;
+                               };
+
+                               saif0_pins_a: saif0@0 {
+                                       reg = <0>;
+                                       fsl,pinmux-ids =
+                                               <0x3140 0x3150 0x3160 0x3170>;
+                                       fsl,drive-strength = <2>;
+                                       fsl,voltage = <1>;
+                                       fsl,pull-up = <1>;
+                               };
+
+                               saif1_pins_a: saif1@0 {
+                                       reg = <0>;
+                                       fsl,pinmux-ids = <0x31a0>;
+                                       fsl,drive-strength = <2>;
+                                       fsl,voltage = <1>;
+                                       fsl,pull-up = <1>;
+                               };
+                       };
+
+                       digctl@8001c000 {
+                               reg = <0x8001c000 2000>;
+                               interrupts = <89>;
+                               status = "disabled";
+                       };
+
+                       etm@80022000 {
+                               reg = <0x80022000 2000>;
+                               status = "disabled";
+                       };
+
+                       dma-apbx@80024000 {
+                               compatible = "fsl,imx28-dma-apbx";
+                               reg = <0x80024000 2000>;
+                       };
+
+                       dcp@80028000 {
+                               reg = <0x80028000 2000>;
+                               interrupts = <52 53 54>;
+                               status = "disabled";
+                       };
+
+                       pxp@8002a000 {
+                               reg = <0x8002a000 2000>;
+                               interrupts = <39>;
+                               status = "disabled";
+                       };
+
+                       ocotp@8002c000 {
+                               reg = <0x8002c000 2000>;
+                               status = "disabled";
+                       };
+
+                       axi-ahb@8002e000 {
+                               reg = <0x8002e000 2000>;
+                               status = "disabled";
+                       };
+
+                       lcdif@80030000 {
+                               reg = <0x80030000 2000>;
+                               interrupts = <38 86>;
+                               status = "disabled";
+                       };
+
+                       can0: can@80032000 {
+                               reg = <0x80032000 2000>;
+                               interrupts = <8>;
+                               status = "disabled";
+                       };
+
+                       can1: can@80034000 {
+                               reg = <0x80034000 2000>;
+                               interrupts = <9>;
+                               status = "disabled";
+                       };
+
+                       simdbg@8003c000 {
+                               reg = <0x8003c000 200>;
+                               status = "disabled";
+                       };
+
+                       simgpmisel@8003c200 {
+                               reg = <0x8003c200 100>;
+                               status = "disabled";
+                       };
+
+                       simsspsel@8003c300 {
+                               reg = <0x8003c300 100>;
+                               status = "disabled";
+                       };
+
+                       simmemsel@8003c400 {
+                               reg = <0x8003c400 100>;
+                               status = "disabled";
+                       };
+
+                       gpiomon@8003c500 {
+                               reg = <0x8003c500 100>;
+                               status = "disabled";
+                       };
+
+                       simenet@8003c700 {
+                               reg = <0x8003c700 100>;
+                               status = "disabled";
+                       };
+
+                       armjtag@8003c800 {
+                               reg = <0x8003c800 100>;
+                               status = "disabled";
+                       };
+                };
+
+               apbx@80040000 {
+                       compatible = "simple-bus";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       reg = <0x80040000 0x40000>;
+                       ranges;
+
+                       clkctl@80040000 {
+                               reg = <0x80040000 2000>;
+                               status = "disabled";
+                       };
+
+                       saif0: saif@80042000 {
+                               compatible = "fsl,imx28-saif";
+                               reg = <0x80042000 2000>;
+                               interrupts = <59 80>;
+                               fsl,saif-dma-channel = <4>;
+                               status = "disabled";
+                       };
+
+                       power@80044000 {
+                               reg = <0x80044000 2000>;
+                               status = "disabled";
+                       };
+
+                       saif1: saif@80046000 {
+                               compatible = "fsl,imx28-saif";
+                               reg = <0x80046000 2000>;
+                               interrupts = <58 81>;
+                               fsl,saif-dma-channel = <5>;
+                               status = "disabled";
+                       };
+
+                       lradc@80050000 {
+                               reg = <0x80050000 2000>;
+                               status = "disabled";
+                       };
+
+                       spdif@80054000 {
+                               reg = <0x80054000 2000>;
+                               interrupts = <45 66>;
+                               status = "disabled";
+                       };
+
+                       rtc@80056000 {
+                               reg = <0x80056000 2000>;
+                               interrupts = <28 29>;
+                               status = "disabled";
+                       };
+
+                       i2c0: i2c@80058000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "fsl,imx28-i2c";
+                               reg = <0x80058000 2000>;
+                               interrupts = <111 68>;
+                               status = "disabled";
+                       };
+
+                       i2c1: i2c@8005a000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "fsl,imx28-i2c";
+                               reg = <0x8005a000 2000>;
+                               interrupts = <110 69>;
+                               status = "disabled";
+                       };
+
+                       pwm@80064000 {
+                               reg = <0x80064000 2000>;
+                               status = "disabled";
+                       };
+
+                       timrot@80068000 {
+                               reg = <0x80068000 2000>;
+                               status = "disabled";
+                       };
+
+                       auart0: serial@8006a000 {
+                               reg = <0x8006a000 0x2000>;
+                               interrupts = <112 70 71>;
+                               status = "disabled";
+                       };
+
+                       auart1: serial@8006c000 {
+                               reg = <0x8006c000 0x2000>;
+                               interrupts = <113 72 73>;
+                               status = "disabled";
+                       };
+
+                       auart2: serial@8006e000 {
+                               reg = <0x8006e000 0x2000>;
+                               interrupts = <114 74 75>;
+                               status = "disabled";
+                       };
+
+                       auart3: serial@80070000 {
+                               reg = <0x80070000 0x2000>;
+                               interrupts = <115 76 77>;
+                               status = "disabled";
+                       };
+
+                       auart4: serial@80072000 {
+                               reg = <0x80072000 0x2000>;
+                               interrupts = <116 78 79>;
+                               status = "disabled";
+                       };
+
+                       duart: serial@80074000 {
+                               compatible = "arm,pl011", "arm,primecell";
+                               reg = <0x80074000 0x1000>;
+                               interrupts = <47>;
+                               status = "disabled";
+                       };
+
+                       usbphy0: usbphy@8007c000 {
+                               reg = <0x8007c000 0x2000>;
+                               status = "disabled";
+                       };
+
+                       usbphy1: usbphy@8007e000 {
+                               reg = <0x8007e000 0x2000>;
+                               status = "disabled";
+                       };
+               };
+       };
+
+       ahb@80080000 {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               reg = <0x80080000 0x80000>;
+               ranges;
+
+               usbctrl0: usbctrl@80080000 {
+                       reg = <0x80080000 0x10000>;
+                       status = "disabled";
+               };
+
+               usbctrl1: usbctrl@80090000 {
+                       reg = <0x80090000 0x10000>;
+                       status = "disabled";
+               };
+
+               dflpt@800c0000 {
+                       reg = <0x800c0000 0x10000>;
+                       status = "disabled";
+               };
+
+               mac0: ethernet@800f0000 {
+                       compatible = "fsl,imx28-fec";
+                       reg = <0x800f0000 0x4000>;
+                       interrupts = <101>;
+                       status = "disabled";
+               };
+
+               mac1: ethernet@800f4000 {
+                       compatible = "fsl,imx28-fec";
+                       reg = <0x800f4000 0x4000>;
+                       interrupts = <102>;
+                       status = "disabled";
+               };
+
+               switch@800f8000 {
+                       reg = <0x800f8000 0x8000>;
+                       status = "disabled";
+               };
+
+       };
+};
index 9949e6060dee0e185ee865453aa70b83f312ebbb..de065b5976e6cf0ed025c7e550198f4b079b817c 100644 (file)
        model = "Freescale i.MX51 Babbage Board";
        compatible = "fsl,imx51-babbage", "fsl,imx51";
 
-       chosen {
-               bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
-       };
-
        memory {
                reg = <0x90000000 0x20000000>;
        };
@@ -40,7 +36,7 @@
                                        status = "okay";
                                };
 
-                               uart3: uart@7000c000 {
+                               uart3: serial@7000c000 {
                                        fsl,uart-has-rtscts;
                                        status = "okay";
                                };
                                                };
                                        };
                                };
+
+                               ssi2: ssi@70014000 {
+                                       fsl,mode = "i2s-slave";
+                                       status = "okay";
+                               };
                        };
 
                        wdog@73f98000 { /* WDOG1 */
                                reg = <0x73fa8000 0x4000>;
                        };
 
-                       uart1: uart@73fbc000 {
+                       uart1: serial@73fbc000 {
                                fsl,uart-has-rtscts;
                                status = "okay";
                        };
 
-                       uart2: uart@73fc0000 {
+                       uart2: serial@73fc0000 {
                                status = "okay";
                        };
                };
                        i2c@83fc4000 { /* I2C2 */
                                status = "okay";
 
-                               codec: sgtl5000@0a {
+                               sgtl5000: codec@0a {
                                        compatible = "fsl,sgtl5000";
                                        reg = <0x0a>;
+                                       clock-frequency = <26000000>;
+                                       VDDA-supply = <&vdig_reg>;
+                                       VDDIO-supply = <&vvideo_reg>;
                                };
                        };
 
-                       fec@83fec000 {
+                       audmux@83fd0000 {
+                               status = "okay";
+                       };
+
+                       ethernet@83fec000 {
                                phy-mode = "mii";
                                status = "okay";
                        };
                        gpio-key,wakeup;
                };
        };
+
+       sound {
+               compatible = "fsl,imx51-babbage-sgtl5000",
+                            "fsl,imx-audio-sgtl5000";
+               model = "imx51-babbage-sgtl5000";
+               ssi-controller = <&ssi2>;
+               audio-codec = <&sgtl5000>;
+               audio-routing =
+                       "MIC_IN", "Mic Jack",
+                       "Mic Jack", "Mic Bias",
+                       "Headphone Jack", "HP_OUT";
+               mux-int-port = <2>;
+               mux-ext-port = <3>;
+       };
 };
index 6663986fe1c85ea06ea5b1a64dbe864afd147290..bfa65abe8ef29444f4a56ca88c7fa1fd05591a47 100644 (file)
@@ -86,7 +86,7 @@
                                        status = "disabled";
                                };
 
-                               uart3: uart@7000c000 {
+                               uart3: serial@7000c000 {
                                        compatible = "fsl,imx51-uart", "fsl,imx21-uart";
                                        reg = <0x7000c000 0x4000>;
                                        interrupts = <33>;
                                        status = "disabled";
                                };
 
+                               ssi2: ssi@70014000 {
+                                       compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
+                                       reg = <0x70014000 0x4000>;
+                                       interrupts = <30>;
+                                       fsl,fifo-depth = <15>;
+                                       fsl,ssi-dma-events = <25 24 23 22>; /* TX0 RX0 TX1 RX1 */
+                                       status = "disabled";
+                               };
+
                                esdhc@70020000 { /* ESDHC3 */
                                        compatible = "fsl,imx51-esdhc";
                                        reg = <0x70020000 0x4000>;
                                status = "disabled";
                        };
 
-                       uart1: uart@73fbc000 {
+                       uart1: serial@73fbc000 {
                                compatible = "fsl,imx51-uart", "fsl,imx21-uart";
                                reg = <0x73fbc000 0x4000>;
                                interrupts = <31>;
                                status = "disabled";
                        };
 
-                       uart2: uart@73fc0000 {
+                       uart2: serial@73fc0000 {
                                compatible = "fsl,imx51-uart", "fsl,imx21-uart";
                                reg = <0x73fc0000 0x4000>;
                                interrupts = <32>;
                                status = "disabled";
                        };
 
-                       fec@83fec000 {
+                       ssi1: ssi@83fcc000 {
+                               compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
+                               reg = <0x83fcc000 0x4000>;
+                               interrupts = <29>;
+                               fsl,fifo-depth = <15>;
+                               fsl,ssi-dma-events = <29 28 27 26>; /* TX0 RX0 TX1 RX1 */
+                               status = "disabled";
+                       };
+
+                       audmux@83fd0000 {
+                               compatible = "fsl,imx51-audmux", "fsl,imx31-audmux";
+                               reg = <0x83fd0000 0x4000>;
+                               status = "disabled";
+                       };
+
+                       ssi3: ssi@83fe8000 {
+                               compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
+                               reg = <0x83fe8000 0x4000>;
+                               interrupts = <96>;
+                               fsl,fifo-depth = <15>;
+                               fsl,ssi-dma-events = <47 46 37 35>; /* TX0 RX0 TX1 RX1 */
+                               status = "disabled";
+                       };
+
+                       ethernet@83fec000 {
                                compatible = "fsl,imx51-fec", "fsl,imx27-fec";
                                reg = <0x83fec000 0x4000>;
                                interrupts = <87>;
index 2dccce46ed81c7f2018423e7cc1231e8b2ff6df5..5b8eafcdbeec638009a894365b577df17256b3c0 100644 (file)
        model = "Freescale i.MX53 Automotive Reference Design Board";
        compatible = "fsl,imx53-ard", "fsl,imx53";
 
-       chosen {
-               bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
-       };
-
        memory {
                reg = <0x70000000 0x40000000>;
        };
@@ -44,7 +40,7 @@
                                reg = <0x53fa8000 0x4000>;
                        };
 
-                       uart1: uart@53fbc000 {
+                       uart1: serial@53fbc000 {
                                status = "okay";
                        };
                };
index 5bac4aa4800bfeb614cdc0e33f3385fb1f0c3833..9c798034675e647726df90d6a981cecba89b93ff 100644 (file)
        model = "Freescale i.MX53 Evaluation Kit";
        compatible = "fsl,imx53-evk", "fsl,imx53";
 
-       chosen {
-               bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
-       };
-
        memory {
                reg = <0x70000000 0x80000000>;
        };
@@ -75,7 +71,7 @@
                                reg = <0x53fa8000 0x4000>;
                        };
 
-                       uart1: uart@53fbc000 {
+                       uart1: serial@53fbc000 {
                                status = "okay";
                        };
                };
@@ -99,7 +95,7 @@
                                };
                        };
 
-                       fec@63fec000 {
+                       ethernet@63fec000 {
                                phy-mode = "rmii";
                                phy-reset-gpios = <&gpio7 6 0>;
                                status = "okay";
index 5c57c8672c3634717328f554f35b99d477c8e690..2d803a9a69496d4b165dc849667c4c5bd26cb1a8 100644 (file)
        model = "Freescale i.MX53 Quick Start Board";
        compatible = "fsl,imx53-qsb", "fsl,imx53";
 
-       chosen {
-               bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
-       };
-
        memory {
                reg = <0x70000000 0x40000000>;
        };
                                        status = "okay";
                                };
 
+                               ssi2: ssi@50014000 {
+                                       fsl,mode = "i2s-slave";
+                                       status = "okay";
+                               };
+
                                esdhc@50020000 { /* ESDHC3 */
                                        cd-gpios = <&gpio3 11 0>;
                                        wp-gpios = <&gpio3 12 0>;
@@ -49,7 +50,7 @@
                                reg = <0x53fa8000 0x4000>;
                        };
 
-                       uart1: uart@53fbc000 {
+                       uart1: serial@53fbc000 {
                                status = "okay";
                        };
                };
                        i2c@63fc4000 { /* I2C2 */
                                status = "okay";
 
-                               codec: sgtl5000@0a {
+                               sgtl5000: codec@0a {
                                        compatible = "fsl,sgtl5000";
                                        reg = <0x0a>;
+                                       VDDA-supply = <&reg_3p2v>;
+                                       VDDIO-supply = <&reg_3p2v>;
                                };
                        };
 
                                };
 
                                pmic: dialog@48 {
-                                       compatible = "dialog,da9053", "dialog,da9052";
+                                       compatible = "dlg,da9053-aa", "dlg,da9052";
                                        reg = <0x48>;
+
+                                       regulators {
+                                               buck0 {
+                                                       regulator-min-microvolt = <500000>;
+                                                       regulator-max-microvolt = <2075000>;
+                                               };
+
+                                               buck1 {
+                                                       regulator-min-microvolt = <500000>;
+                                                       regulator-max-microvolt = <2075000>;
+                                               };
+
+                                               buck2 {
+                                                       regulator-min-microvolt = <925000>;
+                                                       regulator-max-microvolt = <2500000>;
+                                               };
+
+                                               buck3 {
+                                                       regulator-min-microvolt = <925000>;
+                                                       regulator-max-microvolt = <2500000>;
+                                               };
+
+                                               ldo4 {
+                                                       regulator-min-microvolt = <600000>;
+                                                       regulator-max-microvolt = <1800000>;
+                                               };
+
+                                               ldo5 {
+                                                       regulator-min-microvolt = <600000>;
+                                                       regulator-max-microvolt = <1800000>;
+                                               };
+
+                                               ldo6 {
+                                                       regulator-min-microvolt = <1725000>;
+                                                       regulator-max-microvolt = <3300000>;
+                                               };
+
+                                               ldo7 {
+                                                       regulator-min-microvolt = <1725000>;
+                                                       regulator-max-microvolt = <3300000>;
+                                               };
+
+                                               ldo8 {
+                                                       regulator-min-microvolt = <1200000>;
+                                                       regulator-max-microvolt = <3600000>;
+                                               };
+
+                                               ldo9 {
+                                                       regulator-min-microvolt = <1200000>;
+                                                       regulator-max-microvolt = <3600000>;
+                                               };
+
+                                               ldo10 {
+                                                       regulator-min-microvolt = <1200000>;
+                                                       regulator-max-microvolt = <3600000>;
+                                               };
+
+                                               ldo11 {
+                                                       regulator-min-microvolt = <1200000>;
+                                                       regulator-max-microvolt = <3600000>;
+                                               };
+
+                                               ldo12 {
+                                                       regulator-min-microvolt = <1250000>;
+                                                       regulator-max-microvolt = <3650000>;
+                                               };
+
+                                               ldo13 {
+                                                       regulator-min-microvolt = <1200000>;
+                                                       regulator-max-microvolt = <3600000>;
+                                               };
+                                       };
                                };
                        };
 
-                       fec@63fec000 {
+                       audmux@63fd0000 {
+                               status = "okay";
+                       };
+
+                       ethernet@63fec000 {
                                phy-mode = "rmii";
                                phy-reset-gpios = <&gpio7 6 0>;
                                status = "okay";
                        linux,default-trigger = "heartbeat";
                };
        };
+
+       regulators {
+               compatible = "simple-bus";
+
+               reg_3p2v: 3p2v {
+                       compatible = "regulator-fixed";
+                       regulator-name = "3P2V";
+                       regulator-min-microvolt = <3200000>;
+                       regulator-max-microvolt = <3200000>;
+                       regulator-always-on;
+               };
+       };
+
+       sound {
+               compatible = "fsl,imx53-qsb-sgtl5000",
+                            "fsl,imx-audio-sgtl5000";
+               model = "imx53-qsb-sgtl5000";
+               ssi-controller = <&ssi2>;
+               audio-codec = <&sgtl5000>;
+               audio-routing =
+                       "MIC_IN", "Mic Jack",
+                       "Mic Jack", "Mic Bias",
+                       "Headphone Jack", "HP_OUT";
+               mux-int-port = <2>;
+               mux-ext-port = <5>;
+       };
 };
index c7ee86c2dfb530a7c77a38fc2826632b110e2e9b..08091029168e9bf9d224a1101b458f60c9e847dc 100644 (file)
        model = "Freescale i.MX53 Smart Mobile Reference Design Board";
        compatible = "fsl,imx53-smd", "fsl,imx53";
 
-       chosen {
-               bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
-       };
-
        memory {
                reg = <0x70000000 0x40000000>;
        };
                                };
 
                                esdhc@50008000 { /* ESDHC2 */
-                                       fsl,card-wired;
+                                       non-removable;
                                        status = "okay";
                                };
 
-                               uart3: uart@5000c000 {
+                               uart3: serial@5000c000 {
                                        fsl,uart-has-rtscts;
                                        status = "okay";
                                };
@@ -76,7 +72,7 @@
                                };
 
                                esdhc@50020000 { /* ESDHC3 */
-                                       fsl,card-wired;
+                                       non-removable;
                                        status = "okay";
                                };
                        };
                                reg = <0x53fa8000 0x4000>;
                        };
 
-                       uart1: uart@53fbc000 {
+                       uart1: serial@53fbc000 {
                                status = "okay";
                        };
 
-                       uart2: uart@53fc0000 {
+                       uart2: serial@53fc0000 {
                                status = "okay";
                        };
                };
                                };
                        };
 
-                       fec@63fec000 {
+                       ethernet@63fec000 {
                                phy-mode = "rmii";
                                phy-reset-gpios = <&gpio7 6 0>;
                                status = "okay";
index 5dd91b942c916b9e16b3239c594492f46f9975c8..e3e869470cd3e4f6dc5f56daf2645f85bf6f14bc 100644 (file)
@@ -88,7 +88,7 @@
                                        status = "disabled";
                                };
 
-                               uart3: uart@5000c000 {
+                               uart3: serial@5000c000 {
                                        compatible = "fsl,imx53-uart", "fsl,imx21-uart";
                                        reg = <0x5000c000 0x4000>;
                                        interrupts = <33>;
                                        status = "disabled";
                                };
 
+                               ssi2: ssi@50014000 {
+                                       compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
+                                       reg = <0x50014000 0x4000>;
+                                       interrupts = <30>;
+                                       fsl,fifo-depth = <15>;
+                                       fsl,ssi-dma-events = <25 24 23 22>; /* TX0 RX0 TX1 RX1 */
+                                       status = "disabled";
+                               };
+
                                esdhc@50020000 { /* ESDHC3 */
                                        compatible = "fsl,imx53-esdhc";
                                        reg = <0x50020000 0x4000>;
                                status = "disabled";
                        };
 
-                       uart1: uart@53fbc000 {
+                       uart1: serial@53fbc000 {
                                compatible = "fsl,imx53-uart", "fsl,imx21-uart";
                                reg = <0x53fbc000 0x4000>;
                                interrupts = <31>;
                                status = "disabled";
                        };
 
-                       uart2: uart@53fc0000 {
+                       uart2: serial@53fc0000 {
                                compatible = "fsl,imx53-uart", "fsl,imx21-uart";
                                reg = <0x53fc0000 0x4000>;
                                interrupts = <32>;
                                status = "disabled";
                        };
 
-                       uart4: uart@53ff0000 {
+                       uart4: serial@53ff0000 {
                                compatible = "fsl,imx53-uart", "fsl,imx21-uart";
                                reg = <0x53ff0000 0x4000>;
                                interrupts = <13>;
                        reg = <0x60000000 0x10000000>;
                        ranges;
 
-                       uart5: uart@63f90000 {
+                       uart5: serial@63f90000 {
                                compatible = "fsl,imx53-uart", "fsl,imx21-uart";
                                reg = <0x63f90000 0x4000>;
                                interrupts = <86>;
                                status = "disabled";
                        };
 
-                       fec@63fec000 {
+                       ssi1: ssi@63fcc000 {
+                               compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
+                               reg = <0x63fcc000 0x4000>;
+                               interrupts = <29>;
+                               fsl,fifo-depth = <15>;
+                               fsl,ssi-dma-events = <29 28 27 26>; /* TX0 RX0 TX1 RX1 */
+                               status = "disabled";
+                       };
+
+                       audmux@63fd0000 {
+                               compatible = "fsl,imx53-audmux", "fsl,imx31-audmux";
+                               reg = <0x63fd0000 0x4000>;
+                               status = "disabled";
+                       };
+
+                       ssi3: ssi@63fe8000 {
+                               compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
+                               reg = <0x63fe8000 0x4000>;
+                               interrupts = <96>;
+                               fsl,fifo-depth = <15>;
+                               fsl,ssi-dma-events = <47 46 45 44>; /* TX0 RX0 TX1 RX1 */
+                               status = "disabled";
+                       };
+
+                       ethernet@63fec000 {
                                compatible = "fsl,imx53-fec", "fsl,imx25-fec";
                                reg = <0x63fec000 0x4000>;
                                interrupts = <87>;
index ce1c8238c8975c6b84f72304f7ff8ae68dda8c88..db4c6096c562eaea2415570934b4cd7fad4a981a 100644 (file)
        model = "Freescale i.MX6 Quad Armadillo2 Board";
        compatible = "fsl,imx6q-arm2", "fsl,imx6q";
 
-       chosen {
-               bootargs = "console=ttymxc0,115200 root=/dev/mmcblk3p3 rootwait";
-       };
-
        memory {
                reg = <0x10000000 0x80000000>;
        };
 
        soc {
                aips-bus@02100000 { /* AIPS2 */
-                       enet@02188000 {
+                       ethernet@02188000 {
                                phy-mode = "rgmii";
-                               local-mac-address = [00 04 9F 01 1B 61];
                                status = "okay";
                        };
 
                                cd-gpios = <&gpio6 11 0>;
                                wp-gpios = <&gpio6 14 0>;
                                vmmc-supply = <&reg_3p3v>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_usdhc3_1>;
                                status = "okay";
                        };
 
                        usdhc@0219c000 { /* uSDHC4 */
-                               fsl,card-wired;
+                               non-removable;
                                vmmc-supply = <&reg_3p3v>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_usdhc4_1>;
                                status = "okay";
                        };
 
-                       uart4: uart@021f0000 {
+                       uart4: serial@021f0000 {
                                status = "okay";
                        };
                };
index 4663a4e5a285dab5232c6bd285c17b6f2136a063..e0ec92973e7e7d253e560cb4f66fb72800be2ad9 100644 (file)
        };
 
        soc {
+               aips-bus@02000000 { /* AIPS1 */
+                       spba-bus@02000000 {
+                               ecspi@02008000 { /* eCSPI1 */
+                                       fsl,spi-num-chipselects = <1>;
+                                       cs-gpios = <&gpio3 19 0>;
+                                       status = "okay";
+
+                                       flash: m25p80@0 {
+                                               compatible = "sst,sst25vf016b";
+                                               spi-max-frequency = <20000000>;
+                                               reg = <0>;
+                                       };
+                               };
+
+                               ssi1: ssi@02028000 {
+                                       fsl,mode = "i2s-slave";
+                                       status = "okay";
+                               };
+                       };
+
+               };
+
                aips-bus@02100000 { /* AIPS2 */
-                       enet@02188000 {
+                       ethernet@02188000 {
                                phy-mode = "rgmii";
                                phy-reset-gpios = <&gpio3 23 0>;
                                status = "okay";
                                status = "okay";
                        };
 
-                       uart2: uart@021e8000 {
+                       audmux@021d8000 {
+                               status = "okay";
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_audmux_1>;
+                       };
+
+                       uart2: serial@021e8000 {
                                status = "okay";
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_serial2_1>;
                        };
 
                        i2c@021a0000 { /* I2C1 */
                                status = "okay";
                                clock-frequency = <100000>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_i2c1_1>;
 
                                codec: sgtl5000@0a {
                                        compatible = "fsl,sgtl5000";
                        regulator-always-on;
                };
        };
+
+       sound {
+               compatible = "fsl,imx6q-sabrelite-sgtl5000",
+                            "fsl,imx-audio-sgtl5000";
+               model = "imx6q-sabrelite-sgtl5000";
+               ssi-controller = <&ssi1>;
+               audio-codec = <&codec>;
+               audio-routing =
+                       "MIC_IN", "Mic Jack",
+                       "Mic Jack", "Mic Bias",
+                       "Headphone Jack", "HP_OUT";
+               mux-int-port = <1>;
+               mux-ext-port = <4>;
+       };
 };
diff --git a/arch/arm/boot/dts/imx6q-sabresd.dts b/arch/arm/boot/dts/imx6q-sabresd.dts
new file mode 100644 (file)
index 0000000..07509a1
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx6q.dtsi"
+
+/ {
+       model = "Freescale i.MX6Q SABRE Smart Device Board";
+       compatible = "fsl,imx6q-sabresd", "fsl,imx6q";
+
+       memory {
+               reg = <0x10000000 0x40000000>;
+       };
+
+       soc {
+
+               aips-bus@02000000 { /* AIPS1 */
+                       spba-bus@02000000 {
+                               uart1: serial@02020000 {
+                                       status = "okay";
+                               };
+                       };
+               };
+
+               aips-bus@02100000 { /* AIPS2 */
+                       ethernet@02188000 {
+                               phy-mode = "rgmii";
+                               status = "okay";
+                       };
+
+                       usdhc@02194000 { /* uSDHC2 */
+                               cd-gpios = <&gpio2 2 0>;
+                               wp-gpios = <&gpio2 3 0>;
+                               status = "okay";
+                       };
+
+                       usdhc@02198000 { /* uSDHC3 */
+                               cd-gpios = <&gpio2 0 0>;
+                               wp-gpios = <&gpio2 1 0>;
+                               status = "okay";
+                       };
+               };
+       };
+};
index 4905f51a106f7299465eaae802b7c98714a3e541..8c90cbac945f1d2392e590bdbb725e7154f317e5 100644 (file)
                                        status = "disabled";
                                };
 
-                               uart1: uart@02020000 {
+                               uart1: serial@02020000 {
                                        compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
                                        reg = <0x02020000 0x4000>;
                                        interrupts = <0 26 0x04>;
                                        interrupts = <0 51 0x04>;
                                };
 
-                               ssi@02028000 { /* SSI1 */
+                               ssi1: ssi@02028000 {
+                                       compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
                                        reg = <0x02028000 0x4000>;
                                        interrupts = <0 46 0x04>;
+                                       fsl,fifo-depth = <15>;
+                                       fsl,ssi-dma-events = <38 37>;
+                                       status = "disabled";
                                };
 
-                               ssi@0202c000 { /* SSI2 */
+                               ssi2: ssi@0202c000 {
+                                       compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
                                        reg = <0x0202c000 0x4000>;
                                        interrupts = <0 47 0x04>;
+                                       fsl,fifo-depth = <15>;
+                                       fsl,ssi-dma-events = <42 41>;
+                                       status = "disabled";
                                };
 
-                               ssi@02030000 { /* SSI3 */
+                               ssi3: ssi@02030000 {
+                                       compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
                                        reg = <0x02030000 0x4000>;
                                        interrupts = <0 48 0x04>;
+                                       fsl,fifo-depth = <15>;
+                                       fsl,ssi-dma-events = <46 45>;
+                                       status = "disabled";
                                };
 
                                asrc@02034000 {
                                compatible = "fsl,imx6q-anatop";
                                reg = <0x020c8000 0x1000>;
                                interrupts = <0 49 0x04 0 54 0x04 0 127 0x04>;
+
+                               regulator-1p1@110 {
+                                       compatible = "fsl,anatop-regulator";
+                                       regulator-name = "vdd1p1";
+                                       regulator-min-microvolt = <800000>;
+                                       regulator-max-microvolt = <1375000>;
+                                       regulator-always-on;
+                                       anatop-reg-offset = <0x110>;
+                                       anatop-vol-bit-shift = <8>;
+                                       anatop-vol-bit-width = <5>;
+                                       anatop-min-bit-val = <4>;
+                                       anatop-min-voltage = <800000>;
+                                       anatop-max-voltage = <1375000>;
+                               };
+
+                               regulator-3p0@120 {
+                                       compatible = "fsl,anatop-regulator";
+                                       regulator-name = "vdd3p0";
+                                       regulator-min-microvolt = <2800000>;
+                                       regulator-max-microvolt = <3150000>;
+                                       regulator-always-on;
+                                       anatop-reg-offset = <0x120>;
+                                       anatop-vol-bit-shift = <8>;
+                                       anatop-vol-bit-width = <5>;
+                                       anatop-min-bit-val = <0>;
+                                       anatop-min-voltage = <2625000>;
+                                       anatop-max-voltage = <3400000>;
+                               };
+
+                               regulator-2p5@130 {
+                                       compatible = "fsl,anatop-regulator";
+                                       regulator-name = "vdd2p5";
+                                       regulator-min-microvolt = <2000000>;
+                                       regulator-max-microvolt = <2750000>;
+                                       regulator-always-on;
+                                       anatop-reg-offset = <0x130>;
+                                       anatop-vol-bit-shift = <8>;
+                                       anatop-vol-bit-width = <5>;
+                                       anatop-min-bit-val = <0>;
+                                       anatop-min-voltage = <2000000>;
+                                       anatop-max-voltage = <2750000>;
+                               };
+
+                               regulator-vddcore@140 {
+                                       compatible = "fsl,anatop-regulator";
+                                       regulator-name = "cpu";
+                                       regulator-min-microvolt = <725000>;
+                                       regulator-max-microvolt = <1450000>;
+                                       regulator-always-on;
+                                       anatop-reg-offset = <0x140>;
+                                       anatop-vol-bit-shift = <0>;
+                                       anatop-vol-bit-width = <5>;
+                                       anatop-min-bit-val = <1>;
+                                       anatop-min-voltage = <725000>;
+                                       anatop-max-voltage = <1450000>;
+                               };
+
+                               regulator-vddpu@140 {
+                                       compatible = "fsl,anatop-regulator";
+                                       regulator-name = "vddpu";
+                                       regulator-min-microvolt = <725000>;
+                                       regulator-max-microvolt = <1450000>;
+                                       regulator-always-on;
+                                       anatop-reg-offset = <0x140>;
+                                       anatop-vol-bit-shift = <9>;
+                                       anatop-vol-bit-width = <5>;
+                                       anatop-min-bit-val = <1>;
+                                       anatop-min-voltage = <725000>;
+                                       anatop-max-voltage = <1450000>;
+                               };
+
+                               regulator-vddsoc@140 {
+                                       compatible = "fsl,anatop-regulator";
+                                       regulator-name = "vddsoc";
+                                       regulator-min-microvolt = <725000>;
+                                       regulator-max-microvolt = <1450000>;
+                                       regulator-always-on;
+                                       anatop-reg-offset = <0x140>;
+                                       anatop-vol-bit-shift = <18>;
+                                       anatop-vol-bit-width = <5>;
+                                       anatop-min-bit-val = <1>;
+                                       anatop-min-voltage = <725000>;
+                                       anatop-max-voltage = <1450000>;
+                               };
                        };
 
                        usbphy@020c9000 { /* USBPHY1 */
                        };
 
                        iomuxc@020e0000 {
+                               compatible = "fsl,imx6q-iomuxc";
                                reg = <0x020e0000 0x4000>;
+
+                               /* shared pinctrl settings */
+                               audmux {
+                                       pinctrl_audmux_1: audmux-1 {
+                                               fsl,pins = <18   0x80000000     /* MX6Q_PAD_SD2_DAT0__AUDMUX_AUD4_RXD */
+                                                           1586 0x80000000     /* MX6Q_PAD_SD2_DAT3__AUDMUX_AUD4_TXC */
+                                                           11   0x80000000     /* MX6Q_PAD_SD2_DAT2__AUDMUX_AUD4_TXD */
+                                                           3    0x80000000>;   /* MX6Q_PAD_SD2_DAT1__AUDMUX_AUD4_TXFS */
+                                       };
+                               };
+
+                               i2c1 {
+                                       pinctrl_i2c1_1: i2c1grp-1 {
+                                               fsl,pins = <137 0x4001b8b1      /* MX6Q_PAD_EIM_D21__I2C1_SCL */
+                                                           196 0x4001b8b1>;    /* MX6Q_PAD_EIM_D28__I2C1_SDA */
+                                       };
+                               };
+
+                               serial2 {
+                                       pinctrl_serial2_1: serial2grp-1 {
+                                               fsl,pins = <183 0x1b0b1         /* MX6Q_PAD_EIM_D26__UART2_TXD */
+                                                           191 0x1b0b1>;       /* MX6Q_PAD_EIM_D27__UART2_RXD */
+                                       };
+                               };
+
+                               usdhc3 {
+                                       pinctrl_usdhc3_1: usdhc3grp-1 {
+                                               fsl,pins = <1273 0x17059        /* MX6Q_PAD_SD3_CMD__USDHC3_CMD */
+                                                           1281 0x10059        /* MX6Q_PAD_SD3_CLK__USDHC3_CLK */
+                                                           1289 0x17059        /* MX6Q_PAD_SD3_DAT0__USDHC3_DAT0 */
+                                                           1297 0x17059        /* MX6Q_PAD_SD3_DAT1__USDHC3_DAT1 */
+                                                           1305 0x17059        /* MX6Q_PAD_SD3_DAT2__USDHC3_DAT2 */
+                                                           1312 0x17059        /* MX6Q_PAD_SD3_DAT3__USDHC3_DAT3 */
+                                                           1265 0x17059        /* MX6Q_PAD_SD3_DAT4__USDHC3_DAT4 */
+                                                           1257 0x17059        /* MX6Q_PAD_SD3_DAT5__USDHC3_DAT5 */
+                                                           1249 0x17059        /* MX6Q_PAD_SD3_DAT6__USDHC3_DAT6 */
+                                                           1241 0x17059>;      /* MX6Q_PAD_SD3_DAT7__USDHC3_DAT7 */
+                                       };
+                               };
+
+                               usdhc4 {
+                                       pinctrl_usdhc4_1: usdhc4grp-1 {
+                                               fsl,pins = <1386 0x17059        /* MX6Q_PAD_SD4_CMD__USDHC4_CMD */
+                                                           1392 0x10059        /* MX6Q_PAD_SD4_CLK__USDHC4_CLK */
+                                                           1462 0x17059        /* MX6Q_PAD_SD4_DAT0__USDHC4_DAT0 */
+                                                           1470 0x17059        /* MX6Q_PAD_SD4_DAT1__USDHC4_DAT1 */
+                                                           1478 0x17059        /* MX6Q_PAD_SD4_DAT2__USDHC4_DAT2 */
+                                                           1486 0x17059        /* MX6Q_PAD_SD4_DAT3__USDHC4_DAT3 */
+                                                           1493 0x17059        /* MX6Q_PAD_SD4_DAT4__USDHC4_DAT4 */
+                                                           1501 0x17059        /* MX6Q_PAD_SD4_DAT5__USDHC4_DAT5 */
+                                                           1509 0x17059        /* MX6Q_PAD_SD4_DAT6__USDHC4_DAT6 */
+                                                           1517 0x17059>;      /* MX6Q_PAD_SD4_DAT7__USDHC4_DAT7 */
+                                       };
+                               };
                        };
 
                        dcic@020e4000 { /* DCIC1 */
                                reg = <0x0217c000 0x4000>;
                        };
 
-                       enet@02188000 {
+                       ethernet@02188000 {
                                compatible = "fsl,imx6q-fec";
                                reg = <0x02188000 0x4000>;
                                interrupts = <0 118 0x04 0 119 0x04>;
                        };
 
                        audmux@021d8000 {
+                               compatible = "fsl,imx6q-audmux", "fsl,imx31-audmux";
                                reg = <0x021d8000 0x4000>;
+                               status = "disabled";
                        };
 
                        mipi@021dc000 { /* MIPI-CSI */
                                interrupts = <0 18 0x04>;
                        };
 
-                       uart2: uart@021e8000 {
+                       uart2: serial@021e8000 {
                                compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
                                reg = <0x021e8000 0x4000>;
                                interrupts = <0 27 0x04>;
                                status = "disabled";
                        };
 
-                       uart3: uart@021ec000 {
+                       uart3: serial@021ec000 {
                                compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
                                reg = <0x021ec000 0x4000>;
                                interrupts = <0 28 0x04>;
                                status = "disabled";
                        };
 
-                       uart4: uart@021f0000 {
+                       uart4: serial@021f0000 {
                                compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
                                reg = <0x021f0000 0x4000>;
                                interrupts = <0 29 0x04>;
                                status = "disabled";
                        };
 
-                       uart5: uart@021f4000 {
+                       uart5: serial@021f4000 {
                                compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
                                reg = <0x021f4000 0x4000>;
                                interrupts = <0 30 0x04>;
index 2d696866f71c4117973b8055d3ebfbc25b16786d..3f5dad801a9806ad3173a0d06dedc0c4a37e8388 100644 (file)
                        gpio: gpio@40028000 {
                                compatible = "nxp,lpc3220-gpio";
                                reg = <0x40028000 0x1000>;
-                               /* create a private address space for enumeration */
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-
-                               gpio_p0: gpio-bank@0 {
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       reg = <0>;
-                               };
-
-                               gpio_p1: gpio-bank@1 {
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       reg = <1>;
-                               };
-
-                               gpio_p2: gpio-bank@2 {
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       reg = <2>;
-                               };
-
-                               gpio_p3: gpio-bank@3 {
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       reg = <3>;
-                               };
-
-                               gpi_p3: gpio-bank@4 {
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       reg = <4>;
-                               };
-
-                               gpo_p3: gpio-bank@5 {
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       reg = <5>;
-                               };
+                               gpio-controller;
+                               #gpio-cells = <3>; /* bank, pin, flags */
                        };
 
                        watchdog@4003C000 {
index 8c756be4d7adbf4b15bfc83ae6dcda1324f68453..5b4506c0a8c47d7e1ef506db75cbad9da303b3ec 100644 (file)
@@ -57,7 +57,7 @@
 &mmc1 {
        vmmc-supply = <&vmmc1>;
        vmmc_aux-supply = <&vsim>;
-       ti,bus-width = <8>;
+       bus-width = <8>;
 };
 
 &mmc2 {
index e671361bc79135abb97b8754d998a7977d348f49..1efe0c5879855ebbc4165d09f21403eb86ed9be6 100644 (file)
@@ -70,7 +70,7 @@
 
 &mmc1 {
        vmmc-supply = <&vmmc>;
-       ti,bus-width = <8>;
+       bus-width = <8>;
 };
 
 &mmc2 {
@@ -87,5 +87,5 @@
 
 &mmc5 {
        ti,non-removable;
-       ti,bus-width = <4>;
+       bus-width = <4>;
 };
index e5eeb6f9c6e668e596c8a24d3e2699b81a26458e..d08c4d1372800a0f944489ace4495db188c499c7 100644 (file)
 
 &mmc1 {
        vmmc-supply = <&vmmc>;
-       ti,bus-width = <8>;
+       bus-width = <8>;
 };
 
 &mmc2 {
        vmmc-supply = <&vaux1>;
-       ti,bus-width = <8>;
+       bus-width = <8>;
        ti,non-removable;
 };
 
 };
 
 &mmc5 {
-       ti,bus-width = <4>;
+       bus-width = <4>;
        ti,non-removable;
 };
index 0167e86314c011bc8dc6141a4a3f2cbef6d4dd36..c4ff6d1a018bbee575fd99432c518d0ed530e769 100644 (file)
                compatible = "gpio-leds";
 
                led0 {
-                       gpios = <&gpo_p3 1 1>; /* GPO_P3 1, GPIO 80, active low */
+                       gpios = <&gpio 5 1 1>; /* GPO_P3 1, GPIO 80, active low */
                        linux,default-trigger = "heartbeat";
                        default-state = "off";
                };
 
                led1 {
-                       gpios = <&gpo_p3 14 1>; /* GPO_P3 14, GPIO 93, active low */
+                       gpios = <&gpio 5 14 1>; /* GPO_P3 14, GPIO 93, active low */
                        linux,default-trigger = "timer";
                        default-state = "off";
                };
index d99dc04f0d910813ddd2561ce0f818070dfe7935..ec3c339751104c43594062c30a47f2601b02b731 100644 (file)
                reg = <0x00000000 0x20000000>;
        };
 
+       en_3v3_reg: en_3v3 {
+               compatible = "regulator-fixed";
+                regulator-name = "en-3v3-fixed-supply";
+                regulator-min-microvolt = <3300000>;
+                regulator-max-microvolt = <3300000>;
+                gpios = <&gpio0 26  0x4>; // 26
+                startup-delay-us = <5000>;
+                enable-active-high;
+       };
+
        gpio_keys {
                compatible = "gpio-keys";
                #address-cells = <1>;
                        wakeup = <1>;
                        linux,code = <2>;
                        label = "userpb";
-                       gpios = <&gpio1 0 0>;
+                       gpios = <&gpio1 0 0x4>;
                };
                button@2 {
                        debounce_interval = <50>;
                        wakeup = <1>;
                        linux,code = <3>;
                        label = "extkb1";
-                       gpios = <&gpio4 23 0>;
+                       gpios = <&gpio4 23 0x4>;
                };
                button@3 {
                        debounce_interval = <50>;
                        wakeup = <1>;
                        linux,code = <4>;
                        label = "extkb2";
-                       gpios = <&gpio4 24 0>;
+                       gpios = <&gpio4 24 0x4>;
                };
                button@4 {
                        debounce_interval = <50>;
                        wakeup = <1>;
                        linux,code = <5>;
                        label = "extkb3";
-                       gpios = <&gpio5 1 0>;
+                       gpios = <&gpio5 1 0x4>;
                };
                button@5 {
                        debounce_interval = <50>;
                        wakeup = <1>;
                        linux,code = <6>;
                        label = "extkb4";
-                       gpios = <&gpio5 2 0>;
+                       gpios = <&gpio5 2 0x4>;
                };
        };
 
                compatible = "gpio-leds";
                used-led {
                        label = "user_led";
-                       gpios = <&gpio4 14>;
+                       gpios = <&gpio4 14 0x4>;
                };
        };
 
        soc-u9500 {
-
                external-bus@50000000 {
                        status = "okay";
 
@@ -80,6 +89,9 @@
                                reg = <0 0x10000>;
                                interrupts = <12 0x1>;
                                interrupt-parent = <&gpio4>;
+                               vdd33a-supply = <&en_3v3_reg>;
+                               vddvario-supply = <&db8500_vape_reg>;
+
 
                                reg-shift = <1>;
                                reg-io-width = <2>;
 
                sdi@80126000 {
                        status = "enabled";
-                       cd-gpios = <&gpio6 26>;
+                       vmmc-supply = <&ab8500_ldo_aux3_reg>;
+                       cd-gpios  = <&gpio6 26 0x4>; // 218
                };
 
                sdi@80114000 {
                        status = "enabled";
+                       vmmc-supply = <&ab8500_ldo_aux2_reg>;
                };
 
                uart@80120000 {
                        tc3589x@42 {
                                //compatible = "tc3589x";
                                reg = <0x42>;
-                               interrupts = <25>;
+                               gpios = <&gpio6 25 0x4>;
                                interrupt-parent = <&gpio6>;
                        };
                        tps61052@33 {
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
new file mode 100644 (file)
index 0000000..8314e41
--- /dev/null
@@ -0,0 +1,292 @@
+/*
+ * DTS file for SPEAr1310 Evaluation Board
+ *
+ * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "spear1310.dtsi"
+
+/ {
+       model = "ST SPEAr1310 Evaluation Board";
+       compatible = "st,spear1310-evb", "st,spear1310";
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       memory {
+               reg = <0 0x40000000>;
+       };
+
+       ahb {
+               pinmux@e0700000 {
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&state_default>;
+
+                       state_default: pinmux {
+                               i2c0-pmx {
+                                       st,pins = "i2c0_grp";
+                                       st,function = "i2c0";
+                               };
+                               i2s1 {
+                                       st,pins = "i2s1_grp";
+                                       st,function = "i2s1";
+                               };
+                               gpio {
+                                       st,pins = "arm_gpio_grp";
+                                       st,function = "arm_gpio";
+                               };
+                               eth {
+                                       st,pins = "gmii_grp";
+                                       st,function = "gmii";
+                               };
+                               ssp0 {
+                                       st,pins = "ssp0_grp";
+                                       st,function = "ssp0";
+                               };
+                               kbd {
+                                       st,pins = "keyboard_6x6_grp";
+                                       st,function = "keyboard";
+                               };
+                               sdhci {
+                                       st,pins = "sdhci_grp";
+                                       st,function = "sdhci";
+                               };
+                               smi-pmx {
+                                       st,pins = "smi_2_chips_grp";
+                                       st,function = "smi";
+                               };
+                               uart0 {
+                                       st,pins = "uart0_grp";
+                                       st,function = "uart0";
+                               };
+                               rs485 {
+                                       st,pins = "rs485_0_1_tdm_0_1_grp";
+                                       st,function = "rs485_0_1_tdm_0_1";
+                               };
+                               i2c1_2 {
+                                       st,pins = "i2c_1_2_grp";
+                                       st,function = "i2c_1_2";
+                               };
+                               pci {
+                                       st,pins = "pcie0_grp","pcie1_grp",
+                                               "pcie2_grp";
+                                       st,function = "pci";
+                               };
+                               smii {
+                                       st,pins = "smii_0_1_2_grp";
+                                       st,function = "smii_0_1_2";
+                               };
+                               nand {
+                                       st,pins = "nand_8bit_grp",
+                                               "nand_16bit_grp";
+                                       st,function = "nand";
+                               };
+                       };
+               };
+
+               ahci@b1000000 {
+                       status = "okay";
+               };
+
+               cf@b2800000 {
+                       status = "okay";
+               };
+
+               dma@ea800000 {
+                       status = "okay";
+               };
+
+               dma@eb000000 {
+                       status = "okay";
+               };
+
+               fsmc: flash@b0000000 {
+                       status = "okay";
+               };
+
+               gmac0: eth@e2000000 {
+                       status = "okay";
+               };
+
+               sdhci@b3000000 {
+                       status = "okay";
+               };
+
+               smi: flash@ea000000 {
+                       status = "okay";
+                       clock-rate=<50000000>;
+
+                       flash@e6000000 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0xe6000000 0x800000>;
+                               st,smi-fast-mode;
+
+                               partition@0 {
+                                       label = "xloader";
+                                       reg = <0x0 0x10000>;
+                               };
+                               partition@10000 {
+                                       label = "u-boot";
+                                       reg = <0x10000 0x40000>;
+                               };
+                               partition@50000 {
+                                       label = "linux";
+                                       reg = <0x50000 0x2c0000>;
+                               };
+                               partition@310000 {
+                                       label = "rootfs";
+                                       reg = <0x310000 0x4f0000>;
+                               };
+                       };
+               };
+
+               spi0: spi@e0100000 {
+                       status = "okay";
+               };
+
+               ehci@e4800000 {
+                       status = "okay";
+               };
+
+               ehci@e5800000 {
+                       status = "okay";
+               };
+
+               ohci@e4000000 {
+                       status = "okay";
+               };
+
+               ohci@e5000000 {
+                       status = "okay";
+               };
+
+               apb {
+                       adc@e0080000 {
+                               status = "okay";
+                       };
+
+                       gpio0: gpio@e0600000 {
+                              status = "okay";
+                       };
+
+                       gpio1: gpio@e0680000 {
+                              status = "okay";
+                       };
+
+                       i2c0: i2c@e0280000 {
+                              status = "okay";
+                       };
+
+                       i2c1: i2c@5cd00000 {
+                              status = "okay";
+                       };
+
+                       kbd@e0300000 {
+                               linux,keymap = < 0x00000001
+                                                0x00010002
+                                                0x00020003
+                                                0x00030004
+                                                0x00040005
+                                                0x00050006
+                                                0x00060007
+                                                0x00070008
+                                                0x00080009
+                                                0x0100000a
+                                                0x0101000c
+                                                0x0102000d
+                                                0x0103000e
+                                                0x0104000f
+                                                0x01050010
+                                                0x01060011
+                                                0x01070012
+                                                0x01080013
+                                                0x02000014
+                                                0x02010015
+                                                0x02020016
+                                                0x02030017
+                                                0x02040018
+                                                0x02050019
+                                                0x0206001a
+                                                0x0207001b
+                                                0x0208001c
+                                                0x0300001d
+                                                0x0301001e
+                                                0x0302001f
+                                                0x03030020
+                                                0x03040021
+                                                0x03050022
+                                                0x03060023
+                                                0x03070024
+                                                0x03080025
+                                                0x04000026
+                                                0x04010027
+                                                0x04020028
+                                                0x04030029
+                                                0x0404002a
+                                                0x0405002b
+                                                0x0406002c
+                                                0x0407002d
+                                                0x0408002e
+                                                0x0500002f
+                                                0x05010030
+                                                0x05020031
+                                                0x05030032
+                                                0x05040033
+                                                0x05050034
+                                                0x05060035
+                                                0x05070036
+                                                0x05080037
+                                                0x06000038
+                                                0x06010039
+                                                0x0602003a
+                                                0x0603003b
+                                                0x0604003c
+                                                0x0605003d
+                                                0x0606003e
+                                                0x0607003f
+                                                0x06080040
+                                                0x07000041
+                                                0x07010042
+                                                0x07020043
+                                                0x07030044
+                                                0x07040045
+                                                0x07050046
+                                                0x07060047
+                                                0x07070048
+                                                0x07080049
+                                                0x0800004a
+                                                0x0801004b
+                                                0x0802004c
+                                                0x0803004d
+                                                0x0804004e
+                                                0x0805004f
+                                                0x08060050
+                                                0x08070051
+                                                0x08080052 >;
+                              autorepeat;
+                              st,mode = <0>;
+                              status = "okay";
+                       };
+
+                       rtc@e0580000 {
+                              status = "okay";
+                       };
+
+                       serial@e0000000 {
+                              status = "okay";
+                       };
+
+                       wdt@ec800620 {
+                              status = "okay";
+                       };
+               };
+       };
+};
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
new file mode 100644 (file)
index 0000000..9e61da4
--- /dev/null
@@ -0,0 +1,184 @@
+/*
+ * DTS file for all SPEAr1310 SoCs
+ *
+ * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "spear13xx.dtsi"
+
+/ {
+       compatible = "st,spear1310";
+
+       ahb {
+               ahci@b1000000 {
+                       compatible = "snps,spear-ahci";
+                       reg = <0xb1000000 0x10000>;
+                       interrupts = <0 68 0x4>;
+                       status = "disabled";
+               };
+
+               ahci@b1800000 {
+                       compatible = "snps,spear-ahci";
+                       reg = <0xb1800000 0x10000>;
+                       interrupts = <0 69 0x4>;
+                       status = "disabled";
+               };
+
+               ahci@b4000000 {
+                       compatible = "snps,spear-ahci";
+                       reg = <0xb4000000 0x10000>;
+                       interrupts = <0 70 0x4>;
+                       status = "disabled";
+               };
+
+               gmac1: eth@5c400000 {
+                       compatible = "st,spear600-gmac";
+                       reg = <0x5c400000 0x8000>;
+                       interrupts = <0 95 0x4>;
+                       interrupt-names = "macirq";
+                       status = "disabled";
+               };
+
+               gmac2: eth@5c500000 {
+                       compatible = "st,spear600-gmac";
+                       reg = <0x5c500000 0x8000>;
+                       interrupts = <0 96 0x4>;
+                       interrupt-names = "macirq";
+                       status = "disabled";
+               };
+
+               gmac3: eth@5c600000 {
+                       compatible = "st,spear600-gmac";
+                       reg = <0x5c600000 0x8000>;
+                       interrupts = <0 97 0x4>;
+                       interrupt-names = "macirq";
+                       status = "disabled";
+               };
+
+               gmac4: eth@5c700000 {
+                       compatible = "st,spear600-gmac";
+                       reg = <0x5c700000 0x8000>;
+                       interrupts = <0 98 0x4>;
+                       interrupt-names = "macirq";
+                       status = "disabled";
+               };
+
+               spi1: spi@5d400000 {
+                       compatible = "arm,pl022", "arm,primecell";
+                       reg = <0x5d400000 0x1000>;
+                       interrupts = <0 99 0x4>;
+                       status = "disabled";
+               };
+
+               apb {
+                       i2c1: i2c@5cd00000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "snps,designware-i2c";
+                               reg = <0x5cd00000 0x1000>;
+                               interrupts = <0 87 0x4>;
+                               status = "disabled";
+                       };
+
+                       i2c2: i2c@5ce00000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "snps,designware-i2c";
+                               reg = <0x5ce00000 0x1000>;
+                               interrupts = <0 88 0x4>;
+                               status = "disabled";
+                       };
+
+                       i2c3: i2c@5cf00000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "snps,designware-i2c";
+                               reg = <0x5cf00000 0x1000>;
+                               interrupts = <0 89 0x4>;
+                               status = "disabled";
+                       };
+
+                       i2c4: i2c@5d000000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "snps,designware-i2c";
+                               reg = <0x5d000000 0x1000>;
+                               interrupts = <0 90 0x4>;
+                               status = "disabled";
+                       };
+
+                       i2c5: i2c@5d100000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "snps,designware-i2c";
+                               reg = <0x5d100000 0x1000>;
+                               interrupts = <0 91 0x4>;
+                               status = "disabled";
+                       };
+
+                       i2c6: i2c@5d200000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "snps,designware-i2c";
+                               reg = <0x5d200000 0x1000>;
+                               interrupts = <0 92 0x4>;
+                               status = "disabled";
+                       };
+
+                       i2c7: i2c@5d300000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "snps,designware-i2c";
+                               reg = <0x5d300000 0x1000>;
+                               interrupts = <0 93 0x4>;
+                               status = "disabled";
+                       };
+
+                       serial@5c800000 {
+                               compatible = "arm,pl011", "arm,primecell";
+                               reg = <0x5c800000 0x1000>;
+                               interrupts = <0 82 0x4>;
+                               status = "disabled";
+                       };
+
+                       serial@5c900000 {
+                               compatible = "arm,pl011", "arm,primecell";
+                               reg = <0x5c900000 0x1000>;
+                               interrupts = <0 83 0x4>;
+                               status = "disabled";
+                       };
+
+                       serial@5ca00000 {
+                               compatible = "arm,pl011", "arm,primecell";
+                               reg = <0x5ca00000 0x1000>;
+                               interrupts = <0 84 0x4>;
+                               status = "disabled";
+                       };
+
+                       serial@5cb00000 {
+                               compatible = "arm,pl011", "arm,primecell";
+                               reg = <0x5cb00000 0x1000>;
+                               interrupts = <0 85 0x4>;
+                               status = "disabled";
+                       };
+
+                       serial@5cc00000 {
+                               compatible = "arm,pl011", "arm,primecell";
+                               reg = <0x5cc00000 0x1000>;
+                               interrupts = <0 86 0x4>;
+                               status = "disabled";
+                       };
+
+                       thermal@e07008c4 {
+                               st,thermal-flags = <0x7000>;
+                       };
+               };
+       };
+};
diff --git a/arch/arm/boot/dts/spear1340-evb.dts b/arch/arm/boot/dts/spear1340-evb.dts
new file mode 100644 (file)
index 0000000..0d8472e
--- /dev/null
@@ -0,0 +1,308 @@
+/*
+ * DTS file for SPEAr1340 Evaluation Board
+ *
+ * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "spear1340.dtsi"
+
+/ {
+       model = "ST SPEAr1340 Evaluation Board";
+       compatible = "st,spear1340-evb", "st,spear1340";
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       memory {
+               reg = <0 0x40000000>;
+       };
+
+       ahb {
+               pinmux@e0700000 {
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&state_default>;
+
+                       state_default: pinmux {
+                               pads_as_gpio {
+                                       st,pins = "pads_as_gpio_grp";
+                                       st,function = "pads_as_gpio";
+                               };
+                               fsmc {
+                                       st,pins = "fsmc_8bit_grp";
+                                       st,function = "fsmc";
+                               };
+                               kbd {
+                                       st,pins = "keyboard_row_col_grp",
+                                               "keyboard_col5_grp";
+                                       st,function = "keyboard";
+                               };
+                               uart0 {
+                                       st,pins = "uart0_grp", "uart0_enh_grp";
+                                       st,function = "uart0";
+                               };
+                               i2c0-pmx {
+                                       st,pins = "i2c0_grp";
+                                       st,function = "i2c0";
+                               };
+                               i2c1-pmx {
+                                       st,pins = "i2c1_grp";
+                                       st,function = "i2c1";
+                               };
+                               spdif-in {
+                                       st,pins = "spdif_in_grp";
+                                       st,function = "spdif_in";
+                               };
+                               spdif-out {
+                                       st,pins = "spdif_out_grp";
+                                       st,function = "spdif_out";
+                               };
+                               ssp0 {
+                                       st,pins = "ssp0_grp", "ssp0_cs1_grp",
+                                               "ssp0_cs3_grp";
+                                       st,function = "ssp0";
+                               };
+                               pwm {
+                                       st,pins = "pwm2_grp", "pwm3_grp";
+                                       st,function = "pwm";
+                               };
+                               smi-pmx {
+                                       st,pins = "smi_grp";
+                                       st,function = "smi";
+                               };
+                               i2s {
+                                       st,pins = "i2s_in_grp", "i2s_out_grp";
+                                       st,function = "i2s";
+                               };
+                               gmac {
+                                       st,pins = "gmii_grp", "rgmii_grp";
+                                       st,function = "gmac";
+                               };
+                               cam3 {
+                                       st,pins = "cam3_grp";
+                                       st,function = "cam3";
+                               };
+                               cec0 {
+                                       st,pins = "cec0_grp";
+                                       st,function = "cec0";
+                               };
+                               cec1 {
+                                       st,pins = "cec1_grp";
+                                       st,function = "cec1";
+                               };
+                               sdhci {
+                                       st,pins = "sdhci_grp";
+                                       st,function = "sdhci";
+                               };
+                               clcd {
+                                       st,pins = "clcd_grp";
+                                       st,function = "clcd";
+                               };
+                               sata {
+                                       st,pins = "sata_grp";
+                                       st,function = "sata";
+                               };
+                       };
+               };
+
+               dma@ea800000 {
+                       status = "okay";
+               };
+
+               dma@eb000000 {
+                       status = "okay";
+               };
+
+               fsmc: flash@b0000000 {
+                       status = "okay";
+               };
+
+               gmac0: eth@e2000000 {
+                       status = "okay";
+               };
+
+               sdhci@b3000000 {
+                       status = "okay";
+               };
+
+               smi: flash@ea000000 {
+                       status = "okay";
+                       clock-rate=<50000000>;
+
+                       flash@e6000000 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0xe6000000 0x800000>;
+                               st,smi-fast-mode;
+
+                               partition@0 {
+                                       label = "xloader";
+                                       reg = <0x0 0x10000>;
+                               };
+                               partition@10000 {
+                                       label = "u-boot";
+                                       reg = <0x10000 0x40000>;
+                               };
+                               partition@50000 {
+                                       label = "linux";
+                                       reg = <0x50000 0x2c0000>;
+                               };
+                               partition@310000 {
+                                       label = "rootfs";
+                                       reg = <0x310000 0x4f0000>;
+                               };
+                       };
+               };
+
+               spi0: spi@e0100000 {
+                       status = "okay";
+               };
+
+               ehci@e4800000 {
+                       status = "okay";
+               };
+
+               ehci@e5800000 {
+                       status = "okay";
+               };
+
+               ohci@e4000000 {
+                       status = "okay";
+               };
+
+               ohci@e5000000 {
+                       status = "okay";
+               };
+
+               apb {
+                       adc@e0080000 {
+                               status = "okay";
+                       };
+
+                       gpio0: gpio@e0600000 {
+                              status = "okay";
+                       };
+
+                       gpio1: gpio@e0680000 {
+                              status = "okay";
+                       };
+
+                       i2c0: i2c@e0280000 {
+                              status = "okay";
+                       };
+
+                       i2c1: i2c@b4000000 {
+                              status = "okay";
+                       };
+
+                       kbd@e0300000 {
+                               linux,keymap = < 0x00000001
+                                                0x00010002
+                                                0x00020003
+                                                0x00030004
+                                                0x00040005
+                                                0x00050006
+                                                0x00060007
+                                                0x00070008
+                                                0x00080009
+                                                0x0100000a
+                                                0x0101000c
+                                                0x0102000d
+                                                0x0103000e
+                                                0x0104000f
+                                                0x01050010
+                                                0x01060011
+                                                0x01070012
+                                                0x01080013
+                                                0x02000014
+                                                0x02010015
+                                                0x02020016
+                                                0x02030017
+                                                0x02040018
+                                                0x02050019
+                                                0x0206001a
+                                                0x0207001b
+                                                0x0208001c
+                                                0x0300001d
+                                                0x0301001e
+                                                0x0302001f
+                                                0x03030020
+                                                0x03040021
+                                                0x03050022
+                                                0x03060023
+                                                0x03070024
+                                                0x03080025
+                                                0x04000026
+                                                0x04010027
+                                                0x04020028
+                                                0x04030029
+                                                0x0404002a
+                                                0x0405002b
+                                                0x0406002c
+                                                0x0407002d
+                                                0x0408002e
+                                                0x0500002f
+                                                0x05010030
+                                                0x05020031
+                                                0x05030032
+                                                0x05040033
+                                                0x05050034
+                                                0x05060035
+                                                0x05070036
+                                                0x05080037
+                                                0x06000038
+                                                0x06010039
+                                                0x0602003a
+                                                0x0603003b
+                                                0x0604003c
+                                                0x0605003d
+                                                0x0606003e
+                                                0x0607003f
+                                                0x06080040
+                                                0x07000041
+                                                0x07010042
+                                                0x07020043
+                                                0x07030044
+                                                0x07040045
+                                                0x07050046
+                                                0x07060047
+                                                0x07070048
+                                                0x07080049
+                                                0x0800004a
+                                                0x0801004b
+                                                0x0802004c
+                                                0x0803004d
+                                                0x0804004e
+                                                0x0805004f
+                                                0x08060050
+                                                0x08070051
+                                                0x08080052 >;
+                              autorepeat;
+                              st,mode = <0>;
+                              status = "okay";
+                       };
+
+                       rtc@e0580000 {
+                              status = "okay";
+                       };
+
+                       serial@e0000000 {
+                              status = "okay";
+                       };
+
+                       serial@b4100000 {
+                              status = "okay";
+                       };
+
+                       wdt@ec800620 {
+                              status = "okay";
+                       };
+               };
+       };
+};
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
new file mode 100644 (file)
index 0000000..a26fc47
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * DTS file for all SPEAr1340 SoCs
+ *
+ * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "spear13xx.dtsi"
+
+/ {
+       compatible = "st,spear1340";
+
+       ahb {
+               ahci@b1000000 {
+                       compatible = "snps,spear-ahci";
+                       reg = <0xb1000000 0x10000>;
+                       interrupts = <0 72 0x4>;
+                       status = "disabled";
+               };
+
+               spi1: spi@5d400000 {
+                       compatible = "arm,pl022", "arm,primecell";
+                       reg = <0x5d400000 0x1000>;
+                       interrupts = <0 99 0x4>;
+                       status = "disabled";
+               };
+
+               apb {
+                       i2c1: i2c@b4000000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "snps,designware-i2c";
+                               reg = <0xb4000000 0x1000>;
+                               interrupts = <0 104 0x4>;
+                               status = "disabled";
+                       };
+
+                       serial@b4100000 {
+                               compatible = "arm,pl011", "arm,primecell";
+                               reg = <0xb4100000 0x1000>;
+                               interrupts = <0 105 0x4>;
+                               status = "disabled";
+                       };
+
+                       thermal@e07008c4 {
+                               st,thermal-flags = <0x2a00>;
+                       };
+               };
+       };
+};
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
new file mode 100644 (file)
index 0000000..1f8e1e1
--- /dev/null
@@ -0,0 +1,262 @@
+/*
+ * DTS file for all SPEAr13xx SoCs
+ *
+ * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+       interrupt-parent = <&gic>;
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       compatible = "arm,cortex-a9";
+                       reg = <0>;
+                       next-level-cache = <&L2>;
+               };
+
+               cpu@1 {
+                       compatible = "arm,cortex-a9";
+                       reg = <1>;
+                       next-level-cache = <&L2>;
+               };
+       };
+
+       gic: interrupt-controller@ec801000 {
+               compatible = "arm,cortex-a9-gic";
+               interrupt-controller;
+               #interrupt-cells = <3>;
+               reg = < 0xec801000 0x1000 >,
+                     < 0xec800100 0x0100 >;
+       };
+
+       pmu {
+               compatible = "arm,cortex-a9-pmu";
+               interrupts = <0 8 0x04
+                             0 9 0x04>;
+       };
+
+       L2: l2-cache {
+                   compatible = "arm,pl310-cache";
+                   reg = <0xed000000 0x1000>;
+                   cache-unified;
+                   cache-level = <2>;
+       };
+
+       memory {
+               name = "memory";
+               device_type = "memory";
+               reg = <0 0x40000000>;
+       };
+
+       chosen {
+               bootargs = "console=ttyAMA0,115200";
+       };
+
+       ahb {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "simple-bus";
+               ranges = <0x50000000 0x50000000 0x10000000
+                         0xb0000000 0xb0000000 0x10000000
+                         0xe0000000 0xe0000000 0x10000000>;
+
+               sdhci@b3000000 {
+                       compatible = "st,sdhci-spear";
+                       reg = <0xb3000000 0x100>;
+                       interrupts = <0 28 0x4>;
+                       status = "disabled";
+               };
+
+               cf@b2800000 {
+                       compatible = "arasan,cf-spear1340";
+                       reg = <0xb2800000 0x100>;
+                       interrupts = <0 29 0x4>;
+                       status = "disabled";
+               };
+
+               dma@ea800000 {
+                       compatible = "snps,dma-spear1340";
+                       reg = <0xea800000 0x1000>;
+                       interrupts = <0 19 0x4>;
+                       status = "disabled";
+               };
+
+               dma@eb000000 {
+                       compatible = "snps,dma-spear1340";
+                       reg = <0xeb000000 0x1000>;
+                       interrupts = <0 59 0x4>;
+                       status = "disabled";
+               };
+
+               fsmc: flash@b0000000 {
+                       compatible = "st,spear600-fsmc-nand";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       reg = <0xb0000000 0x1000        /* FSMC Register */
+                              0xb0800000 0x0010>;      /* NAND Base */
+                       reg-names = "fsmc_regs", "nand_data";
+                       interrupts = <0 20 0x4
+                                     0 21 0x4
+                                     0 22 0x4
+                                     0 23 0x4>;
+                       st,ale-off = <0x20000>;
+                       st,cle-off = <0x10000>;
+                       status = "disabled";
+               };
+
+               gmac0: eth@e2000000 {
+                       compatible = "st,spear600-gmac";
+                       reg = <0xe2000000 0x8000>;
+                       interrupts = <0 23 0x4
+                                     0 24 0x4>;
+                       interrupt-names = "macirq", "eth_wake_irq";
+                       status = "disabled";
+               };
+
+               smi: flash@ea000000 {
+                       compatible = "st,spear600-smi";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       reg = <0xea000000 0x1000>;
+                       interrupts = <0 30 0x4>;
+                       status = "disabled";
+               };
+
+               spi0: spi@e0100000 {
+                       compatible = "arm,pl022", "arm,primecell";
+                       reg = <0xe0100000 0x1000>;
+                       interrupts = <0 31 0x4>;
+                       status = "disabled";
+               };
+
+               ehci@e4800000 {
+                       compatible = "st,spear600-ehci", "usb-ehci";
+                       reg = <0xe4800000 0x1000>;
+                       interrupts = <0 64 0x4>;
+                       status = "disabled";
+               };
+
+               ehci@e5800000 {
+                       compatible = "st,spear600-ehci", "usb-ehci";
+                       reg = <0xe5800000 0x1000>;
+                       interrupts = <0 66 0x4>;
+                       status = "disabled";
+               };
+
+               ohci@e4000000 {
+                       compatible = "st,spear600-ohci", "usb-ohci";
+                       reg = <0xe4000000 0x1000>;
+                       interrupts = <0 65 0x4>;
+                       status = "disabled";
+               };
+
+               ohci@e5000000 {
+                       compatible = "st,spear600-ohci", "usb-ohci";
+                       reg = <0xe5000000 0x1000>;
+                       interrupts = <0 67 0x4>;
+                       status = "disabled";
+               };
+
+               apb {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "simple-bus";
+                       ranges = <0x50000000 0x50000000 0x10000000
+                                 0xb0000000 0xb0000000 0x10000000
+                                 0xe0000000 0xe0000000 0x10000000>;
+
+                       gpio0: gpio@e0600000 {
+                               compatible = "arm,pl061", "arm,primecell";
+                               reg = <0xe0600000 0x1000>;
+                               interrupts = <0 24 0x4>;
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               status = "disabled";
+                       };
+
+                       gpio1: gpio@e0680000 {
+                               compatible = "arm,pl061", "arm,primecell";
+                               reg = <0xe0680000 0x1000>;
+                               interrupts = <0 25 0x4>;
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               status = "disabled";
+                       };
+
+                       kbd@e0300000 {
+                               compatible = "st,spear300-kbd";
+                               reg = <0xe0300000 0x1000>;
+                               status = "disabled";
+                       };
+
+                       i2c0: i2c@e0280000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "snps,designware-i2c";
+                               reg = <0xe0280000 0x1000>;
+                               interrupts = <0 41 0x4>;
+                               status = "disabled";
+                       };
+
+                       rtc@e0580000 {
+                               compatible = "st,spear-rtc";
+                               reg = <0xe0580000 0x1000>;
+                               interrupts = <0 36 0x4>;
+                               status = "disabled";
+                       };
+
+                       serial@e0000000 {
+                               compatible = "arm,pl011", "arm,primecell";
+                               reg = <0xe0000000 0x1000>;
+                               interrupts = <0 36 0x4>;
+                               status = "disabled";
+                       };
+
+                       adc@e0080000 {
+                               compatible = "st,spear600-adc";
+                               reg = <0xe0080000 0x1000>;
+                               interrupts = <0 44 0x4>;
+                               status = "disabled";
+                       };
+
+                       timer@e0380000 {
+                               compatible = "st,spear-timer";
+                               reg = <0xe0380000 0x400>;
+                               interrupts = <0 37 0x4>;
+                       };
+
+                       timer@ec800600 {
+                               compatible = "arm,cortex-a9-twd-timer";
+                               reg = <0xec800600 0x20>;
+                               interrupts = <1 13 0x301>;
+                       };
+
+                       wdt@ec800620 {
+                               compatible = "arm,cortex-a9-twd-wdt";
+                               reg = <0xec800620 0x20>;
+                               status = "disabled";
+                       };
+
+                       thermal@e07008c4 {
+                               compatible = "st,thermal-spear1340";
+                               reg = <0xe07008c4 0x4>;
+                       };
+               };
+       };
+};
index 910e264b87c0b4df9ba8d78fd5dbf5aba2e8df2c..fc82b1a264588b34e31a251dd6c886ef754f39e4 100644 (file)
 
                smi: flash@fc000000 {
                        status = "okay";
+                       clock-rate=<50000000>;
+
+                       flash@f8000000 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0xf8000000 0x800000>;
+                               st,smi-fast-mode;
+
+                               partition@0 {
+                                       label = "xloader";
+                                       reg = <0x0 0x10000>;
+                               };
+                               partition@10000 {
+                                       label = "u-boot";
+                                       reg = <0x10000 0x40000>;
+                               };
+                               partition@50000 {
+                                       label = "linux";
+                                       reg = <0x50000 0x2c0000>;
+                               };
+                               partition@310000 {
+                                       label = "rootfs";
+                                       reg = <0x310000 0x4f0000>;
+                               };
+                       };
                };
 
                spi0: spi@d0100000 {
index 6d95317100adf1b1d85ce62b68bad18a32414d1f..dc5e2d445a9352e3774b52dfec8ce0cd5fbaf4d9 100644 (file)
                        clock-rate=<50000000>;
 
                        flash@f8000000 {
-                               label = "m25p64";
-                               reg = <0xf8000000 0x800000>;
                                #address-cells = <1>;
                                #size-cells = <1>;
+                               reg = <0xf8000000 0x800000>;
                                st,smi-fast-mode;
+
+                               partition@0 {
+                                       label = "xloader";
+                                       reg = <0x0 0x10000>;
+                               };
+                               partition@10000 {
+                                       label = "u-boot";
+                                       reg = <0x10000 0x40000>;
+                               };
+                               partition@50000 {
+                                       label = "linux";
+                                       reg = <0x50000 0x2c0000>;
+                               };
+                               partition@310000 {
+                                       label = "rootfs";
+                                       reg = <0x310000 0x4f0000>;
+                               };
                        };
                };
 
index 0c6463b71a37968c93fe9cfa879264929dc974a9..6308fa3bec1ec65c19ff3a22fda6f4297e2a26ac 100644 (file)
 
                smi: flash@fc000000 {
                        status = "okay";
+                       clock-rate=<50000000>;
+
+                       flash@f8000000 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0xf8000000 0x800000>;
+                               st,smi-fast-mode;
+
+                               partition@0 {
+                                       label = "xloader";
+                                       reg = <0x0 0x10000>;
+                               };
+                               partition@10000 {
+                                       label = "u-boot";
+                                       reg = <0x10000 0x40000>;
+                               };
+                               partition@50000 {
+                                       label = "linux";
+                                       reg = <0x50000 0x2c0000>;
+                               };
+                               partition@310000 {
+                                       label = "rootfs";
+                                       reg = <0x310000 0x4f0000>;
+                               };
+                       };
                };
 
                spi0: spi@d0100000 {
index 0ae7c8e86311ddc0950e11d97ad487a40d1542a2..91072553963f02566caf2d6a42c727540998a162 100644 (file)
                                interrupts = <12>;
                                status = "disabled";
                        };
+
+                       timer@f0000000 {
+                               compatible = "st,spear-timer";
+                               reg = <0xf0000000 0x400>;
+                               interrupts = <2>;
+                       };
                };
        };
 };
index 790a7a8a5ccd05f2e622d2256090bb3956718e7d..1119c22c9a829479b716263d1faf3b5db0c7c601 100644 (file)
                        status = "okay";
                };
 
+               smi: flash@fc000000 {
+                       status = "okay";
+                       clock-rate=<50000000>;
+
+                       flash@f8000000 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0xf8000000 0x800000>;
+                               st,smi-fast-mode;
+
+                               partition@0 {
+                                       label = "xloader";
+                                       reg = <0x0 0x10000>;
+                               };
+                               partition@10000 {
+                                       label = "u-boot";
+                                       reg = <0x10000 0x40000>;
+                               };
+                               partition@50000 {
+                                       label = "linux";
+                                       reg = <0x50000 0x2c0000>;
+                               };
+                               partition@310000 {
+                                       label = "rootfs";
+                                       reg = <0x310000 0x4f0000>;
+                               };
+                       };
+               };
+
                apb {
                        serial@d0000000 {
                                status = "okay";
index d777e3a6f178dee9bfba070d1850e47f4da2c507..089f0a42c50ef4244765efc59450011637e33a08 100644 (file)
                                interrupts = <28>;
                                status = "disabled";
                        };
+
+                       timer@f0000000 {
+                               compatible = "st,spear-timer";
+                               reg = <0xf0000000 0x400>;
+                               interrupts = <16>;
+                       };
                };
        };
 };
index 0a9f34a2c3aae9e09cf7a3e939ec5737e030be0b..36321bceec46beb8ae812bc1ff5e80f2db45fab0 100644 (file)
@@ -7,10 +7,10 @@
        compatible = "nvidia,cardhu", "nvidia,tegra30";
 
        memory {
-               reg = < 0x80000000 0x40000000 >;
+               reg = <0x80000000 0x40000000>;
        };
 
-       pinmux@70000000 {
+       pinmux {
                pinctrl-names = "default";
                pinctrl-0 = <&state_default>;
 
                                nvidia,pull = <2>;
                                nvidia,tristate = <0>;
                        };
+                       dap2_fs_pa2 {
+                               nvidia,pins =   "dap2_fs_pa2",
+                                               "dap2_sclk_pa3",
+                                               "dap2_din_pa4",
+                                               "dap2_dout_pa5";
+                               nvidia,function = "i2s1";
+                               nvidia,pull = <0>;
+                               nvidia,tristate = <0>;
+                       };
                };
        };
 
        serial@70006000 {
-               clock-frequency = < 408000000 >;
-       };
-
-       serial@70006040 {
-               status = "disable";
-       };
-
-       serial@70006200 {
-               status = "disable";
-       };
-
-       serial@70006300 {
-               status = "disable";
-       };
-
-       serial@70006400 {
-               status = "disable";
+               status = "okay";
+               clock-frequency = <408000000>;
        };
 
        i2c@7000c000 {
+               status = "okay";
                clock-frequency = <100000>;
        };
 
        i2c@7000c400 {
+               status = "okay";
                clock-frequency = <100000>;
        };
 
        i2c@7000c500 {
+               status = "okay";
                clock-frequency = <100000>;
+
+               /* ALS and Proximity sensor */
+               isl29028@44 {
+                       compatible = "isil,isl29028";
+                       reg = <0x44>;
+                       interrupt-parent = <&gpio>;
+                       interrupts = <88 0x04>; /*gpio PL0 */
+               };
        };
 
        i2c@7000c700 {
+               status = "okay";
                clock-frequency = <100000>;
        };
 
        i2c@7000d000 {
+               status = "okay";
                clock-frequency = <100000>;
+
+               wm8903: wm8903@1a {
+                       compatible = "wlf,wm8903";
+                       reg = <0x1a>;
+                       interrupt-parent = <&gpio>;
+                       interrupts = <179 0x04>; /* gpio PW3 */
+
+                       gpio-controller;
+                       #gpio-cells = <2>;
+
+                       micdet-cfg = <0>;
+                       micdet-delay = <100>;
+                       gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
+               };
+
+               tps62361 {
+                       compatible = "ti,tps62361";
+                       reg = <0x60>;
+
+                       regulator-name = "tps62361-vout";
+                       regulator-min-microvolt = <500000>;
+                       regulator-max-microvolt = <1500000>;
+                       regulator-boot-on;
+                       regulator-always-on;
+                       ti,vsel0-state-high;
+                       ti,vsel1-state-high;
+               };
+       };
+
+       ahub {
+               i2s@70080400 {
+                       status = "okay";
+               };
        };
 
        sdhci@78000000 {
+               status = "okay";
                cd-gpios = <&gpio 69 0>; /* gpio PI5 */
                wp-gpios = <&gpio 155 0>; /* gpio PT3 */
                power-gpios = <&gpio 31 0>; /* gpio PD7 */
+               bus-width = <4>;
        };
 
-       sdhci@78000200 {
-               status = "disable";
+       sdhci@78000600 {
+               status = "okay";
+               support-8bit;
+               bus-width = <8>;
        };
 
-       sdhci@78000400 {
-               status = "disable";
-       };
+       sound {
+               compatible = "nvidia,tegra-audio-wm8903-cardhu",
+                            "nvidia,tegra-audio-wm8903";
+               nvidia,model = "NVIDIA Tegra Cardhu";
 
-       sdhci@78000400 {
-               support-8bit;
+               nvidia,audio-routing =
+                       "Headphone Jack", "HPOUTR",
+                       "Headphone Jack", "HPOUTL",
+                       "Int Spk", "ROP",
+                       "Int Spk", "RON",
+                       "Int Spk", "LOP",
+                       "Int Spk", "LON",
+                       "Mic Jack", "MICBIAS",
+                       "IN1L", "Mic Jack";
+
+               nvidia,i2s-controller = <&tegra_i2s1>;
+               nvidia,audio-codec = <&wm8903>;
+
+               nvidia,spkr-en-gpios = <&wm8903 2 0>;
+               nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
        };
 };
index 1a0b1f182944b889838ffcd55ce374a721aa5453..7de701365fce6b9e6594e16491ad97da9cfa75fb 100644 (file)
@@ -6,11 +6,11 @@
        model = "NVIDIA Tegra2 Harmony evaluation board";
        compatible = "nvidia,harmony", "nvidia,tegra20";
 
-       memory@0 {
-               reg = < 0x00000000 0x40000000 >;
+       memory {
+               reg = <0x00000000 0x40000000>;
        };
 
-       pinmux@70000000 {
+       pinmux {
                pinctrl-names = "default";
                pinctrl-0 = <&state_default>;
 
                        };
                        conf_ata {
                                nvidia,pins = "ata", "atb", "atc", "atd", "ate",
-                                       "cdev1", "dap1", "dtb", "gma", "gmb",
-                                       "gmc", "gmd", "gme", "gpu7", "gpv",
-                                       "i2cp", "pta", "rm", "slxa", "slxk",
-                                       "spia", "spib";
+                                       "cdev1", "cdev2", "dap1", "dtb", "gma",
+                                       "gmb", "gmc", "gmd", "gme", "gpu7",
+                                       "gpv", "i2cp", "pta", "rm", "slxa",
+                                       "slxk", "spia", "spib", "uac";
                                nvidia,pull = <0>;
                                nvidia,tristate = <0>;
                        };
-                       conf_cdev2 {
-                               nvidia,pins = "cdev2", "csus", "spid", "spif";
-                               nvidia,pull = <1>;
-                               nvidia,tristate = <1>;
-                       };
                        conf_ck32 {
                                nvidia,pins = "ck32", "ddrc", "pmca", "pmcb",
                                        "pmcc", "pmcd", "pmce", "xm2c", "xm2d";
                                nvidia,pull = <0>;
                        };
+                       conf_csus {
+                               nvidia,pins = "csus", "spid", "spif";
+                               nvidia,pull = <1>;
+                               nvidia,tristate = <1>;
+                       };
                        conf_crtp {
                                nvidia,pins = "crtp", "dap2", "dap3", "dap4",
                                        "dtc", "dte", "dtf", "gpu", "sdio1",
                                        "slxc", "slxd", "spdi", "spdo", "spig",
-                                       "uac", "uda";
+                                       "uda";
                                nvidia,pull = <0>;
                                nvidia,tristate = <1>;
                        };
                };
        };
 
-       pmc@7000f400 {
-               nvidia,invert-interrupt;
+       i2s@70002800 {
+               status = "okay";
+       };
+
+       serial@70006300 {
+               status = "okay";
+               clock-frequency = <216000000>;
        };
 
        i2c@7000c000 {
+               status = "okay";
                clock-frequency = <400000>;
 
                wm8903: wm8903@1a {
                        compatible = "wlf,wm8903";
                        reg = <0x1a>;
                        interrupt-parent = <&gpio>;
-                       interrupts = < 187 0x04 >;
+                       interrupts = <187 0x04>;
 
                        gpio-controller;
                        #gpio-cells = <2>;
 
                        micdet-cfg = <0>;
                        micdet-delay = <100>;
-                       gpio-cfg = < 0xffffffff 0xffffffff 0 0xffffffff 0xffffffff >;
+                       gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
                };
        };
 
        i2c@7000c400 {
+               status = "okay";
                clock-frequency = <400000>;
        };
 
        i2c@7000c500 {
+               status = "okay";
                clock-frequency = <400000>;
        };
 
        i2c@7000d000 {
+               status = "okay";
                clock-frequency = <400000>;
        };
 
-       i2s@70002a00 {
-               status = "disable";
+       pmc {
+               nvidia,invert-interrupt;
+       };
+
+       usb@c5000000 {
+               status = "okay";
+       };
+
+       usb@c5004000 {
+               status = "okay";
+               nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+       };
+
+       usb@c5008000 {
+               status = "okay";
+       };
+
+       sdhci@c8000200 {
+               status = "okay";
+               cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+               wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+               power-gpios = <&gpio 155 0>; /* gpio PT3 */
+               bus-width = <4>;
+       };
+
+       sdhci@c8000600 {
+               status = "okay";
+               cd-gpios = <&gpio 58 0>; /* gpio PH2 */
+               wp-gpios = <&gpio 59 0>; /* gpio PH3 */
+               power-gpios = <&gpio 70 0>; /* gpio PI6 */
+               support-8bit;
+               bus-width = <8>;
        };
 
        sound {
                nvidia,int-mic-en-gpios = <&gpio 184 0>; /*gpio PX0 */
                nvidia,ext-mic-en-gpios = <&gpio 185 0>; /* gpio PX1 */
        };
-
-       serial@70006000 {
-               status = "disable";
-       };
-
-       serial@70006040 {
-               status = "disable";
-       };
-
-       serial@70006200 {
-               status = "disable";
-       };
-
-       serial@70006300 {
-               clock-frequency = < 216000000 >;
-       };
-
-       serial@70006400 {
-               status = "disable";
-       };
-
-       sdhci@c8000000 {
-               status = "disable";
-       };
-
-       sdhci@c8000200 {
-               cd-gpios = <&gpio 69 0>; /* gpio PI5 */
-               wp-gpios = <&gpio 57 0>; /* gpio PH1 */
-               power-gpios = <&gpio 155 0>; /* gpio PT3 */
-       };
-
-       sdhci@c8000400 {
-               status = "disable";
-       };
-
-       sdhci@c8000600 {
-               cd-gpios = <&gpio 58 0>; /* gpio PH2 */
-               wp-gpios = <&gpio 59 0>; /* gpio PH3 */
-               power-gpios = <&gpio 70 0>; /* gpio PI6 */
-               support-8bit;
-       };
 };
index 10943fb2561c905ccf04b3ffe6ff34b3aab8597c..bfeb117d5aea639bdd0f8949796593b4d881da26 100644 (file)
@@ -6,11 +6,11 @@
        model = "Toshiba AC100 / Dynabook AZ";
        compatible = "compal,paz00", "nvidia,tegra20";
 
-       memory@0 {
+       memory {
                reg = <0x00000000 0x20000000>;
        };
 
-       pinmux@70000000 {
+       pinmux {
                pinctrl-names = "default";
                pinctrl-0 = <&state_default>;
 
                        };
                        conf_ata {
                                nvidia,pins = "ata", "atb", "atc", "atd", "ate",
-                                       "cdev1", "dap1", "dap2", "dtf", "gma",
-                                       "gmb", "gmc", "gmd", "gme", "gpu",
-                                       "gpu7", "gpv", "i2cp", "pta", "rm",
-                                       "sdio1", "slxk", "spdo", "uac", "uda";
+                                       "cdev1", "cdev2", "dap1", "dap2", "dtf",
+                                       "gma", "gmb", "gmc", "gmd", "gme",
+                                       "gpu", "gpu7", "gpv", "i2cp", "pta",
+                                       "rm", "sdio1", "slxk", "spdo", "uac",
+                                       "uda";
                                nvidia,pull = <0>;
                                nvidia,tristate = <0>;
                        };
-                       conf_cdev2 {
-                               nvidia,pins = "cdev2";
-                               nvidia,pull = <1>;
-                               nvidia,tristate = <0>;
-                       };
                        conf_ck32 {
                                nvidia,pins = "ck32", "ddrc", "pmca", "pmcb",
                                        "pmcc", "pmcd", "pmce", "xm2c", "xm2d";
                };
        };
 
+       i2s@70002800 {
+               status = "okay";
+       };
+
+       serial@70006000 {
+               status = "okay";
+               clock-frequency = <216000000>;
+       };
+
+       serial@70006200 {
+               status = "okay";
+               clock-frequency = <216000000>;
+       };
+
        i2c@7000c000 {
+               status = "okay";
                clock-frequency = <400000>;
 
                alc5632: alc5632@1e {
        };
 
        i2c@7000c400 {
+               status = "okay";
                clock-frequency = <400000>;
        };
 
-       i2c@7000c500 {
-               status = "disable";
-       };
-
-       nvec@7000c500 {
-               #address-cells = <1>;
-               #size-cells = <0>;
+       nvec {
                compatible = "nvidia,nvec";
-               reg = <0x7000C500 0x100>;
+               reg = <0x7000c500 0x100>;
                interrupts = <0 92 0x04>;
+               #address-cells = <1>;
+               #size-cells = <0>;
                clock-frequency = <80000>;
-               request-gpios = <&gpio 170 0>;
+               request-gpios = <&gpio 170 0>; /* gpio PV2 */
                slave-addr = <138>;
        };
 
        i2c@7000d000 {
+               status = "okay";
                clock-frequency = <400000>;
 
                adt7461@4c {
                };
        };
 
-       i2s@70002a00 {
-               status = "disable";
-       };
-
-       sound {
-               compatible = "nvidia,tegra-audio-alc5632-paz00",
-                       "nvidia,tegra-audio-alc5632";
-
-               nvidia,model = "Compal PAZ00";
-
-               nvidia,audio-routing =
-                       "Int Spk", "SPKOUT",
-                       "Int Spk", "SPKOUTN",
-                       "Headset Mic", "MICBIAS1",
-                       "MIC1", "Headset Mic",
-                       "Headset Stereophone", "HPR",
-                       "Headset Stereophone", "HPL",
-                       "DMICDAT", "Digital Mic";
-
-               nvidia,audio-codec = <&alc5632>;
-               nvidia,i2s-controller = <&tegra_i2s1>;
-               nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
-       };
-
-       serial@70006000 {
-               clock-frequency = <216000000>;
+       usb@c5000000 {
+               status = "okay";
        };
 
-       serial@70006040 {
-               status = "disable";
+       usb@c5004000 {
+               status = "okay";
+               nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
        };
 
-       serial@70006200 {
-               clock-frequency = <216000000>;
-       };
-
-       serial@70006300 {
-               status = "disable";
-       };
-
-       serial@70006400 {
-               status = "disable";
+       usb@c5008000 {
+               status = "okay";
        };
 
        sdhci@c8000000 {
+               status = "okay";
                cd-gpios = <&gpio 173 0>; /* gpio PV5 */
                wp-gpios = <&gpio 57 0>;  /* gpio PH1 */
                power-gpios = <&gpio 169 0>; /* gpio PV1 */
-       };
-
-       sdhci@c8000200 {
-               status = "disable";
-       };
-
-       sdhci@c8000400 {
-               status = "disable";
+               bus-width = <4>;
        };
 
        sdhci@c8000600 {
+               status = "okay";
                support-8bit;
+               bus-width = <8>;
        };
 
        gpio-keys {
 
                wifi {
                        label = "wifi-led";
-                       gpios = <&gpio 24 0>;
+                       gpios = <&gpio 24 0>; /* gpio PD0 */
                        linux,default-trigger = "rfkill0";
                };
        };
+
+       sound {
+               compatible = "nvidia,tegra-audio-alc5632-paz00",
+                       "nvidia,tegra-audio-alc5632";
+
+               nvidia,model = "Compal PAZ00";
+
+               nvidia,audio-routing =
+                       "Int Spk", "SPKOUT",
+                       "Int Spk", "SPKOUTN",
+                       "Headset Mic", "MICBIAS1",
+                       "MIC1", "Headset Mic",
+                       "Headset Stereophone", "HPR",
+                       "Headset Stereophone", "HPL",
+                       "DMICDAT", "Digital Mic";
+
+               nvidia,audio-codec = <&alc5632>;
+               nvidia,i2s-controller = <&tegra_i2s1>;
+               nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
+       };
 };
index ec33116f5df92a5ef2d0dda633b29d3e7218943f..89cb7f2acd92cfa9ebbe8ceb8bb18823d62ceeeb 100644 (file)
@@ -7,11 +7,10 @@
        compatible = "nvidia,seaboard", "nvidia,tegra20";
 
        memory {
-               device_type = "memory";
-               reg = < 0x00000000 0x40000000 >;
+               reg = <0x00000000 0x40000000>;
        };
 
-       pinmux@70000000 {
+       pinmux {
                pinctrl-names = "default";
                pinctrl-0 = <&state_default>;
 
                        };
                        hdint {
                                nvidia,pins = "hdint", "lpw0", "lpw2", "lsc1",
-                                       "lsck", "lsda", "pta";
+                                       "lsck", "lsda";
                                nvidia,function = "hdmi";
                        };
                        i2cp {
                                nvidia,pins = "pmc";
                                nvidia,function = "pwr_on";
                        };
+                       pta {
+                               nvidia,pins = "pta";
+                               nvidia,function = "i2c2";
+                       };
                        rm {
                                nvidia,pins = "rm";
                                nvidia,function = "i2c1";
                };
        };
 
+       i2s@70002800 {
+               status = "okay";
+       };
+
+       serial@70006300 {
+               status = "okay";
+               clock-frequency = <216000000>;
+       };
+
        i2c@7000c000 {
+               status = "okay";
                clock-frequency = <400000>;
 
                wm8903: wm8903@1a {
                        compatible = "wlf,wm8903";
                        reg = <0x1a>;
                        interrupt-parent = <&gpio>;
-                       interrupts = < 187 0x04 >;
+                       interrupts = <187 0x04>;
 
                        gpio-controller;
                        #gpio-cells = <2>;
 
                        micdet-cfg = <0>;
                        micdet-delay = <100>;
-                       gpio-cfg = < 0xffffffff 0xffffffff 0 0xffffffff 0xffffffff >;
+                       gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
+               };
+
+               /* ALS and proximity sensor */
+               isl29018@44 {
+                       compatible = "isil,isl29018";
+                       reg = <0x44>;
+                       interrupt-parent = <&gpio>;
+                       interrupts = <202 0x04>; /* GPIO PZ2 */
+               };
+
+               gyrometer@68 {
+                       compatible = "invn,mpu3050";
+                       reg = <0x68>;
+                       interrupt-parent = <&gpio>;
+                       interrupts = <204 0x04>; /* gpio PZ4 */
                };
        };
 
        i2c@7000c400 {
-               clock-frequency = <400000>;
+               status = "okay";
+               clock-frequency = <100000>;
+
+               smart-battery@b {
+                       compatible = "ti,bq20z75", "smart-battery-1.1";
+                       reg = <0xb>;
+                       ti,i2c-retry-count = <2>;
+                       ti,poll-retry-count = <10>;
+               };
        };
 
        i2c@7000c500 {
+               status = "okay";
                clock-frequency = <400000>;
        };
 
        i2c@7000d000 {
+               status = "okay";
                clock-frequency = <400000>;
 
-               adt7461@4c {
-                       compatible = "adt7461";
+               temperature-sensor@4c {
+                       compatible = "nct1008";
                        reg = <0x4c>;
                };
-       };
-
-       i2s@70002a00 {
-               status = "disable";
-       };
-
-       sound {
-               compatible = "nvidia,tegra-audio-wm8903-seaboard",
-                            "nvidia,tegra-audio-wm8903";
-               nvidia,model = "NVIDIA Tegra Seaboard";
-
-               nvidia,audio-routing =
-                       "Headphone Jack", "HPOUTR",
-                       "Headphone Jack", "HPOUTL",
-                       "Int Spk", "ROP",
-                       "Int Spk", "RON",
-                       "Int Spk", "LOP",
-                       "Int Spk", "LON",
-                       "Mic Jack", "MICBIAS",
-                       "IN1R", "Mic Jack";
-
-               nvidia,i2s-controller = <&tegra_i2s1>;
-               nvidia,audio-codec = <&wm8903>;
-
-               nvidia,spkr-en-gpios = <&wm8903 2 0>;
-               nvidia,hp-det-gpios = <&gpio 185 0>; /* gpio PX1 */
-       };
 
-       serial@70006000 {
-               status = "disable";
-       };
-
-       serial@70006040 {
-               status = "disable";
+               magnetometer@c {
+                       compatible = "ak8975";
+                       reg = <0xc>;
+                       interrupt-parent = <&gpio>;
+                       interrupts = <109 0x04>; /* gpio PN5 */
+               };
        };
 
-       serial@70006200 {
-               status = "disable";
-       };
+       emc {
+               emc-table@190000 {
+                       reg = <190000>;
+                       compatible = "nvidia,tegra20-emc-table";
+                       clock-frequency = <190000>;
+                       nvidia,emc-registers = <0x0000000c 0x00000026
+                               0x00000009 0x00000003 0x00000004 0x00000004
+                               0x00000002 0x0000000c 0x00000003 0x00000003
+                               0x00000002 0x00000001 0x00000004 0x00000005
+                               0x00000004 0x00000009 0x0000000d 0x0000059f
+                               0x00000000 0x00000003 0x00000003 0x00000003
+                               0x00000003 0x00000001 0x0000000b 0x000000c8
+                               0x00000003 0x00000007 0x00000004 0x0000000f
+                               0x00000002 0x00000000 0x00000000 0x00000002
+                               0x00000000 0x00000000 0x00000083 0xa06204ae
+                               0x007dc010 0x00000000 0x00000000 0x00000000
+                               0x00000000 0x00000000 0x00000000 0x00000000>;
+               };
 
-       serial@70006300 {
-               clock-frequency = < 216000000 >;
+               emc-table@380000 {
+                       reg = <380000>;
+                       compatible = "nvidia,tegra20-emc-table";
+                       clock-frequency = <380000>;
+                       nvidia,emc-registers = <0x00000017 0x0000004b
+                               0x00000012 0x00000006 0x00000004 0x00000005
+                               0x00000003 0x0000000c 0x00000006 0x00000006
+                               0x00000003 0x00000001 0x00000004 0x00000005
+                               0x00000004 0x00000009 0x0000000d 0x00000b5f
+                               0x00000000 0x00000003 0x00000003 0x00000006
+                               0x00000006 0x00000001 0x00000011 0x000000c8
+                               0x00000003 0x0000000e 0x00000007 0x0000000f
+                               0x00000002 0x00000000 0x00000000 0x00000002
+                               0x00000000 0x00000000 0x00000083 0xe044048b
+                               0x007d8010 0x00000000 0x00000000 0x00000000
+                               0x00000000 0x00000000 0x00000000 0x00000000>;
+               };
        };
 
-       serial@70006400 {
-               status = "disable";
+       usb@c5000000 {
+               status = "okay";
+               nvidia,vbus-gpio = <&gpio 24 0>; /* PD0 */
+               dr_mode = "otg";
        };
 
-       sdhci@c8000000 {
-               status = "disable";
+       usb@c5004000 {
+               status = "okay";
+               nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
        };
 
-       sdhci@c8000200 {
-               status = "disable";
+       usb@c5008000 {
+               status = "okay";
        };
 
        sdhci@c8000400 {
+               status = "okay";
                cd-gpios = <&gpio 69 0>; /* gpio PI5 */
                wp-gpios = <&gpio 57 0>; /* gpio PH1 */
                power-gpios = <&gpio 70 0>; /* gpio PI6 */
+               bus-width = <4>;
        };
 
        sdhci@c8000600 {
+               status = "okay";
                support-8bit;
-       };
-
-       usb@c5000000 {
-               nvidia,vbus-gpio = <&gpio 24 0>; /* PD0 */
-               dr_mode = "otg";
+               bus-width = <8>;
        };
 
        gpio-keys {
                };
        };
 
-       emc@7000f400 {
-               emc-table@190000 {
-                       reg = < 190000 >;
-                       compatible = "nvidia,tegra20-emc-table";
-                       clock-frequency = < 190000 >;
-                       nvidia,emc-registers = < 0x0000000c 0x00000026
-                               0x00000009 0x00000003 0x00000004 0x00000004
-                               0x00000002 0x0000000c 0x00000003 0x00000003
-                               0x00000002 0x00000001 0x00000004 0x00000005
-                               0x00000004 0x00000009 0x0000000d 0x0000059f
-                               0x00000000 0x00000003 0x00000003 0x00000003
-                               0x00000003 0x00000001 0x0000000b 0x000000c8
-                               0x00000003 0x00000007 0x00000004 0x0000000f
-                               0x00000002 0x00000000 0x00000000 0x00000002
-                               0x00000000 0x00000000 0x00000083 0xa06204ae
-                               0x007dc010 0x00000000 0x00000000 0x00000000
-                               0x00000000 0x00000000 0x00000000 0x00000000 >;
-               };
+       sound {
+               compatible = "nvidia,tegra-audio-wm8903-seaboard",
+                            "nvidia,tegra-audio-wm8903";
+               nvidia,model = "NVIDIA Tegra Seaboard";
 
-               emc-table@380000 {
-                       reg = < 380000 >;
-                       compatible = "nvidia,tegra20-emc-table";
-                       clock-frequency = < 380000 >;
-                       nvidia,emc-registers = < 0x00000017 0x0000004b
-                               0x00000012 0x00000006 0x00000004 0x00000005
-                               0x00000003 0x0000000c 0x00000006 0x00000006
-                               0x00000003 0x00000001 0x00000004 0x00000005
-                               0x00000004 0x00000009 0x0000000d 0x00000b5f
-                               0x00000000 0x00000003 0x00000003 0x00000006
-                               0x00000006 0x00000001 0x00000011 0x000000c8
-                               0x00000003 0x0000000e 0x00000007 0x0000000f
-                               0x00000002 0x00000000 0x00000000 0x00000002
-                               0x00000000 0x00000000 0x00000083 0xe044048b
-                               0x007d8010 0x00000000 0x00000000 0x00000000
-                               0x00000000 0x00000000 0x00000000 0x00000000 >;
-               };
+               nvidia,audio-routing =
+                       "Headphone Jack", "HPOUTR",
+                       "Headphone Jack", "HPOUTL",
+                       "Int Spk", "ROP",
+                       "Int Spk", "RON",
+                       "Int Spk", "LOP",
+                       "Int Spk", "LON",
+                       "Mic Jack", "MICBIAS",
+                       "IN1R", "Mic Jack";
+
+               nvidia,i2s-controller = <&tegra_i2s1>;
+               nvidia,audio-codec = <&wm8903>;
+
+               nvidia,spkr-en-gpios = <&wm8903 2 0>;
+               nvidia,hp-det-gpios = <&gpio 185 0>; /* gpio PX1 */
        };
 };
index 98efd5b0d7f9a085584e40a43ed366f2bd28e954..9de5636023f69694d39bf4196f1bca180e7ebb43 100644 (file)
@@ -6,11 +6,11 @@
        model = "Compulab TrimSlice board";
        compatible = "compulab,trimslice", "nvidia,tegra20";
 
-       memory@0 {
-               reg = < 0x00000000 0x40000000 >;
+       memory {
+               reg = <0x00000000 0x40000000>;
        };
 
-       pinmux@70000000 {
+       pinmux {
                pinctrl-names = "default";
                pinctrl-0 = <&state_default>;
 
                                nvidia,tristate = <1>;
                        };
                        conf_atb {
-                               nvidia,pins = "atb", "cdev1", "dap1", "gma",
-                                       "gmc", "gmd", "gpu", "gpu7", "gpv",
-                                       "sdio1", "slxa", "slxk", "uac";
+                               nvidia,pins = "atb", "cdev1", "cdev2", "dap1",
+                                       "gma", "gmc", "gmd", "gpu", "gpu7",
+                                       "gpv", "sdio1", "slxa", "slxk", "uac";
                                nvidia,pull = <0>;
                                nvidia,tristate = <0>;
                        };
-                       conf_cdev2 {
-                               nvidia,pins = "cdev2", "csus", "spia", "spib",
-                                       "spid", "spif";
-                               nvidia,pull = <1>;
-                               nvidia,tristate = <1>;
-                       };
                        conf_ck32 {
                                nvidia,pins = "ck32", "ddrc", "pmca", "pmcb",
                                        "pmcc", "pmcd", "pmce", "xm2c", "xm2d";
                                nvidia,pull = <0>;
                        };
+                       conf_csus {
+                               nvidia,pins = "csus", "spia", "spib",
+                                       "spid", "spif";
+                               nvidia,pull = <1>;
+                               nvidia,tristate = <1>;
+                       };
                        conf_ddc {
                                nvidia,pins = "ddc", "dtf", "rm", "sdc", "sdd";
                                nvidia,pull = <2>;
                };
        };
 
+       i2s@70002800 {
+               status = "okay";
+       };
+
+       serial@70006000 {
+               status = "okay";
+               clock-frequency = <216000000>;
+       };
+
        i2c@7000c000 {
+               status = "okay";
                clock-frequency = <400000>;
        };
 
        i2c@7000c400 {
+               status = "okay";
                clock-frequency = <400000>;
        };
 
        i2c@7000c500 {
+               status = "okay";
                clock-frequency = <400000>;
-       };
-
-       i2c@7000d000 {
-               status = "disable";
-       };
-
-       i2s@70002800 {
-               status = "disable";
-       };
-
-       i2s@70002a00 {
-               status = "disable";
-       };
-
-       das@70000c00 {
-               status = "disable";
-       };
 
-       serial@70006000 {
-               clock-frequency = < 216000000 >;
-       };
+               codec: codec@1a {
+                       compatible = "ti,tlv320aic23";
+                       reg = <0x1a>;
+               };
 
-       serial@70006040 {
-               status = "disable";
+               rtc@56 {
+                       compatible = "emmicro,em3027";
+                       reg = <0x56>;
+               };
        };
 
-       serial@70006200 {
-               status = "disable";
+       usb@c5000000 {
+               status = "okay";
        };
 
-       serial@70006300 {
-               status = "disable";
+       usb@c5004000 {
+               nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
        };
 
-       serial@70006400 {
-               status = "disable";
+       usb@c5008000 {
+               status = "okay";
        };
 
        sdhci@c8000000 {
-               status = "disable";
+               status = "okay";
+               bus-width = <4>;
        };
 
-       sdhci@c8000200 {
-               status = "disable";
-       };
-
-       sdhci@c8000400 {
-               status = "disable";
+       sdhci@c8000600 {
+               status = "okay";
+               cd-gpios = <&gpio 121 0>; /* gpio PP1 */
+               wp-gpios = <&gpio 122 0>; /* gpio PP2 */
+               bus-width = <4>;
        };
 
-       sdhci@c8000600 {
-               cd-gpios = <&gpio 121 0>;
-               wp-gpios = <&gpio 122 0>;
+       sound {
+               compatible = "nvidia,tegra-audio-trimslice";
+               nvidia,i2s-controller = <&tegra_i2s1>;
+               nvidia,audio-codec = <&codec>;
        };
 };
index 71eb2e50a66824b4f8a441c547238132ea154e02..445343b0fbdd7bc7eedbd989196bac17f7ffc383 100644 (file)
@@ -7,10 +7,10 @@
        compatible = "nvidia,ventana", "nvidia,tegra20";
 
        memory {
-               reg = < 0x00000000 0x40000000 >;
+               reg = <0x00000000 0x40000000>;
        };
 
-       pinmux@70000000 {
+       pinmux {
                pinctrl-names = "default";
                pinctrl-0 = <&state_default>;
 
                };
        };
 
+       i2s@70002800 {
+               status = "okay";
+       };
+
+       serial@70006300 {
+               status = "okay";
+               clock-frequency = <216000000>;
+       };
+
        i2c@7000c000 {
+               status = "okay";
                clock-frequency = <400000>;
 
                wm8903: wm8903@1a {
                        compatible = "wlf,wm8903";
                        reg = <0x1a>;
                        interrupt-parent = <&gpio>;
-                       interrupts = < 187 0x04 >;
+                       interrupts = <187 0x04>;
 
                        gpio-controller;
                        #gpio-cells = <2>;
 
                        micdet-cfg = <0>;
                        micdet-delay = <100>;
-                       gpio-cfg = < 0xffffffff 0xffffffff 0 0xffffffff 0xffffffff >;
+                       gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
+               };
+
+               /* ALS and proximity sensor */
+               isl29018@44 {
+                       compatible = "isil,isl29018";
+                       reg = <0x44>;
+                       interrupt-parent = <&gpio>;
+                       interrupts = <202 0x04>; /*gpio PZ2 */
                };
        };
 
        i2c@7000c400 {
+               status = "okay";
                clock-frequency = <400000>;
        };
 
        i2c@7000c500 {
+               status = "okay";
                clock-frequency = <400000>;
        };
 
        i2c@7000d000 {
+               status = "okay";
                clock-frequency = <400000>;
        };
 
-       i2s@70002a00 {
-               status = "disable";
+       usb@c5000000 {
+               status = "okay";
+       };
+
+       usb@c5004000 {
+               status = "okay";
+               nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+       };
+
+       usb@c5008000 {
+               status = "okay";
+       };
+
+       sdhci@c8000400 {
+               status = "okay";
+               cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+               wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+               power-gpios = <&gpio 70 0>; /* gpio PI6 */
+               bus-width = <4>;
+       };
+
+       sdhci@c8000600 {
+               status = "okay";
+               support-8bit;
+               bus-width = <8>;
        };
 
        sound {
 
                nvidia,spkr-en-gpios = <&wm8903 2 0>;
                nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
-               nvidia,int-mic-en-gpios = <&gpio 184 0>; /*gpio PX0 */
+               nvidia,int-mic-en-gpios = <&gpio 184 0>; /* gpio PX0 */
                nvidia,ext-mic-en-gpios = <&gpio 185 0>; /* gpio PX1 */
        };
-
-       serial@70006000 {
-               status = "disable";
-       };
-
-       serial@70006040 {
-               status = "disable";
-       };
-
-       serial@70006200 {
-               status = "disable";
-       };
-
-       serial@70006300 {
-               clock-frequency = < 216000000 >;
-       };
-
-       serial@70006400 {
-               status = "disable";
-       };
-
-       sdhci@c8000000 {
-               status = "disable";
-       };
-
-       sdhci@c8000200 {
-               status = "disable";
-       };
-
-       sdhci@c8000400 {
-               cd-gpios = <&gpio 69 0>; /* gpio PI5 */
-               wp-gpios = <&gpio 57 0>; /* gpio PH1 */
-               power-gpios = <&gpio 70 0>; /* gpio PI6 */
-       };
-
-       sdhci@c8000600 {
-               support-8bit;
-       };
 };
index 108e894a8926d790151b298fcec1b75075dbfa9d..c417d67e902755df968f812a9fe4f94101cbb796 100644 (file)
        compatible = "nvidia,tegra20";
        interrupt-parent = <&intc>;
 
-       pmc@7000f400 {
-               compatible = "nvidia,tegra20-pmc";
-               reg = <0x7000e400 0x400>;
-       };
-
-       intc: interrupt-controller@50041000 {
+       intc: interrupt-controller {
                compatible = "arm,cortex-a9-gic";
+               reg = <0x50041000 0x1000
+                      0x50040100 0x0100>;
                interrupt-controller;
                #interrupt-cells = <3>;
-               reg = < 0x50041000 0x1000 >,
-                     < 0x50040100 0x0100 >;
        };
 
-       pmu {
-               compatible = "arm,cortex-a9-pmu";
-               interrupts = <0 56 0x04
-                             0 57 0x04>;
-       };
-
-       apbdma: dma@6000a000 {
+       apbdma: dma {
                compatible = "nvidia,tegra20-apbdma";
                reg = <0x6000a000 0x1200>;
-               interrupts = < 0 104 0x04
-                              0 105 0x04
-                              0 106 0x04
-                              0 107 0x04
-                              0 108 0x04
-                              0 109 0x04
-                              0 110 0x04
-                              0 111 0x04
-                              0 112 0x04
-                              0 113 0x04
-                              0 114 0x04
-                              0 115 0x04
-                              0 116 0x04
-                              0 117 0x04
-                              0 118 0x04
-                              0 119 0x04 >;
-       };
-
-       i2c@7000c000 {
-               #address-cells = <1>;
-               #size-cells = <0>;
-               compatible = "nvidia,tegra20-i2c";
-               reg = <0x7000C000 0x100>;
-               interrupts = < 0 38 0x04 >;
-       };
-
-       i2c@7000c400 {
-               #address-cells = <1>;
-               #size-cells = <0>;
-               compatible = "nvidia,tegra20-i2c";
-               reg = <0x7000C400 0x100>;
-               interrupts = < 0 84 0x04 >;
+               interrupts = <0 104 0x04
+                             0 105 0x04
+                             0 106 0x04
+                             0 107 0x04
+                             0 108 0x04
+                             0 109 0x04
+                             0 110 0x04
+                             0 111 0x04
+                             0 112 0x04
+                             0 113 0x04
+                             0 114 0x04
+                             0 115 0x04
+                             0 116 0x04
+                             0 117 0x04
+                             0 118 0x04
+                             0 119 0x04>;
+       };
+
+       ahb {
+               compatible = "nvidia,tegra20-ahb";
+               reg = <0x6000c004 0x10c>; /* AHB Arbitration + Gizmo Controller */
+       };
+
+       gpio: gpio {
+               compatible = "nvidia,tegra20-gpio";
+               reg = <0x6000d000 0x1000>;
+               interrupts = <0 32 0x04
+                             0 33 0x04
+                             0 34 0x04
+                             0 35 0x04
+                             0 55 0x04
+                             0 87 0x04
+                             0 89 0x04>;
+               #gpio-cells = <2>;
+               gpio-controller;
+               #interrupt-cells = <2>;
+               interrupt-controller;
        };
 
-       i2c@7000c500 {
-               #address-cells = <1>;
-               #size-cells = <0>;
-               compatible = "nvidia,tegra20-i2c";
-               reg = <0x7000C500 0x100>;
-               interrupts = < 0 92 0x04 >;
+       pinmux: pinmux {
+               compatible = "nvidia,tegra20-pinmux";
+               reg = <0x70000014 0x10   /* Tri-state registers */
+                      0x70000080 0x20   /* Mux registers */
+                      0x700000a0 0x14   /* Pull-up/down registers */
+                      0x70000868 0xa8>; /* Pad control registers */
        };
 
-       i2c@7000d000 {
-               #address-cells = <1>;
-               #size-cells = <0>;
-               compatible = "nvidia,tegra20-i2c-dvc";
-               reg = <0x7000D000 0x200>;
-               interrupts = < 0 53 0x04 >;
+       das {
+               compatible = "nvidia,tegra20-das";
+               reg = <0x70000c00 0x80>;
        };
 
        tegra_i2s1: i2s@70002800 {
                compatible = "nvidia,tegra20-i2s";
                reg = <0x70002800 0x200>;
-               interrupts = < 0 13 0x04 >;
-               nvidia,dma-request-selector = < &apbdma 2 >;
+               interrupts = <0 13 0x04>;
+               nvidia,dma-request-selector = <&apbdma 2>;
+               status = "disable";
        };
 
        tegra_i2s2: i2s@70002a00 {
                compatible = "nvidia,tegra20-i2s";
                reg = <0x70002a00 0x200>;
-               interrupts = < 0 3 0x04 >;
-               nvidia,dma-request-selector = < &apbdma 1 >;
-       };
-
-       das@70000c00 {
-               compatible = "nvidia,tegra20-das";
-               reg = <0x70000c00 0x80>;
-       };
-
-       gpio: gpio@6000d000 {
-               compatible = "nvidia,tegra20-gpio";
-               reg = < 0x6000d000 0x1000 >;
-               interrupts = < 0 32 0x04
-                              0 33 0x04
-                              0 34 0x04
-                              0 35 0x04
-                              0 55 0x04
-                              0 87 0x04
-                              0 89 0x04 >;
-               #gpio-cells = <2>;
-               gpio-controller;
-               #interrupt-cells = <2>;
-               interrupt-controller;
-       };
-
-       pinmux: pinmux@70000000 {
-               compatible = "nvidia,tegra20-pinmux";
-               reg = < 0x70000014 0x10    /* Tri-state registers */
-                       0x70000080 0x20    /* Mux registers */
-                       0x700000a0 0x14    /* Pull-up/down registers */
-                       0x70000868 0xa8 >; /* Pad control registers */
+               interrupts = <0 3 0x04>;
+               nvidia,dma-request-selector = <&apbdma 1>;
+               status = "disable";
        };
 
        serial@70006000 {
                compatible = "nvidia,tegra20-uart";
                reg = <0x70006000 0x40>;
                reg-shift = <2>;
-               interrupts = < 0 36 0x04 >;
+               interrupts = <0 36 0x04>;
+               status = "disable";
        };
 
        serial@70006040 {
                compatible = "nvidia,tegra20-uart";
                reg = <0x70006040 0x40>;
                reg-shift = <2>;
-               interrupts = < 0 37 0x04 >;
+               interrupts = <0 37 0x04>;
+               status = "disable";
        };
 
        serial@70006200 {
                compatible = "nvidia,tegra20-uart";
                reg = <0x70006200 0x100>;
                reg-shift = <2>;
-               interrupts = < 0 46 0x04 >;
+               interrupts = <0 46 0x04>;
+               status = "disable";
        };
 
        serial@70006300 {
                compatible = "nvidia,tegra20-uart";
                reg = <0x70006300 0x100>;
                reg-shift = <2>;
-               interrupts = < 0 90 0x04 >;
+               interrupts = <0 90 0x04>;
+               status = "disable";
        };
 
        serial@70006400 {
                compatible = "nvidia,tegra20-uart";
                reg = <0x70006400 0x100>;
                reg-shift = <2>;
-               interrupts = < 0 91 0x04 >;
+               interrupts = <0 91 0x04>;
+               status = "disable";
        };
 
-       emc@7000f400 {
+       i2c@7000c000 {
+               compatible = "nvidia,tegra20-i2c";
+               reg = <0x7000c000 0x100>;
+               interrupts = <0 38 0x04>;
                #address-cells = <1>;
                #size-cells = <0>;
-               compatible = "nvidia,tegra20-emc";
-               reg = <0x7000f400 0x200>;
+               status = "disable";
        };
 
-       sdhci@c8000000 {
-               compatible = "nvidia,tegra20-sdhci";
-               reg = <0xc8000000 0x200>;
-               interrupts = < 0 14 0x04 >;
+       i2c@7000c400 {
+               compatible = "nvidia,tegra20-i2c";
+               reg = <0x7000c400 0x100>;
+               interrupts = <0 84 0x04>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               status = "disable";
        };
 
-       sdhci@c8000200 {
-               compatible = "nvidia,tegra20-sdhci";
-               reg = <0xc8000200 0x200>;
-               interrupts = < 0 15 0x04 >;
+       i2c@7000c500 {
+               compatible = "nvidia,tegra20-i2c";
+               reg = <0x7000c500 0x100>;
+               interrupts = <0 92 0x04>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               status = "disable";
        };
 
-       sdhci@c8000400 {
-               compatible = "nvidia,tegra20-sdhci";
-               reg = <0xc8000400 0x200>;
-               interrupts = < 0 19 0x04 >;
+       i2c@7000d000 {
+               compatible = "nvidia,tegra20-i2c-dvc";
+               reg = <0x7000d000 0x200>;
+               interrupts = <0 53 0x04>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               status = "disable";
        };
 
-       sdhci@c8000600 {
-               compatible = "nvidia,tegra20-sdhci";
-               reg = <0xc8000600 0x200>;
-               interrupts = < 0 31 0x04 >;
+       pmc {
+               compatible = "nvidia,tegra20-pmc";
+               reg = <0x7000e400 0x400>;
+       };
+
+       mc {
+               compatible = "nvidia,tegra20-mc";
+               reg = <0x7000f000 0x024
+                      0x7000f03c 0x3c4>;
+               interrupts = <0 77 0x04>;
+       };
+
+       gart {
+               compatible = "nvidia,tegra20-gart";
+               reg = <0x7000f024 0x00000018    /* controller registers */
+                      0x58000000 0x02000000>;  /* GART aperture */
+       };
+
+       emc {
+               compatible = "nvidia,tegra20-emc";
+               reg = <0x7000f400 0x200>;
+               #address-cells = <1>;
+               #size-cells = <0>;
        };
 
        usb@c5000000 {
                compatible = "nvidia,tegra20-ehci", "usb-ehci";
                reg = <0xc5000000 0x4000>;
-               interrupts = < 0 20 0x04 >;
+               interrupts = <0 20 0x04>;
                phy_type = "utmi";
                nvidia,has-legacy-mode;
+               status = "disable";
        };
 
        usb@c5004000 {
                compatible = "nvidia,tegra20-ehci", "usb-ehci";
                reg = <0xc5004000 0x4000>;
-               interrupts = < 0 21 0x04 >;
+               interrupts = <0 21 0x04>;
                phy_type = "ulpi";
+               status = "disable";
        };
 
        usb@c5008000 {
                compatible = "nvidia,tegra20-ehci", "usb-ehci";
                reg = <0xc5008000 0x4000>;
-               interrupts = < 0 97 0x04 >;
+               interrupts = <0 97 0x04>;
                phy_type = "utmi";
+               status = "disable";
+       };
+
+       sdhci@c8000000 {
+               compatible = "nvidia,tegra20-sdhci";
+               reg = <0xc8000000 0x200>;
+               interrupts = <0 14 0x04>;
+               status = "disable";
        };
-};
 
+       sdhci@c8000200 {
+               compatible = "nvidia,tegra20-sdhci";
+               reg = <0xc8000200 0x200>;
+               interrupts = <0 15 0x04>;
+               status = "disable";
+       };
+
+       sdhci@c8000400 {
+               compatible = "nvidia,tegra20-sdhci";
+               reg = <0xc8000400 0x200>;
+               interrupts = <0 19 0x04>;
+               status = "disable";
+       };
+
+       sdhci@c8000600 {
+               compatible = "nvidia,tegra20-sdhci";
+               reg = <0xc8000600 0x200>;
+               interrupts = <0 31 0x04>;
+               status = "disable";
+       };
+
+       pmu {
+               compatible = "arm,cortex-a9-pmu";
+               interrupts = <0 56 0x04
+                             0 57 0x04>;
+       };
+};
index 62a7b39f1c9a9e8f55ca409e5c192e312fe93d3b..2dcc09e784b58713cb514fe89044cd821c6a82e3 100644 (file)
        compatible = "nvidia,tegra30";
        interrupt-parent = <&intc>;
 
-       pmc@7000f400 {
-               compatible = "nvidia,tegra20-pmc", "nvidia,tegra30-pmc";
-               reg = <0x7000e400 0x400>;
-       };
-
-       intc: interrupt-controller@50041000 {
+       intc: interrupt-controller {
                compatible = "arm,cortex-a9-gic";
+               reg = <0x50041000 0x1000
+                      0x50040100 0x0100>;
                interrupt-controller;
                #interrupt-cells = <3>;
-               reg = < 0x50041000 0x1000 >,
-                     < 0x50040100 0x0100 >;
        };
 
-       pmu {
-               compatible = "arm,cortex-a9-pmu";
-               interrupts = <0 144 0x04
-                             0 145 0x04
-                             0 146 0x04
-                             0 147 0x04>;
-       };
-
-       apbdma: dma@6000a000 {
+       apbdma: dma {
                compatible = "nvidia,tegra30-apbdma", "nvidia,tegra20-apbdma";
                reg = <0x6000a000 0x1400>;
-               interrupts = < 0 104 0x04
-                              0 105 0x04
-                              0 106 0x04
-                              0 107 0x04
-                              0 108 0x04
-                              0 109 0x04
-                              0 110 0x04
-                              0 111 0x04
-                              0 112 0x04
-                              0 113 0x04
-                              0 114 0x04
-                              0 115 0x04
-                              0 116 0x04
-                              0 117 0x04
-                              0 118 0x04
-                              0 119 0x04
-                              0 128 0x04
-                              0 129 0x04
-                              0 130 0x04
-                              0 131 0x04
-                              0 132 0x04
-                              0 133 0x04
-                              0 134 0x04
-                              0 135 0x04
-                              0 136 0x04
-                              0 137 0x04
-                              0 138 0x04
-                              0 139 0x04
-                              0 140 0x04
-                              0 141 0x04
-                              0 142 0x04
-                              0 143 0x04 >;
-       };
-
-       i2c@7000c000 {
-               #address-cells = <1>;
-               #size-cells = <0>;
-               compatible =  "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
-               reg = <0x7000C000 0x100>;
-               interrupts = < 0 38 0x04 >;
-       };
-
-       i2c@7000c400 {
-               #address-cells = <1>;
-               #size-cells = <0>;
-               compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
-               reg = <0x7000C400 0x100>;
-               interrupts = < 0 84 0x04 >;
-       };
-
-       i2c@7000c500 {
-               #address-cells = <1>;
-               #size-cells = <0>;
-               compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
-               reg = <0x7000C500 0x100>;
-               interrupts = < 0 92 0x04 >;
+               interrupts = <0 104 0x04
+                             0 105 0x04
+                             0 106 0x04
+                             0 107 0x04
+                             0 108 0x04
+                             0 109 0x04
+                             0 110 0x04
+                             0 111 0x04
+                             0 112 0x04
+                             0 113 0x04
+                             0 114 0x04
+                             0 115 0x04
+                             0 116 0x04
+                             0 117 0x04
+                             0 118 0x04
+                             0 119 0x04
+                             0 128 0x04
+                             0 129 0x04
+                             0 130 0x04
+                             0 131 0x04
+                             0 132 0x04
+                             0 133 0x04
+                             0 134 0x04
+                             0 135 0x04
+                             0 136 0x04
+                             0 137 0x04
+                             0 138 0x04
+                             0 139 0x04
+                             0 140 0x04
+                             0 141 0x04
+                             0 142 0x04
+                             0 143 0x04>;
        };
 
-       i2c@7000c700 {
-               #address-cells = <1>;
-               #size-cells = <0>;
-               compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
-               reg = <0x7000c700 0x100>;
-               interrupts = < 0 120 0x04 >;
+       ahb: ahb {
+               compatible = "nvidia,tegra30-ahb";
+               reg = <0x6000c004 0x14c>; /* AHB Arbitration + Gizmo Controller */
        };
 
-       i2c@7000d000 {
-               #address-cells = <1>;
-               #size-cells = <0>;
-               compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
-               reg = <0x7000D000 0x100>;
-               interrupts = < 0 53 0x04 >;
-       };
-
-       gpio: gpio@6000d000 {
+       gpio: gpio {
                compatible = "nvidia,tegra30-gpio", "nvidia,tegra20-gpio";
-               reg = < 0x6000d000 0x1000 >;
-               interrupts = < 0 32 0x04
-                              0 33 0x04
-                              0 34 0x04
-                              0 35 0x04
-                              0 55 0x04
-                              0 87 0x04
-                              0 89 0x04
-                              0 125 0x04 >;
+               reg = <0x6000d000 0x1000>;
+               interrupts = <0 32 0x04
+                             0 33 0x04
+                             0 34 0x04
+                             0 35 0x04
+                             0 55 0x04
+                             0 87 0x04
+                             0 89 0x04
+                             0 125 0x04>;
                #gpio-cells = <2>;
                gpio-controller;
                #interrupt-cells = <2>;
                interrupt-controller;
        };
 
+       pinmux: pinmux {
+               compatible = "nvidia,tegra30-pinmux";
+               reg = <0x70000868 0xd0    /* Pad control registers */
+                      0x70003000 0x3e0>; /* Mux registers */
+       };
+
        serial@70006000 {
                compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
                reg = <0x70006000 0x40>;
                reg-shift = <2>;
-               interrupts = < 0 36 0x04 >;
+               interrupts = <0 36 0x04>;
+               status = "disable";
        };
 
        serial@70006040 {
                compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
                reg = <0x70006040 0x40>;
                reg-shift = <2>;
-               interrupts = < 0 37 0x04 >;
+               interrupts = <0 37 0x04>;
+               status = "disable";
        };
 
        serial@70006200 {
                compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
                reg = <0x70006200 0x100>;
                reg-shift = <2>;
-               interrupts = < 0 46 0x04 >;
+               interrupts = <0 46 0x04>;
+               status = "disable";
        };
 
        serial@70006300 {
                compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
                reg = <0x70006300 0x100>;
                reg-shift = <2>;
-               interrupts = < 0 90 0x04 >;
+               interrupts = <0 90 0x04>;
+               status = "disable";
        };
 
        serial@70006400 {
                compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
                reg = <0x70006400 0x100>;
                reg-shift = <2>;
-               interrupts = < 0 91 0x04 >;
+               interrupts = <0 91 0x04>;
+               status = "disable";
+       };
+
+       i2c@7000c000 {
+               compatible =  "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+               reg = <0x7000c000 0x100>;
+               interrupts = <0 38 0x04>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               status = "disable";
+       };
+
+       i2c@7000c400 {
+               compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+               reg = <0x7000c400 0x100>;
+               interrupts = <0 84 0x04>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               status = "disable";
+       };
+
+       i2c@7000c500 {
+               compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+               reg = <0x7000c500 0x100>;
+               interrupts = <0 92 0x04>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               status = "disable";
+       };
+
+       i2c@7000c700 {
+               compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+               reg = <0x7000c700 0x100>;
+               interrupts = <0 120 0x04>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               status = "disable";
+       };
+
+       i2c@7000d000 {
+               compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+               reg = <0x7000d000 0x100>;
+               interrupts = <0 53 0x04>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               status = "disable";
+       };
+
+       pmc {
+               compatible = "nvidia,tegra20-pmc", "nvidia,tegra30-pmc";
+               reg = <0x7000e400 0x400>;
+       };
+
+       mc {
+               compatible = "nvidia,tegra30-mc";
+               reg = <0x7000f000 0x010
+                      0x7000f03c 0x1b4
+                      0x7000f200 0x028
+                      0x7000f284 0x17c>;
+               interrupts = <0 77 0x04>;
+       };
+
+       smmu {
+               compatible = "nvidia,tegra30-smmu";
+               reg = <0x7000f010 0x02c
+                      0x7000f1f0 0x010
+                      0x7000f228 0x05c>;
+               nvidia,#asids = <4>;            /* # of ASIDs */
+               dma-window = <0 0x40000000>;    /* IOVA start & length */
+               nvidia,ahb = <&ahb>;
+       };
+
+       ahub {
+               compatible = "nvidia,tegra30-ahub";
+               reg = <0x70080000 0x200
+                      0x70080200 0x100>;
+               interrupts = <0 103 0x04>;
+               nvidia,dma-request-selector = <&apbdma 1>;
+
+               ranges;
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               tegra_i2s0: i2s@70080300 {
+                       compatible = "nvidia,tegra30-i2s";
+                       reg = <0x70080300 0x100>;
+                       nvidia,ahub-cif-ids = <4 4>;
+                       status = "disable";
+               };
+
+               tegra_i2s1: i2s@70080400 {
+                       compatible = "nvidia,tegra30-i2s";
+                       reg = <0x70080400 0x100>;
+                       nvidia,ahub-cif-ids = <5 5>;
+                       status = "disable";
+               };
+
+               tegra_i2s2: i2s@70080500 {
+                       compatible = "nvidia,tegra30-i2s";
+                       reg = <0x70080500 0x100>;
+                       nvidia,ahub-cif-ids = <6 6>;
+                       status = "disable";
+               };
+
+               tegra_i2s3: i2s@70080600 {
+                       compatible = "nvidia,tegra30-i2s";
+                       reg = <0x70080600 0x100>;
+                       nvidia,ahub-cif-ids = <7 7>;
+                       status = "disable";
+               };
+
+               tegra_i2s4: i2s@70080700 {
+                       compatible = "nvidia,tegra30-i2s";
+                       reg = <0x70080700 0x100>;
+                       nvidia,ahub-cif-ids = <8 8>;
+                       status = "disable";
+               };
        };
 
        sdhci@78000000 {
                compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
                reg = <0x78000000 0x200>;
-               interrupts = < 0 14 0x04 >;
+               interrupts = <0 14 0x04>;
+               status = "disable";
        };
 
        sdhci@78000200 {
                compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
                reg = <0x78000200 0x200>;
-               interrupts = < 0 15 0x04 >;
+               interrupts = <0 15 0x04>;
+               status = "disable";
        };
 
        sdhci@78000400 {
                compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
                reg = <0x78000400 0x200>;
-               interrupts = < 0 19 0x04 >;
+               interrupts = <0 19 0x04>;
+               status = "disable";
        };
 
        sdhci@78000600 {
                compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
                reg = <0x78000600 0x200>;
-               interrupts = < 0 31 0x04 >;
+               interrupts = <0 31 0x04>;
+               status = "disable";
        };
 
-       pinmux: pinmux@70000000 {
-               compatible = "nvidia,tegra30-pinmux";
-               reg = < 0x70000868 0xd0     /* Pad control registers */
-                       0x70003000 0x3e0 >; /* Mux registers */
+       pmu {
+               compatible = "arm,cortex-a9-pmu";
+               interrupts = <0 144 0x04
+                             0 145 0x04
+                             0 146 0x04
+                             0 147 0x04>;
        };
 };
index 941b161ab78ce32c219ec6c88a1ea2f955e3fa77..7e1091d91af8b9d999b795414ebf0110c42b65c0 100644 (file)
                #address-cells = <0>;
                interrupt-controller;
                reg = <0x2c001000 0x1000>,
-                     <0x2c002000 0x100>;
+                     <0x2c002000 0x1000>,
+                     <0x2c004000 0x2000>,
+                     <0x2c006000 0x2000>;
+               interrupts = <1 9 0xf04>;
        };
 
        memory-controller@7ffd0000 {
                             <0 91 4>;
        };
 
+       timer {
+               compatible = "arm,armv7-timer";
+               interrupts = <1 13 0xf08>,
+                            <1 14 0xf08>,
+                            <1 11 0xf08>,
+                            <1 10 0xf08>;
+       };
+
        pmu {
                compatible = "arm,cortex-a15-pmu", "arm,cortex-a9-pmu";
                interrupts = <0 68 4>,
index 6905e66d474808f7021121dbb5e065cb7e5d64a0..18917a0f86047a3a444919badb09fb47da244bc4 100644 (file)
 
        timer@2c000600 {
                compatible = "arm,cortex-a5-twd-timer";
-               reg = <0x2c000600 0x38>;
-               interrupts = <1 2 0x304>,
-                            <1 3 0x304>;
+               reg = <0x2c000600 0x20>;
+               interrupts = <1 13 0x304>;
+       };
+
+       watchdog@2c000620 {
+               compatible = "arm,cortex-a5-twd-wdt";
+               reg = <0x2c000620 0x20>;
+               interrupts = <1 14 0x304>;
        };
 
        gic: interrupt-controller@2c001000 {
-               compatible = "arm,corex-a5-gic", "arm,cortex-a9-gic";
+               compatible = "arm,cortex-a5-gic", "arm,cortex-a9-gic";
                #interrupt-cells = <3>;
                #address-cells = <0>;
                interrupt-controller;
index da778693be548fdc87c6cb479ff6eefe71a7bf82..3f0c736d31d6bca211d1c76550ed28ca841a3c56 100644 (file)
        timer@1e000600 {
                compatible = "arm,cortex-a9-twd-timer";
                reg = <0x1e000600 0x20>;
-               interrupts = <1 2 0xf04>,
-                            <1 3 0xf04>;
+               interrupts = <1 13 0xf04>;
+       };
+
+       watchdog@1e000620 {
+               compatible = "arm,cortex-a9-twd-wdt";
+               reg = <0x1e000620 0x20>;
+               interrupts = <1 14 0xf04>;
        };
 
        gic: interrupt-controller@1e001000 {
index 595ecd290ebf3462b974da4fcf73b69ed2e3682b..9d7eb530f95fd926785170162812d3ef1096178d 100644 (file)
@@ -173,7 +173,8 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
        read_lock_irqsave(&device_info->lock, flags);
 
        list_for_each_entry(b, &device_info->safe_buffers, node)
-               if (b->safe_dma_addr == safe_dma_addr) {
+               if (b->safe_dma_addr <= safe_dma_addr &&
+                   b->safe_dma_addr + b->size > safe_dma_addr) {
                        rb = b;
                        break;
                }
@@ -254,7 +255,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
        if (buf == NULL) {
                dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
                       __func__, ptr);
-               return ~0;
+               return DMA_ERROR_CODE;
        }
 
        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -307,8 +308,9 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, enum dma_data_direction dir)
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
 {
        dma_addr_t dma_addr;
        int ret;
@@ -320,21 +322,20 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 
        ret = needs_bounce(dev, dma_addr, size);
        if (ret < 0)
-               return ~0;
+               return DMA_ERROR_CODE;
 
        if (ret == 0) {
-               __dma_page_cpu_to_dev(page, offset, size, dir);
+               arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
                return dma_addr;
        }
 
        if (PageHighMem(page)) {
                dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
-               return ~0;
+               return DMA_ERROR_CODE;
        }
 
        return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -342,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
  * the safe buffer.  (basically return things back to the way they
  * should be)
  */
-void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-               enum dma_data_direction dir)
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+               enum dma_data_direction dir, struct dma_attrs *attrs)
 {
        struct safe_buffer *buf;
 
@@ -352,19 +353,18 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 
        buf = find_safe_buffer_dev(dev, dma_addr, __func__);
        if (!buf) {
-               __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
-                       dma_addr & ~PAGE_MASK, size, dir);
+               arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
                return;
        }
 
        unmap_single(dev, buf, size, dir);
 }
-EXPORT_SYMBOL(__dma_unmap_page);
 
-int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
-               unsigned long off, size_t sz, enum dma_data_direction dir)
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+               size_t sz, enum dma_data_direction dir)
 {
        struct safe_buffer *buf;
+       unsigned long off;
 
        dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
                __func__, addr, off, sz, dir);
@@ -373,6 +373,8 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
        if (!buf)
                return 1;
 
+       off = addr - buf->safe_dma_addr;
+
        BUG_ON(buf->direction != dir);
 
        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -388,12 +390,21 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
        }
        return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
-int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
-               unsigned long off, size_t sz, enum dma_data_direction dir)
+static void dmabounce_sync_for_cpu(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+               return;
+
+       arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+}
+
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+               size_t sz, enum dma_data_direction dir)
 {
        struct safe_buffer *buf;
+       unsigned long off;
 
        dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
                __func__, addr, off, sz, dir);
@@ -402,6 +413,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
        if (!buf)
                return 1;
 
+       off = addr - buf->safe_dma_addr;
+
        BUG_ON(buf->direction != dir);
 
        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -417,7 +430,38 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
        }
        return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_device);
+
+static void dmabounce_sync_for_device(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+               return;
+
+       arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
+
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+       if (dev->archdata.dmabounce)
+               return 0;
+
+       return arm_dma_ops.set_dma_mask(dev, dma_mask);
+}
+
+static struct dma_map_ops dmabounce_ops = {
+       .alloc                  = arm_dma_alloc,
+       .free                   = arm_dma_free,
+       .mmap                   = arm_dma_mmap,
+       .map_page               = dmabounce_map_page,
+       .unmap_page             = dmabounce_unmap_page,
+       .sync_single_for_cpu    = dmabounce_sync_for_cpu,
+       .sync_single_for_device = dmabounce_sync_for_device,
+       .map_sg                 = arm_dma_map_sg,
+       .unmap_sg               = arm_dma_unmap_sg,
+       .sync_sg_for_cpu        = arm_dma_sync_sg_for_cpu,
+       .sync_sg_for_device     = arm_dma_sync_sg_for_device,
+       .set_dma_mask           = dmabounce_set_mask,
+};
 
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
                const char *name, unsigned long size)
@@ -479,6 +523,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 #endif
 
        dev->archdata.dmabounce = device_info;
+       set_dma_ops(dev, &dmabounce_ops);
 
        dev_info(dev, "dmabounce: registered device\n");
 
@@ -497,6 +542,7 @@ void dmabounce_unregister_dev(struct device *dev)
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 
        dev->archdata.dmabounce = NULL;
+       set_dma_ops(dev, NULL);
 
        if (!device_info) {
                dev_warn(dev,
index 09a02963cf58ab9fadbc6fc5d9ecfffdf2ba35c2..e05a2f1665a78c1b6929271aed7b69a054247ef3 100644 (file)
@@ -33,6 +33,7 @@ CONFIG_MACH_IMX27LITE=y
 CONFIG_MACH_PCA100=y
 CONFIG_MACH_MXT_TD60=y
 CONFIG_MACH_IMX27IPCAM=y
+CONFIG_MACH_IMX27_DT=y
 CONFIG_MXC_IRQ_PRIOR=y
 CONFIG_MXC_PWM=y
 CONFIG_NO_HZ=y
@@ -172,7 +173,7 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_PCF8563=y
 CONFIG_RTC_DRV_IMXDI=y
-CONFIG_RTC_MXC=y
+CONFIG_RTC_DRV_MXC=y
 CONFIG_DMADEVICES=y
 CONFIG_IMX_SDMA=y
 CONFIG_IMX_DMA=y
index dc6f6411bbf5e9fb640831da44aa36f3d82cbb5c..b1d3675df72cd2c8abb336e05cd01854b9fe7918 100644 (file)
@@ -64,6 +64,12 @@ CONFIG_IPV6=y
 # CONFIG_WIRELESS is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_SST25L=y
 # CONFIG_STANDALONE is not set
 CONFIG_CONNECTOR=y
 CONFIG_BLK_DEV_LOOP=y
@@ -172,7 +178,7 @@ CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_INTF_DEV_UIE_EMUL=y
-CONFIG_RTC_MXC=y
+CONFIG_RTC_DRV_MXC=y
 CONFIG_DMADEVICES=y
 CONFIG_IMX_SDMA=y
 CONFIG_EXT2_FS=y
index 1ebbf451c48d39076446dd84406e00fb5fcb9b6e..5406c23a02e3b2dafa323300222690094f38326d 100644 (file)
@@ -22,6 +22,7 @@ CONFIG_BLK_DEV_INTEGRITY=y
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARCH_MXS=y
+CONFIG_MACH_MXS_DT=y
 CONFIG_MACH_MX23EVK=y
 CONFIG_MACH_MX28EVK=y
 CONFIG_MACH_STMP378X_DEVB=y
diff --git a/arch/arm/configs/prima2_defconfig b/arch/arm/configs/prima2_defconfig
new file mode 100644 (file)
index 0000000..c328ac6
--- /dev/null
@@ -0,0 +1,69 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_ARCH_PRIMA2=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_KEXEC=y
+CONFIG_BINFMT_MISC=y
+CONFIG_PM_RUNTIME=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_SERIAL_SIRFSOC=y
+CONFIG_SERIAL_SIRFSOC_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_SIRF=y
+CONFIG_SPI=y
+CONFIG_SPI_SIRF=y
+CONFIG_SPI_SPIDEV=y
+# CONFIG_HWMON is not set
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB_GADGET=y
+CONFIG_USB_FILE_STORAGE=m
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_DMADEVICES=y
+CONFIG_DMADEVICES_DEBUG=y
+CONFIG_DMADEVICES_VDEBUG=y
+CONFIG_SIRF_DMA=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CRAMFS=y
+CONFIG_ROMFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_INFO=y
+CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/spear13xx_defconfig b/arch/arm/configs/spear13xx_defconfig
new file mode 100644 (file)
index 0000000..1fdb826
--- /dev/null
@@ -0,0 +1,95 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR13XX=y
+CONFIG_MACH_SPEAR1310=y
+CONFIG_MACH_SPEAR1340=y
+# CONFIG_SWP_EMULATE is not set
+CONFIG_SMP=y
+# CONFIG_SMP_ON_UP is not set
+# CONFIG_ARM_CPU_TOPOLOGY is not set
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_FSMC=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_PATA_ARASAN_CF=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_SPEAR=y
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=8192
+CONFIG_I2C=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_SPI=y
+CONFIG_SPI_PL022=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_PL061=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_MPCORE_WATCHDOG=y
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_SPEAR=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_DW_DMAC=y
+CONFIG_DMATEST=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_AUTOFS4_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=m
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_INFO=y
index 7ed42912d69ae5f764dbf41a2da875bb18d656d9..865980c5f21288115315a238900934c7cc50dc22 100644 (file)
@@ -14,6 +14,9 @@ CONFIG_BINFMT_MISC=y
 CONFIG_NET=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_FSMC=y
 CONFIG_BLK_DEV_RAM=y
@@ -73,6 +76,7 @@ CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
 CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
 CONFIG_NLS_DEFAULT="utf8"
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=m
index cf94bc73a0e09318bed41dc0f13bed63c60fdc5e..a2a1265f86b63fd06aee7ce9a87e697835338bef 100644 (file)
@@ -8,11 +8,13 @@ CONFIG_MODVERSIONS=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_PLAT_SPEAR=y
 CONFIG_ARCH_SPEAR6XX=y
-CONFIG_BOARD_SPEAR600_DT=y
 CONFIG_BINFMT_MISC=y
 CONFIG_NET=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_FSMC=y
 CONFIG_BLK_DEV_RAM=y
@@ -64,6 +66,7 @@ CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
 CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
 CONFIG_NLS_DEFAULT="utf8"
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=m
index 351d6708c3aeadde00cce5c6c2ca18cc94b9d11a..1198dd61c7c49e6b1035082db78f77d09846bbbf 100644 (file)
@@ -45,6 +45,7 @@ CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
+CONFIG_PM_RUNTIME=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -91,6 +92,8 @@ CONFIG_USB_NET_SMSC75XX=y
 CONFIG_USB_NET_SMSC95XX=y
 # CONFIG_WLAN is not set
 CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_MPU3050=y
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
@@ -103,12 +106,15 @@ CONFIG_I2C=y
 CONFIG_I2C_TEGRA=y
 CONFIG_SPI=y
 CONFIG_SPI_TEGRA=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_BATTERY_SBS=y
 CONFIG_SENSORS_LM90=y
 CONFIG_MFD_TPS6586X=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
 CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_TPS62360=y
 CONFIG_REGULATOR_TPS6586X=y
 CONFIG_SOUND=y
 CONFIG_SND=y
@@ -133,16 +139,19 @@ CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_TEGRA=y
 CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EM3027=y
 CONFIG_RTC_DRV_TEGRA=y
 CONFIG_STAGING=y
-CONFIG_IIO=y
 CONFIG_SENSORS_ISL29018=y
+CONFIG_SENSORS_ISL29028=y
 CONFIG_SENSORS_AK8975=y
 CONFIG_MFD_NVEC=y
 CONFIG_KEYBOARD_NVEC=y
 CONFIG_SERIO_NVEC_PS2=y
 CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_MEMORY=y
+CONFIG_IIO=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
index 7e84f453e8a6f07e76c182badb2ee055eda1bde6..2d4f661d1cf6e757739429a4ba7730ea7be14eeb 100644 (file)
@@ -75,6 +75,7 @@ CONFIG_AB5500_CORE=y
 CONFIG_AB8500_CORE=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_AB8500=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 # CONFIG_HID_SUPPORT is not set
 CONFIG_USB_GADGET=y
 CONFIG_AB8500_USB=y
index 7aa368003b05220cc1f0ba9301a15e5699db2278..b69c0d3285f8e1bcf2193b05f4faed3a169597b4 100644 (file)
@@ -7,12 +7,16 @@
 #define ASMARM_DEVICE_H
 
 struct dev_archdata {
+       struct dma_map_ops      *dma_ops;
 #ifdef CONFIG_DMABOUNCE
        struct dmabounce_device_info *dmabounce;
 #endif
 #ifdef CONFIG_IOMMU_API
        void *iommu; /* private IOMMU data */
 #endif
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+       struct dma_iommu_mapping        *mapping;
+#endif
 };
 
 struct omap_device;
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
new file mode 100644 (file)
index 0000000..3ed37b4
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef ASMARM_DMA_CONTIGUOUS_H
+#define ASMARM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+
+#endif
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
new file mode 100644 (file)
index 0000000..799b094
--- /dev/null
@@ -0,0 +1,34 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+
+struct dma_iommu_mapping {
+       /* iommu specific data */
+       struct iommu_domain     *domain;
+
+       void                    *bitmap;
+       size_t                  bits;
+       unsigned int            order;
+       dma_addr_t              base;
+
+       spinlock_t              lock;
+       struct kref             kref;
+};
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+                        int order);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+                                       struct dma_iommu_mapping *mapping);
+
+#endif /* __KERNEL__ */
+#endif
index cb3b7c981c4b729c31c8dbf33c9fa4334edfc07d..bbef15d04890b7c1ef4a9d4afec77867de1fc72d 100644 (file)
@@ -5,11 +5,35 @@
 
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-attrs.h>
 #include <linux/dma-debug.h>
 
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
 
+#define DMA_ERROR_CODE (~0)
+extern struct dma_map_ops arm_dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+       if (dev && dev->archdata.dma_ops)
+               return dev->archdata.dma_ops;
+       return &arm_dma_ops;
+}
+
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+       BUG_ON(!dev);
+       dev->archdata.dma_ops = ops;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+       return get_dma_ops(dev)->set_dma_mask(dev, mask);
+}
+
 #ifdef __arch_page_to_dma
 #error Please update to __arch_pfn_to_dma
 #endif
@@ -61,69 +85,12 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 }
 #endif
 
-/*
- * The DMA API is built upon the notion of "buffer ownership".  A buffer
- * is either exclusively owned by the CPU (and therefore may be accessed
- * by it) or exclusively owned by the DMA device.  These helper functions
- * represent the transitions between these two ownership states.
- *
- * Note, however, that on later ARMs, this notion does not work due to
- * speculative prefetches.  We model our approach on the assumption that
- * the CPU does do speculative prefetches, which means we clean caches
- * before transfers and delay cache invalidation until transfer completion.
- *
- * Private support functions: these are not part of the API and are
- * liable to change.  Drivers must not use these.
- */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
-       enum dma_data_direction dir)
-{
-       extern void ___dma_single_cpu_to_dev(const void *, size_t,
-               enum dma_data_direction);
-
-       if (!arch_is_coherent())
-               ___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
-       enum dma_data_direction dir)
-{
-       extern void ___dma_single_dev_to_cpu(const void *, size_t,
-               enum dma_data_direction);
-
-       if (!arch_is_coherent())
-               ___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-       size_t size, enum dma_data_direction dir)
-{
-       extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
-               size_t, enum dma_data_direction);
-
-       if (!arch_is_coherent())
-               ___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-       size_t size, enum dma_data_direction dir)
-{
-       extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
-               size_t, enum dma_data_direction);
-
-       if (!arch_is_coherent())
-               ___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-extern int dma_supported(struct device *, u64);
-extern int dma_set_mask(struct device *, u64);
-
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-       return dma_addr == ~0;
+       return dma_addr == DMA_ERROR_CODE;
 }
 
 /*
@@ -141,69 +108,118 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
 {
 }
 
+extern int dma_supported(struct device *dev, u64 mask);
+
 /**
- * dma_alloc_coherent - allocate consistent memory for DMA
+ * arm_dma_alloc - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @size: required memory size
  * @handle: bus-specific DMA address
+ * @attrs: optional attributes that specify mapping properties
  *
- * Allocate some uncached, unbuffered memory for a device for
- * performing DMA.  This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
+ * Allocate some memory for a device for performing DMA.  This function
+ * allocates pages, and will return the CPU-viewed address, and sets @handle
+ * to be the device-viewed address.
  */
-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+                          gfp_t gfp, struct dma_attrs *attrs);
+
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+                                      dma_addr_t *dma_handle, gfp_t flag,
+                                      struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+       void *cpu_addr;
+       BUG_ON(!ops);
+
+       cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+       debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+       return cpu_addr;
+}
 
 /**
- * dma_free_coherent - free memory allocated by dma_alloc_coherent
+ * arm_dma_free - free memory allocated by arm_dma_alloc
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @size: size of memory originally requested in dma_alloc_coherent
  * @cpu_addr: CPU-view address returned from dma_alloc_coherent
  * @handle: device-view address returned from dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
  *
  * Free (and unmap) a DMA buffer previously allocated by
- * dma_alloc_coherent().
+ * arm_dma_alloc().
  *
  * References to memory and mappings associated with cpu_addr/handle
  * during and after this call executing are illegal.
  */
-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+                        dma_addr_t handle, struct dma_attrs *attrs);
+
+#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+                                    void *cpu_addr, dma_addr_t dma_handle,
+                                    struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+       BUG_ON(!ops);
+
+       debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+       ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
 
 /**
- * dma_mmap_coherent - map a coherent DMA allocation into user space
+ * arm_dma_mmap - map a coherent DMA allocation into user space
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @vma: vm_area_struct describing requested user mapping
  * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
  * @handle: device-view address returned from dma_alloc_coherent
  * @size: size of memory originally requested in dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
  *
  * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
  * into user space.  The coherent DMA buffer must not be freed by the
  * driver until the user space mapping has been released.
  */
-int dma_mmap_coherent(struct device *, struct vm_area_struct *,
-               void *, dma_addr_t, size_t);
+extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+                       void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                       struct dma_attrs *attrs);
 
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
 
-/**
- * dma_alloc_writecombine - allocate writecombining memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- *
- * Allocate some uncached, buffered memory for a device for
- * performing DMA.  This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
- */
-extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
-               gfp_t);
+static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size, struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+       BUG_ON(!ops);
+       return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+                                      dma_addr_t *dma_handle, gfp_t flag)
+{
+       DEFINE_DMA_ATTRS(attrs);
+       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+       return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
 
-#define dma_free_writecombine(dev,size,cpu_addr,handle) \
-       dma_free_coherent(dev,size,cpu_addr,handle)
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+                                    void *cpu_addr, dma_addr_t dma_handle)
+{
+       DEFINE_DMA_ATTRS(attrs);
+       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+       return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
 
-int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
-               void *, dma_addr_t, size_t);
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+                     void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+       DEFINE_DMA_ATTRS(attrs);
+       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+       return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
 
 /*
  * This can be called during boot to increase the size of the consistent
@@ -212,8 +228,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
  */
 extern void __init init_consistent_dma_size(unsigned long size);
 
-
-#ifdef CONFIG_DMABOUNCE
 /*
  * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"
  * and utilize bounce buffers as needed to work around limited DMA windows.
@@ -253,222 +267,19 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
  */
 extern void dmabounce_unregister_dev(struct device *);
 
-/*
- * The DMA API, implemented by dmabounce.c.  See below for descriptions.
- */
-extern dma_addr_t __dma_map_page(struct device *, struct page *,
-               unsigned long, size_t, enum dma_data_direction);
-extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
-               enum dma_data_direction);
-
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
-               size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
-               size_t, enum dma_data_direction);
-#else
-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
-       unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-       return 1;
-}
 
-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
-       unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-       return 1;
-}
-
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-            unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-       __dma_page_cpu_to_dev(page, offset, size, dir);
-       return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir)
-{
-       __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-               handle & ~PAGE_MASK, size, dir);
-}
-#endif /* CONFIG_DMABOUNCE */
-
-/**
- * dma_map_single - map a single buffer for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @cpu_addr: CPU direct mapped address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_single() or
- * dma_sync_single_for_cpu().
- */
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-               size_t size, enum dma_data_direction dir)
-{
-       unsigned long offset;
-       struct page *page;
-       dma_addr_t addr;
-
-       BUG_ON(!virt_addr_valid(cpu_addr));
-       BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
-       BUG_ON(!valid_dma_direction(dir));
-
-       page = virt_to_page(cpu_addr);
-       offset = (unsigned long)cpu_addr & ~PAGE_MASK;
-       addr = __dma_map_page(dev, page, offset, size, dir);
-       debug_dma_map_page(dev, page, offset, size, dir, addr, true);
-
-       return addr;
-}
-
-/**
- * dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_page().
- */
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-            unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-       dma_addr_t addr;
-
-       BUG_ON(!valid_dma_direction(dir));
-
-       addr = __dma_map_page(dev, page, offset, size, dir);
-       debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-       return addr;
-}
-
-/**
- * dma_unmap_single - unmap a single buffer previously mapped
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_single)
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Unmap a single streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_single() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir)
-{
-       debug_dma_unmap_page(dev, handle, size, dir, true);
-       __dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Unmap a page streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_page() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir)
-{
-       debug_dma_unmap_page(dev, handle, size, dir, false);
-       __dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_sync_single_range_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @offset: offset of region to start sync
- * @size: size of region to sync
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Make physical memory consistent for a single streaming mode DMA
- * translation after a transfer.
- *
- * If you perform a dma_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so.  At the
- * next point you give the PCI dma address back to the card, you
- * must first the perform a dma_sync_for_device, and then the
- * device again owns the buffer.
- */
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-               dma_addr_t handle, unsigned long offset, size_t size,
-               enum dma_data_direction dir)
-{
-       BUG_ON(!valid_dma_direction(dir));
-
-       debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
-
-       if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
-               return;
-
-       __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-               dma_addr_t handle, unsigned long offset, size_t size,
-               enum dma_data_direction dir)
-{
-       BUG_ON(!valid_dma_direction(dir));
-
-       debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
-
-       if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
-               return;
-
-       __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       dma_sync_single_range_for_device(dev, handle, 0, size, dir);
-}
 
 /*
  * The scatter list versions of the above methods.
  */
-extern int dma_map_sg(struct device *, struct scatterlist *, int,
-               enum dma_data_direction);
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
+extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
+               enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
+               enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
-extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
-               enum dma_data_direction);
-
 
 #endif /* __KERNEL__ */
 #endif
index 33c78d7af2e15aaf2b7da93b708920fcefab9357..4eea2107214b7374ec5af34bd5c5a5e9425e5a64 100644 (file)
 #define PL080_WIDTH_16BIT                      (0x1)
 #define PL080_WIDTH_32BIT                      (0x2)
 
+#define PL080N_CONFIG_ITPROT                   (1 << 20)
+#define PL080N_CONFIG_SECPROT                  (1 << 19)
 #define PL080_CONFIG_HALT                      (1 << 18)
 #define PL080_CONFIG_ACTIVE                    (1 << 17)  /* RO */
 #define PL080_CONFIG_LOCK                      (1 << 16)
index 9af5563dd3ebbc6be0f53448e107a05249cca859..815c669fec0a1f52665120604c774dbb6e9b2e41 100644 (file)
@@ -47,9 +47,9 @@ extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
 extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
 extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 
-#define __raw_writeb(v,a)      (__chk_io_ptr(a), *(volatile unsigned char __force  *)(a) = (v))
-#define __raw_writew(v,a)      (__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))
-#define __raw_writel(v,a)      (__chk_io_ptr(a), *(volatile unsigned int __force   *)(a) = (v))
+#define __raw_writeb(v,a)      ((void)(__chk_io_ptr(a), *(volatile unsigned char __force  *)(a) = (v)))
+#define __raw_writew(v,a)      ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
+#define __raw_writel(v,a)      ((void)(__chk_io_ptr(a), *(volatile unsigned int __force   *)(a) = (v)))
 
 #define __raw_readb(a)         (__chk_io_ptr(a), *(volatile unsigned char __force  *)(a))
 #define __raw_readw(a)         (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
@@ -229,11 +229,9 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 #define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
                                        __raw_readl(c)); __r; })
 
-#define writeb_relaxed(v,c)    ((void)__raw_writeb(v,c))
-#define writew_relaxed(v,c)    ((void)__raw_writew((__force u16) \
-                                       cpu_to_le16(v),c))
-#define writel_relaxed(v,c)    ((void)__raw_writel((__force u32) \
-                                       cpu_to_le32(v),c))
+#define writeb_relaxed(v,c)    __raw_writeb(v,c)
+#define writew_relaxed(v,c)    __raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c)    __raw_writel((__force u32) cpu_to_le32(v),c)
 
 #define readb(c)               ({ u8  __v = readb_relaxed(c); __iormb(); __v; })
 #define readw(c)               ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
@@ -281,12 +279,12 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 #define ioread16be(p)  ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
 #define ioread32be(p)  ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
 
-#define iowrite8(v,p)  ({ __iowmb(); (void)__raw_writeb(v, p); })
-#define iowrite16(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); })
-#define iowrite32(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); })
+#define iowrite8(v,p)  ({ __iowmb(); __raw_writeb(v, p); })
+#define iowrite16(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); })
+#define iowrite32(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); })
 
-#define iowrite16be(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_be16(v), p); })
-#define iowrite32be(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_be32(v), p); })
+#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
 
 #define ioread8_rep(p,d,c)     __raw_readsb(p,d,c)
 #define ioread16_rep(p,d,c)    __raw_readsw(p,d,c)
diff --git a/arch/arm/include/asm/kvm_para.h b/arch/arm/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index d7692cafde7fdd3d0b4e297620fbf2dd45d611d6..0b1c94b8c65226a85b2a8e696ec51c7cfaea6527 100644 (file)
@@ -43,6 +43,7 @@ struct machine_desc {
        void                    (*init_irq)(void);
        struct sys_timer        *timer;         /* system tick timer    */
        void                    (*init_machine)(void);
+       void                    (*init_late)(void);
 #ifdef CONFIG_MULTI_IRQ_HANDLER
        void                    (*handle_irq)(struct pt_regs *);
 #endif
index b36f3654bf54ebcc9e1c9617663c706d81bcfd97..a6efcdd6fd25135803d906329675f989e3fdb8ea 100644 (file)
@@ -30,6 +30,7 @@ struct map_desc {
 #define MT_MEMORY_DTCM         12
 #define MT_MEMORY_ITCM         13
 #define MT_MEMORY_SO           14
+#define MT_MEMORY_DMA_READY    15
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
index efdf99045d879e240b9bc41f1f0781efea6ac10f..d2de9cbbcd9bcaf6a9e5b76eefac1f8c8eb7b39d 100644 (file)
@@ -22,9 +22,6 @@
 typedef unsigned short         __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short         __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short         __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index 68388eb4946bda7a26a6c06868c51b890411a5a6..b79f8e97f7755f22d82ae20ee00442cd11f7af02 100644 (file)
@@ -148,6 +148,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_NOTIFY_RESUME      2       /* callback before returning to user */
 #define TIF_SYSCALL_TRACE      8
 #define TIF_SYSCALL_AUDIT      9
+#define TIF_SYSCALL_RESTARTSYS 10
 #define TIF_POLLING_NRFLAG     16
 #define TIF_USING_IWMMXT       17
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
@@ -162,16 +163,17 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT      (1 << TIF_USING_IWMMXT)
-#define _TIF_RESTORE_SIGMASK   (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
+#define _TIF_SYSCALL_RESTARTSYS        (1 << TIF_SYSCALL_RESTARTSYS)
 
 /* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+                          _TIF_SYSCALL_RESTARTSYS)
 
 /*
  * Change these and you break ASM code in entry-common.S
  */
-#define _TIF_WORK_MASK         0x000000ff
+#define _TIF_WORK_MASK         (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_RESUME)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_ARM_THREAD_INFO_H */
index 7bd2d3cb8957d2aeb6ba85baa2a5f6be49c1d3b8..4afed88d250a6f6127a8a42a7a3462394b078bfc 100644 (file)
@@ -53,9 +53,13 @@ fast_work_pending:
 work_pending:
        tst     r1, #_TIF_NEED_RESCHED
        bne     work_resched
-       tst     r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
-       beq     no_work_pending
+       /*
+        * TIF_SIGPENDING or TIF_NOTIFY_RESUME must've been set if we got here
+        */
+       ldr     r2, [sp, #S_PSR]
        mov     r0, sp                          @ 'regs'
+       tst     r2, #15                         @ are we returning to user mode?
+       bne     no_work_pending                 @ no?  just leave, then...
        mov     r2, why                         @ 'syscall'
        tst     r1, #_TIF_SIGPENDING            @ delivering a signal?
        movne   why, #0                         @ prevent further restarts
index 14e38261cd31db9d852db2eb0b8046251a04613d..5700a7ae7f0bc1511ae048d7a3f025c401e5729c 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/regset.h>
 #include <linux/audit.h>
 #include <linux/tracehook.h>
+#include <linux/unistd.h>
 
 #include <asm/pgtable.h>
 #include <asm/traps.h>
@@ -917,6 +918,8 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
                audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
                                    regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
 
+       if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
+               scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
        if (!test_thread_flag(TIF_SYSCALL_TRACE))
                return scno;
 
index ebfac782593f048c9cf81a5619f0b3d24900ca8c..e15d83bb4ea378c1316db6a7d705eab30248a98b 100644 (file)
@@ -81,6 +81,7 @@ __setup("fpe=", fpe_setup);
 extern void paging_init(struct machine_desc *desc);
 extern void sanity_check_meminfo(void);
 extern void reboot_setup(char *str);
+extern void setup_dma_zone(struct machine_desc *desc);
 
 unsigned int processor_id;
 EXPORT_SYMBOL(processor_id);
@@ -800,6 +801,14 @@ static int __init customize_machine(void)
 }
 arch_initcall(customize_machine);
 
+static int __init init_machine_late(void)
+{
+       if (machine_desc->init_late)
+               machine_desc->init_late();
+       return 0;
+}
+late_initcall(init_machine_late);
+
 #ifdef CONFIG_KEXEC
 static inline unsigned long long get_total_mem(void)
 {
@@ -939,12 +948,8 @@ void __init setup_arch(char **cmdline_p)
        machine_desc = mdesc;
        machine_name = mdesc->name;
 
-#ifdef CONFIG_ZONE_DMA
-       if (mdesc->dma_zone_size) {
-               extern unsigned long arm_dma_zone_size;
-               arm_dma_zone_size = mdesc->dma_zone_size;
-       }
-#endif
+       setup_dma_zone(mdesc);
+
        if (mdesc->restart_mode)
                reboot_setup(&mdesc->restart_mode);
 
index 4e5fdd9bd9e39778b64c5354544b1c7f9fe89d77..fd2392a17ac1befb3464b5a0e77960a0f862b4ca 100644 (file)
 
 #include "signal.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * For ARM syscalls, we encode the syscall number into the instruction.
  */
 #define SWI_SYS_SIGRETURN      (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
 #define SWI_SYS_RT_SIGRETURN   (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-#define SWI_SYS_RESTART                (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
 
 /*
  * With EABI, the syscall number has to be loaded into r7.
@@ -49,18 +46,6 @@ const unsigned long sigreturn_codes[7] = {
        MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
 };
 
-/*
- * Either we support OABI only, or we have EABI with the OABI
- * compat layer enabled.  In the later case we don't know if
- * user space is EABI or not, and if not we must not clobber r7.
- * Always using the OABI syscall solves that issue and works for
- * all those cases.
- */
-const unsigned long syscall_restart_code[2] = {
-       SWI_SYS_RESTART,        /* swi  __NR_restart_syscall */
-       0xe49df004,             /* ldr  pc, [sp], #4 */
-};
-
 /*
  * atomically swap in the new signal mask, and wait for a signal.
  */
@@ -82,10 +67,10 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
                old_sigset_t mask;
                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+                   __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+                   __get_user(mask, &act->sa_mask))
                        return -EFAULT;
-               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-               __get_user(mask, &act->sa_mask);
                siginitset(&new_ka.sa.sa_mask, mask);
        }
 
@@ -94,10 +79,10 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+                   __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
-               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
        }
 
        return ret;
@@ -223,10 +208,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
        int err;
 
        err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
-       if (err == 0) {
-               sigdelsetmask(&set, ~_BLOCKABLE);
+       if (err == 0)
                set_current_blocked(&set);
-       }
 
        __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
        __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
@@ -541,13 +524,13 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
 /*
  * OK, we're invoking a handler
  */    
-static int
+static void
 handle_signal(unsigned long sig, struct k_sigaction *ka,
-             siginfo_t *info, sigset_t *oldset,
-             struct pt_regs * regs)
+             siginfo_t *info, struct pt_regs *regs)
 {
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = current;
+       sigset_t *oldset = sigmask_to_save();
        int usig = sig;
        int ret;
 
@@ -572,17 +555,9 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 
        if (ret != 0) {
                force_sigsegv(sig, tsk);
-               return ret;
+               return;
        }
-
-       /*
-        * Block the signal if we were successful.
-        */
-       block_sigmask(ka, sig);
-
-       tracehook_signal_handler(sig, info, ka, regs, 0);
-
-       return 0;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -601,15 +576,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
        siginfo_t info;
        int signr;
 
-       /*
-        * We want the common case to go fast, which
-        * is why we may in certain cases get here from
-        * kernel mode. Just return without doing anything
-        * if so.
-        */
-       if (!user_mode(regs))
-               return;
-
        /*
         * If we were from a system call, check for system call restarting...
         */
@@ -626,58 +592,39 @@ static void do_signal(struct pt_regs *regs, int syscall)
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
+               case -ERESTART_RESTARTBLOCK:
                        regs->ARM_r0 = regs->ARM_ORIG_r0;
                        regs->ARM_pc = restart_addr;
                        break;
-               case -ERESTART_RESTARTBLOCK:
-                       regs->ARM_r0 = -EINTR;
-                       break;
                }
        }
 
-       if (try_to_freeze())
-               goto no_signal;
-
        /*
         * Get the signal to deliver.  When running under ptrace, at this
         * point the debugger may change all our registers ...
         */
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
-               sigset_t *oldset;
-
                /*
                 * Depending on the signal settings we may need to revert the
                 * decision to restart the system call.  But skip this if a
                 * debugger has chosen to restart at a different PC.
                 */
                if (regs->ARM_pc == restart_addr) {
-                       if (retval == -ERESTARTNOHAND
+                       if (retval == -ERESTARTNOHAND ||
+                           retval == -ERESTART_RESTARTBLOCK
                            || (retval == -ERESTARTSYS
                                && !(ka.sa.sa_flags & SA_RESTART))) {
                                regs->ARM_r0 = -EINTR;
                                regs->ARM_pc = continue_addr;
                        }
+                       clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
                }
 
-               if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                       oldset = &current->saved_sigmask;
-               else
-                       oldset = &current->blocked;
-               if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag.
-                        */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
+               handle_signal(signr, &ka, &info, regs);
                return;
        }
 
- no_signal:
        if (syscall) {
                /*
                 * Handle restarting a different system call.  As above,
@@ -685,38 +632,11 @@ static void do_signal(struct pt_regs *regs, int syscall)
                 * ignore the restart.
                 */
                if (retval == -ERESTART_RESTARTBLOCK
-                   && regs->ARM_pc == continue_addr) {
-                       if (thumb_mode(regs)) {
-                               regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
-                               regs->ARM_pc -= 2;
-                       } else {
-#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
-                               regs->ARM_r7 = __NR_restart_syscall;
-                               regs->ARM_pc -= 4;
-#else
-                               u32 __user *usp;
-
-                               regs->ARM_sp -= 4;
-                               usp = (u32 __user *)regs->ARM_sp;
-
-                               if (put_user(regs->ARM_pc, usp) == 0) {
-                                       regs->ARM_pc = KERN_RESTART_CODE;
-                               } else {
-                                       regs->ARM_sp += 4;
-                                       force_sigsegv(0, current);
-                               }
-#endif
-                       }
-               }
-
-               /* If there's no signal to deliver, we just put the saved sigmask
-                * back.
-                */
-               if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-                       clear_thread_flag(TIF_RESTORE_SIGMASK);
-                       sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-               }
+                   && regs->ARM_pc == restart_addr)
+                       set_thread_flag(TIF_SYSCALL_RESTARTSYS);
        }
+
+       restore_saved_sigmask();
 }
 
 asmlinkage void
@@ -728,7 +648,5 @@ do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
        if (thread_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index 6fcfe8398aa473051fbf72396986a84dc700978f..5ff067b7c7522f428b4343832ecb759b7ccf16de 100644 (file)
@@ -8,7 +8,5 @@
  * published by the Free Software Foundation.
  */
 #define KERN_SIGRETURN_CODE    (CONFIG_VECTORS_BASE + 0x00000500)
-#define KERN_RESTART_CODE      (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
 
 extern const unsigned long sigreturn_codes[7];
-extern const unsigned long syscall_restart_code[2];
index b735521a4a5441f7764591bc06352d86b1ddae80..2c7217d971db0b42b9e1f5859459e18f9294f662 100644 (file)
@@ -109,7 +109,6 @@ static void percpu_timer_stop(void);
 int __cpu_disable(void)
 {
        unsigned int cpu = smp_processor_id();
-       struct task_struct *p;
        int ret;
 
        ret = platform_cpu_disable(cpu);
@@ -139,12 +138,7 @@ int __cpu_disable(void)
        flush_cache_all();
        local_flush_tlb_all();
 
-       read_lock(&tasklist_lock);
-       for_each_process(p) {
-               if (p->mm)
-                       cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
-       }
-       read_unlock(&tasklist_lock);
+       clear_tasks_mm_cpumask(cpu);
 
        return 0;
 }
index 3647170e9a16ba3aa8838218ed99e74dbc5b7c59..4928d89758f4ce0dea767acdf9fcf2299dff7833 100644 (file)
@@ -820,8 +820,6 @@ void __init early_trap_init(void *vectors_base)
         */
        memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
               sigreturn_codes, sizeof(sigreturn_codes));
-       memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
-              syscall_restart_code, sizeof(syscall_restart_code));
 
        flush_icache_range(vectors, vectors + PAGE_SIZE);
        modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
index f6747246d64911ab3c61cc2a38ffd2c80c1f7fb4..933fc9afe7d091db78f5484d4ca331a50cb50716 100644 (file)
@@ -436,7 +436,6 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
        atslave->dma_dev = &at_hdmac_device.dev;
        atslave->cfg = ATC_FIFOCFG_HALFFIFO
                        | ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW;
-       atslave->ctrla = ATC_SCSIZE_16 | ATC_DCSIZE_16;
        if (mmc_id == 0)        /* MCI0 */
                atslave->cfg |= ATC_SRC_PER(AT_DMA_ID_MCI0)
                              | ATC_DST_PER(AT_DMA_ID_MCI0);
index fff48d1a0f4efad0a6ca03619cda828e7acfafe3..cab0997be3de20ff6b574e9c3a4173b9268166b9 100644 (file)
@@ -26,18 +26,11 @@ struct at_dma_platform_data {
 /**
  * struct at_dma_slave - Controller-specific information about a slave
  * @dma_dev: required DMA master device
- * @tx_reg: physical address of data register used for
- *     memory-to-peripheral transfers
- * @rx_reg: physical address of data register used for
- *     peripheral-to-memory transfers
- * @reg_width: peripheral register width
  * @cfg: Platform-specific initializer for the CFG register
- * @ctrla: Platform-specific initializer for the CTRLA register
  */
 struct at_dma_slave {
        struct device           *dma_dev;
        u32                     cfg;
-       u32                     ctrla;
 };
 
 
@@ -64,24 +57,5 @@ struct at_dma_slave {
 #define                ATC_FIFOCFG_HALFFIFO            (0x1 << 28)
 #define                ATC_FIFOCFG_ENOUGHSPACE         (0x2 << 28)
 
-/* Platform-configurable bits in CTRLA */
-#define        ATC_SCSIZE_MASK         (0x7 << 16)     /* Source Chunk Transfer Size */
-#define                ATC_SCSIZE_1            (0x0 << 16)
-#define                ATC_SCSIZE_4            (0x1 << 16)
-#define                ATC_SCSIZE_8            (0x2 << 16)
-#define                ATC_SCSIZE_16           (0x3 << 16)
-#define                ATC_SCSIZE_32           (0x4 << 16)
-#define                ATC_SCSIZE_64           (0x5 << 16)
-#define                ATC_SCSIZE_128          (0x6 << 16)
-#define                ATC_SCSIZE_256          (0x7 << 16)
-#define        ATC_DCSIZE_MASK         (0x7 << 20)     /* Destination Chunk Transfer Size */
-#define                ATC_DCSIZE_1            (0x0 << 20)
-#define                ATC_DCSIZE_4            (0x1 << 20)
-#define                ATC_DCSIZE_8            (0x2 << 20)
-#define                ATC_DCSIZE_16           (0x3 << 20)
-#define                ATC_DCSIZE_32           (0x4 << 20)
-#define                ATC_DCSIZE_64           (0x5 << 20)
-#define                ATC_DCSIZE_128          (0x6 << 20)
-#define                ATC_DCSIZE_256          (0x7 << 20)
 
 #endif /* AT_HDMAC_H */
index dc1afe5be20cc3d305c178faaec0bfd1633e4e3f..0031864e7f116908b23b94a0a7a1df8d13e33b69 100644 (file)
@@ -681,6 +681,7 @@ MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM")
        .init_irq       = cp_intc_init,
        .timer          = &davinci_timer,
        .init_machine   = da830_evm_init,
+       .init_late      = davinci_init_late,
        .dma_zone_size  = SZ_128M,
        .restart        = da8xx_restart,
 MACHINE_END
index 09f61073c8d9993ea0df99fc1c991d5890c4786e..0149fb453be3cd0f83b48cc4290d02d35038925c 100644 (file)
@@ -1411,6 +1411,7 @@ MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM")
        .init_irq       = cp_intc_init,
        .timer          = &davinci_timer,
        .init_machine   = da850_evm_init,
+       .init_late      = davinci_init_late,
        .dma_zone_size  = SZ_128M,
        .restart        = da8xx_restart,
 MACHINE_END
index 82ed753fb36088936014a434c85378890647aefa..1c7b1f46a8f3c3c42284c6e954526fe8b555fa2b 100644 (file)
@@ -357,6 +357,7 @@ MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM")
        .init_irq     = davinci_irq_init,
        .timer        = &davinci_timer,
        .init_machine = dm355_evm_init,
+       .init_late      = davinci_init_late,
        .dma_zone_size  = SZ_128M,
        .restart        = davinci_restart,
 MACHINE_END
index d74a8b3445fbc2cfd6958248cadf6fd04398000c..8e7703213b0822102c8475afe9405865ec6ba8f6 100644 (file)
@@ -276,6 +276,7 @@ MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard")
        .init_irq     = davinci_irq_init,
        .timer        = &davinci_timer,
        .init_machine = dm355_leopard_init,
+       .init_late      = davinci_init_late,
        .dma_zone_size  = SZ_128M,
        .restart        = davinci_restart,
 MACHINE_END
index 5bce2b83bb4fcde59938104b12f76caae6de8594..688a9c556dc9081e518627095ad94f62724a98ad 100644 (file)
@@ -618,6 +618,7 @@ MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM")
        .init_irq       = davinci_irq_init,
        .timer          = &davinci_timer,
        .init_machine   = dm365_evm_init,
+       .init_late      = davinci_init_late,
        .dma_zone_size  = SZ_128M,
        .restart        = davinci_restart,
 MACHINE_END
index 3683306e02453e4d0bf5feb8b943c79a031745b8..d34ed55912b2efd1217a0cb8d5a84bc5b271e232 100644 (file)
@@ -825,6 +825,7 @@ MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM")
        .init_irq     = davinci_irq_init,
        .timer        = &davinci_timer,
        .init_machine = davinci_evm_init,
+       .init_late      = davinci_init_late,
        .dma_zone_size  = SZ_128M,
        .restart        = davinci_restart,
 MACHINE_END
index d72ab948d6309a26f7b79fdef5208d7e0ca6f88a..958679a20e13d3ec018e5b974ae35e1e17d400c0 100644 (file)
@@ -788,6 +788,7 @@ MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
        .init_irq     = davinci_irq_init,
        .timer        = &davinci_timer,
        .init_machine = evm_init,
+       .init_late      = davinci_init_late,
        .dma_zone_size  = SZ_128M,
        .restart        = davinci_restart,
 MACHINE_END
@@ -798,6 +799,7 @@ MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
        .init_irq     = davinci_irq_init,
        .timer        = &davinci_timer,
        .init_machine = evm_init,
+       .init_late      = davinci_init_late,
        .dma_zone_size  = SZ_128M,
        .restart        = davinci_restart,
 MACHINE_END
index 672d820e2aa4c73d93fe128cd233d99ae2fa8eaa..beecde3a1d2f9a830362cb89aa9d6e7a249b9ae6 100644 (file)
@@ -572,6 +572,7 @@ MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808")
        .init_irq       = cp_intc_init,
        .timer          = &davinci_timer,
        .init_machine   = mityomapl138_init,
+       .init_late      = davinci_init_late,
        .dma_zone_size  = SZ_128M,
        .restart        = da8xx_restart,
 MACHINE_END
index a772bb45570a7e92de29de7aa70a1a2f4250951f..5de69f2fcca9fec965fc300b506056c4a1f29e01 100644 (file)
@@ -278,6 +278,7 @@ MACHINE_START(NEUROS_OSD2, "Neuros OSD2")
        .init_irq       = davinci_irq_init,
        .timer          = &davinci_timer,
        .init_machine = davinci_ntosd2_init,
+       .init_late      = davinci_init_late,
        .dma_zone_size  = SZ_128M,
        .restart        = davinci_restart,
 MACHINE_END
index 45e815760a27fd0cfc0f2965590eb90a1779b240..dc1208e9e664649718f2e2f4935cd5e9cc5dc86e 100644 (file)
@@ -343,6 +343,7 @@ MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard")
        .init_irq       = cp_intc_init,
        .timer          = &davinci_timer,
        .init_machine   = omapl138_hawk_init,
+       .init_late      = davinci_init_late,
        .dma_zone_size  = SZ_128M,
        .restart        = da8xx_restart,
 MACHINE_END
index 76e675096104d8edc85c664d776bf341327909a6..9078acf94bacfe211c8ec3ff24a427eae6f11895 100644 (file)
@@ -157,6 +157,7 @@ MACHINE_START(SFFSDR, "Lyrtech SFFSDR")
        .init_irq     = davinci_irq_init,
        .timer        = &davinci_timer,
        .init_machine = davinci_sffsdr_init,
+       .init_late      = davinci_init_late,
        .dma_zone_size  = SZ_128M,
        .restart        = davinci_restart,
 MACHINE_END
index 5f14e30b00d896a79c83ded76929543c956fb93f..ac4e003ad86336e61cb4774a0934f437bbfdfaca 100644 (file)
@@ -282,6 +282,7 @@ MACHINE_START(TNETV107X, "TNETV107X EVM")
        .init_irq       = cp_intc_init,
        .timer          = &davinci_timer,
        .init_machine   = tnetv107x_evm_board_init,
+       .init_late      = davinci_init_late,
        .dma_zone_size  = SZ_128M,
        .restart        = tnetv107x_restart,
 MACHINE_END
index 008772e3b84321463c850c272065f4b0bc2d341a..34668ead53c73b1c70ee0b09261ba74ff8a3627b 100644 (file)
@@ -213,7 +213,7 @@ EXPORT_SYMBOL(clk_unregister);
 /*
  * Disable any unused clocks left on by the bootloader
  */
-static int __init clk_disable_unused(void)
+int __init davinci_clk_disable_unused(void)
 {
        struct clk *ck;
 
@@ -237,7 +237,6 @@ static int __init clk_disable_unused(void)
 
        return 0;
 }
-late_initcall(clk_disable_unused);
 #endif
 
 static unsigned long clk_sysclk_recalc(struct clk *clk)
index cb9b2e47510c8de2118bd80364f4b60b4308e07b..64b0f65a8639aa919173b597c8134e55192972b6 100644 (file)
@@ -117,3 +117,10 @@ void __init davinci_common_init(struct davinci_soc_info *soc_info)
 err:
        panic("davinci_common_init: SoC Initialization failed\n");
 }
+
+void __init davinci_init_late(void)
+{
+       davinci_cpufreq_init();
+       davinci_pm_init();
+       davinci_clk_disable_unused();
+}
index 031048fec9f5e2d175707eea2c4ed0d2edd395a2..4729eaab0f403e90c83ae2799d37b1521831efb1 100644 (file)
@@ -240,10 +240,9 @@ static struct platform_driver davinci_cpufreq_driver = {
        .remove = __exit_p(davinci_cpufreq_remove),
 };
 
-static int __init davinci_cpufreq_init(void)
+int __init davinci_cpufreq_init(void)
 {
        return platform_driver_probe(&davinci_cpufreq_driver,
                                                        davinci_cpufreq_probe);
 }
-late_initcall(davinci_cpufreq_init);
 
index 95ce019c9b98686b5fb14f69fc93665daf964f49..a685e9706b7ba305b462b103899398b3521f2917 100644 (file)
@@ -353,9 +353,10 @@ static int irq2ctlr(int irq)
  *****************************************************************************/
 static irqreturn_t dma_irq_handler(int irq, void *data)
 {
-       int i;
        int ctlr;
-       unsigned int cnt = 0;
+       u32 sh_ier;
+       u32 sh_ipr;
+       u32 bank;
 
        ctlr = irq2ctlr(irq);
        if (ctlr < 0)
@@ -363,41 +364,39 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
 
        dev_dbg(data, "dma_irq_handler\n");
 
-       if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0) &&
-           (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
-               return IRQ_NONE;
+       sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
+       if (!sh_ipr) {
+               sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
+               if (!sh_ipr)
+                       return IRQ_NONE;
+               sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
+               bank = 1;
+       } else {
+               sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
+               bank = 0;
+       }
 
-       while (1) {
-               int j;
-               if (edma_shadow0_read_array(ctlr, SH_IPR, 0) &
-                               edma_shadow0_read_array(ctlr, SH_IER, 0))
-                       j = 0;
-               else if (edma_shadow0_read_array(ctlr, SH_IPR, 1) &
-                               edma_shadow0_read_array(ctlr, SH_IER, 1))
-                       j = 1;
-               else
-                       break;
-               dev_dbg(data, "IPR%d %08x\n", j,
-                               edma_shadow0_read_array(ctlr, SH_IPR, j));
-               for (i = 0; i < 32; i++) {
-                       int k = (j << 5) + i;
-                       if ((edma_shadow0_read_array(ctlr, SH_IPR, j) & BIT(i))
-                                       && (edma_shadow0_read_array(ctlr,
-                                                       SH_IER, j) & BIT(i))) {
-                               /* Clear the corresponding IPR bits */
-                               edma_shadow0_write_array(ctlr, SH_ICR, j,
-                                                       BIT(i));
-                               if (edma_cc[ctlr]->intr_data[k].callback)
-                                       edma_cc[ctlr]->intr_data[k].callback(
-                                               k, DMA_COMPLETE,
-                                               edma_cc[ctlr]->intr_data[k].
-                                               data);
-                       }
+       do {
+               u32 slot;
+               u32 channel;
+
+               dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);
+
+               slot = __ffs(sh_ipr);
+               sh_ipr &= ~(BIT(slot));
+
+               if (sh_ier & BIT(slot)) {
+                       channel = (bank << 5) | slot;
+                       /* Clear the corresponding IPR bits */
+                       edma_shadow0_write_array(ctlr, SH_ICR, bank,
+                                       BIT(slot));
+                       if (edma_cc[ctlr]->intr_data[channel].callback)
+                               edma_cc[ctlr]->intr_data[channel].callback(
+                                       channel, DMA_COMPLETE,
+                                       edma_cc[ctlr]->intr_data[channel].data);
                }
-               cnt++;
-               if (cnt > 10)
-                       break;
-       }
+       } while (sh_ipr);
+
        edma_shadow0_write(ctlr, SH_IEVAL, 1);
        return IRQ_HANDLED;
 }
index 5cd39a4e0c966d4823d2c438edf8376216f35ae6..bdc4aa8e672ac98349d10c9059e063b1294e40dc 100644 (file)
@@ -84,6 +84,25 @@ extern struct davinci_soc_info davinci_soc_info;
 extern void davinci_common_init(struct davinci_soc_info *soc_info);
 extern void davinci_init_ide(void);
 void davinci_restart(char mode, const char *cmd);
+void davinci_init_late(void);
+
+#ifdef CONFIG_DAVINCI_RESET_CLOCKS
+int davinci_clk_disable_unused(void);
+#else
+static inline int davinci_clk_disable_unused(void) { return 0; }
+#endif
+
+#ifdef CONFIG_CPU_FREQ
+int davinci_cpufreq_init(void);
+#else
+static inline int davinci_cpufreq_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_SUSPEND
+int davinci_pm_init(void);
+#else
+static inline int davinci_pm_init(void) { return 0; }
+#endif
 
 /* standard place to map on-chip SRAMs; they *may* support DMA */
 #define SRAM_VIRT      0xfffe0000
index cf94552d52740cd8c8d4903aa0549a05369b73cf..34290d14754b41b827208528c1a317e5d5fa62b3 100644 (file)
 
 #define UART_SHIFT     2
 
-               .pushsection .data
-davinci_uart_phys:     .word   0
-davinci_uart_virt:     .word   0
-               .popsection
-
-               .macro addruart, rp, rv, tmp
-
-               /* Use davinci_uart_phys/virt if already configured */
-10:            adr     \rp, 99f                @ get effective addr of 99f
-               ldr     \rv, [\rp]              @ get absolute addr of 99f
-               sub     \rv, \rv, \rp           @ offset between the two
-               ldr     \rp, [\rp, #4]          @ abs addr of omap_uart_phys
-               sub     \tmp, \rp, \rv          @ make it effective
-               ldr     \rp, [\tmp, #0]         @ davinci_uart_phys
-               ldr     \rv, [\tmp, #4]         @ davinci_uart_virt
-               cmp     \rp, #0                 @ is port configured?
-               cmpne   \rv, #0
-               bne     100f                    @ already configured
-
-               /* Check the debug UART address set in uncompress.h */
-               and     \rp, pc, #0xff000000
-               ldr     \rv, =DAVINCI_UART_INFO_OFS
-               add     \rp, \rp, \rv
-
-               /* Copy uart phys address from decompressor uart info */
-               ldr     \rv, [\rp, #0]
-               str     \rv, [\tmp, #0]
-
-               /* Copy uart virt address from decompressor uart info */
-               ldr     \rv, [\rp, #4]
-               str     \rv, [\tmp, #4]
-
-               b       10b
+#if defined(CONFIG_DEBUG_DAVINCI_DMx_UART0)
+#define UART_BASE      DAVINCI_UART0_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART0)
+#define UART_BASE      DA8XX_UART0_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART1)
+#define UART_BASE      DA8XX_UART1_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART2)
+#define UART_BASE      DA8XX_UART2_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_TNETV107X_UART1)
+#define UART_BASE      TNETV107X_UART2_BASE
+#define UART_VIRTBASE  TNETV107X_UART2_VIRT
+#else
+#error "Select a specific port for DEBUG_LL"
+#endif
 
-               .align
-99:            .word   .
-               .word   davinci_uart_phys
-               .ltorg
+#ifndef UART_VIRTBASE
+#define UART_VIRTBASE  IO_ADDRESS(UART_BASE)
+#endif
 
-100:
+               .macro addruart, rp, rv, tmp
+               ldr     \rp, =UART_BASE
+               ldr     \rv, =UART_VIRTBASE
                .endm
 
                .macro  senduart,rd,rx
index 2184691ebc2f78ddfcb5989e2d880ac77a5d3749..16bb42291d39d5154f99f1adc91590f18c2785f8 100644 (file)
@@ -22,7 +22,7 @@
 /*
  * I/O mapping
  */
-#define IO_PHYS                                0x01c00000UL
+#define IO_PHYS                                UL(0x01c00000)
 #define IO_OFFSET                      0xfd000000 /* Virtual IO = 0xfec00000 */
 #define IO_SIZE                                0x00400000
 #define IO_VIRT                                (IO_PHYS + IO_OFFSET)
index e347d88fef91dbd4a64e4ecf62bb11a7c2b753e7..46b3cd11c3c2ec582e2972202f1c6277494fe9ea 100644 (file)
 
 #include <mach/hardware.h>
 
-/*
- * Stolen area that contains debug uart physical and virtual addresses.  These
- * addresses are filled in by the uncompress.h code, and are used by the debug
- * macros in debug-macro.S.
- *
- * This area sits just below the page tables (see arch/arm/kernel/head.S).
- * We define it as a relative offset from start of usable RAM.
- */
-#define DAVINCI_UART_INFO_OFS  0x3ff8
-
 #define DAVINCI_UART0_BASE     (IO_PHYS + 0x20000)
 #define DAVINCI_UART1_BASE     (IO_PHYS + 0x20400)
 #define DAVINCI_UART2_BASE     (IO_PHYS + 0x20800)
index da2fb2c2155a2ac84d242ad76c4de2842aa33d31..18cfd4977155b468970559945663e05a48bb2000 100644 (file)
@@ -43,37 +43,27 @@ static inline void flush(void)
                barrier();
 }
 
-static inline void set_uart_info(u32 phys, void * __iomem virt)
+static inline void set_uart_info(u32 phys)
 {
-       /*
-        * Get address of some.bss variable and round it down
-        * a la CONFIG_AUTO_ZRELADDR.
-        */
-       u32 ram_start = (u32)&uart & 0xf8000000;
-       u32 *uart_info = (u32 *)(ram_start + DAVINCI_UART_INFO_OFS);
-
        uart = (u32 *)phys;
-       uart_info[0] = phys;
-       uart_info[1] = (u32)virt;
 }
 
-#define _DEBUG_LL_ENTRY(machine, phys, virt)                   \
-       if (machine_is_##machine()) {                           \
-               set_uart_info(phys, virt);                      \
-               break;                                          \
+#define _DEBUG_LL_ENTRY(machine, phys)                         \
+       {                                                       \
+               if (machine_is_##machine()) {                   \
+                       set_uart_info(phys);                    \
+                       break;                                  \
+               }                                               \
        }
 
 #define DEBUG_LL_DAVINCI(machine, port)                                \
-       _DEBUG_LL_ENTRY(machine, DAVINCI_UART##port##_BASE,     \
-                       IO_ADDRESS(DAVINCI_UART##port##_BASE))
+       _DEBUG_LL_ENTRY(machine, DAVINCI_UART##port##_BASE)
 
 #define DEBUG_LL_DA8XX(machine, port)                          \
-       _DEBUG_LL_ENTRY(machine, DA8XX_UART##port##_BASE,       \
-                       IO_ADDRESS(DA8XX_UART##port##_BASE))
+       _DEBUG_LL_ENTRY(machine, DA8XX_UART##port##_BASE)
 
 #define DEBUG_LL_TNETV107X(machine, port)                      \
-       _DEBUG_LL_ENTRY(machine, TNETV107X_UART##port##_BASE,   \
-                       TNETV107X_UART##port##_VIRT)
+       _DEBUG_LL_ENTRY(machine, TNETV107X_UART##port##_BASE)
 
 static inline void __arch_decomp_setup(unsigned long arch_id)
 {
index 04c49f7543ef0df4a30a52c5c862f623222a93cd..eb8360b33aa9d4528a73562c585186949fe38657 100644 (file)
@@ -152,8 +152,7 @@ static struct platform_driver davinci_pm_driver = {
        .remove = __exit_p(davinci_pm_remove),
 };
 
-static int __init davinci_pm_init(void)
+int __init davinci_pm_init(void)
 {
        return platform_driver_probe(&davinci_pm_driver, davinci_pm_probe);
 }
-late_initcall(davinci_pm_init);
index 42ab1e7c4ecccf0791c3139b23d853f749fd7c10..9493076fc5948e38258ec573f223d78bafdadde8 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/pci.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/ata_platform.h>
 #include <linux/gpio.h>
 #include <asm/page.h>
@@ -67,6 +67,19 @@ void __init dove_map_io(void)
        iotable_init(dove_io_desc, ARRAY_SIZE(dove_io_desc));
 }
 
+/*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
+static struct clk *tclk;
+
+static void __init clk_init(void)
+{
+       tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
+                                      get_tclk());
+
+       orion_clkdev_init(tclk);
+}
+
 /*****************************************************************************
  * EHCI0
  ****************************************************************************/
@@ -89,8 +102,7 @@ void __init dove_ehci1_init(void)
 void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
 {
        orion_ge00_init(eth_data,
-                       DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM,
-                       0, get_tclk());
+                       DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM, 0);
 }
 
 /*****************************************************************************
@@ -116,7 +128,7 @@ void __init dove_sata_init(struct mv_sata_platform_data *sata_data)
 void __init dove_uart0_init(void)
 {
        orion_uart0_init(DOVE_UART0_VIRT_BASE, DOVE_UART0_PHYS_BASE,
-                        IRQ_DOVE_UART_0, get_tclk());
+                        IRQ_DOVE_UART_0, tclk);
 }
 
 /*****************************************************************************
@@ -125,7 +137,7 @@ void __init dove_uart0_init(void)
 void __init dove_uart1_init(void)
 {
        orion_uart1_init(DOVE_UART1_VIRT_BASE, DOVE_UART1_PHYS_BASE,
-                        IRQ_DOVE_UART_1, get_tclk());
+                        IRQ_DOVE_UART_1, tclk);
 }
 
 /*****************************************************************************
@@ -134,7 +146,7 @@ void __init dove_uart1_init(void)
 void __init dove_uart2_init(void)
 {
        orion_uart2_init(DOVE_UART2_VIRT_BASE, DOVE_UART2_PHYS_BASE,
-                        IRQ_DOVE_UART_2, get_tclk());
+                        IRQ_DOVE_UART_2, tclk);
 }
 
 /*****************************************************************************
@@ -143,7 +155,7 @@ void __init dove_uart2_init(void)
 void __init dove_uart3_init(void)
 {
        orion_uart3_init(DOVE_UART3_VIRT_BASE, DOVE_UART3_PHYS_BASE,
-                        IRQ_DOVE_UART_3, get_tclk());
+                        IRQ_DOVE_UART_3, tclk);
 }
 
 /*****************************************************************************
@@ -151,12 +163,12 @@ void __init dove_uart3_init(void)
  ****************************************************************************/
 void __init dove_spi0_init(void)
 {
-       orion_spi_init(DOVE_SPI0_PHYS_BASE, get_tclk());
+       orion_spi_init(DOVE_SPI0_PHYS_BASE);
 }
 
 void __init dove_spi1_init(void)
 {
-       orion_spi_1_init(DOVE_SPI1_PHYS_BASE, get_tclk());
+       orion_spi_1_init(DOVE_SPI1_PHYS_BASE);
 }
 
 /*****************************************************************************
@@ -272,18 +284,17 @@ void __init dove_sdio1_init(void)
 
 void __init dove_init(void)
 {
-       int tclk;
-
-       tclk = get_tclk();
-
        printk(KERN_INFO "Dove 88AP510 SoC, ");
-       printk(KERN_INFO "TCLK = %dMHz\n", (tclk + 499999) / 1000000);
+       printk(KERN_INFO "TCLK = %dMHz\n", (get_tclk() + 499999) / 1000000);
 
 #ifdef CONFIG_CACHE_TAUROS2
        tauros2_init();
 #endif
        dove_setup_cpu_mbus();
 
+       /* Setup root of clk tree */
+       clk_init();
+
        /* internal devices that every board has */
        dove_rtc_init();
        dove_xor0_init();
index ea77ae430b2d018f034f536471c306cf2a68db1b..bc2867f113460ab810be4d70b6343134abc95c0d 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/i2c.h>
 #include <linux/pci.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <linux/spi/flash.h>
 #include <linux/gpio.h>
 #include <asm/mach-types.h>
index 2d45947a3034b260bede63835a0db4606154c1cd..a472777e9eba4523acd0386af0595715c9bb16ee 100644 (file)
@@ -41,5 +41,6 @@ MACHINE_START(ADSSPHERE, "ADS Sphere board")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = adssphere_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
index 66b1494f23a6f0f4b2182f23ae2433089bcd1f53..4dd07a0e3604a19498cf4fa507b544a666e2d8a5 100644 (file)
@@ -675,7 +675,7 @@ int ep93xx_keypad_acquire_gpio(struct platform_device *pdev)
 fail_gpio_d:
        gpio_free(EP93XX_GPIO_LINE_C(i));
 fail_gpio_c:
-       for ( ; i >= 0; --i) {
+       for (--i; i >= 0; --i) {
                gpio_free(EP93XX_GPIO_LINE_C(i));
                gpio_free(EP93XX_GPIO_LINE_D(i));
        }
@@ -834,3 +834,8 @@ void ep93xx_restart(char mode, const char *cmd)
        while (1)
                ;
 }
+
+void __init ep93xx_init_late(void)
+{
+       crunch_init();
+}
index 74753e2df60357c5375b6f243fbe4529f2466097..a4a2ab9648c92cd5d7a10a1eb646e63c65d2a366 100644 (file)
@@ -79,12 +79,10 @@ static struct notifier_block crunch_notifier_block = {
        .notifier_call  = crunch_do,
 };
 
-static int __init crunch_init(void)
+int __init crunch_init(void)
 {
        thread_register_notifier(&crunch_notifier_block);
        elf_hwcap |= HWCAP_CRUNCH;
 
        return 0;
 }
-
-late_initcall(crunch_init);
index da9047d726f07dc1e064409df07a46c3bd4eb8b5..d74c5cddb98b654972ae986489f90c0e700aa190 100644 (file)
@@ -255,6 +255,7 @@ MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = edb93xx_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
 #endif
@@ -268,6 +269,7 @@ MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = edb93xx_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
 #endif
@@ -281,6 +283,7 @@ MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = edb93xx_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
 #endif
@@ -294,6 +297,7 @@ MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = edb93xx_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
 #endif
@@ -307,6 +311,7 @@ MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = edb93xx_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
 #endif
@@ -320,6 +325,7 @@ MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = edb93xx_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
 #endif
@@ -333,6 +339,7 @@ MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = edb93xx_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
 #endif
@@ -346,6 +353,7 @@ MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = edb93xx_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
 #endif
index fcdffbe49dcc8b17423b39e84bfa44a74f8826e0..437c3411115513156bf252c569a22ac636f3de0a 100644 (file)
@@ -41,5 +41,6 @@ MACHINE_START(GESBC9312, "Glomation GESBC-9312-sx")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = gesbc9312_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
index 602bd87fd0ab4dc386dfd2481b5de71b292cbcf4..1ecb040d98bf7f3e390f76473ec85c7c50e377a3 100644 (file)
@@ -53,5 +53,12 @@ void ep93xx_init_devices(void);
 extern struct sys_timer ep93xx_timer;
 
 void ep93xx_restart(char, const char *);
+void ep93xx_init_late(void);
+
+#ifdef CONFIG_CRUNCH
+int crunch_init(void);
+#else
+static inline int crunch_init(void) { return 0; }
+#endif
 
 #endif
index dc431c5f04cee416a7fa102658189cc27949628e..3d7cdab725b20ec278635ac41c96c4724b65db54 100644 (file)
@@ -85,6 +85,7 @@ MACHINE_START(MICRO9, "Contec Micro9-High")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = micro9_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
 #endif
@@ -98,6 +99,7 @@ MACHINE_START(MICRO9M, "Contec Micro9-Mid")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = micro9_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
 #endif
@@ -111,6 +113,7 @@ MACHINE_START(MICRO9L, "Contec Micro9-Lite")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = micro9_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
 #endif
@@ -124,6 +127,7 @@ MACHINE_START(MICRO9S, "Contec Micro9-Slim")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = micro9_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
 #endif
index f40c2987e5451f2f55fa613a603f1d03e1382229..33dc07917417d4b5caf89c600ea9885c05fbb0bf 100644 (file)
@@ -86,5 +86,6 @@ MACHINE_START(SIM_ONE, "Simplemachines Sim.One Board")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = simone_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
index 0c00852ef160ba73dc19519e9365170c76a4d299..01abd3516a772ef93afe3d68c0556f587b1bb180 100644 (file)
@@ -82,8 +82,6 @@ static int snappercl15_nand_dev_ready(struct mtd_info *mtd)
        return !!(__raw_readw(NAND_CTRL_ADDR(chip)) & SNAPPERCL15_NAND_RDY);
 }
 
-static const char *snappercl15_nand_part_probes[] = {"cmdlinepart", NULL};
-
 static struct mtd_partition snappercl15_nand_parts[] = {
        {
                .name           = "Kernel",
@@ -100,10 +98,8 @@ static struct mtd_partition snappercl15_nand_parts[] = {
 static struct platform_nand_data snappercl15_nand_data = {
        .chip = {
                .nr_chips               = 1,
-               .part_probe_types       = snappercl15_nand_part_probes,
                .partitions             = snappercl15_nand_parts,
                .nr_partitions          = ARRAY_SIZE(snappercl15_nand_parts),
-               .options                = NAND_NO_AUTOINCR,
                .chip_delay             = 25,
        },
        .ctrl = {
@@ -183,5 +179,6 @@ MACHINE_START(SNAPPER_CL15, "Bluewater Systems Snapper CL15")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = snappercl15_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
index 5ea790942e9476cdf313ee35fc2e1baf1fc83ee6..75cab2d7ec73a6a4a4619d606a2f9c506501a957 100644 (file)
@@ -105,8 +105,6 @@ static int ts72xx_nand_device_ready(struct mtd_info *mtd)
        return !!(__raw_readb(addr) & 0x20);
 }
 
-static const char *ts72xx_nand_part_probes[] = { "cmdlinepart", NULL };
-
 #define TS72XX_BOOTROM_PART_SIZE       (SZ_16K)
 #define TS72XX_REDBOOT_PART_SIZE       (SZ_2M + SZ_1M)
 
@@ -134,7 +132,6 @@ static struct platform_nand_data ts72xx_nand_data = {
                .nr_chips       = 1,
                .chip_offset    = 0,
                .chip_delay     = 15,
-               .part_probe_types = ts72xx_nand_part_probes,
                .partitions     = ts72xx_nand_parts,
                .nr_partitions  = ARRAY_SIZE(ts72xx_nand_parts),
        },
@@ -252,5 +249,6 @@ MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = ts72xx_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
index ba156eb225e89c647acf87159262a44939c9d4e9..2905a4929bdc86972e66fa9a1e581681007e7772 100644 (file)
@@ -367,5 +367,6 @@ MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307")
        .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = vision_init_machine,
+       .init_late      = ep93xx_init_late,
        .restart        = ep93xx_restart,
 MACHINE_END
index 15b05b89cc399ddd3bf36658ec3d68f1bc12581b..573be57d3d2805c70f24ab3b4dd29e1b3e6e3843 100644 (file)
@@ -61,6 +61,9 @@ config SOC_EXYNOS5250
        bool "SAMSUNG EXYNOS5250"
        default y
        depends on ARCH_EXYNOS5
+       select SAMSUNG_DMADEV
+       select S5P_PM if PM
+       select S5P_SLEEP if PM
        help
          Enable EXYNOS5250 SoC support
 
@@ -70,7 +73,7 @@ config EXYNOS4_MCT
        help
          Use MCT (Multi Core Timer) as kernel timers
 
-config EXYNOS4_DEV_DMA
+config EXYNOS_DEV_DMA
        bool
        help
          Compile in amba device definitions for DMA controller
@@ -80,15 +83,20 @@ config EXYNOS4_DEV_AHCI
        help
          Compile in platform device definitions for AHCI
 
+config EXYNOS_DEV_DRM
+       bool
+       help
+         Compile in platform device definitions for core DRM device
+
 config EXYNOS4_SETUP_FIMD0
        bool
        help
          Common setup code for FIMD0.
 
-config EXYNOS4_DEV_SYSMMU
+config EXYNOS_DEV_SYSMMU
        bool
        help
-         Common setup code for SYSTEM MMU in EXYNOS4
+         Common setup code for SYSTEM MMU in EXYNOS platforms
 
 config EXYNOS4_DEV_DWMCI
        bool
@@ -161,7 +169,7 @@ config EXYNOS4_SETUP_USB_PHY
        help
          Common setup code for USB PHY controller
 
-config EXYNOS4_SETUP_SPI
+config EXYNOS_SETUP_SPI
        bool
        help
          Common setup code for SPI GPIO configurations.
@@ -201,12 +209,12 @@ config MACH_SMDKV310
        select S3C_DEV_HSMMC3
        select SAMSUNG_DEV_BACKLIGHT
        select EXYNOS_DEV_DRM
+       select EXYNOS_DEV_SYSMMU
        select EXYNOS4_DEV_AHCI
        select SAMSUNG_DEV_KEYPAD
        select EXYNOS4_DEV_DMA
        select SAMSUNG_DEV_PWM
        select EXYNOS4_DEV_USB_OHCI
-       select EXYNOS4_DEV_SYSMMU
        select EXYNOS4_SETUP_FIMD0
        select EXYNOS4_SETUP_I2C1
        select EXYNOS4_SETUP_KEYPAD
@@ -224,8 +232,7 @@ config MACH_ARMLEX4210
        select S3C_DEV_HSMMC2
        select S3C_DEV_HSMMC3
        select EXYNOS4_DEV_AHCI
-       select EXYNOS4_DEV_DMA
-       select EXYNOS4_DEV_SYSMMU
+       select EXYNOS_DEV_DMA
        select EXYNOS4_SETUP_SDHCI
        help
          Machine support for Samsung ARMLEX4210 based on EXYNOS4210
@@ -256,6 +263,7 @@ config MACH_UNIVERSAL_C210
        select S5P_DEV_MFC
        select S5P_DEV_ONENAND
        select S5P_DEV_TV
+       select EXYNOS_DEV_SYSMMU
        select EXYNOS4_DEV_DMA
        select EXYNOS_DEV_DRM
        select EXYNOS4_SETUP_FIMD0
@@ -332,6 +340,7 @@ config MACH_ORIGEN
        select SAMSUNG_DEV_BACKLIGHT
        select SAMSUNG_DEV_PWM
        select EXYNOS_DEV_DRM
+       select EXYNOS_DEV_SYSMMU
        select EXYNOS4_DEV_DMA
        select EXYNOS4_DEV_USB_OHCI
        select EXYNOS4_SETUP_FIMD0
@@ -360,7 +369,8 @@ config MACH_SMDK4212
        select SAMSUNG_DEV_BACKLIGHT
        select SAMSUNG_DEV_KEYPAD
        select SAMSUNG_DEV_PWM
-       select EXYNOS4_DEV_DMA
+       select EXYNOS_DEV_SYSMMU
+       select EXYNOS_DEV_DMA
        select EXYNOS4_SETUP_I2C1
        select EXYNOS4_SETUP_I2C3
        select EXYNOS4_SETUP_I2C7
index 8631840d1b5e85ce8d959430bcdbdeadb339afbb..9b58024f7d43919fcc1c2c07ed6732d39bcd128d 100644 (file)
@@ -22,7 +22,7 @@ obj-$(CONFIG_PM)              += pm.o
 obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
 obj-$(CONFIG_CPU_IDLE)         += cpuidle.o
 
-obj-$(CONFIG_ARCH_EXYNOS4)     += pmu.o
+obj-$(CONFIG_ARCH_EXYNOS)      += pmu.o
 
 obj-$(CONFIG_SMP)              += platsmp.o headsmp.o
 
@@ -50,10 +50,11 @@ obj-$(CONFIG_MACH_EXYNOS5_DT)               += mach-exynos5-dt.o
 obj-y                                  += dev-uart.o
 obj-$(CONFIG_ARCH_EXYNOS4)             += dev-audio.o
 obj-$(CONFIG_EXYNOS4_DEV_AHCI)         += dev-ahci.o
-obj-$(CONFIG_EXYNOS4_DEV_SYSMMU)       += dev-sysmmu.o
 obj-$(CONFIG_EXYNOS4_DEV_DWMCI)                += dev-dwmci.o
-obj-$(CONFIG_EXYNOS4_DEV_DMA)          += dma.o
+obj-$(CONFIG_EXYNOS_DEV_DMA)           += dma.o
 obj-$(CONFIG_EXYNOS4_DEV_USB_OHCI)     += dev-ohci.o
+obj-$(CONFIG_EXYNOS_DEV_DRM)           += dev-drm.o
+obj-$(CONFIG_EXYNOS_DEV_SYSMMU)                += dev-sysmmu.o
 
 obj-$(CONFIG_ARCH_EXYNOS)              += setup-i2c0.o
 obj-$(CONFIG_EXYNOS4_SETUP_FIMC)       += setup-fimc.o
@@ -68,4 +69,4 @@ obj-$(CONFIG_EXYNOS4_SETUP_I2C7)      += setup-i2c7.o
 obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD)     += setup-keypad.o
 obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
 obj-$(CONFIG_EXYNOS4_SETUP_USB_PHY)    += setup-usb-phy.o
-obj-$(CONFIG_EXYNOS4_SETUP_SPI)                += setup-spi.o
+obj-$(CONFIG_EXYNOS_SETUP_SPI)         += setup-spi.o
index b9862e22bf10a3ecc4ccca0c6e7ece16f3f3a2bb..31bd181b0514e2221f14fe3c8f778697ab8f32a3 100644 (file)
@@ -1,2 +1,5 @@
    zreladdr-y  += 0x40008000
 params_phys-y  := 0x40000100
+
+dtb-$(CONFIG_MACH_EXYNOS4_DT) += exynos4210-origen.dtb exynos4210-smdkv310.dtb
+dtb-$(CONFIG_MACH_EXYNOS5_DT) += exynos5250-smdk5250.dtb
index 6efd1e5919fdebcd389e61cf48e25b1967a75bb1..bcb7db45314599cd975a6ad904d199c881c4e6cf 100644 (file)
@@ -168,7 +168,7 @@ static int exynos4_clk_ip_tv_ctrl(struct clk *clk, int enable)
        return s5p_gatectrl(EXYNOS4_CLKGATE_IP_TV, clk, enable);
 }
 
-static int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable)
+int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable)
 {
        return s5p_gatectrl(EXYNOS4_CLKGATE_IP_IMAGE, clk, enable);
 }
@@ -198,6 +198,11 @@ static int exynos4_clk_ip_perir_ctrl(struct clk *clk, int enable)
        return s5p_gatectrl(EXYNOS4_CLKGATE_IP_PERIR, clk, enable);
 }
 
+int exynos4_clk_ip_dmc_ctrl(struct clk *clk, int enable)
+{
+       return s5p_gatectrl(EXYNOS4_CLKGATE_IP_DMC, clk, enable);
+}
+
 static int exynos4_clk_hdmiphy_ctrl(struct clk *clk, int enable)
 {
        return s5p_gatectrl(S5P_HDMI_PHY_CONTROL, clk, enable);
@@ -678,61 +683,55 @@ static struct clk exynos4_init_clocks_off[] = {
                .enable         = exynos4_clk_ip_peril_ctrl,
                .ctrlbit        = (1 << 14),
        }, {
-               .name           = "SYSMMU_MDMA",
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(mfc_l, 0),
+               .enable         = exynos4_clk_ip_mfc_ctrl,
+               .ctrlbit        = (1 << 1),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(mfc_r, 1),
+               .enable         = exynos4_clk_ip_mfc_ctrl,
+               .ctrlbit        = (1 << 2),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(tv, 2),
+               .enable         = exynos4_clk_ip_tv_ctrl,
+               .ctrlbit        = (1 << 4),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(jpeg, 3),
+               .enable         = exynos4_clk_ip_cam_ctrl,
+               .ctrlbit        = (1 << 11),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(rot, 4),
                .enable         = exynos4_clk_ip_image_ctrl,
-               .ctrlbit        = (1 << 5),
+               .ctrlbit        = (1 << 4),
        }, {
-               .name           = "SYSMMU_FIMC0",
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(fimc0, 5),
                .enable         = exynos4_clk_ip_cam_ctrl,
                .ctrlbit        = (1 << 7),
        }, {
-               .name           = "SYSMMU_FIMC1",
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(fimc1, 6),
                .enable         = exynos4_clk_ip_cam_ctrl,
                .ctrlbit        = (1 << 8),
        }, {
-               .name           = "SYSMMU_FIMC2",
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(fimc2, 7),
                .enable         = exynos4_clk_ip_cam_ctrl,
                .ctrlbit        = (1 << 9),
        }, {
-               .name           = "SYSMMU_FIMC3",
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(fimc3, 8),
                .enable         = exynos4_clk_ip_cam_ctrl,
                .ctrlbit        = (1 << 10),
        }, {
-               .name           = "SYSMMU_JPEG",
-               .enable         = exynos4_clk_ip_cam_ctrl,
-               .ctrlbit        = (1 << 11),
-       }, {
-               .name           = "SYSMMU_FIMD0",
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(fimd0, 10),
                .enable         = exynos4_clk_ip_lcd0_ctrl,
                .ctrlbit        = (1 << 4),
-       }, {
-               .name           = "SYSMMU_FIMD1",
-               .enable         = exynos4_clk_ip_lcd1_ctrl,
-               .ctrlbit        = (1 << 4),
-       }, {
-               .name           = "SYSMMU_PCIe",
-               .enable         = exynos4_clk_ip_fsys_ctrl,
-               .ctrlbit        = (1 << 18),
-       }, {
-               .name           = "SYSMMU_G2D",
-               .enable         = exynos4_clk_ip_image_ctrl,
-               .ctrlbit        = (1 << 3),
-       }, {
-               .name           = "SYSMMU_ROTATOR",
-               .enable         = exynos4_clk_ip_image_ctrl,
-               .ctrlbit        = (1 << 4),
-       }, {
-               .name           = "SYSMMU_TV",
-               .enable         = exynos4_clk_ip_tv_ctrl,
-               .ctrlbit        = (1 << 4),
-       }, {
-               .name           = "SYSMMU_MFC_L",
-               .enable         = exynos4_clk_ip_mfc_ctrl,
-               .ctrlbit        = (1 << 1),
-       }, {
-               .name           = "SYSMMU_MFC_R",
-               .enable         = exynos4_clk_ip_mfc_ctrl,
-               .ctrlbit        = (1 << 2),
        }
 };
 
index cb71c29c14d1adf7db49beeb1efe2b3a9b903ae5..28a1197011823b132c10542db7795d3b3eddb588 100644 (file)
@@ -26,5 +26,7 @@ extern struct clk *exynos4_clkset_group_list[];
 extern int exynos4_clksrc_mask_fsys_ctrl(struct clk *clk, int enable);
 extern int exynos4_clk_ip_fsys_ctrl(struct clk *clk, int enable);
 extern int exynos4_clk_ip_lcd1_ctrl(struct clk *clk, int enable);
+extern int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable);
+extern int exynos4_clk_ip_dmc_ctrl(struct clk *clk, int enable);
 
 #endif /* __ASM_ARCH_CLOCK_H */
index 3b131e4b6ef57ec6e21428a640f4ad092f27e2fb..b8689ff60baf5cb721a5d1d5caef45393fa60743 100644 (file)
@@ -26,6 +26,7 @@
 #include <mach/hardware.h>
 #include <mach/map.h>
 #include <mach/regs-clock.h>
+#include <mach/sysmmu.h>
 
 #include "common.h"
 #include "clock-exynos4.h"
@@ -94,6 +95,16 @@ static struct clk init_clocks_off[] = {
                .devname        = "exynos4-fb.1",
                .enable         = exynos4_clk_ip_lcd1_ctrl,
                .ctrlbit        = (1 << 0),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(2d, 14),
+               .enable         = exynos4_clk_ip_image_ctrl,
+               .ctrlbit        = (1 << 3),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(fimd1, 11),
+               .enable         = exynos4_clk_ip_lcd1_ctrl,
+               .ctrlbit        = (1 << 4),
        },
 };
 
index 3ecc01e06f7497c1408f795023bdf78dba8de7bb..da397d21bbcf438cb81f73fe28adbfc7d365668f 100644 (file)
@@ -26,6 +26,7 @@
 #include <mach/hardware.h>
 #include <mach/map.h>
 #include <mach/regs-clock.h>
+#include <mach/sysmmu.h>
 
 #include "common.h"
 #include "clock-exynos4.h"
@@ -39,6 +40,16 @@ static struct sleep_save exynos4212_clock_save[] = {
 };
 #endif
 
+static int exynos4212_clk_ip_isp0_ctrl(struct clk *clk, int enable)
+{
+       return s5p_gatectrl(EXYNOS4_CLKGATE_IP_ISP0, clk, enable);
+}
+
+static int exynos4212_clk_ip_isp1_ctrl(struct clk *clk, int enable)
+{
+       return s5p_gatectrl(EXYNOS4_CLKGATE_IP_ISP1, clk, enable);
+}
+
 static struct clk *clk_src_mpll_user_list[] = {
        [0] = &clk_fin_mpll,
        [1] = &exynos4_clk_mout_mpll.clk,
@@ -66,7 +77,32 @@ static struct clksrc_clk clksrcs[] = {
 };
 
 static struct clk init_clocks_off[] = {
-       /* nothing here yet */
+       {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(2d, 14),
+               .enable         = exynos4_clk_ip_dmc_ctrl,
+               .ctrlbit        = (1 << 24),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(isp, 9),
+               .enable         = exynos4212_clk_ip_isp0_ctrl,
+               .ctrlbit        = (7 << 8),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME2,
+               .devname        = SYSMMU_CLOCK_DEVNAME(isp, 9),
+               .enable         = exynos4212_clk_ip_isp1_ctrl,
+               .ctrlbit        = (1 << 4),
+       }, {
+               .name           = "flite",
+               .devname        = "exynos-fimc-lite.0",
+               .enable         = exynos4212_clk_ip_isp0_ctrl,
+               .ctrlbit        = (1 << 4),
+       }, {
+               .name           = "flite",
+               .devname        = "exynos-fimc-lite.1",
+               .enable         = exynos4212_clk_ip_isp0_ctrl,
+               .ctrlbit        = (1 << 3),
+       }
 };
 
 #ifdef CONFIG_PM_SLEEP
index 7ac6ff4c46bd382839234a3d8a19ffb2042e311b..fefa336be2b4baf8d841a809766be3bdf9338f83 100644 (file)
@@ -27,7 +27,56 @@
 
 #ifdef CONFIG_PM_SLEEP
 static struct sleep_save exynos5_clock_save[] = {
-       /* will be implemented */
+       SAVE_ITEM(EXYNOS5_CLKSRC_MASK_TOP),
+       SAVE_ITEM(EXYNOS5_CLKSRC_MASK_GSCL),
+       SAVE_ITEM(EXYNOS5_CLKSRC_MASK_DISP1_0),
+       SAVE_ITEM(EXYNOS5_CLKSRC_MASK_FSYS),
+       SAVE_ITEM(EXYNOS5_CLKSRC_MASK_MAUDIO),
+       SAVE_ITEM(EXYNOS5_CLKSRC_MASK_PERIC0),
+       SAVE_ITEM(EXYNOS5_CLKSRC_MASK_PERIC1),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_GSCL),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_DISP1),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_MFC),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_G3D),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_GEN),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_FSYS),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_PERIC),
+       SAVE_ITEM(EXYNOS5_CLKGATE_IP_PERIS),
+       SAVE_ITEM(EXYNOS5_CLKGATE_BLOCK),
+       SAVE_ITEM(EXYNOS5_CLKDIV_TOP0),
+       SAVE_ITEM(EXYNOS5_CLKDIV_TOP1),
+       SAVE_ITEM(EXYNOS5_CLKDIV_GSCL),
+       SAVE_ITEM(EXYNOS5_CLKDIV_DISP1_0),
+       SAVE_ITEM(EXYNOS5_CLKDIV_GEN),
+       SAVE_ITEM(EXYNOS5_CLKDIV_MAUDIO),
+       SAVE_ITEM(EXYNOS5_CLKDIV_FSYS0),
+       SAVE_ITEM(EXYNOS5_CLKDIV_FSYS1),
+       SAVE_ITEM(EXYNOS5_CLKDIV_FSYS2),
+       SAVE_ITEM(EXYNOS5_CLKDIV_FSYS3),
+       SAVE_ITEM(EXYNOS5_CLKDIV_PERIC0),
+       SAVE_ITEM(EXYNOS5_CLKDIV_PERIC1),
+       SAVE_ITEM(EXYNOS5_CLKDIV_PERIC2),
+       SAVE_ITEM(EXYNOS5_CLKDIV_PERIC3),
+       SAVE_ITEM(EXYNOS5_CLKDIV_PERIC4),
+       SAVE_ITEM(EXYNOS5_CLKDIV_PERIC5),
+       SAVE_ITEM(EXYNOS5_SCLK_DIV_ISP),
+       SAVE_ITEM(EXYNOS5_CLKSRC_TOP0),
+       SAVE_ITEM(EXYNOS5_CLKSRC_TOP1),
+       SAVE_ITEM(EXYNOS5_CLKSRC_TOP2),
+       SAVE_ITEM(EXYNOS5_CLKSRC_TOP3),
+       SAVE_ITEM(EXYNOS5_CLKSRC_GSCL),
+       SAVE_ITEM(EXYNOS5_CLKSRC_DISP1_0),
+       SAVE_ITEM(EXYNOS5_CLKSRC_MAUDIO),
+       SAVE_ITEM(EXYNOS5_CLKSRC_FSYS),
+       SAVE_ITEM(EXYNOS5_CLKSRC_PERIC0),
+       SAVE_ITEM(EXYNOS5_CLKSRC_PERIC1),
+       SAVE_ITEM(EXYNOS5_SCLK_SRC_ISP),
+       SAVE_ITEM(EXYNOS5_EPLL_CON0),
+       SAVE_ITEM(EXYNOS5_EPLL_CON1),
+       SAVE_ITEM(EXYNOS5_EPLL_CON2),
+       SAVE_ITEM(EXYNOS5_VPLL_CON0),
+       SAVE_ITEM(EXYNOS5_VPLL_CON1),
+       SAVE_ITEM(EXYNOS5_VPLL_CON2),
 };
 #endif
 
@@ -82,6 +131,11 @@ static int exynos5_clksrc_mask_peric0_ctrl(struct clk *clk, int enable)
        return s5p_gatectrl(EXYNOS5_CLKSRC_MASK_PERIC0, clk, enable);
 }
 
+static int exynos5_clk_ip_acp_ctrl(struct clk *clk, int enable)
+{
+       return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ACP, clk, enable);
+}
+
 static int exynos5_clk_ip_core_ctrl(struct clk *clk, int enable)
 {
        return s5p_gatectrl(EXYNOS5_CLKGATE_IP_CORE, clk, enable);
@@ -127,6 +181,21 @@ static int exynos5_clk_ip_peris_ctrl(struct clk *clk, int enable)
        return s5p_gatectrl(EXYNOS5_CLKGATE_IP_PERIS, clk, enable);
 }
 
+static int exynos5_clk_ip_gscl_ctrl(struct clk *clk, int enable)
+{
+       return s5p_gatectrl(EXYNOS5_CLKGATE_IP_GSCL, clk, enable);
+}
+
+static int exynos5_clk_ip_isp0_ctrl(struct clk *clk, int enable)
+{
+       return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ISP0, clk, enable);
+}
+
+static int exynos5_clk_ip_isp1_ctrl(struct clk *clk, int enable)
+{
+       return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ISP1, clk, enable);
+}
+
 /* Core list of CMU_CPU side */
 
 static struct clksrc_clk exynos5_clk_mout_apll = {
@@ -145,11 +214,29 @@ static struct clksrc_clk exynos5_clk_sclk_apll = {
        .reg_div = { .reg = EXYNOS5_CLKDIV_CPU0, .shift = 24, .size = 3 },
 };
 
+static struct clksrc_clk exynos5_clk_mout_bpll_fout = {
+       .clk    = {
+               .name           = "mout_bpll_fout",
+       },
+       .sources = &clk_src_bpll_fout,
+       .reg_src = { .reg = EXYNOS5_PLL_DIV2_SEL, .shift = 0, .size = 1 },
+};
+
+static struct clk *exynos5_clk_src_bpll_list[] = {
+       [0] = &clk_fin_bpll,
+       [1] = &exynos5_clk_mout_bpll_fout.clk,
+};
+
+static struct clksrc_sources exynos5_clk_src_bpll = {
+       .sources        = exynos5_clk_src_bpll_list,
+       .nr_sources     = ARRAY_SIZE(exynos5_clk_src_bpll_list),
+};
+
 static struct clksrc_clk exynos5_clk_mout_bpll = {
        .clk    = {
                .name           = "mout_bpll",
        },
-       .sources = &clk_src_bpll,
+       .sources = &exynos5_clk_src_bpll,
        .reg_src = { .reg = EXYNOS5_CLKSRC_CDREX, .shift = 0, .size = 1 },
 };
 
@@ -187,11 +274,29 @@ static struct clksrc_clk exynos5_clk_mout_epll = {
        .reg_src = { .reg = EXYNOS5_CLKSRC_TOP2, .shift = 12, .size = 1 },
 };
 
+static struct clksrc_clk exynos5_clk_mout_mpll_fout = {
+       .clk    = {
+               .name           = "mout_mpll_fout",
+       },
+       .sources = &clk_src_mpll_fout,
+       .reg_src = { .reg = EXYNOS5_PLL_DIV2_SEL, .shift = 4, .size = 1 },
+};
+
+static struct clk *exynos5_clk_src_mpll_list[] = {
+       [0] = &clk_fin_mpll,
+       [1] = &exynos5_clk_mout_mpll_fout.clk,
+};
+
+static struct clksrc_sources exynos5_clk_src_mpll = {
+       .sources        = exynos5_clk_src_mpll_list,
+       .nr_sources     = ARRAY_SIZE(exynos5_clk_src_mpll_list),
+};
+
 struct clksrc_clk exynos5_clk_mout_mpll = {
        .clk = {
                .name           = "mout_mpll",
        },
-       .sources = &clk_src_mpll,
+       .sources = &exynos5_clk_src_mpll,
        .reg_src = { .reg = EXYNOS5_CLKSRC_CORE1, .shift = 8, .size = 1 },
 };
 
@@ -453,6 +558,11 @@ static struct clk exynos5_init_clocks_off[] = {
                .parent         = &exynos5_clk_aclk_66.clk,
                .enable         = exynos5_clk_ip_peris_ctrl,
                .ctrlbit        = (1 << 20),
+       }, {
+               .name           = "watchdog",
+               .parent         = &exynos5_clk_aclk_66.clk,
+               .enable         = exynos5_clk_ip_peris_ctrl,
+               .ctrlbit        = (1 << 19),
        }, {
                .name           = "hsmmc",
                .devname        = "exynos4-sdhci.0",
@@ -630,6 +740,76 @@ static struct clk exynos5_init_clocks_off[] = {
                .parent         = &exynos5_clk_aclk_66.clk,
                .enable         = exynos5_clk_ip_peric_ctrl,
                .ctrlbit        = (1 << 14),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(mfc_l, 0),
+               .enable         = &exynos5_clk_ip_mfc_ctrl,
+               .ctrlbit        = (1 << 1),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(mfc_r, 1),
+               .enable         = &exynos5_clk_ip_mfc_ctrl,
+               .ctrlbit        = (1 << 2),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(tv, 2),
+               .enable         = &exynos5_clk_ip_disp1_ctrl,
+               .ctrlbit        = (1 << 9)
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(jpeg, 3),
+               .enable         = &exynos5_clk_ip_gen_ctrl,
+               .ctrlbit        = (1 << 7),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(rot, 4),
+               .enable         = &exynos5_clk_ip_gen_ctrl,
+               .ctrlbit        = (1 << 6)
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(gsc0, 5),
+               .enable         = &exynos5_clk_ip_gscl_ctrl,
+               .ctrlbit        = (1 << 7),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(gsc1, 6),
+               .enable         = &exynos5_clk_ip_gscl_ctrl,
+               .ctrlbit        = (1 << 8),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(gsc2, 7),
+               .enable         = &exynos5_clk_ip_gscl_ctrl,
+               .ctrlbit        = (1 << 9),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(gsc3, 8),
+               .enable         = &exynos5_clk_ip_gscl_ctrl,
+               .ctrlbit        = (1 << 10),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(isp, 9),
+               .enable         = &exynos5_clk_ip_isp0_ctrl,
+               .ctrlbit        = (0x3F << 8),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME2,
+               .devname        = SYSMMU_CLOCK_DEVNAME(isp, 9),
+               .enable         = &exynos5_clk_ip_isp1_ctrl,
+               .ctrlbit        = (0xF << 4),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(camif0, 12),
+               .enable         = &exynos5_clk_ip_gscl_ctrl,
+               .ctrlbit        = (1 << 11),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(camif1, 13),
+               .enable         = &exynos5_clk_ip_gscl_ctrl,
+               .ctrlbit        = (1 << 12),
+       }, {
+               .name           = SYSMMU_CLOCK_NAME,
+               .devname        = SYSMMU_CLOCK_DEVNAME(2d, 14),
+               .enable         = &exynos5_clk_ip_acp_ctrl,
+               .ctrlbit        = (1 << 7)
        }
 };
 
@@ -941,10 +1121,12 @@ static struct clksrc_clk *exynos5_sysclks[] = {
        &exynos5_clk_mout_apll,
        &exynos5_clk_sclk_apll,
        &exynos5_clk_mout_bpll,
+       &exynos5_clk_mout_bpll_fout,
        &exynos5_clk_mout_bpll_user,
        &exynos5_clk_mout_cpll,
        &exynos5_clk_mout_epll,
        &exynos5_clk_mout_mpll,
+       &exynos5_clk_mout_mpll_fout,
        &exynos5_clk_mout_mpll_user,
        &exynos5_clk_vpllsrc,
        &exynos5_clk_sclk_vpll,
@@ -1008,7 +1190,9 @@ static struct clk *exynos5_clks[] __initdata = {
        &exynos5_clk_sclk_hdmi27m,
        &exynos5_clk_sclk_hdmiphy,
        &clk_fout_bpll,
+       &clk_fout_bpll_div2,
        &clk_fout_cpll,
+       &clk_fout_mpll_div2,
        &exynos5_clk_armclk,
 };
 
@@ -1173,8 +1357,10 @@ void __init_or_cpufreq exynos5_setup_clocks(void)
 
        clk_fout_apll.ops = &exynos5_fout_apll_ops;
        clk_fout_bpll.rate = bpll;
+       clk_fout_bpll_div2.rate = bpll >> 1;
        clk_fout_cpll.rate = cpll;
        clk_fout_mpll.rate = mpll;
+       clk_fout_mpll_div2.rate = mpll >> 1;
        clk_fout_epll.rate = epll;
        clk_fout_vpll.rate = vpll;
 
index 5ccd6e80a607fec8750409d6d5731dfa92f2a73e..742edd3bbec34b14f9c6e1817029b9c81f602c03 100644 (file)
@@ -19,6 +19,9 @@
 #include <linux/serial_core.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/export.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
 
 #include <asm/proc-fns.h>
 #include <asm/exception.h>
@@ -265,12 +268,12 @@ static struct map_desc exynos5_iodesc[] __initdata = {
        }, {
                .virtual        = (unsigned long)S5P_VA_GIC_CPU,
                .pfn            = __phys_to_pfn(EXYNOS5_PA_GIC_CPU),
-               .length         = SZ_64K,
+               .length         = SZ_8K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_GIC_DIST,
                .pfn            = __phys_to_pfn(EXYNOS5_PA_GIC_DIST),
-               .length         = SZ_64K,
+               .length         = SZ_4K,
                .type           = MT_DEVICE,
        },
 };
@@ -285,6 +288,11 @@ void exynos5_restart(char mode, const char *cmd)
        __raw_writel(0x1, EXYNOS_SWRESET);
 }
 
+void __init exynos_init_late(void)
+{
+       exynos_pm_late_initcall();
+}
+
 /*
  * exynos_map_io
  *
@@ -399,6 +407,7 @@ struct combiner_chip_data {
        void __iomem *base;
 };
 
+static struct irq_domain *combiner_irq_domain;
 static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
 
 static inline void __iomem *combiner_base(struct irq_data *data)
@@ -411,14 +420,14 @@ static inline void __iomem *combiner_base(struct irq_data *data)
 
 static void combiner_mask_irq(struct irq_data *data)
 {
-       u32 mask = 1 << (data->irq % 32);
+       u32 mask = 1 << (data->hwirq % 32);
 
        __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
 }
 
 static void combiner_unmask_irq(struct irq_data *data)
 {
-       u32 mask = 1 << (data->irq % 32);
+       u32 mask = 1 << (data->hwirq % 32);
 
        __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
 }
@@ -474,49 +483,131 @@ static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int i
        irq_set_chained_handler(irq, combiner_handle_cascade_irq);
 }
 
-static void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
-                         unsigned int irq_start)
+static void __init combiner_init_one(unsigned int combiner_nr,
+                                    void __iomem *base)
 {
-       unsigned int i;
-       unsigned int max_nr;
-
-       if (soc_is_exynos5250())
-               max_nr = EXYNOS5_MAX_COMBINER_NR;
-       else
-               max_nr = EXYNOS4_MAX_COMBINER_NR;
-
-       if (combiner_nr >= max_nr)
-               BUG();
-
        combiner_data[combiner_nr].base = base;
-       combiner_data[combiner_nr].irq_offset = irq_start;
+       combiner_data[combiner_nr].irq_offset = irq_find_mapping(
+               combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
        combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
 
        /* Disable all interrupts */
-
        __raw_writel(combiner_data[combiner_nr].irq_mask,
                     base + COMBINER_ENABLE_CLEAR);
+}
+
+#ifdef CONFIG_OF
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+                                    struct device_node *controller,
+                                    const u32 *intspec, unsigned int intsize,
+                                    unsigned long *out_hwirq,
+                                    unsigned int *out_type)
+{
+       if (d->of_node != controller)
+               return -EINVAL;
+
+       if (intsize < 2)
+               return -EINVAL;
+
+       *out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
+       *out_type = 0;
+
+       return 0;
+}
+#else
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+                                    struct device_node *controller,
+                                    const u32 *intspec, unsigned int intsize,
+                                    unsigned long *out_hwirq,
+                                    unsigned int *out_type)
+{
+       return -EINVAL;
+}
+#endif
+
+static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
+                                  irq_hw_number_t hw)
+{
+       irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
+       irq_set_chip_data(irq, &combiner_data[hw >> 3]);
+       set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+
+       return 0;
+}
+
+static struct irq_domain_ops combiner_irq_domain_ops = {
+       .xlate  = combiner_irq_domain_xlate,
+       .map    = combiner_irq_domain_map,
+};
+
+void __init combiner_init(void __iomem *combiner_base, struct device_node *np)
+{
+       int i, irq, irq_base;
+       unsigned int max_nr, nr_irq;
+
+       if (np) {
+               if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
+                       pr_warning("%s: number of combiners not specified, "
+                               "setting default as %d.\n",
+                               __func__, EXYNOS4_MAX_COMBINER_NR);
+                       max_nr = EXYNOS4_MAX_COMBINER_NR;
+               }
+       } else {
+               max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
+                                               EXYNOS4_MAX_COMBINER_NR;
+       }
+       nr_irq = max_nr * MAX_IRQ_IN_COMBINER;
 
-       /* Setup the Linux IRQ subsystem */
+       irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
+       if (IS_ERR_VALUE(irq_base)) {
+               irq_base = COMBINER_IRQ(0, 0);
+               pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
+       }
 
-       for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
-                               + MAX_IRQ_IN_COMBINER; i++) {
-               irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
-               irq_set_chip_data(i, &combiner_data[combiner_nr]);
-               set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+       combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
+                               &combiner_irq_domain_ops, &combiner_data);
+       if (WARN_ON(!combiner_irq_domain)) {
+               pr_warning("%s: irq domain init failed\n", __func__);
+               return;
+       }
+
+       for (i = 0; i < max_nr; i++) {
+               combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
+               irq = IRQ_SPI(i);
+#ifdef CONFIG_OF
+               if (np)
+                       irq = irq_of_parse_and_map(np, i);
+#endif
+               combiner_cascade_irq(i, irq);
        }
 }
 
 #ifdef CONFIG_OF
+int __init combiner_of_init(struct device_node *np, struct device_node *parent)
+{
+       void __iomem *combiner_base;
+
+       combiner_base = of_iomap(np, 0);
+       if (!combiner_base) {
+               pr_err("%s: failed to map combiner registers\n", __func__);
+               return -ENXIO;
+       }
+
+       combiner_init(combiner_base, np);
+
+       return 0;
+}
+
 static const struct of_device_id exynos4_dt_irq_match[] = {
        { .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
+       { .compatible = "samsung,exynos4210-combiner",
+                       .data = combiner_of_init, },
        {},
 };
 #endif
 
 void __init exynos4_init_irq(void)
 {
-       int irq;
        unsigned int gic_bank_offset;
 
        gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
@@ -528,12 +619,8 @@ void __init exynos4_init_irq(void)
                of_irq_init(exynos4_dt_irq_match);
 #endif
 
-       for (irq = 0; irq < EXYNOS4_MAX_COMBINER_NR; irq++) {
-
-               combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
-                               COMBINER_IRQ(irq, 0));
-               combiner_cascade_irq(irq, IRQ_SPI(irq));
-       }
+       if (!of_have_populated_dt())
+               combiner_init(S5P_VA_COMBINER_BASE, NULL);
 
        /*
         * The parameters of s5p_init_irq() are for VIC init.
@@ -545,18 +632,9 @@ void __init exynos4_init_irq(void)
 
 void __init exynos5_init_irq(void)
 {
-       int irq;
-
 #ifdef CONFIG_OF
        of_irq_init(exynos4_dt_irq_match);
 #endif
-
-       for (irq = 0; irq < EXYNOS5_MAX_COMBINER_NR; irq++) {
-               combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
-                               COMBINER_IRQ(irq, 0));
-               combiner_cascade_irq(irq, IRQ_SPI(irq));
-       }
-
        /*
         * The parameters of s5p_init_irq() are for VIC init.
         * Theses parameters should be NULL and 0 because EXYNOS4
@@ -565,30 +643,18 @@ void __init exynos5_init_irq(void)
        s5p_init_irq(NULL, 0);
 }
 
-struct bus_type exynos4_subsys = {
-       .name           = "exynos4-core",
-       .dev_name       = "exynos4-core",
-};
-
-struct bus_type exynos5_subsys = {
-       .name           = "exynos5-core",
-       .dev_name       = "exynos5-core",
+struct bus_type exynos_subsys = {
+       .name           = "exynos-core",
+       .dev_name       = "exynos-core",
 };
 
 static struct device exynos4_dev = {
-       .bus    = &exynos4_subsys,
-};
-
-static struct device exynos5_dev = {
-       .bus    = &exynos5_subsys,
+       .bus    = &exynos_subsys,
 };
 
 static int __init exynos_core_init(void)
 {
-       if (soc_is_exynos5250())
-               return subsys_system_register(&exynos5_subsys, NULL);
-       else
-               return subsys_system_register(&exynos4_subsys, NULL);
+       return subsys_system_register(&exynos_subsys, NULL);
 }
 core_initcall(exynos_core_init);
 
@@ -675,10 +741,7 @@ static int __init exynos_init(void)
 {
        printk(KERN_INFO "EXYNOS: Initializing architecture\n");
 
-       if (soc_is_exynos5250())
-               return device_register(&exynos5_dev);
-       else
-               return device_register(&exynos4_dev);
+       return device_register(&exynos4_dev);
 }
 
 /* uart registration process */
index 677b5467df186298ecd2a8f0046e4e6f46c9fdc5..aed2eeb065179f46f5d9b374974020b96e0a05a2 100644 (file)
@@ -19,6 +19,13 @@ void exynos4_init_irq(void);
 void exynos5_init_irq(void);
 void exynos4_restart(char mode, const char *cmd);
 void exynos5_restart(char mode, const char *cmd);
+void exynos_init_late(void);
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS
+int exynos_pm_late_initcall(void);
+#else
+static inline int exynos_pm_late_initcall(void) { return 0; }
+#endif
 
 #ifdef CONFIG_ARCH_EXYNOS4
 void exynos4_register_clocks(void);
index 26dac2893b8e4f2a97bdaa0be50506b893083a50..cff0595d0d352c0d69cb9023b9b1cbf555fd16c4 100644 (file)
@@ -100,7 +100,7 @@ static int exynos4_enter_core0_aftr(struct cpuidle_device *dev,
        exynos4_set_wakeupmask();
 
        /* Set value of power down register for aftr mode */
-       exynos4_sys_powerdown_conf(SYS_AFTR);
+       exynos_sys_powerdown_conf(SYS_AFTR);
 
        __raw_writel(virt_to_phys(s3c_cpu_resume), REG_DIRECTGO_ADDR);
        __raw_writel(S5P_CHECK_AFTR, REG_DIRECTGO_FLAG);
diff --git a/arch/arm/mach-exynos/dev-drm.c b/arch/arm/mach-exynos/dev-drm.c
new file mode 100644 (file)
index 0000000..17c9c6e
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * linux/arch/arm/mach-exynos/dev-drm.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * EXYNOS - core DRM device
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include <plat/devs.h>
+
+static u64 exynos_drm_dma_mask = DMA_BIT_MASK(32);
+
+struct platform_device exynos_device_drm = {
+       .name   = "exynos-drm",
+       .dev    = {
+               .dma_mask               = &exynos_drm_dma_mask,
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
+       }
+};
index 781563fcb156d7dd90926fe885239fb3a8c1d82b..c5b1ea301df044567ad36469120854636786a620 100644 (file)
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-exynos4/dev-sysmmu.c
+/* linux/arch/arm/mach-exynos/dev-sysmmu.c
  *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com
  *
- * EXYNOS4 - System MMU support
+ * EXYNOS - System MMU support
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
-#include <linux/export.h>
+
+#include <plat/cpu.h>
 
 #include <mach/map.h>
 #include <mach/irqs.h>
 #include <mach/sysmmu.h>
-#include <plat/s5p-clock.h>
-
-/* These names must be equal to the clock names in mach-exynos4/clock.c */
-const char *sysmmu_ips_name[EXYNOS4_SYSMMU_TOTAL_IPNUM] = {
-       "SYSMMU_MDMA"   ,
-       "SYSMMU_SSS"    ,
-       "SYSMMU_FIMC0"  ,
-       "SYSMMU_FIMC1"  ,
-       "SYSMMU_FIMC2"  ,
-       "SYSMMU_FIMC3"  ,
-       "SYSMMU_JPEG"   ,
-       "SYSMMU_FIMD0"  ,
-       "SYSMMU_FIMD1"  ,
-       "SYSMMU_PCIe"   ,
-       "SYSMMU_G2D"    ,
-       "SYSMMU_ROTATOR",
-       "SYSMMU_MDMA2"  ,
-       "SYSMMU_TV"     ,
-       "SYSMMU_MFC_L"  ,
-       "SYSMMU_MFC_R"  ,
-};
 
-static struct resource exynos4_sysmmu_resource[] = {
-       [0] = {
-               .start  = EXYNOS4_PA_SYSMMU_MDMA,
-               .end    = EXYNOS4_PA_SYSMMU_MDMA + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = IRQ_SYSMMU_MDMA0_0,
-               .end    = IRQ_SYSMMU_MDMA0_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [2] = {
-               .start  = EXYNOS4_PA_SYSMMU_SSS,
-               .end    = EXYNOS4_PA_SYSMMU_SSS + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [3] = {
-               .start  = IRQ_SYSMMU_SSS_0,
-               .end    = IRQ_SYSMMU_SSS_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [4] = {
-               .start  = EXYNOS4_PA_SYSMMU_FIMC0,
-               .end    = EXYNOS4_PA_SYSMMU_FIMC0 + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [5] = {
-               .start  = IRQ_SYSMMU_FIMC0_0,
-               .end    = IRQ_SYSMMU_FIMC0_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [6] = {
-               .start  = EXYNOS4_PA_SYSMMU_FIMC1,
-               .end    = EXYNOS4_PA_SYSMMU_FIMC1 + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [7] = {
-               .start  = IRQ_SYSMMU_FIMC1_0,
-               .end    = IRQ_SYSMMU_FIMC1_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [8] = {
-               .start  = EXYNOS4_PA_SYSMMU_FIMC2,
-               .end    = EXYNOS4_PA_SYSMMU_FIMC2 + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [9] = {
-               .start  = IRQ_SYSMMU_FIMC2_0,
-               .end    = IRQ_SYSMMU_FIMC2_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [10] = {
-               .start  = EXYNOS4_PA_SYSMMU_FIMC3,
-               .end    = EXYNOS4_PA_SYSMMU_FIMC3 + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [11] = {
-               .start  = IRQ_SYSMMU_FIMC3_0,
-               .end    = IRQ_SYSMMU_FIMC3_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [12] = {
-               .start  = EXYNOS4_PA_SYSMMU_JPEG,
-               .end    = EXYNOS4_PA_SYSMMU_JPEG + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [13] = {
-               .start  = IRQ_SYSMMU_JPEG_0,
-               .end    = IRQ_SYSMMU_JPEG_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [14] = {
-               .start  = EXYNOS4_PA_SYSMMU_FIMD0,
-               .end    = EXYNOS4_PA_SYSMMU_FIMD0 + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [15] = {
-               .start  = IRQ_SYSMMU_LCD0_M0_0,
-               .end    = IRQ_SYSMMU_LCD0_M0_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [16] = {
-               .start  = EXYNOS4_PA_SYSMMU_FIMD1,
-               .end    = EXYNOS4_PA_SYSMMU_FIMD1 + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [17] = {
-               .start  = IRQ_SYSMMU_LCD1_M1_0,
-               .end    = IRQ_SYSMMU_LCD1_M1_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [18] = {
-               .start  = EXYNOS4_PA_SYSMMU_PCIe,
-               .end    = EXYNOS4_PA_SYSMMU_PCIe + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [19] = {
-               .start  = IRQ_SYSMMU_PCIE_0,
-               .end    = IRQ_SYSMMU_PCIE_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [20] = {
-               .start  = EXYNOS4_PA_SYSMMU_G2D,
-               .end    = EXYNOS4_PA_SYSMMU_G2D + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [21] = {
-               .start  = IRQ_SYSMMU_2D_0,
-               .end    = IRQ_SYSMMU_2D_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [22] = {
-               .start  = EXYNOS4_PA_SYSMMU_ROTATOR,
-               .end    = EXYNOS4_PA_SYSMMU_ROTATOR + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [23] = {
-               .start  = IRQ_SYSMMU_ROTATOR_0,
-               .end    = IRQ_SYSMMU_ROTATOR_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [24] = {
-               .start  = EXYNOS4_PA_SYSMMU_MDMA2,
-               .end    = EXYNOS4_PA_SYSMMU_MDMA2 + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [25] = {
-               .start  = IRQ_SYSMMU_MDMA1_0,
-               .end    = IRQ_SYSMMU_MDMA1_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [26] = {
-               .start  = EXYNOS4_PA_SYSMMU_TV,
-               .end    = EXYNOS4_PA_SYSMMU_TV + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [27] = {
-               .start  = IRQ_SYSMMU_TV_M0_0,
-               .end    = IRQ_SYSMMU_TV_M0_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [28] = {
-               .start  = EXYNOS4_PA_SYSMMU_MFC_L,
-               .end    = EXYNOS4_PA_SYSMMU_MFC_L + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [29] = {
-               .start  = IRQ_SYSMMU_MFC_M0_0,
-               .end    = IRQ_SYSMMU_MFC_M0_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [30] = {
-               .start  = EXYNOS4_PA_SYSMMU_MFC_R,
-               .end    = EXYNOS4_PA_SYSMMU_MFC_R + SZ_64K - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [31] = {
-               .start  = IRQ_SYSMMU_MFC_M1_0,
-               .end    = IRQ_SYSMMU_MFC_M1_0,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
+static u64 exynos_sysmmu_dma_mask = DMA_BIT_MASK(32);
+
+#define SYSMMU_PLATFORM_DEVICE(ipname, devid)                          \
+static struct sysmmu_platform_data platdata_##ipname = {               \
+       .dbgname = #ipname,                                             \
+};                                                                     \
+struct platform_device SYSMMU_PLATDEV(ipname) =                                \
+{                                                                      \
+       .name           = SYSMMU_DEVNAME_BASE,                          \
+       .id             = devid,                                        \
+       .dev            = {                                             \
+               .dma_mask               = &exynos_sysmmu_dma_mask,      \
+               .coherent_dma_mask      = DMA_BIT_MASK(32),             \
+               .platform_data          = &platdata_##ipname,           \
+       },                                                              \
+}
+
+SYSMMU_PLATFORM_DEVICE(mfc_l,  0);
+SYSMMU_PLATFORM_DEVICE(mfc_r,  1);
+SYSMMU_PLATFORM_DEVICE(tv,     2);
+SYSMMU_PLATFORM_DEVICE(jpeg,   3);
+SYSMMU_PLATFORM_DEVICE(rot,    4);
+SYSMMU_PLATFORM_DEVICE(fimc0,  5); /* fimc* and gsc* exist exclusively */
+SYSMMU_PLATFORM_DEVICE(fimc1,  6);
+SYSMMU_PLATFORM_DEVICE(fimc2,  7);
+SYSMMU_PLATFORM_DEVICE(fimc3,  8);
+SYSMMU_PLATFORM_DEVICE(gsc0,   5);
+SYSMMU_PLATFORM_DEVICE(gsc1,   6);
+SYSMMU_PLATFORM_DEVICE(gsc2,   7);
+SYSMMU_PLATFORM_DEVICE(gsc3,   8);
+SYSMMU_PLATFORM_DEVICE(isp,    9);
+SYSMMU_PLATFORM_DEVICE(fimd0,  10);
+SYSMMU_PLATFORM_DEVICE(fimd1,  11);
+SYSMMU_PLATFORM_DEVICE(camif0, 12);
+SYSMMU_PLATFORM_DEVICE(camif1, 13);
+SYSMMU_PLATFORM_DEVICE(2d,     14);
+
+#define SYSMMU_RESOURCE_NAME(core, ipname) sysmmures_##core##_##ipname
+
+#define SYSMMU_RESOURCE(core, ipname)                                  \
+       static struct resource SYSMMU_RESOURCE_NAME(core, ipname)[] __initdata =
+
+#define DEFINE_SYSMMU_RESOURCE(core, mem, irq)                         \
+       DEFINE_RES_MEM_NAMED(core##_PA_SYSMMU_##mem, SZ_4K, #mem),      \
+       DEFINE_RES_IRQ_NAMED(core##_IRQ_SYSMMU_##irq##_0, #mem)
+
+#define SYSMMU_RESOURCE_DEFINE(core, ipname, mem, irq)                 \
+       SYSMMU_RESOURCE(core, ipname) {                                 \
+               DEFINE_SYSMMU_RESOURCE(core, mem, irq)                  \
+       }
 
-struct platform_device exynos4_device_sysmmu = {
-       .name           = "s5p-sysmmu",
-       .id             = 32,
-       .num_resources  = ARRAY_SIZE(exynos4_sysmmu_resource),
-       .resource       = exynos4_sysmmu_resource,
+struct sysmmu_resource_map {
+       struct platform_device *pdev;
+       struct resource *res;
+       u32 rnum;
+       struct device *pdd;
+       char *clocknames;
 };
-EXPORT_SYMBOL(exynos4_device_sysmmu);
 
-static struct clk *sysmmu_clk[S5P_SYSMMU_TOTAL_IPNUM];
-void sysmmu_clk_init(struct device *dev, sysmmu_ips ips)
-{
-       sysmmu_clk[ips] = clk_get(dev, sysmmu_ips_name[ips]);
-       if (IS_ERR(sysmmu_clk[ips]))
-               sysmmu_clk[ips] = NULL;
-       else
-               clk_put(sysmmu_clk[ips]);
+#define SYSMMU_RESOURCE_MAPPING(core, ipname, resname) {               \
+       .pdev = &SYSMMU_PLATDEV(ipname),                                \
+       .res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname),             \
+       .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+       .clocknames = SYSMMU_CLOCK_NAME,                                \
 }
 
-void sysmmu_clk_enable(sysmmu_ips ips)
-{
-       if (sysmmu_clk[ips])
-               clk_enable(sysmmu_clk[ips]);
+#define SYSMMU_RESOURCE_MAPPING_MC(core, ipname, resname, pdata) {     \
+       .pdev = &SYSMMU_PLATDEV(ipname),                                \
+       .res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname),             \
+       .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+       .clocknames = SYSMMU_CLOCK_NAME "," SYSMMU_CLOCK_NAME2,         \
+}
+
+#ifdef CONFIG_EXYNOS_DEV_PD
+#define SYSMMU_RESOURCE_MAPPING_PD(core, ipname, resname, pd) {                \
+       .pdev = &SYSMMU_PLATDEV(ipname),                                \
+       .res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname),             \
+       .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+       .clocknames = SYSMMU_CLOCK_NAME,                                \
+       .pdd = &exynos##core##_device_pd[pd].dev,                       \
+}
+
+#define SYSMMU_RESOURCE_MAPPING_MCPD(core, ipname, resname, pd, pdata) {\
+       .pdev = &SYSMMU_PLATDEV(ipname),                                \
+       .res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname),             \
+       .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+       .clocknames = SYSMMU_CLOCK_NAME "," SYSMMU_CLOCK_NAME2,         \
+       .pdd = &exynos##core##_device_pd[pd].dev,                       \
 }
+#else
+#define SYSMMU_RESOURCE_MAPPING_PD(core, ipname, resname, pd)          \
+               SYSMMU_RESOURCE_MAPPING(core, ipname, resname)
+#define SYSMMU_RESOURCE_MAPPING_MCPD(core, ipname, resname, pd, pdata) \
+               SYSMMU_RESOURCE_MAPPING_MC(core, ipname, resname, pdata)
+
+#endif /* CONFIG_EXYNOS_DEV_PD */
+
+#ifdef CONFIG_ARCH_EXYNOS4
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc0, FIMC0,  FIMC0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc1, FIMC1,  FIMC1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc2, FIMC2,  FIMC2);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc3, FIMC3,  FIMC3);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, jpeg,  JPEG,   JPEG);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, 2d,    G2D,    2D);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, tv,    TV,     TV_M0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, 2d_acp,        2D_ACP, 2D);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, rot,   ROTATOR, ROTATOR);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimd0, FIMD0,  LCD0_M0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimd1, FIMD1,  LCD1_M1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, flite0,        FIMC_LITE0, FIMC_LITE0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, flite1,        FIMC_LITE1, FIMC_LITE1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, mfc_r, MFC_R,  MFC_M0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, mfc_l, MFC_L,  MFC_M1);
+SYSMMU_RESOURCE(EXYNOS4, isp) {
+       DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_ISP, FIMC_ISP),
+       DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_DRC, FIMC_DRC),
+       DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_FD, FIMC_FD),
+       DEFINE_SYSMMU_RESOURCE(EXYNOS4, ISPCPU, FIMC_CX),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap4[] __initdata = {
+       SYSMMU_RESOURCE_MAPPING_PD(4, fimc0,    fimc0,  PD_CAM),
+       SYSMMU_RESOURCE_MAPPING_PD(4, fimc1,    fimc1,  PD_CAM),
+       SYSMMU_RESOURCE_MAPPING_PD(4, fimc2,    fimc2,  PD_CAM),
+       SYSMMU_RESOURCE_MAPPING_PD(4, fimc3,    fimc3,  PD_CAM),
+       SYSMMU_RESOURCE_MAPPING_PD(4, tv,       tv,     PD_TV),
+       SYSMMU_RESOURCE_MAPPING_PD(4, mfc_r,    mfc_r,  PD_MFC),
+       SYSMMU_RESOURCE_MAPPING_PD(4, mfc_l,    mfc_l,  PD_MFC),
+       SYSMMU_RESOURCE_MAPPING_PD(4, rot,      rot,    PD_LCD0),
+       SYSMMU_RESOURCE_MAPPING_PD(4, jpeg,     jpeg,   PD_CAM),
+       SYSMMU_RESOURCE_MAPPING_PD(4, fimd0,    fimd0,  PD_LCD0),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap4210[] __initdata = {
+       SYSMMU_RESOURCE_MAPPING_PD(4, 2d,       2d,     PD_LCD0),
+       SYSMMU_RESOURCE_MAPPING_PD(4, fimd1,    fimd1,  PD_LCD1),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap4212[] __initdata = {
+       SYSMMU_RESOURCE_MAPPING(4,      2d,     2d_acp),
+       SYSMMU_RESOURCE_MAPPING_PD(4,   camif0, flite0, PD_ISP),
+       SYSMMU_RESOURCE_MAPPING_PD(4,   camif1, flite1, PD_ISP),
+       SYSMMU_RESOURCE_MAPPING_PD(4,   isp,    isp,    PD_ISP),
+};
+#endif /* CONFIG_ARCH_EXYNOS4 */
 
-void sysmmu_clk_disable(sysmmu_ips ips)
+#ifdef CONFIG_ARCH_EXYNOS5
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, jpeg,  JPEG,   JPEG);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, fimd1, FIMD1,  FIMD1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, 2d,    2D,     2D);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, rot,   ROTATOR, ROTATOR);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, tv,    TV,     TV);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, flite0,        LITE0,  LITE0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, flite1,        LITE1,  LITE1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc0,  GSC0,   GSC0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc1,  GSC1,   GSC1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc2,  GSC2,   GSC2);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc3,  GSC3,   GSC3);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, mfc_r, MFC_R,  MFC_R);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, mfc_l, MFC_L,  MFC_L);
+SYSMMU_RESOURCE(EXYNOS5, isp) {
+       DEFINE_SYSMMU_RESOURCE(EXYNOS5, ISP, ISP),
+       DEFINE_SYSMMU_RESOURCE(EXYNOS5, DRC, DRC),
+       DEFINE_SYSMMU_RESOURCE(EXYNOS5, FD, FD),
+       DEFINE_SYSMMU_RESOURCE(EXYNOS5, ISPCPU, MCUISP),
+       DEFINE_SYSMMU_RESOURCE(EXYNOS5, SCALERC, SCALERCISP),
+       DEFINE_SYSMMU_RESOURCE(EXYNOS5, SCALERP, SCALERPISP),
+       DEFINE_SYSMMU_RESOURCE(EXYNOS5, ODC, ODC),
+       DEFINE_SYSMMU_RESOURCE(EXYNOS5, DIS0, DIS0),
+       DEFINE_SYSMMU_RESOURCE(EXYNOS5, DIS1, DIS1),
+       DEFINE_SYSMMU_RESOURCE(EXYNOS5, 3DNR, 3DNR),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap5[] __initdata = {
+       SYSMMU_RESOURCE_MAPPING(5,      jpeg,   jpeg),
+       SYSMMU_RESOURCE_MAPPING(5,      fimd1,  fimd1),
+       SYSMMU_RESOURCE_MAPPING(5,      2d,     2d),
+       SYSMMU_RESOURCE_MAPPING(5,      rot,    rot),
+       SYSMMU_RESOURCE_MAPPING_PD(5,   tv,     tv,     PD_DISP1),
+       SYSMMU_RESOURCE_MAPPING_PD(5,   camif0, flite0, PD_GSCL),
+       SYSMMU_RESOURCE_MAPPING_PD(5,   camif1, flite1, PD_GSCL),
+       SYSMMU_RESOURCE_MAPPING_PD(5,   gsc0,   gsc0,   PD_GSCL),
+       SYSMMU_RESOURCE_MAPPING_PD(5,   gsc1,   gsc1,   PD_GSCL),
+       SYSMMU_RESOURCE_MAPPING_PD(5,   gsc2,   gsc2,   PD_GSCL),
+       SYSMMU_RESOURCE_MAPPING_PD(5,   gsc3,   gsc3,   PD_GSCL),
+       SYSMMU_RESOURCE_MAPPING_PD(5,   mfc_r,  mfc_r,  PD_MFC),
+       SYSMMU_RESOURCE_MAPPING_PD(5,   mfc_l,  mfc_l,  PD_MFC),
+       SYSMMU_RESOURCE_MAPPING_MCPD(5, isp,    isp,    PD_ISP, mc_platdata),
+};
+#endif /* CONFIG_ARCH_EXYNOS5 */
+
+static int __init init_sysmmu_platform_device(void)
 {
-       if (sysmmu_clk[ips])
-               clk_disable(sysmmu_clk[ips]);
+       int i, j;
+       struct sysmmu_resource_map *resmap[2] = {NULL, NULL};
+       int nmap[2] = {0, 0};
+
+#ifdef CONFIG_ARCH_EXYNOS5
+       if (soc_is_exynos5250()) {
+               resmap[0] = sysmmu_resmap5;
+               nmap[0] = ARRAY_SIZE(sysmmu_resmap5);
+               nmap[1] = 0;
+       }
+#endif
+
+#ifdef CONFIG_ARCH_EXYNOS4
+       if (resmap[0] == NULL) {
+               resmap[0] = sysmmu_resmap4;
+               nmap[0] = ARRAY_SIZE(sysmmu_resmap4);
+       }
+
+       if (soc_is_exynos4210()) {
+               resmap[1] = sysmmu_resmap4210;
+               nmap[1] = ARRAY_SIZE(sysmmu_resmap4210);
+       }
+
+       if (soc_is_exynos4412() || soc_is_exynos4212()) {
+               resmap[1] = sysmmu_resmap4212;
+               nmap[1] = ARRAY_SIZE(sysmmu_resmap4212);
+       }
+#endif
+
+       for (j = 0; j < 2; j++) {
+               for (i = 0; i < nmap[j]; i++) {
+                       struct sysmmu_resource_map *map;
+                       struct sysmmu_platform_data *platdata;
+
+                       map = &resmap[j][i];
+
+                       map->pdev->dev.parent = map->pdd;
+
+                       platdata = map->pdev->dev.platform_data;
+                       platdata->clockname = map->clocknames;
+
+                       if (platform_device_add_resources(map->pdev, map->res,
+                                                               map->rnum)) {
+                               pr_err("%s: Failed to add device resources for "
+                                               "%s.%d\n", __func__,
+                                               map->pdev->name, map->pdev->id);
+                               continue;
+                       }
+
+                       if (platform_device_register(map->pdev)) {
+                               pr_err("%s: Failed to register %s.%d\n",
+                                       __func__, map->pdev->name,
+                                               map->pdev->id);
+                       }
+               }
+       }
+
+       return 0;
 }
+arch_initcall(init_sysmmu_platform_device);
index 69aaa45032057d8409e890b90ff6a6c1bd0b106c..f60b66dbcf84ebe5ebe750e3e89521724d7a8d73 100644 (file)
@@ -103,10 +103,45 @@ static u8 exynos4212_pdma0_peri[] = {
        DMACH_MIPI_HSI5,
 };
 
-struct dma_pl330_platdata exynos4_pdma0_pdata;
+static u8 exynos5250_pdma0_peri[] = {
+       DMACH_PCM0_RX,
+       DMACH_PCM0_TX,
+       DMACH_PCM2_RX,
+       DMACH_PCM2_TX,
+       DMACH_SPI0_RX,
+       DMACH_SPI0_TX,
+       DMACH_SPI2_RX,
+       DMACH_SPI2_TX,
+       DMACH_I2S0S_TX,
+       DMACH_I2S0_RX,
+       DMACH_I2S0_TX,
+       DMACH_I2S2_RX,
+       DMACH_I2S2_TX,
+       DMACH_UART0_RX,
+       DMACH_UART0_TX,
+       DMACH_UART2_RX,
+       DMACH_UART2_TX,
+       DMACH_UART4_RX,
+       DMACH_UART4_TX,
+       DMACH_SLIMBUS0_RX,
+       DMACH_SLIMBUS0_TX,
+       DMACH_SLIMBUS2_RX,
+       DMACH_SLIMBUS2_TX,
+       DMACH_SLIMBUS4_RX,
+       DMACH_SLIMBUS4_TX,
+       DMACH_AC97_MICIN,
+       DMACH_AC97_PCMIN,
+       DMACH_AC97_PCMOUT,
+       DMACH_MIPI_HSI0,
+       DMACH_MIPI_HSI2,
+       DMACH_MIPI_HSI4,
+       DMACH_MIPI_HSI6,
+};
+
+static struct dma_pl330_platdata exynos_pdma0_pdata;
 
-static AMBA_AHB_DEVICE(exynos4_pdma0, "dma-pl330.0", 0x00041330,
-       EXYNOS4_PA_PDMA0, {EXYNOS4_IRQ_PDMA0}, &exynos4_pdma0_pdata);
+static AMBA_AHB_DEVICE(exynos_pdma0, "dma-pl330.0", 0x00041330,
+       EXYNOS4_PA_PDMA0, {EXYNOS4_IRQ_PDMA0}, &exynos_pdma0_pdata);
 
 static u8 exynos4210_pdma1_peri[] = {
        DMACH_PCM0_RX,
@@ -169,10 +204,45 @@ static u8 exynos4212_pdma1_peri[] = {
        DMACH_MIPI_HSI7,
 };
 
-static struct dma_pl330_platdata exynos4_pdma1_pdata;
+static u8 exynos5250_pdma1_peri[] = {
+       DMACH_PCM0_RX,
+       DMACH_PCM0_TX,
+       DMACH_PCM1_RX,
+       DMACH_PCM1_TX,
+       DMACH_SPI1_RX,
+       DMACH_SPI1_TX,
+       DMACH_PWM,
+       DMACH_SPDIF,
+       DMACH_I2S0S_TX,
+       DMACH_I2S0_RX,
+       DMACH_I2S0_TX,
+       DMACH_I2S1_RX,
+       DMACH_I2S1_TX,
+       DMACH_UART0_RX,
+       DMACH_UART0_TX,
+       DMACH_UART1_RX,
+       DMACH_UART1_TX,
+       DMACH_UART3_RX,
+       DMACH_UART3_TX,
+       DMACH_SLIMBUS1_RX,
+       DMACH_SLIMBUS1_TX,
+       DMACH_SLIMBUS3_RX,
+       DMACH_SLIMBUS3_TX,
+       DMACH_SLIMBUS5_RX,
+       DMACH_SLIMBUS5_TX,
+       DMACH_SLIMBUS0AUX_RX,
+       DMACH_SLIMBUS0AUX_TX,
+       DMACH_DISP1,
+       DMACH_MIPI_HSI1,
+       DMACH_MIPI_HSI3,
+       DMACH_MIPI_HSI5,
+       DMACH_MIPI_HSI7,
+};
 
-static AMBA_AHB_DEVICE(exynos4_pdma1,  "dma-pl330.1", 0x00041330,
-       EXYNOS4_PA_PDMA1, {EXYNOS4_IRQ_PDMA1}, &exynos4_pdma1_pdata);
+static struct dma_pl330_platdata exynos_pdma1_pdata;
+
+static AMBA_AHB_DEVICE(exynos_pdma1,  "dma-pl330.1", 0x00041330,
+       EXYNOS4_PA_PDMA1, {EXYNOS4_IRQ_PDMA1}, &exynos_pdma1_pdata);
 
 static u8 mdma_peri[] = {
        DMACH_MTOM_0,
@@ -185,46 +255,63 @@ static u8 mdma_peri[] = {
        DMACH_MTOM_7,
 };
 
-static struct dma_pl330_platdata exynos4_mdma1_pdata = {
+static struct dma_pl330_platdata exynos_mdma1_pdata = {
        .nr_valid_peri = ARRAY_SIZE(mdma_peri),
        .peri_id = mdma_peri,
 };
 
-static AMBA_AHB_DEVICE(exynos4_mdma1,  "dma-pl330.2", 0x00041330,
-       EXYNOS4_PA_MDMA1, {EXYNOS4_IRQ_MDMA1}, &exynos4_mdma1_pdata);
+static AMBA_AHB_DEVICE(exynos_mdma1,  "dma-pl330.2", 0x00041330,
+       EXYNOS4_PA_MDMA1, {EXYNOS4_IRQ_MDMA1}, &exynos_mdma1_pdata);
 
-static int __init exynos4_dma_init(void)
+static int __init exynos_dma_init(void)
 {
        if (of_have_populated_dt())
                return 0;
 
        if (soc_is_exynos4210()) {
-               exynos4_pdma0_pdata.nr_valid_peri =
+               exynos_pdma0_pdata.nr_valid_peri =
                        ARRAY_SIZE(exynos4210_pdma0_peri);
-               exynos4_pdma0_pdata.peri_id = exynos4210_pdma0_peri;
-               exynos4_pdma1_pdata.nr_valid_peri =
+               exynos_pdma0_pdata.peri_id = exynos4210_pdma0_peri;
+               exynos_pdma1_pdata.nr_valid_peri =
                        ARRAY_SIZE(exynos4210_pdma1_peri);
-               exynos4_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
+               exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
        } else if (soc_is_exynos4212() || soc_is_exynos4412()) {
-               exynos4_pdma0_pdata.nr_valid_peri =
+               exynos_pdma0_pdata.nr_valid_peri =
                        ARRAY_SIZE(exynos4212_pdma0_peri);
-               exynos4_pdma0_pdata.peri_id = exynos4212_pdma0_peri;
-               exynos4_pdma1_pdata.nr_valid_peri =
+               exynos_pdma0_pdata.peri_id = exynos4212_pdma0_peri;
+               exynos_pdma1_pdata.nr_valid_peri =
                        ARRAY_SIZE(exynos4212_pdma1_peri);
-               exynos4_pdma1_pdata.peri_id = exynos4212_pdma1_peri;
+               exynos_pdma1_pdata.peri_id = exynos4212_pdma1_peri;
+       } else if (soc_is_exynos5250()) {
+               exynos_pdma0_pdata.nr_valid_peri =
+                       ARRAY_SIZE(exynos5250_pdma0_peri);
+               exynos_pdma0_pdata.peri_id = exynos5250_pdma0_peri;
+               exynos_pdma1_pdata.nr_valid_peri =
+                       ARRAY_SIZE(exynos5250_pdma1_peri);
+               exynos_pdma1_pdata.peri_id = exynos5250_pdma1_peri;
+
+               exynos_pdma0_device.res.start = EXYNOS5_PA_PDMA0;
+               exynos_pdma0_device.res.end = EXYNOS5_PA_PDMA0 + SZ_4K - 1;
+               exynos_pdma0_device.irq[0] = EXYNOS5_IRQ_PDMA0;
+               exynos_pdma1_device.res.start = EXYNOS5_PA_PDMA1;
+               exynos_pdma1_device.res.end = EXYNOS5_PA_PDMA1 + SZ_4K - 1;
+               exynos_pdma1_device.irq[0] = EXYNOS5_IRQ_PDMA1;
+               exynos_mdma1_device.res.start = EXYNOS5_PA_MDMA1;
+               exynos_mdma1_device.res.end = EXYNOS5_PA_MDMA1 + SZ_4K - 1;
+               exynos_mdma1_device.irq[0] = EXYNOS5_IRQ_MDMA1;
        }
 
-       dma_cap_set(DMA_SLAVE, exynos4_pdma0_pdata.cap_mask);
-       dma_cap_set(DMA_CYCLIC, exynos4_pdma0_pdata.cap_mask);
-       amba_device_register(&exynos4_pdma0_device, &iomem_resource);
+       dma_cap_set(DMA_SLAVE, exynos_pdma0_pdata.cap_mask);
+       dma_cap_set(DMA_CYCLIC, exynos_pdma0_pdata.cap_mask);
+       amba_device_register(&exynos_pdma0_device, &iomem_resource);
 
-       dma_cap_set(DMA_SLAVE, exynos4_pdma1_pdata.cap_mask);
-       dma_cap_set(DMA_CYCLIC, exynos4_pdma1_pdata.cap_mask);
-       amba_device_register(&exynos4_pdma1_device, &iomem_resource);
+       dma_cap_set(DMA_SLAVE, exynos_pdma1_pdata.cap_mask);
+       dma_cap_set(DMA_CYCLIC, exynos_pdma1_pdata.cap_mask);
+       amba_device_register(&exynos_pdma1_device, &iomem_resource);
 
-       dma_cap_set(DMA_MEMCPY, exynos4_mdma1_pdata.cap_mask);
-       amba_device_register(&exynos4_mdma1_device, &iomem_resource);
+       dma_cap_set(DMA_MEMCPY, exynos_mdma1_pdata.cap_mask);
+       amba_device_register(&exynos_mdma1_device, &iomem_resource);
 
        return 0;
 }
-arch_initcall(exynos4_dma_init);
+arch_initcall(exynos_dma_init);
index d7498afe036ad4acd92c3c7a5a6c7b3f441a9d8b..eb24f1eb8e3b8398168add52aeb2faa963898387 100644 (file)
@@ -153,10 +153,11 @@ enum exynos4_gpio_number {
 #define EXYNOS5_GPIO_B2_NR     (4)
 #define EXYNOS5_GPIO_B3_NR     (4)
 #define EXYNOS5_GPIO_C0_NR     (7)
-#define EXYNOS5_GPIO_C1_NR     (7)
+#define EXYNOS5_GPIO_C1_NR     (4)
 #define EXYNOS5_GPIO_C2_NR     (7)
 #define EXYNOS5_GPIO_C3_NR     (7)
-#define EXYNOS5_GPIO_D0_NR     (8)
+#define EXYNOS5_GPIO_C4_NR     (7)
+#define EXYNOS5_GPIO_D0_NR     (4)
 #define EXYNOS5_GPIO_D1_NR     (8)
 #define EXYNOS5_GPIO_Y0_NR     (6)
 #define EXYNOS5_GPIO_Y1_NR     (4)
@@ -199,7 +200,8 @@ enum exynos5_gpio_number {
        EXYNOS5_GPIO_C1_START           = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C0),
        EXYNOS5_GPIO_C2_START           = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C1),
        EXYNOS5_GPIO_C3_START           = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C2),
-       EXYNOS5_GPIO_D0_START           = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C3),
+       EXYNOS5_GPIO_C4_START           = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C3),
+       EXYNOS5_GPIO_D0_START           = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C4),
        EXYNOS5_GPIO_D1_START           = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_D0),
        EXYNOS5_GPIO_Y0_START           = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_D1),
        EXYNOS5_GPIO_Y1_START           = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_Y0),
@@ -242,6 +244,7 @@ enum exynos5_gpio_number {
 #define EXYNOS5_GPC1(_nr)      (EXYNOS5_GPIO_C1_START + (_nr))
 #define EXYNOS5_GPC2(_nr)      (EXYNOS5_GPIO_C2_START + (_nr))
 #define EXYNOS5_GPC3(_nr)      (EXYNOS5_GPIO_C3_START + (_nr))
+#define EXYNOS5_GPC4(_nr)      (EXYNOS5_GPIO_C4_START + (_nr))
 #define EXYNOS5_GPD0(_nr)      (EXYNOS5_GPIO_D0_START + (_nr))
 #define EXYNOS5_GPD1(_nr)      (EXYNOS5_GPIO_D1_START + (_nr))
 #define EXYNOS5_GPY0(_nr)      (EXYNOS5_GPIO_Y0_START + (_nr))
index c02dae7bf4a37923f99ec623337d78a08d182106..7a4b4789eb7288205fcfcd555dcf50ffdb962753 100644 (file)
 #define EXYNOS4_IRQ_SYSMMU_MFC_M1_0    COMBINER_IRQ(5, 6)
 #define EXYNOS4_IRQ_SYSMMU_PCIE_0      COMBINER_IRQ(5, 7)
 
+#define EXYNOS4_IRQ_SYSMMU_FIMC_LITE0_0        COMBINER_IRQ(16, 0)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_LITE1_0        COMBINER_IRQ(16, 1)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_ISP_0  COMBINER_IRQ(16, 2)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_DRC_0  COMBINER_IRQ(16, 3)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_FD_0   COMBINER_IRQ(16, 4)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_CX_0   COMBINER_IRQ(16, 5)
+
 #define EXYNOS4_IRQ_FIMD0_FIFO         COMBINER_IRQ(11, 0)
 #define EXYNOS4_IRQ_FIMD0_VSYNC                COMBINER_IRQ(11, 1)
 #define EXYNOS4_IRQ_FIMD0_SYSTEM       COMBINER_IRQ(11, 2)
 #define IRQ_KEYPAD                     EXYNOS4_IRQ_KEYPAD
 #define IRQ_PMU                                EXYNOS4_IRQ_PMU
 
-#define IRQ_SYSMMU_MDMA0_0             EXYNOS4_IRQ_SYSMMU_MDMA0_0
-#define IRQ_SYSMMU_SSS_0                EXYNOS4_IRQ_SYSMMU_SSS_0
-#define IRQ_SYSMMU_FIMC0_0              EXYNOS4_IRQ_SYSMMU_FIMC0_0
-#define IRQ_SYSMMU_FIMC1_0              EXYNOS4_IRQ_SYSMMU_FIMC1_0
-#define IRQ_SYSMMU_FIMC2_0              EXYNOS4_IRQ_SYSMMU_FIMC2_0
-#define IRQ_SYSMMU_FIMC3_0              EXYNOS4_IRQ_SYSMMU_FIMC3_0
-#define IRQ_SYSMMU_JPEG_0               EXYNOS4_IRQ_SYSMMU_JPEG_0
-#define IRQ_SYSMMU_2D_0                 EXYNOS4_IRQ_SYSMMU_2D_0
-
-#define IRQ_SYSMMU_ROTATOR_0            EXYNOS4_IRQ_SYSMMU_ROTATOR_0
-#define IRQ_SYSMMU_MDMA1_0              EXYNOS4_IRQ_SYSMMU_MDMA1_0
-#define IRQ_SYSMMU_LCD0_M0_0            EXYNOS4_IRQ_SYSMMU_LCD0_M0_0
-#define IRQ_SYSMMU_LCD1_M1_0            EXYNOS4_IRQ_SYSMMU_LCD1_M1_0
-#define IRQ_SYSMMU_TV_M0_0              EXYNOS4_IRQ_SYSMMU_TV_M0_0
-#define IRQ_SYSMMU_MFC_M0_0             EXYNOS4_IRQ_SYSMMU_MFC_M0_0
-#define IRQ_SYSMMU_MFC_M1_0             EXYNOS4_IRQ_SYSMMU_MFC_M1_0
-#define IRQ_SYSMMU_PCIE_0               EXYNOS4_IRQ_SYSMMU_PCIE_0
-
 #define IRQ_FIMD0_FIFO                 EXYNOS4_IRQ_FIMD0_FIFO
 #define IRQ_FIMD0_VSYNC                        EXYNOS4_IRQ_FIMD0_VSYNC
 #define IRQ_FIMD0_SYSTEM               EXYNOS4_IRQ_FIMD0_SYSTEM
 #define EXYNOS5_IRQ_MIPICSI1           IRQ_SPI(80)
 #define EXYNOS5_IRQ_EFNFCON_DMA_ABORT  IRQ_SPI(81)
 #define EXYNOS5_IRQ_MIPIDSI0           IRQ_SPI(82)
+#define EXYNOS5_IRQ_WDT_IOP            IRQ_SPI(83)
 #define EXYNOS5_IRQ_ROTATOR            IRQ_SPI(84)
 #define EXYNOS5_IRQ_GSC0               IRQ_SPI(85)
 #define EXYNOS5_IRQ_GSC1               IRQ_SPI(86)
 #define EXYNOS5_IRQ_JPEG               IRQ_SPI(89)
 #define EXYNOS5_IRQ_EFNFCON_DMA                IRQ_SPI(90)
 #define EXYNOS5_IRQ_2D                 IRQ_SPI(91)
-#define EXYNOS5_IRQ_SFMC0              IRQ_SPI(92)
-#define EXYNOS5_IRQ_SFMC1              IRQ_SPI(93)
+#define EXYNOS5_IRQ_EFNFCON_0          IRQ_SPI(92)
+#define EXYNOS5_IRQ_EFNFCON_1          IRQ_SPI(93)
 #define EXYNOS5_IRQ_MIXER              IRQ_SPI(94)
 #define EXYNOS5_IRQ_HDMI               IRQ_SPI(95)
 #define EXYNOS5_IRQ_MFC                        IRQ_SPI(96)
 #define EXYNOS5_IRQ_PCM2               IRQ_SPI(104)
 #define EXYNOS5_IRQ_SPDIF              IRQ_SPI(105)
 #define EXYNOS5_IRQ_ADC0               IRQ_SPI(106)
-
+#define EXYNOS5_IRQ_ADC1               IRQ_SPI(107)
 #define EXYNOS5_IRQ_SATA_PHY           IRQ_SPI(108)
 #define EXYNOS5_IRQ_SATA_PMEMREQ       IRQ_SPI(109)
 #define EXYNOS5_IRQ_CAM_C              IRQ_SPI(110)
 #define EXYNOS5_IRQ_DP1_INTP1          IRQ_SPI(113)
 #define EXYNOS5_IRQ_CEC                        IRQ_SPI(114)
 #define EXYNOS5_IRQ_SATA               IRQ_SPI(115)
-#define EXYNOS5_IRQ_NFCON              IRQ_SPI(116)
 
+#define EXYNOS5_IRQ_MCT_L0             IRQ_SPI(120)
+#define EXYNOS5_IRQ_MCT_L1             IRQ_SPI(121)
 #define EXYNOS5_IRQ_MMC44              IRQ_SPI(123)
 #define EXYNOS5_IRQ_MDMA1              IRQ_SPI(124)
 #define EXYNOS5_IRQ_FIMC_LITE0         IRQ_SPI(125)
 #define EXYNOS5_IRQ_RP_TIMER           IRQ_SPI(127)
 
 #define EXYNOS5_IRQ_PMU                        COMBINER_IRQ(1, 2)
-#define EXYNOS5_IRQ_PMU_CPU1           COMBINER_IRQ(1, 6)
 
 #define EXYNOS5_IRQ_SYSMMU_GSC0_0      COMBINER_IRQ(2, 0)
 #define EXYNOS5_IRQ_SYSMMU_GSC0_1      COMBINER_IRQ(2, 1)
 #define EXYNOS5_IRQ_SYSMMU_GSC3_0      COMBINER_IRQ(2, 6)
 #define EXYNOS5_IRQ_SYSMMU_GSC3_1      COMBINER_IRQ(2, 7)
 
+#define EXYNOS5_IRQ_SYSMMU_LITE2_0     COMBINER_IRQ(3, 0)
+#define EXYNOS5_IRQ_SYSMMU_LITE2_1     COMBINER_IRQ(3, 1)
 #define EXYNOS5_IRQ_SYSMMU_FIMD1_0     COMBINER_IRQ(3, 2)
 #define EXYNOS5_IRQ_SYSMMU_FIMD1_1     COMBINER_IRQ(3, 3)
 #define EXYNOS5_IRQ_SYSMMU_LITE0_0     COMBINER_IRQ(3, 4)
 
 #define EXYNOS5_IRQ_SYSMMU_ARM_0       COMBINER_IRQ(6, 0)
 #define EXYNOS5_IRQ_SYSMMU_ARM_1       COMBINER_IRQ(6, 1)
-#define EXYNOS5_IRQ_SYSMMU_MFC_L_0     COMBINER_IRQ(6, 2)
-#define EXYNOS5_IRQ_SYSMMU_MFC_L_1     COMBINER_IRQ(6, 3)
+#define EXYNOS5_IRQ_SYSMMU_MFC_R_0     COMBINER_IRQ(6, 2)
+#define EXYNOS5_IRQ_SYSMMU_MFC_R_1     COMBINER_IRQ(6, 3)
 #define EXYNOS5_IRQ_SYSMMU_RTIC_0      COMBINER_IRQ(6, 4)
 #define EXYNOS5_IRQ_SYSMMU_RTIC_1      COMBINER_IRQ(6, 5)
 #define EXYNOS5_IRQ_SYSMMU_SSS_0       COMBINER_IRQ(6, 6)
 #define EXYNOS5_IRQ_SYSMMU_MDMA1_1     COMBINER_IRQ(7, 3)
 #define EXYNOS5_IRQ_SYSMMU_TV_0                COMBINER_IRQ(7, 4)
 #define EXYNOS5_IRQ_SYSMMU_TV_1                COMBINER_IRQ(7, 5)
-#define EXYNOS5_IRQ_SYSMMU_GPSX_0      COMBINER_IRQ(7, 6)
-#define EXYNOS5_IRQ_SYSMMU_GPSX_1      COMBINER_IRQ(7, 7)
 
-#define EXYNOS5_IRQ_SYSMMU_MFC_R_0     COMBINER_IRQ(8, 5)
-#define EXYNOS5_IRQ_SYSMMU_MFC_R_1     COMBINER_IRQ(8, 6)
+#define EXYNOS5_IRQ_SYSMMU_MFC_L_0     COMBINER_IRQ(8, 5)
+#define EXYNOS5_IRQ_SYSMMU_MFC_L_1     COMBINER_IRQ(8, 6)
 
 #define EXYNOS5_IRQ_SYSMMU_DIS1_0      COMBINER_IRQ(9, 4)
 #define EXYNOS5_IRQ_SYSMMU_DIS1_1      COMBINER_IRQ(9, 5)
 #define EXYNOS5_IRQ_SYSMMU_DRC_0       COMBINER_IRQ(11, 6)
 #define EXYNOS5_IRQ_SYSMMU_DRC_1       COMBINER_IRQ(11, 7)
 
+#define EXYNOS5_IRQ_MDMA1_ABORT                COMBINER_IRQ(13, 1)
+
+#define EXYNOS5_IRQ_MDMA0_ABORT                COMBINER_IRQ(15, 3)
+
 #define EXYNOS5_IRQ_FIMD1_FIFO         COMBINER_IRQ(18, 4)
 #define EXYNOS5_IRQ_FIMD1_VSYNC                COMBINER_IRQ(18, 5)
 #define EXYNOS5_IRQ_FIMD1_SYSTEM       COMBINER_IRQ(18, 6)
 
+#define EXYNOS5_IRQ_ARMIOP_GIC         COMBINER_IRQ(19, 0)
+#define EXYNOS5_IRQ_ARMISP_GIC         COMBINER_IRQ(19, 1)
+#define EXYNOS5_IRQ_IOP_GIC            COMBINER_IRQ(19, 3)
+#define EXYNOS5_IRQ_ISP_GIC            COMBINER_IRQ(19, 4)
+
+#define EXYNOS5_IRQ_PMU_CPU1           COMBINER_IRQ(22, 4)
+
 #define EXYNOS5_IRQ_EINT0              COMBINER_IRQ(23, 0)
-#define EXYNOS5_IRQ_MCT_L0             COMBINER_IRQ(23, 1)
-#define EXYNOS5_IRQ_MCT_L1             COMBINER_IRQ(23, 2)
 #define EXYNOS5_IRQ_MCT_G0             COMBINER_IRQ(23, 3)
 #define EXYNOS5_IRQ_MCT_G1             COMBINER_IRQ(23, 4)
-#define EXYNOS5_IRQ_MCT_G2             COMBINER_IRQ(23, 5)
-#define EXYNOS5_IRQ_MCT_G3             COMBINER_IRQ(23, 6)
 
 #define EXYNOS5_IRQ_EINT1              COMBINER_IRQ(24, 0)
 #define EXYNOS5_IRQ_SYSMMU_LITE1_0     COMBINER_IRQ(24, 1)
 
 #define EXYNOS5_MAX_COMBINER_NR                32
 
-#define EXYNOS5_IRQ_GPIO1_NR_GROUPS    13
+#define EXYNOS5_IRQ_GPIO1_NR_GROUPS    14
 #define EXYNOS5_IRQ_GPIO2_NR_GROUPS    9
 #define EXYNOS5_IRQ_GPIO3_NR_GROUPS    5
 #define EXYNOS5_IRQ_GPIO4_NR_GROUPS    1
index e009a66477f42e579572ff8e5edbcab914bc08a0..ca4aa89aa46b357fe5ff04710bd9ffdf4caf361b 100644 (file)
@@ -34,6 +34,9 @@
 
 #define EXYNOS4_PA_JPEG                        0x11840000
 
+/* x = 0...1 */
+#define EXYNOS4_PA_FIMC_LITE(x)                (0x12390000 + ((x) * 0x10000))
+
 #define EXYNOS4_PA_G2D                 0x12800000
 
 #define EXYNOS4_PA_I2S0                        0x03830000
@@ -78,8 +81,8 @@
 
 #define EXYNOS4_PA_GIC_CPU             0x10480000
 #define EXYNOS4_PA_GIC_DIST            0x10490000
-#define EXYNOS5_PA_GIC_CPU             0x10480000
-#define EXYNOS5_PA_GIC_DIST            0x10490000
+#define EXYNOS5_PA_GIC_CPU             0x10482000
+#define EXYNOS5_PA_GIC_DIST            0x10481000
 
 #define EXYNOS4_PA_COREPERI            0x10500000
 #define EXYNOS4_PA_TWD                 0x10500600
@@ -95,6 +98,7 @@
 #define EXYNOS5_PA_PDMA1               0x121B0000
 
 #define EXYNOS4_PA_SYSMMU_MDMA         0x10A40000
+#define EXYNOS4_PA_SYSMMU_2D_ACP       0x10A40000
 #define EXYNOS4_PA_SYSMMU_SSS          0x10A50000
 #define EXYNOS4_PA_SYSMMU_FIMC0                0x11A20000
 #define EXYNOS4_PA_SYSMMU_FIMC1                0x11A30000
 #define EXYNOS4_PA_SYSMMU_JPEG         0x11A60000
 #define EXYNOS4_PA_SYSMMU_FIMD0                0x11E20000
 #define EXYNOS4_PA_SYSMMU_FIMD1                0x12220000
+#define EXYNOS4_PA_SYSMMU_FIMC_ISP     0x12260000
+#define EXYNOS4_PA_SYSMMU_FIMC_DRC     0x12270000
+#define EXYNOS4_PA_SYSMMU_FIMC_FD      0x122A0000
+#define EXYNOS4_PA_SYSMMU_ISPCPU       0x122B0000
+#define EXYNOS4_PA_SYSMMU_FIMC_LITE0   0x123B0000
+#define EXYNOS4_PA_SYSMMU_FIMC_LITE1   0x123C0000
 #define EXYNOS4_PA_SYSMMU_PCIe         0x12620000
 #define EXYNOS4_PA_SYSMMU_G2D          0x12A20000
 #define EXYNOS4_PA_SYSMMU_ROTATOR      0x12A30000
 #define EXYNOS4_PA_SYSMMU_TV           0x12E20000
 #define EXYNOS4_PA_SYSMMU_MFC_L                0x13620000
 #define EXYNOS4_PA_SYSMMU_MFC_R                0x13630000
+
+#define EXYNOS5_PA_SYSMMU_MDMA1                0x10A40000
+#define EXYNOS5_PA_SYSMMU_SSS          0x10A50000
+#define EXYNOS5_PA_SYSMMU_2D           0x10A60000
+#define EXYNOS5_PA_SYSMMU_MFC_L                0x11200000
+#define EXYNOS5_PA_SYSMMU_MFC_R                0x11210000
+#define EXYNOS5_PA_SYSMMU_ROTATOR      0x11D40000
+#define EXYNOS5_PA_SYSMMU_MDMA2                0x11D50000
+#define EXYNOS5_PA_SYSMMU_JPEG         0x11F20000
+#define EXYNOS5_PA_SYSMMU_IOP          0x12360000
+#define EXYNOS5_PA_SYSMMU_RTIC         0x12370000
+#define EXYNOS5_PA_SYSMMU_GPS          0x12630000
+#define EXYNOS5_PA_SYSMMU_ISP          0x13260000
+#define EXYNOS5_PA_SYSMMU_DRC          0x12370000
+#define EXYNOS5_PA_SYSMMU_SCALERC      0x13280000
+#define EXYNOS5_PA_SYSMMU_SCALERP      0x13290000
+#define EXYNOS5_PA_SYSMMU_FD           0x132A0000
+#define EXYNOS5_PA_SYSMMU_ISPCPU       0x132B0000
+#define EXYNOS5_PA_SYSMMU_ODC          0x132C0000
+#define EXYNOS5_PA_SYSMMU_DIS0         0x132D0000
+#define EXYNOS5_PA_SYSMMU_DIS1         0x132E0000
+#define EXYNOS5_PA_SYSMMU_3DNR         0x132F0000
+#define EXYNOS5_PA_SYSMMU_LITE0                0x13C40000
+#define EXYNOS5_PA_SYSMMU_LITE1                0x13C50000
+#define EXYNOS5_PA_SYSMMU_GSC0         0x13E80000
+#define EXYNOS5_PA_SYSMMU_GSC1         0x13E90000
+#define EXYNOS5_PA_SYSMMU_GSC2         0x13EA0000
+#define EXYNOS5_PA_SYSMMU_GSC3         0x13EB0000
+#define EXYNOS5_PA_SYSMMU_FIMD1                0x14640000
+#define EXYNOS5_PA_SYSMMU_TV           0x14650000
+
 #define EXYNOS4_PA_SPI0                        0x13920000
 #define EXYNOS4_PA_SPI1                        0x13930000
 #define EXYNOS4_PA_SPI2                        0x13940000
index 9d8da51e35caa794ff8d45405c789fa00d2e009d..a67ecfaf12160646bc05680b299ca044f039bb6e 100644 (file)
@@ -33,7 +33,7 @@ static inline void s3c_pm_arch_prepare_irqs(void)
        __raw_writel(tmp, S5P_WAKEUP_MASK);
 
        __raw_writel(s3c_irqwake_intmask, S5P_WAKEUP_MASK);
-       __raw_writel(s3c_irqwake_eintmask, S5P_EINT_WAKEUP_MASK);
+       __raw_writel(s3c_irqwake_eintmask & 0xFFFFFFFE, S5P_EINT_WAKEUP_MASK);
 }
 
 static inline void s3c_pm_arch_stop_clocks(void)
index e76b7faba66b08da7f5da4c73816a15b924d7b3e..7c27c2d4bf44d2914c3c38a76c8b33b1f1ec611b 100644 (file)
@@ -23,12 +23,12 @@ enum sys_powerdown {
 };
 
 extern unsigned long l2x0_regs_phys;
-struct exynos4_pmu_conf {
+struct exynos_pmu_conf {
        void __iomem *reg;
        unsigned int val[NUM_SYS_POWERDOWN];
 };
 
-extern void exynos4_sys_powerdown_conf(enum sys_powerdown mode);
+extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
 extern void s3c_cpu_resume(void);
 
 #endif /* __ASM_ARCH_PMU_H */
index d9578a58ae7f573b35d7065609ae4d090884acbf..8c9b38c9c5042d931ede0f3ab6f569cc3406dd86 100644 (file)
 #define EXYNOS4_CLKGATE_SCLKCPU                        EXYNOS_CLKREG(0x14800)
 #define EXYNOS4_CLKGATE_IP_CPU                 EXYNOS_CLKREG(0x14900)
 
+#define EXYNOS4_CLKGATE_IP_ISP0                        EXYNOS_CLKREG(0x18800)
+#define EXYNOS4_CLKGATE_IP_ISP1                        EXYNOS_CLKREG(0x18804)
+
 #define EXYNOS4_APLL_LOCKTIME                  (0x1C20)        /* 300us */
 
 #define EXYNOS4_APLLCON0_ENABLE_SHIFT          (31)
 
 #define EXYNOS5_CLKDIV_ACP                     EXYNOS_CLKREG(0x08500)
 
-#define EXYNOS5_CLKSRC_TOP2                    EXYNOS_CLKREG(0x10218)
 #define EXYNOS5_EPLL_CON0                      EXYNOS_CLKREG(0x10130)
 #define EXYNOS5_EPLL_CON1                      EXYNOS_CLKREG(0x10134)
+#define EXYNOS5_EPLL_CON2                      EXYNOS_CLKREG(0x10138)
 #define EXYNOS5_VPLL_CON0                      EXYNOS_CLKREG(0x10140)
 #define EXYNOS5_VPLL_CON1                      EXYNOS_CLKREG(0x10144)
+#define EXYNOS5_VPLL_CON2                      EXYNOS_CLKREG(0x10148)
 #define EXYNOS5_CPLL_CON0                      EXYNOS_CLKREG(0x10120)
 
 #define EXYNOS5_CLKSRC_TOP0                    EXYNOS_CLKREG(0x10210)
+#define EXYNOS5_CLKSRC_TOP1                    EXYNOS_CLKREG(0x10214)
+#define EXYNOS5_CLKSRC_TOP2                    EXYNOS_CLKREG(0x10218)
 #define EXYNOS5_CLKSRC_TOP3                    EXYNOS_CLKREG(0x1021C)
 #define EXYNOS5_CLKSRC_GSCL                    EXYNOS_CLKREG(0x10220)
 #define EXYNOS5_CLKSRC_DISP1_0                 EXYNOS_CLKREG(0x1022C)
+#define EXYNOS5_CLKSRC_MAUDIO                  EXYNOS_CLKREG(0x10240)
 #define EXYNOS5_CLKSRC_FSYS                    EXYNOS_CLKREG(0x10244)
 #define EXYNOS5_CLKSRC_PERIC0                  EXYNOS_CLKREG(0x10250)
+#define EXYNOS5_CLKSRC_PERIC1                  EXYNOS_CLKREG(0x10254)
+#define EXYNOS5_SCLK_SRC_ISP                   EXYNOS_CLKREG(0x10270)
 
 #define EXYNOS5_CLKSRC_MASK_TOP                        EXYNOS_CLKREG(0x10310)
 #define EXYNOS5_CLKSRC_MASK_GSCL               EXYNOS_CLKREG(0x10320)
 #define EXYNOS5_CLKSRC_MASK_DISP1_0            EXYNOS_CLKREG(0x1032C)
+#define EXYNOS5_CLKSRC_MASK_MAUDIO             EXYNOS_CLKREG(0x10334)
 #define EXYNOS5_CLKSRC_MASK_FSYS               EXYNOS_CLKREG(0x10340)
 #define EXYNOS5_CLKSRC_MASK_PERIC0             EXYNOS_CLKREG(0x10350)
+#define EXYNOS5_CLKSRC_MASK_PERIC1             EXYNOS_CLKREG(0x10354)
 
 #define EXYNOS5_CLKDIV_TOP0                    EXYNOS_CLKREG(0x10510)
 #define EXYNOS5_CLKDIV_TOP1                    EXYNOS_CLKREG(0x10514)
 #define EXYNOS5_CLKDIV_GSCL                    EXYNOS_CLKREG(0x10520)
 #define EXYNOS5_CLKDIV_DISP1_0                 EXYNOS_CLKREG(0x1052C)
 #define EXYNOS5_CLKDIV_GEN                     EXYNOS_CLKREG(0x1053C)
+#define EXYNOS5_CLKDIV_MAUDIO                  EXYNOS_CLKREG(0x10544)
 #define EXYNOS5_CLKDIV_FSYS0                   EXYNOS_CLKREG(0x10548)
 #define EXYNOS5_CLKDIV_FSYS1                   EXYNOS_CLKREG(0x1054C)
 #define EXYNOS5_CLKDIV_FSYS2                   EXYNOS_CLKREG(0x10550)
 #define EXYNOS5_CLKDIV_FSYS3                   EXYNOS_CLKREG(0x10554)
 #define EXYNOS5_CLKDIV_PERIC0                  EXYNOS_CLKREG(0x10558)
+#define EXYNOS5_CLKDIV_PERIC1                  EXYNOS_CLKREG(0x1055C)
+#define EXYNOS5_CLKDIV_PERIC2                  EXYNOS_CLKREG(0x10560)
+#define EXYNOS5_CLKDIV_PERIC3                  EXYNOS_CLKREG(0x10564)
+#define EXYNOS5_CLKDIV_PERIC4                  EXYNOS_CLKREG(0x10568)
+#define EXYNOS5_CLKDIV_PERIC5                  EXYNOS_CLKREG(0x1056C)
+#define EXYNOS5_SCLK_DIV_ISP                   EXYNOS_CLKREG(0x10580)
 
 #define EXYNOS5_CLKGATE_IP_ACP                 EXYNOS_CLKREG(0x08800)
+#define EXYNOS5_CLKGATE_IP_ISP0                        EXYNOS_CLKREG(0x0C800)
+#define EXYNOS5_CLKGATE_IP_ISP1                        EXYNOS_CLKREG(0x0C804)
 #define EXYNOS5_CLKGATE_IP_GSCL                        EXYNOS_CLKREG(0x10920)
 #define EXYNOS5_CLKGATE_IP_DISP1               EXYNOS_CLKREG(0x10928)
 #define EXYNOS5_CLKGATE_IP_MFC                 EXYNOS_CLKREG(0x1092C)
+#define EXYNOS5_CLKGATE_IP_G3D                 EXYNOS_CLKREG(0x10930)
 #define EXYNOS5_CLKGATE_IP_GEN                 EXYNOS_CLKREG(0x10934)
 #define EXYNOS5_CLKGATE_IP_FSYS                        EXYNOS_CLKREG(0x10944)
 #define EXYNOS5_CLKGATE_IP_GPS                 EXYNOS_CLKREG(0x1094C)
 #define EXYNOS5_CLKSRC_CDREX                   EXYNOS_CLKREG(0x20200)
 #define EXYNOS5_CLKDIV_CDREX                   EXYNOS_CLKREG(0x20500)
 
+#define EXYNOS5_PLL_DIV2_SEL                   EXYNOS_CLKREG(0x20A24)
+
 #define EXYNOS5_EPLL_LOCK                      EXYNOS_CLKREG(0x10030)
 
 #define EXYNOS5_EPLLCON0_LOCKED_SHIFT          (29)
index d457d052a420d680805679d6654dac7a40a5bc03..43a99e6f56ab68e638621f8fb9e6b23c910005b3 100644 (file)
@@ -1,9 +1,8 @@
-/* linux/arch/arm/mach-exynos4/include/mach/regs-pmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com
  *
- * EXYNOS4 - Power management unit definition
+ * EXYNOS - Power management unit definition
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 
 #define S5P_PMU_LCD1_CONF              S5P_PMUREG(0x3CA0)
 
-/* Only for EXYNOS4212 */
+/* Only for EXYNOS4x12 */
 #define S5P_ISP_ARM_LOWPWR                     S5P_PMUREG(0x1050)
 #define S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR       S5P_PMUREG(0x1054)
 #define S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR     S5P_PMUREG(0x1058)
 #define S5P_SECSS_MEM_OPTION                   S5P_PMUREG(0x2EC8)
 #define S5P_ROTATOR_MEM_OPTION                 S5P_PMUREG(0x2F48)
 
+/* Only for EXYNOS4412 */
+#define S5P_ARM_CORE2_LOWPWR                   S5P_PMUREG(0x1020)
+#define S5P_DIS_IRQ_CORE2                      S5P_PMUREG(0x1024)
+#define S5P_DIS_IRQ_CENTRAL2                   S5P_PMUREG(0x1028)
+#define S5P_ARM_CORE3_LOWPWR                   S5P_PMUREG(0x1030)
+#define S5P_DIS_IRQ_CORE3                      S5P_PMUREG(0x1034)
+#define S5P_DIS_IRQ_CENTRAL3                   S5P_PMUREG(0x1038)
+
+/* For EXYNOS5 */
+
+#define EXYNOS5_USB_CFG                                                S5P_PMUREG(0x0230)
+
+#define EXYNOS5_ARM_CORE0_SYS_PWR_REG                          S5P_PMUREG(0x1000)
+#define EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG            S5P_PMUREG(0x1004)
+#define EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG          S5P_PMUREG(0x1008)
+#define EXYNOS5_ARM_CORE1_SYS_PWR_REG                          S5P_PMUREG(0x1010)
+#define EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG            S5P_PMUREG(0x1014)
+#define EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG          S5P_PMUREG(0x1018)
+#define EXYNOS5_FSYS_ARM_SYS_PWR_REG                           S5P_PMUREG(0x1040)
+#define EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG           S5P_PMUREG(0x1048)
+#define EXYNOS5_ISP_ARM_SYS_PWR_REG                            S5P_PMUREG(0x1050)
+#define EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG              S5P_PMUREG(0x1054)
+#define EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG            S5P_PMUREG(0x1058)
+#define EXYNOS5_ARM_COMMON_SYS_PWR_REG                         S5P_PMUREG(0x1080)
+#define EXYNOS5_ARM_L2_SYS_PWR_REG                             S5P_PMUREG(0x10C0)
+#define EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG                       S5P_PMUREG(0x1100)
+#define EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG                       S5P_PMUREG(0x1104)
+#define EXYNOS5_CMU_RESET_SYS_PWR_REG                          S5P_PMUREG(0x110C)
+#define EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG                        S5P_PMUREG(0x1120)
+#define EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG                        S5P_PMUREG(0x1124)
+#define EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG                   S5P_PMUREG(0x112C)
+#define EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG                     S5P_PMUREG(0x1130)
+#define EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG                      S5P_PMUREG(0x1134)
+#define EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG                     S5P_PMUREG(0x1138)
+#define EXYNOS5_APLL_SYSCLK_SYS_PWR_REG                                S5P_PMUREG(0x1140)
+#define EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG                                S5P_PMUREG(0x1144)
+#define EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG                                S5P_PMUREG(0x1148)
+#define EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG                                S5P_PMUREG(0x114C)
+#define EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG                                S5P_PMUREG(0x1150)
+#define EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG                                S5P_PMUREG(0x1154)
+#define EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG                    S5P_PMUREG(0x1164)
+#define EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG                    S5P_PMUREG(0x1170)
+#define EXYNOS5_TOP_BUS_SYS_PWR_REG                            S5P_PMUREG(0x1180)
+#define EXYNOS5_TOP_RETENTION_SYS_PWR_REG                      S5P_PMUREG(0x1184)
+#define EXYNOS5_TOP_PWR_SYS_PWR_REG                            S5P_PMUREG(0x1188)
+#define EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG                     S5P_PMUREG(0x1190)
+#define EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG               S5P_PMUREG(0x1194)
+#define EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG                     S5P_PMUREG(0x1198)
+#define EXYNOS5_LOGIC_RESET_SYS_PWR_REG                                S5P_PMUREG(0x11A0)
+#define EXYNOS5_OSCCLK_GATE_SYS_PWR_REG                                S5P_PMUREG(0x11A4)
+#define EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG                 S5P_PMUREG(0x11B0)
+#define EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG                 S5P_PMUREG(0x11B4)
+#define EXYNOS5_USBOTG_MEM_SYS_PWR_REG                         S5P_PMUREG(0x11C0)
+#define EXYNOS5_G2D_MEM_SYS_PWR_REG                            S5P_PMUREG(0x11C8)
+#define EXYNOS5_USBDRD_MEM_SYS_PWR_REG                         S5P_PMUREG(0x11CC)
+#define EXYNOS5_SDMMC_MEM_SYS_PWR_REG                          S5P_PMUREG(0x11D0)
+#define EXYNOS5_CSSYS_MEM_SYS_PWR_REG                          S5P_PMUREG(0x11D4)
+#define EXYNOS5_SECSS_MEM_SYS_PWR_REG                          S5P_PMUREG(0x11D8)
+#define EXYNOS5_ROTATOR_MEM_SYS_PWR_REG                                S5P_PMUREG(0x11DC)
+#define EXYNOS5_INTRAM_MEM_SYS_PWR_REG                         S5P_PMUREG(0x11E0)
+#define EXYNOS5_INTROM_MEM_SYS_PWR_REG                         S5P_PMUREG(0x11E4)
+#define EXYNOS5_JPEG_MEM_SYS_PWR_REG                           S5P_PMUREG(0x11E8)
+#define EXYNOS5_HSI_MEM_SYS_PWR_REG                            S5P_PMUREG(0x11EC)
+#define EXYNOS5_MCUIOP_MEM_SYS_PWR_REG                         S5P_PMUREG(0x11F4)
+#define EXYNOS5_SATA_MEM_SYS_PWR_REG                           S5P_PMUREG(0x11FC)
+#define EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG                 S5P_PMUREG(0x1200)
+#define EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG                  S5P_PMUREG(0x1204)
+#define EXYNOS5_PAD_RETENTION_EFNAND_SYS_PWR_REG               S5P_PMUREG(0x1208)
+#define EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG                 S5P_PMUREG(0x1220)
+#define EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG                 S5P_PMUREG(0x1224)
+#define EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG                 S5P_PMUREG(0x1228)
+#define EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG                 S5P_PMUREG(0x122C)
+#define EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG                 S5P_PMUREG(0x1230)
+#define EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG                 S5P_PMUREG(0x1234)
+#define EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG                  S5P_PMUREG(0x1238)
+#define EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG          S5P_PMUREG(0x123C)
+#define EXYNOS5_PAD_ISOLATION_SYS_PWR_REG                      S5P_PMUREG(0x1240)
+#define EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG               S5P_PMUREG(0x1250)
+#define EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG                                S5P_PMUREG(0x1260)
+#define EXYNOS5_XUSBXTI_SYS_PWR_REG                            S5P_PMUREG(0x1280)
+#define EXYNOS5_XXTI_SYS_PWR_REG                               S5P_PMUREG(0x1284)
+#define EXYNOS5_EXT_REGULATOR_SYS_PWR_REG                      S5P_PMUREG(0x12C0)
+#define EXYNOS5_GPIO_MODE_SYS_PWR_REG                          S5P_PMUREG(0x1300)
+#define EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG                   S5P_PMUREG(0x1320)
+#define EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG                      S5P_PMUREG(0x1340)
+#define EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG                      S5P_PMUREG(0x1344)
+#define EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG                  S5P_PMUREG(0x1348)
+#define EXYNOS5_GSCL_SYS_PWR_REG                               S5P_PMUREG(0x1400)
+#define EXYNOS5_ISP_SYS_PWR_REG                                        S5P_PMUREG(0x1404)
+#define EXYNOS5_MFC_SYS_PWR_REG                                        S5P_PMUREG(0x1408)
+#define EXYNOS5_G3D_SYS_PWR_REG                                        S5P_PMUREG(0x140C)
+#define EXYNOS5_DISP1_SYS_PWR_REG                              S5P_PMUREG(0x1414)
+#define EXYNOS5_MAU_SYS_PWR_REG                                        S5P_PMUREG(0x1418)
+#define EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG                   S5P_PMUREG(0x1480)
+#define EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG                    S5P_PMUREG(0x1484)
+#define EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG                    S5P_PMUREG(0x1488)
+#define EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG                    S5P_PMUREG(0x148C)
+#define EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG                  S5P_PMUREG(0x1494)
+#define EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG                    S5P_PMUREG(0x1498)
+#define EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG                    S5P_PMUREG(0x14C0)
+#define EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG                     S5P_PMUREG(0x14C4)
+#define EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG                     S5P_PMUREG(0x14C8)
+#define EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG                     S5P_PMUREG(0x14CC)
+#define EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG                   S5P_PMUREG(0x14D4)
+#define EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG                     S5P_PMUREG(0x14D8)
+#define EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG                     S5P_PMUREG(0x1580)
+#define EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG                      S5P_PMUREG(0x1584)
+#define EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG                      S5P_PMUREG(0x1588)
+#define EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG                      S5P_PMUREG(0x158C)
+#define EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG                    S5P_PMUREG(0x1594)
+#define EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG                      S5P_PMUREG(0x1598)
+
+#define EXYNOS5_ARM_CORE0_OPTION                               S5P_PMUREG(0x2008)
+#define EXYNOS5_ARM_CORE1_OPTION                               S5P_PMUREG(0x2088)
+#define EXYNOS5_FSYS_ARM_OPTION                                        S5P_PMUREG(0x2208)
+#define EXYNOS5_ISP_ARM_OPTION                                 S5P_PMUREG(0x2288)
+#define EXYNOS5_ARM_COMMON_OPTION                              S5P_PMUREG(0x2408)
+#define EXYNOS5_TOP_PWR_OPTION                                 S5P_PMUREG(0x2C48)
+#define EXYNOS5_TOP_PWR_SYSMEM_OPTION                          S5P_PMUREG(0x2CC8)
+#define EXYNOS5_JPEG_MEM_OPTION                                        S5P_PMUREG(0x2F48)
+#define EXYNOS5_GSCL_STATUS                                    S5P_PMUREG(0x4004)
+#define EXYNOS5_ISP_STATUS                                     S5P_PMUREG(0x4024)
+#define EXYNOS5_GSCL_OPTION                                    S5P_PMUREG(0x4008)
+#define EXYNOS5_ISP_OPTION                                     S5P_PMUREG(0x4028)
+#define EXYNOS5_MFC_OPTION                                     S5P_PMUREG(0x4048)
+#define EXYNOS5_G3D_CONFIGURATION                              S5P_PMUREG(0x4060)
+#define EXYNOS5_G3D_STATUS                                     S5P_PMUREG(0x4064)
+#define EXYNOS5_G3D_OPTION                                     S5P_PMUREG(0x4068)
+#define EXYNOS5_DISP1_OPTION                                   S5P_PMUREG(0x40A8)
+#define EXYNOS5_MAU_OPTION                                     S5P_PMUREG(0x40C8)
+
+#define EXYNOS5_USE_SC_FEEDBACK                                        (1 << 1)
+#define EXYNOS5_USE_SC_COUNTER                                 (1 << 0)
+
+#define EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL                    (1 << 2)
+#define EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN                 (1 << 7)
+
+#define EXYNOS5_OPTION_USE_STANDBYWFE                          (1 << 24)
+#define EXYNOS5_OPTION_USE_STANDBYWFI                          (1 << 16)
+
+#define EXYNOS5_OPTION_USE_RETENTION                           (1 << 4)
+
 #endif /* __ASM_ARCH_REGS_PMU_H */
diff --git a/arch/arm/mach-exynos/include/mach/regs-sysmmu.h b/arch/arm/mach-exynos/include/mach/regs-sysmmu.h
deleted file mode 100644 (file)
index 68ff6ad..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/* linux/arch/arm/mach-exynos4/include/mach/regs-sysmmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * EXYNOS4 - System MMU register
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_REGS_SYSMMU_H
-#define __ASM_ARCH_REGS_SYSMMU_H __FILE__
-
-#define S5P_MMU_CTRL                   0x000
-#define S5P_MMU_CFG                    0x004
-#define S5P_MMU_STATUS                 0x008
-#define S5P_MMU_FLUSH                  0x00C
-#define S5P_PT_BASE_ADDR               0x014
-#define S5P_INT_STATUS                 0x018
-#define S5P_INT_CLEAR                  0x01C
-#define S5P_PAGE_FAULT_ADDR            0x024
-#define S5P_AW_FAULT_ADDR              0x028
-#define S5P_AR_FAULT_ADDR              0x02C
-#define S5P_DEFAULT_SLAVE_ADDR         0x030
-
-#endif /* __ASM_ARCH_REGS_SYSMMU_H */
index 576efdf6d09163b1244cecc36747468d6a444de7..c71a5fba6a84c4f6a0a6cf4b538241519389dac3 100644 (file)
@@ -11,6 +11,6 @@
 #define __ASM_ARCH_SPI_CLKS_H __FILE__
 
 /* Must source from SCLK_SPI */
-#define EXYNOS4_SPI_SRCCLK_SCLK                0
+#define EXYNOS_SPI_SRCCLK_SCLK         0
 
 #endif /* __ASM_ARCH_SPI_CLKS_H */
index 6a5fbb534e821ff4a8846704878797587f40f96e..998daf2add92d9548733bc21ebe2bc167fcbe8f0 100644 (file)
@@ -1,46 +1,66 @@
-/* linux/arch/arm/mach-exynos4/include/mach/sysmmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com
  *
- * Samsung sysmmu driver for EXYNOS4
+ * EXYNOS - System MMU support
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARM_ARCH_SYSMMU_H
-#define __ASM_ARM_ARCH_SYSMMU_H __FILE__
-
-enum exynos4_sysmmu_ips {
-       SYSMMU_MDMA,
-       SYSMMU_SSS,
-       SYSMMU_FIMC0,
-       SYSMMU_FIMC1,
-       SYSMMU_FIMC2,
-       SYSMMU_FIMC3,
-       SYSMMU_JPEG,
-       SYSMMU_FIMD0,
-       SYSMMU_FIMD1,
-       SYSMMU_PCIe,
-       SYSMMU_G2D,
-       SYSMMU_ROTATOR,
-       SYSMMU_MDMA2,
-       SYSMMU_TV,
-       SYSMMU_MFC_L,
-       SYSMMU_MFC_R,
-       EXYNOS4_SYSMMU_TOTAL_IPNUM,
+ */
+
+#ifndef _ARM_MACH_EXYNOS_SYSMMU_H_
+#define _ARM_MACH_EXYNOS_SYSMMU_H_
+
+struct sysmmu_platform_data {
+       char *dbgname;
+       /* comma(,) separated list of clock names for clock gating */
+       char *clockname;
 };
 
-#define S5P_SYSMMU_TOTAL_IPNUM         EXYNOS4_SYSMMU_TOTAL_IPNUM
+#define SYSMMU_DEVNAME_BASE "exynos-sysmmu"
+
+#define SYSMMU_CLOCK_NAME "sysmmu"
+#define SYSMMU_CLOCK_NAME2 "sysmmu_mc"
+
+#ifdef CONFIG_EXYNOS_DEV_SYSMMU
+#include <linux/device.h>
+struct platform_device;
+
+#define SYSMMU_PLATDEV(ipname) exynos_device_sysmmu_##ipname
+
+extern struct platform_device SYSMMU_PLATDEV(mfc_l);
+extern struct platform_device SYSMMU_PLATDEV(mfc_r);
+extern struct platform_device SYSMMU_PLATDEV(tv);
+extern struct platform_device SYSMMU_PLATDEV(jpeg);
+extern struct platform_device SYSMMU_PLATDEV(rot);
+extern struct platform_device SYSMMU_PLATDEV(fimc0);
+extern struct platform_device SYSMMU_PLATDEV(fimc1);
+extern struct platform_device SYSMMU_PLATDEV(fimc2);
+extern struct platform_device SYSMMU_PLATDEV(fimc3);
+extern struct platform_device SYSMMU_PLATDEV(gsc0);
+extern struct platform_device SYSMMU_PLATDEV(gsc1);
+extern struct platform_device SYSMMU_PLATDEV(gsc2);
+extern struct platform_device SYSMMU_PLATDEV(gsc3);
+extern struct platform_device SYSMMU_PLATDEV(isp);
+extern struct platform_device SYSMMU_PLATDEV(fimd0);
+extern struct platform_device SYSMMU_PLATDEV(fimd1);
+extern struct platform_device SYSMMU_PLATDEV(camif0);
+extern struct platform_device SYSMMU_PLATDEV(camif1);
+extern struct platform_device SYSMMU_PLATDEV(2d);
 
-extern const char *sysmmu_ips_name[EXYNOS4_SYSMMU_TOTAL_IPNUM];
+#ifdef CONFIG_IOMMU_API
+static inline void platform_set_sysmmu(
+                               struct device *sysmmu, struct device *dev)
+{
+       dev->archdata.iommu = sysmmu;
+}
+#endif
 
-typedef enum exynos4_sysmmu_ips sysmmu_ips;
+#else /* !CONFIG_EXYNOS_DEV_SYSMMU */
+#define platform_set_sysmmu(dev, sysmmu) do { } while (0)
+#endif
 
-void sysmmu_clk_init(struct device *dev, sysmmu_ips ips);
-void sysmmu_clk_enable(sysmmu_ips ips);
-void sysmmu_clk_disable(sysmmu_ips ips);
+#define SYSMMU_CLOCK_DEVNAME(ipname, id) (SYSMMU_DEVNAME_BASE "." #id)
 
-#endif /* __ASM_ARM_ARCH_SYSMMU_H */
+#endif /* _ARM_MACH_EXYNOS_SYSMMU_H_ */
index fed7116418eb1315c60b2b87cb844a0ba28ba318..5a3daa0168d85450b0aa3777fb965249d79ba735 100644 (file)
@@ -147,7 +147,6 @@ static struct platform_device *armlex4210_devices[] __initdata = {
        &s3c_device_hsmmc3,
        &s3c_device_rtc,
        &s3c_device_wdt,
-       &exynos4_device_sysmmu,
        &samsung_asoc_dma,
        &armlex4210_smsc911x,
        &exynos4_device_ahci,
@@ -204,6 +203,7 @@ MACHINE_START(ARMLEX4210, "ARMLEX4210")
        .map_io         = armlex4210_map_io,
        .handle_irq     = gic_handle_irq,
        .init_machine   = armlex4210_machine_init,
+       .init_late      = exynos_init_late,
        .timer          = &exynos4_timer,
        .restart        = exynos4_restart,
 MACHINE_END
index 8245f1c761d9f5dd5124b83f189f6f95c20c501e..e7e9743543acd6384f38f5cfcfcd7b4760872225 100644 (file)
@@ -83,6 +83,7 @@ DT_MACHINE_START(EXYNOS4210_DT, "Samsung Exynos4 (Flattened Device Tree)")
        .map_io         = exynos4210_dt_map_io,
        .handle_irq     = gic_handle_irq,
        .init_machine   = exynos4210_dt_machine_init,
+       .init_late      = exynos_init_late,
        .timer          = &exynos4_timer,
        .dt_compat      = exynos4210_dt_compat,
        .restart        = exynos4_restart,
index 4711c8920e37830d7bda8cb32ee03ed1ae70730a..7b1e11a228cce49776135f140316c83182f4d440 100644 (file)
@@ -43,6 +43,10 @@ static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = {
                                "exynos4210-uart.2", NULL),
        OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS5_PA_UART3,
                                "exynos4210-uart.3", NULL),
+       OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(0),
+                               "s3c2440-i2c.0", NULL),
+       OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(1),
+                               "s3c2440-i2c.1", NULL),
        OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL),
        OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL),
        OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_MDMA1, "dma-pl330.2", NULL),
@@ -72,6 +76,7 @@ DT_MACHINE_START(EXYNOS5_DT, "SAMSUNG EXYNOS5 (Flattened Device Tree)")
        .map_io         = exynos5250_dt_map_io,
        .handle_irq     = gic_handle_irq,
        .init_machine   = exynos5250_dt_machine_init,
+       .init_late      = exynos_init_late,
        .timer          = &exynos4_timer,
        .dt_compat      = exynos5250_dt_compat,
        .restart        = exynos5_restart,
index 6c31f2ad765df785ed7f2af402fd96c50e217194..656f8fc9addd3b67ecaafbe9207f279da01b3b70 100644 (file)
@@ -237,25 +237,29 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
 #else
 /* Frame Buffer */
 static struct s3c_fb_pd_win nuri_fb_win0 = {
-       .win_mode = {
-               .left_margin    = 64,
-               .right_margin   = 16,
-               .upper_margin   = 64,
-               .lower_margin   = 1,
-               .hsync_len      = 48,
-               .vsync_len      = 3,
-               .xres           = 1024,
-               .yres           = 600,
-               .refresh        = 60,
-       },
        .max_bpp        = 24,
        .default_bpp    = 16,
+       .xres           = 1024,
+       .yres           = 600,
        .virtual_x      = 1024,
        .virtual_y      = 2 * 600,
 };
 
+static struct fb_videomode nuri_lcd_timing = {
+       .left_margin    = 64,
+       .right_margin   = 16,
+       .upper_margin   = 64,
+       .lower_margin   = 1,
+       .hsync_len      = 48,
+       .vsync_len      = 3,
+       .xres           = 1024,
+       .yres           = 600,
+       .refresh        = 60,
+};
+
 static struct s3c_fb_platdata nuri_fb_pdata __initdata = {
        .win[0]         = &nuri_fb_win0,
+       .vtiming        = &nuri_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB |
                          VIDCON0_CLKSEL_LCD,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
@@ -1389,6 +1393,7 @@ MACHINE_START(NURI, "NURI")
        .map_io         = nuri_map_io,
        .handle_irq     = gic_handle_irq,
        .init_machine   = nuri_machine_init,
+       .init_late      = exynos_init_late,
        .timer          = &exynos4_timer,
        .reserve        = &nuri_reserve,
        .restart        = exynos4_restart,
index 26124a38bcbdfbcf6466cb4b6667bfa20a7c4eec..f5572be9d7bf38480f931618d27507738aaf1ad6 100644 (file)
@@ -604,24 +604,28 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
 };
 #else
 static struct s3c_fb_pd_win origen_fb_win0 = {
-       .win_mode = {
-               .left_margin    = 64,
-               .right_margin   = 16,
-               .upper_margin   = 64,
-               .lower_margin   = 16,
-               .hsync_len      = 48,
-               .vsync_len      = 3,
-               .xres           = 1024,
-               .yres           = 600,
-       },
+       .xres                   = 1024,
+       .yres                   = 600,
        .max_bpp                = 32,
        .default_bpp            = 24,
        .virtual_x              = 1024,
        .virtual_y              = 2 * 600,
 };
 
+static struct fb_videomode origen_lcd_timing = {
+       .left_margin    = 64,
+       .right_margin   = 16,
+       .upper_margin   = 64,
+       .lower_margin   = 16,
+       .hsync_len      = 48,
+       .vsync_len      = 3,
+       .xres           = 1024,
+       .yres           = 600,
+};
+
 static struct s3c_fb_platdata origen_lcd_pdata __initdata = {
        .win[0]         = &origen_fb_win0,
+       .vtiming        = &origen_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
                                VIDCON1_INV_VCLK,
@@ -766,6 +770,7 @@ MACHINE_START(ORIGEN, "ORIGEN")
        .map_io         = origen_map_io,
        .handle_irq     = gic_handle_irq,
        .init_machine   = origen_machine_init,
+       .init_late      = exynos_init_late,
        .timer          = &exynos4_timer,
        .reserve        = &origen_reserve,
        .restart        = exynos4_restart,
index fe772d893cc9653ded14e3cd6fdda8c8a0fe8867..fb09c70e195af8a61e3399fb0db0926222905a66 100644 (file)
@@ -316,6 +316,7 @@ MACHINE_START(SMDK4412, "SMDK4412")
        .map_io         = smdk4x12_map_io,
        .handle_irq     = gic_handle_irq,
        .init_machine   = smdk4x12_machine_init,
+       .init_late      = exynos_init_late,
        .timer          = &exynos4_timer,
        .restart        = exynos4_restart,
        .reserve        = &smdk4x12_reserve,
index 5af96064ca5109ddec70a66a444a74ff0720168c..262e9e446a96a62934f58b978c81dcdd98e1c1a3 100644 (file)
@@ -178,22 +178,26 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
 };
 #else
 static struct s3c_fb_pd_win smdkv310_fb_win0 = {
-       .win_mode = {
-               .left_margin    = 13,
-               .right_margin   = 8,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-       },
-       .max_bpp                = 32,
-       .default_bpp            = 24,
+       .max_bpp        = 32,
+       .default_bpp    = 24,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode smdkv310_lcd_timing = {
+       .left_margin    = 13,
+       .right_margin   = 8,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
 };
 
 static struct s3c_fb_platdata smdkv310_lcd0_pdata __initdata = {
        .win[0]         = &smdkv310_fb_win0,
+       .vtiming        = &smdkv310_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        .setup_gpio     = exynos4_fimd0_gpio_setup_24bpp,
@@ -295,7 +299,6 @@ static struct platform_device *smdkv310_devices[] __initdata = {
        &s5p_device_mfc_l,
        &s5p_device_mfc_r,
        &exynos4_device_spdif,
-       &exynos4_device_sysmmu,
        &samsung_asoc_dma,
        &samsung_asoc_idma,
        &s5p_device_fimd0,
@@ -412,6 +415,7 @@ MACHINE_START(SMDKC210, "SMDKC210")
        .map_io         = smdkv310_map_io,
        .handle_irq     = gic_handle_irq,
        .init_machine   = smdkv310_machine_init,
+       .init_late      = exynos_init_late,
        .timer          = &exynos4_timer,
        .restart        = exynos4_restart,
 MACHINE_END
index 6b731b863275f5cc1d1362d3ccd68ade5db9b8b3..cd92fa86ba41248da1d8de5e8f122edca102d290 100644 (file)
@@ -843,25 +843,29 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
 #else
 /* Frame Buffer */
 static struct s3c_fb_pd_win universal_fb_win0 = {
-       .win_mode = {
-               .left_margin    = 16,
-               .right_margin   = 16,
-               .upper_margin   = 2,
-               .lower_margin   = 28,
-               .hsync_len      = 2,
-               .vsync_len      = 1,
-               .xres           = 480,
-               .yres           = 800,
-               .refresh        = 55,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 480,
+       .yres           = 800,
        .virtual_x      = 480,
        .virtual_y      = 2 * 800,
 };
 
+static struct fb_videomode universal_lcd_timing = {
+       .left_margin    = 16,
+       .right_margin   = 16,
+       .upper_margin   = 2,
+       .lower_margin   = 28,
+       .hsync_len      = 2,
+       .vsync_len      = 1,
+       .xres           = 480,
+       .yres           = 800,
+       .refresh        = 55,
+};
+
 static struct s3c_fb_platdata universal_lcd_pdata __initdata = {
        .win[0]         = &universal_fb_win0,
+       .vtiming        = &universal_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB |
                          VIDCON0_CLKSEL_LCD,
        .vidcon1        = VIDCON1_INV_VCLK | VIDCON1_INV_VDEN
@@ -1157,6 +1161,7 @@ MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
        .map_io         = universal_map_io,
        .handle_irq     = gic_handle_irq,
        .init_machine   = universal_machine_init,
+       .init_late      = exynos_init_late,
        .timer          = &s5p_timer,
        .reserve        = &universal_reserve,
        .restart        = exynos4_restart,
index 897d9a9cf2265bf6d2d3bf1c73dff584e647d551..b601fb8a408b16777b32b5cc092f9c58840f2323 100644 (file)
@@ -388,6 +388,7 @@ static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
 {
        struct mct_clock_event_device *mevt;
        unsigned int cpu = smp_processor_id();
+       int mct_lx_irq;
 
        mevt = this_cpu_ptr(&percpu_mct_tick);
        mevt->evt = evt;
@@ -414,14 +415,18 @@ static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
 
        if (mct_int_type == MCT_INT_SPI) {
                if (cpu == 0) {
+                       mct_lx_irq = soc_is_exynos4210() ? EXYNOS4_IRQ_MCT_L0 :
+                                               EXYNOS5_IRQ_MCT_L0;
                        mct_tick0_event_irq.dev_id = mevt;
-                       evt->irq = EXYNOS4_IRQ_MCT_L0;
-                       setup_irq(EXYNOS4_IRQ_MCT_L0, &mct_tick0_event_irq);
+                       evt->irq = mct_lx_irq;
+                       setup_irq(mct_lx_irq, &mct_tick0_event_irq);
                } else {
+                       mct_lx_irq = soc_is_exynos4210() ? EXYNOS4_IRQ_MCT_L1 :
+                                               EXYNOS5_IRQ_MCT_L1;
                        mct_tick1_event_irq.dev_id = mevt;
-                       evt->irq = EXYNOS4_IRQ_MCT_L1;
-                       setup_irq(EXYNOS4_IRQ_MCT_L1, &mct_tick1_event_irq);
-                       irq_set_affinity(EXYNOS4_IRQ_MCT_L1, cpumask_of(1));
+                       evt->irq = mct_lx_irq;
+                       setup_irq(mct_lx_irq, &mct_tick1_event_irq);
+                       irq_set_affinity(mct_lx_irq, cpumask_of(1));
                }
        } else {
                enable_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER, 0);
@@ -473,7 +478,7 @@ static void __init exynos4_timer_resources(void)
 
 static void __init exynos4_timer_init(void)
 {
-       if (soc_is_exynos4210())
+       if ((soc_is_exynos4210()) || (soc_is_exynos5250()))
                mct_int_type = MCT_INT_SPI;
        else
                mct_int_type = MCT_INT_PPI;
index 428cfeb577248a5e812e7c954b8faa7584bbbe3a..c06c992943a139bc3017854b19c4eba1cb09d4c5 100644 (file)
@@ -1,9 +1,8 @@
-/* linux/arch/arm/mach-exynos4/pm.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com
  *
- * EXYNOS4210 - Power Management support
+ * EXYNOS - Power Management support
  *
  * Based on arch/arm/mach-s3c2410/pm.c
  * Copyright (c) 2006 Simtec Electronics
@@ -63,90 +62,7 @@ static struct sleep_save exynos4_vpll_save[] = {
        SAVE_ITEM(EXYNOS4_VPLL_CON1),
 };
 
-static struct sleep_save exynos4_core_save[] = {
-       /* GIC side */
-       SAVE_ITEM(S5P_VA_GIC_CPU + 0x000),
-       SAVE_ITEM(S5P_VA_GIC_CPU + 0x004),
-       SAVE_ITEM(S5P_VA_GIC_CPU + 0x008),
-       SAVE_ITEM(S5P_VA_GIC_CPU + 0x00C),
-       SAVE_ITEM(S5P_VA_GIC_CPU + 0x014),
-       SAVE_ITEM(S5P_VA_GIC_CPU + 0x018),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x000),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x004),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x100),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x104),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x108),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x300),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x304),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x308),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x400),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x404),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x408),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x40C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x410),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x414),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x418),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x41C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x420),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x424),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x428),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x42C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x430),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x434),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x438),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x43C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x440),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x444),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x448),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x44C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x450),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x454),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x458),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x45C),
-
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x800),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x804),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x808),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x80C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x810),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x814),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x818),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x81C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x820),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x824),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x828),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x82C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x830),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x834),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x838),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x83C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x840),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x844),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x848),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x84C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x850),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x854),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x858),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0x85C),
-
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0xC00),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0xC04),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0xC08),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0xC0C),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0xC10),
-       SAVE_ITEM(S5P_VA_GIC_DIST + 0xC14),
-
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x000),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x010),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x020),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x030),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x040),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x050),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x060),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x070),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x080),
-       SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x090),
-
+static struct sleep_save exynos_core_save[] = {
        /* SROM side */
        SAVE_ITEM(S5P_SROM_BW),
        SAVE_ITEM(S5P_SROM_BC0),
@@ -159,9 +75,11 @@ static struct sleep_save exynos4_core_save[] = {
 /* For Cortex-A9 Diagnostic and Power control register */
 static unsigned int save_arm_register[2];
 
-static int exynos4_cpu_suspend(unsigned long arg)
+static int exynos_cpu_suspend(unsigned long arg)
 {
+#ifdef CONFIG_CACHE_L2X0
        outer_flush_all();
+#endif
 
        /* issue the standby signal into the pm unit. */
        cpu_do_idle();
@@ -170,19 +88,25 @@ static int exynos4_cpu_suspend(unsigned long arg)
        panic("sleep resumed to originator?");
 }
 
-static void exynos4_pm_prepare(void)
+static void exynos_pm_prepare(void)
 {
-       u32 tmp;
+       unsigned int tmp;
 
-       s3c_pm_do_save(exynos4_core_save, ARRAY_SIZE(exynos4_core_save));
-       s3c_pm_do_save(exynos4_epll_save, ARRAY_SIZE(exynos4_epll_save));
-       s3c_pm_do_save(exynos4_vpll_save, ARRAY_SIZE(exynos4_vpll_save));
+       s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
 
-       tmp = __raw_readl(S5P_INFORM1);
+       if (!soc_is_exynos5250()) {
+               s3c_pm_do_save(exynos4_epll_save, ARRAY_SIZE(exynos4_epll_save));
+               s3c_pm_do_save(exynos4_vpll_save, ARRAY_SIZE(exynos4_vpll_save));
+       } else {
+               /* Disable USE_RETENTION of JPEG_MEM_OPTION */
+               tmp = __raw_readl(EXYNOS5_JPEG_MEM_OPTION);
+               tmp &= ~EXYNOS5_OPTION_USE_RETENTION;
+               __raw_writel(tmp, EXYNOS5_JPEG_MEM_OPTION);
+       }
 
        /* Set value of power down register for sleep mode */
 
-       exynos4_sys_powerdown_conf(SYS_SLEEP);
+       exynos_sys_powerdown_conf(SYS_SLEEP);
        __raw_writel(S5P_CHECK_SLEEP, S5P_INFORM1);
 
        /* ensure at least INFORM0 has the resume address */
@@ -191,17 +115,18 @@ static void exynos4_pm_prepare(void)
 
        /* Before enter central sequence mode, clock src register have to set */
 
-       s3c_pm_do_restore_core(exynos4_set_clksrc, ARRAY_SIZE(exynos4_set_clksrc));
+       if (!soc_is_exynos5250())
+               s3c_pm_do_restore_core(exynos4_set_clksrc, ARRAY_SIZE(exynos4_set_clksrc));
 
        if (soc_is_exynos4210())
                s3c_pm_do_restore_core(exynos4210_set_clksrc, ARRAY_SIZE(exynos4210_set_clksrc));
 
 }
 
-static int exynos4_pm_add(struct device *dev, struct subsys_interface *sif)
+static int exynos_pm_add(struct device *dev, struct subsys_interface *sif)
 {
-       pm_cpu_prep = exynos4_pm_prepare;
-       pm_cpu_sleep = exynos4_cpu_suspend;
+       pm_cpu_prep = exynos_pm_prepare;
+       pm_cpu_sleep = exynos_cpu_suspend;
 
        return 0;
 }
@@ -273,13 +198,13 @@ static void exynos4_restore_pll(void)
        } while (epll_wait || vpll_wait);
 }
 
-static struct subsys_interface exynos4_pm_interface = {
-       .name           = "exynos4_pm",
-       .subsys         = &exynos4_subsys,
-       .add_dev        = exynos4_pm_add,
+static struct subsys_interface exynos_pm_interface = {
+       .name           = "exynos_pm",
+       .subsys         = &exynos_subsys,
+       .add_dev        = exynos_pm_add,
 };
 
-static __init int exynos4_pm_drvinit(void)
+static __init int exynos_pm_drvinit(void)
 {
        struct clk *pll_base;
        unsigned int tmp;
@@ -292,18 +217,20 @@ static __init int exynos4_pm_drvinit(void)
        tmp |= ((0xFF << 8) | (0x1F << 1));
        __raw_writel(tmp, S5P_WAKEUP_MASK);
 
-       pll_base = clk_get(NULL, "xtal");
+       if (!soc_is_exynos5250()) {
+               pll_base = clk_get(NULL, "xtal");
 
-       if (!IS_ERR(pll_base)) {
-               pll_base_rate = clk_get_rate(pll_base);
-               clk_put(pll_base);
+               if (!IS_ERR(pll_base)) {
+                       pll_base_rate = clk_get_rate(pll_base);
+                       clk_put(pll_base);
+               }
        }
 
-       return subsys_interface_register(&exynos4_pm_interface);
+       return subsys_interface_register(&exynos_pm_interface);
 }
-arch_initcall(exynos4_pm_drvinit);
+arch_initcall(exynos_pm_drvinit);
 
-static int exynos4_pm_suspend(void)
+static int exynos_pm_suspend(void)
 {
        unsigned long tmp;
 
@@ -313,27 +240,27 @@ static int exynos4_pm_suspend(void)
        tmp &= ~S5P_CENTRAL_LOWPWR_CFG;
        __raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION);
 
-       if (soc_is_exynos4212()) {
-               tmp = __raw_readl(S5P_CENTRAL_SEQ_OPTION);
-               tmp &= ~(S5P_USE_STANDBYWFI_ISP_ARM |
-                        S5P_USE_STANDBYWFE_ISP_ARM);
-               __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
-       }
+       /* Setting SEQ_OPTION register */
+
+       tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0);
+       __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
 
-       /* Save Power control register */
-       asm ("mrc p15, 0, %0, c15, c0, 0"
-            : "=r" (tmp) : : "cc");
-       save_arm_register[0] = tmp;
+       if (!soc_is_exynos5250()) {
+               /* Save Power control register */
+               asm ("mrc p15, 0, %0, c15, c0, 0"
+                    : "=r" (tmp) : : "cc");
+               save_arm_register[0] = tmp;
 
-       /* Save Diagnostic register */
-       asm ("mrc p15, 0, %0, c15, c0, 1"
-            : "=r" (tmp) : : "cc");
-       save_arm_register[1] = tmp;
+               /* Save Diagnostic register */
+               asm ("mrc p15, 0, %0, c15, c0, 1"
+                    : "=r" (tmp) : : "cc");
+               save_arm_register[1] = tmp;
+       }
 
        return 0;
 }
 
-static void exynos4_pm_resume(void)
+static void exynos_pm_resume(void)
 {
        unsigned long tmp;
 
@@ -350,17 +277,19 @@ static void exynos4_pm_resume(void)
                /* No need to perform below restore code */
                goto early_wakeup;
        }
-       /* Restore Power control register */
-       tmp = save_arm_register[0];
-       asm volatile ("mcr p15, 0, %0, c15, c0, 0"
-                     : : "r" (tmp)
-                     : "cc");
-
-       /* Restore Diagnostic register */
-       tmp = save_arm_register[1];
-       asm volatile ("mcr p15, 0, %0, c15, c0, 1"
-                     : : "r" (tmp)
-                     : "cc");
+       if (!soc_is_exynos5250()) {
+               /* Restore Power control register */
+               tmp = save_arm_register[0];
+               asm volatile ("mcr p15, 0, %0, c15, c0, 0"
+                             : : "r" (tmp)
+                             : "cc");
+
+               /* Restore Diagnostic register */
+               tmp = save_arm_register[1];
+               asm volatile ("mcr p15, 0, %0, c15, c0, 1"
+                             : : "r" (tmp)
+                             : "cc");
+       }
 
        /* For release retention */
 
@@ -372,26 +301,28 @@ static void exynos4_pm_resume(void)
        __raw_writel((1 << 28), S5P_PAD_RET_EBIA_OPTION);
        __raw_writel((1 << 28), S5P_PAD_RET_EBIB_OPTION);
 
-       s3c_pm_do_restore_core(exynos4_core_save, ARRAY_SIZE(exynos4_core_save));
+       s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
 
-       exynos4_restore_pll();
+       if (!soc_is_exynos5250()) {
+               exynos4_restore_pll();
 
 #ifdef CONFIG_SMP
-       scu_enable(S5P_VA_SCU);
+               scu_enable(S5P_VA_SCU);
 #endif
+       }
 
 early_wakeup:
        return;
 }
 
-static struct syscore_ops exynos4_pm_syscore_ops = {
-       .suspend        = exynos4_pm_suspend,
-       .resume         = exynos4_pm_resume,
+static struct syscore_ops exynos_pm_syscore_ops = {
+       .suspend        = exynos_pm_suspend,
+       .resume         = exynos_pm_resume,
 };
 
-static __init int exynos4_pm_syscore_init(void)
+static __init int exynos_pm_syscore_init(void)
 {
-       register_syscore_ops(&exynos4_pm_syscore_ops);
+       register_syscore_ops(&exynos_pm_syscore_ops);
        return 0;
 }
-arch_initcall(exynos4_pm_syscore_init);
+arch_initcall(exynos_pm_syscore_init);
index 13b306808b42b45606ae33cf6e1b101e1baa8382..e9fafcf163de8982287876a7ca6d6cf94488f472 100644 (file)
@@ -193,9 +193,8 @@ static __init int exynos4_pm_init_power_domain(void)
 }
 arch_initcall(exynos4_pm_init_power_domain);
 
-static __init int exynos_pm_late_initcall(void)
+int __init exynos_pm_late_initcall(void)
 {
        pm_genpd_poweroff_unused();
        return 0;
 }
-late_initcall(exynos_pm_late_initcall);
index bba48f5c3e8fe21a6d2218949d929ee4e5b08d8a..4aacb66f71618da5d865a04a868878ef380aebea 100644 (file)
@@ -1,9 +1,8 @@
-/* linux/arch/arm/mach-exynos4/pmu.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  *
- * EXYNOS4210 - CPU PMU(Power Management Unit) support
+ * EXYNOS - CPU PMU(Power Management Unit) support
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/bug.h>
 
 #include <mach/regs-clock.h>
 #include <mach/pmu.h>
 
-static struct exynos4_pmu_conf *exynos4_pmu_config;
+static struct exynos_pmu_conf *exynos_pmu_config;
 
-static struct exynos4_pmu_conf exynos4210_pmu_config[] = {
+static struct exynos_pmu_conf exynos4210_pmu_config[] = {
        /* { .reg = address, .val = { AFTR, LPA, SLEEP } */
        { S5P_ARM_CORE0_LOWPWR,                 { 0x0, 0x0, 0x2 } },
        { S5P_DIS_IRQ_CORE0,                    { 0x0, 0x0, 0x0 } },
@@ -94,7 +94,7 @@ static struct exynos4_pmu_conf exynos4210_pmu_config[] = {
        { PMU_TABLE_END,},
 };
 
-static struct exynos4_pmu_conf exynos4212_pmu_config[] = {
+static struct exynos_pmu_conf exynos4x12_pmu_config[] = {
        { S5P_ARM_CORE0_LOWPWR,                 { 0x0, 0x0, 0x2 } },
        { S5P_DIS_IRQ_CORE0,                    { 0x0, 0x0, 0x0 } },
        { S5P_DIS_IRQ_CENTRAL0,                 { 0x0, 0x0, 0x0 } },
@@ -202,29 +202,209 @@ static struct exynos4_pmu_conf exynos4212_pmu_config[] = {
        { PMU_TABLE_END,},
 };
 
-void exynos4_sys_powerdown_conf(enum sys_powerdown mode)
+static struct exynos_pmu_conf exynos4412_pmu_config[] = {
+       { S5P_ARM_CORE2_LOWPWR,                 { 0x0, 0x0, 0x2 } },
+       { S5P_DIS_IRQ_CORE2,                    { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_CENTRAL2,                 { 0x0, 0x0, 0x0 } },
+       { S5P_ARM_CORE3_LOWPWR,                 { 0x0, 0x0, 0x2 } },
+       { S5P_DIS_IRQ_CORE3,                    { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_CENTRAL3,                 { 0x0, 0x0, 0x0 } },
+       { PMU_TABLE_END,},
+};
+
+static struct exynos_pmu_conf exynos5250_pmu_config[] = {
+       /* { .reg = address, .val = { AFTR, LPA, SLEEP } */
+       { EXYNOS5_ARM_CORE0_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
+       { EXYNOS5_ARM_CORE1_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
+       { EXYNOS5_FSYS_ARM_SYS_PWR_REG,                 { 0x1, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+       { EXYNOS5_ISP_ARM_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG,    { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_ARM_COMMON_SYS_PWR_REG,               { 0x0, 0x0, 0x2} },
+       { EXYNOS5_ARM_L2_SYS_PWR_REG,                   { 0x3, 0x3, 0x3} },
+       { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_RESET_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
+       { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG,         { 0x1, 0x1, 0x0} },
+       { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
+       { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
+       { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_TOP_BUS_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS5_TOP_RETENTION_SYS_PWR_REG,            { 0x1, 0x0, 0x1} },
+       { EXYNOS5_TOP_PWR_SYS_PWR_REG,                  { 0x3, 0x0, 0x3} },
+       { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x0} },
+       { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x1} },
+       { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x3} },
+       { EXYNOS5_LOGIC_RESET_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG,              { 0x1, 0x0, 0x1} },
+       { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG,       { 0x1, 0x0, 0x1} },
+       { EXYNOS5_USBOTG_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_G2D_MEM_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS5_USBDRD_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_SDMMC_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
+       { EXYNOS5_CSSYS_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
+       { EXYNOS5_SECSS_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
+       { EXYNOS5_ROTATOR_MEM_SYS_PWR_REG,              { 0x3, 0x0, 0x0} },
+       { EXYNOS5_INTRAM_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_INTROM_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_JPEG_MEM_SYS_PWR_REG,                 { 0x3, 0x0, 0x0} },
+       { EXYNOS5_HSI_MEM_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS5_MCUIOP_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_SATA_MEM_SYS_PWR_REG,                 { 0x3, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
+       { EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_XUSBXTI_SYS_PWR_REG,                  { 0x1, 0x1, 0x1} },
+       { EXYNOS5_XXTI_SYS_PWR_REG,                     { 0x1, 0x1, 0x0} },
+       { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_GPIO_MODE_SYS_PWR_REG,                { 0x1, 0x0, 0x0} },
+       { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG,        { 0x1, 0x0, 0x1} },
+       { EXYNOS5_GSCL_SYS_PWR_REG,                     { 0x7, 0x0, 0x0} },
+       { EXYNOS5_ISP_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_MFC_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_G3D_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_DISP1_SYS_PWR_REG,                    { 0x7, 0x0, 0x0} },
+       { EXYNOS5_MAU_SYS_PWR_REG,                      { 0x7, 0x7, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG,          { 0x1, 0x1, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG,           { 0x1, 0x1, 0x0} },
+       { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { PMU_TABLE_END,},
+};
+
+void __iomem *exynos5_list_both_cnt_feed[] = {
+       EXYNOS5_ARM_CORE0_OPTION,
+       EXYNOS5_ARM_CORE1_OPTION,
+       EXYNOS5_ARM_COMMON_OPTION,
+       EXYNOS5_GSCL_OPTION,
+       EXYNOS5_ISP_OPTION,
+       EXYNOS5_MFC_OPTION,
+       EXYNOS5_G3D_OPTION,
+       EXYNOS5_DISP1_OPTION,
+       EXYNOS5_MAU_OPTION,
+       EXYNOS5_TOP_PWR_OPTION,
+       EXYNOS5_TOP_PWR_SYSMEM_OPTION,
+};
+
+void __iomem *exynos5_list_diable_wfi_wfe[] = {
+       EXYNOS5_ARM_CORE1_OPTION,
+       EXYNOS5_FSYS_ARM_OPTION,
+       EXYNOS5_ISP_ARM_OPTION,
+};
+
+static void exynos5_init_pmu(void)
 {
        unsigned int i;
+       unsigned int tmp;
 
-       for (i = 0; (exynos4_pmu_config[i].reg != PMU_TABLE_END) ; i++)
-               __raw_writel(exynos4_pmu_config[i].val[mode],
-                               exynos4_pmu_config[i].reg);
+       /*
+        * Enable both SC_FEEDBACK and SC_COUNTER
+        */
+       for (i = 0 ; i < ARRAY_SIZE(exynos5_list_both_cnt_feed) ; i++) {
+               tmp = __raw_readl(exynos5_list_both_cnt_feed[i]);
+               tmp |= (EXYNOS5_USE_SC_FEEDBACK |
+                       EXYNOS5_USE_SC_COUNTER);
+               __raw_writel(tmp, exynos5_list_both_cnt_feed[i]);
+       }
+
+       /*
+        * SKIP_DEACTIVATE_ACEACP_IN_PWDN_BITFIELD Enable
+        * MANUAL_L2RSTDISABLE_CONTROL_BITFIELD Enable
+        */
+       tmp = __raw_readl(EXYNOS5_ARM_COMMON_OPTION);
+       tmp |= (EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL |
+               EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN);
+       __raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
+
+       /*
+        * Disable WFI/WFE on XXX_OPTION
+        */
+       for (i = 0 ; i < ARRAY_SIZE(exynos5_list_diable_wfi_wfe) ; i++) {
+               tmp = __raw_readl(exynos5_list_diable_wfi_wfe[i]);
+               tmp &= ~(EXYNOS5_OPTION_USE_STANDBYWFE |
+                        EXYNOS5_OPTION_USE_STANDBYWFI);
+               __raw_writel(tmp, exynos5_list_diable_wfi_wfe[i]);
+       }
+}
+
+void exynos_sys_powerdown_conf(enum sys_powerdown mode)
+{
+       unsigned int i;
+
+       if (soc_is_exynos5250())
+               exynos5_init_pmu();
+
+       for (i = 0; (exynos_pmu_config[i].reg != PMU_TABLE_END) ; i++)
+               __raw_writel(exynos_pmu_config[i].val[mode],
+                               exynos_pmu_config[i].reg);
+
+       if (soc_is_exynos4412()) {
+               for (i = 0; exynos4412_pmu_config[i].reg != PMU_TABLE_END ; i++)
+                       __raw_writel(exynos4412_pmu_config[i].val[mode],
+                               exynos4412_pmu_config[i].reg);
+       }
 }
 
-static int __init exynos4_pmu_init(void)
+static int __init exynos_pmu_init(void)
 {
-       exynos4_pmu_config = exynos4210_pmu_config;
+       exynos_pmu_config = exynos4210_pmu_config;
 
        if (soc_is_exynos4210()) {
-               exynos4_pmu_config = exynos4210_pmu_config;
+               exynos_pmu_config = exynos4210_pmu_config;
                pr_info("EXYNOS4210 PMU Initialize\n");
-       } else if (soc_is_exynos4212()) {
-               exynos4_pmu_config = exynos4212_pmu_config;
-               pr_info("EXYNOS4212 PMU Initialize\n");
+       } else if (soc_is_exynos4212() || soc_is_exynos4412()) {
+               exynos_pmu_config = exynos4x12_pmu_config;
+               pr_info("EXYNOS4x12 PMU Initialize\n");
+       } else if (soc_is_exynos5250()) {
+               exynos_pmu_config = exynos5250_pmu_config;
+               pr_info("EXYNOS5250 PMU Initialize\n");
        } else {
-               pr_info("EXYNOS4: PMU not supported\n");
+               pr_info("EXYNOS: PMU not supported\n");
        }
 
        return 0;
 }
-arch_initcall(exynos4_pmu_init);
+arch_initcall(exynos_pmu_init);
index cca8c0c747946d0108ab034149b7e902c10cdc3e..0021f726b153210b04b876cabc7d36c61663c2a1 100644 (file)
@@ -34,6 +34,7 @@ config ARCH_MX53
 config SOC_IMX1
        bool
        select ARCH_MX1
+       select COMMON_CLK
        select CPU_ARM920T
        select IMX_HAVE_IOMUX_V1
        select MXC_AVIC
@@ -42,12 +43,14 @@ config SOC_IMX21
        bool
        select MACH_MX21
        select CPU_ARM926T
+       select COMMON_CLK
        select IMX_HAVE_IOMUX_V1
        select MXC_AVIC
 
 config SOC_IMX25
        bool
        select ARCH_MX25
+       select COMMON_CLK
        select CPU_ARM926T
        select ARCH_MXC_IOMUX_V3
        select MXC_AVIC
@@ -56,6 +59,7 @@ config SOC_IMX27
        bool
        select MACH_MX27
        select CPU_ARM926T
+       select COMMON_CLK
        select IMX_HAVE_IOMUX_V1
        select MXC_AVIC
 
@@ -64,12 +68,14 @@ config SOC_IMX31
        select CPU_V6
        select IMX_HAVE_PLATFORM_MXC_RNGA
        select MXC_AVIC
+       select COMMON_CLK
        select SMP_ON_UP if SMP
 
 config SOC_IMX35
        bool
        select CPU_V6
        select ARCH_MXC_IOMUX_V3
+       select COMMON_CLK
        select HAVE_EPIT
        select MXC_AVIC
        select SMP_ON_UP if SMP
@@ -77,6 +83,7 @@ config SOC_IMX35
 config SOC_IMX5
        select CPU_V7
        select MXC_TZIC
+       select COMMON_CLK
        select ARCH_MXC_IOMUX_V3
        select ARCH_HAS_CPUFREQ
        select ARCH_MX5
@@ -815,6 +822,7 @@ config SOC_IMX6Q
        bool "i.MX6 Quad support"
        select ARM_CPU_SUSPEND if PM
        select ARM_GIC
+       select COMMON_CLK
        select CPU_V7
        select HAVE_ARM_SCU
        select HAVE_IMX_GPC
index 4937c070a57e4cd2e0871449f2391d882d4bb5bd..ff29421414f24262e5e4b66c74270f58fa401798 100644 (file)
@@ -1,15 +1,18 @@
-obj-$(CONFIG_SOC_IMX1) += clock-imx1.o mm-imx1.o
-obj-$(CONFIG_SOC_IMX21) += clock-imx21.o mm-imx21.o
+obj-$(CONFIG_SOC_IMX1) += clk-imx1.o mm-imx1.o
+obj-$(CONFIG_SOC_IMX21) += clk-imx21.o mm-imx21.o
 
-obj-$(CONFIG_SOC_IMX25) += clock-imx25.o mm-imx25.o ehci-imx25.o cpu-imx25.o
+obj-$(CONFIG_SOC_IMX25) += clk-imx25.o mm-imx25.o ehci-imx25.o cpu-imx25.o
 
 obj-$(CONFIG_SOC_IMX27) += cpu-imx27.o pm-imx27.o
-obj-$(CONFIG_SOC_IMX27) += clock-imx27.o mm-imx27.o ehci-imx27.o
+obj-$(CONFIG_SOC_IMX27) += clk-imx27.o mm-imx27.o ehci-imx27.o
 
-obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clock-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
-obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clock-imx35.o ehci-imx35.o pm-imx3.o
+obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
+obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o
 
-obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clock-mx51-mx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
+obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
+
+obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \
+                           clk-pfd.o clk-busy.o
 
 # Support for CMOS sensor interface
 obj-$(CONFIG_MX1_VIDEO) += mx1-camera-fiq.o mx1-camera-fiq-ksym.o
@@ -70,7 +73,7 @@ obj-$(CONFIG_CPU_V7) += head-v7.o
 AFLAGS_head-v7.o :=-Wa,-march=armv7-a
 obj-$(CONFIG_SMP) += platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
-obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o
+obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
 
 ifeq ($(CONFIG_PM),y)
 obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o
index 3851d8a27875996ac55946bda8d93d11af9f54c2..05541cf4a87873968064bab7ba90a2cb875bc7dc 100644 (file)
@@ -42,4 +42,5 @@ dtb-$(CONFIG_MACH_IMX51_DT) += imx51-babbage.dtb
 dtb-$(CONFIG_MACH_IMX53_DT) += imx53-ard.dtb imx53-evk.dtb \
                               imx53-qsb.dtb imx53-smd.dtb
 dtb-$(CONFIG_SOC_IMX6Q)        += imx6q-arm2.dtb \
-                          imx6q-sabrelite.dtb
+                          imx6q-sabrelite.dtb \
+                          imx6q-sabresd.dtb \
diff --git a/arch/arm/mach-imx/clk-busy.c b/arch/arm/mach-imx/clk-busy.c
new file mode 100644 (file)
index 0000000..1a7a8dd
--- /dev/null
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include "clk.h"
+
+static int clk_busy_wait(void __iomem *reg, u8 shift)
+{
+       unsigned long timeout = jiffies + msecs_to_jiffies(10);
+
+       while (readl_relaxed(reg) & (1 << shift))
+               if (time_after(jiffies, timeout))
+                       return -ETIMEDOUT;
+
+       return 0;
+}
+
+struct clk_busy_divider {
+       struct clk_divider div;
+       const struct clk_ops *div_ops;
+       void __iomem *reg;
+       u8 shift;
+};
+
+static inline struct clk_busy_divider *to_clk_busy_divider(struct clk_hw *hw)
+{
+       struct clk_divider *div = container_of(hw, struct clk_divider, hw);
+
+       return container_of(div, struct clk_busy_divider, div);
+}
+
+static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
+                                                 unsigned long parent_rate)
+{
+       struct clk_busy_divider *busy = to_clk_busy_divider(hw);
+
+       return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
+}
+
+static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long *prate)
+{
+       struct clk_busy_divider *busy = to_clk_busy_divider(hw);
+
+       return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
+}
+
+static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct clk_busy_divider *busy = to_clk_busy_divider(hw);
+       int ret;
+
+       ret = busy->div_ops->set_rate(&busy->div.hw, rate, parent_rate);
+       if (!ret)
+               ret = clk_busy_wait(busy->reg, busy->shift);
+
+       return ret;
+}
+
+static struct clk_ops clk_busy_divider_ops = {
+       .recalc_rate = clk_busy_divider_recalc_rate,
+       .round_rate = clk_busy_divider_round_rate,
+       .set_rate = clk_busy_divider_set_rate,
+};
+
+struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
+                                void __iomem *reg, u8 shift, u8 width,
+                                void __iomem *busy_reg, u8 busy_shift)
+{
+       struct clk_busy_divider *busy;
+       struct clk *clk;
+       struct clk_init_data init;
+
+       busy = kzalloc(sizeof(*busy), GFP_KERNEL);
+       if (!busy)
+               return ERR_PTR(-ENOMEM);
+
+       busy->reg = busy_reg;
+       busy->shift = busy_shift;
+
+       busy->div.reg = reg;
+       busy->div.shift = shift;
+       busy->div.width = width;
+       busy->div.lock = &imx_ccm_lock;
+       busy->div_ops = &clk_divider_ops;
+
+       init.name = name;
+       init.ops = &clk_busy_divider_ops;
+       init.flags = CLK_SET_RATE_PARENT;
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+
+       busy->div.hw.init = &init;
+
+       clk = clk_register(NULL, &busy->div.hw);
+       if (IS_ERR(clk)) /* clk_register() returns ERR_PTR on failure, never NULL */
+               kfree(busy);
+
+       return clk;
+}
+
+struct clk_busy_mux {
+       struct clk_mux mux;
+       const struct clk_ops *mux_ops;
+       void __iomem *reg;
+       u8 shift;
+};
+
+static inline struct clk_busy_mux *to_clk_busy_mux(struct clk_hw *hw)
+{
+       struct clk_mux *mux = container_of(hw, struct clk_mux, hw);
+
+       return container_of(mux, struct clk_busy_mux, mux);
+}
+
+static u8 clk_busy_mux_get_parent(struct clk_hw *hw)
+{
+       struct clk_busy_mux *busy = to_clk_busy_mux(hw);
+
+       return busy->mux_ops->get_parent(&busy->mux.hw);
+}
+
+static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct clk_busy_mux *busy = to_clk_busy_mux(hw);
+       int ret;
+
+       ret = busy->mux_ops->set_parent(&busy->mux.hw, index);
+       if (!ret)
+               ret = clk_busy_wait(busy->reg, busy->shift);
+
+       return ret;
+}
+
+/* static for consistency with clk_busy_divider_ops; used only in this file */
+static struct clk_ops clk_busy_mux_ops = {
+       .get_parent = clk_busy_mux_get_parent,
+       .set_parent = clk_busy_mux_set_parent,
+
+struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
+                            u8 width, void __iomem *busy_reg, u8 busy_shift,
+                            const char **parent_names, int num_parents)
+{
+       struct clk_busy_mux *busy;
+       struct clk *clk;
+       struct clk_init_data init;
+
+       busy = kzalloc(sizeof(*busy), GFP_KERNEL);
+       if (!busy)
+               return ERR_PTR(-ENOMEM);
+
+       busy->reg = busy_reg;
+       busy->shift = busy_shift;
+
+       busy->mux.reg = reg;
+       busy->mux.shift = shift;
+       busy->mux.width = width;
+       busy->mux.lock = &imx_ccm_lock;
+       busy->mux_ops = &clk_mux_ops;
+
+       init.name = name;
+       init.ops = &clk_busy_mux_ops;
+       init.flags = 0;
+       init.parent_names = parent_names;
+       init.num_parents = num_parents;
+
+       busy->mux.hw.init = &init;
+
+       clk = clk_register(NULL, &busy->mux.hw);
+       if (IS_ERR(clk))
+               kfree(busy);
+
+       return clk;
+}
diff --git a/arch/arm/mach-imx/clk-gate2.c b/arch/arm/mach-imx/clk-gate2.c
new file mode 100644 (file)
index 0000000..3c1b8ff
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
+ * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Gated clock implementation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+
+/**
+ * DOC: basic gatable clock which can gate and ungate it's ouput
+ *
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parent is (un)prepared
+ * enable - clk_enable and clk_disable are functional & control gating
+ * rate - inherits rate from parent.  No clk_set_rate support
+ * parent - fixed parent.  No clk_set_parent support
+ */
+
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
+static int clk_gate2_enable(struct clk_hw *hw)
+{
+       struct clk_gate *gate = to_clk_gate(hw);
+       u32 reg;
+       unsigned long flags = 0;
+
+       if (gate->lock)
+               spin_lock_irqsave(gate->lock, flags);
+
+       reg = readl(gate->reg);
+       reg |= 3 << gate->bit_idx;
+       writel(reg, gate->reg);
+
+       if (gate->lock)
+               spin_unlock_irqrestore(gate->lock, flags);
+
+       return 0;
+}
+
+static void clk_gate2_disable(struct clk_hw *hw)
+{
+       struct clk_gate *gate = to_clk_gate(hw);
+       u32 reg;
+       unsigned long flags = 0;
+
+       if (gate->lock)
+               spin_lock_irqsave(gate->lock, flags);
+
+       reg = readl(gate->reg);
+       reg &= ~(3 << gate->bit_idx);
+       writel(reg, gate->reg);
+
+       if (gate->lock)
+               spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int clk_gate2_is_enabled(struct clk_hw *hw)
+{
+       u32 reg;
+       struct clk_gate *gate = to_clk_gate(hw);
+
+       reg = readl(gate->reg);
+
+       if (((reg >> gate->bit_idx) & 3) == 3)
+               return 1;
+
+       return 0;
+}
+
+static struct clk_ops clk_gate2_ops = {
+       .enable = clk_gate2_enable,
+       .disable = clk_gate2_disable,
+       .is_enabled = clk_gate2_is_enabled,
+};
+
+struct clk *clk_register_gate2(struct device *dev, const char *name,
+               const char *parent_name, unsigned long flags,
+               void __iomem *reg, u8 bit_idx,
+               u8 clk_gate2_flags, spinlock_t *lock)
+{
+       struct clk_gate *gate;
+       struct clk *clk;
+       struct clk_init_data init;
+
+       gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+       if (!gate)
+               return ERR_PTR(-ENOMEM);
+
+       /* struct clk_gate assignments */
+       gate->reg = reg;
+       gate->bit_idx = bit_idx;
+       gate->flags = clk_gate2_flags;
+       gate->lock = lock;
+
+       init.name = name;
+       init.ops = &clk_gate2_ops;
+       init.flags = flags;
+       init.parent_names = parent_name ? &parent_name : NULL;
+       init.num_parents = parent_name ? 1 : 0;
+
+       gate->hw.init = &init;
+
+       clk = clk_register(dev, &gate->hw);
+       if (IS_ERR(clk))
+               kfree(gate); /* free the allocation, not the ERR_PTR-encoded error */
+
+       return clk;
+}
diff --git a/arch/arm/mach-imx/clk-imx1.c b/arch/arm/mach-imx/clk-imx1.c
new file mode 100644 (file)
index 0000000..0f0beb5
--- /dev/null
@@ -0,0 +1,115 @@
+/*
+ *  Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+#include "clk.h"
+
+/* CCM register addresses */
+#define IO_ADDR_CCM(off)       (MX1_IO_ADDRESS(MX1_CCM_BASE_ADDR + (off)))
+
+#define CCM_CSCR       IO_ADDR_CCM(0x0)
+#define CCM_MPCTL0     IO_ADDR_CCM(0x4)
+#define CCM_SPCTL0     IO_ADDR_CCM(0xc)
+#define CCM_PCDR       IO_ADDR_CCM(0x20)
+
+/* SCM register addresses */
+#define IO_ADDR_SCM(off)       (MX1_IO_ADDRESS(MX1_SCM_BASE_ADDR + (off)))
+
+#define SCM_GCCR       IO_ADDR_SCM(0xc)
+
+static const char *prem_sel_clks[] = { "clk32_premult", "clk16m", };
+static const char *clko_sel_clks[] = { "per1", "hclk", "clk48m", "clk16m", "prem",
+                               "fclk", };
+enum imx1_clks {
+       dummy, clk32, clk16m_ext, clk16m, clk32_premult, prem, mpll, spll, mcu,
+       fclk, hclk, clk48m, per1, per2, per3, clko, dma_gate, csi_gate,
+       mma_gate, usbd_gate, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx1_clocks_init(unsigned long fref)
+{
+       int i;
+
+       clk[dummy] = imx_clk_fixed("dummy", 0);
+       clk[clk32] = imx_clk_fixed("clk32", fref);
+       clk[clk16m_ext] = imx_clk_fixed("clk16m_ext", 16000000);
+       clk[clk16m] = imx_clk_gate("clk16m", "clk16m_ext", CCM_CSCR, 17);
+       clk[clk32_premult] = imx_clk_fixed_factor("clk32_premult", "clk32", 512, 1);
+       clk[prem] = imx_clk_mux("prem", CCM_CSCR, 16, 1, prem_sel_clks,
+                       ARRAY_SIZE(prem_sel_clks));
+       clk[mpll] = imx_clk_pllv1("mpll", "clk32_premult", CCM_MPCTL0);
+       clk[spll] = imx_clk_pllv1("spll", "prem", CCM_SPCTL0);
+       clk[mcu] = imx_clk_divider("mcu", "clk32_premult", CCM_CSCR, 15, 1);
+       clk[fclk] = imx_clk_divider("fclk", "mpll", CCM_CSCR, 15, 1);
+       clk[hclk] = imx_clk_divider("hclk", "spll", CCM_CSCR, 10, 4);
+       clk[clk48m] = imx_clk_divider("clk48m", "spll", CCM_CSCR, 26, 3);
+       clk[per1] = imx_clk_divider("per1", "spll", CCM_PCDR, 0, 4);
+       clk[per2] = imx_clk_divider("per2", "spll", CCM_PCDR, 4, 4);
+       clk[per3] = imx_clk_divider("per3", "spll", CCM_PCDR, 16, 7);
+       clk[clko] = imx_clk_mux("clko", CCM_CSCR, 29, 3, clko_sel_clks,
+                       ARRAY_SIZE(clko_sel_clks));
+       clk[dma_gate] = imx_clk_gate("dma_gate", "hclk", SCM_GCCR, 4);
+       clk[csi_gate] = imx_clk_gate("csi_gate", "hclk", SCM_GCCR, 2);
+       clk[mma_gate] = imx_clk_gate("mma_gate", "hclk", SCM_GCCR, 1);
+       clk[usbd_gate] = imx_clk_gate("usbd_gate", "clk48m", SCM_GCCR, 0);
+
+       for (i = 0; i < ARRAY_SIZE(clk); i++)
+               if (IS_ERR(clk[i]))
+                       pr_err("imx1 clk %d: register failed with %ld\n",
+                               i, PTR_ERR(clk[i]));
+
+       clk_register_clkdev(clk[dma_gate], "ahb", "imx-dma");
+       clk_register_clkdev(clk[csi_gate], NULL, "mx1-camera.0");
+       clk_register_clkdev(clk[mma_gate], "mma", NULL);
+       clk_register_clkdev(clk[usbd_gate], NULL, "imx_udc.0");
+       clk_register_clkdev(clk[per1], "per", "imx-gpt.0");
+       clk_register_clkdev(clk[hclk], "ipg", "imx-gpt.0");
+       clk_register_clkdev(clk[per1], "per", "imx1-uart.0");
+       clk_register_clkdev(clk[hclk], "ipg", "imx1-uart.0");
+       clk_register_clkdev(clk[per1], "per", "imx1-uart.1");
+       clk_register_clkdev(clk[hclk], "ipg", "imx1-uart.1");
+       clk_register_clkdev(clk[per1], "per", "imx1-uart.2");
+       clk_register_clkdev(clk[hclk], "ipg", "imx1-uart.2");
+       clk_register_clkdev(clk[hclk], NULL, "imx-i2c.0");
+       clk_register_clkdev(clk[per2], "per", "imx1-cspi.0");
+       clk_register_clkdev(clk[dummy], "ipg", "imx1-cspi.0");
+       clk_register_clkdev(clk[per2], "per", "imx1-cspi.1");
+       clk_register_clkdev(clk[dummy], "ipg", "imx1-cspi.1");
+       clk_register_clkdev(clk[per2], NULL, "imx-mmc.0");
+       clk_register_clkdev(clk[per2], "per", "imx-fb.0");
+       clk_register_clkdev(clk[dummy], "ipg", "imx-fb.0");
+       clk_register_clkdev(clk[dummy], "ahb", "imx-fb.0");
+       clk_register_clkdev(clk[hclk], "mshc", NULL);
+       clk_register_clkdev(clk[per3], "ssi", NULL);
+       clk_register_clkdev(clk[clk32], NULL, "mxc_rtc.0");
+       clk_register_clkdev(clk[clko], "clko", NULL);
+
+       mxc_timer_init(NULL, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR),
+                       MX1_TIM1_INT);
+
+       return 0;
+}
diff --git a/arch/arm/mach-imx/clk-imx21.c b/arch/arm/mach-imx/clk-imx21.c
new file mode 100644 (file)
index 0000000..4e4f384
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+#include "clk.h"
+
+#define IO_ADDR_CCM(off)       (MX21_IO_ADDRESS(MX21_CCM_BASE_ADDR + (off)))
+
+/* Register offsets */
+#define CCM_CSCR               IO_ADDR_CCM(0x0)
+#define CCM_MPCTL0             IO_ADDR_CCM(0x4)
+#define CCM_MPCTL1             IO_ADDR_CCM(0x8)
+#define CCM_SPCTL0             IO_ADDR_CCM(0xc)
+#define CCM_SPCTL1             IO_ADDR_CCM(0x10)
+#define CCM_OSC26MCTL          IO_ADDR_CCM(0x14)
+#define CCM_PCDR0              IO_ADDR_CCM(0x18)
+#define CCM_PCDR1              IO_ADDR_CCM(0x1c)
+#define CCM_PCCR0              IO_ADDR_CCM(0x20)
+#define CCM_PCCR1              IO_ADDR_CCM(0x24)
+#define CCM_CCSR               IO_ADDR_CCM(0x28)
+#define CCM_PMCTL              IO_ADDR_CCM(0x2c)
+#define CCM_PMCOUNT            IO_ADDR_CCM(0x30)
+#define CCM_WKGDCTL            IO_ADDR_CCM(0x34)
+
+static const char *mpll_sel_clks[] = { "fpm", "ckih", };
+static const char *spll_sel_clks[] = { "fpm", "ckih", };
+
+enum imx21_clks {
+       ckil, ckih, fpm, mpll_sel, spll_sel, mpll, spll, fclk, hclk, ipg, per1,
+       per2, per3, per4, uart1_ipg_gate, uart2_ipg_gate, uart3_ipg_gate,
+       uart4_ipg_gate, gpt1_ipg_gate, gpt2_ipg_gate, gpt3_ipg_gate,
+       pwm_ipg_gate, sdhc1_ipg_gate, sdhc2_ipg_gate, lcdc_ipg_gate,
+       lcdc_hclk_gate, cspi3_ipg_gate, cspi2_ipg_gate, cspi1_ipg_gate,
+       per4_gate, csi_hclk_gate, usb_div, usb_gate, usb_hclk_gate, ssi1_gate,
+       ssi2_gate, nfc_div, nfc_gate, dma_gate, dma_hclk_gate, brom_gate,
+       emma_gate, emma_hclk_gate, slcdc_gate, slcdc_hclk_gate, wdog_gate,
+       gpio_gate, i2c_gate, kpp_gate, owire_gate, rtc_gate, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+/*
+ * must be called very early to get information about the
+ * available clock rate when the timer framework starts
+ */
+int __init mx21_clocks_init(unsigned long lref, unsigned long href)
+{
+       int i;
+
+       clk[ckil] = imx_clk_fixed("ckil", lref);
+       clk[ckih] = imx_clk_fixed("ckih", href);
+       clk[fpm] = imx_clk_fixed_factor("fpm", "ckil", 512, 1);
+       clk[mpll_sel] = imx_clk_mux("mpll_sel", CCM_CSCR, 16, 1, mpll_sel_clks,
+                       ARRAY_SIZE(mpll_sel_clks));
+       clk[spll_sel] = imx_clk_mux("spll_sel", CCM_CSCR, 17, 1, spll_sel_clks,
+                       ARRAY_SIZE(spll_sel_clks));
+       clk[mpll] = imx_clk_pllv1("mpll", "mpll_sel", CCM_MPCTL0);
+       clk[spll] = imx_clk_pllv1("spll", "spll_sel", CCM_SPCTL0);
+       clk[fclk] = imx_clk_divider("fclk", "mpll", CCM_CSCR, 29, 3);
+       clk[hclk] = imx_clk_divider("hclk", "fclk", CCM_CSCR, 10, 4);
+       clk[ipg] = imx_clk_divider("ipg", "hclk", CCM_CSCR, 9, 1);
+       clk[per1] = imx_clk_divider("per1", "mpll", CCM_PCDR1, 0, 6);
+       clk[per2] = imx_clk_divider("per2", "mpll", CCM_PCDR1, 8, 6);
+       clk[per3] = imx_clk_divider("per3", "mpll", CCM_PCDR1, 16, 6);
+       clk[per4] = imx_clk_divider("per4", "mpll", CCM_PCDR1, 24, 6);
+       clk[uart1_ipg_gate] = imx_clk_gate("uart1_ipg_gate", "ipg", CCM_PCCR0, 0);
+       clk[uart2_ipg_gate] = imx_clk_gate("uart2_ipg_gate", "ipg", CCM_PCCR0, 1);
+       clk[uart3_ipg_gate] = imx_clk_gate("uart3_ipg_gate", "ipg", CCM_PCCR0, 2);
+       clk[uart4_ipg_gate] = imx_clk_gate("uart4_ipg_gate", "ipg", CCM_PCCR0, 3);
+       clk[gpt1_ipg_gate] = imx_clk_gate("gpt1_ipg_gate", "ipg", CCM_PCCR1, 25);
+       clk[gpt2_ipg_gate] = imx_clk_gate("gpt2_ipg_gate", "ipg", CCM_PCCR1, 26);
+       clk[gpt3_ipg_gate] = imx_clk_gate("gpt3_ipg_gate", "ipg", CCM_PCCR1, 27);
+       clk[pwm_ipg_gate] = imx_clk_gate("pwm_ipg_gate", "ipg", CCM_PCCR1, 28);
+       clk[sdhc1_ipg_gate] = imx_clk_gate("sdhc1_ipg_gate", "ipg", CCM_PCCR0, 9);
+       clk[sdhc2_ipg_gate] = imx_clk_gate("sdhc2_ipg_gate", "ipg", CCM_PCCR0, 10);
+       clk[lcdc_ipg_gate] = imx_clk_gate("lcdc_ipg_gate", "ipg", CCM_PCCR0, 18);
+       clk[lcdc_hclk_gate] = imx_clk_gate("lcdc_hclk_gate", "hclk", CCM_PCCR0, 26);
+       clk[cspi3_ipg_gate] = imx_clk_gate("cspi3_ipg_gate", "ipg", CCM_PCCR1, 23);
+       clk[cspi2_ipg_gate] = imx_clk_gate("cspi2_ipg_gate", "ipg", CCM_PCCR0, 5);
+       clk[cspi1_ipg_gate] = imx_clk_gate("cspi1_ipg_gate", "ipg", CCM_PCCR0, 4);
+       clk[per4_gate] = imx_clk_gate("per4_gate", "per4", CCM_PCCR0, 22);
+       clk[csi_hclk_gate] = imx_clk_gate("csi_hclk_gate", "hclk", CCM_PCCR0, 31);
+       clk[usb_div] = imx_clk_divider("usb_div", "spll", CCM_CSCR, 26, 3);
+       clk[usb_gate] = imx_clk_gate("usb_gate", "usb_div", CCM_PCCR0, 14);
+       clk[usb_hclk_gate] = imx_clk_gate("usb_hclk_gate", "hclk", CCM_PCCR0, 24);
+       clk[ssi1_gate] = imx_clk_gate("ssi1_gate", "ipg", CCM_PCCR0, 6);
+       clk[ssi2_gate] = imx_clk_gate("ssi2_gate", "ipg", CCM_PCCR0, 7);
+       clk[nfc_div] = imx_clk_divider("nfc_div", "ipg", CCM_PCDR0, 12, 4);
+       clk[nfc_gate] = imx_clk_gate("nfc_gate", "nfc_div", CCM_PCCR0, 19);
+       clk[dma_gate] = imx_clk_gate("dma_gate", "ipg", CCM_PCCR0, 13);
+       clk[dma_hclk_gate] = imx_clk_gate("dma_hclk_gate", "hclk", CCM_PCCR0, 30);
+       clk[brom_gate] = imx_clk_gate("brom_gate", "hclk", CCM_PCCR0, 28);
+       clk[emma_gate] = imx_clk_gate("emma_gate", "ipg", CCM_PCCR0, 15);
+       clk[emma_hclk_gate] = imx_clk_gate("emma_hclk_gate", "hclk", CCM_PCCR0, 27);
+       clk[slcdc_gate] = imx_clk_gate("slcdc_gate", "ipg", CCM_PCCR0, 25);
+       clk[slcdc_hclk_gate] = imx_clk_gate("slcdc_hclk_gate", "hclk", CCM_PCCR0, 21);
+       clk[wdog_gate] = imx_clk_gate("wdog_gate", "ipg", CCM_PCCR1, 24);
+       clk[gpio_gate] = imx_clk_gate("gpio_gate", "ipg", CCM_PCCR0, 11);
+       clk[i2c_gate] = imx_clk_gate("i2c_gate", "ipg", CCM_PCCR0, 12);
+       clk[kpp_gate] = imx_clk_gate("kpp_gate", "ipg", CCM_PCCR1, 30);
+       clk[owire_gate] = imx_clk_gate("owire_gate", "ipg", CCM_PCCR1, 31);
+       clk[rtc_gate] = imx_clk_gate("rtc_gate", "ipg", CCM_PCCR1, 29);
+
+       for (i = 0; i < ARRAY_SIZE(clk); i++)
+               if (IS_ERR(clk[i]))
+                       pr_err("i.MX21 clk %d: register failed with %ld\n",
+                               i, PTR_ERR(clk[i]));
+
+       clk_register_clkdev(clk[per1], "per1", NULL);
+       clk_register_clkdev(clk[per2], "per2", NULL);
+       clk_register_clkdev(clk[per3], "per3", NULL);
+       clk_register_clkdev(clk[per4], "per4", NULL);
+       clk_register_clkdev(clk[per1], "per", "imx21-uart.0");
+       clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
+       clk_register_clkdev(clk[per1], "per", "imx21-uart.1");
+       clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
+       clk_register_clkdev(clk[per1], "per", "imx21-uart.2");
+       clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
+       clk_register_clkdev(clk[per1], "per", "imx21-uart.3");
+       clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
+       clk_register_clkdev(clk[gpt1_ipg_gate], "ipg", "imx-gpt.0");
+       clk_register_clkdev(clk[per1], "per", "imx-gpt.0");
+       clk_register_clkdev(clk[gpt2_ipg_gate], "ipg", "imx-gpt.1");
+       clk_register_clkdev(clk[per1], "per", "imx-gpt.1");
+       clk_register_clkdev(clk[gpt3_ipg_gate], "ipg", "imx-gpt.2");
+       clk_register_clkdev(clk[per1], "per", "imx-gpt.2");
+       clk_register_clkdev(clk[pwm_ipg_gate], "pwm", "mxc_pwm.0");
+       clk_register_clkdev(clk[per2], "per", "imx21-cspi.0");
+       clk_register_clkdev(clk[cspi1_ipg_gate], "ipg", "imx21-cspi.0");
+       clk_register_clkdev(clk[per2], "per", "imx21-cspi.1");
+       clk_register_clkdev(clk[cspi2_ipg_gate], "ipg", "imx21-cspi.1");
+       clk_register_clkdev(clk[per2], "per", "imx21-cspi.2");
+       clk_register_clkdev(clk[cspi3_ipg_gate], "ipg", "imx21-cspi.2");
+       clk_register_clkdev(clk[per3], "per", "imx-fb.0");
+       clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0");
+       clk_register_clkdev(clk[lcdc_hclk_gate], "ahb", "imx-fb.0");
+       clk_register_clkdev(clk[usb_gate], "per", "imx21-hcd.0");
+       clk_register_clkdev(clk[usb_hclk_gate], "ahb", "imx21-hcd.0");
+       clk_register_clkdev(clk[nfc_gate], NULL, "mxc_nand.0");
+       clk_register_clkdev(clk[dma_hclk_gate], "ahb", "imx-dma");
+       clk_register_clkdev(clk[dma_gate], "ipg", "imx-dma");
+       clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
+       clk_register_clkdev(clk[i2c_gate], NULL, "imx-i2c.0");
+       clk_register_clkdev(clk[kpp_gate], NULL, "mxc-keypad");
+       clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1.0");
+       clk_register_clkdev(clk[brom_gate], "brom", NULL);
+       clk_register_clkdev(clk[emma_gate], "emma", NULL);
+       clk_register_clkdev(clk[slcdc_gate], "slcdc", NULL);
+       clk_register_clkdev(clk[gpio_gate], "gpio", NULL);
+       clk_register_clkdev(clk[rtc_gate], "rtc", NULL);
+       clk_register_clkdev(clk[csi_hclk_gate], "csi", NULL);
+       clk_register_clkdev(clk[ssi1_gate], "ssi1", NULL);
+       clk_register_clkdev(clk[ssi2_gate], "ssi2", NULL);
+       clk_register_clkdev(clk[sdhc1_ipg_gate], "sdhc1", NULL);
+       clk_register_clkdev(clk[sdhc2_ipg_gate], "sdhc2", NULL);
+
+       mxc_timer_init(NULL, MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR),
+                       MX21_INT_GPT1);
+       return 0;
+}
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
new file mode 100644 (file)
index 0000000..d9833bb
--- /dev/null
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2009 by Sascha Hauer, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+#include <mach/mx25.h>
+#include "clk.h"
+
+#define CRM_BASE       MX25_IO_ADDRESS(MX25_CRM_BASE_ADDR)
+
+#define CCM_MPCTL      0x00
+#define CCM_UPCTL      0x04
+#define CCM_CCTL       0x08
+#define CCM_CGCR0      0x0C
+#define CCM_CGCR1      0x10
+#define CCM_CGCR2      0x14
+#define CCM_PCDR0      0x18
+#define CCM_PCDR1      0x1C
+#define CCM_PCDR2      0x20
+#define CCM_PCDR3      0x24
+#define CCM_RCSR       0x28
+#define CCM_CRDR       0x2C
+#define CCM_DCVR0      0x30
+#define CCM_DCVR1      0x34
+#define CCM_DCVR2      0x38
+#define CCM_DCVR3      0x3c
+#define CCM_LTR0       0x40
+#define CCM_LTR1       0x44
+#define CCM_LTR2       0x48
+#define CCM_LTR3       0x4c
+#define CCM_MCR                0x64
+
+#define ccm(x) (CRM_BASE + (x))
+
+static const char *cpu_sel_clks[] = { "mpll", "mpll_cpu_3_4", };
+static const char *per_sel_clks[] = { "ahb", "upll", };
+
+enum mx25_clks {
+       dummy, osc, mpll, upll, mpll_cpu_3_4, cpu_sel, cpu, ahb, usb_div, ipg,
+       per0_sel, per1_sel, per2_sel, per3_sel, per4_sel, per5_sel, per6_sel,
+       per7_sel, per8_sel, per9_sel, per10_sel, per11_sel, per12_sel,
+       per13_sel, per14_sel, per15_sel, per0, per1, per2, per3, per4, per5,
+       per6, per7, per8, per9, per10, per11, per12, per13, per14, per15,
+       csi_ipg_per, esdhc1_ipg_per, esdhc2_ipg_per, gpt_ipg_per, i2c_ipg_per,
+       lcdc_ipg_per, nfc_ipg_per, ssi1_ipg_per, ssi2_ipg_per, uart_ipg_per,
+       csi_ahb, esdhc1_ahb, esdhc2_ahb, fec_ahb, lcdc_ahb, sdma_ahb,
+       usbotg_ahb, can1_ipg, can2_ipg, csi_ipg, cspi1_ipg, cspi2_ipg,
+       cspi3_ipg, dryice_ipg, esdhc1_ipg, esdhc2_ipg, fec_ipg, iim_ipg,
+       kpp_ipg, lcdc_ipg, pwm1_ipg, pwm2_ipg, pwm3_ipg, pwm4_ipg, sdma_ipg,
+       ssi1_ipg, ssi2_ipg, tsc_ipg, uart1_ipg, uart2_ipg, uart3_ipg,
+       uart4_ipg, uart5_ipg, wdt_ipg, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx25_clocks_init(void)
+{
+       int i;
+
+       clk[dummy] = imx_clk_fixed("dummy", 0);
+       clk[osc] = imx_clk_fixed("osc", 24000000);
+       clk[mpll] = imx_clk_pllv1("mpll", "osc", ccm(CCM_MPCTL));
+       clk[upll] = imx_clk_pllv1("upll", "osc", ccm(CCM_UPCTL));
+       clk[mpll_cpu_3_4] = imx_clk_fixed_factor("mpll_cpu_3_4", "mpll", 3, 4);
+       clk[cpu_sel] = imx_clk_mux("cpu_sel", ccm(CCM_CCTL), 14, 1, cpu_sel_clks, ARRAY_SIZE(cpu_sel_clks));
+       clk[cpu] = imx_clk_divider("cpu", "cpu_sel", ccm(CCM_CCTL), 30, 2);
+       clk[ahb] = imx_clk_divider("ahb", "cpu", ccm(CCM_CCTL), 28, 2);
+       clk[usb_div] = imx_clk_divider("usb_div", "upll", ccm(CCM_CCTL), 16, 6);
+       clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2);
+       clk[per0_sel] = imx_clk_mux("per0_sel", ccm(CCM_MCR), 0, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per1_sel] = imx_clk_mux("per1_sel", ccm(CCM_MCR), 1, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per2_sel] = imx_clk_mux("per2_sel", ccm(CCM_MCR), 2, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per3_sel] = imx_clk_mux("per3_sel", ccm(CCM_MCR), 3, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per4_sel] = imx_clk_mux("per4_sel", ccm(CCM_MCR), 4, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per5_sel] = imx_clk_mux("per5_sel", ccm(CCM_MCR), 5, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per6_sel] = imx_clk_mux("per6_sel", ccm(CCM_MCR), 6, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per7_sel] = imx_clk_mux("per7_sel", ccm(CCM_MCR), 7, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per8_sel] = imx_clk_mux("per8_sel", ccm(CCM_MCR), 8, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per9_sel] = imx_clk_mux("per9_sel", ccm(CCM_MCR), 9, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per10_sel] = imx_clk_mux("per10_sel", ccm(CCM_MCR), 10, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per11_sel] = imx_clk_mux("per11_sel", ccm(CCM_MCR), 11, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per12_sel] = imx_clk_mux("per12_sel", ccm(CCM_MCR), 12, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per13_sel] = imx_clk_mux("per13_sel", ccm(CCM_MCR), 13, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per14_sel] = imx_clk_mux("per14_sel", ccm(CCM_MCR), 14, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per15_sel] = imx_clk_mux("per15_sel", ccm(CCM_MCR), 15, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+       clk[per0] = imx_clk_divider("per0", "per0_sel", ccm(CCM_PCDR0), 0, 6);
+       clk[per1] = imx_clk_divider("per1", "per1_sel", ccm(CCM_PCDR0), 8, 6);
+       clk[per2] = imx_clk_divider("per2", "per2_sel", ccm(CCM_PCDR0), 16, 6);
+       clk[per3] = imx_clk_divider("per3", "per3_sel", ccm(CCM_PCDR0), 24, 6);
+       clk[per4] = imx_clk_divider("per4", "per4_sel", ccm(CCM_PCDR1), 0, 6);
+       clk[per5] = imx_clk_divider("per5", "per5_sel", ccm(CCM_PCDR1), 8, 6);
+       clk[per6] = imx_clk_divider("per6", "per6_sel", ccm(CCM_PCDR1), 16, 6);
+       clk[per7] = imx_clk_divider("per7", "per7_sel", ccm(CCM_PCDR1), 24, 6);
+       clk[per8] = imx_clk_divider("per8", "per8_sel", ccm(CCM_PCDR2), 0, 6);
+       clk[per9] = imx_clk_divider("per9", "per9_sel", ccm(CCM_PCDR2), 8, 6);
+       clk[per10] = imx_clk_divider("per10", "per10_sel", ccm(CCM_PCDR2), 16, 6);
+       clk[per11] = imx_clk_divider("per11", "per11_sel", ccm(CCM_PCDR2), 24, 6);
+       clk[per12] = imx_clk_divider("per12", "per12_sel", ccm(CCM_PCDR3), 0, 6);
+       clk[per13] = imx_clk_divider("per13", "per13_sel", ccm(CCM_PCDR3), 8, 6);
+       clk[per14] = imx_clk_divider("per14", "per14_sel", ccm(CCM_PCDR3), 16, 6);
+       clk[per15] = imx_clk_divider("per15", "per15_sel", ccm(CCM_PCDR3), 24, 6);
+       clk[csi_ipg_per] = imx_clk_gate("csi_ipg_per", "per0", ccm(CCM_CGCR0), 0);
+       clk[esdhc1_ipg_per] = imx_clk_gate("esdhc1_ipg_per", "per3", ccm(CCM_CGCR0),  3);
+       clk[esdhc2_ipg_per] = imx_clk_gate("esdhc2_ipg_per", "per4", ccm(CCM_CGCR0),  4);
+       clk[gpt_ipg_per] = imx_clk_gate("gpt_ipg_per", "per5", ccm(CCM_CGCR0),  5);
+       clk[i2c_ipg_per] = imx_clk_gate("i2c_ipg_per", "per6", ccm(CCM_CGCR0),  6);
+       clk[lcdc_ipg_per] = imx_clk_gate("lcdc_ipg_per", "per7", ccm(CCM_CGCR0),  7);
+       clk[nfc_ipg_per] = imx_clk_gate("nfc_ipg_per", "per8", ccm(CCM_CGCR0),  8);
+       clk[ssi1_ipg_per] = imx_clk_gate("ssi1_ipg_per", "per13", ccm(CCM_CGCR0), 13);
+       clk[ssi2_ipg_per] = imx_clk_gate("ssi2_ipg_per", "per14", ccm(CCM_CGCR0), 14);
+       clk[uart_ipg_per] = imx_clk_gate("uart_ipg_per", "per15", ccm(CCM_CGCR0), 15);
+       clk[csi_ahb] = imx_clk_gate("csi_ahb", "ahb", ccm(CCM_CGCR0), 18);
+       clk[esdhc1_ahb] = imx_clk_gate("esdhc1_ahb", "ahb", ccm(CCM_CGCR0), 21);
+       clk[esdhc2_ahb] = imx_clk_gate("esdhc2_ahb", "ahb", ccm(CCM_CGCR0), 22);
+       clk[fec_ahb] = imx_clk_gate("fec_ahb", "ahb", ccm(CCM_CGCR0), 23);
+       clk[lcdc_ahb] = imx_clk_gate("lcdc_ahb", "ahb", ccm(CCM_CGCR0), 24);
+       clk[sdma_ahb] = imx_clk_gate("sdma_ahb", "ahb", ccm(CCM_CGCR0), 26);
+       clk[usbotg_ahb] = imx_clk_gate("usbotg_ahb", "ahb", ccm(CCM_CGCR0), 28);
+       clk[can1_ipg] = imx_clk_gate("can1_ipg", "ipg", ccm(CCM_CGCR1),  2);
+       clk[can2_ipg] = imx_clk_gate("can2_ipg", "ipg", ccm(CCM_CGCR1),  3);
+       clk[csi_ipg] = imx_clk_gate("csi_ipg", "ipg", ccm(CCM_CGCR1),  4);
+       clk[cspi1_ipg] = imx_clk_gate("cspi1_ipg", "ipg", ccm(CCM_CGCR1),  5);
+       clk[cspi2_ipg] = imx_clk_gate("cspi2_ipg", "ipg", ccm(CCM_CGCR1),  6);
+       clk[cspi3_ipg] = imx_clk_gate("cspi3_ipg", "ipg", ccm(CCM_CGCR1),  7);
+       clk[dryice_ipg] = imx_clk_gate("dryice_ipg", "ipg", ccm(CCM_CGCR1),  8);
+       clk[esdhc1_ipg] = imx_clk_gate("esdhc1_ipg", "ipg", ccm(CCM_CGCR1), 13);
+       clk[esdhc2_ipg] = imx_clk_gate("esdhc2_ipg", "ipg", ccm(CCM_CGCR1), 14);
+       clk[fec_ipg] = imx_clk_gate("fec_ipg", "ipg", ccm(CCM_CGCR1), 15);
+       clk[iim_ipg] = imx_clk_gate("iim_ipg", "ipg", ccm(CCM_CGCR1), 26);
+       clk[kpp_ipg] = imx_clk_gate("kpp_ipg", "ipg", ccm(CCM_CGCR1), 28);
+       clk[lcdc_ipg] = imx_clk_gate("lcdc_ipg", "ipg", ccm(CCM_CGCR1), 29);
+       clk[pwm1_ipg] = imx_clk_gate("pwm1_ipg", "ipg", ccm(CCM_CGCR1), 31);
+       clk[pwm2_ipg] = imx_clk_gate("pwm2_ipg", "ipg", ccm(CCM_CGCR2),  0);
+       clk[pwm3_ipg] = imx_clk_gate("pwm3_ipg", "ipg", ccm(CCM_CGCR2),  1);
+       clk[pwm4_ipg] = imx_clk_gate("pwm4_ipg", "ipg", ccm(CCM_CGCR2),  2);
+       clk[sdma_ipg] = imx_clk_gate("sdma_ipg", "ipg", ccm(CCM_CGCR2),  6);
+       clk[ssi1_ipg] = imx_clk_gate("ssi1_ipg", "ipg", ccm(CCM_CGCR2), 11);
+       clk[ssi2_ipg] = imx_clk_gate("ssi2_ipg", "ipg", ccm(CCM_CGCR2), 12);
+       clk[tsc_ipg] = imx_clk_gate("tsc_ipg", "ipg", ccm(CCM_CGCR2), 13);
+       clk[uart1_ipg] = imx_clk_gate("uart1_ipg", "ipg", ccm(CCM_CGCR2), 14);
+       clk[uart2_ipg] = imx_clk_gate("uart2_ipg", "ipg", ccm(CCM_CGCR2), 15);
+       clk[uart3_ipg] = imx_clk_gate("uart3_ipg", "ipg", ccm(CCM_CGCR2), 16);
+       clk[uart4_ipg] = imx_clk_gate("uart4_ipg", "ipg", ccm(CCM_CGCR2), 17);
+       clk[uart5_ipg] = imx_clk_gate("uart5_ipg", "ipg", ccm(CCM_CGCR2), 18);
+       clk[wdt_ipg] = imx_clk_gate("wdt_ipg", "ipg", ccm(CCM_CGCR2), 19);
+
+       for (i = 0; i < ARRAY_SIZE(clk); i++)
+               if (IS_ERR(clk[i]))
+                       pr_err("i.MX25 clk %d: register failed with %ld\n",
+                               i, PTR_ERR(clk[i]));
+
+       /* i.mx25 has the i.mx21 type uart */
+       clk_register_clkdev(clk[uart1_ipg], "ipg", "imx21-uart.0");
+       clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.0");
+       clk_register_clkdev(clk[uart2_ipg], "ipg", "imx21-uart.1");
+       clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.1");
+       clk_register_clkdev(clk[uart3_ipg], "ipg", "imx21-uart.2");
+       clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.2");
+       clk_register_clkdev(clk[uart4_ipg], "ipg", "imx21-uart.3");
+       clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.3");
+       clk_register_clkdev(clk[uart5_ipg], "ipg", "imx21-uart.4");
+       clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.4");
+       clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
+       clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
+       clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
+       clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.0");
+       clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
+       clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
+       clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.1");
+       clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
+       clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
+       clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.2");
+       clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
+       clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usbotg_ahb], "ahb", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
+       clk_register_clkdev(clk[nfc_ipg_per], NULL, "mxc_nand.0");
+       /* i.mx25 has the i.mx35 type cspi */
+       clk_register_clkdev(clk[cspi1_ipg], NULL, "imx35-cspi.0");
+       clk_register_clkdev(clk[cspi2_ipg], NULL, "imx35-cspi.1");
+       clk_register_clkdev(clk[cspi3_ipg], NULL, "imx35-cspi.2");
+       clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.0");
+       clk_register_clkdev(clk[per10], "per", "mxc_pwm.0");
+       clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.1");
+       clk_register_clkdev(clk[per10], "per", "mxc_pwm.1");
+       clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.2");
+       clk_register_clkdev(clk[per10], "per", "mxc_pwm.2");
+       clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.3");
+       clk_register_clkdev(clk[per10], "per", "mxc_pwm.3");
+       clk_register_clkdev(clk[kpp_ipg], NULL, "imx-keypad");
+       clk_register_clkdev(clk[tsc_ipg], NULL, "mx25-adc");
+       clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx-i2c.0");
+       clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx-i2c.1");
+       clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx-i2c.2");
+       clk_register_clkdev(clk[fec_ipg], "ipg", "imx25-fec.0");
+       clk_register_clkdev(clk[fec_ahb], "ahb", "imx25-fec.0");
+       clk_register_clkdev(clk[dryice_ipg], NULL, "imxdi_rtc.0");
+       clk_register_clkdev(clk[lcdc_ipg_per], "per", "imx-fb.0");
+       clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
+       clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
+       clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
+       clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
+       clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
+       clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
+       clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
+       clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
+       clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
+       clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
+       clk_register_clkdev(clk[esdhc2_ipg_per], "per", "sdhci-esdhc-imx25.1");
+       clk_register_clkdev(clk[esdhc2_ipg], "ipg", "sdhci-esdhc-imx25.1");
+       clk_register_clkdev(clk[esdhc2_ahb], "ahb", "sdhci-esdhc-imx25.1");
+       clk_register_clkdev(clk[csi_ipg_per], "per", "mx2-camera.0");
+       clk_register_clkdev(clk[csi_ipg], "ipg", "mx2-camera.0");
+       clk_register_clkdev(clk[csi_ahb], "ahb", "mx2-camera.0");
+       clk_register_clkdev(clk[dummy], "audmux", NULL);
+       clk_register_clkdev(clk[can1_ipg], NULL, "flexcan.0");
+       clk_register_clkdev(clk[can2_ipg], NULL, "flexcan.1");
+       /* i.mx25 has the i.mx35 type sdma */
+       clk_register_clkdev(clk[sdma_ipg], "ipg", "imx35-sdma");
+       clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
+       clk_register_clkdev(clk[iim_ipg], "iim", NULL);
+
+       mxc_timer_init(NULL, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
+       return 0;
+}
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c
new file mode 100644 (file)
index 0000000..50a7ebd
--- /dev/null
@@ -0,0 +1,290 @@
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+
+#include <mach/common.h>
+#include <mach/hardware.h>
+#include "clk.h"
+
+#define IO_ADDR_CCM(off)       (MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR + (off)))
+
+/* Register offsets */
+#define CCM_CSCR               IO_ADDR_CCM(0x0)
+#define CCM_MPCTL0             IO_ADDR_CCM(0x4)
+#define CCM_MPCTL1             IO_ADDR_CCM(0x8)
+#define CCM_SPCTL0             IO_ADDR_CCM(0xc)
+#define CCM_SPCTL1             IO_ADDR_CCM(0x10)
+#define CCM_OSC26MCTL          IO_ADDR_CCM(0x14)
+#define CCM_PCDR0              IO_ADDR_CCM(0x18)
+#define CCM_PCDR1              IO_ADDR_CCM(0x1c)
+#define CCM_PCCR0              IO_ADDR_CCM(0x20)
+#define CCM_PCCR1              IO_ADDR_CCM(0x24)
+#define CCM_CCSR               IO_ADDR_CCM(0x28)
+#define CCM_PMCTL              IO_ADDR_CCM(0x2c)
+#define CCM_PMCOUNT            IO_ADDR_CCM(0x30)
+#define CCM_WKGDCTL            IO_ADDR_CCM(0x34)
+
+#define CCM_CSCR_UPDATE_DIS    (1 << 31)
+#define CCM_CSCR_SSI2          (1 << 23)
+#define CCM_CSCR_SSI1          (1 << 22)
+#define CCM_CSCR_VPU           (1 << 21)
+#define CCM_CSCR_MSHC           (1 << 20)
+#define CCM_CSCR_SPLLRES        (1 << 19)
+#define CCM_CSCR_MPLLRES        (1 << 18)
+#define CCM_CSCR_SP             (1 << 17)
+#define CCM_CSCR_MCU            (1 << 16)
+#define CCM_CSCR_OSC26MDIV      (1 << 4)
+#define CCM_CSCR_OSC26M         (1 << 3)
+#define CCM_CSCR_FPM            (1 << 2)
+#define CCM_CSCR_SPEN           (1 << 1)
+#define CCM_CSCR_MPEN           (1 << 0)
+
+/* i.MX27 TO 2+ */
+#define CCM_CSCR_ARM_SRC        (1 << 15)
+
+#define CCM_SPCTL1_LF           (1 << 15)
+#define CCM_SPCTL1_BRMO         (1 << 6)
+
+/*
+ * Parent-name tables for the i.MX27 mux clocks.  Array order corresponds to
+ * the encoding of the hardware mux field passed to imx_clk_mux() below.
+ */
+static const char *vpu_sel_clks[] = { "spll", "mpll_main2", };
+static const char *cpu_sel_clks[] = { "mpll_main2", "mpll", };
+static const char *clko_sel_clks[] = {
+       "ckil", "prem", "ckih", "ckih",
+       "ckih", "mpll", "spll", "cpu_div",
+       "ahb", "ipg", "per1_div", "per2_div",
+       "per3_div", "per4_div", "ssi1_div", "ssi2_div",
+       "nfc_div", "mshc_div", "vpu_div", "60m",
+       "32k", "usb_div", "dptc",
+};
+
+static const char *ssi_sel_clks[] = { "spll", "mpll", };
+
+/*
+ * Indices into the clk[] array below; one entry per clock registered by
+ * mx27_clocks_init().  clk_max is the array size, not a clock.
+ */
+enum mx27_clks {
+       dummy, ckih, ckil, mpll, spll, mpll_main2, ahb, ipg, nfc_div, per1_div,
+       per2_div, per3_div, per4_div, vpu_sel, vpu_div, usb_div, cpu_sel,
+       clko_sel, cpu_div, clko_div, ssi1_sel, ssi2_sel, ssi1_div, ssi2_div,
+       clko_en, ssi2_ipg_gate, ssi1_ipg_gate, slcdc_ipg_gate, sdhc3_ipg_gate,
+       sdhc2_ipg_gate, sdhc1_ipg_gate, scc_ipg_gate, sahara_ipg_gate,
+       rtc_ipg_gate, pwm_ipg_gate, owire_ipg_gate, lcdc_ipg_gate,
+       kpp_ipg_gate, iim_ipg_gate, i2c2_ipg_gate, i2c1_ipg_gate,
+       gpt6_ipg_gate, gpt5_ipg_gate, gpt4_ipg_gate, gpt3_ipg_gate,
+       gpt2_ipg_gate, gpt1_ipg_gate, gpio_ipg_gate, fec_ipg_gate,
+       emma_ipg_gate, dma_ipg_gate, cspi3_ipg_gate, cspi2_ipg_gate,
+       cspi1_ipg_gate, nfc_baud_gate, ssi2_baud_gate, ssi1_baud_gate,
+       vpu_baud_gate, per4_gate, per3_gate, per2_gate, per1_gate,
+       usb_ahb_gate, slcdc_ahb_gate, sahara_ahb_gate, lcdc_ahb_gate,
+       vpu_ahb_gate, fec_ahb_gate, emma_ahb_gate, emi_ahb_gate, dma_ahb_gate,
+       csi_ahb_gate, brom_ahb_gate, ata_ahb_gate, wdog_ipg_gate, usb_ipg_gate,
+       uart6_ipg_gate, uart5_ipg_gate, uart4_ipg_gate, uart3_ipg_gate,
+       uart2_ipg_gate, uart1_ipg_gate, clk_max
+};
+
+/* All i.MX27 clocks, indexed by enum mx27_clks; filled by mx27_clocks_init(). */
+static struct clk *clk[clk_max];
+
+/*
+ * mx27_clocks_init - register the i.MX27 clock tree.
+ * @fref: rate in Hz of the external high-frequency oscillator ("ckih").
+ *
+ * Registers all PLLs, muxes, dividers and gates into clk[], creates the
+ * clkdev lookups for the platform devices, starts the GPT1 system timer
+ * and enables the EMI AHB gate.  Always returns 0.
+ */
+int __init mx27_clocks_init(unsigned long fref)
+{
+       int i;
+
+       clk[dummy] = imx_clk_fixed("dummy", 0);
+       clk[ckih] = imx_clk_fixed("ckih", fref);
+       clk[ckil] = imx_clk_fixed("ckil", 32768);
+       clk[mpll] = imx_clk_pllv1("mpll", "ckih", CCM_MPCTL0);
+       clk[spll] = imx_clk_pllv1("spll", "ckih", CCM_SPCTL0);
+       clk[mpll_main2] = imx_clk_fixed_factor("mpll_main2", "mpll", 2, 3);
+
+       /*
+        * The CSCR bus-divider layout differs between silicon revisions:
+        * on TO2+ AHB has its own 2-bit field and IPG is fixed at AHB/2,
+        * while on older silicon both AHB and IPG are register dividers.
+        */
+       if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
+               clk[ahb] = imx_clk_divider("ahb", "mpll_main2", CCM_CSCR, 8, 2);
+               clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2);
+       } else {
+               clk[ahb] = imx_clk_divider("ahb", "mpll_main2", CCM_CSCR, 9, 4);
+               clk[ipg] = imx_clk_divider("ipg", "ahb", CCM_CSCR, 8, 1);
+       }
+
+       clk[nfc_div] = imx_clk_divider("nfc_div", "ahb", CCM_PCDR0, 6, 4);
+       clk[per1_div] = imx_clk_divider("per1_div", "mpll_main2", CCM_PCDR1, 0, 6);
+       clk[per2_div] = imx_clk_divider("per2_div", "mpll_main2", CCM_PCDR1, 8, 6);
+       clk[per3_div] = imx_clk_divider("per3_div", "mpll_main2", CCM_PCDR1, 16, 6);
+       clk[per4_div] = imx_clk_divider("per4_div", "mpll_main2", CCM_PCDR1, 24, 6);
+       clk[vpu_sel] = imx_clk_mux("vpu_sel", CCM_CSCR, 21, 1, vpu_sel_clks, ARRAY_SIZE(vpu_sel_clks));
+       clk[vpu_div] = imx_clk_divider("vpu_div", "vpu_sel", CCM_PCDR0, 10, 3);
+       clk[usb_div] = imx_clk_divider("usb_div", "spll", CCM_CSCR, 28, 3);
+       clk[cpu_sel] = imx_clk_mux("cpu_sel", CCM_CSCR, 15, 1, cpu_sel_clks, ARRAY_SIZE(cpu_sel_clks));
+       clk[clko_sel] = imx_clk_mux("clko_sel", CCM_CCSR, 0, 5, clko_sel_clks, ARRAY_SIZE(clko_sel_clks));
+       /* CPU divider moved (shift 13/width 3 -> shift 12/width 2) on TO2+. */
+       if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
+               clk[cpu_div] = imx_clk_divider("cpu_div", "cpu_sel", CCM_CSCR, 12, 2);
+       else
+               clk[cpu_div] = imx_clk_divider("cpu_div", "cpu_sel", CCM_CSCR, 13, 3);
+       clk[clko_div] = imx_clk_divider("clko_div", "clko_sel", CCM_PCDR0, 22, 3);
+       clk[ssi1_sel] = imx_clk_mux("ssi1_sel", CCM_CSCR, 22, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
+       clk[ssi2_sel] = imx_clk_mux("ssi2_sel", CCM_CSCR, 23, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
+       clk[ssi1_div] = imx_clk_divider("ssi1_div", "ssi1_sel", CCM_PCDR0, 16, 6);
+       clk[ssi2_div] = imx_clk_divider("ssi2_div", "ssi2_sel", CCM_PCDR0, 26, 3);
+       /*
+        * NOTE(review): clko_en and ssi2_ipg_gate both gate on PCCR0 bit 0 —
+        * verify against the i.MX27 reference manual that this is intended.
+        */
+       clk[clko_en] = imx_clk_gate("clko_en", "clko_div", CCM_PCCR0, 0);
+       clk[ssi2_ipg_gate] = imx_clk_gate("ssi2_ipg_gate", "ipg", CCM_PCCR0, 0);
+       clk[ssi1_ipg_gate] = imx_clk_gate("ssi1_ipg_gate", "ipg", CCM_PCCR0, 1);
+       clk[slcdc_ipg_gate] = imx_clk_gate("slcdc_ipg_gate", "ipg", CCM_PCCR0, 2);
+       clk[sdhc3_ipg_gate] = imx_clk_gate("sdhc3_ipg_gate", "ipg", CCM_PCCR0, 3);
+       clk[sdhc2_ipg_gate] = imx_clk_gate("sdhc2_ipg_gate", "ipg", CCM_PCCR0, 4);
+       clk[sdhc1_ipg_gate] = imx_clk_gate("sdhc1_ipg_gate", "ipg", CCM_PCCR0, 5);
+       clk[scc_ipg_gate] = imx_clk_gate("scc_ipg_gate", "ipg", CCM_PCCR0, 6);
+       clk[sahara_ipg_gate] = imx_clk_gate("sahara_ipg_gate", "ipg", CCM_PCCR0, 7);
+       clk[rtc_ipg_gate] = imx_clk_gate("rtc_ipg_gate", "ipg", CCM_PCCR0, 9);
+       clk[pwm_ipg_gate] = imx_clk_gate("pwm_ipg_gate", "ipg", CCM_PCCR0, 11);
+       clk[owire_ipg_gate] = imx_clk_gate("owire_ipg_gate", "ipg", CCM_PCCR0, 12);
+       clk[lcdc_ipg_gate] = imx_clk_gate("lcdc_ipg_gate", "ipg", CCM_PCCR0, 14);
+       clk[kpp_ipg_gate] = imx_clk_gate("kpp_ipg_gate", "ipg", CCM_PCCR0, 15);
+       clk[iim_ipg_gate] = imx_clk_gate("iim_ipg_gate", "ipg", CCM_PCCR0, 16);
+       clk[i2c2_ipg_gate] = imx_clk_gate("i2c2_ipg_gate", "ipg", CCM_PCCR0, 17);
+       clk[i2c1_ipg_gate] = imx_clk_gate("i2c1_ipg_gate", "ipg", CCM_PCCR0, 18);
+       clk[gpt6_ipg_gate] = imx_clk_gate("gpt6_ipg_gate", "ipg", CCM_PCCR0, 19);
+       clk[gpt5_ipg_gate] = imx_clk_gate("gpt5_ipg_gate", "ipg", CCM_PCCR0, 20);
+       clk[gpt4_ipg_gate] = imx_clk_gate("gpt4_ipg_gate", "ipg", CCM_PCCR0, 21);
+       clk[gpt3_ipg_gate] = imx_clk_gate("gpt3_ipg_gate", "ipg", CCM_PCCR0, 22);
+       clk[gpt2_ipg_gate] = imx_clk_gate("gpt2_ipg_gate", "ipg", CCM_PCCR0, 23);
+       clk[gpt1_ipg_gate] = imx_clk_gate("gpt1_ipg_gate", "ipg", CCM_PCCR0, 24);
+       clk[gpio_ipg_gate] = imx_clk_gate("gpio_ipg_gate", "ipg", CCM_PCCR0, 25);
+       clk[fec_ipg_gate] = imx_clk_gate("fec_ipg_gate", "ipg", CCM_PCCR0, 26);
+       clk[emma_ipg_gate] = imx_clk_gate("emma_ipg_gate", "ipg", CCM_PCCR0, 27);
+       clk[dma_ipg_gate] = imx_clk_gate("dma_ipg_gate", "ipg", CCM_PCCR0, 28);
+       clk[cspi3_ipg_gate] = imx_clk_gate("cspi3_ipg_gate", "ipg", CCM_PCCR0, 29);
+       clk[cspi2_ipg_gate] = imx_clk_gate("cspi2_ipg_gate", "ipg", CCM_PCCR0, 30);
+       clk[cspi1_ipg_gate] = imx_clk_gate("cspi1_ipg_gate", "ipg", CCM_PCCR0, 31);
+       clk[nfc_baud_gate] = imx_clk_gate("nfc_baud_gate", "nfc_div", CCM_PCCR1,  3);
+       clk[ssi2_baud_gate] = imx_clk_gate("ssi2_baud_gate", "ssi2_div", CCM_PCCR1,  4);
+       clk[ssi1_baud_gate] = imx_clk_gate("ssi1_baud_gate", "ssi1_div", CCM_PCCR1,  5);
+       clk[vpu_baud_gate] = imx_clk_gate("vpu_baud_gate", "vpu_div", CCM_PCCR1,  6);
+       clk[per4_gate] = imx_clk_gate("per4_gate", "per4_div", CCM_PCCR1,  7);
+       clk[per3_gate] = imx_clk_gate("per3_gate", "per3_div", CCM_PCCR1,  8);
+       clk[per2_gate] = imx_clk_gate("per2_gate", "per2_div", CCM_PCCR1,  9);
+       clk[per1_gate] = imx_clk_gate("per1_gate", "per1_div", CCM_PCCR1, 10);
+       clk[usb_ahb_gate] = imx_clk_gate("usb_ahb_gate", "ahb", CCM_PCCR1, 11);
+       clk[slcdc_ahb_gate] = imx_clk_gate("slcdc_ahb_gate", "ahb", CCM_PCCR1, 12);
+       clk[sahara_ahb_gate] = imx_clk_gate("sahara_ahb_gate", "ahb", CCM_PCCR1, 13);
+       clk[lcdc_ahb_gate] = imx_clk_gate("lcdc_ahb_gate", "ahb", CCM_PCCR1, 15);
+       clk[vpu_ahb_gate] = imx_clk_gate("vpu_ahb_gate", "ahb", CCM_PCCR1, 16);
+       clk[fec_ahb_gate] = imx_clk_gate("fec_ahb_gate", "ahb", CCM_PCCR1, 17);
+       clk[emma_ahb_gate] = imx_clk_gate("emma_ahb_gate", "ahb", CCM_PCCR1, 18);
+       clk[emi_ahb_gate] = imx_clk_gate("emi_ahb_gate", "ahb", CCM_PCCR1, 19);
+       clk[dma_ahb_gate] = imx_clk_gate("dma_ahb_gate", "ahb", CCM_PCCR1, 20);
+       clk[csi_ahb_gate] = imx_clk_gate("csi_ahb_gate", "ahb", CCM_PCCR1, 21);
+       clk[brom_ahb_gate] = imx_clk_gate("brom_ahb_gate", "ahb", CCM_PCCR1, 22);
+       clk[ata_ahb_gate] = imx_clk_gate("ata_ahb_gate", "ahb", CCM_PCCR1, 23);
+       clk[wdog_ipg_gate] = imx_clk_gate("wdog_ipg_gate", "ipg", CCM_PCCR1, 24);
+       clk[usb_ipg_gate] = imx_clk_gate("usb_ipg_gate", "ipg", CCM_PCCR1, 25);
+       clk[uart6_ipg_gate] = imx_clk_gate("uart6_ipg_gate", "ipg", CCM_PCCR1, 26);
+       clk[uart5_ipg_gate] = imx_clk_gate("uart5_ipg_gate", "ipg", CCM_PCCR1, 27);
+       clk[uart4_ipg_gate] = imx_clk_gate("uart4_ipg_gate", "ipg", CCM_PCCR1, 28);
+       clk[uart3_ipg_gate] = imx_clk_gate("uart3_ipg_gate", "ipg", CCM_PCCR1, 29);
+       clk[uart2_ipg_gate] = imx_clk_gate("uart2_ipg_gate", "ipg", CCM_PCCR1, 30);
+       clk[uart1_ipg_gate] = imx_clk_gate("uart1_ipg_gate", "ipg", CCM_PCCR1, 31);
+
+       /* Registration failures are logged but not fatal. */
+       for (i = 0; i < ARRAY_SIZE(clk); i++)
+               if (IS_ERR(clk[i]))
+                       pr_err("i.MX27 clk %d: register failed with %ld\n",
+                               i, PTR_ERR(clk[i]));
+
+       /* clkdev lookups: map each clock to its (con_id, dev_id) consumer. */
+       clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
+       clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.0");
+       clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
+       clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.1");
+       clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
+       clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.2");
+       clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
+       clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.3");
+       clk_register_clkdev(clk[uart5_ipg_gate], "ipg", "imx21-uart.4");
+       clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.4");
+       clk_register_clkdev(clk[uart6_ipg_gate], "ipg", "imx21-uart.5");
+       clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.5");
+       clk_register_clkdev(clk[gpt1_ipg_gate], "ipg", "imx-gpt.0");
+       clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.0");
+       clk_register_clkdev(clk[gpt2_ipg_gate], "ipg", "imx-gpt.1");
+       clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.1");
+       clk_register_clkdev(clk[gpt3_ipg_gate], "ipg", "imx-gpt.2");
+       clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.2");
+       clk_register_clkdev(clk[gpt4_ipg_gate], "ipg", "imx-gpt.3");
+       clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.3");
+       clk_register_clkdev(clk[gpt5_ipg_gate], "ipg", "imx-gpt.4");
+       clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.4");
+       clk_register_clkdev(clk[gpt6_ipg_gate], "ipg", "imx-gpt.5");
+       clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.5");
+       clk_register_clkdev(clk[pwm_ipg_gate], NULL, "mxc_pwm.0");
+       clk_register_clkdev(clk[per2_gate], "per", "mxc-mmc.0");
+       clk_register_clkdev(clk[sdhc1_ipg_gate], "ipg", "mxc-mmc.0");
+       clk_register_clkdev(clk[per2_gate], "per", "mxc-mmc.1");
+       clk_register_clkdev(clk[sdhc2_ipg_gate], "ipg", "mxc-mmc.1");
+       clk_register_clkdev(clk[per2_gate], "per", "mxc-mmc.2");
+       /*
+        * NOTE(review): mxc-mmc.2 reuses sdhc2_ipg_gate while sdhc3_ipg_gate
+        * is registered above but never looked up — confirm this is not a typo.
+        */
+       clk_register_clkdev(clk[sdhc2_ipg_gate], "ipg", "mxc-mmc.2");
+       clk_register_clkdev(clk[cspi1_ipg_gate], NULL, "imx27-cspi.0");
+       clk_register_clkdev(clk[cspi2_ipg_gate], NULL, "imx27-cspi.1");
+       clk_register_clkdev(clk[cspi3_ipg_gate], NULL, "imx27-cspi.2");
+       clk_register_clkdev(clk[per3_gate], "per", "imx-fb.0");
+       clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0");
+       clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx-fb.0");
+       clk_register_clkdev(clk[csi_ahb_gate], NULL, "mx2-camera.0");
+       clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
+       clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.0");
+       clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.0");
+       clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
+       clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.1");
+       clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.1");
+       clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
+       clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.2");
+       clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.2");
+       clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0");
+       clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
+       clk_register_clkdev(clk[nfc_baud_gate], NULL, "mxc_nand.0");
+       clk_register_clkdev(clk[vpu_baud_gate], "per", "imx-vpu");
+       clk_register_clkdev(clk[vpu_ahb_gate], "ahb", "imx-vpu");
+       clk_register_clkdev(clk[dma_ahb_gate], "ahb", "imx-dma");
+       clk_register_clkdev(clk[dma_ipg_gate], "ipg", "imx-dma");
+       clk_register_clkdev(clk[fec_ipg_gate], "ipg", "imx27-fec.0");
+       clk_register_clkdev(clk[fec_ahb_gate], "ahb", "imx27-fec.0");
+       clk_register_clkdev(clk[wdog_ipg_gate], NULL, "imx2-wdt.0");
+       clk_register_clkdev(clk[i2c1_ipg_gate], NULL, "imx-i2c.0");
+       clk_register_clkdev(clk[i2c2_ipg_gate], NULL, "imx-i2c.1");
+       clk_register_clkdev(clk[owire_ipg_gate], NULL, "mxc_w1.0");
+       clk_register_clkdev(clk[kpp_ipg_gate], NULL, "imx-keypad");
+       clk_register_clkdev(clk[emma_ahb_gate], "ahb", "imx-emma");
+       clk_register_clkdev(clk[emma_ipg_gate], "ipg", "imx-emma");
+       clk_register_clkdev(clk[iim_ipg_gate], "iim", NULL);
+       clk_register_clkdev(clk[gpio_ipg_gate], "gpio", NULL);
+       clk_register_clkdev(clk[brom_ahb_gate], "brom", NULL);
+       clk_register_clkdev(clk[ata_ahb_gate], "ata", NULL);
+       clk_register_clkdev(clk[rtc_ipg_gate], "rtc", NULL);
+       clk_register_clkdev(clk[scc_ipg_gate], "scc", NULL);
+       clk_register_clkdev(clk[cpu_div], "cpu", NULL);
+       clk_register_clkdev(clk[emi_ahb_gate], "emi_ahb" , NULL);
+       clk_register_clkdev(clk[ssi1_baud_gate], "bitrate" , "imx-ssi.0");
+       clk_register_clkdev(clk[ssi2_baud_gate], "bitrate" , "imx-ssi.1");
+
+       /* Start the GPT1-based system timer. */
+       mxc_timer_init(NULL, MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR),
+                       MX27_INT_GPT1);
+
+       /* Keep external memory interface clocked from boot onwards. */
+       clk_prepare_enable(clk[emi_ahb_gate]);
+
+       return 0;
+}
+
+#ifdef CONFIG_OF
+/*
+ * mx27_clocks_init_dt - DT entry point for the i.MX27 clock setup.
+ *
+ * Looks for a "fixed-clock" node that is also compatible with
+ * "fsl,imx-osc26m" and reads its "clock-frequency" property to determine
+ * the ckih rate; falls back to 26 MHz when no such node/property exists.
+ * Then delegates to mx27_clocks_init().
+ */
+int __init mx27_clocks_init_dt(void)
+{
+       struct device_node *np;
+       u32 fref = 26000000; /* default */
+
+       for_each_compatible_node(np, NULL, "fixed-clock") {
+               if (!of_device_is_compatible(np, "fsl,imx-osc26m"))
+                       continue;
+
+               /* of_property_read_u32() returns 0 on success. */
+               if (!of_property_read_u32(np, "clock-frequency", &fref))
+                       break;
+       }
+
+       return mx27_clocks_init(fref);
+}
+#endif
diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c
new file mode 100644 (file)
index 0000000..a854b9c
--- /dev/null
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2012 Sascha Hauer <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/mx31.h>
+#include <mach/common.h>
+
+#include "clk.h"
+#include "crmregs-imx3.h"
+
+/*
+ * Parent-name tables for the i.MX31 mux clocks (order matches HW mux field).
+ */
+static const char *mcu_main_sel[] = { "spll", "mpll", };
+static const char *per_sel[] = { "per_div", "ipg", };
+static const char *csi_sel[] = { "upll", "spll", };
+static const char *fir_sel[] = { "mcu_main", "upll", "spll" };
+
+/*
+ * Indices into the clk[] array below; one entry per clock registered by
+ * mx31_clocks_init().  clk_max is the array size, not a clock.
+ */
+enum mx31_clks {
+       ckih, ckil, mpll, spll, upll, mcu_main, hsp, ahb, nfc, ipg, per_div,
+       per, csi, fir, csi_div, usb_div_pre, usb_div_post, fir_div_pre,
+       fir_div_post, sdhc1_gate, sdhc2_gate, gpt_gate, epit1_gate, epit2_gate,
+       iim_gate, ata_gate, sdma_gate, cspi3_gate, rng_gate, uart1_gate,
+       uart2_gate, ssi1_gate, i2c1_gate, i2c2_gate, i2c3_gate, hantro_gate,
+       mstick1_gate, mstick2_gate, csi_gate, rtc_gate, wdog_gate, pwm_gate,
+       sim_gate, ect_gate, usb_gate, kpp_gate, ipu_gate, uart3_gate,
+       uart4_gate, uart5_gate, owire_gate, ssi2_gate, cspi1_gate, cspi2_gate,
+       gacc_gate, emi_gate, rtic_gate, firi_gate, clk_max
+};
+
+/* All i.MX31 clocks, indexed by enum mx31_clks; filled by mx31_clocks_init(). */
+static struct clk *clk[clk_max];
+
+/*
+ * mx31_clocks_init - register the i.MX31 clock tree.
+ * @fref: rate in Hz of the external high-frequency oscillator ("ckih").
+ *
+ * Registers PLLs, muxes, dividers and (two-bit) gates into clk[], creates
+ * the clkdev consumer lookups, reparents the CSI mux to the UPLL, reads
+ * the silicon revision (with the IIM gate briefly enabled), and starts the
+ * GPT1 system timer.  Always returns 0.
+ */
+int __init mx31_clocks_init(unsigned long fref)
+{
+       /* All CCM register offsets below are relative to this mapped base. */
+       void __iomem *base = MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR);
+       int i;
+
+       clk[ckih] = imx_clk_fixed("ckih", fref);
+       clk[ckil] = imx_clk_fixed("ckil", 32768);
+       clk[mpll] = imx_clk_pllv1("mpll", "ckih", base + MXC_CCM_MPCTL);
+       clk[spll] = imx_clk_pllv1("spll", "ckih", base + MXC_CCM_SRPCTL);
+       clk[upll] = imx_clk_pllv1("upll", "ckih", base + MXC_CCM_UPCTL);
+       clk[mcu_main] = imx_clk_mux("mcu_main", base + MXC_CCM_PMCR0, 31, 1, mcu_main_sel, ARRAY_SIZE(mcu_main_sel));
+       clk[hsp] = imx_clk_divider("hsp", "mcu_main", base + MXC_CCM_PDR0, 11, 3);
+       clk[ahb] = imx_clk_divider("ahb", "mcu_main", base + MXC_CCM_PDR0, 3, 3);
+       clk[nfc] = imx_clk_divider("nfc", "ahb", base + MXC_CCM_PDR0, 8, 3);
+       clk[ipg] = imx_clk_divider("ipg", "ahb", base + MXC_CCM_PDR0, 6, 2);
+       clk[per_div] = imx_clk_divider("per_div", "upll", base + MXC_CCM_PDR0, 16, 5);
+       clk[per] = imx_clk_mux("per", base + MXC_CCM_CCMR, 24, 1, per_sel, ARRAY_SIZE(per_sel));
+       /* Note: clk[csi]/clk[fir] are registered under the names "csi_sel"/"fir_sel". */
+       clk[csi] = imx_clk_mux("csi_sel", base + MXC_CCM_CCMR, 25, 1, csi_sel, ARRAY_SIZE(csi_sel));
+       clk[fir] = imx_clk_mux("fir_sel", base + MXC_CCM_CCMR, 11, 2, fir_sel, ARRAY_SIZE(fir_sel));
+       clk[csi_div] = imx_clk_divider("csi_div", "csi_sel", base + MXC_CCM_PDR0, 23, 9);
+       clk[usb_div_pre] = imx_clk_divider("usb_div_pre", "upll", base + MXC_CCM_PDR1, 30, 2);
+       clk[usb_div_post] = imx_clk_divider("usb_div_post", "usb_div_pre", base + MXC_CCM_PDR1, 27, 3);
+       clk[fir_div_pre] = imx_clk_divider("fir_div_pre", "fir_sel", base + MXC_CCM_PDR1, 24, 3);
+       clk[fir_div_post] = imx_clk_divider("fir_div_post", "fir_div_pre", base + MXC_CCM_PDR1, 23, 6);
+       /* Peripheral gates: imx_clk_gate2() — 2-bit enable fields in CGR0..2. */
+       clk[sdhc1_gate] = imx_clk_gate2("sdhc1_gate", "per", base + MXC_CCM_CGR0, 0);
+       clk[sdhc2_gate] = imx_clk_gate2("sdhc2_gate", "per", base + MXC_CCM_CGR0, 2);
+       clk[gpt_gate] = imx_clk_gate2("gpt_gate", "per", base + MXC_CCM_CGR0, 4);
+       clk[epit1_gate] = imx_clk_gate2("epit1_gate", "per", base + MXC_CCM_CGR0, 6);
+       clk[epit2_gate] = imx_clk_gate2("epit2_gate", "per", base + MXC_CCM_CGR0, 8);
+       clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", base + MXC_CCM_CGR0, 10);
+       clk[ata_gate] = imx_clk_gate2("ata_gate", "ipg", base + MXC_CCM_CGR0, 12);
+       clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ahb", base + MXC_CCM_CGR0, 14);
+       clk[cspi3_gate] = imx_clk_gate2("cspi3_gate", "ipg", base + MXC_CCM_CGR0, 16);
+       clk[rng_gate] = imx_clk_gate2("rng_gate", "ipg", base + MXC_CCM_CGR0, 18);
+       clk[uart1_gate] = imx_clk_gate2("uart1_gate", "per", base + MXC_CCM_CGR0, 20);
+       clk[uart2_gate] = imx_clk_gate2("uart2_gate", "per", base + MXC_CCM_CGR0, 22);
+       clk[ssi1_gate] = imx_clk_gate2("ssi1_gate", "spll", base + MXC_CCM_CGR0, 24);
+       clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "per", base + MXC_CCM_CGR0, 26);
+       clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "per", base + MXC_CCM_CGR0, 28);
+       clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per", base + MXC_CCM_CGR0, 30);
+       clk[hantro_gate] = imx_clk_gate2("hantro_gate", "per", base + MXC_CCM_CGR1, 0);
+       clk[mstick1_gate] = imx_clk_gate2("mstick1_gate", "per", base + MXC_CCM_CGR1, 2);
+       clk[mstick2_gate] = imx_clk_gate2("mstick2_gate", "per", base + MXC_CCM_CGR1, 4);
+       clk[csi_gate] = imx_clk_gate2("csi_gate", "csi_div", base + MXC_CCM_CGR1, 6);
+       clk[rtc_gate] = imx_clk_gate2("rtc_gate", "ipg", base + MXC_CCM_CGR1, 8);
+       clk[wdog_gate] = imx_clk_gate2("wdog_gate", "ipg", base + MXC_CCM_CGR1, 10);
+       clk[pwm_gate] = imx_clk_gate2("pwm_gate", "per", base + MXC_CCM_CGR1, 12);
+       clk[sim_gate] = imx_clk_gate2("sim_gate", "per", base + MXC_CCM_CGR1, 14);
+       clk[ect_gate] = imx_clk_gate2("ect_gate", "per", base + MXC_CCM_CGR1, 16);
+       clk[usb_gate] = imx_clk_gate2("usb_gate", "ahb", base + MXC_CCM_CGR1, 18);
+       clk[kpp_gate] = imx_clk_gate2("kpp_gate", "ipg", base + MXC_CCM_CGR1, 20);
+       clk[ipu_gate] = imx_clk_gate2("ipu_gate", "hsp", base + MXC_CCM_CGR1, 22);
+       clk[uart3_gate] = imx_clk_gate2("uart3_gate", "per", base + MXC_CCM_CGR1, 24);
+       clk[uart4_gate] = imx_clk_gate2("uart4_gate", "per", base + MXC_CCM_CGR1, 26);
+       clk[uart5_gate] = imx_clk_gate2("uart5_gate", "per", base + MXC_CCM_CGR1, 28);
+       clk[owire_gate] = imx_clk_gate2("owire_gate", "per", base + MXC_CCM_CGR1, 30);
+       clk[ssi2_gate] = imx_clk_gate2("ssi2_gate", "spll", base + MXC_CCM_CGR2, 0);
+       clk[cspi1_gate] = imx_clk_gate2("cspi1_gate", "ipg", base + MXC_CCM_CGR2, 2);
+       clk[cspi2_gate] = imx_clk_gate2("cspi2_gate", "ipg", base + MXC_CCM_CGR2, 4);
+       clk[gacc_gate] = imx_clk_gate2("gacc_gate", "per", base + MXC_CCM_CGR2, 6);
+       clk[emi_gate] = imx_clk_gate2("emi_gate", "ahb", base + MXC_CCM_CGR2, 8);
+       clk[rtic_gate] = imx_clk_gate2("rtic_gate", "ahb", base + MXC_CCM_CGR2, 10);
+       clk[firi_gate] = imx_clk_gate2("firi_gate", "upll", base+MXC_CCM_CGR2, 12);
+
+       /* Registration failures are logged but not fatal. */
+       for (i = 0; i < ARRAY_SIZE(clk); i++)
+               if (IS_ERR(clk[i]))
+                       pr_err("imx31 clk %d: register failed with %ld\n",
+                               i, PTR_ERR(clk[i]));
+
+       /* clkdev lookups: map each clock to its (con_id, dev_id) consumer. */
+       clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
+       clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
+       clk_register_clkdev(clk[cspi1_gate], NULL, "imx31-cspi.0");
+       clk_register_clkdev(clk[cspi2_gate], NULL, "imx31-cspi.1");
+       clk_register_clkdev(clk[cspi3_gate], NULL, "imx31-cspi.2");
+       clk_register_clkdev(clk[pwm_gate], "pwm", NULL);
+       clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
+       clk_register_clkdev(clk[rtc_gate], "rtc", NULL);
+       clk_register_clkdev(clk[epit1_gate], "epit", NULL);
+       clk_register_clkdev(clk[epit2_gate], "epit", NULL);
+       clk_register_clkdev(clk[nfc], NULL, "mxc_nand.0");
+       clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
+       clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
+       clk_register_clkdev(clk[kpp_gate], "kpp", NULL);
+       clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.0");
+       clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.0");
+       clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
+       clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.1");
+       clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.1");
+       clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
+       clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.2");
+       clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.2");
+       clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
+       clk_register_clkdev(clk[usb_div_post], "per", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usb_gate], "ahb", "fsl-usb2-udc");
+       clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
+       clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
+       /* i.mx31 has the i.mx21 type uart */
+       clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
+       clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
+       clk_register_clkdev(clk[uart2_gate], "per", "imx21-uart.1");
+       clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1");
+       clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2");
+       clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2");
+       clk_register_clkdev(clk[uart4_gate], "per", "imx21-uart.3");
+       clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.3");
+       clk_register_clkdev(clk[uart5_gate], "per", "imx21-uart.4");
+       clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.4");
+       clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
+       clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
+       clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
+       clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1.0");
+       clk_register_clkdev(clk[sdhc1_gate], NULL, "mxc-mmc.0");
+       clk_register_clkdev(clk[sdhc2_gate], NULL, "mxc-mmc.1");
+       clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+       clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
+       clk_register_clkdev(clk[firi_gate], "firi", NULL);
+       clk_register_clkdev(clk[ata_gate], NULL, "pata_imx");
+       clk_register_clkdev(clk[rtic_gate], "rtic", NULL);
+       clk_register_clkdev(clk[rng_gate], "rng", NULL);
+       clk_register_clkdev(clk[sdma_gate], NULL, "imx31-sdma");
+       clk_register_clkdev(clk[iim_gate], "iim", NULL);
+
+       /* Run CSI from the UPLL by default. */
+       clk_set_parent(clk[csi], clk[upll]);
+       /* Keep external memory interface clocked from boot onwards. */
+       clk_prepare_enable(clk[emi_gate]);
+       /* Briefly clock the IIM fuse box so the silicon revision can be read. */
+       clk_prepare_enable(clk[iim_gate]);
+       mx31_revision();
+       clk_disable_unprepare(clk[iim_gate]);
+
+       /* Start the GPT1-based system timer. */
+       mxc_timer_init(NULL, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR),
+                       MX31_INT_GPT);
+
+       return 0;
+}
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
new file mode 100644 (file)
index 0000000..a9e60bf
--- /dev/null
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+
+#include "crmregs-imx3.h"
+#include "clk.h"
+
+/*
+ * One row of the PDR0 "consumer" mux decode table: ARM and AHB clock
+ * divider values plus the ARM parent select (.sel: 0 = mpll,
+ * 1 = mpll_075 — see the aad->sel test in mx35_clocks_init).
+ */
+struct arm_ahb_div {
+       unsigned char arm, ahb, sel;
+};
+
+/*
+ * Consumer-mux decode table, indexed by PDR0 bits 19..16
+ * (mx35_clocks_init: (pdr0 >> 16) & 0xf).  Rows with .arm == 0 are
+ * reserved/illegal selections and trigger the pr_err fallback.
+ */
+static struct arm_ahb_div clk_consumer[] = {
+       { .arm = 1, .ahb = 4, .sel = 0},
+       { .arm = 1, .ahb = 3, .sel = 1},
+       { .arm = 2, .ahb = 2, .sel = 0},
+       { .arm = 0, .ahb = 0, .sel = 0},
+       { .arm = 0, .ahb = 0, .sel = 0},
+       { .arm = 0, .ahb = 0, .sel = 0},
+       { .arm = 4, .ahb = 1, .sel = 0},
+       { .arm = 1, .ahb = 5, .sel = 0},
+       { .arm = 1, .ahb = 8, .sel = 0},
+       { .arm = 1, .ahb = 6, .sel = 1},
+       { .arm = 2, .ahb = 4, .sel = 0},
+       { .arm = 0, .ahb = 0, .sel = 0},
+       { .arm = 0, .ahb = 0, .sel = 0},
+       { .arm = 0, .ahb = 0, .sel = 0},
+       { .arm = 4, .ahb = 2, .sel = 0},
+       { .arm = 0, .ahb = 0, .sel = 0},
+};
+
+/*
+ * HSP divider tables, indexed by PDR0 bits 21..20; a 0 entry marks an
+ * illegal selection.  _532 is used when the ARM clock runs above
+ * 400 MHz, _400 otherwise (see mx35_clocks_init).
+ */
+static char hsp_div_532[] = { 4, 8, 3, 0 };
+static char hsp_div_400[] = { 3, 6, 3, 0 };
+
+/* Common parent-name lists for the peripheral clock muxes below. */
+static const char *std_sel[] = {"ppll", "arm"};
+static const char *ipg_per_sel[] = {"ahb_per_div", "arm_per_div"};
+
+/* Indices into the clk[] array below; clk_max is the array length. */
+enum mx35_clks {
+       ckih, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg,
+       arm_per_div, ahb_per_div, ipg_per, uart_sel, uart_div, esdhc_sel,
+       esdhc1_div, esdhc2_div, esdhc3_div, spdif_sel, spdif_div_pre,
+       spdif_div_post, ssi_sel, ssi1_div_pre, ssi1_div_post, ssi2_div_pre,
+       ssi2_div_post, usb_sel, usb_div, nfc_div, asrc_gate, pata_gate,
+       audmux_gate, can1_gate, can2_gate, cspi1_gate, cspi2_gate, ect_gate,
+       edio_gate, emi_gate, epit1_gate, epit2_gate, esai_gate, esdhc1_gate,
+       esdhc2_gate, esdhc3_gate, fec_gate, gpio1_gate, gpio2_gate, gpio3_gate,
+       gpt_gate, i2c1_gate, i2c2_gate, i2c3_gate, iomuxc_gate, ipu_gate,
+       kpp_gate, mlb_gate, mshc_gate, owire_gate, pwm_gate, rngc_gate,
+       rtc_gate, rtic_gate, scc_gate, sdma_gate, spba_gate, spdif_gate,
+       ssi1_gate, ssi2_gate, uart1_gate, uart2_gate, uart3_gate, usbotg_gate,
+       wdog_gate, max_gate, admux_gate, csi_gate, iim_gate, gpu2d_gate,
+       clk_max
+};
+
+/* All clocks registered by mx35_clocks_init, indexed by enum mx35_clks. */
+static struct clk *clk[clk_max];
+
+int __init mx35_clocks_init()
+{
+       void __iomem *base = MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR);
+       u32 pdr0, consumer_sel, hsp_sel;
+       struct arm_ahb_div *aad;
+       unsigned char *hsp_div;
+       int i;
+
+       pdr0 = __raw_readl(base + MXC_CCM_PDR0);
+       consumer_sel = (pdr0 >> 16) & 0xf;
+       aad = &clk_consumer[consumer_sel];
+       if (!aad->arm) {
+               pr_err("i.MX35 clk: illegal consumer mux selection 0x%x\n", consumer_sel);
+               /*
+                * We are basically stuck. Continue with a default entry and hope we
+                * get far enough to actually show the above message
+                */
+               aad = &clk_consumer[0];
+       }
+
+       clk[ckih] = imx_clk_fixed("ckih", 24000000);
+       clk[mpll] = imx_clk_pllv1("mpll", "ckih", base + MX35_CCM_MPCTL);
+       clk[ppll] = imx_clk_pllv1("ppll", "ckih", base + MX35_CCM_PPCTL);
+
+       clk[mpll] = imx_clk_fixed_factor("mpll_075", "mpll", 3, 4);
+
+       if (aad->sel)
+               clk[arm] = imx_clk_fixed_factor("arm", "mpll_075", 1, aad->arm);
+       else
+               clk[arm] = imx_clk_fixed_factor("arm", "mpll", 1, aad->arm);
+
+       if (clk_get_rate(clk[arm]) > 400000000)
+               hsp_div = hsp_div_532;
+       else
+               hsp_div = hsp_div_400;
+
+       hsp_sel = (pdr0 >> 20) & 0x3;
+       if (!hsp_div[hsp_sel]) {
+               pr_err("i.MX35 clk: illegal hsp clk selection 0x%x\n", hsp_sel);
+               hsp_sel = 0;
+       }
+
+       clk[hsp] = imx_clk_fixed_factor("hsp", "arm", 1, hsp_div[hsp_sel]);
+
+       clk[ahb] = imx_clk_fixed_factor("ahb", "arm", 1, aad->ahb);
+       clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2);
+
+       clk[arm_per_div] = imx_clk_divider("arm_per_div", "arm", base + MX35_CCM_PDR4, 16, 6);
+       clk[ahb_per_div] = imx_clk_divider("ahb_per_div", "ahb", base + MXC_CCM_PDR0, 12, 3);
+       clk[ipg_per] = imx_clk_mux("ipg_per", base + MXC_CCM_PDR0, 26, 1, ipg_per_sel, ARRAY_SIZE(ipg_per_sel));
+
+       clk[uart_sel] = imx_clk_mux("uart_sel", base + MX35_CCM_PDR3, 14, 1, std_sel, ARRAY_SIZE(std_sel));
+       clk[uart_div] = imx_clk_divider("uart_div", "uart_sel", base + MX35_CCM_PDR4, 10, 6);
+
+       clk[esdhc_sel] = imx_clk_mux("esdhc_sel", base + MX35_CCM_PDR4, 9, 1, std_sel, ARRAY_SIZE(std_sel));
+       clk[esdhc1_div] = imx_clk_divider("esdhc1_div", "esdhc_sel", base + MX35_CCM_PDR3, 0, 6);
+       clk[esdhc2_div] = imx_clk_divider("esdhc2_div", "esdhc_sel", base + MX35_CCM_PDR3, 8, 6);
+       clk[esdhc3_div] = imx_clk_divider("esdhc3_div", "esdhc_sel", base + MX35_CCM_PDR3, 16, 6);
+
+       clk[spdif_sel] = imx_clk_mux("spdif_sel", base + MX35_CCM_PDR3, 22, 1, std_sel, ARRAY_SIZE(std_sel));
+       clk[spdif_div_pre] = imx_clk_divider("spdif_div_pre", "spdif_sel", base + MX35_CCM_PDR3, 29, 3); /* divide by 1 not allowed */ 
+       clk[spdif_div_post] = imx_clk_divider("spdif_div_post", "spdif_div_pre", base + MX35_CCM_PDR3, 23, 6);
+
+       clk[ssi_sel] = imx_clk_mux("ssi_sel", base + MX35_CCM_PDR2, 6, 1, std_sel, ARRAY_SIZE(std_sel));
+       clk[ssi1_div_pre] = imx_clk_divider("ssi1_div_pre", "ssi_sel", base + MX35_CCM_PDR2, 24, 3);
+       clk[ssi1_div_post] = imx_clk_divider("ssi1_div_post", "ssi1_div_pre", base + MX35_CCM_PDR2, 0, 6);
+       clk[ssi2_div_pre] = imx_clk_divider("ssi2_div_pre", "ssi_sel", base + MX35_CCM_PDR2, 27, 3);
+       clk[ssi2_div_post] = imx_clk_divider("ssi2_div_post", "ssi2_div_pre", base + MX35_CCM_PDR2, 8, 6);
+
+       clk[usb_sel] = imx_clk_mux("usb_sel", base + MX35_CCM_PDR4, 9, 1, std_sel, ARRAY_SIZE(std_sel));
+       clk[usb_div] = imx_clk_divider("usb_div", "usb_sel", base + MX35_CCM_PDR4, 22, 6);
+
+       clk[nfc_div] = imx_clk_divider("nfc_div", "ahb", base + MX35_CCM_PDR4, 28, 4);
+
+       clk[asrc_gate] = imx_clk_gate2("asrc_gate", "ipg", base + MX35_CCM_CGR0,  0);
+       clk[pata_gate] = imx_clk_gate2("pata_gate", "ipg", base + MX35_CCM_CGR0,  2);
+       clk[audmux_gate] = imx_clk_gate2("audmux_gate", "ipg", base + MX35_CCM_CGR0,  4);
+       clk[can1_gate] = imx_clk_gate2("can1_gate", "ipg", base + MX35_CCM_CGR0,  6);
+       clk[can2_gate] = imx_clk_gate2("can2_gate", "ipg", base + MX35_CCM_CGR0,  8);
+       clk[cspi1_gate] = imx_clk_gate2("cspi1_gate", "ipg", base + MX35_CCM_CGR0, 10);
+       clk[cspi2_gate] = imx_clk_gate2("cspi2_gate", "ipg", base + MX35_CCM_CGR0, 12);
+       clk[ect_gate] = imx_clk_gate2("ect_gate", "ipg", base + MX35_CCM_CGR0, 14);
+       clk[edio_gate] = imx_clk_gate2("edio_gate",   "ipg", base + MX35_CCM_CGR0, 16);
+       clk[emi_gate] = imx_clk_gate2("emi_gate", "ipg", base + MX35_CCM_CGR0, 18);
+       clk[epit1_gate] = imx_clk_gate2("epit1_gate", "ipg", base + MX35_CCM_CGR0, 20);
+       clk[epit2_gate] = imx_clk_gate2("epit2_gate", "ipg", base + MX35_CCM_CGR0, 22);
+       clk[esai_gate] = imx_clk_gate2("esai_gate",   "ipg", base + MX35_CCM_CGR0, 24);
+       clk[esdhc1_gate] = imx_clk_gate2("esdhc1_gate", "esdhc1_div", base + MX35_CCM_CGR0, 26);
+       clk[esdhc2_gate] = imx_clk_gate2("esdhc2_gate", "esdhc2_div", base + MX35_CCM_CGR0, 28);
+       clk[esdhc3_gate] = imx_clk_gate2("esdhc3_gate", "esdhc3_div", base + MX35_CCM_CGR0, 30);
+
+       clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", base + MX35_CCM_CGR1,  0);
+       clk[gpio1_gate] = imx_clk_gate2("gpio1_gate", "ipg", base + MX35_CCM_CGR1,  2);
+       clk[gpio2_gate] = imx_clk_gate2("gpio2_gate", "ipg", base + MX35_CCM_CGR1,  4);
+       clk[gpio3_gate] = imx_clk_gate2("gpio3_gate", "ipg", base + MX35_CCM_CGR1,  6);
+       clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", base + MX35_CCM_CGR1,  8);
+       clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "ipg_per", base + MX35_CCM_CGR1, 10);
+       clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "ipg_per", base + MX35_CCM_CGR1, 12);
+       clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "ipg_per", base + MX35_CCM_CGR1, 14);
+       clk[iomuxc_gate] = imx_clk_gate2("iomuxc_gate", "ipg", base + MX35_CCM_CGR1, 16);
+       clk[ipu_gate] = imx_clk_gate2("ipu_gate", "hsp", base + MX35_CCM_CGR1, 18);
+       clk[kpp_gate] = imx_clk_gate2("kpp_gate", "ipg", base + MX35_CCM_CGR1, 20);
+       clk[mlb_gate] = imx_clk_gate2("mlb_gate", "ahb", base + MX35_CCM_CGR1, 22);
+       clk[mshc_gate] = imx_clk_gate2("mshc_gate", "dummy", base + MX35_CCM_CGR1, 24);
+       clk[owire_gate] = imx_clk_gate2("owire_gate", "ipg_per", base + MX35_CCM_CGR1, 26);
+       clk[pwm_gate] = imx_clk_gate2("pwm_gate", "ipg_per", base + MX35_CCM_CGR1, 28);
+       clk[rngc_gate] = imx_clk_gate2("rngc_gate", "ipg", base + MX35_CCM_CGR1, 30);
+
+       clk[rtc_gate] = imx_clk_gate2("rtc_gate", "ipg", base + MX35_CCM_CGR2,  0);
+       clk[rtic_gate] = imx_clk_gate2("rtic_gate", "ahb", base + MX35_CCM_CGR2,  2);
+       clk[scc_gate] = imx_clk_gate2("scc_gate", "ipg", base + MX35_CCM_CGR2,  4);
+       clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ahb", base + MX35_CCM_CGR2,  6);
+       clk[spba_gate] = imx_clk_gate2("spba_gate", "ipg", base + MX35_CCM_CGR2,  8);
+       clk[spdif_gate] = imx_clk_gate2("spdif_gate", "spdif_div_post", base + MX35_CCM_CGR2, 10);
+       clk[ssi1_gate] = imx_clk_gate2("ssi1_gate", "ssi1_div_post", base + MX35_CCM_CGR2, 12);
+       clk[ssi2_gate] = imx_clk_gate2("ssi2_gate", "ssi2_div_post", base + MX35_CCM_CGR2, 14);
+       clk[uart1_gate] = imx_clk_gate2("uart1_gate", "uart_div", base + MX35_CCM_CGR2, 16);
+       clk[uart2_gate] = imx_clk_gate2("uart2_gate", "uart_div", base + MX35_CCM_CGR2, 18);
+       clk[uart3_gate] = imx_clk_gate2("uart3_gate", "uart_div", base + MX35_CCM_CGR2, 20);
+       clk[usbotg_gate] = imx_clk_gate2("usbotg_gate", "ahb", base + MX35_CCM_CGR2, 22);
+       clk[wdog_gate] = imx_clk_gate2("wdog_gate", "ipg", base + MX35_CCM_CGR2, 24);
+       clk[max_gate] = imx_clk_gate2("max_gate", "dummy", base + MX35_CCM_CGR2, 26);
+       clk[admux_gate] = imx_clk_gate2("admux_gate", "ipg", base + MX35_CCM_CGR2, 30);
+
+       clk[csi_gate] = imx_clk_gate2("csi_gate", "ipg", base + MX35_CCM_CGR3,  0);
+       clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", base + MX35_CCM_CGR3,  2);
+       clk[gpu2d_gate] = imx_clk_gate2("gpu2d_gate", "ahb", base + MX35_CCM_CGR3,  4);
+
+       for (i = 0; i < ARRAY_SIZE(clk); i++)
+               if (IS_ERR(clk[i]))
+                       pr_err("i.MX35 clk %d: register failed with %ld\n",
+                               i, PTR_ERR(clk[i]));
+
+
+       clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
+       clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
+       clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
+       clk_register_clkdev(clk[cspi1_gate], "per", "imx35-cspi.0");
+       clk_register_clkdev(clk[cspi1_gate], "ipg", "imx35-cspi.0");
+       clk_register_clkdev(clk[cspi2_gate], "per", "imx35-cspi.1");
+       clk_register_clkdev(clk[cspi2_gate], "ipg", "imx35-cspi.1");
+       clk_register_clkdev(clk[epit1_gate], NULL, "imx-epit.0");
+       clk_register_clkdev(clk[epit2_gate], NULL, "imx-epit.1");
+       clk_register_clkdev(clk[esdhc1_gate], "per", "sdhci-esdhc-imx35.0");
+       clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.0");
+       clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.0");
+       clk_register_clkdev(clk[esdhc2_gate], "per", "sdhci-esdhc-imx35.1");
+       clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.1");
+       clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.1");
+       clk_register_clkdev(clk[esdhc3_gate], "per", "sdhci-esdhc-imx35.2");
+       clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.2");
+       clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.2");
+       /* i.mx35 has the i.mx27 type fec */
+       clk_register_clkdev(clk[fec_gate], NULL, "imx27-fec.0");
+       clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
+       clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
+       clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
+       clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
+       clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
+       clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
+       clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
+       clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
+       clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
+       clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
+       clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
+       clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
+       clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+       /* i.mx35 has the i.mx21 type uart */
+       clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
+       clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
+       clk_register_clkdev(clk[uart2_gate], "per", "imx21-uart.1");
+       clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1");
+       clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2");
+       clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2");
+       clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
+       clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
+       clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.0");
+       clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
+       clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
+       clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.1");
+       clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
+       clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
+       clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.2");
+       clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
+       clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usbotg_gate], "ahb", "fsl-usb2-udc");
+       clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
+       clk_register_clkdev(clk[nfc_div], NULL, "mxc_nand.0");
+
+       clk_prepare_enable(clk[spba_gate]);
+       clk_prepare_enable(clk[gpio1_gate]);
+       clk_prepare_enable(clk[gpio2_gate]);
+       clk_prepare_enable(clk[gpio3_gate]);
+       clk_prepare_enable(clk[iim_gate]);
+       clk_prepare_enable(clk[emi_gate]);
+
+       imx_print_silicon_rev("i.MX35", mx35_revision());
+
+#ifdef CONFIG_MXC_USE_EPIT
+       epit_timer_init(&epit1_clk,
+                       MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
+#else
+       mxc_timer_init(NULL, MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR),
+                       MX35_INT_GPT);
+#endif
+
+       return 0;
+}
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
new file mode 100644 (file)
index 0000000..fcd94f3
--- /dev/null
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+
+#include "crm-regs-imx5.h"
+#include "clk.h"
+
+/* Low-power Audio Playback Mode clock */
+static const char *lp_apm_sel[] = { "osc", };
+
+/* This is used multiple times */
+static const char *standard_pll_sel[] = { "pll1_sw", "pll2_sw", "pll3_sw", "lp_apm", };
+static const char *periph_apm_sel[] = { "pll1_sw", "pll3_sw", "lp_apm", };
+static const char *main_bus_sel[] = { "pll2_sw", "periph_apm", };
+static const char *per_lp_apm_sel[] = { "main_bus", "lp_apm", };
+static const char *per_root_sel[] = { "per_podf", "ipg", };
+static const char *esdhc_c_sel[] = { "esdhc_a_podf", "esdhc_b_podf", };
+static const char *esdhc_d_sel[] = { "esdhc_a_podf", "esdhc_b_podf", };
+/* Fix: "lp_apm" is the name registered below; "lp_amp" matched no clock. */
+static const char *ssi_apm_sels[] = { "ckih1", "lp_apm", "ckih2", };
+static const char *ssi_clk_sels[] = { "pll1_sw", "pll2_sw", "pll3_sw", "ssi_apm", };
+static const char *ssi3_clk_sels[] = { "ssi1_root_gate", "ssi2_root_gate", };
+static const char *ssi_ext1_com_sels[] = { "ssi_ext1_podf", "ssi1_root_gate", };
+static const char *ssi_ext2_com_sels[] = { "ssi_ext2_podf", "ssi2_root_gate", };
+static const char *emi_slow_sel[] = { "main_bus", "ahb", };
+static const char *usb_phy_sel_str[] = { "osc", "usb_phy_podf", };
+static const char *mx51_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "tve_di", };
+static const char *mx53_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "di_pll4_podf", "dummy", "ldb_di0", };
+static const char *mx53_ldb_di0_sel[] = { "pll3_sw", "pll4_sw", };
+static const char *mx51_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", };
+static const char *mx53_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", "ldb_di1", };
+static const char *mx53_ldb_di1_sel[] = { "pll3_sw", "pll4_sw", };
+static const char *mx51_tve_ext_sel[] = { "osc", "ckih1", };
+static const char *mx53_tve_ext_sel[] = { "pll4_sw", "ckih1", };
+static const char *tve_sel[] = { "tve_pred", "tve_ext_sel", };
+static const char *ipu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
+static const char *vpu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
+
+/* Indices into the clk[] array below; clk_max is the array length. */
+enum imx5_clks {
+       dummy, ckil, osc, ckih1, ckih2, ahb, ipg, axi_a, axi_b, uart_pred,
+       uart_root, esdhc_a_pred, esdhc_b_pred, esdhc_c_s, esdhc_d_s,
+       emi_sel, emi_slow_podf, nfc_podf, ecspi_pred, ecspi_podf, usboh3_pred,
+       usboh3_podf, usb_phy_pred, usb_phy_podf, cpu_podf, di_pred, tve_di,
+       tve_s, uart1_ipg_gate, uart1_per_gate, uart2_ipg_gate,
+       uart2_per_gate, uart3_ipg_gate, uart3_per_gate, i2c1_gate, i2c2_gate,
+       gpt_ipg_gate, pwm1_ipg_gate, pwm1_hf_gate, pwm2_ipg_gate, pwm2_hf_gate,
+       gpt_gate, fec_gate, usboh3_per_gate, esdhc1_ipg_gate, esdhc2_ipg_gate,
+       esdhc3_ipg_gate, esdhc4_ipg_gate, ssi1_ipg_gate, ssi2_ipg_gate,
+       ssi3_ipg_gate, ecspi1_ipg_gate, ecspi1_per_gate, ecspi2_ipg_gate,
+       ecspi2_per_gate, cspi_ipg_gate, sdma_gate, emi_slow_gate, ipu_s,
+       ipu_gate, nfc_gate, ipu_di1_gate, vpu_s, vpu_gate,
+       vpu_reference_gate, uart4_ipg_gate, uart4_per_gate, uart5_ipg_gate,
+       uart5_per_gate, tve_gate, tve_pred, esdhc1_per_gate, esdhc2_per_gate,
+       esdhc3_per_gate, esdhc4_per_gate, usb_phy_gate, hsi2c_gate,
+       mipi_hsc1_gate, mipi_hsc2_gate, mipi_esc_gate, mipi_hsp_gate,
+       ldb_di1_div_3_5, ldb_di1_div, ldb_di0_div_3_5, ldb_di0_div,
+       ldb_di1_gate, can2_serial_gate, can2_ipg_gate, i2c3_gate, lp_apm,
+       periph_apm, main_bus, ahb_max, aips_tz1, aips_tz2, tmax1, tmax2,
+       tmax3, spba, uart_sel, esdhc_a_sel, esdhc_b_sel, esdhc_a_podf,
+       esdhc_b_podf, ecspi_sel, usboh3_sel, usb_phy_sel, iim_gate,
+       usboh3_gate, emi_fast_gate, ipu_di0_gate,gpc_dvfs, pll1_sw, pll2_sw,
+       pll3_sw, ipu_di0_sel, ipu_di1_sel, tve_ext_sel, mx51_mipi, pll4_sw,
+       ldb_di1_sel, di_pll4_podf, ldb_di0_sel, ldb_di0_gate, usb_phy1_gate,
+       usb_phy2_gate, per_lp_apm, per_pred1, per_pred2, per_podf, per_root,
+       ssi_apm, ssi1_root_sel, ssi2_root_sel, ssi3_root_sel, ssi_ext1_sel,
+       ssi_ext2_sel, ssi_ext1_com_sel, ssi_ext2_com_sel, ssi1_root_pred,
+       ssi1_root_podf, ssi2_root_pred, ssi2_root_podf, ssi_ext1_pred,
+       ssi_ext1_podf, ssi_ext2_pred, ssi_ext2_podf, ssi1_root_gate,
+       ssi2_root_gate, ssi3_root_gate, ssi_ext1_gate, ssi_ext2_gate,
+       clk_max
+};
+
+/* Clocks shared by i.MX51 and i.MX53, indexed by enum imx5_clks. */
+static struct clk *clk[clk_max];
+
+static void __init mx5_clocks_common_init(unsigned long rate_ckil,
+               unsigned long rate_osc, unsigned long rate_ckih1,
+               unsigned long rate_ckih2)
+{
+       int i;
+
+       clk[dummy] = imx_clk_fixed("dummy", 0);
+       clk[ckil] = imx_clk_fixed("ckil", rate_ckil);
+       clk[osc] = imx_clk_fixed("osc", rate_osc);
+       clk[ckih1] = imx_clk_fixed("ckih1", rate_ckih1);
+       clk[ckih2] = imx_clk_fixed("ckih2", rate_ckih2);
+
+       clk[lp_apm] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 9, 1,
+                               lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
+       clk[periph_apm] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2,
+                               periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
+       clk[main_bus] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
+                               main_bus_sel, ARRAY_SIZE(main_bus_sel));
+       clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCDR, 1, 1,
+                               per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel));
+       clk[per_pred1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2);
+       clk[per_pred2] = imx_clk_divider("per_pred2", "per_pred1", MXC_CCM_CBCDR, 3, 3);
+       clk[per_podf] = imx_clk_divider("per_podf", "per_pred2", MXC_CCM_CBCDR, 0, 3);
+       clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCDR, 1, 0,
+                               per_root_sel, ARRAY_SIZE(per_root_sel));
+       clk[ahb] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3);
+       clk[ahb_max] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28);
+       clk[aips_tz1] = imx_clk_gate2("aips_tz1", "ahb", MXC_CCM_CCGR0, 24);
+       clk[aips_tz2] = imx_clk_gate2("aips_tz2", "ahb", MXC_CCM_CCGR0, 26);
+       clk[tmax1] = imx_clk_gate2("tmax1", "ahb", MXC_CCM_CCGR1, 0);
+       clk[tmax2] = imx_clk_gate2("tmax2", "ahb", MXC_CCM_CCGR1, 2);
+       clk[tmax3] = imx_clk_gate2("tmax3", "ahb", MXC_CCM_CCGR1, 4);
+       clk[spba] = imx_clk_gate2("spba", "ipg", MXC_CCM_CCGR5, 0);
+       clk[ipg] = imx_clk_divider("ipg", "ahb", MXC_CCM_CBCDR, 8, 2);
+       clk[axi_a] = imx_clk_divider("axi_a", "main_bus", MXC_CCM_CBCDR, 16, 3);
+       clk[axi_b] = imx_clk_divider("axi_b", "main_bus", MXC_CCM_CBCDR, 19, 3);
+       clk[uart_sel] = imx_clk_mux("uart_sel", MXC_CCM_CSCMR1, 24, 2,
+                               standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+       clk[uart_pred] = imx_clk_divider("uart_pred", "uart_sel", MXC_CCM_CSCDR1, 3, 3);
+       clk[uart_root] = imx_clk_divider("uart_root", "uart_pred", MXC_CCM_CSCDR1, 0, 3);
+
+       clk[esdhc_a_sel] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2,
+                               standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+       clk[esdhc_b_sel] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2,
+                               standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+       clk[esdhc_a_pred] = imx_clk_divider("esdhc_a_pred", "esdhc_a_sel", MXC_CCM_CSCDR1, 16, 3);
+       clk[esdhc_a_podf] = imx_clk_divider("esdhc_a_podf", "esdhc_a_pred", MXC_CCM_CSCDR1, 11, 3);
+       clk[esdhc_b_pred] = imx_clk_divider("esdhc_b_pred", "esdhc_b_sel", MXC_CCM_CSCDR1, 22, 3);
+       clk[esdhc_b_podf] = imx_clk_divider("esdhc_b_podf", "esdhc_b_pred", MXC_CCM_CSCDR1, 19, 3);
+       clk[esdhc_c_s] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel));
+       clk[esdhc_d_s] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel));
+
+       clk[emi_sel] = imx_clk_mux("emi_sel", MXC_CCM_CBCDR, 26, 1,
+                               emi_slow_sel, ARRAY_SIZE(emi_slow_sel));
+       clk[emi_slow_podf] = imx_clk_divider("emi_slow_podf", "emi_sel", MXC_CCM_CBCDR, 22, 3);
+       clk[nfc_podf] = imx_clk_divider("nfc_podf", "emi_slow_podf", MXC_CCM_CBCDR, 13, 3);
+       clk[ecspi_sel] = imx_clk_mux("ecspi_sel", MXC_CCM_CSCMR1, 4, 2,
+                               standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+       clk[ecspi_pred] = imx_clk_divider("ecspi_pred", "ecspi_sel", MXC_CCM_CSCDR2, 25, 3);
+       clk[ecspi_podf] = imx_clk_divider("ecspi_podf", "ecspi_pred", MXC_CCM_CSCDR2, 19, 6);
+       clk[usboh3_sel] = imx_clk_mux("usboh3_sel", MXC_CCM_CSCMR1, 22, 2,
+                               standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+       clk[usboh3_pred] = imx_clk_divider("usboh3_pred", "usboh3_sel", MXC_CCM_CSCDR1, 8, 3);
+       clk[usboh3_podf] = imx_clk_divider("usboh3_podf", "usboh3_pred", MXC_CCM_CSCDR1, 6, 2);
+       clk[usb_phy_pred] = imx_clk_divider("usb_phy_pred", "pll3_sw", MXC_CCM_CDCDR, 3, 3);
+       clk[usb_phy_podf] = imx_clk_divider("usb_phy_podf", "usb_phy_pred", MXC_CCM_CDCDR, 0, 3);
+       clk[usb_phy_sel] = imx_clk_mux("usb_phy_sel", MXC_CCM_CSCMR1, 26, 1,
+                               usb_phy_sel_str, ARRAY_SIZE(usb_phy_sel_str));
+       clk[cpu_podf] = imx_clk_divider("cpu_podf", "pll1_sw", MXC_CCM_CACRR, 0, 3);
+       clk[di_pred] = imx_clk_divider("di_pred", "pll3_sw", MXC_CCM_CDCDR, 6, 3);
+       clk[tve_di] = imx_clk_fixed("tve_di", 65000000); /* FIXME */
+       clk[tve_s] = imx_clk_mux("tve_sel", MXC_CCM_CSCMR1, 7, 1, tve_sel, ARRAY_SIZE(tve_sel));
+       clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", MXC_CCM_CCGR0, 30);
+       clk[uart1_ipg_gate] = imx_clk_gate2("uart1_ipg_gate", "ipg", MXC_CCM_CCGR1, 6);
+       clk[uart1_per_gate] = imx_clk_gate2("uart1_per_gate", "uart_root", MXC_CCM_CCGR1, 8);
+       clk[uart2_ipg_gate] = imx_clk_gate2("uart2_ipg_gate", "ipg", MXC_CCM_CCGR1, 10);
+       clk[uart2_per_gate] = imx_clk_gate2("uart2_per_gate", "uart_root", MXC_CCM_CCGR1, 12);
+       clk[uart3_ipg_gate] = imx_clk_gate2("uart3_ipg_gate", "ipg", MXC_CCM_CCGR1, 14);
+       clk[uart3_per_gate] = imx_clk_gate2("uart3_per_gate", "uart_root", MXC_CCM_CCGR1, 16);
+       clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "per_root", MXC_CCM_CCGR1, 18);
+       clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "per_root", MXC_CCM_CCGR1, 20);
+       clk[gpt_ipg_gate] = imx_clk_gate2("gpt_ipg_gate", "ipg", MXC_CCM_CCGR2, 20);
+       clk[pwm1_ipg_gate] = imx_clk_gate2("pwm1_ipg_gate", "ipg", MXC_CCM_CCGR2, 10);
+       clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "ipg", MXC_CCM_CCGR2, 12);
+       clk[pwm2_ipg_gate] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14);
+       clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "ipg", MXC_CCM_CCGR2, 16);
+       clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", MXC_CCM_CCGR2, 18);
+       clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24);
+       clk[usboh3_gate] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26);
+       clk[usboh3_per_gate] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28);
+       clk[esdhc1_ipg_gate] = imx_clk_gate2("esdhc1_ipg_gate", "ipg", MXC_CCM_CCGR3, 0);
+       clk[esdhc2_ipg_gate] = imx_clk_gate2("esdhc2_ipg_gate", "ipg", MXC_CCM_CCGR3, 4);
+       clk[esdhc3_ipg_gate] = imx_clk_gate2("esdhc3_ipg_gate", "ipg", MXC_CCM_CCGR3, 8);
+       clk[esdhc4_ipg_gate] = imx_clk_gate2("esdhc4_ipg_gate", "ipg", MXC_CCM_CCGR3, 12);
+       clk[ssi1_ipg_gate] = imx_clk_gate2("ssi1_ipg_gate", "ipg", MXC_CCM_CCGR3, 16);
+       clk[ssi2_ipg_gate] = imx_clk_gate2("ssi2_ipg_gate", "ipg", MXC_CCM_CCGR3, 20);
+       clk[ssi3_ipg_gate] = imx_clk_gate2("ssi3_ipg_gate", "ipg", MXC_CCM_CCGR3, 24);
+       clk[ecspi1_ipg_gate] = imx_clk_gate2("ecspi1_ipg_gate", "ipg", MXC_CCM_CCGR4, 18);
+       clk[ecspi1_per_gate] = imx_clk_gate2("ecspi1_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 20);
+       clk[ecspi2_ipg_gate] = imx_clk_gate2("ecspi2_ipg_gate", "ipg", MXC_CCM_CCGR4, 22);
+       clk[ecspi2_per_gate] = imx_clk_gate2("ecspi2_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 24);
+       clk[cspi_ipg_gate] = imx_clk_gate2("cspi_ipg_gate", "ipg", MXC_CCM_CCGR4, 26);
+       clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ipg", MXC_CCM_CCGR4, 30);
+       clk[emi_fast_gate] = imx_clk_gate2("emi_fast_gate", "dummy", MXC_CCM_CCGR5, 14);
+       clk[emi_slow_gate] = imx_clk_gate2("emi_slow_gate", "emi_slow_podf", MXC_CCM_CCGR5, 16);
+       clk[ipu_s] = imx_clk_mux("ipu_sel", MXC_CCM_CBCMR, 6, 2, ipu_sel, ARRAY_SIZE(ipu_sel));
+       clk[ipu_gate] = imx_clk_gate2("ipu_gate", "ipu_sel", MXC_CCM_CCGR5, 10);
+       clk[nfc_gate] = imx_clk_gate2("nfc_gate", "nfc_podf", MXC_CCM_CCGR5, 20);
+       clk[ipu_di0_gate] = imx_clk_gate2("ipu_di0_gate", "ipu_di0_sel", MXC_CCM_CCGR6, 10);
+       clk[ipu_di1_gate] = imx_clk_gate2("ipu_di1_gate", "ipu_di1_sel", MXC_CCM_CCGR6, 12);
+       clk[vpu_s] = imx_clk_mux("vpu_sel", MXC_CCM_CBCMR, 14, 2, vpu_sel, ARRAY_SIZE(vpu_sel));
+       clk[vpu_gate] = imx_clk_gate2("vpu_gate", "vpu_sel", MXC_CCM_CCGR5, 6);
+       clk[vpu_reference_gate] = imx_clk_gate2("vpu_reference_gate", "osc", MXC_CCM_CCGR5, 8);
+       clk[uart4_ipg_gate] = imx_clk_gate2("uart4_ipg_gate", "ipg", MXC_CCM_CCGR7, 8);
+       clk[uart4_per_gate] = imx_clk_gate2("uart4_per_gate", "uart_root", MXC_CCM_CCGR7, 10);
+       clk[uart5_ipg_gate] = imx_clk_gate2("uart5_ipg_gate", "ipg", MXC_CCM_CCGR7, 12);
+       clk[uart5_per_gate] = imx_clk_gate2("uart5_per_gate", "uart_root", MXC_CCM_CCGR7, 14);
+       clk[gpc_dvfs] = imx_clk_gate2("gpc_dvfs", "dummy", MXC_CCM_CCGR5, 24);
+
+       clk[ssi_apm] = imx_clk_mux("ssi_apm", MXC_CCM_CSCMR1, 8, 2, ssi_apm_sels, ARRAY_SIZE(ssi_apm_sels));
+       clk[ssi1_root_sel] = imx_clk_mux("ssi1_root_sel", MXC_CCM_CSCMR1, 14, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+       clk[ssi2_root_sel] = imx_clk_mux("ssi2_root_sel", MXC_CCM_CSCMR1, 12, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+       clk[ssi3_root_sel] = imx_clk_mux("ssi3_root_sel", MXC_CCM_CSCMR1, 11, 1, ssi3_clk_sels, ARRAY_SIZE(ssi3_clk_sels));
+       clk[ssi_ext1_sel] = imx_clk_mux("ssi_ext1_sel", MXC_CCM_CSCMR1, 28, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+       clk[ssi_ext2_sel] = imx_clk_mux("ssi_ext2_sel", MXC_CCM_CSCMR1, 30, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+       clk[ssi_ext1_com_sel] = imx_clk_mux("ssi_ext1_com_sel", MXC_CCM_CSCMR1, 0, 1, ssi_ext1_com_sels, ARRAY_SIZE(ssi_ext1_com_sels));
+       clk[ssi_ext2_com_sel] = imx_clk_mux("ssi_ext2_com_sel", MXC_CCM_CSCMR1, 1, 1, ssi_ext2_com_sels, ARRAY_SIZE(ssi_ext2_com_sels));
+       clk[ssi1_root_pred] = imx_clk_divider("ssi1_root_pred", "ssi1_root_sel", MXC_CCM_CS1CDR, 6, 3);
+       clk[ssi1_root_podf] = imx_clk_divider("ssi1_root_podf", "ssi1_root_pred", MXC_CCM_CS1CDR, 0, 6);
+       clk[ssi2_root_pred] = imx_clk_divider("ssi2_root_pred", "ssi2_root_sel", MXC_CCM_CS2CDR, 6, 3);
+       clk[ssi2_root_podf] = imx_clk_divider("ssi2_root_podf", "ssi2_root_pred", MXC_CCM_CS2CDR, 0, 6);
+       clk[ssi_ext1_pred] = imx_clk_divider("ssi_ext1_pred", "ssi_ext1_sel", MXC_CCM_CS1CDR, 22, 3);
+       clk[ssi_ext1_podf] = imx_clk_divider("ssi_ext1_podf", "ssi_ext1_pred", MXC_CCM_CS1CDR, 16, 6);
+       clk[ssi_ext2_pred] = imx_clk_divider("ssi_ext2_pred", "ssi_ext2_sel", MXC_CCM_CS2CDR, 22, 3);
+       clk[ssi_ext2_podf] = imx_clk_divider("ssi_ext2_podf", "ssi_ext2_pred", MXC_CCM_CS2CDR, 16, 6);
+       clk[ssi1_root_gate] = imx_clk_gate2("ssi1_root_gate", "ssi1_root_podf", MXC_CCM_CCGR3, 18);
+       clk[ssi2_root_gate] = imx_clk_gate2("ssi2_root_gate", "ssi2_root_podf", MXC_CCM_CCGR3, 22);
+       clk[ssi3_root_gate] = imx_clk_gate2("ssi3_root_gate", "ssi3_root_sel", MXC_CCM_CCGR3, 26);
+       clk[ssi_ext1_gate] = imx_clk_gate2("ssi_ext1_gate", "ssi_ext1_com_sel", MXC_CCM_CCGR3, 28);
+       clk[ssi_ext2_gate] = imx_clk_gate2("ssi_ext2_gate", "ssi_ext2_com_sel", MXC_CCM_CCGR3, 30);
+
+       for (i = 0; i < ARRAY_SIZE(clk); i++)
+               if (IS_ERR(clk[i]))
+                       pr_err("i.MX5 clk %d: register failed with %ld\n",
+                               i, PTR_ERR(clk[i]));
+       
+       clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
+       clk_register_clkdev(clk[gpt_ipg_gate], "ipg", "imx-gpt.0");
+       clk_register_clkdev(clk[uart1_per_gate], "per", "imx21-uart.0");
+       clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
+       clk_register_clkdev(clk[uart2_per_gate], "per", "imx21-uart.1");
+       clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
+       clk_register_clkdev(clk[uart3_per_gate], "per", "imx21-uart.2");
+       clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
+       clk_register_clkdev(clk[uart4_per_gate], "per", "imx21-uart.3");
+       clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
+       clk_register_clkdev(clk[uart5_per_gate], "per", "imx21-uart.4");
+       clk_register_clkdev(clk[uart5_ipg_gate], "ipg", "imx21-uart.4");
+       clk_register_clkdev(clk[ecspi1_per_gate], "per", "imx51-ecspi.0");
+       clk_register_clkdev(clk[ecspi1_ipg_gate], "ipg", "imx51-ecspi.0");
+       clk_register_clkdev(clk[ecspi2_per_gate], "per", "imx51-ecspi.1");
+       clk_register_clkdev(clk[ecspi2_ipg_gate], "ipg", "imx51-ecspi.1");
+       clk_register_clkdev(clk[cspi_ipg_gate], NULL, "imx51-cspi.0");
+       clk_register_clkdev(clk[pwm1_ipg_gate], "pwm", "mxc_pwm.0");
+       clk_register_clkdev(clk[pwm2_ipg_gate], "pwm", "mxc_pwm.1");
+       clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
+       clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
+       clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.0");
+       clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.0");
+       clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.0");
+       clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.1");
+       clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.1");
+       clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.1");
+       clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.2");
+       clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.2");
+       clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.2");
+       clk_register_clkdev(clk[usboh3_per_gate], "per", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usboh3_gate], "ipg", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usboh3_gate], "ahb", "fsl-usb2-udc");
+       clk_register_clkdev(clk[nfc_gate], NULL, "mxc_nand");
+       clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0");
+       clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
+       clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "imx-ssi.2");
+       clk_register_clkdev(clk[ssi_ext1_gate], "ssi_ext1", NULL);
+       clk_register_clkdev(clk[ssi_ext2_gate], "ssi_ext2", NULL);
+       clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
+       clk_register_clkdev(clk[cpu_podf], "cpu", NULL);
+       clk_register_clkdev(clk[iim_gate], "iim", NULL);
+       clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.0");
+       clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.1");
+       clk_register_clkdev(clk[dummy], NULL, "imx-keypad");
+       clk_register_clkdev(clk[tve_gate], NULL, "imx-tve.0");
+       clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx-tve.0");
+
+       /* Set SDHC parents to be PLL2 */
+       clk_set_parent(clk[esdhc_a_sel], clk[pll2_sw]);
+       clk_set_parent(clk[esdhc_b_sel], clk[pll2_sw]);
+
+       /* move usb phy clk to 24MHz */
+       clk_set_parent(clk[usb_phy_sel], clk[osc]);
+
+       clk_prepare_enable(clk[gpc_dvfs]);
+       clk_prepare_enable(clk[ahb_max]); /* esdhc3 */
+       clk_prepare_enable(clk[aips_tz1]);
+       clk_prepare_enable(clk[aips_tz2]); /* fec */
+       clk_prepare_enable(clk[spba]);
+       clk_prepare_enable(clk[emi_fast_gate]); /* fec */
+       clk_prepare_enable(clk[tmax1]);
+       clk_prepare_enable(clk[tmax2]); /* esdhc2, fec */
+       clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */
+}
+
+/*
+ * mx51_clocks_init - register the i.MX51-specific clock tree
+ * @rate_ckil:  rate of the CKIL (low-frequency) oscillator in Hz
+ * @rate_osc:   rate of the main oscillator in Hz
+ * @rate_ckih1: rate of the CKIH1 oscillator in Hz
+ * @rate_ckih2: rate of the CKIH2 oscillator in Hz
+ *
+ * Registers the PLLs, muxes, dividers and gates whose layout differs
+ * from i.MX53, delegates the shared clocks to mx5_clocks_common_init(),
+ * then installs clkdev lookups, default parents/rates and the GPT
+ * system timer.  Always returns 0.
+ */
+int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
+                       unsigned long rate_ckih1, unsigned long rate_ckih2)
+{
+       int i;
+
+       clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX51_DPLL1_BASE);
+       clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX51_DPLL2_BASE);
+       clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX51_DPLL3_BASE);
+       clk[ipu_di0_sel] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
+                               mx51_ipu_di0_sel, ARRAY_SIZE(mx51_ipu_di0_sel));
+       clk[ipu_di1_sel] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
+                               mx51_ipu_di1_sel, ARRAY_SIZE(mx51_ipu_di1_sel));
+       clk[tve_ext_sel] = imx_clk_mux("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
+                               mx51_tve_ext_sel, ARRAY_SIZE(mx51_tve_ext_sel));
+       clk[tve_gate] = imx_clk_gate2("tve_gate", "tve_sel", MXC_CCM_CCGR2, 30);
+       clk[tve_pred] = imx_clk_divider("tve_pred", "pll3_sw", MXC_CCM_CDCDR, 28, 3);
+       clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
+       clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 6);
+       clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 10);
+       clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
+       clk[usb_phy_gate] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0);
+       clk[hsi2c_gate] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22);
+       clk[mipi_hsc1_gate] = imx_clk_gate2("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6);
+       clk[mipi_hsc2_gate] = imx_clk_gate2("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8);
+       clk[mipi_esc_gate] = imx_clk_gate2("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10);
+       clk[mipi_hsp_gate] = imx_clk_gate2("mipi_hsp_gate", "ipg", MXC_CCM_CCGR4, 12);
+
+       /* Report (but tolerate) any clock that failed to register. */
+       for (i = 0; i < ARRAY_SIZE(clk); i++)
+               if (IS_ERR(clk[i]))
+                       pr_err("i.MX51 clk %d: register failed with %ld\n",
+                               i, PTR_ERR(clk[i]));
+
+       mx5_clocks_common_init(rate_ckil, rate_osc, rate_ckih1, rate_ckih2);
+
+       /* clkdev lookups for non-DT (platform-device) consumers */
+       clk_register_clkdev(clk[hsi2c_gate], NULL, "imx-i2c.2");
+       clk_register_clkdev(clk[mx51_mipi], "mipi_hsp", NULL);
+       clk_register_clkdev(clk[vpu_gate], NULL, "imx51-vpu.0");
+       clk_register_clkdev(clk[fec_gate], NULL, "imx27-fec.0");
+       clk_register_clkdev(clk[gpc_dvfs], "gpc_dvfs", NULL);
+       clk_register_clkdev(clk[ipu_gate], "bus", "imx51-ipu");
+       clk_register_clkdev(clk[ipu_di0_gate], "di0", "imx51-ipu");
+       clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx51-ipu");
+       clk_register_clkdev(clk[ipu_gate], "hsp", "imx51-ipu");
+       clk_register_clkdev(clk[usb_phy_gate], "phy", "mxc-ehci.0");
+       clk_register_clkdev(clk[esdhc1_ipg_gate], "ipg", "sdhci-esdhc-imx51.0");
+       clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.0");
+       clk_register_clkdev(clk[esdhc1_per_gate], "per", "sdhci-esdhc-imx51.0");
+       clk_register_clkdev(clk[esdhc2_ipg_gate], "ipg", "sdhci-esdhc-imx51.1");
+       clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.1");
+       clk_register_clkdev(clk[esdhc2_per_gate], "per", "sdhci-esdhc-imx51.1");
+       clk_register_clkdev(clk[esdhc3_ipg_gate], "ipg", "sdhci-esdhc-imx51.2");
+       clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.2");
+       clk_register_clkdev(clk[esdhc3_per_gate], "per", "sdhci-esdhc-imx51.2");
+       clk_register_clkdev(clk[esdhc4_ipg_gate], "ipg", "sdhci-esdhc-imx51.3");
+       clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.3");
+       clk_register_clkdev(clk[esdhc4_per_gate], "per", "sdhci-esdhc-imx51.3");
+       clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "83fcc000.ssi");
+       clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "70014000.ssi");
+       clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "83fe8000.ssi");
+
+       /* set the usboh3 parent to pll2_sw */
+       clk_set_parent(clk[usboh3_sel], clk[pll2_sw]);
+
+       /* set SDHC root clock to 166.25MHZ*/
+       clk_set_rate(clk[esdhc_a_podf], 166250000);
+       clk_set_rate(clk[esdhc_b_podf], 166250000);
+
+       /* System timer */
+       mxc_timer_init(NULL, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
+               MX51_INT_GPT);
+
+       /* IIM must be clocked only while the silicon revision is read */
+       clk_prepare_enable(clk[iim_gate]);
+       imx_print_silicon_rev("i.MX51", mx51_revision());
+       clk_disable_unprepare(clk[iim_gate]);
+
+       return 0;
+}
+
+/*
+ * mx53_clocks_init - register the i.MX53-specific clock tree
+ * @rate_ckil:  rate of the CKIL (low-frequency) oscillator in Hz
+ * @rate_osc:   rate of the main oscillator in Hz
+ * @rate_ckih1: rate of the CKIH1 oscillator in Hz
+ * @rate_ckih2: rate of the CKIH2 oscillator in Hz
+ *
+ * Registers the i.MX53 PLLs (including PLL4, absent on i.MX51), the
+ * LDB/IPU/TVE/eSDHC clocks whose wiring differs from i.MX51, then
+ * delegates shared clocks to mx5_clocks_common_init() and installs
+ * clkdev lookups, default rates and the GPT system timer.
+ * Always returns 0.
+ */
+int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
+                       unsigned long rate_ckih1, unsigned long rate_ckih2)
+{
+       int i;
+       unsigned long r;
+
+       clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX53_DPLL1_BASE);
+       clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX53_DPLL2_BASE);
+       clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX53_DPLL3_BASE);
+       clk[pll4_sw] = imx_clk_pllv2("pll4_sw", "osc", MX53_DPLL4_BASE);
+
+       /* LDB display clocks: fixed 3.5 divider (x2/7) ahead of the CCM divider */
+       clk[ldb_di1_sel] = imx_clk_mux("ldb_di1_sel", MXC_CCM_CSCMR2, 9, 1,
+                               mx53_ldb_di1_sel, ARRAY_SIZE(mx53_ldb_di1_sel));
+       clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+       clk[ldb_di1_div] = imx_clk_divider("ldb_di1_div", "ldb_di1_div_3_5", MXC_CCM_CSCMR2, 11, 1);
+       clk[di_pll4_podf] = imx_clk_divider("di_pll4_podf", "pll4_sw", MXC_CCM_CDCDR, 16, 3);
+       clk[ldb_di0_sel] = imx_clk_mux("ldb_di0_sel", MXC_CCM_CSCMR2, 8, 1,
+                               mx53_ldb_di0_sel, ARRAY_SIZE(mx53_ldb_di0_sel));
+       clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+       clk[ldb_di0_div] = imx_clk_divider("ldb_di0_div", "ldb_di0_div_3_5", MXC_CCM_CSCMR2, 10, 1);
+       clk[ldb_di0_gate] = imx_clk_gate2("ldb_di0_gate", "ldb_di0_div", MXC_CCM_CCGR6, 28);
+       clk[ldb_di1_gate] = imx_clk_gate2("ldb_di1_gate", "ldb_di1_div", MXC_CCM_CCGR6, 30);
+       clk[ipu_di0_sel] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
+                               mx53_ipu_di0_sel, ARRAY_SIZE(mx53_ipu_di0_sel));
+       clk[ipu_di1_sel] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
+                               mx53_ipu_di1_sel, ARRAY_SIZE(mx53_ipu_di1_sel));
+       clk[tve_ext_sel] = imx_clk_mux("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
+                               mx53_tve_ext_sel, ARRAY_SIZE(mx53_tve_ext_sel));
+       clk[tve_gate] = imx_clk_gate2("tve_gate", "tve_pred", MXC_CCM_CCGR2, 30);
+       clk[tve_pred] = imx_clk_divider("tve_pred", "tve_ext_sel", MXC_CCM_CDCDR, 28, 3);
+       clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
+       clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6);
+       clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10);
+       clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
+       clk[usb_phy1_gate] = imx_clk_gate2("usb_phy1_gate", "usb_phy_sel", MXC_CCM_CCGR4, 10);
+       clk[usb_phy2_gate] = imx_clk_gate2("usb_phy2_gate", "usb_phy_sel", MXC_CCM_CCGR4, 12);
+       clk[can2_serial_gate] = imx_clk_gate2("can2_serial_gate", "ipg", MXC_CCM_CCGR4, 6);
+       clk[can2_ipg_gate] = imx_clk_gate2("can2_ipg_gate", "ipg", MXC_CCM_CCGR4, 8);
+       clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per_root", MXC_CCM_CCGR1, 22);
+
+       /* Report (but tolerate) any clock that failed to register. */
+       for (i = 0; i < ARRAY_SIZE(clk); i++)
+               if (IS_ERR(clk[i]))
+                       pr_err("i.MX53 clk %d: register failed with %ld\n",
+                               i, PTR_ERR(clk[i]));
+
+       mx5_clocks_common_init(rate_ckil, rate_osc, rate_ckih1, rate_ckih2);
+
+       /* clkdev lookups for non-DT (platform-device) consumers */
+       clk_register_clkdev(clk[vpu_gate], NULL, "imx53-vpu.0");
+       clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
+       clk_register_clkdev(clk[fec_gate], NULL, "imx25-fec.0");
+       clk_register_clkdev(clk[ipu_gate], "bus", "imx53-ipu");
+       clk_register_clkdev(clk[ipu_di0_gate], "di0", "imx53-ipu");
+       clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx53-ipu");
+       clk_register_clkdev(clk[ipu_gate], "hsp", "imx53-ipu");
+       clk_register_clkdev(clk[usb_phy1_gate], "usb_phy1", "mxc-ehci.0");
+       clk_register_clkdev(clk[esdhc1_ipg_gate], "ipg", "sdhci-esdhc-imx53.0");
+       clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.0");
+       clk_register_clkdev(clk[esdhc1_per_gate], "per", "sdhci-esdhc-imx53.0");
+       clk_register_clkdev(clk[esdhc2_ipg_gate], "ipg", "sdhci-esdhc-imx53.1");
+       clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.1");
+       clk_register_clkdev(clk[esdhc2_per_gate], "per", "sdhci-esdhc-imx53.1");
+       clk_register_clkdev(clk[esdhc3_ipg_gate], "ipg", "sdhci-esdhc-imx53.2");
+       clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.2");
+       clk_register_clkdev(clk[esdhc3_per_gate], "per", "sdhci-esdhc-imx53.2");
+       clk_register_clkdev(clk[esdhc4_ipg_gate], "ipg", "sdhci-esdhc-imx53.3");
+       clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.3");
+       clk_register_clkdev(clk[esdhc4_per_gate], "per", "sdhci-esdhc-imx53.3");
+       clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "63fcc000.ssi");
+       clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "50014000.ssi");
+       clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "63fd0000.ssi");
+
+       /* set SDHC root clock to 200MHZ*/
+       clk_set_rate(clk[esdhc_a_podf], 200000000);
+       clk_set_rate(clk[esdhc_b_podf], 200000000);
+
+       /* System timer */
+       mxc_timer_init(NULL, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
+               MX53_INT_GPT);
+
+       /* IIM must be clocked only while the silicon revision is read */
+       clk_prepare_enable(clk[iim_gate]);
+       imx_print_silicon_rev("i.MX53", mx53_revision());
+       clk_disable_unprepare(clk[iim_gate]);
+
+       /* set usboh3_per as close to 54 MHz as the divider allows */
+       r = clk_round_rate(clk[usboh3_per_gate], 54000000);
+       clk_set_rate(clk[usboh3_per_gate], r);
+
+       return 0;
+}
+
+#ifdef CONFIG_OF
+/*
+ * clk_get_freq_dt - read the fixed-clock rates from the device tree
+ * @ckil:  output for the CKIL rate in Hz
+ * @osc:   output for the main oscillator rate in Hz
+ * @ckih1: output for the CKIH1 rate in Hz
+ * @ckih2: output for the CKIH2 rate in Hz
+ *
+ * Walks every "fixed-clock" node and copies its "clock-frequency"
+ * property into the matching output based on the node's fsl,imx-*
+ * compatible.  An output whose node is missing (or lacks the property)
+ * is left untouched, so callers must initialize all four variables
+ * before calling.
+ */
+static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
+                                  unsigned long *ckih1, unsigned long *ckih2)
+{
+       struct device_node *np;
+
+       /* retrieve the freqency of fixed clocks from device tree */
+       for_each_compatible_node(np, NULL, "fixed-clock") {
+               u32 rate;
+               if (of_property_read_u32(np, "clock-frequency", &rate))
+                       continue;
+
+               if (of_device_is_compatible(np, "fsl,imx-ckil"))
+                       *ckil = rate;
+               else if (of_device_is_compatible(np, "fsl,imx-osc"))
+                       *osc = rate;
+               else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
+                       *ckih1 = rate;
+               else if (of_device_is_compatible(np, "fsl,imx-ckih2"))
+                       *ckih2 = rate;
+       }
+}
+
+/*
+ * mx51_clocks_init_dt - DT entry point for the i.MX51 clock setup
+ *
+ * Reads the fixed-clock rates from the device tree and hands them to
+ * mx51_clocks_init().  The locals are zero-initialized because
+ * clk_get_freq_dt() only writes rates it actually finds; without the
+ * initializers a missing fixed-clock node would make mx51_clocks_init()
+ * consume an uninitialized (indeterminate) value.
+ */
+int __init mx51_clocks_init_dt(void)
+{
+       unsigned long ckil = 0, osc = 0, ckih1 = 0, ckih2 = 0;
+
+       clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
+       return mx51_clocks_init(ckil, osc, ckih1, ckih2);
+}
+
+/*
+ * mx53_clocks_init_dt - DT entry point for the i.MX53 clock setup
+ *
+ * Reads the fixed-clock rates from the device tree and hands them to
+ * mx53_clocks_init().  The locals are zero-initialized because
+ * clk_get_freq_dt() only writes rates it actually finds; without the
+ * initializers a missing fixed-clock node would make mx53_clocks_init()
+ * consume an uninitialized (indeterminate) value.
+ */
+int __init mx53_clocks_init_dt(void)
+{
+       unsigned long ckil = 0, osc = 0, ckih1 = 0, ckih2 = 0;
+
+       clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
+       return mx53_clocks_init(ckil, osc, ckih1, ckih2);
+}
+#endif
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
new file mode 100644 (file)
index 0000000..cab02d0
--- /dev/null
@@ -0,0 +1,444 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <mach/common.h>
+#include "clk.h"
+
+#define CCGR0                          0x68
+#define CCGR1                          0x6c
+#define CCGR2                          0x70
+#define CCGR3                          0x74
+#define CCGR4                          0x78
+#define CCGR5                          0x7c
+#define CCGR6                          0x80
+#define CCGR7                          0x84
+
+#define CLPCR                          0x54
+#define BP_CLPCR_LPM                   0
+#define BM_CLPCR_LPM                   (0x3 << 0)
+#define BM_CLPCR_BYPASS_PMIC_READY     (0x1 << 2)
+#define BM_CLPCR_ARM_CLK_DIS_ON_LPM    (0x1 << 5)
+#define BM_CLPCR_SBYOS                 (0x1 << 6)
+#define BM_CLPCR_DIS_REF_OSC           (0x1 << 7)
+#define BM_CLPCR_VSTBY                 (0x1 << 8)
+#define BP_CLPCR_STBY_COUNT            9
+#define BM_CLPCR_STBY_COUNT            (0x3 << 9)
+#define BM_CLPCR_COSC_PWRDOWN          (0x1 << 11)
+#define BM_CLPCR_WB_PER_AT_LPM         (0x1 << 16)
+#define BM_CLPCR_WB_CORE_AT_LPM                (0x1 << 17)
+#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS   (0x1 << 19)
+#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS   (0x1 << 21)
+#define BM_CLPCR_MASK_CORE0_WFI                (0x1 << 22)
+#define BM_CLPCR_MASK_CORE1_WFI                (0x1 << 23)
+#define BM_CLPCR_MASK_CORE2_WFI                (0x1 << 24)
+#define BM_CLPCR_MASK_CORE3_WFI                (0x1 << 25)
+#define BM_CLPCR_MASK_SCU_IDLE         (0x1 << 26)
+#define BM_CLPCR_MASK_L2CC_IDLE                (0x1 << 27)
+
+/* CCM register base; ioremapped from the DT node in mx6q_clocks_init() */
+static void __iomem *ccm_base;
+
+/* Nothing to map statically: the CCM is ioremapped via of_iomap() later. */
+void __init imx6q_clock_map_io(void) { }
+
+/*
+ * imx6q_set_lpm - program the CCM low-power mode register (CLPCR)
+ * @mode: target CPU power mode
+ *
+ * Performs a read-modify-write of CLPCR: the LPM field is always
+ * cleared first, then set according to @mode, with the standby
+ * voltage/oscillator bits (VSTBY, SBYOS) and the standby count
+ * adjusted for the power-off variants.
+ *
+ * Returns 0 on success or -EINVAL for an unknown @mode (in which case
+ * the register is left unchanged).
+ */
+int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
+{
+       u32 val = readl_relaxed(ccm_base + CLPCR);
+
+       val &= ~BM_CLPCR_LPM;
+       switch (mode) {
+       case WAIT_CLOCKED:
+               /* LPM = 0: remain in run/wait with clocks on */
+               break;
+       case WAIT_UNCLOCKED:
+               val |= 0x1 << BP_CLPCR_LPM;
+               break;
+       case STOP_POWER_ON:
+               val |= 0x2 << BP_CLPCR_LPM;
+               break;
+       case WAIT_UNCLOCKED_POWER_OFF:
+               val |= 0x1 << BP_CLPCR_LPM;
+               val &= ~BM_CLPCR_VSTBY;
+               val &= ~BM_CLPCR_SBYOS;
+               break;
+       case STOP_POWER_OFF:
+               /* deepest mode: max standby count, standby voltage and
+                * oscillator power-down enabled */
+               val |= 0x2 << BP_CLPCR_LPM;
+               val |= 0x3 << BP_CLPCR_STBY_COUNT;
+               val |= BM_CLPCR_VSTBY;
+               val |= BM_CLPCR_SBYOS;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       writel_relaxed(val, ccm_base + CLPCR);
+
+       return 0;
+}
+
+/*
+ * Parent-name tables for the i.MX6Q clock muxes registered in
+ * mx6q_clocks_init().  Order matters: each index corresponds to the
+ * mux's hardware select value.  "dummy" marks reserved/unused inputs.
+ */
+static const char *step_sels[] = { "osc", "pll2_pfd2_396m", };
+static const char *pll1_sw_sels[]      = { "pll1_sys", "step", };
+static const char *periph_pre_sels[]   = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
+static const char *periph_clk2_sels[]  = { "pll3_usb_otg", "osc", };
+static const char *periph_sels[]       = { "periph_pre", "periph_clk2", };
+static const char *periph2_sels[]      = { "periph2_pre", "periph2_clk2", };
+static const char *axi_sels[]          = { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *audio_sels[]        = { "pll4_audio", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", };
+static const char *gpu_axi_sels[]      = { "axi", "ahb", };
+static const char *gpu2d_core_sels[]   = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
+static const char *gpu3d_core_sels[]   = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
+static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
+static const char *ipu_sels[]          = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
+static const char *ldb_di_sels[]       = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *ipu_di_pre_sels[]   = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *ipu1_di0_sels[]     = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *ipu1_di1_sels[]     = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *ipu2_di0_sels[]     = { "ipu2_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *ipu2_di1_sels[]     = { "ipu2_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *hsi_tx_sels[]       = { "pll3_120m", "pll2_pfd2_396m", };
+static const char *pcie_axi_sels[]     = { "axi", "ahb", };
+static const char *ssi_sels[]          = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_audio", };
+static const char *usdhc_sels[]        = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", };
+static const char *emi_sels[]          = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *vdo_axi_sels[]      = { "axi", "ahb", };
+static const char *vpu_axi_sels[]      = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video",
+                                   "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
+                                   "ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio", };
+
+/*
+ * Clocks to be kept enabled from init onward (consumed later in this
+ * file, outside this hunk) -- presumably the DDR buses and USB, which
+ * must never be gated; TODO confirm against mx6q_clocks_init().
+ */
+static const char * const clks_init_on[] __initconst = {
+       "mmdc_ch0_axi", "mmdc_ch1_axi", "usboh3",
+};
+
+/*
+ * Index of every i.MX6Q clock in the clk[] array below; clk_max is the
+ * array size.  New entries must be appended so existing indices stay
+ * stable.
+ */
+enum mx6q_clks {
+       dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m,
+       pll3_pfd0_720m, pll3_pfd1_540m, pll3_pfd2_508m, pll3_pfd3_454m,
+       pll2_198m, pll3_120m, pll3_80m, pll3_60m, twd, step, pll1_sw,
+       periph_pre, periph2_pre, periph_clk2_sel, periph2_clk2_sel, axi_sel,
+       esai_sel, asrc_sel, spdif_sel, gpu2d_axi, gpu3d_axi, gpu2d_core_sel,
+       gpu3d_core_sel, gpu3d_shader_sel, ipu1_sel, ipu2_sel, ldb_di0_sel,
+       ldb_di1_sel, ipu1_di0_pre_sel, ipu1_di1_pre_sel, ipu2_di0_pre_sel,
+       ipu2_di1_pre_sel, ipu1_di0_sel, ipu1_di1_sel, ipu2_di0_sel,
+       ipu2_di1_sel, hsi_tx_sel, pcie_axi_sel, ssi1_sel, ssi2_sel, ssi3_sel,
+       usdhc1_sel, usdhc2_sel, usdhc3_sel, usdhc4_sel, enfc_sel, emi_sel,
+       emi_slow_sel, vdo_axi_sel, vpu_axi_sel, cko1_sel, periph, periph2,
+       periph_clk2, periph2_clk2, ipg, ipg_per, esai_pred, esai_podf,
+       asrc_pred, asrc_podf, spdif_pred, spdif_podf, can_root, ecspi_root,
+       gpu2d_core_podf, gpu3d_core_podf, gpu3d_shader, ipu1_podf, ipu2_podf,
+       ldb_di0_podf, ldb_di1_podf, ipu1_di0_pre, ipu1_di1_pre, ipu2_di0_pre,
+       ipu2_di1_pre, hsi_tx_podf, ssi1_pred, ssi1_podf, ssi2_pred, ssi2_podf,
+       ssi3_pred, ssi3_podf, uart_serial_podf, usdhc1_podf, usdhc2_podf,
+       usdhc3_podf, usdhc4_podf, enfc_pred, enfc_podf, emi_podf,
+       emi_slow_podf, vpu_axi_podf, cko1_podf, axi, mmdc_ch0_axi_podf,
+       mmdc_ch1_axi_podf, arm, ahb, apbh_dma, asrc, can1_ipg, can1_serial,
+       can2_ipg, can2_serial, ecspi1, ecspi2, ecspi3, ecspi4, ecspi5, enet,
+       esai, gpt_ipg, gpt_ipg_per, gpu2d_core, gpu3d_core, hdmi_iahb,
+       hdmi_isfr, i2c1, i2c2, i2c3, iim, enfc, ipu1, ipu1_di0, ipu1_di1, ipu2,
+       ipu2_di0, ldb_di0, ldb_di1, ipu2_di1, hsi_tx, mlb, mmdc_ch0_axi,
+       mmdc_ch1_axi, ocram, openvg_axi, pcie_axi, pwm1, pwm2, pwm3, pwm4,
+       gpmi_bch_apb, gpmi_bch, gpmi_io, gpmi_apb, sata, sdma, spba, ssi1,
+       ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
+       usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
+       pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
+       ssi2_ipg, ssi3_ipg, clk_max
+};
+
+/* All registered i.MX6Q clocks, indexed by enum mx6q_clks */
+static struct clk *clk[clk_max];
+
+int __init mx6q_clocks_init(void)
+{
+       struct device_node *np;
+       void __iomem *base;
+       struct clk *c;
+       int i, irq;
+
+       clk[dummy] = imx_clk_fixed("dummy", 0);
+
+       /* retrieve the freqency of fixed clocks from device tree */
+       for_each_compatible_node(np, NULL, "fixed-clock") {
+               u32 rate;
+               if (of_property_read_u32(np, "clock-frequency", &rate))
+                       continue;
+
+               if (of_device_is_compatible(np, "fsl,imx-ckil"))
+                       clk[ckil] = imx_clk_fixed("ckil", rate);
+               else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
+                       clk[ckih] = imx_clk_fixed("ckih", rate);
+               else if (of_device_is_compatible(np, "fsl,imx-osc"))
+                       clk[osc] = imx_clk_fixed("osc", rate);
+       }
+
+       np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
+       base = of_iomap(np, 0);
+       WARN_ON(!base);
+
+       /*                   type                               name         parent_name  base     gate_mask div_mask */
+       clk[pll1_sys]      = imx_clk_pllv3(IMX_PLLV3_SYS,       "pll1_sys",     "osc", base,        0x2000,   0x7f);
+       clk[pll2_bus]      = imx_clk_pllv3(IMX_PLLV3_GENERIC,   "pll2_bus",     "osc", base + 0x30, 0x2000,   0x1);
+       clk[pll3_usb_otg]  = imx_clk_pllv3(IMX_PLLV3_USB,       "pll3_usb_otg", "osc", base + 0x10, 0x2000,   0x3);
+       clk[pll4_audio]    = imx_clk_pllv3(IMX_PLLV3_AV,        "pll4_audio",   "osc", base + 0x70, 0x2000,   0x7f);
+       clk[pll5_video]    = imx_clk_pllv3(IMX_PLLV3_AV,        "pll5_video",   "osc", base + 0xa0, 0x2000,   0x7f);
+       clk[pll6_mlb]      = imx_clk_pllv3(IMX_PLLV3_MLB,       "pll6_mlb",     "osc", base + 0xd0, 0x2000,   0x0);
+       clk[pll7_usb_host] = imx_clk_pllv3(IMX_PLLV3_USB,       "pll7_usb_host","osc", base + 0x20, 0x2000,   0x3);
+       clk[pll8_enet]     = imx_clk_pllv3(IMX_PLLV3_ENET,      "pll8_enet",    "osc", base + 0xe0, 0x182000, 0x3);
+
+       /*                                name              parent_name        reg       idx */
+       clk[pll2_pfd0_352m] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus",     base + 0x100, 0);
+       clk[pll2_pfd1_594m] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus",     base + 0x100, 1);
+       clk[pll2_pfd2_396m] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus",     base + 0x100, 2);
+       clk[pll3_pfd0_720m] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0,  0);
+       clk[pll3_pfd1_540m] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0,  1);
+       clk[pll3_pfd2_508m] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0,  2);
+       clk[pll3_pfd3_454m] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0,  3);
+
+       /*                                    name         parent_name     mult div */
+       clk[pll2_198m] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2);
+       clk[pll3_120m] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg",   1, 4);
+       clk[pll3_80m]  = imx_clk_fixed_factor("pll3_80m",  "pll3_usb_otg",   1, 6);
+       clk[pll3_60m]  = imx_clk_fixed_factor("pll3_60m",  "pll3_usb_otg",   1, 8);
+       clk[twd]       = imx_clk_fixed_factor("twd",       "arm",            1, 2);
+
+       np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ccm");
+       base = of_iomap(np, 0);
+       WARN_ON(!base);
+       ccm_base = base;
+
+       /*                                  name                reg       shift width parent_names     num_parents */
+       clk[step]             = imx_clk_mux("step",             base + 0xc,  8,  1, step_sels,         ARRAY_SIZE(step_sels));
+       clk[pll1_sw]          = imx_clk_mux("pll1_sw",          base + 0xc,  2,  1, pll1_sw_sels,      ARRAY_SIZE(pll1_sw_sels));
+       clk[periph_pre]       = imx_clk_mux("periph_pre",       base + 0x18, 18, 2, periph_pre_sels,   ARRAY_SIZE(periph_pre_sels));
+       clk[periph2_pre]      = imx_clk_mux("periph2_pre",      base + 0x18, 21, 2, periph_pre_sels,   ARRAY_SIZE(periph_pre_sels));
+       clk[periph_clk2_sel]  = imx_clk_mux("periph_clk2_sel",  base + 0x18, 12, 1, periph_clk2_sels,  ARRAY_SIZE(periph_clk2_sels));
+       clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph_clk2_sels,  ARRAY_SIZE(periph_clk2_sels));
+       clk[axi_sel]          = imx_clk_mux("axi_sel",          base + 0x14, 6,  2, axi_sels,          ARRAY_SIZE(axi_sels));
+       clk[esai_sel]         = imx_clk_mux("esai_sel",         base + 0x20, 19, 2, audio_sels,        ARRAY_SIZE(audio_sels));
+       clk[asrc_sel]         = imx_clk_mux("asrc_sel",         base + 0x30, 7,  2, audio_sels,        ARRAY_SIZE(audio_sels));
+       clk[spdif_sel]        = imx_clk_mux("spdif_sel",        base + 0x30, 20, 2, audio_sels,        ARRAY_SIZE(audio_sels));
+       clk[gpu2d_axi]        = imx_clk_mux("gpu2d_axi",        base + 0x18, 0,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
+       clk[gpu3d_axi]        = imx_clk_mux("gpu3d_axi",        base + 0x18, 1,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
+       clk[gpu2d_core_sel]   = imx_clk_mux("gpu2d_core_sel",   base + 0x18, 16, 2, gpu2d_core_sels,   ARRAY_SIZE(gpu2d_core_sels));
+       clk[gpu3d_core_sel]   = imx_clk_mux("gpu3d_core_sel",   base + 0x18, 4,  2, gpu3d_core_sels,   ARRAY_SIZE(gpu3d_core_sels));
+       clk[gpu3d_shader_sel] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8,  2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
+       clk[ipu1_sel]         = imx_clk_mux("ipu1_sel",         base + 0x3c, 9,  2, ipu_sels,          ARRAY_SIZE(ipu_sels));
+       clk[ipu2_sel]         = imx_clk_mux("ipu2_sel",         base + 0x3c, 14, 2, ipu_sels,          ARRAY_SIZE(ipu_sels));
+       clk[ldb_di0_sel]      = imx_clk_mux("ldb_di0_sel",      base + 0x2c, 9,  3, ldb_di_sels,       ARRAY_SIZE(ldb_di_sels));
+       clk[ldb_di1_sel]      = imx_clk_mux("ldb_di1_sel",      base + 0x2c, 12, 3, ldb_di_sels,       ARRAY_SIZE(ldb_di_sels));
+       clk[ipu1_di0_pre_sel] = imx_clk_mux("ipu1_di0_pre_sel", base + 0x34, 6,  3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
+       clk[ipu1_di1_pre_sel] = imx_clk_mux("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
+       clk[ipu2_di0_pre_sel] = imx_clk_mux("ipu2_di0_pre_sel", base + 0x38, 6,  3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
+       clk[ipu2_di1_pre_sel] = imx_clk_mux("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
+       clk[ipu1_di0_sel]     = imx_clk_mux("ipu1_di0_sel",     base + 0x34, 0,  3, ipu1_di0_sels,     ARRAY_SIZE(ipu1_di0_sels));
+       clk[ipu1_di1_sel]     = imx_clk_mux("ipu1_di1_sel",     base + 0x34, 9,  3, ipu1_di1_sels,     ARRAY_SIZE(ipu1_di1_sels));
+       clk[ipu2_di0_sel]     = imx_clk_mux("ipu2_di0_sel",     base + 0x38, 0,  3, ipu2_di0_sels,     ARRAY_SIZE(ipu2_di0_sels));
+       clk[ipu2_di1_sel]     = imx_clk_mux("ipu2_di1_sel",     base + 0x38, 9,  3, ipu2_di1_sels,     ARRAY_SIZE(ipu2_di1_sels));
+       clk[hsi_tx_sel]       = imx_clk_mux("hsi_tx_sel",       base + 0x30, 28, 1, hsi_tx_sels,       ARRAY_SIZE(hsi_tx_sels));
+       clk[pcie_axi_sel]     = imx_clk_mux("pcie_axi_sel",     base + 0x18, 10, 1, pcie_axi_sels,     ARRAY_SIZE(pcie_axi_sels));
+       clk[ssi1_sel]         = imx_clk_mux("ssi1_sel",         base + 0x1c, 10, 2, ssi_sels,          ARRAY_SIZE(ssi_sels));
+       clk[ssi2_sel]         = imx_clk_mux("ssi2_sel",         base + 0x1c, 12, 2, ssi_sels,          ARRAY_SIZE(ssi_sels));
+       clk[ssi3_sel]         = imx_clk_mux("ssi3_sel",         base + 0x1c, 14, 2, ssi_sels,          ARRAY_SIZE(ssi_sels));
+       clk[usdhc1_sel]       = imx_clk_mux("usdhc1_sel",       base + 0x1c, 16, 1, usdhc_sels,        ARRAY_SIZE(usdhc_sels));
+       clk[usdhc2_sel]       = imx_clk_mux("usdhc2_sel",       base + 0x1c, 17, 1, usdhc_sels,        ARRAY_SIZE(usdhc_sels));
+       clk[usdhc3_sel]       = imx_clk_mux("usdhc3_sel",       base + 0x1c, 18, 1, usdhc_sels,        ARRAY_SIZE(usdhc_sels));
+       clk[usdhc4_sel]       = imx_clk_mux("usdhc4_sel",       base + 0x1c, 19, 1, usdhc_sels,        ARRAY_SIZE(usdhc_sels));
+       clk[enfc_sel]         = imx_clk_mux("enfc_sel",         base + 0x2c, 16, 2, enfc_sels,         ARRAY_SIZE(enfc_sels));
+       clk[emi_sel]          = imx_clk_mux("emi_sel",          base + 0x1c, 27, 2, emi_sels,          ARRAY_SIZE(emi_sels));
+       clk[emi_slow_sel]     = imx_clk_mux("emi_slow_sel",     base + 0x1c, 29, 2, emi_sels,          ARRAY_SIZE(emi_sels));
+       clk[vdo_axi_sel]      = imx_clk_mux("vdo_axi_sel",      base + 0x18, 11, 1, vdo_axi_sels,      ARRAY_SIZE(vdo_axi_sels));
+       clk[vpu_axi_sel]      = imx_clk_mux("vpu_axi_sel",      base + 0x18, 14, 2, vpu_axi_sels,      ARRAY_SIZE(vpu_axi_sels));
+       clk[cko1_sel]         = imx_clk_mux("cko1_sel",         base + 0x60, 0,  4, cko1_sels,         ARRAY_SIZE(cko1_sels));
+
+       /*                              name         reg      shift width busy: reg, shift parent_names  num_parents */
+       clk[periph]  = imx_clk_busy_mux("periph",  base + 0x14, 25,  1,   base + 0x48, 5,  periph_sels,  ARRAY_SIZE(periph_sels));
+       clk[periph2] = imx_clk_busy_mux("periph2", base + 0x14, 26,  1,   base + 0x48, 3,  periph2_sels, ARRAY_SIZE(periph2_sels));
+
+       /*                                      name                parent_name          reg       shift width */
+       clk[periph_clk2]      = imx_clk_divider("periph_clk2",      "periph_clk2_sel",   base + 0x14, 27, 3);
+       clk[periph2_clk2]     = imx_clk_divider("periph2_clk2",     "periph2_clk2_sel",  base + 0x14, 0,  3);
+       clk[ipg]              = imx_clk_divider("ipg",              "ahb",               base + 0x14, 8,  2);
+       clk[ipg_per]          = imx_clk_divider("ipg_per",          "ipg",               base + 0x1c, 0,  6);
+       clk[esai_pred]        = imx_clk_divider("esai_pred",        "esai_sel",          base + 0x28, 9,  3);
+       clk[esai_podf]        = imx_clk_divider("esai_podf",        "esai_pred",         base + 0x28, 25, 3);
+       clk[asrc_pred]        = imx_clk_divider("asrc_pred",        "asrc_sel",          base + 0x30, 12, 3);
+       clk[asrc_podf]        = imx_clk_divider("asrc_podf",        "asrc_pred",         base + 0x30, 9,  3);
+       clk[spdif_pred]       = imx_clk_divider("spdif_pred",       "spdif_sel",         base + 0x30, 25, 3);
+       clk[spdif_podf]       = imx_clk_divider("spdif_podf",       "spdif_pred",        base + 0x30, 22, 3);
+       clk[can_root]         = imx_clk_divider("can_root",         "pll3_usb_otg",      base + 0x20, 2,  6);
+       clk[ecspi_root]       = imx_clk_divider("ecspi_root",       "pll3_60m",          base + 0x38, 19, 6);
+       clk[gpu2d_core_podf]  = imx_clk_divider("gpu2d_core_podf",  "gpu2d_core_sel",    base + 0x18, 23, 3);
+       clk[gpu3d_core_podf]  = imx_clk_divider("gpu3d_core_podf",  "gpu3d_core_sel",    base + 0x18, 26, 3);
+       clk[gpu3d_shader]     = imx_clk_divider("gpu3d_shader",     "gpu3d_shader_sel",  base + 0x18, 29, 3);
+       clk[ipu1_podf]        = imx_clk_divider("ipu1_podf",        "ipu1_sel",          base + 0x3c, 11, 3);
+       clk[ipu2_podf]        = imx_clk_divider("ipu2_podf",        "ipu2_sel",          base + 0x3c, 16, 3);
+       clk[ldb_di0_podf]     = imx_clk_divider("ldb_di0_podf",     "ldb_di0_sel",       base + 0x20, 10, 1);
+       clk[ldb_di1_podf]     = imx_clk_divider("ldb_di1_podf",     "ldb_di1_sel",       base + 0x20, 11, 1);
+       clk[ipu1_di0_pre]     = imx_clk_divider("ipu1_di0_pre",     "ipu1_di0_pre_sel",  base + 0x34, 3,  3);
+       clk[ipu1_di1_pre]     = imx_clk_divider("ipu1_di1_pre",     "ipu1_di1_pre_sel",  base + 0x34, 12, 3);
+       clk[ipu2_di0_pre]     = imx_clk_divider("ipu2_di0_pre",     "ipu2_di0_pre_sel",  base + 0x38, 3,  3);
+       clk[ipu2_di1_pre]     = imx_clk_divider("ipu2_di1_pre",     "ipu2_di1_pre_sel",  base + 0x38, 12, 3);
+       clk[hsi_tx_podf]      = imx_clk_divider("hsi_tx_podf",      "hsi_tx_sel",        base + 0x30, 29, 3);
+       clk[ssi1_pred]        = imx_clk_divider("ssi1_pred",        "ssi1_sel",          base + 0x28, 6,  3);
+       clk[ssi1_podf]        = imx_clk_divider("ssi1_podf",        "ssi1_pred",         base + 0x28, 0,  6);
+       clk[ssi2_pred]        = imx_clk_divider("ssi2_pred",        "ssi2_sel",          base + 0x2c, 6,  3);
+       clk[ssi2_podf]        = imx_clk_divider("ssi2_podf",        "ssi2_pred",         base + 0x2c, 0,  6);
+       clk[ssi3_pred]        = imx_clk_divider("ssi3_pred",        "ssi3_sel",          base + 0x28, 22, 3);
+       clk[ssi3_podf]        = imx_clk_divider("ssi3_podf",        "ssi3_pred",         base + 0x28, 16, 6);
+       clk[uart_serial_podf] = imx_clk_divider("uart_serial_podf", "pll3_80m",          base + 0x24, 0,  6);
+       clk[usdhc1_podf]      = imx_clk_divider("usdhc1_podf",      "usdhc1_sel",        base + 0x24, 11, 3);
+       clk[usdhc2_podf]      = imx_clk_divider("usdhc2_podf",      "usdhc2_sel",        base + 0x24, 16, 3);
+       clk[usdhc3_podf]      = imx_clk_divider("usdhc3_podf",      "usdhc3_sel",        base + 0x24, 19, 3);
+       clk[usdhc4_podf]      = imx_clk_divider("usdhc4_podf",      "usdhc4_sel",        base + 0x24, 22, 3);
+       clk[enfc_pred]        = imx_clk_divider("enfc_pred",        "enfc_sel",          base + 0x2c, 18, 3);
+       clk[enfc_podf]        = imx_clk_divider("enfc_podf",        "enfc_pred",         base + 0x2c, 21, 6);
+       clk[emi_podf]         = imx_clk_divider("emi_podf",         "emi_sel",           base + 0x1c, 20, 3);
+       clk[emi_slow_podf]    = imx_clk_divider("emi_slow_podf",    "emi_slow_sel",      base + 0x1c, 23, 3);
+       clk[vpu_axi_podf]     = imx_clk_divider("vpu_axi_podf",     "vpu_axi_sel",       base + 0x24, 25, 3);
+       clk[cko1_podf]        = imx_clk_divider("cko1_podf",        "cko1_sel",          base + 0x60, 4,  3);
+
+       /*                                            name                 parent_name    reg        shift width busy: reg, shift */
+       clk[axi]               = imx_clk_busy_divider("axi",               "axi_sel",     base + 0x14, 16,  3,   base + 0x48, 0);
+       clk[mmdc_ch0_axi_podf] = imx_clk_busy_divider("mmdc_ch0_axi_podf", "periph",      base + 0x14, 19,  3,   base + 0x48, 4);
+       clk[mmdc_ch1_axi_podf] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "periph2",     base + 0x14, 3,   3,   base + 0x48, 2);
+       clk[arm]               = imx_clk_busy_divider("arm",               "pll1_sw",     base + 0x10, 0,   3,   base + 0x48, 16);
+       clk[ahb]               = imx_clk_busy_divider("ahb",               "periph",      base + 0x14, 10,  3,   base + 0x48, 1);
+
+       /*                                name             parent_name          reg         shift */
+       clk[apbh_dma]     = imx_clk_gate2("apbh_dma",      "ahb",               base + 0x68, 4);
+       clk[asrc]         = imx_clk_gate2("asrc",          "asrc_podf",         base + 0x68, 6);
+       clk[can1_ipg]     = imx_clk_gate2("can1_ipg",      "ipg",               base + 0x68, 14);
+       clk[can1_serial]  = imx_clk_gate2("can1_serial",   "can_root",          base + 0x68, 16);
+       clk[can2_ipg]     = imx_clk_gate2("can2_ipg",      "ipg",               base + 0x68, 18);
+       clk[can2_serial]  = imx_clk_gate2("can2_serial",   "can_root",          base + 0x68, 20);
+       clk[ecspi1]       = imx_clk_gate2("ecspi1",        "ecspi_root",        base + 0x6c, 0);
+       clk[ecspi2]       = imx_clk_gate2("ecspi2",        "ecspi_root",        base + 0x6c, 2);
+       clk[ecspi3]       = imx_clk_gate2("ecspi3",        "ecspi_root",        base + 0x6c, 4);
+       clk[ecspi4]       = imx_clk_gate2("ecspi4",        "ecspi_root",        base + 0x6c, 6);
+       clk[ecspi5]       = imx_clk_gate2("ecspi5",        "ecspi_root",        base + 0x6c, 8);
+       clk[enet]         = imx_clk_gate2("enet",          "ipg",               base + 0x6c, 10);
+       clk[esai]         = imx_clk_gate2("esai",          "esai_podf",         base + 0x6c, 16);
+       clk[gpt_ipg]      = imx_clk_gate2("gpt_ipg",       "ipg",               base + 0x6c, 20);
+       clk[gpt_ipg_per]  = imx_clk_gate2("gpt_ipg_per",   "ipg_per",           base + 0x6c, 22);
+       clk[gpu2d_core]   = imx_clk_gate2("gpu2d_core",    "gpu2d_core_podf",   base + 0x6c, 24);
+       clk[gpu3d_core]   = imx_clk_gate2("gpu3d_core",    "gpu3d_core_podf",   base + 0x6c, 26);
+       clk[hdmi_iahb]    = imx_clk_gate2("hdmi_iahb",     "ahb",               base + 0x70, 0);
+       clk[hdmi_isfr]    = imx_clk_gate2("hdmi_isfr",     "pll3_pfd1_540m",    base + 0x70, 4);
+       clk[i2c1]         = imx_clk_gate2("i2c1",          "ipg_per",           base + 0x70, 6);
+       clk[i2c2]         = imx_clk_gate2("i2c2",          "ipg_per",           base + 0x70, 8);
+       clk[i2c3]         = imx_clk_gate2("i2c3",          "ipg_per",           base + 0x70, 10);
+       clk[iim]          = imx_clk_gate2("iim",           "ipg",               base + 0x70, 12);
+       clk[enfc]         = imx_clk_gate2("enfc",          "enfc_podf",         base + 0x70, 14);
+       clk[ipu1]         = imx_clk_gate2("ipu1",          "ipu1_podf",         base + 0x74, 0);
+       clk[ipu1_di0]     = imx_clk_gate2("ipu1_di0",      "ipu1_di0_sel",      base + 0x74, 2);
+       clk[ipu1_di1]     = imx_clk_gate2("ipu1_di1",      "ipu1_di1_sel",      base + 0x74, 4);
+       clk[ipu2]         = imx_clk_gate2("ipu2",          "ipu2_podf",         base + 0x74, 6);
+       clk[ipu2_di0]     = imx_clk_gate2("ipu2_di0",      "ipu2_di0_sel",      base + 0x74, 8);
+       clk[ldb_di0]      = imx_clk_gate2("ldb_di0",       "ldb_di0_podf",      base + 0x74, 12);
+       clk[ldb_di1]      = imx_clk_gate2("ldb_di1",       "ldb_di1_podf",      base + 0x74, 14);
+       clk[ipu2_di1]     = imx_clk_gate2("ipu2_di1",      "ipu2_di1_sel",      base + 0x74, 10);
+       clk[hsi_tx]       = imx_clk_gate2("hsi_tx",        "hsi_tx_podf",       base + 0x74, 16);
+       clk[mlb]          = imx_clk_gate2("mlb",           "pll6_mlb",          base + 0x74, 18);
+       clk[mmdc_ch0_axi] = imx_clk_gate2("mmdc_ch0_axi",  "mmdc_ch0_axi_podf", base + 0x74, 20);
+       clk[mmdc_ch1_axi] = imx_clk_gate2("mmdc_ch1_axi",  "mmdc_ch1_axi_podf", base + 0x74, 22);
+       clk[ocram]        = imx_clk_gate2("ocram",         "ahb",               base + 0x74, 28);
+       clk[openvg_axi]   = imx_clk_gate2("openvg_axi",    "axi",               base + 0x74, 30);
+       clk[pcie_axi]     = imx_clk_gate2("pcie_axi",      "pcie_axi_sel",      base + 0x78, 0);
+       clk[pwm1]         = imx_clk_gate2("pwm1",          "ipg_per",           base + 0x78, 16);
+       clk[pwm2]         = imx_clk_gate2("pwm2",          "ipg_per",           base + 0x78, 18);
+       clk[pwm3]         = imx_clk_gate2("pwm3",          "ipg_per",           base + 0x78, 20);
+       clk[pwm4]         = imx_clk_gate2("pwm4",          "ipg_per",           base + 0x78, 22);
+       clk[gpmi_bch_apb] = imx_clk_gate2("gpmi_bch_apb",  "usdhc3",            base + 0x78, 24);
+       clk[gpmi_bch]     = imx_clk_gate2("gpmi_bch",      "usdhc4",            base + 0x78, 26);
+       clk[gpmi_io]      = imx_clk_gate2("gpmi_io",       "enfc",              base + 0x78, 28);
+       clk[gpmi_apb]     = imx_clk_gate2("gpmi_apb",      "usdhc3",            base + 0x78, 30);
+       clk[sata]         = imx_clk_gate2("sata",          "ipg",               base + 0x7c, 4);
+       clk[sdma]         = imx_clk_gate2("sdma",          "ahb",               base + 0x7c, 6);
+       clk[spba]         = imx_clk_gate2("spba",          "ipg",               base + 0x7c, 12);
+       clk[ssi1_ipg]     = imx_clk_gate2("ssi1_ipg",      "ipg",               base + 0x7c, 18);
+       clk[ssi2_ipg]     = imx_clk_gate2("ssi2_ipg",      "ipg",               base + 0x7c, 20);
+       clk[ssi3_ipg]     = imx_clk_gate2("ssi3_ipg",      "ipg",               base + 0x7c, 22);
+       clk[uart_ipg]     = imx_clk_gate2("uart_ipg",      "ipg",               base + 0x7c, 24);
+       clk[uart_serial]  = imx_clk_gate2("uart_serial",   "uart_serial_podf",  base + 0x7c, 26);
+       clk[usboh3]       = imx_clk_gate2("usboh3",        "ipg",               base + 0x80, 0);
+       clk[usdhc1]       = imx_clk_gate2("usdhc1",        "usdhc1_podf",       base + 0x80, 2);
+       clk[usdhc2]       = imx_clk_gate2("usdhc2",        "usdhc2_podf",       base + 0x80, 4);
+       clk[usdhc3]       = imx_clk_gate2("usdhc3",        "usdhc3_podf",       base + 0x80, 6);
+       clk[usdhc4]       = imx_clk_gate2("usdhc4",        "usdhc4_podf",       base + 0x80, 8);
+       clk[vdo_axi]      = imx_clk_gate2("vdo_axi",       "vdo_axi_sel",       base + 0x80, 12);
+       clk[vpu_axi]      = imx_clk_gate2("vpu_axi",       "vpu_axi_podf",      base + 0x80, 14);
+       clk[cko1]         = imx_clk_gate("cko1",           "cko1_podf",         base + 0x60, 7);
+
+       for (i = 0; i < ARRAY_SIZE(clk); i++)
+               if (IS_ERR(clk[i]))
+                       pr_err("i.MX6q clk %d: register failed with %ld\n",
+                               i, PTR_ERR(clk[i]));
+
+       clk_register_clkdev(clk[mmdc_ch0_axi], NULL, "mmdc_ch0_axi");
+       clk_register_clkdev(clk[mmdc_ch1_axi], NULL, "mmdc_ch1_axi");
+       clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
+       clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
+       clk_register_clkdev(clk[twd], NULL, "smp_twd");
+       clk_register_clkdev(clk[usboh3], NULL, "usboh3");
+       clk_register_clkdev(clk[uart_serial], "per", "2020000.serial");
+       clk_register_clkdev(clk[uart_ipg], "ipg", "2020000.serial");
+       clk_register_clkdev(clk[uart_serial], "per", "21e8000.serial");
+       clk_register_clkdev(clk[uart_ipg], "ipg", "21e8000.serial");
+       clk_register_clkdev(clk[uart_serial], "per", "21ec000.serial");
+       clk_register_clkdev(clk[uart_ipg], "ipg", "21ec000.serial");
+       clk_register_clkdev(clk[uart_serial], "per", "21f0000.serial");
+       clk_register_clkdev(clk[uart_ipg], "ipg", "21f0000.serial");
+       clk_register_clkdev(clk[uart_serial], "per", "21f4000.serial");
+       clk_register_clkdev(clk[uart_ipg], "ipg", "21f4000.serial");
+       clk_register_clkdev(clk[enet], NULL, "2188000.ethernet");
+       clk_register_clkdev(clk[usdhc1], NULL, "2190000.usdhc");
+       clk_register_clkdev(clk[usdhc2], NULL, "2194000.usdhc");
+       clk_register_clkdev(clk[usdhc3], NULL, "2198000.usdhc");
+       clk_register_clkdev(clk[usdhc4], NULL, "219c000.usdhc");
+       clk_register_clkdev(clk[i2c1], NULL, "21a0000.i2c");
+       clk_register_clkdev(clk[i2c2], NULL, "21a4000.i2c");
+       clk_register_clkdev(clk[i2c3], NULL, "21a8000.i2c");
+       clk_register_clkdev(clk[ecspi1], NULL, "2008000.ecspi");
+       clk_register_clkdev(clk[ecspi2], NULL, "200c000.ecspi");
+       clk_register_clkdev(clk[ecspi3], NULL, "2010000.ecspi");
+       clk_register_clkdev(clk[ecspi4], NULL, "2014000.ecspi");
+       clk_register_clkdev(clk[ecspi5], NULL, "2018000.ecspi");
+       clk_register_clkdev(clk[sdma], NULL, "20ec000.sdma");
+       clk_register_clkdev(clk[dummy], NULL, "20bc000.wdog");
+       clk_register_clkdev(clk[dummy], NULL, "20c0000.wdog");
+       clk_register_clkdev(clk[ssi1_ipg], NULL, "2028000.ssi");
+       clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
+       clk_register_clkdev(clk[ahb], "ahb", NULL);
+       clk_register_clkdev(clk[cko1], "cko1", NULL);
+
+       for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) {
+               c = clk_get_sys(clks_init_on[i], NULL);
+               if (IS_ERR(c)) {
+                       pr_err("%s: failed to get clk %s", __func__,
+                              clks_init_on[i]);
+                       return PTR_ERR(c);
+               }
+               clk_prepare_enable(c);
+       }
+
+       np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
+       base = of_iomap(np, 0);
+       WARN_ON(!base);
+       irq = irq_of_parse_and_map(np, 0);
+       mxc_timer_init(NULL, base, irq);
+
+       return 0;
+}
diff --git a/arch/arm/mach-imx/clk-pfd.c b/arch/arm/mach-imx/clk-pfd.c
new file mode 100644 (file)
index 0000000..e2ed416
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include "clk.h"
+
+/**
+ * struct clk_pfd - IMX PFD clock
+ * @clk_hw:    clock source
+ * @reg:       PFD register address
+ * @idx:       the index of PFD encoded in the register
+ *
+ * PFD clock found on i.MX6 series.  Each register for PFD has 4 clk_pfd
+ * data encoded, and member idx is used to specify the one.  And each
+ * register has SET, CLR and TOG registers at offset 0x4 0x8 and 0xc.
+ */
+struct clk_pfd {
+       struct clk_hw   hw;
+       void __iomem    *reg;
+       u8              idx;
+};
+
+#define to_clk_pfd(_hw) container_of(_hw, struct clk_pfd, hw)
+
+/* Offsets of the self-clearing SET/CLR/TOG shadow registers from @reg. */
+#define SET    0x4
+#define CLR    0x8
+#define OTG    0xc     /* NOTE(review): likely a typo for "TOG" (toggle, per the
+                        * kernel-doc above); the macro is unused in this file. */
+
+/*
+ * Ungate this PFD slice.  The gate bit is the top bit of the slice's
+ * byte (bit 7/15/23/31 for idx 0..3); writing it to the CLR shadow
+ * register clears it without touching the other slices.
+ */
+static int clk_pfd_enable(struct clk_hw *hw)
+{
+       struct clk_pfd *pfd = to_clk_pfd(hw);
+
+       writel_relaxed(1 << ((pfd->idx + 1) * 8 - 1), pfd->reg + CLR);
+
+       return 0;
+}
+
+/*
+ * Gate this PFD slice by setting its gate bit through the SET shadow
+ * register (mirror of clk_pfd_enable()).
+ */
+static void clk_pfd_disable(struct clk_hw *hw)
+{
+       struct clk_pfd *pfd = to_clk_pfd(hw);
+
+       writel_relaxed(1 << ((pfd->idx + 1) * 8 - 1), pfd->reg + SET);
+}
+
+/*
+ * PFD output = parent_rate * 18 / frac, where frac is the 6-bit
+ * fractional-divider field at bits [idx*8+5 : idx*8] of the register.
+ * NOTE(review): a frac value of 0 read from hardware would make
+ * do_div() divide by zero; set_rate() below clamps writes to 12..35,
+ * so this presumably never happens once programmed — confirm for the
+ * power-on register state.
+ */
+static unsigned long clk_pfd_recalc_rate(struct clk_hw *hw,
+                                        unsigned long parent_rate)
+{
+       struct clk_pfd *pfd = to_clk_pfd(hw);
+       u64 tmp = parent_rate;
+       u8 frac = (readl_relaxed(pfd->reg) >> (pfd->idx * 8)) & 0x3f;
+
+       tmp *= 18;
+       do_div(tmp, frac);
+
+       return tmp;
+}
+
+/*
+ * Report the closest achievable rate: frac = round(parent * 18 / rate),
+ * clamped to the legal range [12, 35], then parent * 18 / frac is
+ * returned to the framework.
+ */
+static long clk_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
+                              unsigned long *prate)
+{
+       u64 tmp = *prate;
+       u8 frac;
+
+       /* + rate/2 rounds the division to nearest instead of down */
+       tmp = tmp * 18 + rate / 2;
+       do_div(tmp, rate);
+       frac = tmp;
+       if (frac < 12)
+               frac = 12;
+       else if (frac > 35)
+               frac = 35;
+       tmp = *prate;
+       tmp *= 18;
+       do_div(tmp, frac);
+
+       return tmp;
+}
+
+/*
+ * Program the 6-bit frac field for this slice.  frac is computed and
+ * clamped exactly as in clk_pfd_round_rate(); the field is first
+ * cleared through the CLR shadow register and then written through
+ * SET, leaving the other three slices in the register untouched.
+ */
+static int clk_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct clk_pfd *pfd = to_clk_pfd(hw);
+       u64 tmp = parent_rate;
+       u8 frac;
+
+       tmp = tmp * 18 + rate / 2;
+       do_div(tmp, rate);
+       frac = tmp;
+       if (frac < 12)
+               frac = 12;
+       else if (frac > 35)
+               frac = 35;
+
+       writel_relaxed(0x3f << (pfd->idx * 8), pfd->reg + CLR);
+       writel_relaxed(frac << (pfd->idx * 8), pfd->reg + SET);
+
+       return 0;
+}
+
+/* PFD slices are gateable and rate-settable within the frac 12..35 range. */
+static const struct clk_ops clk_pfd_ops = {
+       .enable         = clk_pfd_enable,
+       .disable        = clk_pfd_disable,
+       .recalc_rate    = clk_pfd_recalc_rate,
+       .round_rate     = clk_pfd_round_rate,
+       .set_rate       = clk_pfd_set_rate,
+};
+
+/*
+ * Register one PFD output with the common clock framework.
+ * @name:        clock name
+ * @parent_name: name of the PLL feeding this PFD
+ * @reg:         address of the PFD register (shared by up to 4 slices)
+ * @idx:         which of the 4 slices in @reg this clock controls
+ *
+ * Returns the registered clk, or an ERR_PTR on failure (the pfd
+ * allocation is freed on registration error).  &init may live on the
+ * stack: clk_register() is expected to consume it during registration —
+ * standard pattern for this framework version.
+ */
+struct clk *imx_clk_pfd(const char *name, const char *parent_name,
+                       void __iomem *reg, u8 idx)
+{
+       struct clk_pfd *pfd;
+       struct clk *clk;
+       struct clk_init_data init;
+
+       pfd = kzalloc(sizeof(*pfd), GFP_KERNEL);
+       if (!pfd)
+               return ERR_PTR(-ENOMEM);
+
+       pfd->reg = reg;
+       pfd->idx = idx;
+
+       init.name = name;
+       init.ops = &clk_pfd_ops;
+       init.flags = 0;
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+
+       pfd->hw.init = &init;
+
+       clk = clk_register(NULL, &pfd->hw);
+       if (IS_ERR(clk))
+               kfree(pfd);
+
+       return clk;
+}
diff --git a/arch/arm/mach-imx/clk-pllv1.c b/arch/arm/mach-imx/clk-pllv1.c
new file mode 100644 (file)
index 0000000..2d856f9
--- /dev/null
@@ -0,0 +1,66 @@
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <mach/common.h>
+#include <mach/hardware.h>
+#include <mach/clock.h>
+#include "clk.h"
+
+/**
+ * pll v1
+ *
+ * @clk_hw     clock source
+ * @parent     the parent clock name
+ * @base       base address of pll registers
+ *
+ * PLL clock version 1, found on i.MX1/21/25/27/31/35
+ */
+struct clk_pllv1 {
+       struct clk_hw   hw;
+       void __iomem    *base;
+};
+
+/*
+ * NOTE(review): the macro parameter is named "clk" and the same token
+ * is used in the member slot of container_of(), so the member name
+ * becomes whatever identifier the caller passes.  This only compiles
+ * because callers pass a variable named "hw", which happens to match
+ * the struct member.  Fragile — container_of(ptr, struct clk_pllv1, hw)
+ * would be the robust spelling.
+ */
+#define to_clk_pllv1(clk) (container_of(clk, struct clk_pllv1, clk))
+
+/*
+ * Read the current PLL register and let the platform helper
+ * mxc_decode_pll() convert it to a rate.  This PLL type is read-only
+ * here: no set_rate/prepare ops are provided.
+ */
+static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct clk_pllv1 *pll = to_clk_pllv1(hw);
+
+       return mxc_decode_pll(readl(pll->base), parent_rate);
+}
+
+/*
+ * NOTE(review): neither static nor const — the symbol leaks into the
+ * global namespace and the ops table stays writable.  "static const"
+ * would be preferable unless an out-of-file user exists.
+ */
+struct clk_ops clk_pllv1_ops = {
+       .recalc_rate = clk_pllv1_recalc_rate,
+};
+
+/*
+ * Register a v1 PLL with the common clock framework.
+ * @name:   clock name
+ * @parent: parent clock name
+ * @base:   ioremapped base of the PLL register
+ *
+ * Returns the registered clk or an ERR_PTR; the allocation is freed on
+ * registration failure.  NOTE(review): kmalloc (not kzalloc) is used,
+ * so fields of @pll->hw other than .init start uninitialized —
+ * presumably clk_register() initializes them; confirm.
+ */
+struct clk *imx_clk_pllv1(const char *name, const char *parent,
+               void __iomem *base)
+{
+       struct clk_pllv1 *pll;
+       struct clk *clk;
+       struct clk_init_data init;
+
+       pll = kmalloc(sizeof(*pll), GFP_KERNEL);
+       if (!pll)
+               return ERR_PTR(-ENOMEM);
+
+       pll->base = base;
+
+       init.name = name;
+       init.ops = &clk_pllv1_ops;
+       init.flags = 0;
+       init.parent_names = &parent;
+       init.num_parents = 1;
+
+       pll->hw.init = &init;
+
+       clk = clk_register(NULL, &pll->hw);
+       if (IS_ERR(clk))
+               kfree(pll);
+
+       return clk;
+}
diff --git a/arch/arm/mach-imx/clk-pllv2.c b/arch/arm/mach-imx/clk-pllv2.c
new file mode 100644 (file)
index 0000000..4685919
--- /dev/null
@@ -0,0 +1,249 @@
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include <asm/div64.h>
+
+#include "clk.h"
+
+/*
+ * NOTE(review): the "clk" macro argument also fills the member slot of
+ * container_of(), so this expands correctly only when the caller's
+ * variable is named "hw" (the actual struct member).  Spelling the
+ * member out explicitly would be safer.
+ */
+#define to_clk_pllv2(clk) (container_of(clk, struct clk_pllv2, clk))
+
+/* PLL Register Offsets */
+#define MXC_PLL_DP_CTL                 0x00
+#define MXC_PLL_DP_CONFIG              0x04
+#define MXC_PLL_DP_OP                  0x08
+#define MXC_PLL_DP_MFD                 0x0C
+#define MXC_PLL_DP_MFN                 0x10
+#define MXC_PLL_DP_MFNMINUS            0x14
+#define MXC_PLL_DP_MFNPLUS             0x18
+#define MXC_PLL_DP_HFS_OP              0x1C
+#define MXC_PLL_DP_HFS_MFD             0x20
+#define MXC_PLL_DP_HFS_MFN             0x24
+#define MXC_PLL_DP_MFN_TOGC            0x28
+#define MXC_PLL_DP_DESTAT              0x2c
+
+/* PLL Register Bit definitions */
+#define MXC_PLL_DP_CTL_MUL_CTRL                0x2000
+#define MXC_PLL_DP_CTL_DPDCK0_2_EN     0x1000
+#define MXC_PLL_DP_CTL_DPDCK0_2_OFFSET 12
+#define MXC_PLL_DP_CTL_ADE             0x800
+#define MXC_PLL_DP_CTL_REF_CLK_DIV     0x400
+#define MXC_PLL_DP_CTL_REF_CLK_SEL_MASK        (3 << 8)
+#define MXC_PLL_DP_CTL_REF_CLK_SEL_OFFSET      8
+#define MXC_PLL_DP_CTL_HFSM            0x80
+#define MXC_PLL_DP_CTL_PRE             0x40
+#define MXC_PLL_DP_CTL_UPEN            0x20
+#define MXC_PLL_DP_CTL_RST             0x10
+#define MXC_PLL_DP_CTL_RCP             0x8
+#define MXC_PLL_DP_CTL_PLM             0x4
+#define MXC_PLL_DP_CTL_BRM0            0x2
+#define MXC_PLL_DP_CTL_LRF             0x1
+
+#define MXC_PLL_DP_CONFIG_BIST         0x8
+#define MXC_PLL_DP_CONFIG_SJC_CE       0x4
+#define MXC_PLL_DP_CONFIG_AREN         0x2
+#define MXC_PLL_DP_CONFIG_LDREQ                0x1
+
+#define MXC_PLL_DP_OP_MFI_OFFSET       4
+#define MXC_PLL_DP_OP_MFI_MASK         (0xF << 4)
+#define MXC_PLL_DP_OP_PDF_OFFSET       0
+#define MXC_PLL_DP_OP_PDF_MASK         0xF
+
+#define MXC_PLL_DP_MFD_OFFSET          0
+#define MXC_PLL_DP_MFD_MASK            0x07FFFFFF
+
+#define MXC_PLL_DP_MFN_OFFSET          0x0
+#define MXC_PLL_DP_MFN_MASK            0x07FFFFFF
+
+#define MXC_PLL_DP_MFN_TOGC_TOG_DIS    (1 << 17)
+#define MXC_PLL_DP_MFN_TOGC_TOG_EN     (1 << 16)
+#define MXC_PLL_DP_MFN_TOGC_CNT_OFFSET 0x0
+#define MXC_PLL_DP_MFN_TOGC_CNT_MASK   0xFFFF
+
+#define MXC_PLL_DP_DESTAT_TOG_SEL      (1 << 31)
+#define MXC_PLL_DP_DESTAT_MFN          0x07FFFFFF
+
+#define MAX_DPLL_WAIT_TRIES    1000 /* 1000 * udelay(1) = 1ms */
+
+/* One DPLL instance: common framework handle plus the ioremapped base
+ * of this PLL's DP_* register block. */
+struct clk_pllv2 {
+       struct clk_hw   hw;
+       void __iomem    *base;
+};
+
+/*
+ * Decode the DPLL output rate from DP_OP/DP_MFD/DP_MFN:
+ *
+ *   rate = ref * (MFI + MFN / (MFD + 1)),  ref = 2 * parent / (PDF + 1)
+ *
+ * doubled again to 4 * parent when the DPDCK0_2 doubler is enabled.
+ * In HFSM (hardware frequency scaling) mode the HFS_* register set
+ * holds the active values, so it is read instead.  MFN is a 27-bit
+ * two's-complement field, hence the manual sign extension.
+ */
+static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
+       unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl;
+       void __iomem *pllbase;
+       s64 temp;
+       struct clk_pllv2 *pll = to_clk_pllv2(hw);
+
+       pllbase = pll->base;
+
+       dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+       pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
+       dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
+
+       if (pll_hfsm == 0) {
+               dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
+               dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
+               dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
+       } else {
+               dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
+               dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
+               dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
+       }
+       pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
+       mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
+       /* MFI values below 5 are treated as 5 */
+       mfi = (mfi <= 5) ? 5 : mfi;
+       mfd = dp_mfd & MXC_PLL_DP_MFD_MASK;
+       mfn = mfn_abs = dp_mfn & MXC_PLL_DP_MFN_MASK;
+       /* Sign extend to 32-bits */
+       if (mfn >= 0x04000000) {
+               mfn |= 0xFC000000;
+               mfn_abs = -mfn;
+       }
+
+       ref_clk = 2 * parent_rate;
+       if (dbl != 0)
+               ref_clk *= 2;
+
+       ref_clk /= (pdf + 1);
+       /* fractional part computed on |MFN| in 64 bits, sign reapplied */
+       temp = (u64) ref_clk * mfn_abs;
+       do_div(temp, mfd + 1);
+       if (mfn < 0)
+               temp = -temp;
+       temp = (ref_clk * mfi) + temp;
+
+       return temp;
+}
+
+/*
+ * Program MFI/PDF/MFN for the requested rate, with MFD fixed at
+ * 999999 so that MFN is simply the sub-integer remainder expressed in
+ * millionths.  The register set written (DP_* vs HFS_*) follows the
+ * PLL's current HFSM mode, and the dpdck0_2 doubler is force-enabled,
+ * matching the 4 * parent_rate reference used in the math.
+ * Returns -EINVAL when no PDF in 0..15 yields a legal MFI.
+ */
+static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct clk_pllv2 *pll = to_clk_pllv2(hw);
+       u32 reg;
+       void __iomem *pllbase;
+       long mfi, pdf, mfn, mfd = 999999;
+       s64 temp64;
+       unsigned long quad_parent_rate;
+       unsigned long pll_hfsm, dp_ctl;
+
+       pllbase = pll->base;
+
+       quad_parent_rate = 4 * parent_rate;
+       pdf = mfi = -1;
+       /* smallest pdf in 0..15 giving an integer multiplier mfi >= 5 */
+       while (++pdf < 16 && mfi < 5)
+               mfi = rate * (pdf+1) / quad_parent_rate;
+       if (mfi > 15)
+               return -EINVAL;
+       pdf--;  /* loop over-increments pdf once after mfi converges */
+
+       /* remainder in Hz scaled to millionths of the reference */
+       temp64 = rate * (pdf+1) - quad_parent_rate * mfi;
+       do_div(temp64, quad_parent_rate/1000000);
+       mfn = (long)temp64;
+
+       dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+       /* use dpdck0_2 */
+       __raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
+       pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
+       if (pll_hfsm == 0) {
+               reg = mfi << 4 | pdf;
+               __raw_writel(reg, pllbase + MXC_PLL_DP_OP);
+               __raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
+               __raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
+       } else {
+               reg = mfi << 4 | pdf;
+               __raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
+               __raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
+               __raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
+       }
+
+       return 0;
+}
+
+/*
+ * Accept any requested rate as-is; clk_pllv2_set_rate() quantizes it
+ * to the nearest achievable MFI/PDF/MFN combination when applied.
+ */
+static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long *prate)
+{
+       return rate;
+}
+
+/*
+ * Power the PLL up (set UPEN) and busy-wait for the lock flag (LRF),
+ * polling in 1us steps up to MAX_DPLL_WAIT_TRIES (~1ms).  Returns
+ * -EINVAL if the PLL never locks.
+ */
+static int clk_pllv2_prepare(struct clk_hw *hw)
+{
+       struct clk_pllv2 *pll = to_clk_pllv2(hw);
+       u32 reg;
+       void __iomem *pllbase;
+       int i = 0;
+
+       pllbase = pll->base;
+       reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) | MXC_PLL_DP_CTL_UPEN;
+       __raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
+
+       /* Wait for lock */
+       do {
+               reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+               if (reg & MXC_PLL_DP_CTL_LRF)
+                       break;
+
+               udelay(1);
+       } while (++i < MAX_DPLL_WAIT_TRIES);
+
+       if (i == MAX_DPLL_WAIT_TRIES) {
+               pr_err("MX5: pll locking failed\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Power the PLL down by clearing the UPEN bit in DP_CTL. */
+static void clk_pllv2_unprepare(struct clk_hw *hw)
+{
+       struct clk_pllv2 *pll = to_clk_pllv2(hw);
+       u32 reg;
+       void __iomem *pllbase;
+
+       pllbase = pll->base;
+       reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) & ~MXC_PLL_DP_CTL_UPEN;
+       __raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
+}
+
+/*
+ * NOTE(review): neither static nor const, same as clk_pllv1_ops —
+ * "static const" would be preferable unless an out-of-file user exists.
+ */
+struct clk_ops clk_pllv2_ops = {
+       .prepare = clk_pllv2_prepare,
+       .unprepare = clk_pllv2_unprepare,
+       .recalc_rate = clk_pllv2_recalc_rate,
+       .round_rate = clk_pllv2_round_rate,
+       .set_rate = clk_pllv2_set_rate,
+};
+
+/*
+ * Register a v2 (i.MX5 DPLL) PLL with the common clock framework.
+ * @name:   clock name
+ * @parent: parent clock name
+ * @base:   ioremapped base of the DPLL register block
+ *
+ * Returns the registered clk or an ERR_PTR; the zero-initialized
+ * allocation is freed on registration failure.
+ */
+struct clk *imx_clk_pllv2(const char *name, const char *parent,
+               void __iomem *base)
+{
+       struct clk_pllv2 *pll;
+       struct clk *clk;
+       struct clk_init_data init;
+
+       pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+       if (!pll)
+               return ERR_PTR(-ENOMEM);
+
+       pll->base = base;
+
+       init.name = name;
+       init.ops = &clk_pllv2_ops;
+       init.flags = 0;
+       init.parent_names = &parent;
+       init.num_parents = 1;
+
+       pll->hw.init = &init;
+
+       clk = clk_register(NULL, &pll->hw);
+       if (IS_ERR(clk))
+               kfree(pll);
+
+       return clk;
+}
diff --git a/arch/arm/mach-imx/clk-pllv3.c b/arch/arm/mach-imx/clk-pllv3.c
new file mode 100644 (file)
index 0000000..36aac94
--- /dev/null
@@ -0,0 +1,419 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include "clk.h"
+
+#define PLL_NUM_OFFSET         0x10
+#define PLL_DENOM_OFFSET       0x20
+
+#define BM_PLL_POWER           (0x1 << 12)
+#define BM_PLL_ENABLE          (0x1 << 13)
+#define BM_PLL_BYPASS          (0x1 << 16)
+#define BM_PLL_LOCK            (0x1 << 31)
+
+/**
+ * struct clk_pllv3 - IMX PLL clock version 3
+ * @clk_hw:     clock source
+ * @base:       base address of PLL registers
+ * @powerup_set: set POWER bit to power up the PLL
+ * @gate_mask:  mask of gate bits
+ * @div_mask:   mask of divider bits
+ *
+ * IMX PLL clock version 3, found on i.MX6 series.  Divider for pllv3
+ * is actually a multiplier, and always sits at bit 0.
+ */
+struct clk_pllv3 {
+       struct clk_hw   hw;
+       void __iomem    *base;
+       bool            powerup_set;
+       u32             gate_mask;
+       u32             div_mask;
+};
+
+#define to_clk_pllv3(_hw) container_of(_hw, struct clk_pllv3, hw)
+
+static int clk_pllv3_prepare(struct clk_hw *hw)
+{
+       struct clk_pllv3 *pll = to_clk_pllv3(hw);
+       unsigned long timeout = jiffies + msecs_to_jiffies(10);
+       u32 val;
+
+       val = readl_relaxed(pll->base);
+       val &= ~BM_PLL_BYPASS;
+       if (pll->powerup_set)
+               val |= BM_PLL_POWER;
+       else
+               val &= ~BM_PLL_POWER;
+       writel_relaxed(val, pll->base);
+
+       /* Wait for PLL to lock */
+       while (!(readl_relaxed(pll->base) & BM_PLL_LOCK))
+               if (time_after(jiffies, timeout))
+                       return -ETIMEDOUT;
+
+       return 0;
+}
+
+static void clk_pllv3_unprepare(struct clk_hw *hw)
+{
+       struct clk_pllv3 *pll = to_clk_pllv3(hw);
+       u32 val;
+
+       val = readl_relaxed(pll->base);
+       val |= BM_PLL_BYPASS;
+       if (pll->powerup_set)
+               val &= ~BM_PLL_POWER;
+       else
+               val |= BM_PLL_POWER;
+       writel_relaxed(val, pll->base);
+}
+
+static int clk_pllv3_enable(struct clk_hw *hw)
+{
+       struct clk_pllv3 *pll = to_clk_pllv3(hw);
+       u32 val;
+
+       val = readl_relaxed(pll->base);
+       val |= pll->gate_mask;
+       writel_relaxed(val, pll->base);
+
+       return 0;
+}
+
+static void clk_pllv3_disable(struct clk_hw *hw)
+{
+       struct clk_pllv3 *pll = to_clk_pllv3(hw);
+       u32 val;
+
+       val = readl_relaxed(pll->base);
+       val &= ~pll->gate_mask;
+       writel_relaxed(val, pll->base);
+}
+
+static unsigned long clk_pllv3_recalc_rate(struct clk_hw *hw,
+                                          unsigned long parent_rate)
+{
+       struct clk_pllv3 *pll = to_clk_pllv3(hw);
+       u32 div = readl_relaxed(pll->base)  & pll->div_mask;
+
+       return (div == 1) ? parent_rate * 22 : parent_rate * 20;
+}
+
+static long clk_pllv3_round_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long *prate)
+{
+       unsigned long parent_rate = *prate;
+
+       return (rate >= parent_rate * 22) ? parent_rate * 22 :
+                                           parent_rate * 20;
+}
+
+static int clk_pllv3_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct clk_pllv3 *pll = to_clk_pllv3(hw);
+       u32 val, div;
+
+       if (rate == parent_rate * 22)
+               div = 1;
+       else if (rate == parent_rate * 20)
+               div = 0;
+       else
+               return -EINVAL;
+
+       val = readl_relaxed(pll->base);
+       val &= ~pll->div_mask;
+       val |= div;
+       writel_relaxed(val, pll->base);
+
+       return 0;
+}
+
+static const struct clk_ops clk_pllv3_ops = {
+       .prepare        = clk_pllv3_prepare,
+       .unprepare      = clk_pllv3_unprepare,
+       .enable         = clk_pllv3_enable,
+       .disable        = clk_pllv3_disable,
+       .recalc_rate    = clk_pllv3_recalc_rate,
+       .round_rate     = clk_pllv3_round_rate,
+       .set_rate       = clk_pllv3_set_rate,
+};
+
+static unsigned long clk_pllv3_sys_recalc_rate(struct clk_hw *hw,
+                                              unsigned long parent_rate)
+{
+       struct clk_pllv3 *pll = to_clk_pllv3(hw);
+       u32 div = readl_relaxed(pll->base) & pll->div_mask;
+
+       return parent_rate * div / 2;
+}
+
+static long clk_pllv3_sys_round_rate(struct clk_hw *hw, unsigned long rate,
+                                    unsigned long *prate)
+{
+       unsigned long parent_rate = *prate;
+       unsigned long min_rate = parent_rate * 54 / 2;
+       unsigned long max_rate = parent_rate * 108 / 2;
+       u32 div;
+
+       if (rate > max_rate)
+               rate = max_rate;
+       else if (rate < min_rate)
+               rate = min_rate;
+       div = rate * 2 / parent_rate;
+
+       return parent_rate * div / 2;
+}
+
+static int clk_pllv3_sys_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct clk_pllv3 *pll = to_clk_pllv3(hw);
+       unsigned long min_rate = parent_rate * 54 / 2;
+       unsigned long max_rate = parent_rate * 108 / 2;
+       u32 val, div;
+
+       if (rate < min_rate || rate > max_rate)
+               return -EINVAL;
+
+       div = rate * 2 / parent_rate;
+       val = readl_relaxed(pll->base);
+       val &= ~pll->div_mask;
+       val |= div;
+       writel_relaxed(val, pll->base);
+
+       return 0;
+}
+
+static const struct clk_ops clk_pllv3_sys_ops = {
+       .prepare        = clk_pllv3_prepare,
+       .unprepare      = clk_pllv3_unprepare,
+       .enable         = clk_pllv3_enable,
+       .disable        = clk_pllv3_disable,
+       .recalc_rate    = clk_pllv3_sys_recalc_rate,
+       .round_rate     = clk_pllv3_sys_round_rate,
+       .set_rate       = clk_pllv3_sys_set_rate,
+};
+
+static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       struct clk_pllv3 *pll = to_clk_pllv3(hw);
+       u32 mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET);
+       u32 mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET);
+       u32 div = readl_relaxed(pll->base) & pll->div_mask;
+
+       return (parent_rate * div) + ((parent_rate / mfd) * mfn);
+}
+
+static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long *prate)
+{
+       unsigned long parent_rate = *prate;
+       unsigned long min_rate = parent_rate * 27;
+       unsigned long max_rate = parent_rate * 54;
+       u32 div;
+       u32 mfn, mfd = 1000000;
+       s64 temp64;
+
+       if (rate > max_rate)
+               rate = max_rate;
+       else if (rate < min_rate)
+               rate = min_rate;
+
+       div = rate / parent_rate;
+       temp64 = (u64) (rate - div * parent_rate);
+       temp64 *= mfd;
+       do_div(temp64, parent_rate);
+       mfn = temp64;
+
+       return parent_rate * div + parent_rate / mfd * mfn;
+}
+
+static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct clk_pllv3 *pll = to_clk_pllv3(hw);
+       unsigned long min_rate = parent_rate * 27;
+       unsigned long max_rate = parent_rate * 54;
+       u32 val, div;
+       u32 mfn, mfd = 1000000;
+       s64 temp64;
+
+       if (rate < min_rate || rate > max_rate)
+               return -EINVAL;
+
+       div = rate / parent_rate;
+       temp64 = (u64) (rate - div * parent_rate);
+       temp64 *= mfd;
+       do_div(temp64, parent_rate);
+       mfn = temp64;
+
+       val = readl_relaxed(pll->base);
+       val &= ~pll->div_mask;
+       val |= div;
+       writel_relaxed(val, pll->base);
+       writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
+       writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
+
+       return 0;
+}
+
+static const struct clk_ops clk_pllv3_av_ops = {
+       .prepare        = clk_pllv3_prepare,
+       .unprepare      = clk_pllv3_unprepare,
+       .enable         = clk_pllv3_enable,
+       .disable        = clk_pllv3_disable,
+       .recalc_rate    = clk_pllv3_av_recalc_rate,
+       .round_rate     = clk_pllv3_av_round_rate,
+       .set_rate       = clk_pllv3_av_set_rate,
+};
+
+static unsigned long clk_pllv3_enet_recalc_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       struct clk_pllv3 *pll = to_clk_pllv3(hw);
+       u32 div = readl_relaxed(pll->base) & pll->div_mask;
+
+       switch (div) {
+       case 0:
+               return 25000000;
+       case 1:
+               return 50000000;
+       case 2:
+               return 100000000;
+       case 3:
+               return 125000000;
+       }
+
+       return 0;
+}
+
+static long clk_pllv3_enet_round_rate(struct clk_hw *hw, unsigned long rate,
+                                     unsigned long *prate)
+{
+       if (rate >= 125000000)
+               rate = 125000000;
+       else if (rate >= 100000000)
+               rate = 100000000;
+       else if (rate >= 50000000)
+               rate = 50000000;
+       else
+               rate = 25000000;
+       return rate;
+}
+
+static int clk_pllv3_enet_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct clk_pllv3 *pll = to_clk_pllv3(hw);
+       u32 val, div;
+
+       switch (rate) {
+       case 25000000:
+               div = 0;
+               break;
+       case 50000000:
+               div = 1;
+               break;
+       case 100000000:
+               div = 2;
+               break;
+       case 125000000:
+               div = 3;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       val = readl_relaxed(pll->base);
+       val &= ~pll->div_mask;
+       val |= div;
+       writel_relaxed(val, pll->base);
+
+       return 0;
+}
+
+static const struct clk_ops clk_pllv3_enet_ops = {
+       .prepare        = clk_pllv3_prepare,
+       .unprepare      = clk_pllv3_unprepare,
+       .enable         = clk_pllv3_enable,
+       .disable        = clk_pllv3_disable,
+       .recalc_rate    = clk_pllv3_enet_recalc_rate,
+       .round_rate     = clk_pllv3_enet_round_rate,
+       .set_rate       = clk_pllv3_enet_set_rate,
+};
+
+static const struct clk_ops clk_pllv3_mlb_ops = {
+       .prepare        = clk_pllv3_prepare,
+       .unprepare      = clk_pllv3_unprepare,
+       .enable         = clk_pllv3_enable,
+       .disable        = clk_pllv3_disable,
+};
+
+struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
+                         const char *parent_name, void __iomem *base,
+                         u32 gate_mask, u32 div_mask)
+{
+       struct clk_pllv3 *pll;
+       const struct clk_ops *ops;
+       struct clk *clk;
+       struct clk_init_data init;
+
+       pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+       if (!pll)
+               return ERR_PTR(-ENOMEM);
+
+       switch (type) {
+       case IMX_PLLV3_SYS:
+               ops = &clk_pllv3_sys_ops;
+               break;
+       case IMX_PLLV3_USB:
+               ops = &clk_pllv3_ops;
+               pll->powerup_set = true;
+               break;
+       case IMX_PLLV3_AV:
+               ops = &clk_pllv3_av_ops;
+               break;
+       case IMX_PLLV3_ENET:
+               ops = &clk_pllv3_enet_ops;
+               break;
+       case IMX_PLLV3_MLB:
+               ops = &clk_pllv3_mlb_ops;
+               break;
+       default:
+               ops = &clk_pllv3_ops;
+       }
+       pll->base = base;
+       pll->gate_mask = gate_mask;
+       pll->div_mask = div_mask;
+
+       init.name = name;
+       init.ops = ops;
+       init.flags = 0;
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+
+       pll->hw.init = &init;
+
+       clk = clk_register(NULL, &pll->hw);
+       if (IS_ERR(clk))
+               kfree(pll);
+
+       return clk;
+}
diff --git a/arch/arm/mach-imx/clk.h b/arch/arm/mach-imx/clk.h
new file mode 100644 (file)
index 0000000..1bf64fe
--- /dev/null
@@ -0,0 +1,83 @@
+#ifndef __MACH_IMX_CLK_H
+#define __MACH_IMX_CLK_H
+
+#include <linux/spinlock.h>
+#include <linux/clk-provider.h>
+#include <mach/clock.h>
+
+struct clk *imx_clk_pllv1(const char *name, const char *parent,
+               void __iomem *base);
+
+struct clk *imx_clk_pllv2(const char *name, const char *parent,
+               void __iomem *base);
+
+enum imx_pllv3_type {
+       IMX_PLLV3_GENERIC,
+       IMX_PLLV3_SYS,
+       IMX_PLLV3_USB,
+       IMX_PLLV3_AV,
+       IMX_PLLV3_ENET,
+       IMX_PLLV3_MLB,
+};
+
+struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
+               const char *parent_name, void __iomem *base, u32 gate_mask,
+               u32 div_mask);
+
+struct clk *clk_register_gate2(struct device *dev, const char *name,
+               const char *parent_name, unsigned long flags,
+               void __iomem *reg, u8 bit_idx,
+               u8 clk_gate_flags, spinlock_t *lock);
+
+static inline struct clk *imx_clk_gate2(const char *name, const char *parent,
+               void __iomem *reg, u8 shift)
+{
+       return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
+                       shift, 0, &imx_ccm_lock);
+}
+
+struct clk *imx_clk_pfd(const char *name, const char *parent_name,
+               void __iomem *reg, u8 idx);
+
+struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
+                                void __iomem *reg, u8 shift, u8 width,
+                                void __iomem *busy_reg, u8 busy_shift);
+
+struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
+                            u8 width, void __iomem *busy_reg, u8 busy_shift,
+                            const char **parent_names, int num_parents);
+
+static inline struct clk *imx_clk_fixed(const char *name, int rate)
+{
+       return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+}
+
+static inline struct clk *imx_clk_divider(const char *name, const char *parent,
+               void __iomem *reg, u8 shift, u8 width)
+{
+       return clk_register_divider(NULL, name, parent, CLK_SET_RATE_PARENT,
+                       reg, shift, width, 0, &imx_ccm_lock);
+}
+
+static inline struct clk *imx_clk_gate(const char *name, const char *parent,
+               void __iomem *reg, u8 shift)
+{
+       return clk_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
+                       shift, 0, &imx_ccm_lock);
+}
+
+static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
+               u8 shift, u8 width, const char **parents, int num_parents)
+{
+       return clk_register_mux(NULL, name, parents, num_parents, 0, reg, shift,
+                       width, 0, &imx_ccm_lock);
+}
+
+static inline struct clk *imx_clk_fixed_factor(const char *name,
+               const char *parent, unsigned int mult, unsigned int div)
+{
+       return clk_register_fixed_factor(NULL, name, parent,
+                       CLK_SET_RATE_PARENT, mult, div);
+}
+
+#endif
diff --git a/arch/arm/mach-imx/clock-imx1.c b/arch/arm/mach-imx/clock-imx1.c
deleted file mode 100644 (file)
index 4aabeb2..0000000
+++ /dev/null
@@ -1,636 +0,0 @@
-/*
- *  Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/math64.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-
-#define IO_ADDR_CCM(off)       (MX1_IO_ADDRESS(MX1_CCM_BASE_ADDR + (off)))
-
-/* CCM register addresses */
-#define CCM_CSCR       IO_ADDR_CCM(0x0)
-#define CCM_MPCTL0     IO_ADDR_CCM(0x4)
-#define CCM_SPCTL0     IO_ADDR_CCM(0xc)
-#define CCM_PCDR       IO_ADDR_CCM(0x20)
-
-#define CCM_CSCR_CLKO_OFFSET   29
-#define CCM_CSCR_CLKO_MASK     (0x7 << 29)
-#define CCM_CSCR_USB_OFFSET    26
-#define CCM_CSCR_USB_MASK      (0x7 << 26)
-#define CCM_CSCR_OSC_EN_SHIFT  17
-#define CCM_CSCR_SYSTEM_SEL    (1 << 16)
-#define CCM_CSCR_BCLK_OFFSET   10
-#define CCM_CSCR_BCLK_MASK     (0xf << 10)
-#define CCM_CSCR_PRESC         (1 << 15)
-
-#define CCM_PCDR_PCLK3_OFFSET  16
-#define CCM_PCDR_PCLK3_MASK    (0x7f << 16)
-#define CCM_PCDR_PCLK2_OFFSET  4
-#define CCM_PCDR_PCLK2_MASK    (0xf << 4)
-#define CCM_PCDR_PCLK1_OFFSET  0
-#define CCM_PCDR_PCLK1_MASK    0xf
-
-#define IO_ADDR_SCM(off)       (MX1_IO_ADDRESS(MX1_SCM_BASE_ADDR + (off)))
-
-/* SCM register addresses */
-#define SCM_GCCR       IO_ADDR_SCM(0xc)
-
-#define SCM_GCCR_DMA_CLK_EN_OFFSET     3
-#define SCM_GCCR_CSI_CLK_EN_OFFSET     2
-#define SCM_GCCR_MMA_CLK_EN_OFFSET     1
-#define SCM_GCCR_USBD_CLK_EN_OFFSET    0
-
-static int _clk_enable(struct clk *clk)
-{
-       unsigned int reg;
-
-       reg = __raw_readl(clk->enable_reg);
-       reg |= 1 << clk->enable_shift;
-       __raw_writel(reg, clk->enable_reg);
-
-       return 0;
-}
-
-static void _clk_disable(struct clk *clk)
-{
-       unsigned int reg;
-
-       reg = __raw_readl(clk->enable_reg);
-       reg &= ~(1 << clk->enable_shift);
-       __raw_writel(reg, clk->enable_reg);
-}
-
-static int _clk_can_use_parent(const struct clk *clk_arr[], unsigned int size,
-                              struct clk *parent)
-{
-       int i;
-
-       for (i = 0; i < size; i++)
-               if (parent == clk_arr[i])
-                       return i;
-
-       return -EINVAL;
-}
-
-static unsigned long
-_clk_simple_round_rate(struct clk *clk, unsigned long rate, unsigned int limit)
-{
-       int div;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       div = parent_rate / rate;
-       if (parent_rate % rate)
-               div++;
-
-       if (div > limit)
-               div = limit;
-
-       return parent_rate / div;
-}
-
-static unsigned long _clk_parent_round_rate(struct clk *clk, unsigned long rate)
-{
-       return clk->parent->round_rate(clk->parent, rate);
-}
-
-static int _clk_parent_set_rate(struct clk *clk, unsigned long rate)
-{
-       return clk->parent->set_rate(clk->parent, rate);
-}
-
-static unsigned long clk16m_get_rate(struct clk *clk)
-{
-       return 16000000;
-}
-
-static struct clk clk16m = {
-       .get_rate = clk16m_get_rate,
-       .enable = _clk_enable,
-       .enable_reg = CCM_CSCR,
-       .enable_shift = CCM_CSCR_OSC_EN_SHIFT,
-       .disable = _clk_disable,
-};
-
-/* in Hz */
-static unsigned long clk32_rate;
-
-static unsigned long clk32_get_rate(struct clk *clk)
-{
-       return clk32_rate;
-}
-
-static struct clk clk32 = {
-       .get_rate = clk32_get_rate,
-};
-
-static unsigned long clk32_premult_get_rate(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) * 512;
-}
-
-static struct clk clk32_premult = {
-       .parent = &clk32,
-       .get_rate = clk32_premult_get_rate,
-};
-
-static const struct clk *prem_clk_clocks[] = {
-       &clk32_premult,
-       &clk16m,
-};
-
-static int prem_clk_set_parent(struct clk *clk, struct clk *parent)
-{
-       int i;
-       unsigned int reg = __raw_readl(CCM_CSCR);
-
-       i = _clk_can_use_parent(prem_clk_clocks, ARRAY_SIZE(prem_clk_clocks),
-                               parent);
-
-       switch (i) {
-       case 0:
-               reg &= ~CCM_CSCR_SYSTEM_SEL;
-               break;
-       case 1:
-               reg |= CCM_CSCR_SYSTEM_SEL;
-               break;
-       default:
-               return i;
-       }
-
-       __raw_writel(reg, CCM_CSCR);
-
-       return 0;
-}
-
-static struct clk prem_clk = {
-       .set_parent = prem_clk_set_parent,
-};
-
-static unsigned long system_clk_get_rate(struct clk *clk)
-{
-       return mxc_decode_pll(__raw_readl(CCM_SPCTL0),
-                             clk_get_rate(clk->parent));
-}
-
-static struct clk system_clk = {
-       .parent = &prem_clk,
-       .get_rate = system_clk_get_rate,
-};
-
-static unsigned long mcu_clk_get_rate(struct clk *clk)
-{
-       return mxc_decode_pll(__raw_readl(CCM_MPCTL0),
-                             clk_get_rate(clk->parent));
-}
-
-static struct clk mcu_clk = {
-       .parent = &clk32_premult,
-       .get_rate = mcu_clk_get_rate,
-};
-
-static unsigned long fclk_get_rate(struct clk *clk)
-{
-       unsigned long fclk = clk_get_rate(clk->parent);
-
-       if (__raw_readl(CCM_CSCR) & CCM_CSCR_PRESC)
-               fclk /= 2;
-
-       return fclk;
-}
-
-static struct clk fclk = {
-       .parent = &mcu_clk,
-       .get_rate = fclk_get_rate,
-};
-
-/*
- *  get hclk ( SDRAM, CSI, Memory Stick, I2C, DMA )
- */
-static unsigned long hclk_get_rate(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) / (((__raw_readl(CCM_CSCR) &
-                       CCM_CSCR_BCLK_MASK) >> CCM_CSCR_BCLK_OFFSET) + 1);
-}
-
-static unsigned long hclk_round_rate(struct clk *clk, unsigned long rate)
-{
-       return _clk_simple_round_rate(clk, rate, 16);
-}
-
-static int hclk_set_rate(struct clk *clk, unsigned long rate)
-{
-       unsigned int div;
-       unsigned int reg;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       div = parent_rate / rate;
-
-       if (div > 16 || div < 1 || ((parent_rate / div) != rate))
-               return -EINVAL;
-
-       div--;
-
-       reg = __raw_readl(CCM_CSCR);
-       reg &= ~CCM_CSCR_BCLK_MASK;
-       reg |= div << CCM_CSCR_BCLK_OFFSET;
-       __raw_writel(reg, CCM_CSCR);
-
-       return 0;
-}
-
-static struct clk hclk = {
-       .parent = &system_clk,
-       .get_rate = hclk_get_rate,
-       .round_rate = hclk_round_rate,
-       .set_rate = hclk_set_rate,
-};
-
-static unsigned long clk48m_get_rate(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) / (((__raw_readl(CCM_CSCR) &
-                       CCM_CSCR_USB_MASK) >> CCM_CSCR_USB_OFFSET) + 1);
-}
-
-static unsigned long clk48m_round_rate(struct clk *clk, unsigned long rate)
-{
-       return _clk_simple_round_rate(clk, rate, 8);
-}
-
-static int clk48m_set_rate(struct clk *clk, unsigned long rate)
-{
-       unsigned int div;
-       unsigned int reg;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       div = parent_rate / rate;
-
-       if (div > 8 || div < 1 || ((parent_rate / div) != rate))
-               return -EINVAL;
-
-       div--;
-
-       reg = __raw_readl(CCM_CSCR);
-       reg &= ~CCM_CSCR_USB_MASK;
-       reg |= div << CCM_CSCR_USB_OFFSET;
-       __raw_writel(reg, CCM_CSCR);
-
-       return 0;
-}
-
-static struct clk clk48m = {
-       .parent = &system_clk,
-       .get_rate = clk48m_get_rate,
-       .round_rate = clk48m_round_rate,
-       .set_rate = clk48m_set_rate,
-};
-
-/*
- *  get peripheral clock 1 ( UART[12], Timer[12], PWM )
- */
-static unsigned long perclk1_get_rate(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
-                       CCM_PCDR_PCLK1_MASK) >> CCM_PCDR_PCLK1_OFFSET) + 1);
-}
-
-static unsigned long perclk1_round_rate(struct clk *clk, unsigned long rate)
-{
-       return _clk_simple_round_rate(clk, rate, 16);
-}
-
-static int perclk1_set_rate(struct clk *clk, unsigned long rate)
-{
-       unsigned int div;
-       unsigned int reg;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       div = parent_rate / rate;
-
-       if (div > 16 || div < 1 || ((parent_rate / div) != rate))
-               return -EINVAL;
-
-       div--;
-
-       reg = __raw_readl(CCM_PCDR);
-       reg &= ~CCM_PCDR_PCLK1_MASK;
-       reg |= div << CCM_PCDR_PCLK1_OFFSET;
-       __raw_writel(reg, CCM_PCDR);
-
-       return 0;
-}
-
-/*
- *  get peripheral clock 2 ( LCD, SD, SPI[12] )
- */
-static unsigned long perclk2_get_rate(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
-                       CCM_PCDR_PCLK2_MASK) >> CCM_PCDR_PCLK2_OFFSET) + 1);
-}
-
-static unsigned long perclk2_round_rate(struct clk *clk, unsigned long rate)
-{
-       return _clk_simple_round_rate(clk, rate, 16);
-}
-
-static int perclk2_set_rate(struct clk *clk, unsigned long rate)
-{
-       unsigned int div;
-       unsigned int reg;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       div = parent_rate / rate;
-
-       if (div > 16 || div < 1 || ((parent_rate / div) != rate))
-               return -EINVAL;
-
-       div--;
-
-       reg = __raw_readl(CCM_PCDR);
-       reg &= ~CCM_PCDR_PCLK2_MASK;
-       reg |= div << CCM_PCDR_PCLK2_OFFSET;
-       __raw_writel(reg, CCM_PCDR);
-
-       return 0;
-}
-
-/*
- *  get peripheral clock 3 ( SSI )
- */
-static unsigned long perclk3_get_rate(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
-                       CCM_PCDR_PCLK3_MASK) >> CCM_PCDR_PCLK3_OFFSET) + 1);
-}
-
-static unsigned long perclk3_round_rate(struct clk *clk, unsigned long rate)
-{
-       return _clk_simple_round_rate(clk, rate, 128);
-}
-
-static int perclk3_set_rate(struct clk *clk, unsigned long rate)
-{
-       unsigned int div;
-       unsigned int reg;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       div = parent_rate / rate;
-
-       if (div > 128 || div < 1 || ((parent_rate / div) != rate))
-               return -EINVAL;
-
-       div--;
-
-       reg = __raw_readl(CCM_PCDR);
-       reg &= ~CCM_PCDR_PCLK3_MASK;
-       reg |= div << CCM_PCDR_PCLK3_OFFSET;
-       __raw_writel(reg, CCM_PCDR);
-
-       return 0;
-}
-
-static struct clk perclk[] = {
-       {
-               .id = 0,
-               .parent = &system_clk,
-               .get_rate = perclk1_get_rate,
-               .round_rate = perclk1_round_rate,
-               .set_rate = perclk1_set_rate,
-       }, {
-               .id = 1,
-               .parent = &system_clk,
-               .get_rate = perclk2_get_rate,
-               .round_rate = perclk2_round_rate,
-               .set_rate = perclk2_set_rate,
-       }, {
-               .id = 2,
-               .parent = &system_clk,
-               .get_rate = perclk3_get_rate,
-               .round_rate = perclk3_round_rate,
-               .set_rate = perclk3_set_rate,
-       }
-};
-
-static const struct clk *clko_clocks[] = {
-       &perclk[0],
-       &hclk,
-       &clk48m,
-       &clk16m,
-       &prem_clk,
-       &fclk,
-};
-
-static int clko_set_parent(struct clk *clk, struct clk *parent)
-{
-       int i;
-       unsigned int reg;
-
-       i = _clk_can_use_parent(clko_clocks, ARRAY_SIZE(clko_clocks), parent);
-       if (i < 0)
-               return i;
-
-       reg = __raw_readl(CCM_CSCR) & ~CCM_CSCR_CLKO_MASK;
-       reg |= i << CCM_CSCR_CLKO_OFFSET;
-       __raw_writel(reg, CCM_CSCR);
-
-       if (clko_clocks[i]->set_rate && clko_clocks[i]->round_rate) {
-               clk->set_rate = _clk_parent_set_rate;
-               clk->round_rate = _clk_parent_round_rate;
-       } else {
-               clk->set_rate = NULL;
-               clk->round_rate = NULL;
-       }
-
-       return 0;
-}
-
-static struct clk clko_clk = {
-       .set_parent = clko_set_parent,
-};
-
-static struct clk dma_clk = {
-       .parent = &hclk,
-       .round_rate = _clk_parent_round_rate,
-       .set_rate = _clk_parent_set_rate,
-       .enable = _clk_enable,
-       .enable_reg = SCM_GCCR,
-       .enable_shift = SCM_GCCR_DMA_CLK_EN_OFFSET,
-       .disable = _clk_disable,
-};
-
-static struct clk csi_clk = {
-       .parent = &hclk,
-       .round_rate = _clk_parent_round_rate,
-       .set_rate = _clk_parent_set_rate,
-       .enable = _clk_enable,
-       .enable_reg = SCM_GCCR,
-       .enable_shift = SCM_GCCR_CSI_CLK_EN_OFFSET,
-       .disable = _clk_disable,
-};
-
-static struct clk mma_clk = {
-       .parent = &hclk,
-       .round_rate = _clk_parent_round_rate,
-       .set_rate = _clk_parent_set_rate,
-       .enable = _clk_enable,
-       .enable_reg = SCM_GCCR,
-       .enable_shift = SCM_GCCR_MMA_CLK_EN_OFFSET,
-       .disable = _clk_disable,
-};
-
-static struct clk usbd_clk = {
-       .parent = &clk48m,
-       .round_rate = _clk_parent_round_rate,
-       .set_rate = _clk_parent_set_rate,
-       .enable = _clk_enable,
-       .enable_reg = SCM_GCCR,
-       .enable_shift = SCM_GCCR_USBD_CLK_EN_OFFSET,
-       .disable = _clk_disable,
-};
-
-static struct clk gpt_clk = {
-       .parent = &perclk[0],
-       .round_rate = _clk_parent_round_rate,
-       .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk uart_clk = {
-       .parent = &perclk[0],
-       .round_rate = _clk_parent_round_rate,
-       .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk i2c_clk = {
-       .parent = &hclk,
-       .round_rate = _clk_parent_round_rate,
-       .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk spi_clk = {
-       .parent = &perclk[1],
-       .round_rate = _clk_parent_round_rate,
-       .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk sdhc_clk = {
-       .parent = &perclk[1],
-       .round_rate = _clk_parent_round_rate,
-       .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk lcdc_clk = {
-       .parent = &perclk[1],
-       .round_rate = _clk_parent_round_rate,
-       .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk mshc_clk = {
-       .parent = &hclk,
-       .round_rate = _clk_parent_round_rate,
-       .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk ssi_clk = {
-       .parent = &perclk[2],
-       .round_rate = _clk_parent_round_rate,
-       .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk rtc_clk = {
-       .parent = &clk32,
-};
-
-#define _REGISTER_CLOCK(d, n, c) \
-       { \
-               .dev_id = d, \
-               .con_id = n, \
-               .clk = &c, \
-       },
-static struct clk_lookup lookups[] __initdata = {
-       _REGISTER_CLOCK(NULL, "dma", dma_clk)
-       _REGISTER_CLOCK("mx1-camera.0", NULL, csi_clk)
-       _REGISTER_CLOCK(NULL, "mma", mma_clk)
-       _REGISTER_CLOCK("imx_udc.0", NULL, usbd_clk)
-       _REGISTER_CLOCK(NULL, "gpt", gpt_clk)
-       _REGISTER_CLOCK("imx1-uart.0", NULL, uart_clk)
-       _REGISTER_CLOCK("imx1-uart.1", NULL, uart_clk)
-       _REGISTER_CLOCK("imx1-uart.2", NULL, uart_clk)
-       _REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
-       _REGISTER_CLOCK("imx1-cspi.0", NULL, spi_clk)
-       _REGISTER_CLOCK("imx1-cspi.1", NULL, spi_clk)
-       _REGISTER_CLOCK("imx-mmc.0", NULL, sdhc_clk)
-       _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
-       _REGISTER_CLOCK(NULL, "mshc", mshc_clk)
-       _REGISTER_CLOCK(NULL, "ssi", ssi_clk)
-       _REGISTER_CLOCK("mxc_rtc.0", NULL, rtc_clk)
-};
-
-int __init mx1_clocks_init(unsigned long fref)
-{
-       unsigned int reg;
-
-       /* disable clocks we are able to */
-       __raw_writel(0, SCM_GCCR);
-
-       clk32_rate = fref;
-       reg = __raw_readl(CCM_CSCR);
-
-       /* detect clock reference for system PLL */
-       if (reg & CCM_CSCR_SYSTEM_SEL) {
-               prem_clk.parent = &clk16m;
-       } else {
-               /* ensure that oscillator is disabled */
-               reg &= ~(1 << CCM_CSCR_OSC_EN_SHIFT);
-               __raw_writel(reg, CCM_CSCR);
-               prem_clk.parent = &clk32_premult;
-       }
-
-       /* detect reference for CLKO */
-       reg = (reg & CCM_CSCR_CLKO_MASK) >> CCM_CSCR_CLKO_OFFSET;
-       clko_clk.parent = (struct clk *)clko_clocks[reg];
-
-       clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-       clk_enable(&hclk);
-       clk_enable(&fclk);
-
-       mxc_timer_init(&gpt_clk, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR),
-                       MX1_TIM1_INT);
-
-       return 0;
-}
diff --git a/arch/arm/mach-imx/clock-imx21.c b/arch/arm/mach-imx/clock-imx21.c
deleted file mode 100644 (file)
index ee15d8c..0000000
+++ /dev/null
@@ -1,1239 +0,0 @@
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-#include <asm/div64.h>
-
-#define IO_ADDR_CCM(off)       (MX21_IO_ADDRESS(MX21_CCM_BASE_ADDR + (off)))
-
-/* Register offsets */
-#define CCM_CSCR               IO_ADDR_CCM(0x0)
-#define CCM_MPCTL0             IO_ADDR_CCM(0x4)
-#define CCM_MPCTL1             IO_ADDR_CCM(0x8)
-#define CCM_SPCTL0             IO_ADDR_CCM(0xc)
-#define CCM_SPCTL1             IO_ADDR_CCM(0x10)
-#define CCM_OSC26MCTL          IO_ADDR_CCM(0x14)
-#define CCM_PCDR0              IO_ADDR_CCM(0x18)
-#define CCM_PCDR1              IO_ADDR_CCM(0x1c)
-#define CCM_PCCR0              IO_ADDR_CCM(0x20)
-#define CCM_PCCR1              IO_ADDR_CCM(0x24)
-#define CCM_CCSR               IO_ADDR_CCM(0x28)
-#define CCM_PMCTL              IO_ADDR_CCM(0x2c)
-#define CCM_PMCOUNT            IO_ADDR_CCM(0x30)
-#define CCM_WKGDCTL            IO_ADDR_CCM(0x34)
-
-#define CCM_CSCR_PRESC_OFFSET  29
-#define CCM_CSCR_PRESC_MASK    (0x7 << CCM_CSCR_PRESC_OFFSET)
-
-#define CCM_CSCR_USB_OFFSET    26
-#define CCM_CSCR_USB_MASK      (0x7 << CCM_CSCR_USB_OFFSET)
-#define CCM_CSCR_SD_OFFSET     24
-#define CCM_CSCR_SD_MASK       (0x3 << CCM_CSCR_SD_OFFSET)
-#define CCM_CSCR_SPLLRES       (1 << 22)
-#define CCM_CSCR_MPLLRES       (1 << 21)
-#define CCM_CSCR_SSI2_OFFSET   20
-#define CCM_CSCR_SSI2          (1 << CCM_CSCR_SSI2_OFFSET)
-#define CCM_CSCR_SSI1_OFFSET   19
-#define CCM_CSCR_SSI1          (1 << CCM_CSCR_SSI1_OFFSET)
-#define CCM_CSCR_FIR_OFFSET    18
-#define CCM_CSCR_FIR           (1 << CCM_CSCR_FIR_OFFSET)
-#define CCM_CSCR_SP            (1 << 17)
-#define CCM_CSCR_MCU           (1 << 16)
-#define CCM_CSCR_BCLK_OFFSET   10
-#define CCM_CSCR_BCLK_MASK     (0xf << CCM_CSCR_BCLK_OFFSET)
-#define CCM_CSCR_IPDIV_OFFSET  9
-#define CCM_CSCR_IPDIV         (1 << CCM_CSCR_IPDIV_OFFSET)
-
-#define CCM_CSCR_OSC26MDIV     (1 << 4)
-#define CCM_CSCR_OSC26M                (1 << 3)
-#define CCM_CSCR_FPM           (1 << 2)
-#define CCM_CSCR_SPEN          (1 << 1)
-#define CCM_CSCR_MPEN          1
-
-#define CCM_MPCTL0_CPLM                (1 << 31)
-#define CCM_MPCTL0_PD_OFFSET   26
-#define CCM_MPCTL0_PD_MASK     (0xf << 26)
-#define CCM_MPCTL0_MFD_OFFSET  16
-#define CCM_MPCTL0_MFD_MASK    (0x3ff << 16)
-#define CCM_MPCTL0_MFI_OFFSET  10
-#define CCM_MPCTL0_MFI_MASK    (0xf << 10)
-#define CCM_MPCTL0_MFN_OFFSET  0
-#define CCM_MPCTL0_MFN_MASK    0x3ff
-
-#define CCM_MPCTL1_LF          (1 << 15)
-#define CCM_MPCTL1_BRMO                (1 << 6)
-
-#define CCM_SPCTL0_CPLM                (1 << 31)
-#define CCM_SPCTL0_PD_OFFSET   26
-#define CCM_SPCTL0_PD_MASK     (0xf << 26)
-#define CCM_SPCTL0_MFD_OFFSET  16
-#define CCM_SPCTL0_MFD_MASK    (0x3ff << 16)
-#define CCM_SPCTL0_MFI_OFFSET  10
-#define CCM_SPCTL0_MFI_MASK    (0xf << 10)
-#define CCM_SPCTL0_MFN_OFFSET  0
-#define CCM_SPCTL0_MFN_MASK    0x3ff
-
-#define CCM_SPCTL1_LF          (1 << 15)
-#define CCM_SPCTL1_BRMO                (1 << 6)
-
-#define CCM_OSC26MCTL_PEAK_OFFSET      16
-#define CCM_OSC26MCTL_PEAK_MASK                (0x3 << 16)
-#define CCM_OSC26MCTL_AGC_OFFSET       8
-#define CCM_OSC26MCTL_AGC_MASK         (0x3f << 8)
-#define CCM_OSC26MCTL_ANATEST_OFFSET   0
-#define CCM_OSC26MCTL_ANATEST_MASK     0x3f
-
-#define CCM_PCDR0_SSI2BAUDDIV_OFFSET   26
-#define CCM_PCDR0_SSI2BAUDDIV_MASK     (0x3f << 26)
-#define CCM_PCDR0_SSI1BAUDDIV_OFFSET   16
-#define CCM_PCDR0_SSI1BAUDDIV_MASK     (0x3f << 16)
-#define CCM_PCDR0_NFCDIV_OFFSET                12
-#define CCM_PCDR0_NFCDIV_MASK          (0xf << 12)
-#define CCM_PCDR0_48MDIV_OFFSET                5
-#define CCM_PCDR0_48MDIV_MASK          (0x7 << CCM_PCDR0_48MDIV_OFFSET)
-#define CCM_PCDR0_FIRIDIV_OFFSET       0
-#define CCM_PCDR0_FIRIDIV_MASK         0x1f
-#define CCM_PCDR1_PERDIV4_OFFSET       24
-#define CCM_PCDR1_PERDIV4_MASK         (0x3f << 24)
-#define CCM_PCDR1_PERDIV3_OFFSET       16
-#define CCM_PCDR1_PERDIV3_MASK         (0x3f << 16)
-#define CCM_PCDR1_PERDIV2_OFFSET       8
-#define CCM_PCDR1_PERDIV2_MASK         (0x3f << 8)
-#define CCM_PCDR1_PERDIV1_OFFSET       0
-#define CCM_PCDR1_PERDIV1_MASK         0x3f
-
-#define CCM_PCCR_HCLK_CSI_OFFSET       31
-#define CCM_PCCR_HCLK_CSI_REG          CCM_PCCR0
-#define CCM_PCCR_HCLK_DMA_OFFSET       30
-#define CCM_PCCR_HCLK_DMA_REG          CCM_PCCR0
-#define CCM_PCCR_HCLK_BROM_OFFSET      28
-#define CCM_PCCR_HCLK_BROM_REG         CCM_PCCR0
-#define CCM_PCCR_HCLK_EMMA_OFFSET      27
-#define CCM_PCCR_HCLK_EMMA_REG         CCM_PCCR0
-#define CCM_PCCR_HCLK_LCDC_OFFSET      26
-#define CCM_PCCR_HCLK_LCDC_REG         CCM_PCCR0
-#define CCM_PCCR_HCLK_SLCDC_OFFSET     25
-#define CCM_PCCR_HCLK_SLCDC_REG                CCM_PCCR0
-#define CCM_PCCR_HCLK_USBOTG_OFFSET    24
-#define CCM_PCCR_HCLK_USBOTG_REG       CCM_PCCR0
-#define CCM_PCCR_HCLK_BMI_OFFSET       23
-#define CCM_PCCR_BMI_MASK              (1 << CCM_PCCR_BMI_MASK)
-#define CCM_PCCR_HCLK_BMI_REG          CCM_PCCR0
-#define CCM_PCCR_PERCLK4_OFFSET                22
-#define CCM_PCCR_PERCLK4_REG           CCM_PCCR0
-#define CCM_PCCR_SLCDC_OFFSET          21
-#define CCM_PCCR_SLCDC_REG             CCM_PCCR0
-#define CCM_PCCR_FIRI_BAUD_OFFSET      20
-#define CCM_PCCR_FIRI_BAUD_MASK                (1 << CCM_PCCR_FIRI_BAUD_MASK)
-#define CCM_PCCR_FIRI_BAUD_REG         CCM_PCCR0
-#define CCM_PCCR_NFC_OFFSET            19
-#define CCM_PCCR_NFC_REG               CCM_PCCR0
-#define CCM_PCCR_LCDC_OFFSET           18
-#define CCM_PCCR_LCDC_REG              CCM_PCCR0
-#define CCM_PCCR_SSI1_BAUD_OFFSET      17
-#define CCM_PCCR_SSI1_BAUD_REG         CCM_PCCR0
-#define CCM_PCCR_SSI2_BAUD_OFFSET      16
-#define CCM_PCCR_SSI2_BAUD_REG         CCM_PCCR0
-#define CCM_PCCR_EMMA_OFFSET           15
-#define CCM_PCCR_EMMA_REG              CCM_PCCR0
-#define CCM_PCCR_USBOTG_OFFSET         14
-#define CCM_PCCR_USBOTG_REG            CCM_PCCR0
-#define CCM_PCCR_DMA_OFFSET            13
-#define CCM_PCCR_DMA_REG               CCM_PCCR0
-#define CCM_PCCR_I2C1_OFFSET           12
-#define CCM_PCCR_I2C1_REG              CCM_PCCR0
-#define CCM_PCCR_GPIO_OFFSET           11
-#define CCM_PCCR_GPIO_REG              CCM_PCCR0
-#define CCM_PCCR_SDHC2_OFFSET          10
-#define CCM_PCCR_SDHC2_REG             CCM_PCCR0
-#define CCM_PCCR_SDHC1_OFFSET          9
-#define CCM_PCCR_SDHC1_REG             CCM_PCCR0
-#define CCM_PCCR_FIRI_OFFSET           8
-#define CCM_PCCR_FIRI_MASK             (1 << CCM_PCCR_BAUD_MASK)
-#define CCM_PCCR_FIRI_REG              CCM_PCCR0
-#define CCM_PCCR_SSI2_IPG_OFFSET       7
-#define CCM_PCCR_SSI2_REG              CCM_PCCR0
-#define CCM_PCCR_SSI1_IPG_OFFSET       6
-#define CCM_PCCR_SSI1_REG              CCM_PCCR0
-#define CCM_PCCR_CSPI2_OFFSET          5
-#define        CCM_PCCR_CSPI2_REG              CCM_PCCR0
-#define CCM_PCCR_CSPI1_OFFSET          4
-#define        CCM_PCCR_CSPI1_REG              CCM_PCCR0
-#define CCM_PCCR_UART4_OFFSET          3
-#define CCM_PCCR_UART4_REG             CCM_PCCR0
-#define CCM_PCCR_UART3_OFFSET          2
-#define CCM_PCCR_UART3_REG             CCM_PCCR0
-#define CCM_PCCR_UART2_OFFSET          1
-#define CCM_PCCR_UART2_REG             CCM_PCCR0
-#define CCM_PCCR_UART1_OFFSET          0
-#define CCM_PCCR_UART1_REG             CCM_PCCR0
-
-#define CCM_PCCR_OWIRE_OFFSET          31
-#define CCM_PCCR_OWIRE_REG             CCM_PCCR1
-#define CCM_PCCR_KPP_OFFSET            30
-#define CCM_PCCR_KPP_REG               CCM_PCCR1
-#define CCM_PCCR_RTC_OFFSET            29
-#define CCM_PCCR_RTC_REG               CCM_PCCR1
-#define CCM_PCCR_PWM_OFFSET            28
-#define CCM_PCCR_PWM_REG               CCM_PCCR1
-#define CCM_PCCR_GPT3_OFFSET           27
-#define CCM_PCCR_GPT3_REG              CCM_PCCR1
-#define CCM_PCCR_GPT2_OFFSET           26
-#define CCM_PCCR_GPT2_REG              CCM_PCCR1
-#define CCM_PCCR_GPT1_OFFSET           25
-#define CCM_PCCR_GPT1_REG              CCM_PCCR1
-#define CCM_PCCR_WDT_OFFSET            24
-#define CCM_PCCR_WDT_REG               CCM_PCCR1
-#define CCM_PCCR_CSPI3_OFFSET          23
-#define        CCM_PCCR_CSPI3_REG              CCM_PCCR1
-
-#define CCM_PCCR_CSPI1_MASK            (1 << CCM_PCCR_CSPI1_OFFSET)
-#define CCM_PCCR_CSPI2_MASK            (1 << CCM_PCCR_CSPI2_OFFSET)
-#define CCM_PCCR_CSPI3_MASK            (1 << CCM_PCCR_CSPI3_OFFSET)
-#define CCM_PCCR_DMA_MASK              (1 << CCM_PCCR_DMA_OFFSET)
-#define CCM_PCCR_EMMA_MASK             (1 << CCM_PCCR_EMMA_OFFSET)
-#define CCM_PCCR_GPIO_MASK             (1 << CCM_PCCR_GPIO_OFFSET)
-#define CCM_PCCR_GPT1_MASK             (1 << CCM_PCCR_GPT1_OFFSET)
-#define CCM_PCCR_GPT2_MASK             (1 << CCM_PCCR_GPT2_OFFSET)
-#define CCM_PCCR_GPT3_MASK             (1 << CCM_PCCR_GPT3_OFFSET)
-#define CCM_PCCR_HCLK_BROM_MASK                (1 << CCM_PCCR_HCLK_BROM_OFFSET)
-#define CCM_PCCR_HCLK_CSI_MASK         (1 << CCM_PCCR_HCLK_CSI_OFFSET)
-#define CCM_PCCR_HCLK_DMA_MASK         (1 << CCM_PCCR_HCLK_DMA_OFFSET)
-#define CCM_PCCR_HCLK_EMMA_MASK                (1 << CCM_PCCR_HCLK_EMMA_OFFSET)
-#define CCM_PCCR_HCLK_LCDC_MASK                (1 << CCM_PCCR_HCLK_LCDC_OFFSET)
-#define CCM_PCCR_HCLK_SLCDC_MASK       (1 << CCM_PCCR_HCLK_SLCDC_OFFSET)
-#define CCM_PCCR_HCLK_USBOTG_MASK      (1 << CCM_PCCR_HCLK_USBOTG_OFFSET)
-#define CCM_PCCR_I2C1_MASK             (1 << CCM_PCCR_I2C1_OFFSET)
-#define CCM_PCCR_KPP_MASK              (1 << CCM_PCCR_KPP_OFFSET)
-#define CCM_PCCR_LCDC_MASK             (1 << CCM_PCCR_LCDC_OFFSET)
-#define CCM_PCCR_NFC_MASK              (1 << CCM_PCCR_NFC_OFFSET)
-#define CCM_PCCR_OWIRE_MASK            (1 << CCM_PCCR_OWIRE_OFFSET)
-#define CCM_PCCR_PERCLK4_MASK          (1 << CCM_PCCR_PERCLK4_OFFSET)
-#define CCM_PCCR_PWM_MASK              (1 << CCM_PCCR_PWM_OFFSET)
-#define CCM_PCCR_RTC_MASK              (1 << CCM_PCCR_RTC_OFFSET)
-#define CCM_PCCR_SDHC1_MASK            (1 << CCM_PCCR_SDHC1_OFFSET)
-#define CCM_PCCR_SDHC2_MASK            (1 << CCM_PCCR_SDHC2_OFFSET)
-#define CCM_PCCR_SLCDC_MASK            (1 << CCM_PCCR_SLCDC_OFFSET)
-#define CCM_PCCR_SSI1_BAUD_MASK                (1 << CCM_PCCR_SSI1_BAUD_OFFSET)
-#define CCM_PCCR_SSI1_IPG_MASK         (1 << CCM_PCCR_SSI1_IPG_OFFSET)
-#define CCM_PCCR_SSI2_BAUD_MASK                (1 << CCM_PCCR_SSI2_BAUD_OFFSET)
-#define CCM_PCCR_SSI2_IPG_MASK         (1 << CCM_PCCR_SSI2_IPG_OFFSET)
-#define CCM_PCCR_UART1_MASK            (1 << CCM_PCCR_UART1_OFFSET)
-#define CCM_PCCR_UART2_MASK            (1 << CCM_PCCR_UART2_OFFSET)
-#define CCM_PCCR_UART3_MASK            (1 << CCM_PCCR_UART3_OFFSET)
-#define CCM_PCCR_UART4_MASK            (1 << CCM_PCCR_UART4_OFFSET)
-#define CCM_PCCR_USBOTG_MASK           (1 << CCM_PCCR_USBOTG_OFFSET)
-#define CCM_PCCR_WDT_MASK              (1 << CCM_PCCR_WDT_OFFSET)
-
-#define CCM_CCSR_32KSR         (1 << 15)
-
-#define CCM_CCSR_CLKMODE1      (1 << 9)
-#define CCM_CCSR_CLKMODE0      (1 << 8)
-
-#define CCM_CCSR_CLKOSEL_OFFSET 0
-#define CCM_CCSR_CLKOSEL_MASK  0x1f
-
-#define SYS_FMCR               0x14    /* Functional Muxing Control Reg */
-#define SYS_CHIP_ID            0x00    /* The offset of CHIP ID register */
-
-static int _clk_enable(struct clk *clk)
-{
-       u32 reg;
-
-       reg = __raw_readl(clk->enable_reg);
-       reg |= 1 << clk->enable_shift;
-       __raw_writel(reg, clk->enable_reg);
-       return 0;
-}
-
-static void _clk_disable(struct clk *clk)
-{
-       u32 reg;
-
-       reg = __raw_readl(clk->enable_reg);
-       reg &= ~(1 << clk->enable_shift);
-       __raw_writel(reg, clk->enable_reg);
-}
-
-static unsigned long _clk_generic_round_rate(struct clk *clk,
-                       unsigned long rate,
-                       u32 max_divisor)
-{
-       u32 div;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       div = parent_rate / rate;
-       if (parent_rate % rate)
-               div++;
-
-       if (div > max_divisor)
-               div = max_divisor;
-
-       return parent_rate / div;
-}
-
-static int _clk_spll_enable(struct clk *clk)
-{
-       u32 reg;
-
-       reg = __raw_readl(CCM_CSCR);
-       reg |= CCM_CSCR_SPEN;
-       __raw_writel(reg, CCM_CSCR);
-
-       while ((__raw_readl(CCM_SPCTL1) & CCM_SPCTL1_LF) == 0)
-               ;
-       return 0;
-}
-
-static void _clk_spll_disable(struct clk *clk)
-{
-       u32 reg;
-
-       reg = __raw_readl(CCM_CSCR);
-       reg &= ~CCM_CSCR_SPEN;
-       __raw_writel(reg, CCM_CSCR);
-}
-
-
-#define CSCR() (__raw_readl(CCM_CSCR))
-#define PCDR0() (__raw_readl(CCM_PCDR0))
-#define PCDR1() (__raw_readl(CCM_PCDR1))
-
-static unsigned long _clk_perclkx_round_rate(struct clk *clk,
-                                            unsigned long rate)
-{
-       return _clk_generic_round_rate(clk, rate, 64);
-}
-
-static int _clk_perclkx_set_rate(struct clk *clk, unsigned long rate)
-{
-       u32 reg;
-       u32 div;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       if (clk->id < 0 || clk->id > 3)
-               return -EINVAL;
-
-       div = parent_rate / rate;
-       if (div > 64 || div < 1 || ((parent_rate / div) != rate))
-               return -EINVAL;
-       div--;
-
-       reg =
-           __raw_readl(CCM_PCDR1) & ~(CCM_PCDR1_PERDIV1_MASK <<
-                                      (clk->id << 3));
-       reg |= div << (clk->id << 3);
-       __raw_writel(reg, CCM_PCDR1);
-
-       return 0;
-}
-
-static unsigned long _clk_usb_recalc(struct clk *clk)
-{
-       unsigned long usb_pdf;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       usb_pdf = (CSCR() & CCM_CSCR_USB_MASK) >> CCM_CSCR_USB_OFFSET;
-
-       return parent_rate / (usb_pdf + 1U);
-}
-
-static unsigned long _clk_usb_round_rate(struct clk *clk,
-                                            unsigned long rate)
-{
-       return _clk_generic_round_rate(clk, rate, 8);
-}
-
-static int _clk_usb_set_rate(struct clk *clk, unsigned long rate)
-{
-       u32 reg;
-       u32 div;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       div = parent_rate / rate;
-       if (div > 8 || div < 1 || ((parent_rate / div) != rate))
-               return -EINVAL;
-       div--;
-
-       reg = CSCR() & ~CCM_CSCR_USB_MASK;
-       reg |= div << CCM_CSCR_USB_OFFSET;
-       __raw_writel(reg, CCM_CSCR);
-
-       return 0;
-}
-
-static unsigned long _clk_ssix_recalc(struct clk *clk, unsigned long pdf)
-{
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       pdf = (pdf < 2) ? 124UL : pdf;  /* MX21 & MX27 TO1 */
-
-       return 2UL * parent_rate / pdf;
-}
-
-static unsigned long _clk_ssi1_recalc(struct clk *clk)
-{
-       return _clk_ssix_recalc(clk,
-               (PCDR0() & CCM_PCDR0_SSI1BAUDDIV_MASK)
-               >> CCM_PCDR0_SSI1BAUDDIV_OFFSET);
-}
-
-static unsigned long _clk_ssi2_recalc(struct clk *clk)
-{
-       return _clk_ssix_recalc(clk,
-               (PCDR0() & CCM_PCDR0_SSI2BAUDDIV_MASK) >>
-               CCM_PCDR0_SSI2BAUDDIV_OFFSET);
-}
-
-static unsigned long _clk_nfc_recalc(struct clk *clk)
-{
-       unsigned long nfc_pdf;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       nfc_pdf = (PCDR0() & CCM_PCDR0_NFCDIV_MASK)
-               >> CCM_PCDR0_NFCDIV_OFFSET;
-
-       return parent_rate / (nfc_pdf + 1);
-}
-
-static unsigned long _clk_parent_round_rate(struct clk *clk, unsigned long rate)
-{
-       return clk->parent->round_rate(clk->parent, rate);
-}
-
-static int _clk_parent_set_rate(struct clk *clk, unsigned long rate)
-{
-       return clk->parent->set_rate(clk->parent, rate);
-}
-
-static unsigned long external_high_reference; /* in Hz */
-
-static unsigned long get_high_reference_clock_rate(struct clk *clk)
-{
-       return external_high_reference;
-}
-
-/*
- * the high frequency external clock reference
- * Default case is 26MHz.
- */
-static struct clk ckih_clk = {
-       .get_rate = get_high_reference_clock_rate,
-};
-
-static unsigned long external_low_reference; /* in Hz */
-
-static unsigned long get_low_reference_clock_rate(struct clk *clk)
-{
-       return external_low_reference;
-}
-
-/*
- * the low frequency external clock reference
- * Default case is 32.768kHz.
- */
-static struct clk ckil_clk = {
-       .get_rate = get_low_reference_clock_rate,
-};
-
-
-static unsigned long _clk_fpm_recalc(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) * 512;
-}
-
-/* Output of frequency pre multiplier */
-static struct clk fpm_clk = {
-       .parent = &ckil_clk,
-       .get_rate = _clk_fpm_recalc,
-};
-
-static unsigned long get_mpll_clk(struct clk *clk)
-{
-       uint32_t reg;
-       unsigned long ref_clk;
-       unsigned long mfi = 0, mfn = 0, mfd = 0, pdf = 0;
-       unsigned long long temp;
-
-       ref_clk = clk_get_rate(clk->parent);
-
-       reg = __raw_readl(CCM_MPCTL0);
-       pdf = (reg & CCM_MPCTL0_PD_MASK)  >> CCM_MPCTL0_PD_OFFSET;
-       mfd = (reg & CCM_MPCTL0_MFD_MASK) >> CCM_MPCTL0_MFD_OFFSET;
-       mfi = (reg & CCM_MPCTL0_MFI_MASK) >> CCM_MPCTL0_MFI_OFFSET;
-       mfn = (reg & CCM_MPCTL0_MFN_MASK) >> CCM_MPCTL0_MFN_OFFSET;
-
-       mfi = (mfi <= 5) ? 5 : mfi;
-       temp = 2LL * ref_clk * mfn;
-       do_div(temp, mfd + 1);
-       temp = 2LL * ref_clk * mfi + temp;
-       do_div(temp, pdf + 1);
-
-       return (unsigned long)temp;
-}
-
-static struct clk mpll_clk = {
-       .parent = &ckih_clk,
-       .get_rate = get_mpll_clk,
-};
-
-static unsigned long _clk_fclk_get_rate(struct clk *clk)
-{
-       unsigned long parent_rate;
-       u32 div;
-
-       div = (CSCR() & CCM_CSCR_PRESC_MASK) >> CCM_CSCR_PRESC_OFFSET;
-       parent_rate = clk_get_rate(clk->parent);
-
-       return parent_rate / (div+1);
-}
-
-static struct clk fclk_clk = {
-       .parent = &mpll_clk,
-       .get_rate = _clk_fclk_get_rate
-};
-
-static unsigned long get_spll_clk(struct clk *clk)
-{
-       uint32_t reg;
-       unsigned long ref_clk;
-       unsigned long mfi = 0, mfn = 0, mfd = 0, pdf = 0;
-       unsigned long long temp;
-
-       ref_clk = clk_get_rate(clk->parent);
-
-       reg = __raw_readl(CCM_SPCTL0);
-       pdf = (reg & CCM_SPCTL0_PD_MASK) >> CCM_SPCTL0_PD_OFFSET;
-       mfd = (reg & CCM_SPCTL0_MFD_MASK) >> CCM_SPCTL0_MFD_OFFSET;
-       mfi = (reg & CCM_SPCTL0_MFI_MASK) >> CCM_SPCTL0_MFI_OFFSET;
-       mfn = (reg & CCM_SPCTL0_MFN_MASK) >> CCM_SPCTL0_MFN_OFFSET;
-
-       mfi = (mfi <= 5) ? 5 : mfi;
-       temp = 2LL * ref_clk * mfn;
-       do_div(temp, mfd + 1);
-       temp = 2LL * ref_clk * mfi + temp;
-       do_div(temp, pdf + 1);
-
-       return (unsigned long)temp;
-}
-
-static struct clk spll_clk = {
-       .parent = &ckih_clk,
-       .get_rate = get_spll_clk,
-       .enable = _clk_spll_enable,
-       .disable = _clk_spll_disable,
-};
-
-static unsigned long get_hclk_clk(struct clk *clk)
-{
-       unsigned long rate;
-       unsigned long bclk_pdf;
-
-       bclk_pdf = (CSCR() & CCM_CSCR_BCLK_MASK)
-               >> CCM_CSCR_BCLK_OFFSET;
-
-       rate = clk_get_rate(clk->parent);
-       return rate / (bclk_pdf + 1);
-}
-
-static struct clk hclk_clk = {
-       .parent = &fclk_clk,
-       .get_rate = get_hclk_clk,
-};
-
-static unsigned long get_ipg_clk(struct clk *clk)
-{
-       unsigned long rate;
-       unsigned long ipg_pdf;
-
-       ipg_pdf = (CSCR() & CCM_CSCR_IPDIV) >> CCM_CSCR_IPDIV_OFFSET;
-
-       rate = clk_get_rate(clk->parent);
-       return rate / (ipg_pdf + 1);
-}
-
-static struct clk ipg_clk = {
-       .parent = &hclk_clk,
-       .get_rate = get_ipg_clk,
-};
-
-static unsigned long _clk_perclkx_recalc(struct clk *clk)
-{
-       unsigned long perclk_pdf;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       if (clk->id < 0 || clk->id > 3)
-               return 0;
-
-       perclk_pdf = (PCDR1() >> (clk->id << 3)) & CCM_PCDR1_PERDIV1_MASK;
-
-       return parent_rate / (perclk_pdf + 1);
-}
-
-static struct clk per_clk[] = {
-       {
-               .id = 0,
-               .parent = &mpll_clk,
-               .get_rate = _clk_perclkx_recalc,
-       }, {
-               .id = 1,
-               .parent = &mpll_clk,
-               .get_rate = _clk_perclkx_recalc,
-       }, {
-               .id = 2,
-               .parent = &mpll_clk,
-               .round_rate = _clk_perclkx_round_rate,
-               .set_rate = _clk_perclkx_set_rate,
-               .get_rate = _clk_perclkx_recalc,
-               /* Enable/Disable done via lcd_clkc[1] */
-       }, {
-               .id = 3,
-               .parent = &mpll_clk,
-               .round_rate = _clk_perclkx_round_rate,
-               .set_rate = _clk_perclkx_set_rate,
-               .get_rate = _clk_perclkx_recalc,
-               /* Enable/Disable done via csi_clk[1] */
-       },
-};
-
-static struct clk uart_ipg_clk[];
-
-static struct clk uart_clk[] = {
-       {
-               .id = 0,
-               .parent = &per_clk[0],
-               .secondary = &uart_ipg_clk[0],
-       }, {
-               .id = 1,
-               .parent = &per_clk[0],
-               .secondary = &uart_ipg_clk[1],
-       }, {
-               .id = 2,
-               .parent = &per_clk[0],
-               .secondary = &uart_ipg_clk[2],
-       }, {
-               .id = 3,
-               .parent = &per_clk[0],
-               .secondary = &uart_ipg_clk[3],
-       },
-};
-
-static struct clk uart_ipg_clk[] = {
-       {
-               .id = 0,
-               .parent = &ipg_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_UART1_REG,
-               .enable_shift = CCM_PCCR_UART1_OFFSET,
-               .disable = _clk_disable,
-       }, {
-               .id = 1,
-               .parent = &ipg_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_UART2_REG,
-               .enable_shift = CCM_PCCR_UART2_OFFSET,
-               .disable = _clk_disable,
-       }, {
-               .id = 2,
-               .parent = &ipg_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_UART3_REG,
-               .enable_shift = CCM_PCCR_UART3_OFFSET,
-               .disable = _clk_disable,
-       }, {
-               .id = 3,
-               .parent = &ipg_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_UART4_REG,
-               .enable_shift = CCM_PCCR_UART4_OFFSET,
-               .disable = _clk_disable,
-       },
-};
-
-static struct clk gpt_ipg_clk[];
-
-static struct clk gpt_clk[] = {
-       {
-               .id = 0,
-               .parent = &per_clk[0],
-               .secondary = &gpt_ipg_clk[0],
-       }, {
-               .id = 1,
-               .parent = &per_clk[0],
-               .secondary = &gpt_ipg_clk[1],
-       }, {
-               .id = 2,
-               .parent = &per_clk[0],
-               .secondary = &gpt_ipg_clk[2],
-       },
-};
-
-static struct clk gpt_ipg_clk[] = {
-       {
-               .id = 0,
-               .parent = &ipg_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_GPT1_REG,
-               .enable_shift = CCM_PCCR_GPT1_OFFSET,
-               .disable = _clk_disable,
-       }, {
-               .id = 1,
-               .parent = &ipg_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_GPT2_REG,
-               .enable_shift = CCM_PCCR_GPT2_OFFSET,
-               .disable = _clk_disable,
-       }, {
-               .id = 2,
-               .parent = &ipg_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_GPT3_REG,
-               .enable_shift = CCM_PCCR_GPT3_OFFSET,
-               .disable = _clk_disable,
-       },
-};
-
-static struct clk pwm_clk[] = {
-       {
-               .parent = &per_clk[0],
-               .secondary = &pwm_clk[1],
-       }, {
-               .parent = &ipg_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_PWM_REG,
-               .enable_shift = CCM_PCCR_PWM_OFFSET,
-               .disable = _clk_disable,
-       },
-};
-
-static struct clk sdhc_ipg_clk[];
-
-static struct clk sdhc_clk[] = {
-       {
-               .id = 0,
-               .parent = &per_clk[1],
-               .secondary = &sdhc_ipg_clk[0],
-       }, {
-               .id = 1,
-               .parent = &per_clk[1],
-               .secondary = &sdhc_ipg_clk[1],
-       },
-};
-
-static struct clk sdhc_ipg_clk[] = {
-       {
-               .id = 0,
-               .parent = &ipg_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_SDHC1_REG,
-               .enable_shift = CCM_PCCR_SDHC1_OFFSET,
-               .disable = _clk_disable,
-       }, {
-               .id = 1,
-               .parent = &ipg_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_SDHC2_REG,
-               .enable_shift = CCM_PCCR_SDHC2_OFFSET,
-               .disable = _clk_disable,
-       },
-};
-
-static struct clk cspi_ipg_clk[];
-
-static struct clk cspi_clk[] = {
-       {
-               .id = 0,
-               .parent = &per_clk[1],
-               .secondary = &cspi_ipg_clk[0],
-       }, {
-               .id = 1,
-               .parent = &per_clk[1],
-               .secondary = &cspi_ipg_clk[1],
-       }, {
-               .id = 2,
-               .parent = &per_clk[1],
-               .secondary = &cspi_ipg_clk[2],
-       },
-};
-
-static struct clk cspi_ipg_clk[] = {
-       {
-               .id = 0,
-               .parent = &ipg_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_CSPI1_REG,
-               .enable_shift = CCM_PCCR_CSPI1_OFFSET,
-               .disable = _clk_disable,
-       }, {
-               .id = 1,
-               .parent = &ipg_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_CSPI2_REG,
-               .enable_shift = CCM_PCCR_CSPI2_OFFSET,
-               .disable = _clk_disable,
-       }, {
-               .id = 3,
-               .parent = &ipg_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_CSPI3_REG,
-               .enable_shift = CCM_PCCR_CSPI3_OFFSET,
-               .disable = _clk_disable,
-       },
-};
-
-static struct clk lcdc_clk[] = {
-       {
-               .parent = &per_clk[2],
-               .secondary = &lcdc_clk[1],
-               .round_rate = _clk_parent_round_rate,
-               .set_rate = _clk_parent_set_rate,
-       }, {
-               .parent = &ipg_clk,
-               .secondary = &lcdc_clk[2],
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_LCDC_REG,
-               .enable_shift = CCM_PCCR_LCDC_OFFSET,
-               .disable = _clk_disable,
-       }, {
-               .parent = &hclk_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_HCLK_LCDC_REG,
-               .enable_shift = CCM_PCCR_HCLK_LCDC_OFFSET,
-               .disable = _clk_disable,
-       },
-};
-
-static struct clk csi_clk[] = {
-       {
-               .parent = &per_clk[3],
-               .secondary = &csi_clk[1],
-               .round_rate = _clk_parent_round_rate,
-               .set_rate = _clk_parent_set_rate,
-       }, {
-               .parent = &hclk_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_HCLK_CSI_REG,
-               .enable_shift = CCM_PCCR_HCLK_CSI_OFFSET,
-               .disable = _clk_disable,
-       },
-};
-
-static struct clk usb_clk[] = {
-       {
-               .parent = &spll_clk,
-               .secondary = &usb_clk[1],
-               .get_rate = _clk_usb_recalc,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_USBOTG_REG,
-               .enable_shift = CCM_PCCR_USBOTG_OFFSET,
-               .disable = _clk_disable,
-               .round_rate = _clk_usb_round_rate,
-               .set_rate = _clk_usb_set_rate,
-       }, {
-               .parent = &hclk_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_HCLK_USBOTG_REG,
-               .enable_shift = CCM_PCCR_HCLK_USBOTG_OFFSET,
-               .disable = _clk_disable,
-       }
-};
-
-static struct clk ssi_ipg_clk[];
-
-static struct clk ssi_clk[] = {
-       {
-               .id = 0,
-               .parent = &mpll_clk,
-               .secondary = &ssi_ipg_clk[0],
-               .get_rate = _clk_ssi1_recalc,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_SSI1_BAUD_REG,
-               .enable_shift = CCM_PCCR_SSI1_BAUD_OFFSET,
-               .disable = _clk_disable,
-       }, {
-               .id = 1,
-               .parent = &mpll_clk,
-               .secondary = &ssi_ipg_clk[1],
-               .get_rate = _clk_ssi2_recalc,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_SSI2_BAUD_REG,
-               .enable_shift = CCM_PCCR_SSI2_BAUD_OFFSET,
-               .disable = _clk_disable,
-       },
-};
-
-static struct clk ssi_ipg_clk[] = {
-       {
-               .id = 0,
-               .parent = &ipg_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_SSI1_REG,
-               .enable_shift = CCM_PCCR_SSI1_IPG_OFFSET,
-               .disable = _clk_disable,
-       }, {
-               .id = 1,
-               .parent = &ipg_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_SSI2_REG,
-               .enable_shift = CCM_PCCR_SSI2_IPG_OFFSET,
-               .disable = _clk_disable,
-       },
-};
-
-
-static struct clk nfc_clk = {
-       .parent = &fclk_clk,
-       .get_rate = _clk_nfc_recalc,
-       .enable = _clk_enable,
-       .enable_reg = CCM_PCCR_NFC_REG,
-       .enable_shift = CCM_PCCR_NFC_OFFSET,
-       .disable = _clk_disable,
-};
-
-static struct clk dma_clk[] = {
-       {
-               .parent = &hclk_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_DMA_REG,
-               .enable_shift = CCM_PCCR_DMA_OFFSET,
-               .disable = _clk_disable,
-               .secondary = &dma_clk[1],
-       },  {
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_HCLK_DMA_REG,
-               .enable_shift = CCM_PCCR_HCLK_DMA_OFFSET,
-               .disable = _clk_disable,
-       },
-};
-
-static struct clk brom_clk = {
-       .parent = &hclk_clk,
-       .enable = _clk_enable,
-       .enable_reg = CCM_PCCR_HCLK_BROM_REG,
-       .enable_shift = CCM_PCCR_HCLK_BROM_OFFSET,
-       .disable = _clk_disable,
-};
-
-static struct clk emma_clk[] = {
-       {
-               .parent = &hclk_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_EMMA_REG,
-               .enable_shift = CCM_PCCR_EMMA_OFFSET,
-               .disable = _clk_disable,
-               .secondary = &emma_clk[1],
-       }, {
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_HCLK_EMMA_REG,
-               .enable_shift = CCM_PCCR_HCLK_EMMA_OFFSET,
-               .disable = _clk_disable,
-       }
-};
-
-static struct clk slcdc_clk[] = {
-       {
-               .parent = &hclk_clk,
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_SLCDC_REG,
-               .enable_shift = CCM_PCCR_SLCDC_OFFSET,
-               .disable = _clk_disable,
-               .secondary = &slcdc_clk[1],
-       }, {
-               .enable = _clk_enable,
-               .enable_reg = CCM_PCCR_HCLK_SLCDC_REG,
-               .enable_shift = CCM_PCCR_HCLK_SLCDC_OFFSET,
-               .disable = _clk_disable,
-       }
-};
-
-static struct clk wdog_clk = {
-       .parent = &ipg_clk,
-       .enable = _clk_enable,
-       .enable_reg = CCM_PCCR_WDT_REG,
-       .enable_shift = CCM_PCCR_WDT_OFFSET,
-       .disable = _clk_disable,
-};
-
-static struct clk gpio_clk = {
-       .parent = &ipg_clk,
-       .enable = _clk_enable,
-       .enable_reg = CCM_PCCR_GPIO_REG,
-       .enable_shift = CCM_PCCR_GPIO_OFFSET,
-       .disable = _clk_disable,
-};
-
-static struct clk i2c_clk = {
-       .id = 0,
-       .parent = &ipg_clk,
-       .enable = _clk_enable,
-       .enable_reg = CCM_PCCR_I2C1_REG,
-       .enable_shift = CCM_PCCR_I2C1_OFFSET,
-       .disable = _clk_disable,
-};
-
-static struct clk kpp_clk = {
-       .parent = &ipg_clk,
-       .enable = _clk_enable,
-       .enable_reg = CCM_PCCR_KPP_REG,
-       .enable_shift = CCM_PCCR_KPP_OFFSET,
-       .disable = _clk_disable,
-};
-
-static struct clk owire_clk = {
-       .parent = &ipg_clk,
-       .enable = _clk_enable,
-       .enable_reg = CCM_PCCR_OWIRE_REG,
-       .enable_shift = CCM_PCCR_OWIRE_OFFSET,
-       .disable = _clk_disable,
-};
-
-static struct clk rtc_clk = {
-       .parent = &ipg_clk,
-       .enable = _clk_enable,
-       .enable_reg = CCM_PCCR_RTC_REG,
-       .enable_shift = CCM_PCCR_RTC_OFFSET,
-       .disable = _clk_disable,
-};
-
-static unsigned long _clk_clko_round_rate(struct clk *clk, unsigned long rate)
-{
-       return _clk_generic_round_rate(clk, rate, 8);
-}
-
-static int _clk_clko_set_rate(struct clk *clk, unsigned long rate)
-{
-       u32 reg;
-       u32 div;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       div = parent_rate / rate;
-
-       if (div > 8 || div < 1 || ((parent_rate / div) != rate))
-               return -EINVAL;
-       div--;
-
-       reg = __raw_readl(CCM_PCDR0);
-
-       if (clk->parent == &usb_clk[0]) {
-               reg &= ~CCM_PCDR0_48MDIV_MASK;
-               reg |= div << CCM_PCDR0_48MDIV_OFFSET;
-       }
-       __raw_writel(reg, CCM_PCDR0);
-
-       return 0;
-}
-
-static unsigned long _clk_clko_recalc(struct clk *clk)
-{
-       u32 div = 0;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       if (clk->parent == &usb_clk[0]) /* 48M */
-               div = __raw_readl(CCM_PCDR0) & CCM_PCDR0_48MDIV_MASK
-                        >> CCM_PCDR0_48MDIV_OFFSET;
-       div++;
-
-       return parent_rate / div;
-}
-
-static struct clk clko_clk;
-
-static int _clk_clko_set_parent(struct clk *clk, struct clk *parent)
-{
-       u32 reg;
-
-       reg = __raw_readl(CCM_CCSR) & ~CCM_CCSR_CLKOSEL_MASK;
-
-       if (parent == &ckil_clk)
-               reg |= 0 << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &fpm_clk)
-               reg |= 1 << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &ckih_clk)
-               reg |= 2 << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == mpll_clk.parent)
-               reg |= 3 << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == spll_clk.parent)
-               reg |= 4 << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &mpll_clk)
-               reg |= 5 << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &spll_clk)
-               reg |= 6 << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &fclk_clk)
-               reg |= 7 << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &hclk_clk)
-               reg |= 8 << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &ipg_clk)
-               reg |= 9 << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &per_clk[0])
-               reg |= 0xA << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &per_clk[1])
-               reg |= 0xB << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &per_clk[2])
-               reg |= 0xC << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &per_clk[3])
-               reg |= 0xD << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &ssi_clk[0])
-               reg |= 0xE << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &ssi_clk[1])
-               reg |= 0xF << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &nfc_clk)
-               reg |= 0x10 << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &usb_clk[0])
-               reg |= 0x14 << CCM_CCSR_CLKOSEL_OFFSET;
-       else if (parent == &clko_clk)
-               reg |= 0x15 << CCM_CCSR_CLKOSEL_OFFSET;
-       else
-               return -EINVAL;
-
-       __raw_writel(reg, CCM_CCSR);
-
-       return 0;
-}
-
-static struct clk clko_clk = {
-       .get_rate = _clk_clko_recalc,
-       .set_rate = _clk_clko_set_rate,
-       .round_rate = _clk_clko_round_rate,
-       .set_parent = _clk_clko_set_parent,
-};
-
-
-#define _REGISTER_CLOCK(d, n, c) \
-       { \
-               .dev_id = d, \
-               .con_id = n, \
-               .clk = &c, \
-       },
-static struct clk_lookup lookups[] = {
-/* It's unlikely that any driver wants one of them directly:
-       _REGISTER_CLOCK(NULL, "ckih", ckih_clk)
-       _REGISTER_CLOCK(NULL, "ckil", ckil_clk)
-       _REGISTER_CLOCK(NULL, "fpm", fpm_clk)
-       _REGISTER_CLOCK(NULL, "mpll", mpll_clk)
-       _REGISTER_CLOCK(NULL, "spll", spll_clk)
-       _REGISTER_CLOCK(NULL, "fclk", fclk_clk)
-       _REGISTER_CLOCK(NULL, "hclk", hclk_clk)
-       _REGISTER_CLOCK(NULL, "ipg", ipg_clk)
-*/
-       _REGISTER_CLOCK(NULL, "perclk1", per_clk[0])
-       _REGISTER_CLOCK(NULL, "perclk2", per_clk[1])
-       _REGISTER_CLOCK(NULL, "perclk3", per_clk[2])
-       _REGISTER_CLOCK(NULL, "perclk4", per_clk[3])
-       _REGISTER_CLOCK(NULL, "clko", clko_clk)
-       _REGISTER_CLOCK("imx21-uart.0", NULL, uart_clk[0])
-       _REGISTER_CLOCK("imx21-uart.1", NULL, uart_clk[1])
-       _REGISTER_CLOCK("imx21-uart.2", NULL, uart_clk[2])
-       _REGISTER_CLOCK("imx21-uart.3", NULL, uart_clk[3])
-       _REGISTER_CLOCK(NULL, "gpt1", gpt_clk[0])
-       _REGISTER_CLOCK(NULL, "gpt1", gpt_clk[1])
-       _REGISTER_CLOCK(NULL, "gpt1", gpt_clk[2])
-       _REGISTER_CLOCK(NULL, "pwm", pwm_clk[0])
-       _REGISTER_CLOCK(NULL, "sdhc1", sdhc_clk[0])
-       _REGISTER_CLOCK(NULL, "sdhc2", sdhc_clk[1])
-       _REGISTER_CLOCK("imx21-cspi.0", NULL, cspi_clk[0])
-       _REGISTER_CLOCK("imx21-cspi.1", NULL, cspi_clk[1])
-       _REGISTER_CLOCK("imx21-cspi.2", NULL, cspi_clk[2])
-       _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk[0])
-       _REGISTER_CLOCK(NULL, "csi", csi_clk[0])
-       _REGISTER_CLOCK("imx21-hcd.0", NULL, usb_clk[0])
-       _REGISTER_CLOCK(NULL, "ssi1", ssi_clk[0])
-       _REGISTER_CLOCK(NULL, "ssi2", ssi_clk[1])
-       _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
-       _REGISTER_CLOCK(NULL, "dma", dma_clk[0])
-       _REGISTER_CLOCK(NULL, "brom", brom_clk)
-       _REGISTER_CLOCK(NULL, "emma", emma_clk[0])
-       _REGISTER_CLOCK(NULL, "slcdc", slcdc_clk[0])
-       _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
-       _REGISTER_CLOCK(NULL, "gpio", gpio_clk)
-       _REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
-       _REGISTER_CLOCK("mxc-keypad", NULL, kpp_clk)
-       _REGISTER_CLOCK(NULL, "owire", owire_clk)
-       _REGISTER_CLOCK(NULL, "rtc", rtc_clk)
-};
-
-/*
- * must be called very early to get information about the
- * available clock rate when the timer framework starts
- */
-int __init mx21_clocks_init(unsigned long lref, unsigned long href)
-{
-       u32 cscr;
-
-       external_low_reference = lref;
-       external_high_reference = href;
-
-       /* detect clock reference for both system PLL */
-       cscr = CSCR();
-       if (cscr & CCM_CSCR_MCU)
-               mpll_clk.parent = &ckih_clk;
-       else
-               mpll_clk.parent = &fpm_clk;
-
-       if (cscr & CCM_CSCR_SP)
-               spll_clk.parent = &ckih_clk;
-       else
-               spll_clk.parent = &fpm_clk;
-
-       clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-       /* Turn off all clock gates */
-       __raw_writel(0, CCM_PCCR0);
-       __raw_writel(CCM_PCCR_GPT1_MASK, CCM_PCCR1);
-
-       /* This turns of the serial PLL as well */
-       spll_clk.disable(&spll_clk);
-
-       /* This will propagate to all children and init all the clock rates. */
-       clk_enable(&per_clk[0]);
-       clk_enable(&gpio_clk);
-
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
-       clk_enable(&uart_clk[0]);
-#endif
-
-       mxc_timer_init(&gpt_clk[0], MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR),
-                       MX21_INT_GPT1);
-       return 0;
-}
diff --git a/arch/arm/mach-imx/clock-imx25.c b/arch/arm/mach-imx/clock-imx25.c
deleted file mode 100644 (file)
index b0fec74..0000000
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Copyright (C) 2009 by Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-#include <mach/mx25.h>
-
-#define CRM_BASE       MX25_IO_ADDRESS(MX25_CRM_BASE_ADDR)
-
-#define CCM_MPCTL      0x00
-#define CCM_UPCTL      0x04
-#define CCM_CCTL       0x08
-#define CCM_CGCR0      0x0C
-#define CCM_CGCR1      0x10
-#define CCM_CGCR2      0x14
-#define CCM_PCDR0      0x18
-#define CCM_PCDR1      0x1C
-#define CCM_PCDR2      0x20
-#define CCM_PCDR3      0x24
-#define CCM_RCSR       0x28
-#define CCM_CRDR       0x2C
-#define CCM_DCVR0      0x30
-#define CCM_DCVR1      0x34
-#define CCM_DCVR2      0x38
-#define CCM_DCVR3      0x3c
-#define CCM_LTR0       0x40
-#define CCM_LTR1       0x44
-#define CCM_LTR2       0x48
-#define CCM_LTR3       0x4c
-
-static unsigned long get_rate_mpll(void)
-{
-       ulong mpctl = __raw_readl(CRM_BASE + CCM_MPCTL);
-
-       return mxc_decode_pll(mpctl, 24000000);
-}
-
-static unsigned long get_rate_upll(void)
-{
-       ulong mpctl = __raw_readl(CRM_BASE + CCM_UPCTL);
-
-       return mxc_decode_pll(mpctl, 24000000);
-}
-
-unsigned long get_rate_arm(struct clk *clk)
-{
-       unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
-       unsigned long rate = get_rate_mpll();
-
-       if (cctl & (1 << 14))
-               rate = (rate * 3) >> 2;
-
-       return rate / ((cctl >> 30) + 1);
-}
-
-static unsigned long get_rate_ahb(struct clk *clk)
-{
-       unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
-
-       return get_rate_arm(NULL) / (((cctl >> 28) & 0x3) + 1);
-}
-
-static unsigned long get_rate_ipg(struct clk *clk)
-{
-       return get_rate_ahb(NULL) >> 1;
-}
-
-static unsigned long get_rate_per(int per)
-{
-       unsigned long ofs = (per & 0x3) * 8;
-       unsigned long reg = per & ~0x3;
-       unsigned long val = (readl(CRM_BASE + CCM_PCDR0 + reg) >> ofs) & 0x3f;
-       unsigned long fref;
-
-       if (readl(CRM_BASE + 0x64) & (1 << per))
-               fref = get_rate_upll();
-       else
-               fref = get_rate_ahb(NULL);
-
-       return fref / (val + 1);
-}
-
-static unsigned long get_rate_uart(struct clk *clk)
-{
-       return get_rate_per(15);
-}
-
-static unsigned long get_rate_ssi2(struct clk *clk)
-{
-       return get_rate_per(14);
-}
-
-static unsigned long get_rate_ssi1(struct clk *clk)
-{
-       return get_rate_per(13);
-}
-
-static unsigned long get_rate_i2c(struct clk *clk)
-{
-       return get_rate_per(6);
-}
-
-static unsigned long get_rate_nfc(struct clk *clk)
-{
-       return get_rate_per(8);
-}
-
-static unsigned long get_rate_gpt(struct clk *clk)
-{
-       return get_rate_per(5);
-}
-
-static unsigned long get_rate_lcdc(struct clk *clk)
-{
-       return get_rate_per(7);
-}
-
-static unsigned long get_rate_esdhc1(struct clk *clk)
-{
-       return get_rate_per(3);
-}
-
-static unsigned long get_rate_esdhc2(struct clk *clk)
-{
-       return get_rate_per(4);
-}
-
-static unsigned long get_rate_csi(struct clk *clk)
-{
-       return get_rate_per(0);
-}
-
-static unsigned long get_rate_otg(struct clk *clk)
-{
-       unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
-       unsigned long rate = get_rate_upll();
-
-       return (cctl & (1 << 23)) ? 0 : rate / ((0x3F & (cctl >> 16)) + 1);
-}
-
-static int clk_cgcr_enable(struct clk *clk)
-{
-       u32 reg;
-
-       reg = __raw_readl(clk->enable_reg);
-       reg |= 1 << clk->enable_shift;
-       __raw_writel(reg, clk->enable_reg);
-
-       return 0;
-}
-
-static void clk_cgcr_disable(struct clk *clk)
-{
-       u32 reg;
-
-       reg = __raw_readl(clk->enable_reg);
-       reg &= ~(1 << clk->enable_shift);
-       __raw_writel(reg, clk->enable_reg);
-}
-
-#define DEFINE_CLOCK(name, i, er, es, gr, sr, s)       \
-       static struct clk name = {                      \
-               .id             = i,                    \
-               .enable_reg     = CRM_BASE + er,        \
-               .enable_shift   = es,                   \
-               .get_rate       = gr,                   \
-               .set_rate       = sr,                   \
-               .enable         = clk_cgcr_enable,      \
-               .disable        = clk_cgcr_disable,     \
-               .secondary      = s,                    \
-       }
-
-/*
- * Note: the following IPG clock gating bits are wrongly marked "Reserved" in
- * the i.MX25 Reference Manual Rev 1, table 15-13. The information below is
- * taken from the Freescale released BSP.
- *
- * bit reg     offset  clock
- *
- * 0   CGCR1   0       AUDMUX
- * 12  CGCR1   12      ESAI
- * 16  CGCR1   16      GPIO1
- * 17  CGCR1   17      GPIO2
- * 18  CGCR1   18      GPIO3
- * 23  CGCR1   23      I2C1
- * 24  CGCR1   24      I2C2
- * 25  CGCR1   25      I2C3
- * 27  CGCR1   27      IOMUXC
- * 28  CGCR1   28      KPP
- * 30  CGCR1   30      OWIRE
- * 36  CGCR2   4       RTIC
- * 51  CGCR2   19      WDOG
- */
-
-DEFINE_CLOCK(gpt_clk,    0, CCM_CGCR0,  5, get_rate_gpt, NULL, NULL);
-DEFINE_CLOCK(uart_per_clk, 0, CCM_CGCR0, 15, get_rate_uart, NULL, NULL);
-DEFINE_CLOCK(ssi1_per_clk, 0, CCM_CGCR0, 13, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(ssi2_per_clk, 0, CCM_CGCR0, 14, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(cspi1_clk,  0, CCM_CGCR1,  5, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(cspi2_clk,  0, CCM_CGCR1,  6, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(cspi3_clk,  0, CCM_CGCR1,  7, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(esdhc1_ahb_clk, 0, CCM_CGCR0, 21, get_rate_esdhc1,         NULL, NULL);
-DEFINE_CLOCK(esdhc1_per_clk, 0, CCM_CGCR0,  3, get_rate_esdhc1,         NULL,
-               &esdhc1_ahb_clk);
-DEFINE_CLOCK(esdhc2_ahb_clk, 0, CCM_CGCR0, 22, get_rate_esdhc2,         NULL, NULL);
-DEFINE_CLOCK(esdhc2_per_clk, 0, CCM_CGCR0,  4, get_rate_esdhc2,         NULL,
-               &esdhc2_ahb_clk);
-DEFINE_CLOCK(sdma_ahb_clk, 0, CCM_CGCR0, 26, NULL,      NULL, NULL);
-DEFINE_CLOCK(fec_ahb_clk, 0, CCM_CGCR0, 23, NULL,       NULL, NULL);
-DEFINE_CLOCK(lcdc_ahb_clk, 0, CCM_CGCR0, 24, NULL,      NULL, NULL);
-DEFINE_CLOCK(lcdc_per_clk, 0, CCM_CGCR0,  7, NULL,      NULL, &lcdc_ahb_clk);
-DEFINE_CLOCK(csi_ahb_clk, 0, CCM_CGCR0, 18, get_rate_csi, NULL, NULL);
-DEFINE_CLOCK(csi_per_clk, 0, CCM_CGCR0, 0, get_rate_csi, NULL, &csi_ahb_clk);
-DEFINE_CLOCK(uart1_clk,  0, CCM_CGCR2, 14, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart2_clk,  0, CCM_CGCR2, 15, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart3_clk,  0, CCM_CGCR2, 16, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart4_clk,  0, CCM_CGCR2, 17, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart5_clk,  0, CCM_CGCR2, 18, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(nfc_clk,    0, CCM_CGCR0,  8, get_rate_nfc, NULL, NULL);
-DEFINE_CLOCK(usbotg_clk, 0, CCM_CGCR0, 28, get_rate_otg, NULL, NULL);
-DEFINE_CLOCK(pwm1_clk,  0, CCM_CGCR1, 31, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(pwm2_clk,  0, CCM_CGCR2,  0, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(pwm3_clk,  0, CCM_CGCR2,  1, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(pwm4_clk,  0, CCM_CGCR2,  2, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(kpp_clk,   0, CCM_CGCR1, 28, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(tsc_clk,   0, CCM_CGCR2, 13, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(i2c_clk,   0, CCM_CGCR0,  6, get_rate_i2c, NULL, NULL);
-DEFINE_CLOCK(fec_clk,   0, CCM_CGCR1, 15, get_rate_ipg, NULL, &fec_ahb_clk);
-DEFINE_CLOCK(dryice_clk, 0, CCM_CGCR1,  8, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(lcdc_clk,  0, CCM_CGCR1, 29, get_rate_lcdc, NULL, &lcdc_per_clk);
-DEFINE_CLOCK(wdt_clk,    0, CCM_CGCR2, 19, get_rate_ipg, NULL,  NULL);
-DEFINE_CLOCK(ssi1_clk,  0, CCM_CGCR2, 11, get_rate_ssi1, NULL, &ssi1_per_clk);
-DEFINE_CLOCK(ssi2_clk,  1, CCM_CGCR2, 12, get_rate_ssi2, NULL, &ssi2_per_clk);
-DEFINE_CLOCK(sdma_clk, 0, CCM_CGCR2,  6, get_rate_ipg, NULL, &sdma_ahb_clk);
-DEFINE_CLOCK(esdhc1_clk,  0, CCM_CGCR1, 13, get_rate_esdhc1, NULL,
-               &esdhc1_per_clk);
-DEFINE_CLOCK(esdhc2_clk,  1, CCM_CGCR1, 14, get_rate_esdhc2, NULL,
-               &esdhc2_per_clk);
-DEFINE_CLOCK(audmux_clk, 0, CCM_CGCR1, 0, NULL, NULL, NULL);
-DEFINE_CLOCK(csi_clk,    0, CCM_CGCR1,  4, get_rate_csi, NULL,  &csi_per_clk);
-DEFINE_CLOCK(can1_clk,  0, CCM_CGCR1,  2, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(can2_clk,  1, CCM_CGCR1,  3, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(iim_clk,    0, CCM_CGCR1, 26, NULL, NULL, NULL);
-
-#define _REGISTER_CLOCK(d, n, c)       \
-       {                               \
-               .dev_id = d,            \
-               .con_id = n,            \
-               .clk = &c,              \
-       },
-
-static struct clk_lookup lookups[] = {
-       /* i.mx25 has the i.mx21 type uart */
-       _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
-       _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
-       _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
-       _REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
-       _REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
-       _REGISTER_CLOCK("mxc-ehci.0", "usb", usbotg_clk)
-       _REGISTER_CLOCK("mxc-ehci.1", "usb", usbotg_clk)
-       _REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
-       _REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
-       _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
-       /* i.mx25 has the i.mx35 type cspi */
-       _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
-       _REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
-       _REGISTER_CLOCK("imx35-cspi.2", NULL, cspi3_clk)
-       _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm1_clk)
-       _REGISTER_CLOCK("mxc_pwm.1", NULL, pwm2_clk)
-       _REGISTER_CLOCK("mxc_pwm.2", NULL, pwm3_clk)
-       _REGISTER_CLOCK("mxc_pwm.3", NULL, pwm4_clk)
-       _REGISTER_CLOCK("imx-keypad", NULL, kpp_clk)
-       _REGISTER_CLOCK("mx25-adc", NULL, tsc_clk)
-       _REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
-       _REGISTER_CLOCK("imx-i2c.1", NULL, i2c_clk)
-       _REGISTER_CLOCK("imx-i2c.2", NULL, i2c_clk)
-       _REGISTER_CLOCK("imx25-fec.0", NULL, fec_clk)
-       _REGISTER_CLOCK("imxdi_rtc.0", NULL, dryice_clk)
-       _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
-       _REGISTER_CLOCK("imx2-wdt.0", NULL, wdt_clk)
-       _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
-       _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
-       _REGISTER_CLOCK("sdhci-esdhc-imx25.0", NULL, esdhc1_clk)
-       _REGISTER_CLOCK("sdhci-esdhc-imx25.1", NULL, esdhc2_clk)
-       _REGISTER_CLOCK("mx2-camera.0", NULL, csi_clk)
-       _REGISTER_CLOCK(NULL, "audmux", audmux_clk)
-       _REGISTER_CLOCK("flexcan.0", NULL, can1_clk)
-       _REGISTER_CLOCK("flexcan.1", NULL, can2_clk)
-       /* i.mx25 has the i.mx35 type sdma */
-       _REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
-       _REGISTER_CLOCK(NULL, "iim", iim_clk)
-};
-
-int __init mx25_clocks_init(void)
-{
-       clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-       /* Turn off all clocks except the ones we need to survive, namely:
-        * EMI, GPIO1-3 (CCM_CGCR1[18:16]), GPT1, IOMUXC (CCM_CGCR1[27]), IIM,
-        * SCC
-        */
-       __raw_writel((1 << 19), CRM_BASE + CCM_CGCR0);
-       __raw_writel((0xf << 16) | (3 << 26), CRM_BASE + CCM_CGCR1);
-       __raw_writel((1 << 5), CRM_BASE + CCM_CGCR2);
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
-       clk_enable(&uart1_clk);
-#endif
-
-       /* Clock source for lcdc and csi is upll */
-       __raw_writel(__raw_readl(CRM_BASE+0x64) | (1 << 7) | (1 << 0),
-                       CRM_BASE + 0x64);
-
-       /* Clock source for gpt is ahb_div */
-       __raw_writel(__raw_readl(CRM_BASE+0x64) & ~(1 << 5), CRM_BASE + 0x64);
-
-       clk_enable(&iim_clk);
-       imx_print_silicon_rev("i.MX25", mx25_revision());
-       clk_disable(&iim_clk);
-
-       mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
-
-       return 0;
-}
diff --git a/arch/arm/mach-imx/clock-imx27.c b/arch/arm/mach-imx/clock-imx27.c
deleted file mode 100644 (file)
index 98e04f5..0000000
+++ /dev/null
@@ -1,785 +0,0 @@
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/clkdev.h>
-#include <linux/of.h>
-
-#include <asm/div64.h>
-
-#include <mach/clock.h>
-#include <mach/common.h>
-#include <mach/hardware.h>
-
-#define IO_ADDR_CCM(off)       (MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR + (off)))
-
-/* Register offsets */
-#define CCM_CSCR               IO_ADDR_CCM(0x0)
-#define CCM_MPCTL0             IO_ADDR_CCM(0x4)
-#define CCM_MPCTL1             IO_ADDR_CCM(0x8)
-#define CCM_SPCTL0             IO_ADDR_CCM(0xc)
-#define CCM_SPCTL1             IO_ADDR_CCM(0x10)
-#define CCM_OSC26MCTL          IO_ADDR_CCM(0x14)
-#define CCM_PCDR0              IO_ADDR_CCM(0x18)
-#define CCM_PCDR1              IO_ADDR_CCM(0x1c)
-#define CCM_PCCR0              IO_ADDR_CCM(0x20)
-#define CCM_PCCR1              IO_ADDR_CCM(0x24)
-#define CCM_CCSR               IO_ADDR_CCM(0x28)
-#define CCM_PMCTL              IO_ADDR_CCM(0x2c)
-#define CCM_PMCOUNT            IO_ADDR_CCM(0x30)
-#define CCM_WKGDCTL            IO_ADDR_CCM(0x34)
-
-#define CCM_CSCR_UPDATE_DIS    (1 << 31)
-#define CCM_CSCR_SSI2          (1 << 23)
-#define CCM_CSCR_SSI1          (1 << 22)
-#define CCM_CSCR_VPU           (1 << 21)
-#define CCM_CSCR_MSHC           (1 << 20)
-#define CCM_CSCR_SPLLRES        (1 << 19)
-#define CCM_CSCR_MPLLRES        (1 << 18)
-#define CCM_CSCR_SP             (1 << 17)
-#define CCM_CSCR_MCU            (1 << 16)
-#define CCM_CSCR_OSC26MDIV      (1 << 4)
-#define CCM_CSCR_OSC26M         (1 << 3)
-#define CCM_CSCR_FPM            (1 << 2)
-#define CCM_CSCR_SPEN           (1 << 1)
-#define CCM_CSCR_MPEN           (1 << 0)
-
-/* i.MX27 TO 2+ */
-#define CCM_CSCR_ARM_SRC        (1 << 15)
-
-#define CCM_SPCTL1_LF           (1 << 15)
-#define CCM_SPCTL1_BRMO         (1 << 6)
-
-static struct clk mpll_main1_clk, mpll_main2_clk;
-
-static int clk_pccr_enable(struct clk *clk)
-{
-       unsigned long reg;
-
-       if (!clk->enable_reg)
-               return 0;
-
-       reg = __raw_readl(clk->enable_reg);
-       reg |= 1 << clk->enable_shift;
-       __raw_writel(reg, clk->enable_reg);
-
-       return 0;
-}
-
-static void clk_pccr_disable(struct clk *clk)
-{
-       unsigned long reg;
-
-       if (!clk->enable_reg)
-               return;
-
-       reg = __raw_readl(clk->enable_reg);
-       reg &= ~(1 << clk->enable_shift);
-       __raw_writel(reg, clk->enable_reg);
-}
-
-static int clk_spll_enable(struct clk *clk)
-{
-       unsigned long reg;
-
-       reg = __raw_readl(CCM_CSCR);
-       reg |= CCM_CSCR_SPEN;
-       __raw_writel(reg, CCM_CSCR);
-
-       while (!(__raw_readl(CCM_SPCTL1) & CCM_SPCTL1_LF));
-
-       return 0;
-}
-
-static void clk_spll_disable(struct clk *clk)
-{
-       unsigned long reg;
-
-       reg = __raw_readl(CCM_CSCR);
-       reg &= ~CCM_CSCR_SPEN;
-       __raw_writel(reg, CCM_CSCR);
-}
-
-static int clk_cpu_set_parent(struct clk *clk, struct clk *parent)
-{
-       int cscr = __raw_readl(CCM_CSCR);
-
-       if (clk->parent == parent)
-               return 0;
-
-       if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
-               if (parent == &mpll_main1_clk) {
-                       cscr |= CCM_CSCR_ARM_SRC;
-               } else {
-                       if (parent == &mpll_main2_clk)
-                               cscr &= ~CCM_CSCR_ARM_SRC;
-                       else
-                               return -EINVAL;
-               }
-               __raw_writel(cscr, CCM_CSCR);
-               clk->parent = parent;
-               return 0;
-       }
-       return -ENODEV;
-}
-
-static unsigned long round_rate_cpu(struct clk *clk, unsigned long rate)
-{
-       int div;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       div = parent_rate / rate;
-       if (parent_rate % rate)
-               div++;
-
-       if (div > 4)
-               div = 4;
-
-       return parent_rate / div;
-}
-
-static int set_rate_cpu(struct clk *clk, unsigned long rate)
-{
-       unsigned int div;
-       uint32_t reg;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       div = parent_rate / rate;
-
-       if (div > 4 || div < 1 || ((parent_rate / div) != rate))
-               return -EINVAL;
-
-       div--;
-
-       reg = __raw_readl(CCM_CSCR);
-       if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
-               reg &= ~(3 << 12);
-               reg |= div << 12;
-               reg &= ~(CCM_CSCR_FPM | CCM_CSCR_SPEN);
-               __raw_writel(reg | CCM_CSCR_UPDATE_DIS, CCM_CSCR);
-       } else {
-               printk(KERN_ERR "Can't set CPU frequency!\n");
-       }
-
-       return 0;
-}
-
-static unsigned long round_rate_per(struct clk *clk, unsigned long rate)
-{
-       u32 div;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       div = parent_rate / rate;
-       if (parent_rate % rate)
-               div++;
-
-       if (div > 64)
-               div = 64;
-
-       return parent_rate / div;
-}
-
-static int set_rate_per(struct clk *clk, unsigned long rate)
-{
-       u32 reg;
-       u32 div;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       if (clk->id < 0 || clk->id > 3)
-               return -EINVAL;
-
-       div = parent_rate / rate;
-       if (div > 64 || div < 1 || ((parent_rate / div) != rate))
-               return -EINVAL;
-       div--;
-
-       reg = __raw_readl(CCM_PCDR1) & ~(0x3f << (clk->id << 3));
-       reg |= div << (clk->id << 3);
-       __raw_writel(reg, CCM_PCDR1);
-
-       return 0;
-}
-
-static unsigned long get_rate_usb(struct clk *clk)
-{
-       unsigned long usb_pdf;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       usb_pdf = (__raw_readl(CCM_CSCR) >> 28) & 0x7;
-
-       return parent_rate / (usb_pdf + 1U);
-}
-
-static unsigned long get_rate_ssix(struct clk *clk, unsigned long pdf)
-{
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
-               pdf += 4;  /* MX27 TO2+ */
-       else
-               pdf = (pdf < 2) ? 124UL : pdf;  /* MX21 & MX27 TO1 */
-
-       return 2UL * parent_rate / pdf;
-}
-
-static unsigned long get_rate_ssi1(struct clk *clk)
-{
-       return get_rate_ssix(clk, (__raw_readl(CCM_PCDR0) >> 16) & 0x3f);
-}
-
-static unsigned long get_rate_ssi2(struct clk *clk)
-{
-       return get_rate_ssix(clk, (__raw_readl(CCM_PCDR0) >> 26) & 0x3f);
-}
-
-static unsigned long get_rate_nfc(struct clk *clk)
-{
-       unsigned long nfc_pdf;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
-               nfc_pdf = (__raw_readl(CCM_PCDR0) >> 6) & 0xf;
-       else
-               nfc_pdf = (__raw_readl(CCM_PCDR0) >> 12) & 0xf;
-
-       return parent_rate / (nfc_pdf + 1);
-}
-
-static unsigned long get_rate_vpu(struct clk *clk)
-{
-       unsigned long vpu_pdf;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
-               vpu_pdf = (__raw_readl(CCM_PCDR0) >> 10) & 0x3f;
-               vpu_pdf += 4;
-       } else {
-               vpu_pdf = (__raw_readl(CCM_PCDR0) >> 8) & 0xf;
-               vpu_pdf = (vpu_pdf < 2) ? 124 : vpu_pdf;
-       }
-
-       return 2UL * parent_rate / vpu_pdf;
-}
-
-static unsigned long round_rate_parent(struct clk *clk, unsigned long rate)
-{
-       return clk->parent->round_rate(clk->parent, rate);
-}
-
-static unsigned long get_rate_parent(struct clk *clk)
-{
-       return clk_get_rate(clk->parent);
-}
-
-static int set_rate_parent(struct clk *clk, unsigned long rate)
-{
-       return clk->parent->set_rate(clk->parent, rate);
-}
-
-/* in Hz */
-static unsigned long external_high_reference = 26000000;
-
-static unsigned long get_rate_high_reference(struct clk *clk)
-{
-       return external_high_reference;
-}
-
-/* in Hz */
-static unsigned long external_low_reference = 32768;
-
-static unsigned long get_rate_low_reference(struct clk *clk)
-{
-       return external_low_reference;
-}
-
-static unsigned long get_rate_fpm(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) * 1024;
-}
-
-static unsigned long get_rate_mpll(struct clk *clk)
-{
-       return mxc_decode_pll(__raw_readl(CCM_MPCTL0),
-                       clk_get_rate(clk->parent));
-}
-
-static unsigned long get_rate_mpll_main(struct clk *clk)
-{
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       /* i.MX27 TO2:
-        * clk->id == 0: arm clock source path 1 which is from 2 * MPLL / 2
-        * clk->id == 1: arm clock source path 2 which is from 2 * MPLL / 3
-        */
-       if (mx27_revision() >= IMX_CHIP_REVISION_2_0 && clk->id == 1)
-               return 2UL * parent_rate / 3UL;
-
-       return parent_rate;
-}
-
-static unsigned long get_rate_spll(struct clk *clk)
-{
-       uint32_t reg;
-       unsigned long rate;
-
-       rate = clk_get_rate(clk->parent);
-
-       reg = __raw_readl(CCM_SPCTL0);
-
-       /* On TO2 we have to write the value back. Otherwise we
-        * read 0 from this register the next time.
-        */
-       if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
-               __raw_writel(reg, CCM_SPCTL0);
-
-       return mxc_decode_pll(reg, rate);
-}
-
-static unsigned long get_rate_cpu(struct clk *clk)
-{
-       u32 div;
-       unsigned long rate;
-
-       if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
-               div = (__raw_readl(CCM_CSCR) >> 12) & 0x3;
-       else
-               div = (__raw_readl(CCM_CSCR) >> 13) & 0x7;
-
-       rate = clk_get_rate(clk->parent);
-       return rate / (div + 1);
-}
-
-static unsigned long get_rate_ahb(struct clk *clk)
-{
-       unsigned long rate, bclk_pdf;
-
-       if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
-               bclk_pdf = (__raw_readl(CCM_CSCR) >> 8) & 0x3;
-       else
-               bclk_pdf = (__raw_readl(CCM_CSCR) >> 9) & 0xf;
-
-       rate = clk_get_rate(clk->parent);
-       return rate / (bclk_pdf + 1);
-}
-
-static unsigned long get_rate_ipg(struct clk *clk)
-{
-       unsigned long rate, ipg_pdf;
-
-       if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
-               return clk_get_rate(clk->parent);
-       else
-               ipg_pdf = (__raw_readl(CCM_CSCR) >> 8) & 1;
-
-       rate = clk_get_rate(clk->parent);
-       return rate / (ipg_pdf + 1);
-}
-
-static unsigned long get_rate_per(struct clk *clk)
-{
-       unsigned long perclk_pdf, parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       if (clk->id < 0 || clk->id > 3)
-               return 0;
-
-       perclk_pdf = (__raw_readl(CCM_PCDR1) >> (clk->id << 3)) & 0x3f;
-
-       return parent_rate / (perclk_pdf + 1);
-}
-
-/*
- * the high frequency external clock reference
- * Default case is 26MHz. Could be changed at runtime
- * with a call to change_external_high_reference()
- */
-static struct clk ckih_clk = {
-       .get_rate       = get_rate_high_reference,
-};
-
-static struct clk mpll_clk = {
-       .parent         = &ckih_clk,
-       .get_rate       = get_rate_mpll,
-};
-
-/* For i.MX27 TO2, it is the MPLL path 1 of ARM core
- * It provides the clock source whose rate is same as MPLL
- */
-static struct clk mpll_main1_clk = {
-       .id             = 0,
-       .parent         = &mpll_clk,
-       .get_rate       = get_rate_mpll_main,
-};
-
-/* For i.MX27 TO2, it is the MPLL path 2 of ARM core
- * It provides the clock source whose rate is same MPLL * 2 / 3
- */
-static struct clk mpll_main2_clk = {
-       .id             = 1,
-       .parent         = &mpll_clk,
-       .get_rate       = get_rate_mpll_main,
-};
-
-static struct clk ahb_clk = {
-       .parent         = &mpll_main2_clk,
-       .get_rate       = get_rate_ahb,
-};
-
-static struct clk ipg_clk = {
-       .parent         = &ahb_clk,
-       .get_rate       = get_rate_ipg,
-};
-
-static struct clk cpu_clk = {
-       .parent = &mpll_main2_clk,
-       .set_parent = clk_cpu_set_parent,
-       .round_rate = round_rate_cpu,
-       .get_rate = get_rate_cpu,
-       .set_rate = set_rate_cpu,
-};
-
-static struct clk spll_clk = {
-       .parent = &ckih_clk,
-       .get_rate = get_rate_spll,
-       .enable = clk_spll_enable,
-       .disable = clk_spll_disable,
-};
-
-/*
- * the low frequency external clock reference
- * Default case is 32.768kHz.
- */
-static struct clk ckil_clk = {
-       .get_rate = get_rate_low_reference,
-};
-
-/* Output of frequency pre multiplier */
-static struct clk fpm_clk = {
-       .parent = &ckil_clk,
-       .get_rate = get_rate_fpm,
-};
-
-#define PCCR0 CCM_PCCR0
-#define PCCR1 CCM_PCCR1
-
-#define DEFINE_CLOCK(name, i, er, es, gr, s, p)                \
-       static struct clk name = {                      \
-               .id             = i,                    \
-               .enable_reg     = er,                   \
-               .enable_shift   = es,                   \
-               .get_rate       = gr,                   \
-               .enable         = clk_pccr_enable,      \
-               .disable        = clk_pccr_disable,     \
-               .secondary      = s,                    \
-               .parent         = p,                    \
-       }
-
-#define DEFINE_CLOCK1(name, i, er, es, getsetround, s, p)      \
-       static struct clk name = {                              \
-               .id             = i,                            \
-               .enable_reg     = er,                           \
-               .enable_shift   = es,                           \
-               .get_rate       = get_rate_##getsetround,       \
-               .set_rate       = set_rate_##getsetround,       \
-               .round_rate     = round_rate_##getsetround,     \
-               .enable         = clk_pccr_enable,              \
-               .disable        = clk_pccr_disable,             \
-               .secondary      = s,                            \
-               .parent         = p,                            \
-       }
-
-/* Forward declaration to keep the following list in order */
-static struct clk slcdc_clk1, sahara2_clk1, rtic_clk1, fec_clk1, emma_clk1,
-                 dma_clk1, lcdc_clk2, vpu_clk1;
-
-/* All clocks we can gate through PCCRx in the order of PCCRx bits */
-DEFINE_CLOCK(ssi2_clk1,    1, PCCR0,  0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ssi1_clk1,    0, PCCR0,  1, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(slcdc_clk,    0, PCCR0,  2, NULL, &slcdc_clk1, &ahb_clk);
-DEFINE_CLOCK(sdhc3_clk1,   0, PCCR0,  3, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sdhc2_clk1,   0, PCCR0,  4, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sdhc1_clk1,   0, PCCR0,  5, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(scc_clk,      0, PCCR0,  6, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sahara2_clk,  0, PCCR0,  7, NULL, &sahara2_clk1, &ahb_clk);
-DEFINE_CLOCK(rtic_clk,     0, PCCR0,  8, NULL, &rtic_clk1, &ahb_clk);
-DEFINE_CLOCK(rtc_clk,      0, PCCR0,  9, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(pwm_clk1,     0, PCCR0, 11, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(owire_clk,    0, PCCR0, 12, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(mstick_clk1,  0, PCCR0, 13, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(lcdc_clk1,    0, PCCR0, 14, NULL, &lcdc_clk2, &ipg_clk);
-DEFINE_CLOCK(kpp_clk,      0, PCCR0, 15, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(iim_clk,      0, PCCR0, 16, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(i2c2_clk,     1, PCCR0, 17, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(i2c1_clk,     0, PCCR0, 18, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt6_clk1,    0, PCCR0, 29, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt5_clk1,    0, PCCR0, 20, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt4_clk1,    0, PCCR0, 21, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt3_clk1,    0, PCCR0, 22, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt2_clk1,    0, PCCR0, 23, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt1_clk1,    0, PCCR0, 24, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpio_clk,     0, PCCR0, 25, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(fec_clk,      0, PCCR0, 26, NULL, &fec_clk1, &ahb_clk);
-DEFINE_CLOCK(emma_clk,     0, PCCR0, 27, NULL, &emma_clk1, &ahb_clk);
-DEFINE_CLOCK(dma_clk,      0, PCCR0, 28, NULL, &dma_clk1, &ahb_clk);
-DEFINE_CLOCK(cspi13_clk1,  0, PCCR0, 29, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(cspi2_clk1,   0, PCCR0, 30, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(cspi1_clk1,   0, PCCR0, 31, NULL, NULL, &ipg_clk);
-
-DEFINE_CLOCK(mstick_clk,   0, PCCR1,  2, NULL, &mstick_clk1, &ipg_clk);
-DEFINE_CLOCK(nfc_clk,      0, PCCR1,  3, get_rate_nfc, NULL, &cpu_clk);
-DEFINE_CLOCK(ssi2_clk,     1, PCCR1,  4, get_rate_ssi2, &ssi2_clk1, &mpll_main2_clk);
-DEFINE_CLOCK(ssi1_clk,     0, PCCR1,  5, get_rate_ssi1, &ssi1_clk1, &mpll_main2_clk);
-DEFINE_CLOCK(vpu_clk,      0, PCCR1,  6, get_rate_vpu, &vpu_clk1, &mpll_main2_clk);
-DEFINE_CLOCK1(per4_clk,    3, PCCR1,  7, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK1(per3_clk,    2, PCCR1,  8, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK1(per2_clk,    1, PCCR1,  9, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK1(per1_clk,    0, PCCR1, 10, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK(usb_clk1,     0, PCCR1, 11, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(slcdc_clk1,   0, PCCR1, 12, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(sahara2_clk1, 0, PCCR1, 13, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(rtic_clk1,    0, PCCR1, 14, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(lcdc_clk2,    0, PCCR1, 15, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(vpu_clk1,     0, PCCR1, 16, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(fec_clk1,     0, PCCR1, 17, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(emma_clk1,    0, PCCR1, 18, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(emi_clk,      0, PCCR1, 19, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(dma_clk1,     0, PCCR1, 20, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(csi_clk1,     0, PCCR1, 21, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(brom_clk,     0, PCCR1, 22, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(pata_clk,      0, PCCR1, 23, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(wdog_clk,     0, PCCR1, 24, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(usb_clk,      0, PCCR1, 25, get_rate_usb, &usb_clk1, &spll_clk);
-DEFINE_CLOCK(uart6_clk1,   0, PCCR1, 26, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart5_clk1,   0, PCCR1, 27, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart4_clk1,   0, PCCR1, 28, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart3_clk1,   0, PCCR1, 29, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart2_clk1,   0, PCCR1, 30, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart1_clk1,   0, PCCR1, 31, NULL, NULL, &ipg_clk);
-
-/* Clocks we cannot directly gate, but drivers need their rates */
-DEFINE_CLOCK(cspi1_clk,    0, NULL,   0, NULL, &cspi1_clk1, &per2_clk);
-DEFINE_CLOCK(cspi2_clk,    1, NULL,   0, NULL, &cspi2_clk1, &per2_clk);
-DEFINE_CLOCK(cspi3_clk,    2, NULL,   0, NULL, &cspi13_clk1, &per2_clk);
-DEFINE_CLOCK(sdhc1_clk,    0, NULL,   0, NULL, &sdhc1_clk1, &per2_clk);
-DEFINE_CLOCK(sdhc2_clk,    1, NULL,   0, NULL, &sdhc2_clk1, &per2_clk);
-DEFINE_CLOCK(sdhc3_clk,    2, NULL,   0, NULL, &sdhc3_clk1, &per2_clk);
-DEFINE_CLOCK(pwm_clk,      0, NULL,   0, NULL, &pwm_clk1, &per1_clk);
-DEFINE_CLOCK(gpt1_clk,     0, NULL,   0, NULL, &gpt1_clk1, &per1_clk);
-DEFINE_CLOCK(gpt2_clk,     1, NULL,   0, NULL, &gpt2_clk1, &per1_clk);
-DEFINE_CLOCK(gpt3_clk,     2, NULL,   0, NULL, &gpt3_clk1, &per1_clk);
-DEFINE_CLOCK(gpt4_clk,     3, NULL,   0, NULL, &gpt4_clk1, &per1_clk);
-DEFINE_CLOCK(gpt5_clk,     4, NULL,   0, NULL, &gpt5_clk1, &per1_clk);
-DEFINE_CLOCK(gpt6_clk,     5, NULL,   0, NULL, &gpt6_clk1, &per1_clk);
-DEFINE_CLOCK(uart1_clk,    0, NULL,   0, NULL, &uart1_clk1, &per1_clk);
-DEFINE_CLOCK(uart2_clk,    1, NULL,   0, NULL, &uart2_clk1, &per1_clk);
-DEFINE_CLOCK(uart3_clk,    2, NULL,   0, NULL, &uart3_clk1, &per1_clk);
-DEFINE_CLOCK(uart4_clk,    3, NULL,   0, NULL, &uart4_clk1, &per1_clk);
-DEFINE_CLOCK(uart5_clk,    4, NULL,   0, NULL, &uart5_clk1, &per1_clk);
-DEFINE_CLOCK(uart6_clk,    5, NULL,   0, NULL, &uart6_clk1, &per1_clk);
-DEFINE_CLOCK1(lcdc_clk,    0, NULL,   0, parent, &lcdc_clk1, &per3_clk);
-DEFINE_CLOCK1(csi_clk,     0, NULL,   0, parent, &csi_clk1, &per4_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
-       { \
-               .dev_id = d, \
-               .con_id = n, \
-               .clk = &c, \
-       },
-
-static struct clk_lookup lookups[] = {
-       /* i.mx27 has the i.mx21 type uart */
-       _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
-       _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
-       _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
-       _REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
-       _REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
-       _REGISTER_CLOCK("imx21-uart.5", NULL, uart6_clk)
-       _REGISTER_CLOCK(NULL, "gpt1", gpt1_clk)
-       _REGISTER_CLOCK(NULL, "gpt2", gpt2_clk)
-       _REGISTER_CLOCK(NULL, "gpt3", gpt3_clk)
-       _REGISTER_CLOCK(NULL, "gpt4", gpt4_clk)
-       _REGISTER_CLOCK(NULL, "gpt5", gpt5_clk)
-       _REGISTER_CLOCK(NULL, "gpt6", gpt6_clk)
-       _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm_clk)
-       _REGISTER_CLOCK("mxc-mmc.0", NULL, sdhc1_clk)
-       _REGISTER_CLOCK("mxc-mmc.1", NULL, sdhc2_clk)
-       _REGISTER_CLOCK("mxc-mmc.2", NULL, sdhc3_clk)
-       _REGISTER_CLOCK("imx27-cspi.0", NULL, cspi1_clk)
-       _REGISTER_CLOCK("imx27-cspi.1", NULL, cspi2_clk)
-       _REGISTER_CLOCK("imx27-cspi.2", NULL, cspi3_clk)
-       _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
-       _REGISTER_CLOCK("mx2-camera.0", NULL, csi_clk)
-       _REGISTER_CLOCK("fsl-usb2-udc", "usb", usb_clk)
-       _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usb_clk1)
-       _REGISTER_CLOCK("mxc-ehci.0", "usb", usb_clk)
-       _REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_clk1)
-       _REGISTER_CLOCK("mxc-ehci.1", "usb", usb_clk)
-       _REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_clk1)
-       _REGISTER_CLOCK("mxc-ehci.2", "usb", usb_clk)
-       _REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_clk1)
-       _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
-       _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
-       _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
-       _REGISTER_CLOCK(NULL, "vpu", vpu_clk)
-       _REGISTER_CLOCK(NULL, "dma", dma_clk)
-       _REGISTER_CLOCK(NULL, "rtic", rtic_clk)
-       _REGISTER_CLOCK(NULL, "brom", brom_clk)
-       _REGISTER_CLOCK(NULL, "emma", emma_clk)
-       _REGISTER_CLOCK("m2m-emmaprp.0", NULL, emma_clk)
-       _REGISTER_CLOCK(NULL, "slcdc", slcdc_clk)
-       _REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
-       _REGISTER_CLOCK(NULL, "emi", emi_clk)
-       _REGISTER_CLOCK(NULL, "sahara2", sahara2_clk)
-       _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
-       _REGISTER_CLOCK(NULL, "mstick", mstick_clk)
-       _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
-       _REGISTER_CLOCK(NULL, "gpio", gpio_clk)
-       _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
-       _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
-       _REGISTER_CLOCK(NULL, "iim", iim_clk)
-       _REGISTER_CLOCK(NULL, "kpp", kpp_clk)
-       _REGISTER_CLOCK("mxc_w1.0", NULL, owire_clk)
-       _REGISTER_CLOCK(NULL, "rtc", rtc_clk)
-       _REGISTER_CLOCK(NULL, "scc", scc_clk)
-};
-
-/* Adjust the clock path for TO2 and later */
-static void __init to2_adjust_clocks(void)
-{
-       unsigned long cscr = __raw_readl(CCM_CSCR);
-
-       if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
-               if (cscr & CCM_CSCR_ARM_SRC)
-                       cpu_clk.parent = &mpll_main1_clk;
-
-               if (!(cscr & CCM_CSCR_SSI2))
-                       ssi1_clk.parent = &spll_clk;
-
-               if (!(cscr & CCM_CSCR_SSI1))
-                       ssi1_clk.parent = &spll_clk;
-
-               if (!(cscr & CCM_CSCR_VPU))
-                       vpu_clk.parent = &spll_clk;
-       } else {
-               cpu_clk.parent = &mpll_clk;
-               cpu_clk.set_parent = NULL;
-               cpu_clk.round_rate = NULL;
-               cpu_clk.set_rate = NULL;
-               ahb_clk.parent = &mpll_clk;
-
-               per1_clk.parent = &mpll_clk;
-               per2_clk.parent = &mpll_clk;
-               per3_clk.parent = &mpll_clk;
-               per4_clk.parent = &mpll_clk;
-
-               ssi1_clk.parent = &mpll_clk;
-               ssi2_clk.parent = &mpll_clk;
-
-               vpu_clk.parent = &mpll_clk;
-       }
-}
-
-/*
- * must be called very early to get information about the
- * available clock rate when the timer framework starts
- */
-int __init mx27_clocks_init(unsigned long fref)
-{
-       u32 cscr = __raw_readl(CCM_CSCR);
-
-       external_high_reference = fref;
-
-       /* detect clock reference for both system PLLs */
-       if (cscr & CCM_CSCR_MCU)
-               mpll_clk.parent = &ckih_clk;
-       else
-               mpll_clk.parent = &fpm_clk;
-
-       if (cscr & CCM_CSCR_SP)
-               spll_clk.parent = &ckih_clk;
-       else
-               spll_clk.parent = &fpm_clk;
-
-       to2_adjust_clocks();
-
-       clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-       /* Turn off all clocks we do not need */
-       __raw_writel(0, CCM_PCCR0);
-       __raw_writel((1 << 10) | (1 << 19), CCM_PCCR1);
-
-       spll_clk.disable(&spll_clk);
-
-       /* enable basic clocks */
-       clk_enable(&per1_clk);
-       clk_enable(&gpio_clk);
-       clk_enable(&emi_clk);
-       clk_enable(&iim_clk);
-       imx_print_silicon_rev("i.MX27", mx27_revision());
-       clk_disable(&iim_clk);
-
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
-       clk_enable(&uart1_clk);
-#endif
-
-       mxc_timer_init(&gpt1_clk, MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR),
-                       MX27_INT_GPT1);
-
-       return 0;
-}
-
-#ifdef CONFIG_OF
-int __init mx27_clocks_init_dt(void)
-{
-       struct device_node *np;
-       u32 fref = 26000000; /* default */
-
-       for_each_compatible_node(np, NULL, "fixed-clock") {
-               if (!of_device_is_compatible(np, "fsl,imx-osc26m"))
-                       continue;
-
-               if (!of_property_read_u32(np, "clock-frequency", &fref))
-                       break;
-       }
-
-       return mx27_clocks_init(fref);
-}
-#endif
diff --git a/arch/arm/mach-imx/clock-imx31.c b/arch/arm/mach-imx/clock-imx31.c
deleted file mode 100644 (file)
index 3a943cd..0000000
+++ /dev/null
@@ -1,630 +0,0 @@
-/*
- * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <asm/div64.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/mx31.h>
-#include <mach/common.h>
-
-#include "crmregs-imx3.h"
-
-#define PRE_DIV_MIN_FREQ    10000000 /* Minimum Frequency after Predivider */
-
-static void __calc_pre_post_dividers(u32 div, u32 *pre, u32 *post)
-{
-       u32 min_pre, temp_pre, old_err, err;
-
-       if (div >= 512) {
-               *pre = 8;
-               *post = 64;
-       } else if (div >= 64) {
-               min_pre = (div - 1) / 64 + 1;
-               old_err = 8;
-               for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) {
-                       err = div % temp_pre;
-                       if (err == 0) {
-                               *pre = temp_pre;
-                               break;
-                       }
-                       err = temp_pre - err;
-                       if (err < old_err) {
-                               old_err = err;
-                               *pre = temp_pre;
-                       }
-               }
-               *post = (div + *pre - 1) / *pre;
-       } else if (div <= 8) {
-               *pre = div;
-               *post = 1;
-       } else {
-               *pre = 1;
-               *post = div;
-       }
-}
-
-static struct clk mcu_pll_clk;
-static struct clk serial_pll_clk;
-static struct clk ipg_clk;
-static struct clk ckih_clk;
-
-static int cgr_enable(struct clk *clk)
-{
-       u32 reg;
-
-       if (!clk->enable_reg)
-               return 0;
-
-       reg = __raw_readl(clk->enable_reg);
-       reg |= 3 << clk->enable_shift;
-       __raw_writel(reg, clk->enable_reg);
-
-       return 0;
-}
-
-static void cgr_disable(struct clk *clk)
-{
-       u32 reg;
-
-       if (!clk->enable_reg)
-               return;
-
-       reg = __raw_readl(clk->enable_reg);
-       reg &= ~(3 << clk->enable_shift);
-
-       /* special case for EMI clock */
-       if (clk->enable_reg == MXC_CCM_CGR2 && clk->enable_shift == 8)
-               reg |= (1 << clk->enable_shift);
-
-       __raw_writel(reg, clk->enable_reg);
-}
-
-static unsigned long pll_ref_get_rate(void)
-{
-       unsigned long ccmr;
-       unsigned int prcs;
-
-       ccmr = __raw_readl(MXC_CCM_CCMR);
-       prcs = (ccmr & MXC_CCM_CCMR_PRCS_MASK) >> MXC_CCM_CCMR_PRCS_OFFSET;
-       if (prcs == 0x1)
-               return CKIL_CLK_FREQ * 1024;
-       else
-               return clk_get_rate(&ckih_clk);
-}
-
-static unsigned long usb_pll_get_rate(struct clk *clk)
-{
-       unsigned long reg;
-
-       reg = __raw_readl(MXC_CCM_UPCTL);
-
-       return mxc_decode_pll(reg, pll_ref_get_rate());
-}
-
-static unsigned long serial_pll_get_rate(struct clk *clk)
-{
-       unsigned long reg;
-
-       reg = __raw_readl(MXC_CCM_SRPCTL);
-
-       return mxc_decode_pll(reg, pll_ref_get_rate());
-}
-
-static unsigned long mcu_pll_get_rate(struct clk *clk)
-{
-       unsigned long reg, ccmr;
-
-       ccmr = __raw_readl(MXC_CCM_CCMR);
-
-       if (!(ccmr & MXC_CCM_CCMR_MPE) || (ccmr & MXC_CCM_CCMR_MDS))
-               return clk_get_rate(&ckih_clk);
-
-       reg = __raw_readl(MXC_CCM_MPCTL);
-
-       return mxc_decode_pll(reg, pll_ref_get_rate());
-}
-
-static int usb_pll_enable(struct clk *clk)
-{
-       u32 reg;
-
-       reg = __raw_readl(MXC_CCM_CCMR);
-       reg |= MXC_CCM_CCMR_UPE;
-       __raw_writel(reg, MXC_CCM_CCMR);
-
-       /* No lock bit on MX31, so using max time from spec */
-       udelay(80);
-
-       return 0;
-}
-
-static void usb_pll_disable(struct clk *clk)
-{
-       u32 reg;
-
-       reg = __raw_readl(MXC_CCM_CCMR);
-       reg &= ~MXC_CCM_CCMR_UPE;
-       __raw_writel(reg, MXC_CCM_CCMR);
-}
-
-static int serial_pll_enable(struct clk *clk)
-{
-       u32 reg;
-
-       reg = __raw_readl(MXC_CCM_CCMR);
-       reg |= MXC_CCM_CCMR_SPE;
-       __raw_writel(reg, MXC_CCM_CCMR);
-
-       /* No lock bit on MX31, so using max time from spec */
-       udelay(80);
-
-       return 0;
-}
-
-static void serial_pll_disable(struct clk *clk)
-{
-       u32 reg;
-
-       reg = __raw_readl(MXC_CCM_CCMR);
-       reg &= ~MXC_CCM_CCMR_SPE;
-       __raw_writel(reg, MXC_CCM_CCMR);
-}
-
-#define PDR0(mask, off) ((__raw_readl(MXC_CCM_PDR0) & mask) >> off)
-#define PDR1(mask, off) ((__raw_readl(MXC_CCM_PDR1) & mask) >> off)
-#define PDR2(mask, off) ((__raw_readl(MXC_CCM_PDR2) & mask) >> off)
-
-static unsigned long mcu_main_get_rate(struct clk *clk)
-{
-       u32 pmcr0 = __raw_readl(MXC_CCM_PMCR0);
-
-       if ((pmcr0 & MXC_CCM_PMCR0_DFSUP1) == MXC_CCM_PMCR0_DFSUP1_SPLL)
-               return clk_get_rate(&serial_pll_clk);
-       else
-               return clk_get_rate(&mcu_pll_clk);
-}
-
-static unsigned long ahb_get_rate(struct clk *clk)
-{
-       unsigned long max_pdf;
-
-       max_pdf = PDR0(MXC_CCM_PDR0_MAX_PODF_MASK,
-                      MXC_CCM_PDR0_MAX_PODF_OFFSET);
-       return clk_get_rate(clk->parent) / (max_pdf + 1);
-}
-
-static unsigned long ipg_get_rate(struct clk *clk)
-{
-       unsigned long ipg_pdf;
-
-       ipg_pdf = PDR0(MXC_CCM_PDR0_IPG_PODF_MASK,
-                      MXC_CCM_PDR0_IPG_PODF_OFFSET);
-       return clk_get_rate(clk->parent) / (ipg_pdf + 1);
-}
-
-static unsigned long nfc_get_rate(struct clk *clk)
-{
-       unsigned long nfc_pdf;
-
-       nfc_pdf = PDR0(MXC_CCM_PDR0_NFC_PODF_MASK,
-                      MXC_CCM_PDR0_NFC_PODF_OFFSET);
-       return clk_get_rate(clk->parent) / (nfc_pdf + 1);
-}
-
-static unsigned long hsp_get_rate(struct clk *clk)
-{
-       unsigned long hsp_pdf;
-
-       hsp_pdf = PDR0(MXC_CCM_PDR0_HSP_PODF_MASK,
-                      MXC_CCM_PDR0_HSP_PODF_OFFSET);
-       return clk_get_rate(clk->parent) / (hsp_pdf + 1);
-}
-
-static unsigned long usb_get_rate(struct clk *clk)
-{
-       unsigned long usb_pdf, usb_prepdf;
-
-       usb_pdf = PDR1(MXC_CCM_PDR1_USB_PODF_MASK,
-                      MXC_CCM_PDR1_USB_PODF_OFFSET);
-       usb_prepdf = PDR1(MXC_CCM_PDR1_USB_PRDF_MASK,
-                         MXC_CCM_PDR1_USB_PRDF_OFFSET);
-       return clk_get_rate(clk->parent) / (usb_prepdf + 1) / (usb_pdf + 1);
-}
-
-static unsigned long csi_get_rate(struct clk *clk)
-{
-       u32 reg, pre, post;
-
-       reg = __raw_readl(MXC_CCM_PDR0);
-       pre = (reg & MXC_CCM_PDR0_CSI_PRDF_MASK) >>
-           MXC_CCM_PDR0_CSI_PRDF_OFFSET;
-       pre++;
-       post = (reg & MXC_CCM_PDR0_CSI_PODF_MASK) >>
-           MXC_CCM_PDR0_CSI_PODF_OFFSET;
-       post++;
-       return clk_get_rate(clk->parent) / (pre * post);
-}
-
-static unsigned long csi_round_rate(struct clk *clk, unsigned long rate)
-{
-       u32 pre, post, parent = clk_get_rate(clk->parent);
-       u32 div = parent / rate;
-
-       if (parent % rate)
-               div++;
-
-       __calc_pre_post_dividers(div, &pre, &post);
-
-       return parent / (pre * post);
-}
-
-static int csi_set_rate(struct clk *clk, unsigned long rate)
-{
-       u32 reg, div, pre, post, parent = clk_get_rate(clk->parent);
-
-       div = parent / rate;
-
-       if ((parent / div) != rate)
-               return -EINVAL;
-
-       __calc_pre_post_dividers(div, &pre, &post);
-
-       /* Set CSI clock divider */
-       reg = __raw_readl(MXC_CCM_PDR0) &
-           ~(MXC_CCM_PDR0_CSI_PODF_MASK | MXC_CCM_PDR0_CSI_PRDF_MASK);
-       reg |= (post - 1) << MXC_CCM_PDR0_CSI_PODF_OFFSET;
-       reg |= (pre - 1) << MXC_CCM_PDR0_CSI_PRDF_OFFSET;
-       __raw_writel(reg, MXC_CCM_PDR0);
-
-       return 0;
-}
-
-static unsigned long ssi1_get_rate(struct clk *clk)
-{
-       unsigned long ssi1_pdf, ssi1_prepdf;
-
-       ssi1_pdf = PDR1(MXC_CCM_PDR1_SSI1_PODF_MASK,
-                       MXC_CCM_PDR1_SSI1_PODF_OFFSET);
-       ssi1_prepdf = PDR1(MXC_CCM_PDR1_SSI1_PRE_PODF_MASK,
-                          MXC_CCM_PDR1_SSI1_PRE_PODF_OFFSET);
-       return clk_get_rate(clk->parent) / (ssi1_prepdf + 1) / (ssi1_pdf + 1);
-}
-
-static unsigned long ssi2_get_rate(struct clk *clk)
-{
-       unsigned long ssi2_pdf, ssi2_prepdf;
-
-       ssi2_pdf = PDR1(MXC_CCM_PDR1_SSI2_PODF_MASK,
-                       MXC_CCM_PDR1_SSI2_PODF_OFFSET);
-       ssi2_prepdf = PDR1(MXC_CCM_PDR1_SSI2_PRE_PODF_MASK,
-                          MXC_CCM_PDR1_SSI2_PRE_PODF_OFFSET);
-       return clk_get_rate(clk->parent) / (ssi2_prepdf + 1) / (ssi2_pdf + 1);
-}
-
-static unsigned long firi_get_rate(struct clk *clk)
-{
-       unsigned long firi_pdf, firi_prepdf;
-
-       firi_pdf = PDR1(MXC_CCM_PDR1_FIRI_PODF_MASK,
-                       MXC_CCM_PDR1_FIRI_PODF_OFFSET);
-       firi_prepdf = PDR1(MXC_CCM_PDR1_FIRI_PRE_PODF_MASK,
-                          MXC_CCM_PDR1_FIRI_PRE_PODF_OFFSET);
-       return clk_get_rate(clk->parent) / (firi_prepdf + 1) / (firi_pdf + 1);
-}
-
-static unsigned long firi_round_rate(struct clk *clk, unsigned long rate)
-{
-       u32 pre, post;
-       u32 parent = clk_get_rate(clk->parent);
-       u32 div = parent / rate;
-
-       if (parent % rate)
-               div++;
-
-       __calc_pre_post_dividers(div, &pre, &post);
-
-       return parent / (pre * post);
-
-}
-
-static int firi_set_rate(struct clk *clk, unsigned long rate)
-{
-       u32 reg, div, pre, post, parent = clk_get_rate(clk->parent);
-
-       div = parent / rate;
-
-       if ((parent / div) != rate)
-               return -EINVAL;
-
-       __calc_pre_post_dividers(div, &pre, &post);
-
-       /* Set FIRI clock divider */
-       reg = __raw_readl(MXC_CCM_PDR1) &
-           ~(MXC_CCM_PDR1_FIRI_PODF_MASK | MXC_CCM_PDR1_FIRI_PRE_PODF_MASK);
-       reg |= (pre - 1) << MXC_CCM_PDR1_FIRI_PRE_PODF_OFFSET;
-       reg |= (post - 1) << MXC_CCM_PDR1_FIRI_PODF_OFFSET;
-       __raw_writel(reg, MXC_CCM_PDR1);
-
-       return 0;
-}
-
-static unsigned long mbx_get_rate(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) / 2;
-}
-
-static unsigned long mstick1_get_rate(struct clk *clk)
-{
-       unsigned long msti_pdf;
-
-       msti_pdf = PDR2(MXC_CCM_PDR2_MST1_PDF_MASK,
-                       MXC_CCM_PDR2_MST1_PDF_OFFSET);
-       return clk_get_rate(clk->parent) / (msti_pdf + 1);
-}
-
-static unsigned long mstick2_get_rate(struct clk *clk)
-{
-       unsigned long msti_pdf;
-
-       msti_pdf = PDR2(MXC_CCM_PDR2_MST2_PDF_MASK,
-                       MXC_CCM_PDR2_MST2_PDF_OFFSET);
-       return clk_get_rate(clk->parent) / (msti_pdf + 1);
-}
-
-static unsigned long ckih_rate;
-
-static unsigned long clk_ckih_get_rate(struct clk *clk)
-{
-       return ckih_rate;
-}
-
-static unsigned long clk_ckil_get_rate(struct clk *clk)
-{
-       return CKIL_CLK_FREQ;
-}
-
-static struct clk ckih_clk = {
-       .get_rate = clk_ckih_get_rate,
-};
-
-static struct clk mcu_pll_clk = {
-       .parent = &ckih_clk,
-       .get_rate = mcu_pll_get_rate,
-};
-
-static struct clk mcu_main_clk = {
-       .parent = &mcu_pll_clk,
-       .get_rate = mcu_main_get_rate,
-};
-
-static struct clk serial_pll_clk = {
-       .parent = &ckih_clk,
-       .get_rate = serial_pll_get_rate,
-       .enable = serial_pll_enable,
-       .disable = serial_pll_disable,
-};
-
-static struct clk usb_pll_clk = {
-       .parent = &ckih_clk,
-       .get_rate = usb_pll_get_rate,
-       .enable = usb_pll_enable,
-       .disable = usb_pll_disable,
-};
-
-static struct clk ahb_clk = {
-       .parent = &mcu_main_clk,
-       .get_rate = ahb_get_rate,
-};
-
-#define DEFINE_CLOCK(name, i, er, es, gr, s, p)                \
-       static struct clk name = {                      \
-               .id             = i,                    \
-               .enable_reg     = er,                   \
-               .enable_shift   = es,                   \
-               .get_rate       = gr,                   \
-               .enable         = cgr_enable,           \
-               .disable        = cgr_disable,          \
-               .secondary      = s,                    \
-               .parent         = p,                    \
-       }
-
-#define DEFINE_CLOCK1(name, i, er, es, getsetround, s, p)      \
-       static struct clk name = {                              \
-               .id             = i,                            \
-               .enable_reg     = er,                           \
-               .enable_shift   = es,                           \
-               .get_rate       = getsetround##_get_rate,       \
-               .set_rate       = getsetround##_set_rate,       \
-               .round_rate     = getsetround##_round_rate,     \
-               .enable         = cgr_enable,                   \
-               .disable        = cgr_disable,                  \
-               .secondary      = s,                            \
-               .parent         = p,                            \
-       }
-
-DEFINE_CLOCK(perclk_clk,  0, NULL,          0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ckil_clk,    0, NULL,          0, clk_ckil_get_rate, NULL, NULL);
-
-DEFINE_CLOCK(sdhc1_clk,   0, MXC_CCM_CGR0,  0, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(sdhc2_clk,   1, MXC_CCM_CGR0,  2, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(gpt_clk,     0, MXC_CCM_CGR0,  4, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(epit1_clk,   0, MXC_CCM_CGR0,  6, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(epit2_clk,   1, MXC_CCM_CGR0,  8, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(iim_clk,     0, MXC_CCM_CGR0, 10, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(pata_clk,     0, MXC_CCM_CGR0, 12, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sdma_clk1,   0, MXC_CCM_CGR0, 14, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(cspi3_clk,   2, MXC_CCM_CGR0, 16, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(rng_clk,     0, MXC_CCM_CGR0, 18, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart1_clk,   0, MXC_CCM_CGR0, 20, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(uart2_clk,   1, MXC_CCM_CGR0, 22, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(ssi1_clk,    0, MXC_CCM_CGR0, 24, ssi1_get_rate, NULL, &serial_pll_clk);
-DEFINE_CLOCK(i2c1_clk,    0, MXC_CCM_CGR0, 26, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(i2c2_clk,    1, MXC_CCM_CGR0, 28, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(i2c3_clk,    2, MXC_CCM_CGR0, 30, NULL, NULL, &perclk_clk);
-
-DEFINE_CLOCK(mpeg4_clk,   0, MXC_CCM_CGR1,  0, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(mstick1_clk, 0, MXC_CCM_CGR1,  2, mstick1_get_rate, NULL, &usb_pll_clk);
-DEFINE_CLOCK(mstick2_clk, 1, MXC_CCM_CGR1,  4, mstick2_get_rate, NULL, &usb_pll_clk);
-DEFINE_CLOCK1(csi_clk,    0, MXC_CCM_CGR1,  6, csi, NULL, &serial_pll_clk);
-DEFINE_CLOCK(rtc_clk,     0, MXC_CCM_CGR1,  8, NULL, NULL, &ckil_clk);
-DEFINE_CLOCK(wdog_clk,    0, MXC_CCM_CGR1, 10, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(pwm_clk,     0, MXC_CCM_CGR1, 12, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(usb_clk2,    0, MXC_CCM_CGR1, 18, usb_get_rate, NULL, &ahb_clk);
-DEFINE_CLOCK(kpp_clk,     0, MXC_CCM_CGR1, 20, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ipu_clk,     0, MXC_CCM_CGR1, 22, hsp_get_rate, NULL, &mcu_main_clk);
-DEFINE_CLOCK(uart3_clk,   2, MXC_CCM_CGR1, 24, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(uart4_clk,   3, MXC_CCM_CGR1, 26, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(uart5_clk,   4, MXC_CCM_CGR1, 28, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(owire_clk,   0, MXC_CCM_CGR1, 30, NULL, NULL, &perclk_clk);
-
-DEFINE_CLOCK(ssi2_clk,    1, MXC_CCM_CGR2,  0, ssi2_get_rate, NULL, &serial_pll_clk);
-DEFINE_CLOCK(cspi1_clk,   0, MXC_CCM_CGR2,  2, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(cspi2_clk,   1, MXC_CCM_CGR2,  4, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(mbx_clk,     0, MXC_CCM_CGR2,  6, mbx_get_rate, NULL, &ahb_clk);
-DEFINE_CLOCK(emi_clk,     0, MXC_CCM_CGR2,  8, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(rtic_clk,    0, MXC_CCM_CGR2, 10, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK1(firi_clk,   0, MXC_CCM_CGR2, 12, firi, NULL, &usb_pll_clk);
-
-DEFINE_CLOCK(sdma_clk2,   0, NULL,          0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(usb_clk1,    0, NULL,          0, usb_get_rate, NULL, &usb_pll_clk);
-DEFINE_CLOCK(nfc_clk,     0, NULL,          0, nfc_get_rate, NULL, &ahb_clk);
-DEFINE_CLOCK(scc_clk,     0, NULL,          0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ipg_clk,     0, NULL,          0, ipg_get_rate, NULL, &ahb_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
-       { \
-               .dev_id = d, \
-               .con_id = n, \
-               .clk = &c, \
-       },
-
-static struct clk_lookup lookups[] = {
-       _REGISTER_CLOCK(NULL, "emi", emi_clk)
-       _REGISTER_CLOCK("imx31-cspi.0", NULL, cspi1_clk)
-       _REGISTER_CLOCK("imx31-cspi.1", NULL, cspi2_clk)
-       _REGISTER_CLOCK("imx31-cspi.2", NULL, cspi3_clk)
-       _REGISTER_CLOCK(NULL, "gpt", gpt_clk)
-       _REGISTER_CLOCK(NULL, "pwm", pwm_clk)
-       _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
-       _REGISTER_CLOCK(NULL, "rtc", rtc_clk)
-       _REGISTER_CLOCK(NULL, "epit", epit1_clk)
-       _REGISTER_CLOCK(NULL, "epit", epit2_clk)
-       _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
-       _REGISTER_CLOCK("ipu-core", NULL, ipu_clk)
-       _REGISTER_CLOCK("mx3_sdc_fb", NULL, ipu_clk)
-       _REGISTER_CLOCK(NULL, "kpp", kpp_clk)
-       _REGISTER_CLOCK("mxc-ehci.0", "usb", usb_clk1)
-       _REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_clk2)
-       _REGISTER_CLOCK("mxc-ehci.1", "usb", usb_clk1)
-       _REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_clk2)
-       _REGISTER_CLOCK("mxc-ehci.2", "usb", usb_clk1)
-       _REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_clk2)
-       _REGISTER_CLOCK("fsl-usb2-udc", "usb", usb_clk1)
-       _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usb_clk2)
-       _REGISTER_CLOCK("mx3-camera.0", NULL, csi_clk)
-       /* i.mx31 has the i.mx21 type uart */
-       _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
-       _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
-       _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
-       _REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
-       _REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
-       _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
-       _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
-       _REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_clk)
-       _REGISTER_CLOCK("mxc_w1.0", NULL, owire_clk)
-       _REGISTER_CLOCK("mxc-mmc.0", NULL, sdhc1_clk)
-       _REGISTER_CLOCK("mxc-mmc.1", NULL, sdhc2_clk)
-       _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
-       _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
-       _REGISTER_CLOCK(NULL, "firi", firi_clk)
-       _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
-       _REGISTER_CLOCK(NULL, "rtic", rtic_clk)
-       _REGISTER_CLOCK(NULL, "rng", rng_clk)
-       _REGISTER_CLOCK("imx31-sdma", NULL, sdma_clk1)
-       _REGISTER_CLOCK(NULL, "sdma_ipg", sdma_clk2)
-       _REGISTER_CLOCK(NULL, "mstick", mstick1_clk)
-       _REGISTER_CLOCK(NULL, "mstick", mstick2_clk)
-       _REGISTER_CLOCK(NULL, "scc", scc_clk)
-       _REGISTER_CLOCK(NULL, "iim", iim_clk)
-       _REGISTER_CLOCK(NULL, "mpeg4", mpeg4_clk)
-       _REGISTER_CLOCK(NULL, "mbx", mbx_clk)
-};
-
-int __init mx31_clocks_init(unsigned long fref)
-{
-       u32 reg;
-
-       ckih_rate = fref;
-
-       clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-       /* change the csi_clk parent if necessary */
-       reg = __raw_readl(MXC_CCM_CCMR);
-       if (!(reg & MXC_CCM_CCMR_CSCS))
-               if (clk_set_parent(&csi_clk, &usb_pll_clk))
-                       pr_err("%s: error changing csi_clk parent\n", __func__);
-
-
-       /* Turn off all possible clocks */
-       __raw_writel((3 << 4), MXC_CCM_CGR0);
-       __raw_writel(0, MXC_CCM_CGR1);
-       __raw_writel((3 << 8) | (3 << 14) | (3 << 16)|
-                    1 << 27 | 1 << 28, /* Bit 27 and 28 are not defined for
-                                          MX32, but still required to be set */
-                    MXC_CCM_CGR2);
-
-       /*
-        * Before turning off usb_pll make sure ipg_per_clk is generated
-        * by ipg_clk and not usb_pll.
-        */
-       __raw_writel(__raw_readl(MXC_CCM_CCMR) | (1 << 24), MXC_CCM_CCMR);
-
-       usb_pll_disable(&usb_pll_clk);
-
-       pr_info("Clock input source is %ld\n", clk_get_rate(&ckih_clk));
-
-       clk_enable(&gpt_clk);
-       clk_enable(&emi_clk);
-       clk_enable(&iim_clk);
-       mx31_revision();
-       clk_disable(&iim_clk);
-
-       clk_enable(&serial_pll_clk);
-
-       if (mx31_revision() >= IMX_CHIP_REVISION_2_0) {
-               reg = __raw_readl(MXC_CCM_PMCR1);
-               /* No PLL restart on DVFS switch; enable auto EMI handshake */
-               reg |= MXC_CCM_PMCR1_PLLRDIS | MXC_CCM_PMCR1_EMIRQ_EN;
-               __raw_writel(reg, MXC_CCM_PMCR1);
-       }
-
-       mxc_timer_init(&ipg_clk, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR),
-                       MX31_INT_GPT);
-
-       return 0;
-}
diff --git a/arch/arm/mach-imx/clock-imx35.c b/arch/arm/mach-imx/clock-imx35.c
deleted file mode 100644 (file)
index e56c1a8..0000000
+++ /dev/null
@@ -1,536 +0,0 @@
-/*
- * Copyright (C) 2009 by Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-
-#include "crmregs-imx3.h"
-
-#ifdef HAVE_SET_RATE_SUPPORT
-static void calc_dividers(u32 div, u32 *pre, u32 *post, u32 maxpost)
-{
-       u32 min_pre, temp_pre, old_err, err;
-
-       min_pre = (div - 1) / maxpost + 1;
-       old_err = 8;
-
-       for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) {
-               if (div > (temp_pre * maxpost))
-                       break;
-
-               if (div < (temp_pre * temp_pre))
-                       continue;
-
-               err = div % temp_pre;
-
-               if (err == 0) {
-                       *pre = temp_pre;
-                       break;
-               }
-
-               err = temp_pre - err;
-
-               if (err < old_err) {
-                       old_err = err;
-                       *pre = temp_pre;
-               }
-       }
-
-       *post = (div + *pre - 1) / *pre;
-}
-
-/* get the best values for a 3-bit divider combined with a 6-bit divider */
-static void calc_dividers_3_6(u32 div, u32 *pre, u32 *post)
-{
-       if (div >= 512) {
-               *pre = 8;
-               *post = 64;
-       } else if (div >= 64) {
-               calc_dividers(div, pre, post, 64);
-       } else if (div <= 8) {
-               *pre = div;
-               *post = 1;
-       } else {
-               *pre = 1;
-               *post = div;
-       }
-}
-
-/* get the best values for two cascaded 3-bit dividers */
-static void calc_dividers_3_3(u32 div, u32 *pre, u32 *post)
-{
-       if (div >= 64) {
-               *pre = *post = 8;
-       } else if (div > 8) {
-               calc_dividers(div, pre, post, 8);
-       } else {
-               *pre = 1;
-               *post = div;
-       }
-}
-#endif
-
-static unsigned long get_rate_mpll(void)
-{
-       ulong mpctl = __raw_readl(MX35_CCM_MPCTL);
-
-       return mxc_decode_pll(mpctl, 24000000);
-}
-
-static unsigned long get_rate_ppll(void)
-{
-       ulong ppctl = __raw_readl(MX35_CCM_PPCTL);
-
-       return mxc_decode_pll(ppctl, 24000000);
-}
-
-struct arm_ahb_div {
-       unsigned char arm, ahb, sel;
-};
-
-static struct arm_ahb_div clk_consumer[] = {
-       { .arm = 1, .ahb = 4, .sel = 0},
-       { .arm = 1, .ahb = 3, .sel = 1},
-       { .arm = 2, .ahb = 2, .sel = 0},
-       { .arm = 0, .ahb = 0, .sel = 0},
-       { .arm = 0, .ahb = 0, .sel = 0},
-       { .arm = 0, .ahb = 0, .sel = 0},
-       { .arm = 4, .ahb = 1, .sel = 0},
-       { .arm = 1, .ahb = 5, .sel = 0},
-       { .arm = 1, .ahb = 8, .sel = 0},
-       { .arm = 1, .ahb = 6, .sel = 1},
-       { .arm = 2, .ahb = 4, .sel = 0},
-       { .arm = 0, .ahb = 0, .sel = 0},
-       { .arm = 0, .ahb = 0, .sel = 0},
-       { .arm = 0, .ahb = 0, .sel = 0},
-       { .arm = 4, .ahb = 2, .sel = 0},
-       { .arm = 0, .ahb = 0, .sel = 0},
-};
-
-static unsigned long get_rate_arm(void)
-{
-       unsigned long pdr0 = __raw_readl(MXC_CCM_PDR0);
-       struct arm_ahb_div *aad;
-       unsigned long fref = get_rate_mpll();
-
-       aad = &clk_consumer[(pdr0 >> 16) & 0xf];
-       if (aad->sel)
-               fref = fref * 3 / 4;
-
-       return fref / aad->arm;
-}
-
-static unsigned long get_rate_ahb(struct clk *clk)
-{
-       unsigned long pdr0 = __raw_readl(MXC_CCM_PDR0);
-       struct arm_ahb_div *aad;
-       unsigned long fref = get_rate_arm();
-
-       aad = &clk_consumer[(pdr0 >> 16) & 0xf];
-
-       return fref / aad->ahb;
-}
-
-static unsigned long get_rate_ipg(struct clk *clk)
-{
-       return get_rate_ahb(NULL) >> 1;
-}
-
-static unsigned long get_rate_uart(struct clk *clk)
-{
-       unsigned long pdr3 = __raw_readl(MX35_CCM_PDR3);
-       unsigned long pdr4 = __raw_readl(MX35_CCM_PDR4);
-       unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
-
-       if (pdr3 & (1 << 14))
-               return get_rate_arm() / div;
-       else
-               return get_rate_ppll() / div;
-}
-
-static unsigned long get_rate_sdhc(struct clk *clk)
-{
-       unsigned long pdr3 = __raw_readl(MX35_CCM_PDR3);
-       unsigned long div, rate;
-
-       if (pdr3 & (1 << 6))
-               rate = get_rate_arm();
-       else
-               rate = get_rate_ppll();
-
-       switch (clk->id) {
-       default:
-       case 0:
-               div = pdr3 & 0x3f;
-               break;
-       case 1:
-               div = (pdr3 >> 8) & 0x3f;
-               break;
-       case 2:
-               div = (pdr3 >> 16) & 0x3f;
-               break;
-       }
-
-       return rate / (div + 1);
-}
-
-static unsigned long get_rate_mshc(struct clk *clk)
-{
-       unsigned long pdr1 = __raw_readl(MXC_CCM_PDR1);
-       unsigned long div1, div2, rate;
-
-       if (pdr1 & (1 << 7))
-               rate = get_rate_arm();
-       else
-               rate = get_rate_ppll();
-
-       div1 = (pdr1 >> 29) & 0x7;
-       div2 = (pdr1 >> 22) & 0x3f;
-
-       return rate / ((div1 + 1) * (div2 + 1));
-}
-
-static unsigned long get_rate_ssi(struct clk *clk)
-{
-       unsigned long pdr2 = __raw_readl(MX35_CCM_PDR2);
-       unsigned long div1, div2, rate;
-
-       if (pdr2 & (1 << 6))
-               rate = get_rate_arm();
-       else
-               rate = get_rate_ppll();
-
-       switch (clk->id) {
-       default:
-       case 0:
-               div1 = pdr2 & 0x3f;
-               div2 = (pdr2 >> 24) & 0x7;
-               break;
-       case 1:
-               div1 = (pdr2 >> 8) & 0x3f;
-               div2 = (pdr2 >> 27) & 0x7;
-               break;
-       }
-
-       return rate / ((div1 + 1) * (div2 + 1));
-}
-
-static unsigned long get_rate_csi(struct clk *clk)
-{
-       unsigned long pdr2 = __raw_readl(MX35_CCM_PDR2);
-       unsigned long rate;
-
-       if (pdr2 & (1 << 7))
-               rate = get_rate_arm();
-       else
-               rate = get_rate_ppll();
-
-       return rate / (((pdr2 >> 16) & 0x3f) + 1);
-}
-
-static unsigned long get_rate_otg(struct clk *clk)
-{
-       unsigned long pdr4 = __raw_readl(MX35_CCM_PDR4);
-       unsigned long rate;
-
-       if (pdr4 & (1 << 9))
-               rate = get_rate_arm();
-       else
-               rate = get_rate_ppll();
-
-       return rate / (((pdr4 >> 22) & 0x3f) + 1);
-}
-
-static unsigned long get_rate_ipg_per(struct clk *clk)
-{
-       unsigned long pdr0 = __raw_readl(MXC_CCM_PDR0);
-       unsigned long pdr4 = __raw_readl(MX35_CCM_PDR4);
-       unsigned long div;
-
-       if (pdr0 & (1 << 26)) {
-               div = (pdr4 >> 16) & 0x3f;
-               return get_rate_arm() / (div + 1);
-       } else {
-               div = (pdr0 >> 12) & 0x7;
-               return get_rate_ahb(NULL) / (div + 1);
-       }
-}
-
-static unsigned long get_rate_hsp(struct clk *clk)
-{
-       unsigned long hsp_podf = (__raw_readl(MXC_CCM_PDR0) >> 20) & 0x03;
-       unsigned long fref = get_rate_mpll();
-
-       if (fref > 400 * 1000 * 1000) {
-               switch (hsp_podf) {
-               case 0:
-                       return fref >> 2;
-               case 1:
-                       return fref >> 3;
-               case 2:
-                       return fref / 3;
-               }
-       } else {
-               switch (hsp_podf) {
-               case 0:
-               case 2:
-                       return fref / 3;
-               case 1:
-                       return fref / 6;
-               }
-       }
-
-       return 0;
-}
-
-static int clk_cgr_enable(struct clk *clk)
-{
-       u32 reg;
-
-       reg = __raw_readl(clk->enable_reg);
-       reg |= 3 << clk->enable_shift;
-       __raw_writel(reg, clk->enable_reg);
-
-       return 0;
-}
-
-static void clk_cgr_disable(struct clk *clk)
-{
-       u32 reg;
-
-       reg = __raw_readl(clk->enable_reg);
-       reg &= ~(3 << clk->enable_shift);
-       __raw_writel(reg, clk->enable_reg);
-}
-
-#define DEFINE_CLOCK(name, i, er, es, gr, sr)          \
-       static struct clk name = {                      \
-               .id             = i,                    \
-               .enable_reg     = er,                   \
-               .enable_shift   = es,                   \
-               .get_rate       = gr,                   \
-               .set_rate       = sr,                   \
-               .enable         = clk_cgr_enable,       \
-               .disable        = clk_cgr_disable,      \
-       }
-
-DEFINE_CLOCK(asrc_clk,   0, MX35_CCM_CGR0,  0, NULL, NULL);
-DEFINE_CLOCK(pata_clk,    0, MX35_CCM_CGR0,  2, get_rate_ipg, NULL);
-/* DEFINE_CLOCK(audmux_clk, 0, MX35_CCM_CGR0,  4, NULL, NULL); */
-DEFINE_CLOCK(can1_clk,   0, MX35_CCM_CGR0,  6, get_rate_ipg, NULL);
-DEFINE_CLOCK(can2_clk,   1, MX35_CCM_CGR0,  8, get_rate_ipg, NULL);
-DEFINE_CLOCK(cspi1_clk,  0, MX35_CCM_CGR0, 10, get_rate_ipg, NULL);
-DEFINE_CLOCK(cspi2_clk,  1, MX35_CCM_CGR0, 12, get_rate_ipg, NULL);
-DEFINE_CLOCK(ect_clk,    0, MX35_CCM_CGR0, 14, get_rate_ipg, NULL);
-DEFINE_CLOCK(edio_clk,   0, MX35_CCM_CGR0, 16, NULL, NULL);
-DEFINE_CLOCK(emi_clk,    0, MX35_CCM_CGR0, 18, get_rate_ipg, NULL);
-DEFINE_CLOCK(epit1_clk,  0, MX35_CCM_CGR0, 20, get_rate_ipg, NULL);
-DEFINE_CLOCK(epit2_clk,  1, MX35_CCM_CGR0, 22, get_rate_ipg, NULL);
-DEFINE_CLOCK(esai_clk,   0, MX35_CCM_CGR0, 24, NULL, NULL);
-DEFINE_CLOCK(esdhc1_clk, 0, MX35_CCM_CGR0, 26, get_rate_sdhc, NULL);
-DEFINE_CLOCK(esdhc2_clk, 1, MX35_CCM_CGR0, 28, get_rate_sdhc, NULL);
-DEFINE_CLOCK(esdhc3_clk, 2, MX35_CCM_CGR0, 30, get_rate_sdhc, NULL);
-
-DEFINE_CLOCK(fec_clk,    0, MX35_CCM_CGR1,  0, get_rate_ipg, NULL);
-DEFINE_CLOCK(gpio1_clk,  0, MX35_CCM_CGR1,  2, NULL, NULL);
-DEFINE_CLOCK(gpio2_clk,  1, MX35_CCM_CGR1,  4, NULL, NULL);
-DEFINE_CLOCK(gpio3_clk,  2, MX35_CCM_CGR1,  6, NULL, NULL);
-DEFINE_CLOCK(gpt_clk,    0, MX35_CCM_CGR1,  8, get_rate_ipg, NULL);
-DEFINE_CLOCK(i2c1_clk,   0, MX35_CCM_CGR1, 10, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(i2c2_clk,   1, MX35_CCM_CGR1, 12, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(i2c3_clk,   2, MX35_CCM_CGR1, 14, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(iomuxc_clk, 0, MX35_CCM_CGR1, 16, NULL, NULL);
-DEFINE_CLOCK(ipu_clk,    0, MX35_CCM_CGR1, 18, get_rate_hsp, NULL);
-DEFINE_CLOCK(kpp_clk,    0, MX35_CCM_CGR1, 20, get_rate_ipg, NULL);
-DEFINE_CLOCK(mlb_clk,    0, MX35_CCM_CGR1, 22, get_rate_ahb, NULL);
-DEFINE_CLOCK(mshc_clk,   0, MX35_CCM_CGR1, 24, get_rate_mshc, NULL);
-DEFINE_CLOCK(owire_clk,  0, MX35_CCM_CGR1, 26, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(pwm_clk,    0, MX35_CCM_CGR1, 28, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(rngc_clk,   0, MX35_CCM_CGR1, 30, get_rate_ipg, NULL);
-
-DEFINE_CLOCK(rtc_clk,    0, MX35_CCM_CGR2,  0, get_rate_ipg, NULL);
-DEFINE_CLOCK(rtic_clk,   0, MX35_CCM_CGR2,  2, get_rate_ahb, NULL);
-DEFINE_CLOCK(scc_clk,    0, MX35_CCM_CGR2,  4, get_rate_ipg, NULL);
-DEFINE_CLOCK(sdma_clk,   0, MX35_CCM_CGR2,  6, NULL, NULL);
-DEFINE_CLOCK(spba_clk,   0, MX35_CCM_CGR2,  8, get_rate_ipg, NULL);
-DEFINE_CLOCK(spdif_clk,  0, MX35_CCM_CGR2, 10, NULL, NULL);
-DEFINE_CLOCK(ssi1_clk,   0, MX35_CCM_CGR2, 12, get_rate_ssi, NULL);
-DEFINE_CLOCK(ssi2_clk,   1, MX35_CCM_CGR2, 14, get_rate_ssi, NULL);
-DEFINE_CLOCK(uart1_clk,  0, MX35_CCM_CGR2, 16, get_rate_uart, NULL);
-DEFINE_CLOCK(uart2_clk,  1, MX35_CCM_CGR2, 18, get_rate_uart, NULL);
-DEFINE_CLOCK(uart3_clk,  2, MX35_CCM_CGR2, 20, get_rate_uart, NULL);
-DEFINE_CLOCK(usbotg_clk, 0, MX35_CCM_CGR2, 22, get_rate_otg, NULL);
-DEFINE_CLOCK(wdog_clk,   0, MX35_CCM_CGR2, 24, NULL, NULL);
-DEFINE_CLOCK(max_clk,    0, MX35_CCM_CGR2, 26, NULL, NULL);
-DEFINE_CLOCK(audmux_clk, 0, MX35_CCM_CGR2, 30, NULL, NULL);
-
-DEFINE_CLOCK(csi_clk,    0, MX35_CCM_CGR3,  0, get_rate_csi, NULL);
-DEFINE_CLOCK(iim_clk,    0, MX35_CCM_CGR3,  2, NULL, NULL);
-DEFINE_CLOCK(gpu2d_clk,  0, MX35_CCM_CGR3,  4, NULL, NULL);
-
-DEFINE_CLOCK(usbahb_clk, 0, 0,         0, get_rate_ahb, NULL);
-
-static int clk_dummy_enable(struct clk *clk)
-{
-       return 0;
-}
-
-static void clk_dummy_disable(struct clk *clk)
-{
-}
-
-static unsigned long get_rate_nfc(struct clk *clk)
-{
-       unsigned long div1;
-
-       div1 = (__raw_readl(MX35_CCM_PDR4) >> 28) + 1;
-
-       return get_rate_ahb(NULL) / div1;
-}
-
-/* NAND Controller: It seems it can't be disabled */
-static struct clk nfc_clk = {
-       .id             = 0,
-       .enable_reg     = 0,
-       .enable_shift   = 0,
-       .get_rate       = get_rate_nfc,
-       .set_rate       = NULL, /* set_rate_nfc, */
-       .enable         = clk_dummy_enable,
-       .disable        = clk_dummy_disable
-};
-
-#define _REGISTER_CLOCK(d, n, c)       \
-       {                               \
-               .dev_id = d,            \
-               .con_id = n,            \
-               .clk = &c,              \
-       },
-
-static struct clk_lookup lookups[] = {
-       _REGISTER_CLOCK(NULL, "asrc", asrc_clk)
-       _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
-       _REGISTER_CLOCK("flexcan.0", NULL, can1_clk)
-       _REGISTER_CLOCK("flexcan.1", NULL, can2_clk)
-       _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
-       _REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
-       _REGISTER_CLOCK(NULL, "ect", ect_clk)
-       _REGISTER_CLOCK(NULL, "edio", edio_clk)
-       _REGISTER_CLOCK(NULL, "emi", emi_clk)
-       _REGISTER_CLOCK("imx-epit.0", NULL, epit1_clk)
-       _REGISTER_CLOCK("imx-epit.1", NULL, epit2_clk)
-       _REGISTER_CLOCK(NULL, "esai", esai_clk)
-       _REGISTER_CLOCK("sdhci-esdhc-imx35.0", NULL, esdhc1_clk)
-       _REGISTER_CLOCK("sdhci-esdhc-imx35.1", NULL, esdhc2_clk)
-       _REGISTER_CLOCK("sdhci-esdhc-imx35.2", NULL, esdhc3_clk)
-       /* i.mx35 has the i.mx27 type fec */
-       _REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
-       _REGISTER_CLOCK(NULL, "gpio", gpio1_clk)
-       _REGISTER_CLOCK(NULL, "gpio", gpio2_clk)
-       _REGISTER_CLOCK(NULL, "gpio", gpio3_clk)
-       _REGISTER_CLOCK("gpt.0", NULL, gpt_clk)
-       _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
-       _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
-       _REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_clk)
-       _REGISTER_CLOCK(NULL, "iomuxc", iomuxc_clk)
-       _REGISTER_CLOCK("ipu-core", NULL, ipu_clk)
-       _REGISTER_CLOCK("mx3_sdc_fb", NULL, ipu_clk)
-       _REGISTER_CLOCK(NULL, "kpp", kpp_clk)
-       _REGISTER_CLOCK(NULL, "mlb", mlb_clk)
-       _REGISTER_CLOCK(NULL, "mshc", mshc_clk)
-       _REGISTER_CLOCK("mxc_w1", NULL, owire_clk)
-       _REGISTER_CLOCK(NULL, "pwm", pwm_clk)
-       _REGISTER_CLOCK(NULL, "rngc", rngc_clk)
-       _REGISTER_CLOCK(NULL, "rtc", rtc_clk)
-       _REGISTER_CLOCK(NULL, "rtic", rtic_clk)
-       _REGISTER_CLOCK(NULL, "scc", scc_clk)
-       _REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
-       _REGISTER_CLOCK(NULL, "spba", spba_clk)
-       _REGISTER_CLOCK(NULL, "spdif", spdif_clk)
-       _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
-       _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
-       /* i.mx35 has the i.mx21 type uart */
-       _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
-       _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
-       _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
-       _REGISTER_CLOCK("mxc-ehci.0", "usb", usbotg_clk)
-       _REGISTER_CLOCK("mxc-ehci.1", "usb", usbotg_clk)
-       _REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
-       _REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
-       _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usbahb_clk)
-       _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
-       _REGISTER_CLOCK(NULL, "max", max_clk)
-       _REGISTER_CLOCK(NULL, "audmux", audmux_clk)
-       _REGISTER_CLOCK("mx3-camera.0", NULL, csi_clk)
-       _REGISTER_CLOCK(NULL, "iim", iim_clk)
-       _REGISTER_CLOCK(NULL, "gpu2d", gpu2d_clk)
-       _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
-};
-
-int __init mx35_clocks_init()
-{
-       unsigned int cgr2 = 3 << 26;
-
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
-       cgr2 |= 3 << 16;
-#endif
-
-       clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-       /* Turn off all clocks except the ones we need to survive, namely:
-        * EMI, GPIO1/2/3, GPT, IOMUX, MAX and eventually uart
-        */
-       __raw_writel((3 << 18), MX35_CCM_CGR0);
-       __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
-                       MX35_CCM_CGR1);
-       __raw_writel(cgr2, MX35_CCM_CGR2);
-       __raw_writel(0, MX35_CCM_CGR3);
-
-       clk_enable(&iim_clk);
-       imx_print_silicon_rev("i.MX35", mx35_revision());
-       clk_disable(&iim_clk);
-
-       /*
-        * Check if we came up in internal boot mode. If yes, we need some
-        * extra clocks turned on, otherwise the MX35 boot ROM code will
-        * hang after a watchdog reset.
-        */
-       if (!(__raw_readl(MX35_CCM_RCSR) & (3 << 10))) {
-               /* Additionally turn on UART1, SCC, and IIM clocks */
-               clk_enable(&iim_clk);
-               clk_enable(&uart1_clk);
-               clk_enable(&scc_clk);
-       }
-
-#ifdef CONFIG_MXC_USE_EPIT
-       epit_timer_init(&epit1_clk,
-                       MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
-#else
-       mxc_timer_init(&gpt_clk,
-                       MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
-#endif
-
-       return 0;
-}
diff --git a/arch/arm/mach-imx/clock-imx6q.c b/arch/arm/mach-imx/clock-imx6q.c
deleted file mode 100644 (file)
index 111c328..0000000
+++ /dev/null
@@ -1,2111 +0,0 @@
-/*
- * Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2011 Linaro Ltd.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <asm/div64.h>
-#include <asm/mach/map.h>
-#include <mach/clock.h>
-#include <mach/common.h>
-#include <mach/hardware.h>
-
-#define PLL_BASE               IMX_IO_ADDRESS(MX6Q_ANATOP_BASE_ADDR)
-#define PLL1_SYS               (PLL_BASE + 0x000)
-#define PLL2_BUS               (PLL_BASE + 0x030)
-#define PLL3_USB_OTG           (PLL_BASE + 0x010)
-#define PLL4_AUDIO             (PLL_BASE + 0x070)
-#define PLL5_VIDEO             (PLL_BASE + 0x0a0)
-#define PLL6_MLB               (PLL_BASE + 0x0d0)
-#define PLL7_USB_HOST          (PLL_BASE + 0x020)
-#define PLL8_ENET              (PLL_BASE + 0x0e0)
-#define PFD_480                        (PLL_BASE + 0x0f0)
-#define PFD_528                        (PLL_BASE + 0x100)
-#define PLL_NUM_OFFSET         0x010
-#define PLL_DENOM_OFFSET       0x020
-
-#define PFD0                   7
-#define PFD1                   15
-#define PFD2                   23
-#define PFD3                   31
-#define PFD_FRAC_MASK          0x3f
-
-#define BM_PLL_BYPASS                  (0x1 << 16)
-#define BM_PLL_ENABLE                  (0x1 << 13)
-#define BM_PLL_POWER_DOWN              (0x1 << 12)
-#define BM_PLL_LOCK                    (0x1 << 31)
-#define BP_PLL_SYS_DIV_SELECT          0
-#define BM_PLL_SYS_DIV_SELECT          (0x7f << 0)
-#define BP_PLL_BUS_DIV_SELECT          0
-#define BM_PLL_BUS_DIV_SELECT          (0x1 << 0)
-#define BP_PLL_USB_DIV_SELECT          0
-#define BM_PLL_USB_DIV_SELECT          (0x3 << 0)
-#define BP_PLL_AV_DIV_SELECT           0
-#define BM_PLL_AV_DIV_SELECT           (0x7f << 0)
-#define BP_PLL_ENET_DIV_SELECT         0
-#define BM_PLL_ENET_DIV_SELECT         (0x3 << 0)
-#define BM_PLL_ENET_EN_PCIE            (0x1 << 19)
-#define BM_PLL_ENET_EN_SATA            (0x1 << 20)
-
-#define CCM_BASE       IMX_IO_ADDRESS(MX6Q_CCM_BASE_ADDR)
-#define CCR            (CCM_BASE + 0x00)
-#define CCDR           (CCM_BASE + 0x04)
-#define CSR            (CCM_BASE + 0x08)
-#define CCSR           (CCM_BASE + 0x0c)
-#define CACRR          (CCM_BASE + 0x10)
-#define CBCDR          (CCM_BASE + 0x14)
-#define CBCMR          (CCM_BASE + 0x18)
-#define CSCMR1         (CCM_BASE + 0x1c)
-#define CSCMR2         (CCM_BASE + 0x20)
-#define CSCDR1         (CCM_BASE + 0x24)
-#define CS1CDR         (CCM_BASE + 0x28)
-#define CS2CDR         (CCM_BASE + 0x2c)
-#define CDCDR          (CCM_BASE + 0x30)
-#define CHSCCDR                (CCM_BASE + 0x34)
-#define CSCDR2         (CCM_BASE + 0x38)
-#define CSCDR3         (CCM_BASE + 0x3c)
-#define CSCDR4         (CCM_BASE + 0x40)
-#define CWDR           (CCM_BASE + 0x44)
-#define CDHIPR         (CCM_BASE + 0x48)
-#define CDCR           (CCM_BASE + 0x4c)
-#define CTOR           (CCM_BASE + 0x50)
-#define CLPCR          (CCM_BASE + 0x54)
-#define CISR           (CCM_BASE + 0x58)
-#define CIMR           (CCM_BASE + 0x5c)
-#define CCOSR          (CCM_BASE + 0x60)
-#define CGPR           (CCM_BASE + 0x64)
-#define CCGR0          (CCM_BASE + 0x68)
-#define CCGR1          (CCM_BASE + 0x6c)
-#define CCGR2          (CCM_BASE + 0x70)
-#define CCGR3          (CCM_BASE + 0x74)
-#define CCGR4          (CCM_BASE + 0x78)
-#define CCGR5          (CCM_BASE + 0x7c)
-#define CCGR6          (CCM_BASE + 0x80)
-#define CCGR7          (CCM_BASE + 0x84)
-#define CMEOR          (CCM_BASE + 0x88)
-
-#define CG0            0
-#define CG1            2
-#define CG2            4
-#define CG3            6
-#define CG4            8
-#define CG5            10
-#define CG6            12
-#define CG7            14
-#define CG8            16
-#define CG9            18
-#define CG10           20
-#define CG11           22
-#define CG12           24
-#define CG13           26
-#define CG14           28
-#define CG15           30
-
-#define BM_CCSR_PLL1_SW_SEL            (0x1 << 2)
-#define BM_CCSR_STEP_SEL               (0x1 << 8)
-
-#define BP_CACRR_ARM_PODF              0
-#define BM_CACRR_ARM_PODF              (0x7 << 0)
-
-#define BP_CBCDR_PERIPH2_CLK2_PODF     0
-#define BM_CBCDR_PERIPH2_CLK2_PODF     (0x7 << 0)
-#define BP_CBCDR_MMDC_CH1_AXI_PODF     3
-#define BM_CBCDR_MMDC_CH1_AXI_PODF     (0x7 << 3)
-#define BP_CBCDR_AXI_SEL               6
-#define BM_CBCDR_AXI_SEL               (0x3 << 6)
-#define BP_CBCDR_IPG_PODF              8
-#define BM_CBCDR_IPG_PODF              (0x3 << 8)
-#define BP_CBCDR_AHB_PODF              10
-#define BM_CBCDR_AHB_PODF              (0x7 << 10)
-#define BP_CBCDR_AXI_PODF              16
-#define BM_CBCDR_AXI_PODF              (0x7 << 16)
-#define BP_CBCDR_MMDC_CH0_AXI_PODF     19
-#define BM_CBCDR_MMDC_CH0_AXI_PODF     (0x7 << 19)
-#define BP_CBCDR_PERIPH_CLK_SEL                25
-#define BM_CBCDR_PERIPH_CLK_SEL                (0x1 << 25)
-#define BP_CBCDR_PERIPH2_CLK_SEL       26
-#define BM_CBCDR_PERIPH2_CLK_SEL       (0x1 << 26)
-#define BP_CBCDR_PERIPH_CLK2_PODF      27
-#define BM_CBCDR_PERIPH_CLK2_PODF      (0x7 << 27)
-
-#define BP_CBCMR_GPU2D_AXI_SEL         0
-#define BM_CBCMR_GPU2D_AXI_SEL         (0x1 << 0)
-#define BP_CBCMR_GPU3D_AXI_SEL         1
-#define BM_CBCMR_GPU3D_AXI_SEL         (0x1 << 1)
-#define BP_CBCMR_GPU3D_CORE_SEL                4
-#define BM_CBCMR_GPU3D_CORE_SEL                (0x3 << 4)
-#define BP_CBCMR_GPU3D_SHADER_SEL      8
-#define BM_CBCMR_GPU3D_SHADER_SEL      (0x3 << 8)
-#define BP_CBCMR_PCIE_AXI_SEL          10
-#define BM_CBCMR_PCIE_AXI_SEL          (0x1 << 10)
-#define BP_CBCMR_VDO_AXI_SEL           11
-#define BM_CBCMR_VDO_AXI_SEL           (0x1 << 11)
-#define BP_CBCMR_PERIPH_CLK2_SEL       12
-#define BM_CBCMR_PERIPH_CLK2_SEL       (0x3 << 12)
-#define BP_CBCMR_VPU_AXI_SEL           14
-#define BM_CBCMR_VPU_AXI_SEL           (0x3 << 14)
-#define BP_CBCMR_GPU2D_CORE_SEL                16
-#define BM_CBCMR_GPU2D_CORE_SEL                (0x3 << 16)
-#define BP_CBCMR_PRE_PERIPH_CLK_SEL    18
-#define BM_CBCMR_PRE_PERIPH_CLK_SEL    (0x3 << 18)
-#define BP_CBCMR_PERIPH2_CLK2_SEL      20
-#define BM_CBCMR_PERIPH2_CLK2_SEL      (0x1 << 20)
-#define BP_CBCMR_PRE_PERIPH2_CLK_SEL   21
-#define BM_CBCMR_PRE_PERIPH2_CLK_SEL   (0x3 << 21)
-#define BP_CBCMR_GPU2D_CORE_PODF       23
-#define BM_CBCMR_GPU2D_CORE_PODF       (0x7 << 23)
-#define BP_CBCMR_GPU3D_CORE_PODF       26
-#define BM_CBCMR_GPU3D_CORE_PODF       (0x7 << 26)
-#define BP_CBCMR_GPU3D_SHADER_PODF     29
-#define BM_CBCMR_GPU3D_SHADER_PODF     (0x7 << 29)
-
-#define BP_CSCMR1_PERCLK_PODF          0
-#define BM_CSCMR1_PERCLK_PODF          (0x3f << 0)
-#define BP_CSCMR1_SSI1_SEL             10
-#define BM_CSCMR1_SSI1_SEL             (0x3 << 10)
-#define BP_CSCMR1_SSI2_SEL             12
-#define BM_CSCMR1_SSI2_SEL             (0x3 << 12)
-#define BP_CSCMR1_SSI3_SEL             14
-#define BM_CSCMR1_SSI3_SEL             (0x3 << 14)
-#define BP_CSCMR1_USDHC1_SEL           16
-#define BM_CSCMR1_USDHC1_SEL           (0x1 << 16)
-#define BP_CSCMR1_USDHC2_SEL           17
-#define BM_CSCMR1_USDHC2_SEL           (0x1 << 17)
-#define BP_CSCMR1_USDHC3_SEL           18
-#define BM_CSCMR1_USDHC3_SEL           (0x1 << 18)
-#define BP_CSCMR1_USDHC4_SEL           19
-#define BM_CSCMR1_USDHC4_SEL           (0x1 << 19)
-#define BP_CSCMR1_EMI_PODF             20
-#define BM_CSCMR1_EMI_PODF             (0x7 << 20)
-#define BP_CSCMR1_EMI_SLOW_PODF                23
-#define BM_CSCMR1_EMI_SLOW_PODF                (0x7 << 23)
-#define BP_CSCMR1_EMI_SEL              27
-#define BM_CSCMR1_EMI_SEL              (0x3 << 27)
-#define BP_CSCMR1_EMI_SLOW_SEL         29
-#define BM_CSCMR1_EMI_SLOW_SEL         (0x3 << 29)
-
-#define BP_CSCMR2_CAN_PODF             2
-#define BM_CSCMR2_CAN_PODF             (0x3f << 2)
-#define BM_CSCMR2_LDB_DI0_IPU_DIV      (0x1 << 10)
-#define BM_CSCMR2_LDB_DI1_IPU_DIV      (0x1 << 11)
-#define BP_CSCMR2_ESAI_SEL             19
-#define BM_CSCMR2_ESAI_SEL             (0x3 << 19)
-
-#define BP_CSCDR1_UART_PODF            0
-#define BM_CSCDR1_UART_PODF            (0x3f << 0)
-#define BP_CSCDR1_USDHC1_PODF          11
-#define BM_CSCDR1_USDHC1_PODF          (0x7 << 11)
-#define BP_CSCDR1_USDHC2_PODF          16
-#define BM_CSCDR1_USDHC2_PODF          (0x7 << 16)
-#define BP_CSCDR1_USDHC3_PODF          19
-#define BM_CSCDR1_USDHC3_PODF          (0x7 << 19)
-#define BP_CSCDR1_USDHC4_PODF          22
-#define BM_CSCDR1_USDHC4_PODF          (0x7 << 22)
-#define BP_CSCDR1_VPU_AXI_PODF         25
-#define BM_CSCDR1_VPU_AXI_PODF         (0x7 << 25)
-
-#define BP_CS1CDR_SSI1_PODF            0
-#define BM_CS1CDR_SSI1_PODF            (0x3f << 0)
-#define BP_CS1CDR_SSI1_PRED            6
-#define BM_CS1CDR_SSI1_PRED            (0x7 << 6)
-#define BP_CS1CDR_ESAI_PRED            9
-#define BM_CS1CDR_ESAI_PRED            (0x7 << 9)
-#define BP_CS1CDR_SSI3_PODF            16
-#define BM_CS1CDR_SSI3_PODF            (0x3f << 16)
-#define BP_CS1CDR_SSI3_PRED            22
-#define BM_CS1CDR_SSI3_PRED            (0x7 << 22)
-#define BP_CS1CDR_ESAI_PODF            25
-#define BM_CS1CDR_ESAI_PODF            (0x7 << 25)
-
-#define BP_CS2CDR_SSI2_PODF            0
-#define BM_CS2CDR_SSI2_PODF            (0x3f << 0)
-#define BP_CS2CDR_SSI2_PRED            6
-#define BM_CS2CDR_SSI2_PRED            (0x7 << 6)
-#define BP_CS2CDR_LDB_DI0_SEL          9
-#define BM_CS2CDR_LDB_DI0_SEL          (0x7 << 9)
-#define BP_CS2CDR_LDB_DI1_SEL          12
-#define BM_CS2CDR_LDB_DI1_SEL          (0x7 << 12)
-#define BP_CS2CDR_ENFC_SEL             16
-#define BM_CS2CDR_ENFC_SEL             (0x3 << 16)
-#define BP_CS2CDR_ENFC_PRED            18
-#define BM_CS2CDR_ENFC_PRED            (0x7 << 18)
-#define BP_CS2CDR_ENFC_PODF            21
-#define BM_CS2CDR_ENFC_PODF            (0x3f << 21)
-
-#define BP_CDCDR_ASRC_SERIAL_SEL       7
-#define BM_CDCDR_ASRC_SERIAL_SEL       (0x3 << 7)
-#define BP_CDCDR_ASRC_SERIAL_PODF      9
-#define BM_CDCDR_ASRC_SERIAL_PODF      (0x7 << 9)
-#define BP_CDCDR_ASRC_SERIAL_PRED      12
-#define BM_CDCDR_ASRC_SERIAL_PRED      (0x7 << 12)
-#define BP_CDCDR_SPDIF_SEL             20
-#define BM_CDCDR_SPDIF_SEL             (0x3 << 20)
-#define BP_CDCDR_SPDIF_PODF            22
-#define BM_CDCDR_SPDIF_PODF            (0x7 << 22)
-#define BP_CDCDR_SPDIF_PRED            25
-#define BM_CDCDR_SPDIF_PRED            (0x7 << 25)
-#define BP_CDCDR_HSI_TX_PODF           29
-#define BM_CDCDR_HSI_TX_PODF           (0x7 << 29)
-#define BP_CDCDR_HSI_TX_SEL            28
-#define BM_CDCDR_HSI_TX_SEL            (0x1 << 28)
-
-#define BP_CHSCCDR_IPU1_DI0_SEL                0
-#define BM_CHSCCDR_IPU1_DI0_SEL                (0x7 << 0)
-#define BP_CHSCCDR_IPU1_DI0_PRE_PODF   3
-#define BM_CHSCCDR_IPU1_DI0_PRE_PODF   (0x7 << 3)
-#define BP_CHSCCDR_IPU1_DI0_PRE_SEL    6
-#define BM_CHSCCDR_IPU1_DI0_PRE_SEL    (0x7 << 6)
-#define BP_CHSCCDR_IPU1_DI1_SEL                9
-#define BM_CHSCCDR_IPU1_DI1_SEL                (0x7 << 9)
-#define BP_CHSCCDR_IPU1_DI1_PRE_PODF   12
-#define BM_CHSCCDR_IPU1_DI1_PRE_PODF   (0x7 << 12)
-#define BP_CHSCCDR_IPU1_DI1_PRE_SEL    15
-#define BM_CHSCCDR_IPU1_DI1_PRE_SEL    (0x7 << 15)
-
-#define BP_CSCDR2_IPU2_DI0_SEL         0
-#define BM_CSCDR2_IPU2_DI0_SEL         (0x7)
-#define BP_CSCDR2_IPU2_DI0_PRE_PODF    3
-#define BM_CSCDR2_IPU2_DI0_PRE_PODF    (0x7 << 3)
-#define BP_CSCDR2_IPU2_DI0_PRE_SEL     6
-#define BM_CSCDR2_IPU2_DI0_PRE_SEL     (0x7 << 6)
-#define BP_CSCDR2_IPU2_DI1_SEL         9
-#define BM_CSCDR2_IPU2_DI1_SEL         (0x7 << 9)
-#define BP_CSCDR2_IPU2_DI1_PRE_PODF    12
-#define BM_CSCDR2_IPU2_DI1_PRE_PODF    (0x7 << 12)
-#define BP_CSCDR2_IPU2_DI1_PRE_SEL     15
-#define BM_CSCDR2_IPU2_DI1_PRE_SEL     (0x7 << 15)
-#define BP_CSCDR2_ECSPI_CLK_PODF       19
-#define BM_CSCDR2_ECSPI_CLK_PODF       (0x3f << 19)
-
-#define BP_CSCDR3_IPU1_HSP_SEL         9
-#define BM_CSCDR3_IPU1_HSP_SEL         (0x3 << 9)
-#define BP_CSCDR3_IPU1_HSP_PODF                11
-#define BM_CSCDR3_IPU1_HSP_PODF                (0x7 << 11)
-#define BP_CSCDR3_IPU2_HSP_SEL         14
-#define BM_CSCDR3_IPU2_HSP_SEL         (0x3 << 14)
-#define BP_CSCDR3_IPU2_HSP_PODF                16
-#define BM_CSCDR3_IPU2_HSP_PODF                (0x7 << 16)
-
-#define BM_CDHIPR_AXI_PODF_BUSY                (0x1 << 0)
-#define BM_CDHIPR_AHB_PODF_BUSY                (0x1 << 1)
-#define BM_CDHIPR_MMDC_CH1_PODF_BUSY   (0x1 << 2)
-#define BM_CDHIPR_PERIPH2_SEL_BUSY     (0x1 << 3)
-#define BM_CDHIPR_MMDC_CH0_PODF_BUSY   (0x1 << 4)
-#define BM_CDHIPR_PERIPH_SEL_BUSY      (0x1 << 5)
-#define BM_CDHIPR_ARM_PODF_BUSY                (0x1 << 16)
-
-#define BP_CLPCR_LPM                   0
-#define BM_CLPCR_LPM                   (0x3 << 0)
-#define BM_CLPCR_BYPASS_PMIC_READY     (0x1 << 2)
-#define BM_CLPCR_ARM_CLK_DIS_ON_LPM    (0x1 << 5)
-#define BM_CLPCR_SBYOS                 (0x1 << 6)
-#define BM_CLPCR_DIS_REF_OSC           (0x1 << 7)
-#define BM_CLPCR_VSTBY                 (0x1 << 8)
-#define BP_CLPCR_STBY_COUNT            9
-#define BM_CLPCR_STBY_COUNT            (0x3 << 9)
-#define BM_CLPCR_COSC_PWRDOWN          (0x1 << 11)
-#define BM_CLPCR_WB_PER_AT_LPM         (0x1 << 16)
-#define BM_CLPCR_WB_CORE_AT_LPM                (0x1 << 17)
-#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS   (0x1 << 19)
-#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS   (0x1 << 21)
-#define BM_CLPCR_MASK_CORE0_WFI                (0x1 << 22)
-#define BM_CLPCR_MASK_CORE1_WFI                (0x1 << 23)
-#define BM_CLPCR_MASK_CORE2_WFI                (0x1 << 24)
-#define BM_CLPCR_MASK_CORE3_WFI                (0x1 << 25)
-#define BM_CLPCR_MASK_SCU_IDLE         (0x1 << 26)
-#define BM_CLPCR_MASK_L2CC_IDLE                (0x1 << 27)
-
-#define BP_CCOSR_CKO1_EN               7
-#define BP_CCOSR_CKO1_PODF             4
-#define BM_CCOSR_CKO1_PODF             (0x7 << 4)
-#define BP_CCOSR_CKO1_SEL              0
-#define BM_CCOSR_CKO1_SEL              (0xf << 0)
-
-#define FREQ_480M      480000000
-#define FREQ_528M      528000000
-#define FREQ_594M      594000000
-#define FREQ_650M      650000000
-#define FREQ_1300M     1300000000
-
-static struct clk pll1_sys;
-static struct clk pll2_bus;
-static struct clk pll3_usb_otg;
-static struct clk pll4_audio;
-static struct clk pll5_video;
-static struct clk pll6_mlb;
-static struct clk pll7_usb_host;
-static struct clk pll8_enet;
-static struct clk apbh_dma_clk;
-static struct clk arm_clk;
-static struct clk ipg_clk;
-static struct clk ahb_clk;
-static struct clk axi_clk;
-static struct clk mmdc_ch0_axi_clk;
-static struct clk mmdc_ch1_axi_clk;
-static struct clk periph_clk;
-static struct clk periph_pre_clk;
-static struct clk periph_clk2_clk;
-static struct clk periph2_clk;
-static struct clk periph2_pre_clk;
-static struct clk periph2_clk2_clk;
-static struct clk gpu2d_core_clk;
-static struct clk gpu3d_core_clk;
-static struct clk gpu3d_shader_clk;
-static struct clk ipg_perclk;
-static struct clk emi_clk;
-static struct clk emi_slow_clk;
-static struct clk can1_clk;
-static struct clk uart_clk;
-static struct clk usdhc1_clk;
-static struct clk usdhc2_clk;
-static struct clk usdhc3_clk;
-static struct clk usdhc4_clk;
-static struct clk vpu_clk;
-static struct clk hsi_tx_clk;
-static struct clk ipu1_di0_pre_clk;
-static struct clk ipu1_di1_pre_clk;
-static struct clk ipu2_di0_pre_clk;
-static struct clk ipu2_di1_pre_clk;
-static struct clk ipu1_clk;
-static struct clk ipu2_clk;
-static struct clk ssi1_clk;
-static struct clk ssi3_clk;
-static struct clk esai_clk;
-static struct clk ssi2_clk;
-static struct clk spdif_clk;
-static struct clk asrc_serial_clk;
-static struct clk gpu2d_axi_clk;
-static struct clk gpu3d_axi_clk;
-static struct clk pcie_clk;
-static struct clk vdo_axi_clk;
-static struct clk ldb_di0_clk;
-static struct clk ldb_di1_clk;
-static struct clk ipu1_di0_clk;
-static struct clk ipu1_di1_clk;
-static struct clk ipu2_di0_clk;
-static struct clk ipu2_di1_clk;
-static struct clk enfc_clk;
-static struct clk cko1_clk;
-static struct clk dummy_clk = {};
-
-static unsigned long external_high_reference;
-static unsigned long external_low_reference;
-static unsigned long oscillator_reference;
-
-static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
-{
-       return oscillator_reference;
-}
-
-static unsigned long get_high_reference_clock_rate(struct clk *clk)
-{
-       return external_high_reference;
-}
-
-static unsigned long get_low_reference_clock_rate(struct clk *clk)
-{
-       return external_low_reference;
-}
-
-static struct clk ckil_clk = {
-       .get_rate = get_low_reference_clock_rate,
-};
-
-static struct clk ckih_clk = {
-       .get_rate = get_high_reference_clock_rate,
-};
-
-static struct clk osc_clk = {
-       .get_rate = get_oscillator_reference_clock_rate,
-};
-
-static inline void __iomem *pll_get_reg_addr(struct clk *pll)
-{
-       if (pll == &pll1_sys)
-               return PLL1_SYS;
-       else if (pll == &pll2_bus)
-               return PLL2_BUS;
-       else if (pll == &pll3_usb_otg)
-               return PLL3_USB_OTG;
-       else if (pll == &pll4_audio)
-               return PLL4_AUDIO;
-       else if (pll == &pll5_video)
-               return PLL5_VIDEO;
-       else if (pll == &pll6_mlb)
-               return PLL6_MLB;
-       else if (pll == &pll7_usb_host)
-               return PLL7_USB_HOST;
-       else if (pll == &pll8_enet)
-               return PLL8_ENET;
-       else
-               BUG();
-
-       return NULL;
-}
-
-static int pll_enable(struct clk *clk)
-{
-       int timeout = 0x100000;
-       void __iomem *reg;
-       u32 val;
-
-       reg = pll_get_reg_addr(clk);
-       val = readl_relaxed(reg);
-       val &= ~BM_PLL_BYPASS;
-       val &= ~BM_PLL_POWER_DOWN;
-       /* 480MHz PLLs have the opposite definition for power bit */
-       if (clk == &pll3_usb_otg || clk == &pll7_usb_host)
-               val |= BM_PLL_POWER_DOWN;
-       writel_relaxed(val, reg);
-
-       /* Wait for PLL to lock */
-       while (!(readl_relaxed(reg) & BM_PLL_LOCK) && --timeout)
-               cpu_relax();
-
-       if (unlikely(!timeout))
-               return -EBUSY;
-
-       /* Enable the PLL output now */
-       val = readl_relaxed(reg);
-       val |= BM_PLL_ENABLE;
-       writel_relaxed(val, reg);
-
-       return 0;
-}
-
-static void pll_disable(struct clk *clk)
-{
-       void __iomem *reg;
-       u32 val;
-
-       reg = pll_get_reg_addr(clk);
-       val = readl_relaxed(reg);
-       val &= ~BM_PLL_ENABLE;
-       val |= BM_PLL_BYPASS;
-       val |= BM_PLL_POWER_DOWN;
-       if (clk == &pll3_usb_otg || clk == &pll7_usb_host)
-               val &= ~BM_PLL_POWER_DOWN;
-       writel_relaxed(val, reg);
-}
-
-static unsigned long pll1_sys_get_rate(struct clk *clk)
-{
-       u32 div = (readl_relaxed(PLL1_SYS) & BM_PLL_SYS_DIV_SELECT) >>
-                 BP_PLL_SYS_DIV_SELECT;
-
-       return clk_get_rate(clk->parent) * div / 2;
-}
-
-static int pll1_sys_set_rate(struct clk *clk, unsigned long rate)
-{
-       u32 val, div;
-
-       if (rate < FREQ_650M || rate > FREQ_1300M)
-               return -EINVAL;
-
-       div = rate * 2 / clk_get_rate(clk->parent);
-       val = readl_relaxed(PLL1_SYS);
-       val &= ~BM_PLL_SYS_DIV_SELECT;
-       val |= div << BP_PLL_SYS_DIV_SELECT;
-       writel_relaxed(val, PLL1_SYS);
-
-       return 0;
-}
-
-static unsigned long pll8_enet_get_rate(struct clk *clk)
-{
-       u32 div = (readl_relaxed(PLL8_ENET) & BM_PLL_ENET_DIV_SELECT) >>
-                 BP_PLL_ENET_DIV_SELECT;
-
-       switch (div) {
-       case 0:
-               return 25000000;
-       case 1:
-               return 50000000;
-       case 2:
-               return 100000000;
-       case 3:
-               return 125000000;
-       }
-
-       return 0;
-}
-
-static int pll8_enet_set_rate(struct clk *clk, unsigned long rate)
-{
-       u32 val, div;
-
-       switch (rate) {
-       case 25000000:
-               div = 0;
-               break;
-       case 50000000:
-               div = 1;
-               break;
-       case 100000000:
-               div = 2;
-               break;
-       case 125000000:
-               div = 3;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       val = readl_relaxed(PLL8_ENET);
-       val &= ~BM_PLL_ENET_DIV_SELECT;
-       val |= div << BP_PLL_ENET_DIV_SELECT;
-       writel_relaxed(val, PLL8_ENET);
-
-       return 0;
-}
-
-static unsigned long pll_av_get_rate(struct clk *clk)
-{
-       void __iomem *reg = (clk == &pll4_audio) ? PLL4_AUDIO : PLL5_VIDEO;
-       unsigned long parent_rate = clk_get_rate(clk->parent);
-       u32 mfn = readl_relaxed(reg + PLL_NUM_OFFSET);
-       u32 mfd = readl_relaxed(reg + PLL_DENOM_OFFSET);
-       u32 div = (readl_relaxed(reg) & BM_PLL_AV_DIV_SELECT) >>
-                 BP_PLL_AV_DIV_SELECT;
-
-       return (parent_rate * div) + ((parent_rate / mfd) * mfn);
-}
-
-static int pll_av_set_rate(struct clk *clk, unsigned long rate)
-{
-       void __iomem *reg = (clk == &pll4_audio) ? PLL4_AUDIO : PLL5_VIDEO;
-       unsigned int parent_rate = clk_get_rate(clk->parent);
-       u32 val, div;
-       u32 mfn, mfd = 1000000;
-       s64 temp64;
-
-       if (rate < FREQ_650M || rate > FREQ_1300M)
-               return -EINVAL;
-
-       div = rate / parent_rate;
-       temp64 = (u64) (rate - div * parent_rate);
-       temp64 *= mfd;
-       do_div(temp64, parent_rate);
-       mfn = temp64;
-
-       val = readl_relaxed(reg);
-       val &= ~BM_PLL_AV_DIV_SELECT;
-       val |= div << BP_PLL_AV_DIV_SELECT;
-       writel_relaxed(val, reg);
-       writel_relaxed(mfn, reg + PLL_NUM_OFFSET);
-       writel_relaxed(mfd, reg + PLL_DENOM_OFFSET);
-
-       return 0;
-}
-
-static void __iomem *pll_get_div_reg_bit(struct clk *clk, u32 *bp, u32 *bm)
-{
-       void __iomem *reg;
-
-       if (clk == &pll2_bus) {
-               reg = PLL2_BUS;
-               *bp = BP_PLL_BUS_DIV_SELECT;
-               *bm = BM_PLL_BUS_DIV_SELECT;
-       } else if (clk == &pll3_usb_otg) {
-               reg = PLL3_USB_OTG;
-               *bp = BP_PLL_USB_DIV_SELECT;
-               *bm = BM_PLL_USB_DIV_SELECT;
-       } else if (clk == &pll7_usb_host) {
-               reg = PLL7_USB_HOST;
-               *bp = BP_PLL_USB_DIV_SELECT;
-               *bm = BM_PLL_USB_DIV_SELECT;
-       } else {
-               BUG();
-       }
-
-       return reg;
-}
-
-static unsigned long pll_get_rate(struct clk *clk)
-{
-       void __iomem *reg;
-       u32 div, bp, bm;
-
-       reg = pll_get_div_reg_bit(clk, &bp, &bm);
-       div = (readl_relaxed(reg) & bm) >> bp;
-
-       return (div == 1) ? clk_get_rate(clk->parent) * 22 :
-                           clk_get_rate(clk->parent) * 20;
-}
-
-static int pll_set_rate(struct clk *clk, unsigned long rate)
-{
-       void __iomem *reg;
-       u32 val, div, bp, bm;
-
-       if (rate == FREQ_528M)
-               div = 1;
-       else if (rate == FREQ_480M)
-               div = 0;
-       else
-               return -EINVAL;
-
-       reg = pll_get_div_reg_bit(clk, &bp, &bm);
-       val = readl_relaxed(reg);
-       val &= ~bm;
-       val |= div << bp;
-       writel_relaxed(val, reg);
-
-       return 0;
-}
-
-#define pll2_bus_get_rate      pll_get_rate
-#define pll2_bus_set_rate      pll_set_rate
-#define pll3_usb_otg_get_rate  pll_get_rate
-#define pll3_usb_otg_set_rate  pll_set_rate
-#define pll7_usb_host_get_rate pll_get_rate
-#define pll7_usb_host_set_rate pll_set_rate
-#define pll4_audio_get_rate    pll_av_get_rate
-#define pll4_audio_set_rate    pll_av_set_rate
-#define pll5_video_get_rate    pll_av_get_rate
-#define pll5_video_set_rate    pll_av_set_rate
-#define pll6_mlb_get_rate      NULL
-#define pll6_mlb_set_rate      NULL
-
-#define DEF_PLL(name)                                  \
-       static struct clk name = {                      \
-               .enable         = pll_enable,           \
-               .disable        = pll_disable,          \
-               .get_rate       = name##_get_rate,      \
-               .set_rate       = name##_set_rate,      \
-               .parent         = &osc_clk,             \
-       }
-
-DEF_PLL(pll1_sys);
-DEF_PLL(pll2_bus);
-DEF_PLL(pll3_usb_otg);
-DEF_PLL(pll4_audio);
-DEF_PLL(pll5_video);
-DEF_PLL(pll6_mlb);
-DEF_PLL(pll7_usb_host);
-DEF_PLL(pll8_enet);
-
-static unsigned long pfd_get_rate(struct clk *clk)
-{
-       u64 tmp = (u64) clk_get_rate(clk->parent) * 18;
-       u32 frac, bp_frac;
-
-       if (apbh_dma_clk.usecount == 0)
-               apbh_dma_clk.enable(&apbh_dma_clk);
-
-       bp_frac = clk->enable_shift - 7;
-       frac = readl_relaxed(clk->enable_reg) >> bp_frac & PFD_FRAC_MASK;
-       do_div(tmp, frac);
-
-       return tmp;
-}
-
-static int pfd_set_rate(struct clk *clk, unsigned long rate)
-{
-       u32 val, frac, bp_frac;
-       u64 tmp = (u64) clk_get_rate(clk->parent) * 18;
-
-       if (apbh_dma_clk.usecount == 0)
-               apbh_dma_clk.enable(&apbh_dma_clk);
-
-       /*
-        * Round up the divider so that we don't set a rate
-        * higher than what is requested
-        */
-       tmp += rate / 2;
-       do_div(tmp, rate);
-       frac = tmp;
-       frac = (frac < 12) ? 12 : frac;
-       frac = (frac > 35) ? 35 : frac;
-
-       /*
-        * The frac field always starts from 7 bits lower
-        * position of enable bit
-        */
-       bp_frac = clk->enable_shift - 7;
-       val = readl_relaxed(clk->enable_reg);
-       val &= ~(PFD_FRAC_MASK << bp_frac);
-       val |= frac << bp_frac;
-       writel_relaxed(val, clk->enable_reg);
-
-       tmp = (u64) clk_get_rate(clk->parent) * 18;
-       do_div(tmp, frac);
-
-       if (apbh_dma_clk.usecount == 0)
-               apbh_dma_clk.disable(&apbh_dma_clk);
-
-       return 0;
-}
-
-static unsigned long pfd_round_rate(struct clk *clk, unsigned long rate)
-{
-       u32 frac;
-       u64 tmp;
-
-       tmp = (u64) clk_get_rate(clk->parent) * 18;
-       tmp += rate / 2;
-       do_div(tmp, rate);
-       frac = tmp;
-       frac = (frac < 12) ? 12 : frac;
-       frac = (frac > 35) ? 35 : frac;
-       tmp = (u64) clk_get_rate(clk->parent) * 18;
-       do_div(tmp, frac);
-
-       return tmp;
-}
-
-static int pfd_enable(struct clk *clk)
-{
-       u32 val;
-
-       if (apbh_dma_clk.usecount == 0)
-               apbh_dma_clk.enable(&apbh_dma_clk);
-
-       val = readl_relaxed(clk->enable_reg);
-       val &= ~(1 << clk->enable_shift);
-       writel_relaxed(val, clk->enable_reg);
-
-       if (apbh_dma_clk.usecount == 0)
-               apbh_dma_clk.disable(&apbh_dma_clk);
-
-       return 0;
-}
-
-static void pfd_disable(struct clk *clk)
-{
-       u32 val;
-
-       if (apbh_dma_clk.usecount == 0)
-               apbh_dma_clk.enable(&apbh_dma_clk);
-
-       val = readl_relaxed(clk->enable_reg);
-       val |= 1 << clk->enable_shift;
-       writel_relaxed(val, clk->enable_reg);
-
-       if (apbh_dma_clk.usecount == 0)
-               apbh_dma_clk.disable(&apbh_dma_clk);
-}
-
-#define DEF_PFD(name, er, es, p)                       \
-       static struct clk name = {                      \
-               .enable_reg     = er,                   \
-               .enable_shift   = es,                   \
-               .enable         = pfd_enable,           \
-               .disable        = pfd_disable,          \
-               .get_rate       = pfd_get_rate,         \
-               .set_rate       = pfd_set_rate,         \
-               .round_rate     = pfd_round_rate,       \
-               .parent         = p,                    \
-       }
-
-DEF_PFD(pll2_pfd_352m, PFD_528, PFD0, &pll2_bus);
-DEF_PFD(pll2_pfd_594m, PFD_528, PFD1, &pll2_bus);
-DEF_PFD(pll2_pfd_400m, PFD_528, PFD2, &pll2_bus);
-DEF_PFD(pll3_pfd_720m, PFD_480, PFD0, &pll3_usb_otg);
-DEF_PFD(pll3_pfd_540m, PFD_480, PFD1, &pll3_usb_otg);
-DEF_PFD(pll3_pfd_508m, PFD_480, PFD2, &pll3_usb_otg);
-DEF_PFD(pll3_pfd_454m, PFD_480, PFD3, &pll3_usb_otg);
-
-static unsigned long twd_clk_get_rate(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) / 2;
-}
-
-static struct clk twd_clk = {
-       .parent = &arm_clk,
-       .get_rate = twd_clk_get_rate,
-};
-
-static unsigned long pll2_200m_get_rate(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) / 2;
-}
-
-static struct clk pll2_200m = {
-       .parent = &pll2_pfd_400m,
-       .get_rate = pll2_200m_get_rate,
-};
-
-static unsigned long pll3_120m_get_rate(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) / 4;
-}
-
-static struct clk pll3_120m = {
-       .parent = &pll3_usb_otg,
-       .get_rate = pll3_120m_get_rate,
-};
-
-static unsigned long pll3_80m_get_rate(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) / 6;
-}
-
-static struct clk pll3_80m = {
-       .parent = &pll3_usb_otg,
-       .get_rate = pll3_80m_get_rate,
-};
-
-static unsigned long pll3_60m_get_rate(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) / 8;
-}
-
-static struct clk pll3_60m = {
-       .parent = &pll3_usb_otg,
-       .get_rate = pll3_60m_get_rate,
-};
-
-static int pll1_sw_clk_set_parent(struct clk *clk, struct clk *parent)
-{
-       u32 val = readl_relaxed(CCSR);
-
-       if (parent == &pll1_sys) {
-               val &= ~BM_CCSR_PLL1_SW_SEL;
-               val &= ~BM_CCSR_STEP_SEL;
-       } else if (parent == &osc_clk) {
-               val |= BM_CCSR_PLL1_SW_SEL;
-               val &= ~BM_CCSR_STEP_SEL;
-       } else if (parent == &pll2_pfd_400m) {
-               val |= BM_CCSR_PLL1_SW_SEL;
-               val |= BM_CCSR_STEP_SEL;
-       } else {
-               return -EINVAL;
-       }
-
-       writel_relaxed(val, CCSR);
-
-       return 0;
-}
-
-static struct clk pll1_sw_clk = {
-       .parent = &pll1_sys,
-       .set_parent = pll1_sw_clk_set_parent,
-};
-
-static void calc_pred_podf_dividers(u32 div, u32 *pred, u32 *podf)
-{
-       u32 min_pred, temp_pred, old_err, err;
-
-       if (div >= 512) {
-               *pred = 8;
-               *podf = 64;
-       } else if (div >= 8) {
-               min_pred = (div - 1) / 64 + 1;
-               old_err = 8;
-               for (temp_pred = 8; temp_pred >= min_pred; temp_pred--) {
-                       err = div % temp_pred;
-                       if (err == 0) {
-                               *pred = temp_pred;
-                               break;
-                       }
-                       err = temp_pred - err;
-                       if (err < old_err) {
-                               old_err = err;
-                               *pred = temp_pred;
-                       }
-               }
-               *podf = (div + *pred - 1) / *pred;
-       } else if (div < 8) {
-               *pred = div;
-               *podf = 1;
-       }
-}
-
-static int _clk_enable(struct clk *clk)
-{
-       u32 reg;
-       reg = readl_relaxed(clk->enable_reg);
-       reg |= 0x3 << clk->enable_shift;
-       writel_relaxed(reg, clk->enable_reg);
-
-       return 0;
-}
-
-static void _clk_disable(struct clk *clk)
-{
-       u32 reg;
-       reg = readl_relaxed(clk->enable_reg);
-       reg &= ~(0x3 << clk->enable_shift);
-       writel_relaxed(reg, clk->enable_reg);
-}
-
-static int _clk_enable_1b(struct clk *clk)
-{
-       u32 reg;
-       reg = readl_relaxed(clk->enable_reg);
-       reg |= 0x1 << clk->enable_shift;
-       writel_relaxed(reg, clk->enable_reg);
-
-       return 0;
-}
-
-static void _clk_disable_1b(struct clk *clk)
-{
-       u32 reg;
-       reg = readl_relaxed(clk->enable_reg);
-       reg &= ~(0x1 << clk->enable_shift);
-       writel_relaxed(reg, clk->enable_reg);
-}
-
-struct divider {
-       struct clk *clk;
-       void __iomem *reg;
-       u32 bp_pred;
-       u32 bm_pred;
-       u32 bp_podf;
-       u32 bm_podf;
-};
-
-#define DEF_CLK_DIV1(d, c, r, b)                               \
-       static struct divider d = {                             \
-               .clk = c,                                       \
-               .reg = r,                                       \
-               .bp_podf = BP_##r##_##b##_PODF,                 \
-               .bm_podf = BM_##r##_##b##_PODF,                 \
-       }
-
-DEF_CLK_DIV1(arm_div,          &arm_clk,               CACRR,  ARM);
-DEF_CLK_DIV1(ipg_div,          &ipg_clk,               CBCDR,  IPG);
-DEF_CLK_DIV1(ahb_div,          &ahb_clk,               CBCDR,  AHB);
-DEF_CLK_DIV1(axi_div,          &axi_clk,               CBCDR,  AXI);
-DEF_CLK_DIV1(mmdc_ch0_axi_div, &mmdc_ch0_axi_clk,      CBCDR,  MMDC_CH0_AXI);
-DEF_CLK_DIV1(mmdc_ch1_axi_div, &mmdc_ch1_axi_clk,      CBCDR,  MMDC_CH1_AXI);
-DEF_CLK_DIV1(periph_clk2_div,  &periph_clk2_clk,       CBCDR,  PERIPH_CLK2);
-DEF_CLK_DIV1(periph2_clk2_div, &periph2_clk2_clk,      CBCDR,  PERIPH2_CLK2);
-DEF_CLK_DIV1(gpu2d_core_div,   &gpu2d_core_clk,        CBCMR,  GPU2D_CORE);
-DEF_CLK_DIV1(gpu3d_core_div,   &gpu3d_core_clk,        CBCMR,  GPU3D_CORE);
-DEF_CLK_DIV1(gpu3d_shader_div, &gpu3d_shader_clk,      CBCMR,  GPU3D_SHADER);
-DEF_CLK_DIV1(ipg_perclk_div,   &ipg_perclk,            CSCMR1, PERCLK);
-DEF_CLK_DIV1(emi_div,          &emi_clk,               CSCMR1, EMI);
-DEF_CLK_DIV1(emi_slow_div,     &emi_slow_clk,          CSCMR1, EMI_SLOW);
-DEF_CLK_DIV1(can_div,          &can1_clk,              CSCMR2, CAN);
-DEF_CLK_DIV1(uart_div,         &uart_clk,              CSCDR1, UART);
-DEF_CLK_DIV1(usdhc1_div,       &usdhc1_clk,            CSCDR1, USDHC1);
-DEF_CLK_DIV1(usdhc2_div,       &usdhc2_clk,            CSCDR1, USDHC2);
-DEF_CLK_DIV1(usdhc3_div,       &usdhc3_clk,            CSCDR1, USDHC3);
-DEF_CLK_DIV1(usdhc4_div,       &usdhc4_clk,            CSCDR1, USDHC4);
-DEF_CLK_DIV1(vpu_div,          &vpu_clk,               CSCDR1, VPU_AXI);
-DEF_CLK_DIV1(hsi_tx_div,       &hsi_tx_clk,            CDCDR,  HSI_TX);
-DEF_CLK_DIV1(ipu1_di0_pre_div, &ipu1_di0_pre_clk,      CHSCCDR, IPU1_DI0_PRE);
-DEF_CLK_DIV1(ipu1_di1_pre_div, &ipu1_di1_pre_clk,      CHSCCDR, IPU1_DI1_PRE);
-DEF_CLK_DIV1(ipu2_di0_pre_div, &ipu2_di0_pre_clk,      CSCDR2, IPU2_DI0_PRE);
-DEF_CLK_DIV1(ipu2_di1_pre_div, &ipu2_di1_pre_clk,      CSCDR2, IPU2_DI1_PRE);
-DEF_CLK_DIV1(ipu1_div,         &ipu1_clk,              CSCDR3, IPU1_HSP);
-DEF_CLK_DIV1(ipu2_div,         &ipu2_clk,              CSCDR3, IPU2_HSP);
-DEF_CLK_DIV1(cko1_div,         &cko1_clk,              CCOSR, CKO1);
-
-#define DEF_CLK_DIV2(d, c, r, b)                               \
-       static struct divider d = {                             \
-               .clk = c,                                       \
-               .reg = r,                                       \
-               .bp_pred = BP_##r##_##b##_PRED,                 \
-               .bm_pred = BM_##r##_##b##_PRED,                 \
-               .bp_podf = BP_##r##_##b##_PODF,                 \
-               .bm_podf = BM_##r##_##b##_PODF,                 \
-       }
-
-DEF_CLK_DIV2(ssi1_div,         &ssi1_clk,              CS1CDR, SSI1);
-DEF_CLK_DIV2(ssi3_div,         &ssi3_clk,              CS1CDR, SSI3);
-DEF_CLK_DIV2(esai_div,         &esai_clk,              CS1CDR, ESAI);
-DEF_CLK_DIV2(ssi2_div,         &ssi2_clk,              CS2CDR, SSI2);
-DEF_CLK_DIV2(enfc_div,         &enfc_clk,              CS2CDR, ENFC);
-DEF_CLK_DIV2(spdif_div,                &spdif_clk,             CDCDR,  SPDIF);
-DEF_CLK_DIV2(asrc_serial_div,  &asrc_serial_clk,       CDCDR,  ASRC_SERIAL);
-
-static struct divider *dividers[] = {
-       &arm_div,
-       &ipg_div,
-       &ahb_div,
-       &axi_div,
-       &mmdc_ch0_axi_div,
-       &mmdc_ch1_axi_div,
-       &periph_clk2_div,
-       &periph2_clk2_div,
-       &gpu2d_core_div,
-       &gpu3d_core_div,
-       &gpu3d_shader_div,
-       &ipg_perclk_div,
-       &emi_div,
-       &emi_slow_div,
-       &can_div,
-       &uart_div,
-       &usdhc1_div,
-       &usdhc2_div,
-       &usdhc3_div,
-       &usdhc4_div,
-       &vpu_div,
-       &hsi_tx_div,
-       &ipu1_di0_pre_div,
-       &ipu1_di1_pre_div,
-       &ipu2_di0_pre_div,
-       &ipu2_di1_pre_div,
-       &ipu1_div,
-       &ipu2_div,
-       &ssi1_div,
-       &ssi3_div,
-       &esai_div,
-       &ssi2_div,
-       &enfc_div,
-       &spdif_div,
-       &asrc_serial_div,
-       &cko1_div,
-};
-
-static unsigned long ldb_di_clk_get_rate(struct clk *clk)
-{
-       u32 val = readl_relaxed(CSCMR2);
-
-       val &= (clk == &ldb_di0_clk) ? BM_CSCMR2_LDB_DI0_IPU_DIV :
-                                      BM_CSCMR2_LDB_DI1_IPU_DIV;
-       if (val)
-               return clk_get_rate(clk->parent) / 7;
-       else
-               return clk_get_rate(clk->parent) * 2 / 7;
-}
-
-static int ldb_di_clk_set_rate(struct clk *clk, unsigned long rate)
-{
-       unsigned long parent_rate = clk_get_rate(clk->parent);
-       u32 val = readl_relaxed(CSCMR2);
-
-       if (rate * 7 <= parent_rate + parent_rate / 20)
-               val |= BM_CSCMR2_LDB_DI0_IPU_DIV;
-       else
-               val &= ~BM_CSCMR2_LDB_DI0_IPU_DIV;
-
-       writel_relaxed(val, CSCMR2);
-
-       return 0;
-}
-
-static unsigned long ldb_di_clk_round_rate(struct clk *clk, unsigned long rate)
-{
-       unsigned long parent_rate = clk_get_rate(clk->parent);
-
-       if (rate * 7 <= parent_rate + parent_rate / 20)
-               return parent_rate / 7;
-       else
-               return 2 * parent_rate / 7;
-}
-
-static unsigned long _clk_get_rate(struct clk *clk)
-{
-       struct divider *d;
-       u32 val, pred, podf;
-       int i, num;
-
-       if (clk == &ldb_di0_clk || clk == &ldb_di1_clk)
-               return ldb_di_clk_get_rate(clk);
-
-       num = ARRAY_SIZE(dividers);
-       for (i = 0; i < num; i++)
-               if (dividers[i]->clk == clk) {
-                       d = dividers[i];
-                       break;
-               }
-       if (i == num)
-               return clk_get_rate(clk->parent);
-
-       val = readl_relaxed(d->reg);
-       pred = ((val & d->bm_pred) >> d->bp_pred) + 1;
-       podf = ((val & d->bm_podf) >> d->bp_podf) + 1;
-
-       return clk_get_rate(clk->parent) / (pred * podf);
-}
-
-static int clk_busy_wait(struct clk *clk)
-{
-       int timeout = 0x100000;
-       u32 bm;
-
-       if (clk == &axi_clk)
-               bm = BM_CDHIPR_AXI_PODF_BUSY;
-       else if (clk == &ahb_clk)
-               bm = BM_CDHIPR_AHB_PODF_BUSY;
-       else if (clk == &mmdc_ch0_axi_clk)
-               bm = BM_CDHIPR_MMDC_CH0_PODF_BUSY;
-       else if (clk == &periph_clk)
-               bm = BM_CDHIPR_PERIPH_SEL_BUSY;
-       else if (clk == &arm_clk)
-               bm = BM_CDHIPR_ARM_PODF_BUSY;
-       else
-               return -EINVAL;
-
-       while ((readl_relaxed(CDHIPR) & bm) && --timeout)
-               cpu_relax();
-
-       if (unlikely(!timeout))
-               return -EBUSY;
-
-       return 0;
-}
-
-static int _clk_set_rate(struct clk *clk, unsigned long rate)
-{
-       unsigned long parent_rate = clk_get_rate(clk->parent);
-       struct divider *d;
-       u32 val, div, max_div, pred = 0, podf;
-       int i, num;
-
-       if (clk == &ldb_di0_clk || clk == &ldb_di1_clk)
-               return ldb_di_clk_set_rate(clk, rate);
-
-       num = ARRAY_SIZE(dividers);
-       for (i = 0; i < num; i++)
-               if (dividers[i]->clk == clk) {
-                       d = dividers[i];
-                       break;
-               }
-       if (i == num)
-               return -EINVAL;
-
-       max_div = ((d->bm_pred >> d->bp_pred) + 1) *
-                 ((d->bm_podf >> d->bp_podf) + 1);
-
-       div = parent_rate / rate;
-       if (div == 0)
-               div++;
-
-       if ((parent_rate / div != rate) || div > max_div)
-               return -EINVAL;
-
-       if (d->bm_pred) {
-               calc_pred_podf_dividers(div, &pred, &podf);
-       } else {
-               pred = 1;
-               podf = div;
-       }
-
-       val = readl_relaxed(d->reg);
-       val &= ~(d->bm_pred | d->bm_podf);
-       val |= (pred - 1) << d->bp_pred | (podf - 1) << d->bp_podf;
-       writel_relaxed(val, d->reg);
-
-       if (clk == &axi_clk || clk == &ahb_clk ||
-           clk == &mmdc_ch0_axi_clk || clk == &arm_clk)
-               return clk_busy_wait(clk);
-
-       return 0;
-}
-
-static unsigned long _clk_round_rate(struct clk *clk, unsigned long rate)
-{
-       unsigned long parent_rate = clk_get_rate(clk->parent);
-       u32 div = parent_rate / rate;
-       u32 div_max, pred = 0, podf;
-       struct divider *d;
-       int i, num;
-
-       if (clk == &ldb_di0_clk || clk == &ldb_di1_clk)
-               return ldb_di_clk_round_rate(clk, rate);
-
-       num = ARRAY_SIZE(dividers);
-       for (i = 0; i < num; i++)
-               if (dividers[i]->clk == clk) {
-                       d = dividers[i];
-                       break;
-               }
-       if (i == num)
-               return -EINVAL;
-
-       if (div == 0 || parent_rate % rate)
-               div++;
-
-       if (d->bm_pred) {
-               calc_pred_podf_dividers(div, &pred, &podf);
-               div = pred * podf;
-       } else {
-               div_max = (d->bm_podf >> d->bp_podf) + 1;
-               if (div > div_max)
-                       div = div_max;
-       }
-
-       return parent_rate / div;
-}
-
-struct multiplexer {
-       struct clk *clk;
-       void __iomem *reg;
-       u32 bp;
-       u32 bm;
-       int pnum;
-       struct clk *parents[];
-};
-
-static struct multiplexer axi_mux = {
-       .clk = &axi_clk,
-       .reg = CBCDR,
-       .bp = BP_CBCDR_AXI_SEL,
-       .bm = BM_CBCDR_AXI_SEL,
-       .parents = {
-               &periph_clk,
-               &pll2_pfd_400m,
-               &pll3_pfd_540m,
-               NULL
-       },
-};
-
-static struct multiplexer periph_mux = {
-       .clk = &periph_clk,
-       .reg = CBCDR,
-       .bp = BP_CBCDR_PERIPH_CLK_SEL,
-       .bm = BM_CBCDR_PERIPH_CLK_SEL,
-       .parents = {
-               &periph_pre_clk,
-               &periph_clk2_clk,
-               NULL
-       },
-};
-
-static struct multiplexer periph_pre_mux = {
-       .clk = &periph_pre_clk,
-       .reg = CBCMR,
-       .bp = BP_CBCMR_PRE_PERIPH_CLK_SEL,
-       .bm = BM_CBCMR_PRE_PERIPH_CLK_SEL,
-       .parents = {
-               &pll2_bus,
-               &pll2_pfd_400m,
-               &pll2_pfd_352m,
-               &pll2_200m,
-               NULL
-       },
-};
-
-static struct multiplexer periph_clk2_mux = {
-       .clk = &periph_clk2_clk,
-       .reg = CBCMR,
-       .bp = BP_CBCMR_PERIPH_CLK2_SEL,
-       .bm = BM_CBCMR_PERIPH_CLK2_SEL,
-       .parents = {
-               &pll3_usb_otg,
-               &osc_clk,
-               NULL
-       },
-};
-
-static struct multiplexer periph2_mux = {
-       .clk = &periph2_clk,
-       .reg = CBCDR,
-       .bp = BP_CBCDR_PERIPH2_CLK_SEL,
-       .bm = BM_CBCDR_PERIPH2_CLK_SEL,
-       .parents = {
-               &periph2_pre_clk,
-               &periph2_clk2_clk,
-               NULL
-       },
-};
-
-static struct multiplexer periph2_pre_mux = {
-       .clk = &periph2_pre_clk,
-       .reg = CBCMR,
-       .bp = BP_CBCMR_PRE_PERIPH2_CLK_SEL,
-       .bm = BM_CBCMR_PRE_PERIPH2_CLK_SEL,
-       .parents = {
-               &pll2_bus,
-               &pll2_pfd_400m,
-               &pll2_pfd_352m,
-               &pll2_200m,
-               NULL
-       },
-};
-
-static struct multiplexer periph2_clk2_mux = {
-       .clk = &periph2_clk2_clk,
-       .reg = CBCMR,
-       .bp = BP_CBCMR_PERIPH2_CLK2_SEL,
-       .bm = BM_CBCMR_PERIPH2_CLK2_SEL,
-       .parents = {
-               &pll3_usb_otg,
-               &osc_clk,
-               NULL
-       },
-};
-
-static struct multiplexer gpu2d_axi_mux = {
-       .clk = &gpu2d_axi_clk,
-       .reg = CBCMR,
-       .bp = BP_CBCMR_GPU2D_AXI_SEL,
-       .bm = BM_CBCMR_GPU2D_AXI_SEL,
-       .parents = {
-               &axi_clk,
-               &ahb_clk,
-               NULL
-       },
-};
-
-static struct multiplexer gpu3d_axi_mux = {
-       .clk = &gpu3d_axi_clk,
-       .reg = CBCMR,
-       .bp = BP_CBCMR_GPU3D_AXI_SEL,
-       .bm = BM_CBCMR_GPU3D_AXI_SEL,
-       .parents = {
-               &axi_clk,
-               &ahb_clk,
-               NULL
-       },
-};
-
-static struct multiplexer gpu3d_core_mux = {
-       .clk = &gpu3d_core_clk,
-       .reg = CBCMR,
-       .bp = BP_CBCMR_GPU3D_CORE_SEL,
-       .bm = BM_CBCMR_GPU3D_CORE_SEL,
-       .parents = {
-               &mmdc_ch0_axi_clk,
-               &pll3_usb_otg,
-               &pll2_pfd_594m,
-               &pll2_pfd_400m,
-               NULL
-       },
-};
-
-static struct multiplexer gpu3d_shader_mux = {
-       .clk = &gpu3d_shader_clk,
-       .reg = CBCMR,
-       .bp = BP_CBCMR_GPU3D_SHADER_SEL,
-       .bm = BM_CBCMR_GPU3D_SHADER_SEL,
-       .parents = {
-               &mmdc_ch0_axi_clk,
-               &pll3_usb_otg,
-               &pll2_pfd_594m,
-               &pll3_pfd_720m,
-               NULL
-       },
-};
-
-static struct multiplexer pcie_axi_mux = {
-       .clk = &pcie_clk,
-       .reg = CBCMR,
-       .bp = BP_CBCMR_PCIE_AXI_SEL,
-       .bm = BM_CBCMR_PCIE_AXI_SEL,
-       .parents = {
-               &axi_clk,
-               &ahb_clk,
-               NULL
-       },
-};
-
-static struct multiplexer vdo_axi_mux = {
-       .clk = &vdo_axi_clk,
-       .reg = CBCMR,
-       .bp = BP_CBCMR_VDO_AXI_SEL,
-       .bm = BM_CBCMR_VDO_AXI_SEL,
-       .parents = {
-               &axi_clk,
-               &ahb_clk,
-               NULL
-       },
-};
-
-static struct multiplexer vpu_axi_mux = {
-       .clk = &vpu_clk,
-       .reg = CBCMR,
-       .bp = BP_CBCMR_VPU_AXI_SEL,
-       .bm = BM_CBCMR_VPU_AXI_SEL,
-       .parents = {
-               &axi_clk,
-               &pll2_pfd_400m,
-               &pll2_pfd_352m,
-               NULL
-       },
-};
-
-static struct multiplexer gpu2d_core_mux = {
-       .clk = &gpu2d_core_clk,
-       .reg = CBCMR,
-       .bp = BP_CBCMR_GPU2D_CORE_SEL,
-       .bm = BM_CBCMR_GPU2D_CORE_SEL,
-       .parents = {
-               &axi_clk,
-               &pll3_usb_otg,
-               &pll2_pfd_352m,
-               &pll2_pfd_400m,
-               NULL
-       },
-};
-
-#define DEF_SSI_MUX(id)                                                        \
-       static struct multiplexer ssi##id##_mux = {                     \
-               .clk = &ssi##id##_clk,                                  \
-               .reg = CSCMR1,                                          \
-               .bp = BP_CSCMR1_SSI##id##_SEL,                          \
-               .bm = BM_CSCMR1_SSI##id##_SEL,                          \
-               .parents = {                                            \
-                       &pll3_pfd_508m,                                 \
-                       &pll3_pfd_454m,                                 \
-                       &pll4_audio,                                    \
-                       NULL                                            \
-               },                                                      \
-       }
-
-DEF_SSI_MUX(1);
-DEF_SSI_MUX(2);
-DEF_SSI_MUX(3);
-
-#define DEF_USDHC_MUX(id)                                              \
-       static struct multiplexer usdhc##id##_mux = {                   \
-               .clk = &usdhc##id##_clk,                                \
-               .reg = CSCMR1,                                          \
-               .bp = BP_CSCMR1_USDHC##id##_SEL,                        \
-               .bm = BM_CSCMR1_USDHC##id##_SEL,                        \
-               .parents = {                                            \
-                       &pll2_pfd_400m,                                 \
-                       &pll2_pfd_352m,                                 \
-                       NULL                                            \
-               },                                                      \
-       }
-
-DEF_USDHC_MUX(1);
-DEF_USDHC_MUX(2);
-DEF_USDHC_MUX(3);
-DEF_USDHC_MUX(4);
-
-static struct multiplexer emi_mux = {
-       .clk = &emi_clk,
-       .reg = CSCMR1,
-       .bp = BP_CSCMR1_EMI_SEL,
-       .bm = BM_CSCMR1_EMI_SEL,
-       .parents = {
-               &axi_clk,
-               &pll3_usb_otg,
-               &pll2_pfd_400m,
-               &pll2_pfd_352m,
-               NULL
-       },
-};
-
-static struct multiplexer emi_slow_mux = {
-       .clk = &emi_slow_clk,
-       .reg = CSCMR1,
-       .bp = BP_CSCMR1_EMI_SLOW_SEL,
-       .bm = BM_CSCMR1_EMI_SLOW_SEL,
-       .parents = {
-               &axi_clk,
-               &pll3_usb_otg,
-               &pll2_pfd_400m,
-               &pll2_pfd_352m,
-               NULL
-       },
-};
-
-static struct multiplexer esai_mux = {
-       .clk = &esai_clk,
-       .reg = CSCMR2,
-       .bp = BP_CSCMR2_ESAI_SEL,
-       .bm = BM_CSCMR2_ESAI_SEL,
-       .parents = {
-               &pll4_audio,
-               &pll3_pfd_508m,
-               &pll3_pfd_454m,
-               &pll3_usb_otg,
-               NULL
-       },
-};
-
-#define DEF_LDB_DI_MUX(id)                                             \
-       static struct multiplexer ldb_di##id##_mux = {                  \
-               .clk = &ldb_di##id##_clk,                               \
-               .reg = CS2CDR,                                          \
-               .bp = BP_CS2CDR_LDB_DI##id##_SEL,                       \
-               .bm = BM_CS2CDR_LDB_DI##id##_SEL,                       \
-               .parents = {                                            \
-                       &pll5_video,                                    \
-                       &pll2_pfd_352m,                                 \
-                       &pll2_pfd_400m,                                 \
-                       &pll3_pfd_540m,                                 \
-                       &pll3_usb_otg,                                  \
-                       NULL                                            \
-               },                                                      \
-       }
-
-DEF_LDB_DI_MUX(0);
-DEF_LDB_DI_MUX(1);
-
-static struct multiplexer enfc_mux = {
-       .clk = &enfc_clk,
-       .reg = CS2CDR,
-       .bp = BP_CS2CDR_ENFC_SEL,
-       .bm = BM_CS2CDR_ENFC_SEL,
-       .parents = {
-               &pll2_pfd_352m,
-               &pll2_bus,
-               &pll3_usb_otg,
-               &pll2_pfd_400m,
-               NULL
-       },
-};
-
-static struct multiplexer spdif_mux = {
-       .clk = &spdif_clk,
-       .reg = CDCDR,
-       .bp = BP_CDCDR_SPDIF_SEL,
-       .bm = BM_CDCDR_SPDIF_SEL,
-       .parents = {
-               &pll4_audio,
-               &pll3_pfd_508m,
-               &pll3_pfd_454m,
-               &pll3_usb_otg,
-               NULL
-       },
-};
-
-static struct multiplexer asrc_serial_mux = {
-       .clk = &asrc_serial_clk,
-       .reg = CDCDR,
-       .bp = BP_CDCDR_ASRC_SERIAL_SEL,
-       .bm = BM_CDCDR_ASRC_SERIAL_SEL,
-       .parents = {
-               &pll4_audio,
-               &pll3_pfd_508m,
-               &pll3_pfd_454m,
-               &pll3_usb_otg,
-               NULL
-       },
-};
-
-static struct multiplexer hsi_tx_mux = {
-       .clk = &hsi_tx_clk,
-       .reg = CDCDR,
-       .bp = BP_CDCDR_HSI_TX_SEL,
-       .bm = BM_CDCDR_HSI_TX_SEL,
-       .parents = {
-               &pll3_120m,
-               &pll2_pfd_400m,
-               NULL
-       },
-};
-
-#define DEF_IPU_DI_PRE_MUX(r, i, d)                                    \
-       static struct multiplexer ipu##i##_di##d##_pre_mux = {          \
-               .clk = &ipu##i##_di##d##_pre_clk,                       \
-               .reg = r,                                               \
-               .bp = BP_##r##_IPU##i##_DI##d##_PRE_SEL,                \
-               .bm = BM_##r##_IPU##i##_DI##d##_PRE_SEL,                \
-               .parents = {                                            \
-                       &mmdc_ch0_axi_clk,                              \
-                       &pll3_usb_otg,                                  \
-                       &pll5_video,                                    \
-                       &pll2_pfd_352m,                                 \
-                       &pll2_pfd_400m,                                 \
-                       &pll3_pfd_540m,                                 \
-                       NULL                                            \
-               },                                                      \
-       }
-
-DEF_IPU_DI_PRE_MUX(CHSCCDR, 1, 0);
-DEF_IPU_DI_PRE_MUX(CHSCCDR, 1, 1);
-DEF_IPU_DI_PRE_MUX(CSCDR2, 2, 0);
-DEF_IPU_DI_PRE_MUX(CSCDR2, 2, 1);
-
-#define DEF_IPU_DI_MUX(r, i, d)                                                \
-       static struct multiplexer ipu##i##_di##d##_mux = {              \
-               .clk = &ipu##i##_di##d##_clk,                           \
-               .reg = r,                                               \
-               .bp = BP_##r##_IPU##i##_DI##d##_SEL,                    \
-               .bm = BM_##r##_IPU##i##_DI##d##_SEL,                    \
-               .parents = {                                            \
-                       &ipu##i##_di##d##_pre_clk,                      \
-                       &dummy_clk,                                     \
-                       &dummy_clk,                                     \
-                       &ldb_di0_clk,                                   \
-                       &ldb_di1_clk,                                   \
-                       NULL                                            \
-               },                                                      \
-       }
-
-DEF_IPU_DI_MUX(CHSCCDR, 1, 0);
-DEF_IPU_DI_MUX(CHSCCDR, 1, 1);
-DEF_IPU_DI_MUX(CSCDR2, 2, 0);
-DEF_IPU_DI_MUX(CSCDR2, 2, 1);
-
-#define DEF_IPU_MUX(id)                                                        \
-       static struct multiplexer ipu##id##_mux = {                     \
-               .clk = &ipu##id##_clk,                                  \
-               .reg = CSCDR3,                                          \
-               .bp = BP_CSCDR3_IPU##id##_HSP_SEL,                      \
-               .bm = BM_CSCDR3_IPU##id##_HSP_SEL,                      \
-               .parents = {                                            \
-                       &mmdc_ch0_axi_clk,                              \
-                       &pll2_pfd_400m,                                 \
-                       &pll3_120m,                                     \
-                       &pll3_pfd_540m,                                 \
-                       NULL                                            \
-               },                                                      \
-       }
-
-DEF_IPU_MUX(1);
-DEF_IPU_MUX(2);
-
-static struct multiplexer cko1_mux = {
-       .clk = &cko1_clk,
-       .reg = CCOSR,
-       .bp = BP_CCOSR_CKO1_SEL,
-       .bm = BM_CCOSR_CKO1_SEL,
-       .parents = {
-               &pll3_usb_otg,
-               &pll2_bus,
-               &pll1_sys,
-               &pll5_video,
-               &dummy_clk,
-               &axi_clk,
-               &enfc_clk,
-               &ipu1_di0_clk,
-               &ipu1_di1_clk,
-               &ipu2_di0_clk,
-               &ipu2_di1_clk,
-               &ahb_clk,
-               &ipg_clk,
-               &ipg_perclk,
-               &ckil_clk,
-               &pll4_audio,
-               NULL
-       },
-};
-
-static struct multiplexer *multiplexers[] = {
-       &axi_mux,
-       &periph_mux,
-       &periph_pre_mux,
-       &periph_clk2_mux,
-       &periph2_mux,
-       &periph2_pre_mux,
-       &periph2_clk2_mux,
-       &gpu2d_axi_mux,
-       &gpu3d_axi_mux,
-       &gpu3d_core_mux,
-       &gpu3d_shader_mux,
-       &pcie_axi_mux,
-       &vdo_axi_mux,
-       &vpu_axi_mux,
-       &gpu2d_core_mux,
-       &ssi1_mux,
-       &ssi2_mux,
-       &ssi3_mux,
-       &usdhc1_mux,
-       &usdhc2_mux,
-       &usdhc3_mux,
-       &usdhc4_mux,
-       &emi_mux,
-       &emi_slow_mux,
-       &esai_mux,
-       &ldb_di0_mux,
-       &ldb_di1_mux,
-       &enfc_mux,
-       &spdif_mux,
-       &asrc_serial_mux,
-       &hsi_tx_mux,
-       &ipu1_di0_pre_mux,
-       &ipu1_di0_mux,
-       &ipu1_di1_pre_mux,
-       &ipu1_di1_mux,
-       &ipu2_di0_pre_mux,
-       &ipu2_di0_mux,
-       &ipu2_di1_pre_mux,
-       &ipu2_di1_mux,
-       &ipu1_mux,
-       &ipu2_mux,
-       &cko1_mux,
-};
-
-static int _clk_set_parent(struct clk *clk, struct clk *parent)
-{
-       struct multiplexer *m;
-       int i, num;
-       u32 val;
-
-       num = ARRAY_SIZE(multiplexers);
-       for (i = 0; i < num; i++)
-               if (multiplexers[i]->clk == clk) {
-                       m = multiplexers[i];
-                       break;
-               }
-       if (i == num)
-               return -EINVAL;
-
-       i = 0;
-       while (m->parents[i]) {
-               if (parent == m->parents[i])
-                       break;
-               i++;
-       }
-       if (!m->parents[i] || m->parents[i] == &dummy_clk)
-               return -EINVAL;
-
-       val = readl_relaxed(m->reg);
-       val &= ~m->bm;
-       val |= i << m->bp;
-       writel_relaxed(val, m->reg);
-
-       if (clk == &periph_clk)
-               return clk_busy_wait(clk);
-
-       return 0;
-}
-
-#define DEF_NG_CLK(name, p)                            \
-       static struct clk name = {                      \
-               .get_rate       = _clk_get_rate,        \
-               .set_rate       = _clk_set_rate,        \
-               .round_rate     = _clk_round_rate,      \
-               .set_parent     = _clk_set_parent,      \
-               .parent         = p,                    \
-       }
-
-DEF_NG_CLK(periph_clk2_clk,    &osc_clk);
-DEF_NG_CLK(periph_pre_clk,     &pll2_bus);
-DEF_NG_CLK(periph_clk,         &periph_pre_clk);
-DEF_NG_CLK(periph2_clk2_clk,   &osc_clk);
-DEF_NG_CLK(periph2_pre_clk,    &pll2_bus);
-DEF_NG_CLK(periph2_clk,                &periph2_pre_clk);
-DEF_NG_CLK(axi_clk,            &periph_clk);
-DEF_NG_CLK(emi_clk,            &axi_clk);
-DEF_NG_CLK(arm_clk,            &pll1_sw_clk);
-DEF_NG_CLK(ahb_clk,            &periph_clk);
-DEF_NG_CLK(ipg_clk,            &ahb_clk);
-DEF_NG_CLK(ipg_perclk,         &ipg_clk);
-DEF_NG_CLK(ipu1_di0_pre_clk,   &pll3_pfd_540m);
-DEF_NG_CLK(ipu1_di1_pre_clk,   &pll3_pfd_540m);
-DEF_NG_CLK(ipu2_di0_pre_clk,   &pll3_pfd_540m);
-DEF_NG_CLK(ipu2_di1_pre_clk,   &pll3_pfd_540m);
-DEF_NG_CLK(asrc_serial_clk,    &pll3_usb_otg);
-
-#define DEF_CLK(name, er, es, p, s)                    \
-       static struct clk name = {                      \
-               .enable_reg     = er,                   \
-               .enable_shift   = es,                   \
-               .enable         = _clk_enable,          \
-               .disable        = _clk_disable,         \
-               .get_rate       = _clk_get_rate,        \
-               .set_rate       = _clk_set_rate,        \
-               .round_rate     = _clk_round_rate,      \
-               .set_parent     = _clk_set_parent,      \
-               .parent         = p,                    \
-               .secondary      = s,                    \
-       }
-
-#define DEF_CLK_1B(name, er, es, p, s)                 \
-       static struct clk name = {                      \
-               .enable_reg     = er,                   \
-               .enable_shift   = es,                   \
-               .enable         = _clk_enable_1b,       \
-               .disable        = _clk_disable_1b,      \
-               .get_rate       = _clk_get_rate,        \
-               .set_rate       = _clk_set_rate,        \
-               .round_rate     = _clk_round_rate,      \
-               .set_parent     = _clk_set_parent,      \
-               .parent         = p,                    \
-               .secondary      = s,                    \
-       }
-
-DEF_CLK(aips_tz1_clk,    CCGR0, CG0,  &ahb_clk,          NULL);
-DEF_CLK(aips_tz2_clk,    CCGR0, CG1,  &ahb_clk,          NULL);
-DEF_CLK(apbh_dma_clk,    CCGR0, CG2,  &ahb_clk,          NULL);
-DEF_CLK(asrc_clk,        CCGR0, CG3,  &pll4_audio,       NULL);
-DEF_CLK(can1_serial_clk,  CCGR0, CG8,  &pll3_usb_otg,    NULL);
-DEF_CLK(can1_clk,        CCGR0, CG7,  &pll3_usb_otg,     &can1_serial_clk);
-DEF_CLK(can2_serial_clk,  CCGR0, CG10, &pll3_usb_otg,    NULL);
-DEF_CLK(can2_clk,        CCGR0, CG9,  &pll3_usb_otg,     &can2_serial_clk);
-DEF_CLK(ecspi1_clk,      CCGR1, CG0,  &pll3_60m,         NULL);
-DEF_CLK(ecspi2_clk,      CCGR1, CG1,  &pll3_60m,         NULL);
-DEF_CLK(ecspi3_clk,      CCGR1, CG2,  &pll3_60m,         NULL);
-DEF_CLK(ecspi4_clk,      CCGR1, CG3,  &pll3_60m,         NULL);
-DEF_CLK(ecspi5_clk,      CCGR1, CG4,  &pll3_60m,         NULL);
-DEF_CLK(enet_clk,        CCGR1, CG5,  &ipg_clk,          NULL);
-DEF_CLK(esai_clk,        CCGR1, CG8,  &pll3_usb_otg,     NULL);
-DEF_CLK(gpt_serial_clk,          CCGR1, CG11, &ipg_perclk,       NULL);
-DEF_CLK(gpt_clk,         CCGR1, CG10, &ipg_perclk,       &gpt_serial_clk);
-DEF_CLK(gpu2d_core_clk,          CCGR1, CG12, &pll2_pfd_352m,    &gpu2d_axi_clk);
-DEF_CLK(gpu3d_core_clk,          CCGR1, CG13, &pll2_pfd_594m,    &gpu3d_axi_clk);
-DEF_CLK(gpu3d_shader_clk, CCGR1, CG13, &pll3_pfd_720m,   &gpu3d_axi_clk);
-DEF_CLK(hdmi_iahb_clk,   CCGR2, CG0,  &ahb_clk,          NULL);
-DEF_CLK(hdmi_isfr_clk,   CCGR2, CG2,  &pll3_pfd_540m,    &hdmi_iahb_clk);
-DEF_CLK(i2c1_clk,        CCGR2, CG3,  &ipg_perclk,       NULL);
-DEF_CLK(i2c2_clk,        CCGR2, CG4,  &ipg_perclk,       NULL);
-DEF_CLK(i2c3_clk,        CCGR2, CG5,  &ipg_perclk,       NULL);
-DEF_CLK(iim_clk,         CCGR2, CG6,  &ipg_clk,          NULL);
-DEF_CLK(enfc_clk,        CCGR2, CG7,  &pll2_pfd_352m,    NULL);
-DEF_CLK(ipu1_clk,        CCGR3, CG0,  &mmdc_ch0_axi_clk, NULL);
-DEF_CLK(ipu1_di0_clk,    CCGR3, CG1,  &ipu1_di0_pre_clk, NULL);
-DEF_CLK(ipu1_di1_clk,    CCGR3, CG2,  &ipu1_di1_pre_clk, NULL);
-DEF_CLK(ipu2_clk,        CCGR3, CG3,  &mmdc_ch0_axi_clk, NULL);
-DEF_CLK(ipu2_di0_clk,    CCGR3, CG4,  &ipu2_di0_pre_clk, NULL);
-DEF_CLK(ipu2_di1_clk,    CCGR3, CG5,  &ipu2_di1_pre_clk, NULL);
-DEF_CLK(ldb_di0_clk,     CCGR3, CG6,  &pll3_pfd_540m,    NULL);
-DEF_CLK(ldb_di1_clk,     CCGR3, CG7,  &pll3_pfd_540m,    NULL);
-DEF_CLK(hsi_tx_clk,      CCGR3, CG8,  &pll2_pfd_400m,    NULL);
-DEF_CLK(mlb_clk,         CCGR3, CG9,  &pll6_mlb,         NULL);
-DEF_CLK(mmdc_ch0_ipg_clk, CCGR3, CG12, &ipg_clk,         NULL);
-DEF_CLK(mmdc_ch0_axi_clk, CCGR3, CG10, &periph_clk,      &mmdc_ch0_ipg_clk);
-DEF_CLK(mmdc_ch1_ipg_clk, CCGR3, CG13, &ipg_clk,         NULL);
-DEF_CLK(mmdc_ch1_axi_clk, CCGR3, CG11, &periph2_clk,     &mmdc_ch1_ipg_clk);
-DEF_CLK(openvg_axi_clk,   CCGR3, CG13, &axi_clk,         NULL);
-DEF_CLK(pwm1_clk,        CCGR4, CG8,  &ipg_perclk,       NULL);
-DEF_CLK(pwm2_clk,        CCGR4, CG9,  &ipg_perclk,       NULL);
-DEF_CLK(pwm3_clk,        CCGR4, CG10, &ipg_perclk,       NULL);
-DEF_CLK(pwm4_clk,        CCGR4, CG11, &ipg_perclk,       NULL);
-DEF_CLK(gpmi_bch_apb_clk, CCGR4, CG12, &usdhc3_clk,      NULL);
-DEF_CLK(gpmi_bch_clk,    CCGR4, CG13, &usdhc4_clk,       &gpmi_bch_apb_clk);
-DEF_CLK(gpmi_apb_clk,    CCGR4, CG15, &usdhc3_clk,       &gpmi_bch_clk);
-DEF_CLK(gpmi_io_clk,     CCGR4, CG14, &enfc_clk,         &gpmi_apb_clk);
-DEF_CLK(sdma_clk,        CCGR5, CG3,  &ahb_clk,          NULL);
-DEF_CLK(spba_clk,        CCGR5, CG6,  &ipg_clk,          NULL);
-DEF_CLK(spdif_clk,       CCGR5, CG7,  &pll3_usb_otg,     &spba_clk);
-DEF_CLK(ssi1_clk,        CCGR5, CG9,  &pll3_pfd_508m,    NULL);
-DEF_CLK(ssi2_clk,        CCGR5, CG10, &pll3_pfd_508m,    NULL);
-DEF_CLK(ssi3_clk,        CCGR5, CG11, &pll3_pfd_508m,    NULL);
-DEF_CLK(uart_serial_clk,  CCGR5, CG13, &pll3_usb_otg,    NULL);
-DEF_CLK(uart_clk,        CCGR5, CG12, &pll3_80m,         &uart_serial_clk);
-DEF_CLK(usboh3_clk,      CCGR6, CG0,  &ipg_clk,          NULL);
-DEF_CLK(usdhc1_clk,      CCGR6, CG1,  &pll2_pfd_400m,    NULL);
-DEF_CLK(usdhc2_clk,      CCGR6, CG2,  &pll2_pfd_400m,    NULL);
-DEF_CLK(usdhc3_clk,      CCGR6, CG3,  &pll2_pfd_400m,    NULL);
-DEF_CLK(usdhc4_clk,      CCGR6, CG4,  &pll2_pfd_400m,    NULL);
-DEF_CLK(emi_slow_clk,    CCGR6, CG5,  &axi_clk,          NULL);
-DEF_CLK(vdo_axi_clk,     CCGR6, CG6,  &axi_clk,          NULL);
-DEF_CLK(vpu_clk,         CCGR6, CG7,  &axi_clk,          NULL);
-DEF_CLK_1B(cko1_clk,     CCOSR, BP_CCOSR_CKO1_EN, &pll2_bus, NULL);
-
-static int pcie_clk_enable(struct clk *clk)
-{
-       u32 val;
-
-       val = readl_relaxed(PLL8_ENET);
-       val |= BM_PLL_ENET_EN_PCIE;
-       writel_relaxed(val, PLL8_ENET);
-
-       return _clk_enable(clk);
-}
-
-static void pcie_clk_disable(struct clk *clk)
-{
-       u32 val;
-
-       _clk_disable(clk);
-
-       val = readl_relaxed(PLL8_ENET);
-       val &= BM_PLL_ENET_EN_PCIE;
-       writel_relaxed(val, PLL8_ENET);
-}
-
-static struct clk pcie_clk = {
-       .enable_reg = CCGR4,
-       .enable_shift = CG0,
-       .enable = pcie_clk_enable,
-       .disable = pcie_clk_disable,
-       .set_parent = _clk_set_parent,
-       .parent = &axi_clk,
-       .secondary = &pll8_enet,
-};
-
-static int sata_clk_enable(struct clk *clk)
-{
-       u32 val;
-
-       val = readl_relaxed(PLL8_ENET);
-       val |= BM_PLL_ENET_EN_SATA;
-       writel_relaxed(val, PLL8_ENET);
-
-       return _clk_enable(clk);
-}
-
-static void sata_clk_disable(struct clk *clk)
-{
-       u32 val;
-
-       _clk_disable(clk);
-
-       val = readl_relaxed(PLL8_ENET);
-       val &= BM_PLL_ENET_EN_SATA;
-       writel_relaxed(val, PLL8_ENET);
-}
-
-static struct clk sata_clk = {
-       .enable_reg = CCGR5,
-       .enable_shift = CG2,
-       .enable = sata_clk_enable,
-       .disable = sata_clk_disable,
-       .parent = &ipg_clk,
-       .secondary = &pll8_enet,
-};
-
-#define _REGISTER_CLOCK(d, n, c) \
-       { \
-               .dev_id = d, \
-               .con_id = n, \
-               .clk = &c, \
-       }
-
-static struct clk_lookup lookups[] = {
-       _REGISTER_CLOCK("2020000.uart", NULL, uart_clk),
-       _REGISTER_CLOCK("21e8000.uart", NULL, uart_clk),
-       _REGISTER_CLOCK("21ec000.uart", NULL, uart_clk),
-       _REGISTER_CLOCK("21f0000.uart", NULL, uart_clk),
-       _REGISTER_CLOCK("21f4000.uart", NULL, uart_clk),
-       _REGISTER_CLOCK("2188000.enet", NULL, enet_clk),
-       _REGISTER_CLOCK("2190000.usdhc", NULL, usdhc1_clk),
-       _REGISTER_CLOCK("2194000.usdhc", NULL, usdhc2_clk),
-       _REGISTER_CLOCK("2198000.usdhc", NULL, usdhc3_clk),
-       _REGISTER_CLOCK("219c000.usdhc", NULL, usdhc4_clk),
-       _REGISTER_CLOCK("21a0000.i2c", NULL, i2c1_clk),
-       _REGISTER_CLOCK("21a4000.i2c", NULL, i2c2_clk),
-       _REGISTER_CLOCK("21a8000.i2c", NULL, i2c3_clk),
-       _REGISTER_CLOCK("2008000.ecspi", NULL, ecspi1_clk),
-       _REGISTER_CLOCK("200c000.ecspi", NULL, ecspi2_clk),
-       _REGISTER_CLOCK("2010000.ecspi", NULL, ecspi3_clk),
-       _REGISTER_CLOCK("2014000.ecspi", NULL, ecspi4_clk),
-       _REGISTER_CLOCK("2018000.ecspi", NULL, ecspi5_clk),
-       _REGISTER_CLOCK("20ec000.sdma", NULL, sdma_clk),
-       _REGISTER_CLOCK("20bc000.wdog", NULL, dummy_clk),
-       _REGISTER_CLOCK("20c0000.wdog", NULL, dummy_clk),
-       _REGISTER_CLOCK("smp_twd", NULL, twd_clk),
-       _REGISTER_CLOCK(NULL, "ckih", ckih_clk),
-       _REGISTER_CLOCK(NULL, "ckil_clk", ckil_clk),
-       _REGISTER_CLOCK(NULL, "aips_tz1_clk", aips_tz1_clk),
-       _REGISTER_CLOCK(NULL, "aips_tz2_clk", aips_tz2_clk),
-       _REGISTER_CLOCK(NULL, "asrc_clk", asrc_clk),
-       _REGISTER_CLOCK(NULL, "can2_clk", can2_clk),
-       _REGISTER_CLOCK(NULL, "hdmi_isfr_clk", hdmi_isfr_clk),
-       _REGISTER_CLOCK(NULL, "iim_clk", iim_clk),
-       _REGISTER_CLOCK(NULL, "mlb_clk", mlb_clk),
-       _REGISTER_CLOCK(NULL, "openvg_axi_clk", openvg_axi_clk),
-       _REGISTER_CLOCK(NULL, "pwm1_clk", pwm1_clk),
-       _REGISTER_CLOCK(NULL, "pwm2_clk", pwm2_clk),
-       _REGISTER_CLOCK(NULL, "pwm3_clk", pwm3_clk),
-       _REGISTER_CLOCK(NULL, "pwm4_clk", pwm4_clk),
-       _REGISTER_CLOCK(NULL, "gpmi_io_clk", gpmi_io_clk),
-       _REGISTER_CLOCK(NULL, "usboh3_clk", usboh3_clk),
-       _REGISTER_CLOCK(NULL, "sata_clk", sata_clk),
-       _REGISTER_CLOCK(NULL, "cko1_clk", cko1_clk),
-};
-
-int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
-{
-       u32 val = readl_relaxed(CLPCR);
-
-       val &= ~BM_CLPCR_LPM;
-       switch (mode) {
-       case WAIT_CLOCKED:
-               break;
-       case WAIT_UNCLOCKED:
-               val |= 0x1 << BP_CLPCR_LPM;
-               break;
-       case STOP_POWER_ON:
-               val |= 0x2 << BP_CLPCR_LPM;
-               break;
-       case WAIT_UNCLOCKED_POWER_OFF:
-               val |= 0x1 << BP_CLPCR_LPM;
-               val &= ~BM_CLPCR_VSTBY;
-               val &= ~BM_CLPCR_SBYOS;
-               break;
-       case STOP_POWER_OFF:
-               val |= 0x2 << BP_CLPCR_LPM;
-               val |= 0x3 << BP_CLPCR_STBY_COUNT;
-               val |= BM_CLPCR_VSTBY;
-               val |= BM_CLPCR_SBYOS;
-               break;
-       default:
-               return -EINVAL;
-       }
-       writel_relaxed(val, CLPCR);
-
-       return 0;
-}
-
-static struct map_desc imx6q_clock_desc[] = {
-       imx_map_entry(MX6Q, CCM, MT_DEVICE),
-       imx_map_entry(MX6Q, ANATOP, MT_DEVICE),
-};
-
-void __init imx6q_clock_map_io(void)
-{
-       iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
-}
-
-int __init mx6q_clocks_init(void)
-{
-       struct device_node *np;
-       void __iomem *base;
-       int i, irq;
-
-       /* retrieve the freqency of fixed clocks from device tree */
-       for_each_compatible_node(np, NULL, "fixed-clock") {
-               u32 rate;
-               if (of_property_read_u32(np, "clock-frequency", &rate))
-                       continue;
-
-               if (of_device_is_compatible(np, "fsl,imx-ckil"))
-                       external_low_reference = rate;
-               else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
-                       external_high_reference = rate;
-               else if (of_device_is_compatible(np, "fsl,imx-osc"))
-                       oscillator_reference = rate;
-       }
-
-       for (i = 0; i < ARRAY_SIZE(lookups); i++)
-               clkdev_add(&lookups[i]);
-
-       /* only keep necessary clocks on */
-       writel_relaxed(0x3 << CG0  | 0x3 << CG1  | 0x3 << CG2,  CCGR0);
-       writel_relaxed(0x3 << CG8  | 0x3 << CG9  | 0x3 << CG10, CCGR2);
-       writel_relaxed(0x3 << CG10 | 0x3 << CG12,               CCGR3);
-       writel_relaxed(0x3 << CG4  | 0x3 << CG6  | 0x3 << CG7,  CCGR4);
-       writel_relaxed(0x3 << CG0,                              CCGR5);
-       writel_relaxed(0,                                       CCGR6);
-       writel_relaxed(0,                                       CCGR7);
-
-       clk_enable(&uart_clk);
-       clk_enable(&mmdc_ch0_axi_clk);
-
-       clk_set_rate(&pll4_audio, FREQ_650M);
-       clk_set_rate(&pll5_video, FREQ_650M);
-       clk_set_parent(&ipu1_di0_clk, &ipu1_di0_pre_clk);
-       clk_set_parent(&ipu1_di0_pre_clk, &pll5_video);
-       clk_set_parent(&gpu3d_shader_clk, &pll2_pfd_594m);
-       clk_set_rate(&gpu3d_shader_clk, FREQ_594M);
-       clk_set_parent(&gpu3d_core_clk, &mmdc_ch0_axi_clk);
-       clk_set_rate(&gpu3d_core_clk, FREQ_528M);
-       clk_set_parent(&asrc_serial_clk, &pll3_usb_otg);
-       clk_set_rate(&asrc_serial_clk, 1500000);
-       clk_set_rate(&enfc_clk, 11000000);
-
-       /*
-        * Before pinctrl API is available, we have to rely on the pad
-        * configuration set up by bootloader.  For usdhc example here,
-        * u-boot sets up the pads for 49.5 MHz case, and we have to lower
-        * the usdhc clock from 198 to 49.5 MHz to match the pad configuration.
-        *
-        * FIXME: This is should be removed after pinctrl API is available.
-        * At that time, usdhc driver can call pinctrl API to change pad
-        * configuration dynamically per different usdhc clock settings.
-        */
-       clk_set_rate(&usdhc1_clk, 49500000);
-       clk_set_rate(&usdhc2_clk, 49500000);
-       clk_set_rate(&usdhc3_clk, 49500000);
-       clk_set_rate(&usdhc4_clk, 49500000);
-
-       clk_set_parent(&cko1_clk, &ahb_clk);
-
-       np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
-       base = of_iomap(np, 0);
-       WARN_ON(!base);
-       irq = irq_of_parse_and_map(np, 0);
-       mxc_timer_init(&gpt_clk, base, irq);
-
-       return 0;
-}
diff --git a/arch/arm/mach-imx/clock-mx51-mx53.c b/arch/arm/mach-imx/clock-mx51-mx53.c
deleted file mode 100644 (file)
index 0847050..0000000
+++ /dev/null
@@ -1,1675 +0,0 @@
-/*
- * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright (C) 2009-2010 Amit Kucheria <amit.kucheria@canonical.com>
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-#include <linux/of.h>
-
-#include <asm/div64.h>
-
-#include <mach/hardware.h>
-#include <mach/common.h>
-#include <mach/clock.h>
-
-#include "crm-regs-imx5.h"
-
-/* External clock values passed-in by the board code */
-static unsigned long external_high_reference, external_low_reference;
-static unsigned long oscillator_reference, ckih2_reference;
-
-static struct clk osc_clk;
-static struct clk pll1_main_clk;
-static struct clk pll1_sw_clk;
-static struct clk pll2_sw_clk;
-static struct clk pll3_sw_clk;
-static struct clk mx53_pll4_sw_clk;
-static struct clk lp_apm_clk;
-static struct clk periph_apm_clk;
-static struct clk ahb_clk;
-static struct clk ipg_clk;
-static struct clk usboh3_clk;
-static struct clk emi_fast_clk;
-static struct clk ipu_clk;
-static struct clk mipi_hsc1_clk;
-static struct clk esdhc1_clk;
-static struct clk esdhc2_clk;
-static struct clk esdhc3_mx53_clk;
-
-#define MAX_DPLL_WAIT_TRIES    1000 /* 1000 * udelay(1) = 1ms */
-
-/* calculate best pre and post dividers to get the required divider */
-static void __calc_pre_post_dividers(u32 div, u32 *pre, u32 *post,
-       u32 max_pre, u32 max_post)
-{
-       if (div >= max_pre * max_post) {
-               *pre = max_pre;
-               *post = max_post;
-       } else if (div >= max_pre) {
-               u32 min_pre, temp_pre, old_err, err;
-               min_pre = DIV_ROUND_UP(div, max_post);
-               old_err = max_pre;
-               for (temp_pre = max_pre; temp_pre >= min_pre; temp_pre--) {
-                       err = div % temp_pre;
-                       if (err == 0) {
-                               *pre = temp_pre;
-                               break;
-                       }
-                       err = temp_pre - err;
-                       if (err < old_err) {
-                               old_err = err;
-                               *pre = temp_pre;
-                       }
-               }
-               *post = DIV_ROUND_UP(div, *pre);
-       } else {
-               *pre = div;
-               *post = 1;
-       }
-}
-
-static void _clk_ccgr_setclk(struct clk *clk, unsigned mode)
-{
-       u32 reg = __raw_readl(clk->enable_reg);
-
-       reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
-       reg |= mode << clk->enable_shift;
-
-       __raw_writel(reg, clk->enable_reg);
-}
-
-static int _clk_ccgr_enable(struct clk *clk)
-{
-       _clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_ON);
-       return 0;
-}
-
-static void _clk_ccgr_disable(struct clk *clk)
-{
-       _clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_OFF);
-}
-
-static int _clk_ccgr_enable_inrun(struct clk *clk)
-{
-       _clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_IDLE);
-       return 0;
-}
-
-static void _clk_ccgr_disable_inwait(struct clk *clk)
-{
-       _clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_IDLE);
-}
-
-/*
- * For the 4-to-1 muxed input clock
- */
-static inline u32 _get_mux(struct clk *parent, struct clk *m0,
-                          struct clk *m1, struct clk *m2, struct clk *m3)
-{
-       if (parent == m0)
-               return 0;
-       else if (parent == m1)
-               return 1;
-       else if (parent == m2)
-               return 2;
-       else if (parent == m3)
-               return 3;
-       else
-               BUG();
-
-       return -EINVAL;
-}
-
-static inline void __iomem *_mx51_get_pll_base(struct clk *pll)
-{
-       if (pll == &pll1_main_clk)
-               return MX51_DPLL1_BASE;
-       else if (pll == &pll2_sw_clk)
-               return MX51_DPLL2_BASE;
-       else if (pll == &pll3_sw_clk)
-               return MX51_DPLL3_BASE;
-       else
-               BUG();
-
-       return NULL;
-}
-
-static inline void __iomem *_mx53_get_pll_base(struct clk *pll)
-{
-       if (pll == &pll1_main_clk)
-               return MX53_DPLL1_BASE;
-       else if (pll == &pll2_sw_clk)
-               return MX53_DPLL2_BASE;
-       else if (pll == &pll3_sw_clk)
-               return MX53_DPLL3_BASE;
-       else if (pll == &mx53_pll4_sw_clk)
-               return MX53_DPLL4_BASE;
-       else
-               BUG();
-
-       return NULL;
-}
-
-static inline void __iomem *_get_pll_base(struct clk *pll)
-{
-       if (cpu_is_mx51())
-               return _mx51_get_pll_base(pll);
-       else
-               return _mx53_get_pll_base(pll);
-}
-
-static unsigned long clk_pll_get_rate(struct clk *clk)
-{
-       long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
-       unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl;
-       void __iomem *pllbase;
-       s64 temp;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       pllbase = _get_pll_base(clk);
-
-       dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
-       pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
-       dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
-
-       if (pll_hfsm == 0) {
-               dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
-               dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
-               dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
-       } else {
-               dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
-               dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
-               dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
-       }
-       pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
-       mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
-       mfi = (mfi <= 5) ? 5 : mfi;
-       mfd = dp_mfd & MXC_PLL_DP_MFD_MASK;
-       mfn = mfn_abs = dp_mfn & MXC_PLL_DP_MFN_MASK;
-       /* Sign extend to 32-bits */
-       if (mfn >= 0x04000000) {
-               mfn |= 0xFC000000;
-               mfn_abs = -mfn;
-       }
-
-       ref_clk = 2 * parent_rate;
-       if (dbl != 0)
-               ref_clk *= 2;
-
-       ref_clk /= (pdf + 1);
-       temp = (u64) ref_clk * mfn_abs;
-       do_div(temp, mfd + 1);
-       if (mfn < 0)
-               temp = -temp;
-       temp = (ref_clk * mfi) + temp;
-
-       return temp;
-}
-
-static int _clk_pll_set_rate(struct clk *clk, unsigned long rate)
-{
-       u32 reg;
-       void __iomem *pllbase;
-
-       long mfi, pdf, mfn, mfd = 999999;
-       s64 temp64;
-       unsigned long quad_parent_rate;
-       unsigned long pll_hfsm, dp_ctl;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       pllbase = _get_pll_base(clk);
-
-       quad_parent_rate = 4 * parent_rate;
-       pdf = mfi = -1;
-       while (++pdf < 16 && mfi < 5)
-               mfi = rate * (pdf+1) / quad_parent_rate;
-       if (mfi > 15)
-               return -EINVAL;
-       pdf--;
-
-       temp64 = rate * (pdf+1) - quad_parent_rate * mfi;
-       do_div(temp64, quad_parent_rate/1000000);
-       mfn = (long)temp64;
-
-       dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
-       /* use dpdck0_2 */
-       __raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
-       pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
-       if (pll_hfsm == 0) {
-               reg = mfi << 4 | pdf;
-               __raw_writel(reg, pllbase + MXC_PLL_DP_OP);
-               __raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
-               __raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
-       } else {
-               reg = mfi << 4 | pdf;
-               __raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
-               __raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
-               __raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
-       }
-
-       return 0;
-}
-
-static int _clk_pll_enable(struct clk *clk)
-{
-       u32 reg;
-       void __iomem *pllbase;
-       int i = 0;
-
-       pllbase = _get_pll_base(clk);
-       reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
-       if (reg & MXC_PLL_DP_CTL_UPEN)
-               return 0;
-
-       reg |= MXC_PLL_DP_CTL_UPEN;
-       __raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
-
-       /* Wait for lock */
-       do {
-               reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
-               if (reg & MXC_PLL_DP_CTL_LRF)
-                       break;
-
-               udelay(1);
-       } while (++i < MAX_DPLL_WAIT_TRIES);
-
-       if (i == MAX_DPLL_WAIT_TRIES) {
-               pr_err("MX5: pll locking failed\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static void _clk_pll_disable(struct clk *clk)
-{
-       u32 reg;
-       void __iomem *pllbase;
-
-       pllbase = _get_pll_base(clk);
-       reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) & ~MXC_PLL_DP_CTL_UPEN;
-       __raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
-}
-
-static int _clk_pll1_sw_set_parent(struct clk *clk, struct clk *parent)
-{
-       u32 reg, step;
-
-       reg = __raw_readl(MXC_CCM_CCSR);
-
-       /* When switching from pll_main_clk to a bypass clock, first select a
-        * multiplexed clock in 'step_sel', then shift the glitchless mux
-        * 'pll1_sw_clk_sel'.
-        *
-        * When switching back, do it in reverse order
-        */
-       if (parent == &pll1_main_clk) {
-               /* Switch to pll1_main_clk */
-               reg &= ~MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
-               __raw_writel(reg, MXC_CCM_CCSR);
-               /* step_clk mux switched to lp_apm, to save power. */
-               reg = __raw_readl(MXC_CCM_CCSR);
-               reg &= ~MXC_CCM_CCSR_STEP_SEL_MASK;
-               reg |= (MXC_CCM_CCSR_STEP_SEL_LP_APM <<
-                               MXC_CCM_CCSR_STEP_SEL_OFFSET);
-       } else {
-               if (parent == &lp_apm_clk) {
-                       step = MXC_CCM_CCSR_STEP_SEL_LP_APM;
-               } else  if (parent == &pll2_sw_clk) {
-                       step = MXC_CCM_CCSR_STEP_SEL_PLL2_DIVIDED;
-               } else  if (parent == &pll3_sw_clk) {
-                       step = MXC_CCM_CCSR_STEP_SEL_PLL3_DIVIDED;
-               } else
-                       return -EINVAL;
-
-               reg &= ~MXC_CCM_CCSR_STEP_SEL_MASK;
-               reg |= (step << MXC_CCM_CCSR_STEP_SEL_OFFSET);
-
-               __raw_writel(reg, MXC_CCM_CCSR);
-               /* Switch to step_clk */
-               reg = __raw_readl(MXC_CCM_CCSR);
-               reg |= MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
-       }
-       __raw_writel(reg, MXC_CCM_CCSR);
-       return 0;
-}
-
-static unsigned long clk_pll1_sw_get_rate(struct clk *clk)
-{
-       u32 reg, div;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       reg = __raw_readl(MXC_CCM_CCSR);
-
-       if (clk->parent == &pll2_sw_clk) {
-               div = ((reg & MXC_CCM_CCSR_PLL2_PODF_MASK) >>
-                      MXC_CCM_CCSR_PLL2_PODF_OFFSET) + 1;
-       } else if (clk->parent == &pll3_sw_clk) {
-               div = ((reg & MXC_CCM_CCSR_PLL3_PODF_MASK) >>
-                      MXC_CCM_CCSR_PLL3_PODF_OFFSET) + 1;
-       } else
-               div = 1;
-       return parent_rate / div;
-}
-
-static int _clk_pll2_sw_set_parent(struct clk *clk, struct clk *parent)
-{
-       u32 reg;
-
-       reg = __raw_readl(MXC_CCM_CCSR);
-
-       if (parent == &pll2_sw_clk)
-               reg &= ~MXC_CCM_CCSR_PLL2_SW_CLK_SEL;
-       else
-               reg |= MXC_CCM_CCSR_PLL2_SW_CLK_SEL;
-
-       __raw_writel(reg, MXC_CCM_CCSR);
-       return 0;
-}
-
-static int _clk_lp_apm_set_parent(struct clk *clk, struct clk *parent)
-{
-       u32 reg;
-
-       if (parent == &osc_clk)
-               reg = __raw_readl(MXC_CCM_CCSR) & ~MXC_CCM_CCSR_LP_APM_SEL;
-       else
-               return -EINVAL;
-
-       __raw_writel(reg, MXC_CCM_CCSR);
-
-       return 0;
-}
-
-static unsigned long clk_cpu_get_rate(struct clk *clk)
-{
-       u32 cacrr, div;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-       cacrr = __raw_readl(MXC_CCM_CACRR);
-       div = (cacrr & MXC_CCM_CACRR_ARM_PODF_MASK) + 1;
-
-       return parent_rate / div;
-}
-
-static int clk_cpu_set_rate(struct clk *clk, unsigned long rate)
-{
-       u32 reg, cpu_podf;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-       cpu_podf = parent_rate / rate - 1;
-       /* use post divider to change freq */
-       reg = __raw_readl(MXC_CCM_CACRR);
-       reg &= ~MXC_CCM_CACRR_ARM_PODF_MASK;
-       reg |= cpu_podf << MXC_CCM_CACRR_ARM_PODF_OFFSET;
-       __raw_writel(reg, MXC_CCM_CACRR);
-
-       return 0;
-}
-
-static int _clk_periph_apm_set_parent(struct clk *clk, struct clk *parent)
-{
-       u32 reg, mux;
-       int i = 0;
-
-       mux = _get_mux(parent, &pll1_sw_clk, &pll3_sw_clk, &lp_apm_clk, NULL);
-
-       reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_PERIPH_CLK_SEL_MASK;
-       reg |= mux << MXC_CCM_CBCMR_PERIPH_CLK_SEL_OFFSET;
-       __raw_writel(reg, MXC_CCM_CBCMR);
-
-       /* Wait for lock */
-       do {
-               reg = __raw_readl(MXC_CCM_CDHIPR);
-               if (!(reg &  MXC_CCM_CDHIPR_PERIPH_CLK_SEL_BUSY))
-                       break;
-
-               udelay(1);
-       } while (++i < MAX_DPLL_WAIT_TRIES);
-
-       if (i == MAX_DPLL_WAIT_TRIES) {
-               pr_err("MX5: Set parent for periph_apm clock failed\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int _clk_main_bus_set_parent(struct clk *clk, struct clk *parent)
-{
-       u32 reg;
-
-       reg = __raw_readl(MXC_CCM_CBCDR);
-
-       if (parent == &pll2_sw_clk)
-               reg &= ~MXC_CCM_CBCDR_PERIPH_CLK_SEL;
-       else if (parent == &periph_apm_clk)
-               reg |= MXC_CCM_CBCDR_PERIPH_CLK_SEL;
-       else
-               return -EINVAL;
-
-       __raw_writel(reg, MXC_CCM_CBCDR);
-
-       return 0;
-}
-
-static struct clk main_bus_clk = {
-       .parent = &pll2_sw_clk,
-       .set_parent = _clk_main_bus_set_parent,
-};
-
-static unsigned long clk_ahb_get_rate(struct clk *clk)
-{
-       u32 reg, div;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       reg = __raw_readl(MXC_CCM_CBCDR);
-       div = ((reg & MXC_CCM_CBCDR_AHB_PODF_MASK) >>
-              MXC_CCM_CBCDR_AHB_PODF_OFFSET) + 1;
-       return parent_rate / div;
-}
-
-
-static int _clk_ahb_set_rate(struct clk *clk, unsigned long rate)
-{
-       u32 reg, div;
-       unsigned long parent_rate;
-       int i = 0;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       div = parent_rate / rate;
-       if (div > 8 || div < 1 || ((parent_rate / div) != rate))
-               return -EINVAL;
-
-       reg = __raw_readl(MXC_CCM_CBCDR);
-       reg &= ~MXC_CCM_CBCDR_AHB_PODF_MASK;
-       reg |= (div - 1) << MXC_CCM_CBCDR_AHB_PODF_OFFSET;
-       __raw_writel(reg, MXC_CCM_CBCDR);
-
-       /* Wait for lock */
-       do {
-               reg = __raw_readl(MXC_CCM_CDHIPR);
-               if (!(reg & MXC_CCM_CDHIPR_AHB_PODF_BUSY))
-                       break;
-
-               udelay(1);
-       } while (++i < MAX_DPLL_WAIT_TRIES);
-
-       if (i == MAX_DPLL_WAIT_TRIES) {
-               pr_err("MX5: clk_ahb_set_rate failed\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static unsigned long _clk_ahb_round_rate(struct clk *clk,
-                                               unsigned long rate)
-{
-       u32 div;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       div = parent_rate / rate;
-       if (div > 8)
-               div = 8;
-       else if (div == 0)
-               div++;
-       return parent_rate / div;
-}
-
-
-static int _clk_max_enable(struct clk *clk)
-{
-       u32 reg;
-
-       _clk_ccgr_enable(clk);
-
-       /* Handshake with MAX when LPM is entered. */
-       reg = __raw_readl(MXC_CCM_CLPCR);
-       if (cpu_is_mx51())
-               reg &= ~MX51_CCM_CLPCR_BYPASS_MAX_LPM_HS;
-       else if (cpu_is_mx53())
-               reg &= ~MX53_CCM_CLPCR_BYPASS_MAX_LPM_HS;
-       __raw_writel(reg, MXC_CCM_CLPCR);
-
-       return 0;
-}
-
-static void _clk_max_disable(struct clk *clk)
-{
-       u32 reg;
-
-       _clk_ccgr_disable_inwait(clk);
-
-       /* No Handshake with MAX when LPM is entered as its disabled. */
-       reg = __raw_readl(MXC_CCM_CLPCR);
-       if (cpu_is_mx51())
-               reg |= MX51_CCM_CLPCR_BYPASS_MAX_LPM_HS;
-       else if (cpu_is_mx53())
-               reg &= ~MX53_CCM_CLPCR_BYPASS_MAX_LPM_HS;
-       __raw_writel(reg, MXC_CCM_CLPCR);
-}
-
-static unsigned long clk_ipg_get_rate(struct clk *clk)
-{
-       u32 reg, div;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       reg = __raw_readl(MXC_CCM_CBCDR);
-       div = ((reg & MXC_CCM_CBCDR_IPG_PODF_MASK) >>
-              MXC_CCM_CBCDR_IPG_PODF_OFFSET) + 1;
-
-       return parent_rate / div;
-}
-
-static unsigned long clk_ipg_per_get_rate(struct clk *clk)
-{
-       u32 reg, prediv1, prediv2, podf;
-       unsigned long parent_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       if (clk->parent == &main_bus_clk || clk->parent == &lp_apm_clk) {
-               /* the main_bus_clk is the one before the DVFS engine */
-               reg = __raw_readl(MXC_CCM_CBCDR);
-               prediv1 = ((reg & MXC_CCM_CBCDR_PERCLK_PRED1_MASK) >>
-                          MXC_CCM_CBCDR_PERCLK_PRED1_OFFSET) + 1;
-               prediv2 = ((reg & MXC_CCM_CBCDR_PERCLK_PRED2_MASK) >>
-                          MXC_CCM_CBCDR_PERCLK_PRED2_OFFSET) + 1;
-               podf = ((reg & MXC_CCM_CBCDR_PERCLK_PODF_MASK) >>
-                       MXC_CCM_CBCDR_PERCLK_PODF_OFFSET) + 1;
-               return parent_rate / (prediv1 * prediv2 * podf);
-       } else if (clk->parent == &ipg_clk)
-               return parent_rate;
-       else
-               BUG();
-}
-
-static int _clk_ipg_per_set_parent(struct clk *clk, struct clk *parent)
-{
-       u32 reg;
-
-       reg = __raw_readl(MXC_CCM_CBCMR);
-
-       reg &= ~MXC_CCM_CBCMR_PERCLK_LP_APM_CLK_SEL;
-       reg &= ~MXC_CCM_CBCMR_PERCLK_IPG_CLK_SEL;
-
-       if (parent == &ipg_clk)
-               reg |= MXC_CCM_CBCMR_PERCLK_IPG_CLK_SEL;
-       else if (parent == &lp_apm_clk)
-               reg |= MXC_CCM_CBCMR_PERCLK_LP_APM_CLK_SEL;
-       else if (parent != &main_bus_clk)
-               return -EINVAL;
-
-       __raw_writel(reg, MXC_CCM_CBCMR);
-
-       return 0;
-}
-
-#define clk_nfc_set_parent     NULL
-
-static unsigned long clk_nfc_get_rate(struct clk *clk)
-{
-       unsigned long rate;
-       u32 reg, div;
-
-       reg = __raw_readl(MXC_CCM_CBCDR);
-       div = ((reg & MXC_CCM_CBCDR_NFC_PODF_MASK) >>
-              MXC_CCM_CBCDR_NFC_PODF_OFFSET) + 1;
-       rate = clk_get_rate(clk->parent) / div;
-       WARN_ON(rate == 0);
-       return rate;
-}
-
-static unsigned long clk_nfc_round_rate(struct clk *clk,
-                                               unsigned long rate)
-{
-       u32 div;
-       unsigned long parent_rate = clk_get_rate(clk->parent);
-
-       if (!rate)
-               return -EINVAL;
-
-       div = parent_rate / rate;
-
-       if (parent_rate % rate)
-               div++;
-
-       if (div > 8)
-               return -EINVAL;
-
-       return parent_rate / div;
-
-}
-
-static int clk_nfc_set_rate(struct clk *clk, unsigned long rate)
-{
-       u32 reg, div;
-
-       div = clk_get_rate(clk->parent) / rate;
-       if (div == 0)
-               div++;
-       if (((clk_get_rate(clk->parent) / div) != rate) || (div > 8))
-               return -EINVAL;
-
-       reg = __raw_readl(MXC_CCM_CBCDR);
-       reg &= ~MXC_CCM_CBCDR_NFC_PODF_MASK;
-       reg |= (div - 1) << MXC_CCM_CBCDR_NFC_PODF_OFFSET;
-       __raw_writel(reg, MXC_CCM_CBCDR);
-
-       while (__raw_readl(MXC_CCM_CDHIPR) &
-                       MXC_CCM_CDHIPR_NFC_IPG_INT_MEM_PODF_BUSY){
-       }
-
-       return 0;
-}
-
-static unsigned long get_high_reference_clock_rate(struct clk *clk)
-{
-       return external_high_reference;
-}
-
-static unsigned long get_low_reference_clock_rate(struct clk *clk)
-{
-       return external_low_reference;
-}
-
-static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
-{
-       return oscillator_reference;
-}
-
-static unsigned long get_ckih2_reference_clock_rate(struct clk *clk)
-{
-       return ckih2_reference;
-}
-
-static unsigned long clk_emi_slow_get_rate(struct clk *clk)
-{
-       u32 reg, div;
-
-       reg = __raw_readl(MXC_CCM_CBCDR);
-       div = ((reg & MXC_CCM_CBCDR_EMI_PODF_MASK) >>
-              MXC_CCM_CBCDR_EMI_PODF_OFFSET) + 1;
-
-       return clk_get_rate(clk->parent) / div;
-}
-
-static unsigned long _clk_ddr_hf_get_rate(struct clk *clk)
-{
-       unsigned long rate;
-       u32 reg, div;
-
-       reg = __raw_readl(MXC_CCM_CBCDR);
-       div = ((reg & MXC_CCM_CBCDR_DDR_PODF_MASK) >>
-               MXC_CCM_CBCDR_DDR_PODF_OFFSET) + 1;
-       rate = clk_get_rate(clk->parent) / div;
-
-       return rate;
-}
-
-/* External high frequency clock */
-static struct clk ckih_clk = {
-       .get_rate = get_high_reference_clock_rate,
-};
-
-static struct clk ckih2_clk = {
-       .get_rate = get_ckih2_reference_clock_rate,
-};
-
-static struct clk osc_clk = {
-       .get_rate = get_oscillator_reference_clock_rate,
-};
-
-/* External low frequency (32kHz) clock */
-static struct clk ckil_clk = {
-       .get_rate = get_low_reference_clock_rate,
-};
-
-static struct clk pll1_main_clk = {
-       .parent = &osc_clk,
-       .get_rate = clk_pll_get_rate,
-       .enable = _clk_pll_enable,
-       .disable = _clk_pll_disable,
-};
-
-/* Clock tree block diagram (WIP):
- *     CCM: Clock Controller Module
- *
- * PLL output -> |
- *               | CCM Switcher -> CCM_CLK_ROOT_GEN ->
- * PLL bypass -> |
- *
- */
-
-/* PLL1 SW supplies to ARM core */
-static struct clk pll1_sw_clk = {
-       .parent = &pll1_main_clk,
-       .set_parent = _clk_pll1_sw_set_parent,
-       .get_rate = clk_pll1_sw_get_rate,
-};
-
-/* PLL2 SW supplies to AXI/AHB/IP buses */
-static struct clk pll2_sw_clk = {
-       .parent = &osc_clk,
-       .get_rate = clk_pll_get_rate,
-       .set_rate = _clk_pll_set_rate,
-       .set_parent = _clk_pll2_sw_set_parent,
-       .enable = _clk_pll_enable,
-       .disable = _clk_pll_disable,
-};
-
-/* PLL3 SW supplies to serial clocks like USB, SSI, etc. */
-static struct clk pll3_sw_clk = {
-       .parent = &osc_clk,
-       .set_rate = _clk_pll_set_rate,
-       .get_rate = clk_pll_get_rate,
-       .enable = _clk_pll_enable,
-       .disable = _clk_pll_disable,
-};
-
-/* PLL4 SW supplies to LVDS Display Bridge(LDB) */
-static struct clk mx53_pll4_sw_clk = {
-       .parent = &osc_clk,
-       .set_rate = _clk_pll_set_rate,
-       .enable = _clk_pll_enable,
-       .disable = _clk_pll_disable,
-};
-
-/* Low-power Audio Playback Mode clock */
-static struct clk lp_apm_clk = {
-       .parent = &osc_clk,
-       .set_parent = _clk_lp_apm_set_parent,
-};
-
-static struct clk periph_apm_clk = {
-       .parent = &pll1_sw_clk,
-       .set_parent = _clk_periph_apm_set_parent,
-};
-
-static struct clk cpu_clk = {
-       .parent = &pll1_sw_clk,
-       .get_rate = clk_cpu_get_rate,
-       .set_rate = clk_cpu_set_rate,
-};
-
-static struct clk ahb_clk = {
-       .parent = &main_bus_clk,
-       .get_rate = clk_ahb_get_rate,
-       .set_rate = _clk_ahb_set_rate,
-       .round_rate = _clk_ahb_round_rate,
-};
-
-static struct clk iim_clk = {
-       .parent = &ipg_clk,
-       .enable_reg = MXC_CCM_CCGR0,
-       .enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
-};
-
-/* Main IP interface clock for access to registers */
-static struct clk ipg_clk = {
-       .parent = &ahb_clk,
-       .get_rate = clk_ipg_get_rate,
-};
-
-static struct clk ipg_perclk = {
-       .parent = &lp_apm_clk,
-       .get_rate = clk_ipg_per_get_rate,
-       .set_parent = _clk_ipg_per_set_parent,
-};
-
-static struct clk ahb_max_clk = {
-       .parent = &ahb_clk,
-       .enable_reg = MXC_CCM_CCGR0,
-       .enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
-       .enable = _clk_max_enable,
-       .disable = _clk_max_disable,
-};
-
-static struct clk aips_tz1_clk = {
-       .parent = &ahb_clk,
-       .secondary = &ahb_max_clk,
-       .enable_reg = MXC_CCM_CCGR0,
-       .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
-       .enable = _clk_ccgr_enable,
-       .disable = _clk_ccgr_disable_inwait,
-};
-
-static struct clk aips_tz2_clk = {
-       .parent = &ahb_clk,
-       .secondary = &ahb_max_clk,
-       .enable_reg = MXC_CCM_CCGR0,
-       .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
-       .enable = _clk_ccgr_enable,
-       .disable = _clk_ccgr_disable_inwait,
-};
-
-static struct clk gpc_dvfs_clk = {
-       .enable_reg = MXC_CCM_CCGR5,
-       .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
-       .enable = _clk_ccgr_enable,
-       .disable = _clk_ccgr_disable,
-};
-
-static struct clk gpt_32k_clk = {
-       .id = 0,
-       .parent = &ckil_clk,
-};
-
-static struct clk dummy_clk = {
-       .id = 0,
-};
-
-static struct clk emi_slow_clk = {
-       .parent = &pll2_sw_clk,
-       .enable_reg = MXC_CCM_CCGR5,
-       .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
-       .enable = _clk_ccgr_enable,
-       .disable = _clk_ccgr_disable_inwait,
-       .get_rate = clk_emi_slow_get_rate,
-};
-
-static int clk_ipu_enable(struct clk *clk)
-{
-       u32 reg;
-
-       _clk_ccgr_enable(clk);
-
-       /* Enable handshake with IPU when certain clock rates are changed */
-       reg = __raw_readl(MXC_CCM_CCDR);
-       reg &= ~MXC_CCM_CCDR_IPU_HS_MASK;
-       __raw_writel(reg, MXC_CCM_CCDR);
-
-       /* Enable handshake with IPU when LPM is entered */
-       reg = __raw_readl(MXC_CCM_CLPCR);
-       reg &= ~MXC_CCM_CLPCR_BYPASS_IPU_LPM_HS;
-       __raw_writel(reg, MXC_CCM_CLPCR);
-
-       return 0;
-}
-
-static void clk_ipu_disable(struct clk *clk)
-{
-       u32 reg;
-
-       _clk_ccgr_disable(clk);
-
-       /* Disable handshake with IPU whe dividers are changed */
-       reg = __raw_readl(MXC_CCM_CCDR);
-       reg |= MXC_CCM_CCDR_IPU_HS_MASK;
-       __raw_writel(reg, MXC_CCM_CCDR);
-
-       /* Disable handshake with IPU when LPM is entered */
-       reg = __raw_readl(MXC_CCM_CLPCR);
-       reg |= MXC_CCM_CLPCR_BYPASS_IPU_LPM_HS;
-       __raw_writel(reg, MXC_CCM_CLPCR);
-}
-
-static struct clk ahbmux1_clk = {
-       .parent = &ahb_clk,
-       .secondary = &ahb_max_clk,
-       .enable_reg = MXC_CCM_CCGR0,
-       .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
-       .enable = _clk_ccgr_enable,
-       .disable = _clk_ccgr_disable_inwait,
-};
-
-static struct clk ipu_sec_clk = {
-       .parent = &emi_fast_clk,
-       .secondary = &ahbmux1_clk,
-};
-
-static struct clk ddr_hf_clk = {
-       .parent = &pll1_sw_clk,
-       .get_rate = _clk_ddr_hf_get_rate,
-};
-
-static struct clk ddr_clk = {
-       .parent = &ddr_hf_clk,
-};
-
-/* clock definitions for MIPI HSC unit which has been removed
- * from documentation, but not from hardware
- */
-static int _clk_hsc_enable(struct clk *clk)
-{
-       u32 reg;
-
-       _clk_ccgr_enable(clk);
-       /* Handshake with IPU when certain clock rates are changed. */
-       reg = __raw_readl(MXC_CCM_CCDR);
-       reg &= ~MXC_CCM_CCDR_HSC_HS_MASK;
-       __raw_writel(reg, MXC_CCM_CCDR);
-
-       reg = __raw_readl(MXC_CCM_CLPCR);
-       reg &= ~MXC_CCM_CLPCR_BYPASS_HSC_LPM_HS;
-       __raw_writel(reg, MXC_CCM_CLPCR);
-
-       return 0;
-}
-
-static void _clk_hsc_disable(struct clk *clk)
-{
-       u32 reg;
-
-       _clk_ccgr_disable(clk);
-       /* No handshake with HSC as its not enabled. */
-       reg = __raw_readl(MXC_CCM_CCDR);
-       reg |= MXC_CCM_CCDR_HSC_HS_MASK;
-       __raw_writel(reg, MXC_CCM_CCDR);
-
-       reg = __raw_readl(MXC_CCM_CLPCR);
-       reg |= MXC_CCM_CLPCR_BYPASS_HSC_LPM_HS;
-       __raw_writel(reg, MXC_CCM_CLPCR);
-}
-
-static struct clk mipi_hsp_clk = {
-       .parent = &ipu_clk,
-       .enable_reg = MXC_CCM_CCGR4,
-       .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
-       .enable = _clk_hsc_enable,
-       .disable = _clk_hsc_disable,
-       .secondary = &mipi_hsc1_clk,
-};
-
-#define DEFINE_CLOCK_CCGR(name, i, er, es, pfx, p, s)  \
-       static struct clk name = {                      \
-               .id             = i,                    \
-               .enable_reg     = er,                   \
-               .enable_shift   = es,                   \
-               .get_rate       = pfx##_get_rate,       \
-               .set_rate       = pfx##_set_rate,       \
-               .round_rate     = pfx##_round_rate,     \
-               .set_parent     = pfx##_set_parent,     \
-               .enable         = _clk_ccgr_enable,     \
-               .disable        = _clk_ccgr_disable,    \
-               .parent         = p,                    \
-               .secondary      = s,                    \
-       }
-
-#define DEFINE_CLOCK_MAX(name, i, er, es, pfx, p, s)   \
-       static struct clk name = {                      \
-               .id             = i,                    \
-               .enable_reg     = er,                   \
-               .enable_shift   = es,                   \
-               .get_rate       = pfx##_get_rate,       \
-               .set_rate       = pfx##_set_rate,       \
-               .set_parent     = pfx##_set_parent,     \
-               .enable         = _clk_max_enable,      \
-               .disable        = _clk_max_disable,     \
-               .parent         = p,                    \
-               .secondary      = s,                    \
-       }
-
-#define CLK_GET_RATE(name, nr, bitsname)                               \
-static unsigned long clk_##name##_get_rate(struct clk *clk)            \
-{                                                                      \
-       u32 reg, pred, podf;                                            \
-                                                                       \
-       reg = __raw_readl(MXC_CCM_CSCDR##nr);                           \
-       pred = (reg & MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK)   \
-               >> MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET;    \
-       podf = (reg & MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK)   \
-               >> MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET;    \
-                                                                       \
-       return DIV_ROUND_CLOSEST(clk_get_rate(clk->parent),             \
-                       (pred + 1) * (podf + 1));                       \
-}
-
-#define CLK_SET_PARENT(name, nr, bitsname)                             \
-static int clk_##name##_set_parent(struct clk *clk, struct clk *parent)        \
-{                                                                      \
-       u32 reg, mux;                                                   \
-                                                                       \
-       mux = _get_mux(parent, &pll1_sw_clk, &pll2_sw_clk,              \
-                       &pll3_sw_clk, &lp_apm_clk);                     \
-       reg = __raw_readl(MXC_CCM_CSCMR##nr) &                          \
-               ~MXC_CCM_CSCMR##nr##_##bitsname##_CLK_SEL_MASK;         \
-       reg |= mux << MXC_CCM_CSCMR##nr##_##bitsname##_CLK_SEL_OFFSET;  \
-       __raw_writel(reg, MXC_CCM_CSCMR##nr);                           \
-                                                                       \
-       return 0;                                                       \
-}
-
-#define CLK_SET_RATE(name, nr, bitsname)                               \
-static int clk_##name##_set_rate(struct clk *clk, unsigned long rate)  \
-{                                                                      \
-       u32 reg, div, parent_rate;                                      \
-       u32 pre = 0, post = 0;                                          \
-                                                                       \
-       parent_rate = clk_get_rate(clk->parent);                        \
-       div = parent_rate / rate;                                       \
-                                                                       \
-       if ((parent_rate / div) != rate)                                \
-               return -EINVAL;                                         \
-                                                                       \
-       __calc_pre_post_dividers(div, &pre, &post,                      \
-               (MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK >>      \
-               MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET) + 1,  \
-               (MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK >>      \
-               MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET) + 1);\
-                                                                       \
-       /* Set sdhc1 clock divider */                                   \
-       reg = __raw_readl(MXC_CCM_CSCDR##nr) &                          \
-               ~(MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK        \
-               | MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK);      \
-       reg |= (post - 1) <<                                            \
-               MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET;       \
-       reg |= (pre - 1) <<                                             \
-               MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET;       \
-       __raw_writel(reg, MXC_CCM_CSCDR##nr);                           \
-                                                                       \
-       return 0;                                                       \
-}
-
-/* UART */
-CLK_GET_RATE(uart, 1, UART)
-CLK_SET_PARENT(uart, 1, UART)
-
-static struct clk uart_root_clk = {
-       .parent = &pll2_sw_clk,
-       .get_rate = clk_uart_get_rate,
-       .set_parent = clk_uart_set_parent,
-};
-
-/* USBOH3 */
-CLK_GET_RATE(usboh3, 1, USBOH3)
-CLK_SET_PARENT(usboh3, 1, USBOH3)
-
-static struct clk usboh3_clk = {
-       .parent = &pll2_sw_clk,
-       .get_rate = clk_usboh3_get_rate,
-       .set_parent = clk_usboh3_set_parent,
-       .enable = _clk_ccgr_enable,
-       .disable = _clk_ccgr_disable,
-       .enable_reg = MXC_CCM_CCGR2,
-       .enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
-};
-
-static struct clk usb_ahb_clk = {
-       .parent = &ipg_clk,
-       .enable = _clk_ccgr_enable,
-       .disable = _clk_ccgr_disable,
-       .enable_reg = MXC_CCM_CCGR2,
-       .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
-};
-
-static int clk_usb_phy1_set_parent(struct clk *clk, struct clk *parent)
-{
-       u32 reg;
-
-       reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USB_PHY_CLK_SEL;
-
-       if (parent == &pll3_sw_clk)
-               reg |= 1 << MXC_CCM_CSCMR1_USB_PHY_CLK_SEL_OFFSET;
-
-       __raw_writel(reg, MXC_CCM_CSCMR1);
-
-       return 0;
-}
-
-static struct clk usb_phy1_clk = {
-       .parent = &pll3_sw_clk,
-       .set_parent = clk_usb_phy1_set_parent,
-       .enable = _clk_ccgr_enable,
-       .enable_reg = MXC_CCM_CCGR2,
-       .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
-       .disable = _clk_ccgr_disable,
-};
-
-/* eCSPI */
-CLK_GET_RATE(ecspi, 2, CSPI)
-CLK_SET_PARENT(ecspi, 1, CSPI)
-
-static struct clk ecspi_main_clk = {
-       .parent = &pll3_sw_clk,
-       .get_rate = clk_ecspi_get_rate,
-       .set_parent = clk_ecspi_set_parent,
-};
-
-/* eSDHC */
-CLK_GET_RATE(esdhc1, 1, ESDHC1_MSHC1)
-CLK_SET_PARENT(esdhc1, 1, ESDHC1_MSHC1)
-CLK_SET_RATE(esdhc1, 1, ESDHC1_MSHC1)
-
-/* mx51 specific */
-CLK_GET_RATE(esdhc2, 1, ESDHC2_MSHC2)
-CLK_SET_PARENT(esdhc2, 1, ESDHC2_MSHC2)
-CLK_SET_RATE(esdhc2, 1, ESDHC2_MSHC2)
-
-static int clk_esdhc3_set_parent(struct clk *clk, struct clk *parent)
-{
-       u32 reg;
-
-       reg = __raw_readl(MXC_CCM_CSCMR1);
-       if (parent == &esdhc1_clk)
-               reg &= ~MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
-       else if (parent == &esdhc2_clk)
-               reg |= MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
-       else
-               return -EINVAL;
-       __raw_writel(reg, MXC_CCM_CSCMR1);
-
-       return 0;
-}
-
-static int clk_esdhc4_set_parent(struct clk *clk, struct clk *parent)
-{
-       u32 reg;
-
-       reg = __raw_readl(MXC_CCM_CSCMR1);
-       if (parent == &esdhc1_clk)
-               reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
-       else if (parent == &esdhc2_clk)
-               reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
-       else
-               return -EINVAL;
-       __raw_writel(reg, MXC_CCM_CSCMR1);
-
-       return 0;
-}
-
-/* mx53 specific */
-static int clk_esdhc2_mx53_set_parent(struct clk *clk, struct clk *parent)
-{
-       u32 reg;
-
-       reg = __raw_readl(MXC_CCM_CSCMR1);
-       if (parent == &esdhc1_clk)
-               reg &= ~MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
-       else if (parent == &esdhc3_mx53_clk)
-               reg |= MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
-       else
-               return -EINVAL;
-       __raw_writel(reg, MXC_CCM_CSCMR1);
-
-       return 0;
-}
-
-CLK_GET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)
-CLK_SET_PARENT(esdhc3_mx53, 1, ESDHC3_MX53)
-CLK_SET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)
-
-static int clk_esdhc4_mx53_set_parent(struct clk *clk, struct clk *parent)
-{
-       u32 reg;
-
-       reg = __raw_readl(MXC_CCM_CSCMR1);
-       if (parent == &esdhc1_clk)
-               reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
-       else if (parent == &esdhc3_mx53_clk)
-               reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
-       else
-               return -EINVAL;
-       __raw_writel(reg, MXC_CCM_CSCMR1);
-
-       return 0;
-}
-
-#define DEFINE_CLOCK_FULL(name, i, er, es, gr, sr, e, d, p, s)         \
-       static struct clk name = {                                      \
-               .id             = i,                                    \
-               .enable_reg     = er,                                   \
-               .enable_shift   = es,                                   \
-               .get_rate       = gr,                                   \
-               .set_rate       = sr,                                   \
-               .enable         = e,                                    \
-               .disable        = d,                                    \
-               .parent         = p,                                    \
-               .secondary      = s,                                    \
-       }
-
-#define DEFINE_CLOCK(name, i, er, es, gr, sr, p, s)                    \
-       DEFINE_CLOCK_FULL(name, i, er, es, gr, sr, _clk_ccgr_enable, _clk_ccgr_disable, p, s)
-
-/* Shared peripheral bus arbiter */
-DEFINE_CLOCK(spba_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG0_OFFSET,
-       NULL,  NULL, &ipg_clk, NULL);
-
-/* UART */
-DEFINE_CLOCK(uart1_ipg_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG3_OFFSET,
-       NULL,  NULL, &ipg_clk, &aips_tz1_clk);
-DEFINE_CLOCK(uart2_ipg_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG5_OFFSET,
-       NULL,  NULL, &ipg_clk, &aips_tz1_clk);
-DEFINE_CLOCK(uart3_ipg_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG7_OFFSET,
-       NULL,  NULL, &ipg_clk, &spba_clk);
-DEFINE_CLOCK(uart4_ipg_clk, 3, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG4_OFFSET,
-       NULL,  NULL, &ipg_clk, &spba_clk);
-DEFINE_CLOCK(uart5_ipg_clk, 4, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG6_OFFSET,
-       NULL,  NULL, &ipg_clk, &spba_clk);
-DEFINE_CLOCK(uart1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG4_OFFSET,
-       NULL,  NULL, &uart_root_clk, &uart1_ipg_clk);
-DEFINE_CLOCK(uart2_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG6_OFFSET,
-       NULL,  NULL, &uart_root_clk, &uart2_ipg_clk);
-DEFINE_CLOCK(uart3_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG8_OFFSET,
-       NULL,  NULL, &uart_root_clk, &uart3_ipg_clk);
-DEFINE_CLOCK(uart4_clk, 3, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG5_OFFSET,
-       NULL,  NULL, &uart_root_clk, &uart4_ipg_clk);
-DEFINE_CLOCK(uart5_clk, 4, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG7_OFFSET,
-       NULL,  NULL, &uart_root_clk, &uart5_ipg_clk);
-
-/* GPT */
-DEFINE_CLOCK(gpt_ipg_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG10_OFFSET,
-       NULL,  NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET,
-       NULL,  NULL, &ipg_clk, &gpt_ipg_clk);
-
-DEFINE_CLOCK(pwm1_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG6_OFFSET,
-       NULL, NULL, &ipg_perclk, NULL);
-DEFINE_CLOCK(pwm2_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG8_OFFSET,
-       NULL, NULL, &ipg_perclk, NULL);
-
-/* I2C */
-DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG9_OFFSET,
-       NULL, NULL, &ipg_perclk, NULL);
-DEFINE_CLOCK(i2c2_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG10_OFFSET,
-       NULL, NULL, &ipg_perclk, NULL);
-DEFINE_CLOCK(hsi2c_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG11_OFFSET,
-       NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(i2c3_mx53_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG11_OFFSET,
-       NULL, NULL, &ipg_perclk, NULL);
-
-/* FEC */
-DEFINE_CLOCK(fec_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG12_OFFSET,
-       NULL,  NULL, &ipg_clk, NULL);
-
-/* NFC */
-DEFINE_CLOCK_CCGR(nfc_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG10_OFFSET,
-       clk_nfc, &emi_slow_clk, NULL);
-
-/* SSI */
-DEFINE_CLOCK(ssi1_ipg_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG8_OFFSET,
-       NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(ssi1_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG9_OFFSET,
-       NULL, NULL, &pll3_sw_clk, &ssi1_ipg_clk);
-DEFINE_CLOCK(ssi2_ipg_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG10_OFFSET,
-       NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(ssi2_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG11_OFFSET,
-       NULL, NULL, &pll3_sw_clk, &ssi2_ipg_clk);
-DEFINE_CLOCK(ssi3_ipg_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG12_OFFSET,
-       NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(ssi3_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG13_OFFSET,
-       NULL, NULL, &pll3_sw_clk, &ssi3_ipg_clk);
-
-/* eCSPI */
-DEFINE_CLOCK_FULL(ecspi1_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG9_OFFSET,
-               NULL, NULL, _clk_ccgr_enable_inrun, _clk_ccgr_disable,
-               &ipg_clk, &spba_clk);
-DEFINE_CLOCK(ecspi1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG10_OFFSET,
-               NULL, NULL, &ecspi_main_clk, &ecspi1_ipg_clk);
-DEFINE_CLOCK_FULL(ecspi2_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG11_OFFSET,
-               NULL, NULL, _clk_ccgr_enable_inrun, _clk_ccgr_disable,
-               &ipg_clk, &aips_tz2_clk);
-DEFINE_CLOCK(ecspi2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG12_OFFSET,
-               NULL, NULL, &ecspi_main_clk, &ecspi2_ipg_clk);
-
-/* CSPI */
-DEFINE_CLOCK(cspi_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG9_OFFSET,
-               NULL, NULL, &ipg_clk, &aips_tz2_clk);
-DEFINE_CLOCK(cspi_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG13_OFFSET,
-               NULL, NULL, &ipg_clk, &cspi_ipg_clk);
-
-/* SDMA */
-DEFINE_CLOCK(sdma_clk, 1, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG15_OFFSET,
-               NULL, NULL, &ahb_clk, NULL);
-
-/* eSDHC */
-DEFINE_CLOCK_FULL(esdhc1_ipg_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG0_OFFSET,
-       NULL,  NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-DEFINE_CLOCK_MAX(esdhc1_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG1_OFFSET,
-       clk_esdhc1, &pll2_sw_clk, &esdhc1_ipg_clk);
-DEFINE_CLOCK_FULL(esdhc2_ipg_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG2_OFFSET,
-       NULL,  NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-DEFINE_CLOCK_FULL(esdhc3_ipg_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG4_OFFSET,
-       NULL,  NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-DEFINE_CLOCK_FULL(esdhc4_ipg_clk, 3, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG6_OFFSET,
-       NULL,  NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-
-/* mx51 specific */
-DEFINE_CLOCK_MAX(esdhc2_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG3_OFFSET,
-       clk_esdhc2, &pll2_sw_clk, &esdhc2_ipg_clk);
-
-static struct clk esdhc3_clk = {
-       .id = 2,
-       .parent = &esdhc1_clk,
-       .set_parent = clk_esdhc3_set_parent,
-       .enable_reg = MXC_CCM_CCGR3,
-       .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
-       .enable  = _clk_max_enable,
-       .disable = _clk_max_disable,
-       .secondary = &esdhc3_ipg_clk,
-};
-static struct clk esdhc4_clk = {
-       .id = 3,
-       .parent = &esdhc1_clk,
-       .set_parent = clk_esdhc4_set_parent,
-       .enable_reg = MXC_CCM_CCGR3,
-       .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
-       .enable  = _clk_max_enable,
-       .disable = _clk_max_disable,
-       .secondary = &esdhc4_ipg_clk,
-};
-
-/* mx53 specific */
-static struct clk esdhc2_mx53_clk = {
-       .id = 2,
-       .parent = &esdhc1_clk,
-       .set_parent = clk_esdhc2_mx53_set_parent,
-       .enable_reg = MXC_CCM_CCGR3,
-       .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
-       .enable  = _clk_max_enable,
-       .disable = _clk_max_disable,
-       .secondary = &esdhc3_ipg_clk,
-};
-
-DEFINE_CLOCK_MAX(esdhc3_mx53_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG5_OFFSET,
-       clk_esdhc3_mx53, &pll2_sw_clk, &esdhc2_ipg_clk);
-
-static struct clk esdhc4_mx53_clk = {
-       .id = 3,
-       .parent = &esdhc1_clk,
-       .set_parent = clk_esdhc4_mx53_set_parent,
-       .enable_reg = MXC_CCM_CCGR3,
-       .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
-       .enable  = _clk_max_enable,
-       .disable = _clk_max_disable,
-       .secondary = &esdhc4_ipg_clk,
-};
-
-static struct clk sata_clk = {
-       .parent = &ipg_clk,
-       .enable = _clk_max_enable,
-       .enable_reg = MXC_CCM_CCGR4,
-       .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
-       .disable = _clk_max_disable,
-};
-
-static struct clk ahci_phy_clk = {
-       .parent = &usb_phy1_clk,
-};
-
-static struct clk ahci_dma_clk = {
-       .parent = &ahb_clk,
-};
-
-DEFINE_CLOCK(mipi_esc_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG5_OFFSET, NULL, NULL, NULL, &pll2_sw_clk);
-DEFINE_CLOCK(mipi_hsc2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG4_OFFSET, NULL, NULL, &mipi_esc_clk, &pll2_sw_clk);
-DEFINE_CLOCK(mipi_hsc1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG3_OFFSET, NULL, NULL, &mipi_hsc2_clk, &pll2_sw_clk);
-
-/* IPU */
-DEFINE_CLOCK_FULL(ipu_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG5_OFFSET,
-       NULL,  NULL, clk_ipu_enable, clk_ipu_disable, &ahb_clk, &ipu_sec_clk);
-
-DEFINE_CLOCK_FULL(emi_fast_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG7_OFFSET,
-               NULL, NULL, _clk_ccgr_enable, _clk_ccgr_disable_inwait,
-               &ddr_clk, NULL);
-
-DEFINE_CLOCK(ipu_di0_clk, 0, MXC_CCM_CCGR6, MXC_CCM_CCGRx_CG5_OFFSET,
-               NULL, NULL, &pll3_sw_clk, NULL);
-DEFINE_CLOCK(ipu_di1_clk, 0, MXC_CCM_CCGR6, MXC_CCM_CCGRx_CG6_OFFSET,
-               NULL, NULL, &pll3_sw_clk, NULL);
-
-/* PATA */
-DEFINE_CLOCK(pata_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG0_OFFSET,
-               NULL, NULL, &ipg_clk, &spba_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
-       { \
-               .dev_id = d, \
-               .con_id = n, \
-               .clk = &c,   \
-       },
-
-static struct clk_lookup mx51_lookups[] = {
-       /* i.mx51 has the i.mx21 type uart */
-       _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
-       _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
-       _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
-       _REGISTER_CLOCK(NULL, "gpt", gpt_clk)
-       /* i.mx51 has the i.mx27 type fec */
-       _REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
-       _REGISTER_CLOCK("mxc_pwm.0", "pwm", pwm1_clk)
-       _REGISTER_CLOCK("mxc_pwm.1", "pwm", pwm2_clk)
-       _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
-       _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
-       _REGISTER_CLOCK("imx-i2c.2", NULL, hsi2c_clk)
-       _REGISTER_CLOCK("mxc-ehci.0", "usb", usboh3_clk)
-       _REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_ahb_clk)
-       _REGISTER_CLOCK("mxc-ehci.0", "usb_phy1", usb_phy1_clk)
-       _REGISTER_CLOCK("mxc-ehci.1", "usb", usboh3_clk)
-       _REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_ahb_clk)
-       _REGISTER_CLOCK("mxc-ehci.2", "usb", usboh3_clk)
-       _REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_ahb_clk)
-       _REGISTER_CLOCK("fsl-usb2-udc", "usb", usboh3_clk)
-       _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", ahb_clk)
-       _REGISTER_CLOCK("imx-keypad", NULL, dummy_clk)
-       _REGISTER_CLOCK("mxc_nand", NULL, nfc_clk)
-       _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
-       _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
-       _REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk)
-       /* i.mx51 has the i.mx35 type sdma */
-       _REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
-       _REGISTER_CLOCK(NULL, "ckih", ckih_clk)
-       _REGISTER_CLOCK(NULL, "ckih2", ckih2_clk)
-       _REGISTER_CLOCK(NULL, "gpt_32k", gpt_32k_clk)
-       _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
-       _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
-       /* i.mx51 has the i.mx35 type cspi */
-       _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
-       _REGISTER_CLOCK("sdhci-esdhc-imx51.0", NULL, esdhc1_clk)
-       _REGISTER_CLOCK("sdhci-esdhc-imx51.1", NULL, esdhc2_clk)
-       _REGISTER_CLOCK("sdhci-esdhc-imx51.2", NULL, esdhc3_clk)
-       _REGISTER_CLOCK("sdhci-esdhc-imx51.3", NULL, esdhc4_clk)
-       _REGISTER_CLOCK(NULL, "cpu_clk", cpu_clk)
-       _REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
-       _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
-       _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
-       _REGISTER_CLOCK(NULL, "mipi_hsp", mipi_hsp_clk)
-       _REGISTER_CLOCK("imx-ipuv3", NULL, ipu_clk)
-       _REGISTER_CLOCK("imx-ipuv3", "di0", ipu_di0_clk)
-       _REGISTER_CLOCK("imx-ipuv3", "di1", ipu_di1_clk)
-       _REGISTER_CLOCK(NULL, "gpc_dvfs", gpc_dvfs_clk)
-       _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
-};
-
-static struct clk_lookup mx53_lookups[] = {
-       /* i.mx53 has the i.mx21 type uart */
-       _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
-       _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
-       _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
-       _REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
-       _REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
-       _REGISTER_CLOCK(NULL, "gpt", gpt_clk)
-       /* i.mx53 has the i.mx25 type fec */
-       _REGISTER_CLOCK("imx25-fec.0", NULL, fec_clk)
-       _REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
-       _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
-       _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
-       _REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_mx53_clk)
-       /* i.mx53 has the i.mx51 type ecspi */
-       _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
-       _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
-       /* i.mx53 has the i.mx25 type cspi */
-       _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
-       _REGISTER_CLOCK("sdhci-esdhc-imx53.0", NULL, esdhc1_clk)
-       _REGISTER_CLOCK("sdhci-esdhc-imx53.1", NULL, esdhc2_mx53_clk)
-       _REGISTER_CLOCK("sdhci-esdhc-imx53.2", NULL, esdhc3_mx53_clk)
-       _REGISTER_CLOCK("sdhci-esdhc-imx53.3", NULL, esdhc4_mx53_clk)
-       _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
-       _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
-       /* i.mx53 has the i.mx35 type sdma */
-       _REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
-       _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
-       _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
-       _REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk)
-       _REGISTER_CLOCK("imx-keypad", NULL, dummy_clk)
-       _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
-       _REGISTER_CLOCK("imx53-ahci.0", "ahci", sata_clk)
-       _REGISTER_CLOCK("imx53-ahci.0", "ahci_phy", ahci_phy_clk)
-       _REGISTER_CLOCK("imx53-ahci.0", "ahci_dma", ahci_dma_clk)
-};
-
-static void clk_tree_init(void)
-{
-       u32 reg;
-
-       ipg_perclk.set_parent(&ipg_perclk, &lp_apm_clk);
-
-       /*
-        * Initialise the IPG PER CLK dividers to 3. IPG_PER_CLK should be at
-        * 8MHz, its derived from lp_apm.
-        *
-        * FIXME: Verify if true for all boards
-        */
-       reg = __raw_readl(MXC_CCM_CBCDR);
-       reg &= ~MXC_CCM_CBCDR_PERCLK_PRED1_MASK;
-       reg &= ~MXC_CCM_CBCDR_PERCLK_PRED2_MASK;
-       reg &= ~MXC_CCM_CBCDR_PERCLK_PODF_MASK;
-       reg |= (2 << MXC_CCM_CBCDR_PERCLK_PRED1_OFFSET);
-       __raw_writel(reg, MXC_CCM_CBCDR);
-}
-
-int __init mx51_clocks_init(unsigned long ckil, unsigned long osc,
-                       unsigned long ckih1, unsigned long ckih2)
-{
-       int i;
-
-       external_low_reference = ckil;
-       external_high_reference = ckih1;
-       ckih2_reference = ckih2;
-       oscillator_reference = osc;
-
-       for (i = 0; i < ARRAY_SIZE(mx51_lookups); i++)
-               clkdev_add(&mx51_lookups[i]);
-
-       clk_tree_init();
-
-       clk_enable(&cpu_clk);
-       clk_enable(&main_bus_clk);
-
-       clk_enable(&iim_clk);
-       imx_print_silicon_rev("i.MX51", mx51_revision());
-       clk_disable(&iim_clk);
-
-       /* move usb_phy_clk to 24MHz */
-       clk_set_parent(&usb_phy1_clk, &osc_clk);
-
-       /* set the usboh3_clk parent to pll2_sw_clk */
-       clk_set_parent(&usboh3_clk, &pll2_sw_clk);
-
-       /* Set SDHC parents to be PLL2 */
-       clk_set_parent(&esdhc1_clk, &pll2_sw_clk);
-       clk_set_parent(&esdhc2_clk, &pll2_sw_clk);
-
-       /* set SDHC root clock as 166.25MHZ*/
-       clk_set_rate(&esdhc1_clk, 166250000);
-       clk_set_rate(&esdhc2_clk, 166250000);
-
-       /* System timer */
-       mxc_timer_init(&gpt_clk, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
-               MX51_INT_GPT);
-       return 0;
-}
-
-int __init mx53_clocks_init(unsigned long ckil, unsigned long osc,
-                       unsigned long ckih1, unsigned long ckih2)
-{
-       int i;
-
-       external_low_reference = ckil;
-       external_high_reference = ckih1;
-       ckih2_reference = ckih2;
-       oscillator_reference = osc;
-
-       for (i = 0; i < ARRAY_SIZE(mx53_lookups); i++)
-               clkdev_add(&mx53_lookups[i]);
-
-       clk_tree_init();
-
-       clk_set_parent(&uart_root_clk, &pll3_sw_clk);
-       clk_enable(&cpu_clk);
-       clk_enable(&main_bus_clk);
-
-       clk_enable(&iim_clk);
-       imx_print_silicon_rev("i.MX53", mx53_revision());
-       clk_disable(&iim_clk);
-
-       /* Set SDHC parents to be PLL2 */
-       clk_set_parent(&esdhc1_clk, &pll2_sw_clk);
-       clk_set_parent(&esdhc3_mx53_clk, &pll2_sw_clk);
-
-       /* set SDHC root clock as 200MHZ*/
-       clk_set_rate(&esdhc1_clk, 200000000);
-       clk_set_rate(&esdhc3_mx53_clk, 200000000);
-
-       /* System timer */
-       mxc_timer_init(&gpt_clk, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
-               MX53_INT_GPT);
-       return 0;
-}
-
-#ifdef CONFIG_OF
-static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
-                                  unsigned long *ckih1, unsigned long *ckih2)
-{
-       struct device_node *np;
-
-       /* retrieve the freqency of fixed clocks from device tree */
-       for_each_compatible_node(np, NULL, "fixed-clock") {
-               u32 rate;
-               if (of_property_read_u32(np, "clock-frequency", &rate))
-                       continue;
-
-               if (of_device_is_compatible(np, "fsl,imx-ckil"))
-                       *ckil = rate;
-               else if (of_device_is_compatible(np, "fsl,imx-osc"))
-                       *osc = rate;
-               else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
-                       *ckih1 = rate;
-               else if (of_device_is_compatible(np, "fsl,imx-ckih2"))
-                       *ckih2 = rate;
-       }
-}
-
-int __init mx51_clocks_init_dt(void)
-{
-       unsigned long ckil, osc, ckih1, ckih2;
-
-       clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
-       return mx51_clocks_init(ckil, osc, ckih1, ckih2);
-}
-
-int __init mx53_clocks_init_dt(void)
-{
-       unsigned long ckil, osc, ckih1, ckih2;
-
-       clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
-       return mx53_clocks_init(ckil, osc, ckih1, ckih2);
-}
-#endif
index aa15c517d06e9338293b2669b78b8f486ffba232..8eb15a2fcaf9307e047a16252ec25e0a5e59e9bb 100644 (file)
@@ -62,11 +62,8 @@ EXPORT_SYMBOL(mx51_revision);
  * Dependent on link order - so the assumption is that vfp_init is called
  * before us.
  */
-static int __init mx51_neon_fixup(void)
+int __init mx51_neon_fixup(void)
 {
-       if (!cpu_is_mx51())
-               return 0;
-
        if (mx51_revision() < IMX_CHIP_REVISION_3_0 &&
                        (elf_hwcap & HWCAP_NEON)) {
                elf_hwcap &= ~HWCAP_NEON;
@@ -75,7 +72,6 @@ static int __init mx51_neon_fixup(void)
        return 0;
 }
 
-late_initcall(mx51_neon_fixup);
 #endif
 
 static int get_mx53_srev(void)
index 53141273df452059ed38e43e0a2cb23e5df8421e..a1dfde53e335bafa291a33faf5a7400b82366be8 100644 (file)
 #define CKIH_CLK_FREQ_27MHZ     27000000
 #define CKIL_CLK_FREQ           32768
 
-#define MXC_CCM_BASE           (cpu_is_mx31() ? \
-MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR) : MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR))
+extern void __iomem *mx3_ccm_base;
 
 /* Register addresses */
-#define MXC_CCM_CCMR           (MXC_CCM_BASE + 0x00)
-#define MXC_CCM_PDR0           (MXC_CCM_BASE + 0x04)
-#define MXC_CCM_PDR1           (MXC_CCM_BASE + 0x08)
-#define MX35_CCM_PDR2          (MXC_CCM_BASE + 0x0C)
-#define MXC_CCM_RCSR           (MXC_CCM_BASE + 0x0C)
-#define MX35_CCM_PDR3          (MXC_CCM_BASE + 0x10)
-#define MXC_CCM_MPCTL          (MXC_CCM_BASE + 0x10)
-#define MX35_CCM_PDR4          (MXC_CCM_BASE + 0x14)
-#define MXC_CCM_UPCTL          (MXC_CCM_BASE + 0x14)
-#define MX35_CCM_RCSR          (MXC_CCM_BASE + 0x18)
-#define MXC_CCM_SRPCTL         (MXC_CCM_BASE + 0x18)
-#define MX35_CCM_MPCTL         (MXC_CCM_BASE + 0x1C)
-#define MXC_CCM_COSR           (MXC_CCM_BASE + 0x1C)
-#define MX35_CCM_PPCTL         (MXC_CCM_BASE + 0x20)
-#define MXC_CCM_CGR0           (MXC_CCM_BASE + 0x20)
-#define MX35_CCM_ACMR          (MXC_CCM_BASE + 0x24)
-#define MXC_CCM_CGR1           (MXC_CCM_BASE + 0x24)
-#define MX35_CCM_COSR          (MXC_CCM_BASE + 0x28)
-#define MXC_CCM_CGR2           (MXC_CCM_BASE + 0x28)
-#define MX35_CCM_CGR0          (MXC_CCM_BASE + 0x2C)
-#define MXC_CCM_WIMR           (MXC_CCM_BASE + 0x2C)
-#define MX35_CCM_CGR1          (MXC_CCM_BASE + 0x30)
-#define MXC_CCM_LDC            (MXC_CCM_BASE + 0x30)
-#define MX35_CCM_CGR2          (MXC_CCM_BASE + 0x34)
-#define MXC_CCM_DCVR0          (MXC_CCM_BASE + 0x34)
-#define MX35_CCM_CGR3          (MXC_CCM_BASE + 0x38)
-#define MXC_CCM_DCVR1          (MXC_CCM_BASE + 0x38)
-#define MXC_CCM_DCVR2          (MXC_CCM_BASE + 0x3C)
-#define MXC_CCM_DCVR3          (MXC_CCM_BASE + 0x40)
-#define MXC_CCM_LTR0           (MXC_CCM_BASE + 0x44)
-#define MXC_CCM_LTR1           (MXC_CCM_BASE + 0x48)
-#define MXC_CCM_LTR2           (MXC_CCM_BASE + 0x4C)
-#define MXC_CCM_LTR3           (MXC_CCM_BASE + 0x50)
-#define MXC_CCM_LTBR0          (MXC_CCM_BASE + 0x54)
-#define MXC_CCM_LTBR1          (MXC_CCM_BASE + 0x58)
-#define MXC_CCM_PMCR0          (MXC_CCM_BASE + 0x5C)
-#define MXC_CCM_PMCR1          (MXC_CCM_BASE + 0x60)
-#define MXC_CCM_PDR2           (MXC_CCM_BASE + 0x64)
+#define MXC_CCM_CCMR           0x00
+#define MXC_CCM_PDR0           0x04
+#define MXC_CCM_PDR1           0x08
+#define MX35_CCM_PDR2          0x0C
+#define MXC_CCM_RCSR           0x0C
+#define MX35_CCM_PDR3          0x10
+#define MXC_CCM_MPCTL          0x10
+#define MX35_CCM_PDR4          0x14
+#define MXC_CCM_UPCTL          0x14
+#define MX35_CCM_RCSR          0x18
+#define MXC_CCM_SRPCTL         0x18
+#define MX35_CCM_MPCTL         0x1C
+#define MXC_CCM_COSR           0x1C
+#define MX35_CCM_PPCTL         0x20
+#define MXC_CCM_CGR0           0x20
+#define MX35_CCM_ACMR          0x24
+#define MXC_CCM_CGR1           0x24
+#define MX35_CCM_COSR          0x28
+#define MXC_CCM_CGR2           0x28
+#define MX35_CCM_CGR0          0x2C
+#define MXC_CCM_WIMR           0x2C
+#define MX35_CCM_CGR1          0x30
+#define MXC_CCM_LDC            0x30
+#define MX35_CCM_CGR2          0x34
+#define MXC_CCM_DCVR0          0x34
+#define MX35_CCM_CGR3          0x38
+#define MXC_CCM_DCVR1          0x38
+#define MXC_CCM_DCVR2          0x3C
+#define MXC_CCM_DCVR3          0x40
+#define MXC_CCM_LTR0           0x44
+#define MXC_CCM_LTR1           0x48
+#define MXC_CCM_LTR2           0x4C
+#define MXC_CCM_LTR3           0x50
+#define MXC_CCM_LTBR0          0x54
+#define MXC_CCM_LTBR1          0x58
+#define MXC_CCM_PMCR0          0x5C
+#define MXC_CCM_PMCR1          0x60
+#define MXC_CCM_PDR2           0x64
 
 /* Register bit definitions */
 #define MXC_CCM_CCMR_WBEN                       (1 << 27)
index ed38d03c61f22296acb782e7e4cc9c8dfde20344..eee0cc8d92a43e84300723b05e6e9bb86bcb84f1 100644 (file)
@@ -29,6 +29,7 @@ static const struct of_dev_auxdata imx27_auxdata_lookup[] __initconst = {
        OF_DEV_AUXDATA("fsl,imx27-cspi", MX27_CSPI2_BASE_ADDR, "imx27-cspi.1", NULL),
        OF_DEV_AUXDATA("fsl,imx27-cspi", MX27_CSPI3_BASE_ADDR, "imx27-cspi.2", NULL),
        OF_DEV_AUXDATA("fsl,imx27-wdt", MX27_WDOG_BASE_ADDR, "imx2-wdt.0", NULL),
+       OF_DEV_AUXDATA("fsl,imx27-nand", MX27_NFC_BASE_ADDR, "mxc_nand.0", NULL),
        { /* sentinel */ }
 };
 
index 5f577fbda2c8621d861c2304fbbc7fc355184fd4..18e78dba4298ad90aebc7f0a86e44e723d413924 100644 (file)
@@ -118,6 +118,7 @@ DT_MACHINE_START(IMX51_DT, "Freescale i.MX51 (Device Tree Support)")
        .handle_irq     = imx51_handle_irq,
        .timer          = &imx51_timer,
        .init_machine   = imx51_dt_init,
+       .init_late      = imx51_init_late,
        .dt_compat      = imx51_dt_board_compat,
        .restart        = mxc_restart,
 MACHINE_END
index 574eca4b89a5b4daa6f55dd4809daebc647cd645..eb04b6248e48ee9a93751ede8c6cafee15904896 100644 (file)
@@ -10,6 +10,9 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
@@ -81,6 +84,19 @@ static const struct of_device_id imx53_iomuxc_of_match[] __initconst = {
        { /* sentinel */ }
 };
 
+static void __init imx53_qsb_init(void)
+{
+       struct clk *clk;
+
+       clk = clk_get_sys(NULL, "ssi_ext1");
+       if (IS_ERR(clk)) {
+               pr_err("failed to get clk ssi_ext1\n");
+               return;
+       }
+
+       clk_register_clkdev(clk, NULL, "0-000a");
+}
+
 static void __init imx53_dt_init(void)
 {
        struct device_node *node;
@@ -99,6 +115,9 @@ static void __init imx53_dt_init(void)
                of_node_put(node);
        }
 
+       if (of_machine_is_compatible("fsl,imx53-qsb"))
+               imx53_qsb_init();
+
        of_platform_populate(NULL, of_default_bus_match_table,
                             imx53_auxdata_lookup, NULL);
 }
index 0213f8dcee81752291551bf37103b1b16a3a664f..c40a34c0048910c974789695b0ecb13374d3cb94 100644 (file)
 #include <mach/hardware.h>
 
 static struct map_desc imx_lluart_desc = {
+#ifdef CONFIG_DEBUG_IMX6Q_UART2
+       .virtual        = MX6Q_IO_P2V(MX6Q_UART2_BASE_ADDR),
+       .pfn            = __phys_to_pfn(MX6Q_UART2_BASE_ADDR),
+       .length         = MX6Q_UART2_SIZE,
+       .type           = MT_DEVICE,
+#endif
 #ifdef CONFIG_DEBUG_IMX6Q_UART4
        .virtual        = MX6Q_IO_P2V(MX6Q_UART4_BASE_ADDR),
        .pfn            = __phys_to_pfn(MX6Q_UART4_BASE_ADDR),
index ce341a6874fc892e867319f43c16aa19f558c9d0..ac50f1671e381447d7fda9335d32cac33432b7ca 100644 (file)
@@ -369,5 +369,6 @@ MACHINE_START(EUKREA_CPUIMX51SD, "Eukrea CPUIMX51SD")
        .handle_irq = imx51_handle_irq,
        .timer = &mxc_timer,
        .init_machine = eukrea_cpuimx51sd_init,
+       .init_late      = imx51_init_late,
        .restart        = mxc_restart,
 MACHINE_END
index 3df360a52c17da001048bf121bde520037c897be..b47e98b7d539fc9bce4cd4f0060004f086d528c4 100644 (file)
@@ -10,6 +10,8 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
+#include <linux/clk.h>
+#include <linux/clkdev.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -64,18 +66,53 @@ soft:
 /* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
 static int ksz9021rn_phy_fixup(struct phy_device *phydev)
 {
-       /* min rx data delay */
-       phy_write(phydev, 0x0b, 0x8105);
-       phy_write(phydev, 0x0c, 0x0000);
+       if (IS_ENABLED(CONFIG_PHYLIB)) {
+               /* min rx data delay */
+               phy_write(phydev, 0x0b, 0x8105);
+               phy_write(phydev, 0x0c, 0x0000);
 
-       /* max rx/tx clock delay, min rx/tx control delay */
-       phy_write(phydev, 0x0b, 0x8104);
-       phy_write(phydev, 0x0c, 0xf0f0);
-       phy_write(phydev, 0x0b, 0x104);
+               /* max rx/tx clock delay, min rx/tx control delay */
+               phy_write(phydev, 0x0b, 0x8104);
+               phy_write(phydev, 0x0c, 0xf0f0);
+               phy_write(phydev, 0x0b, 0x104);
+       }
 
        return 0;
 }
 
+static void __init imx6q_sabrelite_cko1_setup(void)
+{
+       struct clk *cko1_sel, *ahb, *cko1;
+       unsigned long rate;
+
+       cko1_sel = clk_get_sys(NULL, "cko1_sel");
+       ahb = clk_get_sys(NULL, "ahb");
+       cko1 = clk_get_sys(NULL, "cko1");
+       if (IS_ERR(cko1_sel) || IS_ERR(ahb) || IS_ERR(cko1)) {
+               pr_err("cko1 setup failed!\n");
+               goto put_clk;
+       }
+       clk_set_parent(cko1_sel, ahb);
+       rate = clk_round_rate(cko1, 16000000);
+       clk_set_rate(cko1, rate);
+       clk_register_clkdev(cko1, NULL, "0-000a");
+put_clk:
+       if (!IS_ERR(cko1_sel))
+               clk_put(cko1_sel);
+       if (!IS_ERR(ahb))
+               clk_put(ahb);
+       if (!IS_ERR(cko1))
+               clk_put(cko1);
+}
+
+static void __init imx6q_sabrelite_init(void)
+{
+       if (IS_ENABLED(CONFIG_PHYLIB))
+               phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
+                               ksz9021rn_phy_fixup);
+       imx6q_sabrelite_cko1_setup();
+}
+
 static void __init imx6q_init_machine(void)
 {
        /*
@@ -85,8 +122,7 @@ static void __init imx6q_init_machine(void)
        pinctrl_provide_dummies();
 
        if (of_machine_is_compatible("fsl,imx6q-sabrelite"))
-               phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
-                                          ksz9021rn_phy_fixup);
+               imx6q_sabrelite_init();
 
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 
@@ -139,6 +175,7 @@ static struct sys_timer imx6q_timer = {
 static const char *imx6q_dt_compat[] __initdata = {
        "fsl,imx6q-arm2",
        "fsl,imx6q-sabrelite",
+       "fsl,imx6q-sabresd",
        "fsl,imx6q",
        NULL,
 };
index 83eab4176ca48668f1f7295698c3ca6b4aaa4502..3c5b163923f63e4b20ab311fee76a70bb431d666 100644 (file)
@@ -175,5 +175,6 @@ MACHINE_START(MX51_3DS, "Freescale MX51 3-Stack Board")
        .handle_irq = imx51_handle_irq,
        .timer = &mx51_3ds_timer,
        .init_machine = mx51_3ds_init,
+       .init_late      = imx51_init_late,
        .restart        = mxc_restart,
 MACHINE_END
index e4b822e9f719eb04c2af016500c62ae489fa3275..dde397014d4b709d2cd2026d8ea879676f3b35b8 100644 (file)
@@ -163,6 +163,12 @@ static iomux_v3_cfg_t mx51babbage_pads[] = {
        MX51_PAD_CSPI1_SCLK__ECSPI1_SCLK,
        MX51_PAD_CSPI1_SS0__GPIO4_24,
        MX51_PAD_CSPI1_SS1__GPIO4_25,
+
+       /* Audio */
+       MX51_PAD_AUD3_BB_TXD__AUD3_TXD,
+       MX51_PAD_AUD3_BB_RXD__AUD3_RXD,
+       MX51_PAD_AUD3_BB_CK__AUD3_TXC,
+       MX51_PAD_AUD3_BB_FS__AUD3_TXFS,
 };
 
 /* Serial ports */
@@ -426,5 +432,6 @@ MACHINE_START(MX51_BABBAGE, "Freescale MX51 Babbage Board")
        .handle_irq = imx51_handle_irq,
        .timer = &mx51_babbage_timer,
        .init_machine = mx51_babbage_init,
+       .init_late      = imx51_init_late,
        .restart        = mxc_restart,
 MACHINE_END
index 86e96ef11f9d554ef926b35b344d1d70441bfa55..8d09c0126cabeb505a269f79c1131e9ffb5debd5 100644 (file)
@@ -207,29 +207,32 @@ static void mx51_efikamx_power_off(void)
 
 static int __init mx51_efikamx_power_init(void)
 {
-       if (machine_is_mx51_efikamx()) {
-               pwgt1 = regulator_get(NULL, "pwgt1");
-               pwgt2 = regulator_get(NULL, "pwgt2");
-               if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
-                       regulator_enable(pwgt1);
-                       regulator_enable(pwgt2);
-               }
-               gpio_request(EFIKAMX_POWEROFF, "poweroff");
-               pm_power_off = mx51_efikamx_power_off;
-
-               /* enable coincell charger. maybe need a small power driver ? */
-               coincell = regulator_get(NULL, "coincell");
-               if (!IS_ERR(coincell)) {
-                       regulator_set_voltage(coincell, 3000000, 3000000);
-                       regulator_enable(coincell);
-               }
-
-               regulator_has_full_constraints();
+       pwgt1 = regulator_get(NULL, "pwgt1");
+       pwgt2 = regulator_get(NULL, "pwgt2");
+       if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
+               regulator_enable(pwgt1);
+               regulator_enable(pwgt2);
+       }
+       gpio_request(EFIKAMX_POWEROFF, "poweroff");
+       pm_power_off = mx51_efikamx_power_off;
+
+       /* enable coincell charger. maybe need a small power driver ? */
+       coincell = regulator_get(NULL, "coincell");
+       if (!IS_ERR(coincell)) {
+               regulator_set_voltage(coincell, 3000000, 3000000);
+               regulator_enable(coincell);
        }
 
+       regulator_has_full_constraints();
+
        return 0;
 }
-late_initcall(mx51_efikamx_power_init);
+
+static void __init mx51_efikamx_init_late(void)
+{
+       imx51_init_late();
+       mx51_efikamx_power_init();
+}
 
 static void __init mx51_efikamx_init(void)
 {
@@ -292,5 +295,6 @@ MACHINE_START(MX51_EFIKAMX, "Genesi Efika MX (Smarttop)")
        .handle_irq = imx51_handle_irq,
        .timer = &mx51_efikamx_timer,
        .init_machine = mx51_efikamx_init,
+       .init_late = mx51_efikamx_init_late,
        .restart = mx51_efikamx_restart,
 MACHINE_END
index 88f837a6cc76d44c841dfcc23de37b735d878953..fdbd181b97efaf015378a3b35c44bd80197404e6 100644 (file)
@@ -211,22 +211,25 @@ static void mx51_efikasb_power_off(void)
 
 static int __init mx51_efikasb_power_init(void)
 {
-       if (machine_is_mx51_efikasb()) {
-               pwgt1 = regulator_get(NULL, "pwgt1");
-               pwgt2 = regulator_get(NULL, "pwgt2");
-               if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
-                       regulator_enable(pwgt1);
-                       regulator_enable(pwgt2);
-               }
-               gpio_request(EFIKASB_POWEROFF, "poweroff");
-               pm_power_off = mx51_efikasb_power_off;
-
-               regulator_has_full_constraints();
+       pwgt1 = regulator_get(NULL, "pwgt1");
+       pwgt2 = regulator_get(NULL, "pwgt2");
+       if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
+               regulator_enable(pwgt1);
+               regulator_enable(pwgt2);
        }
+       gpio_request(EFIKASB_POWEROFF, "poweroff");
+       pm_power_off = mx51_efikasb_power_off;
+
+       regulator_has_full_constraints();
 
        return 0;
 }
-late_initcall(mx51_efikasb_power_init);
+
+static void __init mx51_efikasb_init_late(void)
+{
+       imx51_init_late();
+       mx51_efikasb_power_init();
+}
 
 /* 01     R1.3 board
    10     R2.0 board */
@@ -287,6 +290,7 @@ MACHINE_START(MX51_EFIKASB, "Genesi Efika MX (Smartbook)")
        .init_irq = mx51_init_irq,
        .handle_irq = imx51_handle_irq,
        .init_machine =  efikasb_board_init,
+       .init_late = mx51_efikasb_init_late,
        .timer = &mx51_efikasb_timer,
        .restart        = mxc_restart,
 MACHINE_END
index 10c9795934a3c87673b9c3434dec4d4de40bc3d9..0a40004154f234e4afba7f20477a902509bb6637 100644 (file)
@@ -694,6 +694,11 @@ static void __init pcm037_reserve(void)
                        MX3_CAMERA_BUF_SIZE);
 }
 
+static void __init pcm037_init_late(void)
+{
+       pcm037_eet_init_devices();
+}
+
 MACHINE_START(PCM037, "Phytec Phycore pcm037")
        /* Maintainer: Pengutronix */
        .atag_offset = 0x100,
@@ -704,5 +709,6 @@ MACHINE_START(PCM037, "Phytec Phycore pcm037")
        .handle_irq = imx31_handle_irq,
        .timer = &pcm037_timer,
        .init_machine = pcm037_init,
+       .init_late = pcm037_init_late,
        .restart        = mxc_restart,
 MACHINE_END
index 1b7606bef8f4f0047b585974cd9f49fc6854c07d..11ffa81ad17db2d9501286df2201371ce3fdc0cc 100644 (file)
@@ -160,9 +160,9 @@ static const struct gpio_keys_platform_data
        .rep            = 0, /* No auto-repeat */
 };
 
-static int __init eet_init_devices(void)
+int __init pcm037_eet_init_devices(void)
 {
-       if (!machine_is_pcm037() || pcm037_variant() != PCM037_EET)
+       if (pcm037_variant() != PCM037_EET)
                return 0;
 
        mxc_iomux_setup_multiple_pins(pcm037_eet_pins,
@@ -176,4 +176,3 @@ static int __init eet_init_devices(void)
 
        return 0;
 }
-late_initcall(eet_init_devices);
index 9128d15b1eb7fdd34fac2b23c81a8037e9ca4d7f..967ed5b35a45914b3e26678db38bc0aaf0868faa 100644 (file)
 #include <mach/iomux-v3.h>
 #include <mach/irqs.h>
 
+#include "crmregs-imx3.h"
+
+void __iomem *mx3_ccm_base;
+
 static void imx3_idle(void)
 {
        unsigned long reg = 0;
@@ -138,6 +142,7 @@ void __init imx31_init_early(void)
        mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
        arch_ioremap_caller = imx3_ioremap_caller;
        arm_pm_idle = imx3_idle;
+       mx3_ccm_base = MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR);
 }
 
 void __init mx31_init_irq(void)
@@ -211,6 +216,7 @@ void __init imx35_init_early(void)
        mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
        arm_pm_idle = imx3_idle;
        arch_ioremap_caller = imx3_ioremap_caller;
+       mx3_ccm_base = MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR);
 }
 
 void __init mx35_init_irq(void)
index ba91e6b31cf45fd5b7575aac2314193eb3da6c87..feeee17da96b227b769c1748c6175b6071b8bd59 100644 (file)
@@ -33,6 +33,7 @@ static void imx5_idle(void)
                gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
                if (IS_ERR(gpc_dvfs_clk))
                        return;
+               clk_prepare(gpc_dvfs_clk);
        }
        clk_enable(gpc_dvfs_clk);
        mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
@@ -236,3 +237,8 @@ void __init imx53_soc_init(void)
        platform_device_register_simple("imx31-audmux", 0, imx53_audmux_res,
                                        ARRAY_SIZE(imx53_audmux_res));
 }
+
+void __init imx51_init_late(void)
+{
+       mx51_neon_fixup();
+}
index d6929721a5fd620e40f75b570fca567d4aa9d967..7d167690e17dab2e2e3ff6318010bbaf3198965b 100644 (file)
@@ -8,4 +8,10 @@ enum pcm037_board_variant {
 
 extern enum pcm037_board_variant pcm037_variant(void);
 
+#ifdef CONFIG_MACH_PCM037_EET
+int pcm037_eet_init_devices(void);
+#else
+static inline int pcm037_eet_init_devices(void) { return 0; }
+#endif
+
 #endif
index b3752439632ec1ecabbbaec64bd8f5ed3dc0e966..822103bdb7092eab3ba63c3260f9da4f587abc2f 100644 (file)
  */
 void mx3_cpu_lp_set(enum mx3_cpu_pwr_mode mode)
 {
-       int reg = __raw_readl(MXC_CCM_CCMR);
+       int reg = __raw_readl(mx3_ccm_base + MXC_CCM_CCMR);
        reg &= ~MXC_CCM_CCMR_LPM_MASK;
 
        switch (mode) {
        case MX3_WAIT:
                if (cpu_is_mx35())
                        reg |= MXC_CCM_CCMR_LPM_WAIT_MX35;
-               __raw_writel(reg, MXC_CCM_CCMR);
+               __raw_writel(reg, mx3_ccm_base + MXC_CCM_CCMR);
                break;
        default:
                pr_err("Unknown cpu power mode: %d\n", mode);
index ebbd7fc90eb47488b320de65feee82ce578ff288..a9f80943d01fe8b468a0cba2a78cf8f05a51cac7 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/clockchips.h>
 #include <linux/io.h>
 #include <linux/export.h>
+#include <linux/gpio.h>
 
 #include <mach/udc.h>
 #include <mach/hardware.h>
@@ -107,7 +108,7 @@ static signed char irq2gpio[32] = {
         7,  8,  9, 10, 11, 12, -1, -1,
 };
 
-int gpio_to_irq(int gpio)
+static int ixp4xx_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
 {
        int irq;
 
@@ -117,7 +118,6 @@ int gpio_to_irq(int gpio)
        }
        return -EINVAL;
 }
-EXPORT_SYMBOL(gpio_to_irq);
 
 int irq_to_gpio(unsigned int irq)
 {
@@ -383,12 +383,56 @@ static struct platform_device *ixp46x_devices[] __initdata = {
 unsigned long ixp4xx_exp_bus_size;
 EXPORT_SYMBOL(ixp4xx_exp_bus_size);
 
+static int ixp4xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+       gpio_line_config(gpio, IXP4XX_GPIO_IN);
+
+       return 0;
+}
+
+static int ixp4xx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+                                       int level)
+{
+       gpio_line_set(gpio, level);
+       gpio_line_config(gpio, IXP4XX_GPIO_OUT);
+
+       return 0;
+}
+
+static int ixp4xx_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+       int value;
+
+       gpio_line_get(gpio, &value);
+
+       return value;
+}
+
+static void ixp4xx_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
+                                 int value)
+{
+       gpio_line_set(gpio, value);
+}
+
+static struct gpio_chip ixp4xx_gpio_chip = {
+       .label                  = "IXP4XX_GPIO_CHIP",
+       .direction_input        = ixp4xx_gpio_direction_input,
+       .direction_output       = ixp4xx_gpio_direction_output,
+       .get                    = ixp4xx_gpio_get_value,
+       .set                    = ixp4xx_gpio_set_value,
+       .to_irq                 = ixp4xx_gpio_to_irq,
+       .base                   = 0,
+       .ngpio                  = 16,
+};
+
 void __init ixp4xx_sys_init(void)
 {
        ixp4xx_exp_bus_size = SZ_16M;
 
        platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices));
 
+       gpiochip_add(&ixp4xx_gpio_chip);
+
        if (cpu_is_ixp46x()) {
                int region;
 
index 83d6b4ed60bbd42f3e912d68611205d720ad98a1..ef37f2635b0e4a5812ab30ce065525e3853a44d8 100644 (file)
@@ -1,79 +1,2 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/gpio.h
- *
- * IXP4XX GPIO wrappers for arch-neutral GPIO calls
- *
- * Written by Milan Svoboda <msvoboda@ra.rockwell.com>
- * Based on PXA implementation by Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_ARCH_IXP4XX_GPIO_H
-#define __ASM_ARCH_IXP4XX_GPIO_H
-
-#include <linux/kernel.h>
-#include <mach/hardware.h>
-
-#define __ARM_GPIOLIB_COMPLEX
-
-static inline int gpio_request(unsigned gpio, const char *label)
-{
-       return 0;
-}
-
-static inline void gpio_free(unsigned gpio)
-{
-       might_sleep();
-
-       return;
-}
-
-static inline int gpio_direction_input(unsigned gpio)
-{
-       gpio_line_config(gpio, IXP4XX_GPIO_IN);
-       return 0;
-}
-
-static inline int gpio_direction_output(unsigned gpio, int level)
-{
-       gpio_line_set(gpio, level);
-       gpio_line_config(gpio, IXP4XX_GPIO_OUT);
-       return 0;
-}
-
-static inline int gpio_get_value(unsigned gpio)
-{
-       int value;
-
-       gpio_line_get(gpio, &value);
-
-       return value;
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
-       gpio_line_set(gpio, value);
-}
-
-#include <asm-generic/gpio.h>                  /* cansleep wrappers */
-
-extern int gpio_to_irq(int gpio);
-#define gpio_to_irq gpio_to_irq
-extern int irq_to_gpio(unsigned int irq);
-
-#endif
+/* empty */
 
index 3d742aee177304250d78aa775d69475364a94866..108a9d3f382da148ff3b6cf281440ab4a302d1c4 100644 (file)
@@ -60,8 +60,6 @@ static struct platform_device ixdp425_flash = {
 #if defined(CONFIG_MTD_NAND_PLATFORM) || \
     defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
 
-const char *part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition ixdp425_partitions[] = {
        {
                .name   = "ixp400 NAND FS 0",
@@ -100,8 +98,6 @@ static struct platform_nand_data ixdp425_flash_nand_data = {
        .chip = {
                .nr_chips               = 1,
                .chip_delay             = 30,
-               .options                = NAND_NO_AUTOINCR,
-               .part_probe_types       = part_probes,
                .partitions             = ixdp425_partitions,
                .nr_partitions          = ARRAY_SIZE(ixdp425_partitions),
        },
index 985453994dd3940d796df8d9bf7458e41298f87f..55e357ab2923e95db3848f177a83bb713f1c7baa 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/mtd/physmap.h>
 #include <linux/spi/flash.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
index 10d1969b9e3a8050b66ead369458419c9bb8b1b0..edc3f8a9d45e8eb85b9884b4e52f5e117568a366 100644 (file)
@@ -43,6 +43,9 @@ static void __init kirkwood_dt_init(void)
        kirkwood_l2_init();
 #endif
 
+       /* Setup root of clk tree */
+       kirkwood_clk_init();
+
        /* internal devices that every board has */
        kirkwood_wdt_init();
        kirkwood_xor0_init();
index 3ad037385a5e8ad1883d8c242a689417f1f15c7f..25fb3fd418efbe7e30b94136d920fd851e371de8 100644 (file)
@@ -15,7 +15,8 @@
 #include <linux/ata_platform.h>
 #include <linux/mtd/nand.h>
 #include <linux/dma-mapping.h>
-#include <linux/of.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
 #include <net/dsa.h>
 #include <asm/page.h>
 #include <asm/timex.h>
@@ -32,6 +33,7 @@
 #include <plat/common.h>
 #include <plat/time.h>
 #include <plat/addr-map.h>
+#include <plat/mv_xor.h>
 #include "common.h"
 
 /*****************************************************************************
@@ -61,20 +63,188 @@ void __init kirkwood_map_io(void)
        iotable_init(kirkwood_io_desc, ARRAY_SIZE(kirkwood_io_desc));
 }
 
-/*
- * Default clock control bits.  Any bit _not_ set in this variable
- * will be cleared from the hardware after platform devices have been
- * registered.  Some reserved bits must be set to 1.
- */
-unsigned int kirkwood_clk_ctrl = CGC_DUNIT | CGC_RESERVED;
+/*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
+
+static void disable_sata0(void)
+{
+       /* Disable PLL and IVREF */
+       writel(readl(SATA0_PHY_MODE_2) & ~0xf, SATA0_PHY_MODE_2);
+       /* Disable PHY */
+       writel(readl(SATA0_IF_CTRL) | 0x200, SATA0_IF_CTRL);
+}
+
+static void disable_sata1(void)
+{
+       /* Disable PLL and IVREF */
+       writel(readl(SATA1_PHY_MODE_2) & ~0xf, SATA1_PHY_MODE_2);
+       /* Disable PHY */
+       writel(readl(SATA1_IF_CTRL) | 0x200, SATA1_IF_CTRL);
+}
+
+static void disable_pcie0(void)
+{
+       writel(readl(PCIE_LINK_CTRL) | 0x10, PCIE_LINK_CTRL);
+       while (1)
+               if (readl(PCIE_STATUS) & 0x1)
+                       break;
+       writel(readl(PCIE_LINK_CTRL) & ~0x10, PCIE_LINK_CTRL);
+}
+
+static void disable_pcie1(void)
+{
+       u32 dev, rev;
+
+       kirkwood_pcie_id(&dev, &rev);
+
+       if (dev == MV88F6282_DEV_ID) {
+               writel(readl(PCIE1_LINK_CTRL) | 0x10, PCIE1_LINK_CTRL);
+               while (1)
+                       if (readl(PCIE1_STATUS) & 0x1)
+                               break;
+               writel(readl(PCIE1_LINK_CTRL) & ~0x10, PCIE1_LINK_CTRL);
+       }
+}
+
+/* An extended version of the gated clk. This calls fn() before
+ * disabling the clock. We use this to turn off PHYs etc. */
+struct clk_gate_fn {
+       struct clk_gate gate;
+       void (*fn)(void);
+};
+
+#define to_clk_gate_fn(_gate) container_of(_gate, struct clk_gate_fn, gate)
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
+static void clk_gate_fn_disable(struct clk_hw *hw)
+{
+       struct clk_gate *gate = to_clk_gate(hw);
+       struct clk_gate_fn *gate_fn = to_clk_gate_fn(gate);
+
+       if (gate_fn->fn)
+               gate_fn->fn();
+
+       clk_gate_ops.disable(hw);
+}
+
+static struct clk_ops clk_gate_fn_ops;
+
+static struct clk __init *clk_register_gate_fn(struct device *dev,
+               const char *name,
+               const char *parent_name, unsigned long flags,
+               void __iomem *reg, u8 bit_idx,
+               u8 clk_gate_flags, spinlock_t *lock,
+               void (*fn)(void))
+{
+       struct clk_gate_fn *gate_fn;
+       struct clk *clk;
+       struct clk_init_data init;
+
+       gate_fn = kzalloc(sizeof(struct clk_gate_fn), GFP_KERNEL);
+       if (!gate_fn) {
+               pr_err("%s: could not allocate gated clk\n", __func__);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       init.name = name;
+       init.ops = &clk_gate_fn_ops;
+       init.flags = flags;
+       init.parent_names = (parent_name ? &parent_name : NULL);
+       init.num_parents = (parent_name ? 1 : 0);
+
+       /* struct clk_gate assignments */
+       gate_fn->gate.reg = reg;
+       gate_fn->gate.bit_idx = bit_idx;
+       gate_fn->gate.flags = clk_gate_flags;
+       gate_fn->gate.lock = lock;
+       gate_fn->gate.hw.init = &init;
+
+       /* ops is the gate ops, but with our disable function */
+       if (clk_gate_fn_ops.disable != clk_gate_fn_disable) {
+               clk_gate_fn_ops = clk_gate_ops;
+               clk_gate_fn_ops.disable = clk_gate_fn_disable;
+       }
 
+       clk = clk_register(dev, &gate_fn->gate.hw);
+
+       if (IS_ERR(clk))
+               kfree(gate_fn);
+
+       return clk;
+}
+
+static DEFINE_SPINLOCK(gating_lock);
+static struct clk *tclk;
+
+static struct clk __init *kirkwood_register_gate(const char *name, u8 bit_idx)
+{
+       return clk_register_gate(NULL, name, "tclk", 0,
+                                (void __iomem *)CLOCK_GATING_CTRL,
+                                bit_idx, 0, &gating_lock);
+}
+
+static struct clk __init *kirkwood_register_gate_fn(const char *name,
+                                                   u8 bit_idx,
+                                                   void (*fn)(void))
+{
+       return clk_register_gate_fn(NULL, name, "tclk", 0,
+                                   (void __iomem *)CLOCK_GATING_CTRL,
+                                   bit_idx, 0, &gating_lock, fn);
+}
+
+void __init kirkwood_clk_init(void)
+{
+       struct clk *runit, *ge0, *ge1, *sata0, *sata1, *usb0, *sdio;
+       struct clk *crypto, *xor0, *xor1, *pex0, *pex1, *audio;
+
+       tclk = clk_register_fixed_rate(NULL, "tclk", NULL,
+                                      CLK_IS_ROOT, kirkwood_tclk);
+
+       runit = kirkwood_register_gate("runit",  CGC_BIT_RUNIT);
+       ge0 = kirkwood_register_gate("ge0",    CGC_BIT_GE0);
+       ge1 = kirkwood_register_gate("ge1",    CGC_BIT_GE1);
+       sata0 = kirkwood_register_gate_fn("sata0",  CGC_BIT_SATA0,
+                                         disable_sata0);
+       sata1 = kirkwood_register_gate_fn("sata1",  CGC_BIT_SATA1,
+                                         disable_sata1);
+       usb0 = kirkwood_register_gate("usb0",   CGC_BIT_USB0);
+       sdio = kirkwood_register_gate("sdio",   CGC_BIT_SDIO);
+       crypto = kirkwood_register_gate("crypto", CGC_BIT_CRYPTO);
+       xor0 = kirkwood_register_gate("xor0",   CGC_BIT_XOR0);
+       xor1 = kirkwood_register_gate("xor1",   CGC_BIT_XOR1);
+       pex0 = kirkwood_register_gate_fn("pex0",   CGC_BIT_PEX0,
+                                        disable_pcie0);
+       pex1 = kirkwood_register_gate_fn("pex1",   CGC_BIT_PEX1,
+                                        disable_pcie1);
+       audio = kirkwood_register_gate("audio",  CGC_BIT_AUDIO);
+       kirkwood_register_gate("tdm",    CGC_BIT_TDM);
+       kirkwood_register_gate("tsu",    CGC_BIT_TSU);
+
+       /* clkdev entries, mapping clks to devices */
+       orion_clkdev_add(NULL, "orion_spi.0", runit);
+       orion_clkdev_add(NULL, "orion_spi.1", runit);
+       orion_clkdev_add(NULL, MV643XX_ETH_NAME ".0", ge0);
+       orion_clkdev_add(NULL, MV643XX_ETH_NAME ".1", ge1);
+       orion_clkdev_add(NULL, "orion_wdt", tclk);
+       orion_clkdev_add("0", "sata_mv.0", sata0);
+       orion_clkdev_add("1", "sata_mv.0", sata1);
+       orion_clkdev_add(NULL, "orion-ehci.0", usb0);
+       orion_clkdev_add(NULL, "orion_nand", runit);
+       orion_clkdev_add(NULL, "mvsdio", sdio);
+       orion_clkdev_add(NULL, "mv_crypto", crypto);
+       orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".0", xor0);
+       orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".1", xor1);
+       orion_clkdev_add("0", "pcie", pex0);
+       orion_clkdev_add("1", "pcie", pex1);
+       orion_clkdev_add(NULL, "kirkwood-i2s", audio);
+}
 
 /*****************************************************************************
  * EHCI0
  ****************************************************************************/
 void __init kirkwood_ehci_init(void)
 {
-       kirkwood_clk_ctrl |= CGC_USB0;
        orion_ehci_init(USB_PHYS_BASE, IRQ_KIRKWOOD_USB, EHCI_PHY_NA);
 }
 
@@ -84,11 +254,9 @@ void __init kirkwood_ehci_init(void)
  ****************************************************************************/
 void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
 {
-       kirkwood_clk_ctrl |= CGC_GE0;
-
        orion_ge00_init(eth_data,
                        GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
-                       IRQ_KIRKWOOD_GE00_ERR, kirkwood_tclk);
+                       IRQ_KIRKWOOD_GE00_ERR);
 }
 
 
@@ -97,12 +265,9 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
  ****************************************************************************/
 void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
 {
-
-       kirkwood_clk_ctrl |= CGC_GE1;
-
        orion_ge01_init(eth_data,
                        GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
-                       IRQ_KIRKWOOD_GE01_ERR, kirkwood_tclk);
+                       IRQ_KIRKWOOD_GE01_ERR);
 }
 
 
@@ -144,7 +309,6 @@ static struct platform_device kirkwood_nand_flash = {
 void __init kirkwood_nand_init(struct mtd_partition *parts, int nr_parts,
                               int chip_delay)
 {
-       kirkwood_clk_ctrl |= CGC_RUNIT;
        kirkwood_nand_data.parts = parts;
        kirkwood_nand_data.nr_parts = nr_parts;
        kirkwood_nand_data.chip_delay = chip_delay;
@@ -154,7 +318,6 @@ void __init kirkwood_nand_init(struct mtd_partition *parts, int nr_parts,
 void __init kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts,
                                   int (*dev_ready)(struct mtd_info *))
 {
-       kirkwood_clk_ctrl |= CGC_RUNIT;
        kirkwood_nand_data.parts = parts;
        kirkwood_nand_data.nr_parts = nr_parts;
        kirkwood_nand_data.dev_ready = dev_ready;
@@ -175,10 +338,6 @@ static void __init kirkwood_rtc_init(void)
  ****************************************************************************/
 void __init kirkwood_sata_init(struct mv_sata_platform_data *sata_data)
 {
-       kirkwood_clk_ctrl |= CGC_SATA0;
-       if (sata_data->n_ports > 1)
-               kirkwood_clk_ctrl |= CGC_SATA1;
-
        orion_sata_init(sata_data, SATA_PHYS_BASE, IRQ_KIRKWOOD_SATA);
 }
 
@@ -221,7 +380,6 @@ void __init kirkwood_sdio_init(struct mvsdio_platform_data *mvsdio_data)
                mvsdio_data->clock = 100000000;
        else
                mvsdio_data->clock = 200000000;
-       kirkwood_clk_ctrl |= CGC_SDIO;
        kirkwood_sdio.dev.platform_data = mvsdio_data;
        platform_device_register(&kirkwood_sdio);
 }
@@ -232,8 +390,7 @@ void __init kirkwood_sdio_init(struct mvsdio_platform_data *mvsdio_data)
  ****************************************************************************/
 void __init kirkwood_spi_init()
 {
-       kirkwood_clk_ctrl |= CGC_RUNIT;
-       orion_spi_init(SPI_PHYS_BASE, kirkwood_tclk);
+       orion_spi_init(SPI_PHYS_BASE);
 }
 
 
@@ -253,7 +410,7 @@ void __init kirkwood_i2c_init(void)
 void __init kirkwood_uart0_init(void)
 {
        orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
-                        IRQ_KIRKWOOD_UART_0, kirkwood_tclk);
+                        IRQ_KIRKWOOD_UART_0, tclk);
 }
 
 
@@ -263,7 +420,7 @@ void __init kirkwood_uart0_init(void)
 void __init kirkwood_uart1_init(void)
 {
        orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
-                        IRQ_KIRKWOOD_UART_1, kirkwood_tclk);
+                        IRQ_KIRKWOOD_UART_1, tclk);
 }
 
 /*****************************************************************************
@@ -271,7 +428,6 @@ void __init kirkwood_uart1_init(void)
  ****************************************************************************/
 void __init kirkwood_crypto_init(void)
 {
-       kirkwood_clk_ctrl |= CGC_CRYPTO;
        orion_crypto_init(CRYPTO_PHYS_BASE, KIRKWOOD_SRAM_PHYS_BASE,
                          KIRKWOOD_SRAM_SIZE, IRQ_KIRKWOOD_CRYPTO);
 }
@@ -282,8 +438,6 @@ void __init kirkwood_crypto_init(void)
  ****************************************************************************/
 void __init kirkwood_xor0_init(void)
 {
-       kirkwood_clk_ctrl |= CGC_XOR0;
-
        orion_xor0_init(XOR0_PHYS_BASE, XOR0_HIGH_PHYS_BASE,
                        IRQ_KIRKWOOD_XOR_00, IRQ_KIRKWOOD_XOR_01);
 }
@@ -294,8 +448,6 @@ void __init kirkwood_xor0_init(void)
  ****************************************************************************/
 void __init kirkwood_xor1_init(void)
 {
-       kirkwood_clk_ctrl |= CGC_XOR1;
-
        orion_xor1_init(XOR1_PHYS_BASE, XOR1_HIGH_PHYS_BASE,
                        IRQ_KIRKWOOD_XOR_10, IRQ_KIRKWOOD_XOR_11);
 }
@@ -306,7 +458,7 @@ void __init kirkwood_xor1_init(void)
  ****************************************************************************/
 void __init kirkwood_wdt_init(void)
 {
-       orion_wdt_init(kirkwood_tclk);
+       orion_wdt_init();
 }
 
 
@@ -382,7 +534,6 @@ static struct platform_device kirkwood_pcm_device = {
 
 void __init kirkwood_audio_init(void)
 {
-       kirkwood_clk_ctrl |= CGC_AUDIO;
        platform_device_register(&kirkwood_i2s_device);
        platform_device_register(&kirkwood_pcm_device);
 }
@@ -466,6 +617,9 @@ void __init kirkwood_init(void)
        kirkwood_l2_init();
 #endif
 
+       /* Setup root of clk tree */
+       kirkwood_clk_init();
+
        /* internal devices that every board has */
        kirkwood_rtc_init();
        kirkwood_wdt_init();
@@ -478,72 +632,6 @@ void __init kirkwood_init(void)
 #endif
 }
 
-static int __init kirkwood_clock_gate(void)
-{
-       unsigned int curr = readl(CLOCK_GATING_CTRL);
-       u32 dev, rev;
-
-#ifdef CONFIG_OF
-       struct device_node *np;
-#endif
-       kirkwood_pcie_id(&dev, &rev);
-       printk(KERN_DEBUG "Gating clock of unused units\n");
-       printk(KERN_DEBUG "before: 0x%08x\n", curr);
-
-       /* Make sure those units are accessible */
-       writel(curr | CGC_SATA0 | CGC_SATA1 | CGC_PEX0 | CGC_PEX1, CLOCK_GATING_CTRL);
-
-#ifdef CONFIG_OF
-       np = of_find_compatible_node(NULL, NULL, "mrvl,orion-nand");
-       if (np && of_device_is_available(np)) {
-               kirkwood_clk_ctrl |= CGC_RUNIT;
-               of_node_put(np);
-       }
-#endif
-
-       /* For SATA: first shutdown the phy */
-       if (!(kirkwood_clk_ctrl & CGC_SATA0)) {
-               /* Disable PLL and IVREF */
-               writel(readl(SATA0_PHY_MODE_2) & ~0xf, SATA0_PHY_MODE_2);
-               /* Disable PHY */
-               writel(readl(SATA0_IF_CTRL) | 0x200, SATA0_IF_CTRL);
-       }
-       if (!(kirkwood_clk_ctrl & CGC_SATA1)) {
-               /* Disable PLL and IVREF */
-               writel(readl(SATA1_PHY_MODE_2) & ~0xf, SATA1_PHY_MODE_2);
-               /* Disable PHY */
-               writel(readl(SATA1_IF_CTRL) | 0x200, SATA1_IF_CTRL);
-       }
-       
-       /* For PCIe: first shutdown the phy */
-       if (!(kirkwood_clk_ctrl & CGC_PEX0)) {
-               writel(readl(PCIE_LINK_CTRL) | 0x10, PCIE_LINK_CTRL);
-               while (1)
-                       if (readl(PCIE_STATUS) & 0x1)
-                               break;
-               writel(readl(PCIE_LINK_CTRL) & ~0x10, PCIE_LINK_CTRL);
-       }
-
-       /* For PCIe 1: first shutdown the phy */
-       if (dev == MV88F6282_DEV_ID) {
-               if (!(kirkwood_clk_ctrl & CGC_PEX1)) {
-                       writel(readl(PCIE1_LINK_CTRL) | 0x10, PCIE1_LINK_CTRL);
-                       while (1)
-                               if (readl(PCIE1_STATUS) & 0x1)
-                                       break;
-                       writel(readl(PCIE1_LINK_CTRL) & ~0x10, PCIE1_LINK_CTRL);
-               }
-       } else  /* keep this bit set for devices that don't have PCIe1 */
-               kirkwood_clk_ctrl |= CGC_PEX1;
-
-       /* Now gate clock the required units */
-       writel(kirkwood_clk_ctrl, CLOCK_GATING_CTRL);
-       printk(KERN_DEBUG " after: 0x%08x\n", readl(CLOCK_GATING_CTRL));
-
-       return 0;
-}
-late_initcall(kirkwood_clock_gate);
-
 void kirkwood_restart(char mode, const char *cmd)
 {
        /*
index a34c41a5172eb0bdfd13f441c8569ee564a00e42..9248fa2c165bbaded5a830afbf542bb09ae3e955 100644 (file)
@@ -50,6 +50,7 @@ void kirkwood_nand_init(struct mtd_partition *parts, int nr_parts, int delay);
 void kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts, int (*dev_ready)(struct mtd_info *));
 void kirkwood_audio_init(void);
 void kirkwood_restart(char, const char *);
+void kirkwood_clk_init(void);
 
 /* board init functions for boards not fully converted to fdt */
 #ifdef CONFIG_MACH_DREAMPLUG_DT
index 957bd7997d7e3a8850132ccc97d70e2da3fe7bba..3eee37a3b501a81e594fed45997301ae27f67ce4 100644 (file)
 #define L2_WRITETHROUGH                0x00000010
 
 #define CLOCK_GATING_CTRL      (BRIDGE_VIRT_BASE | 0x11c)
+#define CGC_BIT_GE0            (0)
+#define CGC_BIT_PEX0           (2)
+#define CGC_BIT_USB0           (3)
+#define CGC_BIT_SDIO           (4)
+#define CGC_BIT_TSU            (5)
+#define CGC_BIT_DUNIT          (6)
+#define CGC_BIT_RUNIT          (7)
+#define CGC_BIT_XOR0           (8)
+#define CGC_BIT_AUDIO          (9)
+#define CGC_BIT_SATA0          (14)
+#define CGC_BIT_SATA1          (15)
+#define CGC_BIT_XOR1           (16)
+#define CGC_BIT_CRYPTO         (17)
+#define CGC_BIT_PEX1           (18)
+#define CGC_BIT_GE1            (19)
+#define CGC_BIT_TDM            (20)
 #define CGC_GE0                        (1 << 0)
 #define CGC_PEX0               (1 << 2)
 #define CGC_USB0               (1 << 3)
index 85f6169c24846178570cb1cc079a12dd209fd9b3..6d8364a97810f83e897e1515be37c4ade616d45e 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/gpio_keys.h>
 #include <linux/spi/flash.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <net/dsa.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
index de373176ee670f7c435d9cd9849cb50b857f49f4..6e8b2efa3c353ae830639b5a25a526b305dd922e 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
+#include <linux/clk.h>
 #include <video/vga.h>
 #include <asm/irq.h>
 #include <asm/mach/pci.h>
 #include <plat/addr-map.h>
 #include "common.h"
 
+static void kirkwood_enable_pcie_clk(const char *port)
+{
+       struct clk *clk;
+
+       clk = clk_get_sys("pcie", port);
+       if (IS_ERR(clk)) {
+               printk(KERN_ERR "PCIE clock %s missing\n", port);
+               return;
+       }
+       clk_prepare_enable(clk);
+       clk_put(clk);
+}
+
+/* This function is called very early in the boot when probing the
+   hardware to determine what we actually are, and what rate tclk is
+   ticking at. Hence calling kirkwood_enable_pcie_clk() is not
+   possible since the clk tree has not been created yet. */
 void kirkwood_enable_pcie(void)
 {
        u32 curr = readl(CLOCK_GATING_CTRL);
@@ -26,7 +44,7 @@ void kirkwood_enable_pcie(void)
                writel(curr | CGC_PEX0, CLOCK_GATING_CTRL);
 }
 
-void __init kirkwood_pcie_id(u32 *dev, u32 *rev)
+void kirkwood_pcie_id(u32 *dev, u32 *rev)
 {
        kirkwood_enable_pcie();
        *dev = orion_pcie_dev_id((void __iomem *)PCIE_VIRT_BASE);
@@ -159,7 +177,6 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
 
 static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
 {
-       extern unsigned int kirkwood_clk_ctrl;
        struct pcie_port *pp;
        int index;
 
@@ -178,11 +195,11 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
 
        switch (index) {
        case 0:
-               kirkwood_clk_ctrl |= CGC_PEX0;
+               kirkwood_enable_pcie_clk("0");
                pcie0_ioresources_init(pp);
                break;
        case 1:
-               kirkwood_clk_ctrl |= CGC_PEX1;
+               kirkwood_enable_pcie_clk("1");
                pcie1_ioresources_init(pp);
                break;
        default:
index fd2c9c8b6831a1d0ae808bef994e691a9a38c721..f742a66a7045478a8428c35e8bc65ef65b14e23a 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/gpio.h>
 #include <linux/spi/flash.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <mach/kirkwood.h>
index f9d2a11b7f9649468c26a882323df785e27783a0..bad738e440445d4d31ebc038a4cc812f64397983 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/mtd/physmap.h>
 #include <linux/spi/flash.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <linux/i2c.h>
 #include <linux/mv643xx_eth.h>
 #include <linux/ata_platform.h>
index 24294b2bc4690365b8092d6ee702738cab1c3cc5..8943ede29b4458b14217aebd0873b896eaa15df8 100644 (file)
@@ -4,7 +4,6 @@
 #include <linux/mtd/physmap.h>
 #include <linux/spi/flash.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <linux/serial_reg.h>
 #include <mach/kirkwood.h>
 #include "common.h"
index 26aac363a06405f865e76ea8c0db6324af8d85aa..4fa3e99d9a62afbc38feeed5e916730f8bb984a4 100644 (file)
@@ -94,6 +94,11 @@ static void __init halibut_map_io(void)
        msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
 }
 
+static void __init halibut_init_late(void)
+{
+       smd_debugfs_init();
+}
+
 MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
        .atag_offset    = 0x100,
        .fixup          = halibut_fixup,
@@ -101,5 +106,6 @@ MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
        .init_early     = halibut_init_early,
        .init_irq       = halibut_init_irq,
        .init_machine   = halibut_init,
+       .init_late      = halibut_init_late,
        .timer          = &msm_timer,
 MACHINE_END
index 5a4882fc6f7a0faaf5122b150290fb7ed74d715e..cf1f89a5dc622ad55bc28bca21275913c494ab88 100644 (file)
@@ -71,6 +71,11 @@ static void __init mahimahi_map_io(void)
        msm_clock_init();
 }
 
+static void __init mahimahi_init_late(void)
+{
+       smd_debugfs_init();
+}
+
 extern struct sys_timer msm_timer;
 
 MACHINE_START(MAHIMAHI, "mahimahi")
@@ -79,5 +84,6 @@ MACHINE_START(MAHIMAHI, "mahimahi")
        .map_io         = mahimahi_map_io,
        .init_irq       = msm_init_irq,
        .init_machine   = mahimahi_init,
+       .init_late      = mahimahi_init_late,
        .timer          = &msm_timer,
 MACHINE_END
index 6d84ee740df483911363f2ef54311b618787bf54..451ab1d43c927017a1f8743cf9c55438c7463837 100644 (file)
@@ -128,11 +128,17 @@ static void __init msm7x2x_map_io(void)
 #endif
 }
 
+static void __init msm7x2x_init_late(void)
+{
+       smd_debugfs_init();
+}
+
 MACHINE_START(MSM7X27_SURF, "QCT MSM7x27 SURF")
        .atag_offset    = 0x100,
        .map_io         = msm7x2x_map_io,
        .init_irq       = msm7x2x_init_irq,
        .init_machine   = msm7x2x_init,
+       .init_late      = msm7x2x_init_late,
        .timer          = &msm_timer,
 MACHINE_END
 
@@ -141,6 +147,7 @@ MACHINE_START(MSM7X27_FFA, "QCT MSM7x27 FFA")
        .map_io         = msm7x2x_map_io,
        .init_irq       = msm7x2x_init_irq,
        .init_machine   = msm7x2x_init,
+       .init_late      = msm7x2x_init_late,
        .timer          = &msm_timer,
 MACHINE_END
 
@@ -149,6 +156,7 @@ MACHINE_START(MSM7X25_SURF, "QCT MSM7x25 SURF")
        .map_io         = msm7x2x_map_io,
        .init_irq       = msm7x2x_init_irq,
        .init_machine   = msm7x2x_init,
+       .init_late      = msm7x2x_init_late,
        .timer          = &msm_timer,
 MACHINE_END
 
@@ -157,5 +165,6 @@ MACHINE_START(MSM7X25_FFA, "QCT MSM7x25 FFA")
        .map_io         = msm7x2x_map_io,
        .init_irq       = msm7x2x_init_irq,
        .init_machine   = msm7x2x_init,
+       .init_late      = msm7x2x_init_late,
        .timer          = &msm_timer,
 MACHINE_END
index 75b3cfcada6d06586aff8ec0b627db092e408e75..a5001378135d4d3ec8615022370457a293cd14f7 100644 (file)
@@ -119,6 +119,11 @@ static void __init msm7x30_map_io(void)
        msm_clock_init(msm_clocks_7x30, msm_num_clocks_7x30);
 }
 
+static void __init msm7x30_init_late(void)
+{
+       smd_debugfs_init();
+}
+
 MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
        .atag_offset = 0x100,
        .fixup = msm7x30_fixup,
@@ -126,6 +131,7 @@ MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
        .map_io = msm7x30_map_io,
        .init_irq = msm7x30_init_irq,
        .init_machine = msm7x30_init,
+       .init_late = msm7x30_init_late,
        .timer = &msm_timer,
 MACHINE_END
 
@@ -136,6 +142,7 @@ MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA")
        .map_io = msm7x30_map_io,
        .init_irq = msm7x30_init_irq,
        .init_machine = msm7x30_init,
+       .init_late = msm7x30_init_late,
        .timer = &msm_timer,
 MACHINE_END
 
@@ -146,5 +153,6 @@ MACHINE_START(MSM7X30_FLUID, "QCT MSM7X30 FLUID")
        .map_io = msm7x30_map_io,
        .init_irq = msm7x30_init_irq,
        .init_machine = msm7x30_init,
+       .init_late = msm7x30_init_late,
        .timer = &msm_timer,
 MACHINE_END
index ed3598128530f143df0c89fedf66a96177c8f4e0..65f4a1daa2e5f6d5250bee88ceba4d3590a0f39e 100644 (file)
@@ -93,6 +93,11 @@ static void __init msm8960_rumi3_init(void)
        platform_add_devices(rumi3_devices, ARRAY_SIZE(rumi3_devices));
 }
 
+static void __init msm8960_init_late(void)
+{
+       smd_debugfs_init();
+}
+
 MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
        .fixup = msm8960_fixup,
        .reserve = msm8960_reserve,
@@ -101,6 +106,7 @@ MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
        .timer = &msm_timer,
        .handle_irq = gic_handle_irq,
        .init_machine = msm8960_sim_init,
+       .init_late = msm8960_init_late,
 MACHINE_END
 
 MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
@@ -111,5 +117,6 @@ MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
        .timer = &msm_timer,
        .handle_irq = gic_handle_irq,
        .init_machine = msm8960_rumi3_init,
+       .init_late = msm8960_init_late,
 MACHINE_END
 
index fb3496a52ef4c08921de04e75e4682ad00e796a2..e37a724cd1eb5c1e2703571deb84f0adfc3d28f9 100644 (file)
@@ -81,6 +81,11 @@ static void __init msm8x60_init(void)
 {
 }
 
+static void __init msm8x60_init_late(void)
+{
+       smd_debugfs_init();
+}
+
 #ifdef CONFIG_OF
 static struct of_dev_auxdata msm_auxdata_lookup[] __initdata = {
        {}
@@ -111,6 +116,7 @@ MACHINE_START(MSM8X60_RUMI3, "QCT MSM8X60 RUMI3")
        .init_irq = msm8x60_init_irq,
        .handle_irq = gic_handle_irq,
        .init_machine = msm8x60_init,
+       .init_late = msm8x60_init_late,
        .timer = &msm_timer,
 MACHINE_END
 
@@ -121,6 +127,7 @@ MACHINE_START(MSM8X60_SURF, "QCT MSM8X60 SURF")
        .init_irq = msm8x60_init_irq,
        .handle_irq = gic_handle_irq,
        .init_machine = msm8x60_init,
+       .init_late = msm8x60_init_late,
        .timer = &msm_timer,
 MACHINE_END
 
@@ -131,6 +138,7 @@ MACHINE_START(MSM8X60_SIM, "QCT MSM8X60 SIMULATOR")
        .init_irq = msm8x60_init_irq,
        .handle_irq = gic_handle_irq,
        .init_machine = msm8x60_init,
+       .init_late = msm8x60_init_late,
        .timer = &msm_timer,
 MACHINE_END
 
@@ -141,6 +149,7 @@ MACHINE_START(MSM8X60_FFA, "QCT MSM8X60 FFA")
        .init_irq = msm8x60_init_irq,
        .handle_irq = gic_handle_irq,
        .init_machine = msm8x60_init,
+       .init_late = msm8x60_init_late,
        .timer = &msm_timer,
 MACHINE_END
 
@@ -150,6 +159,7 @@ DT_MACHINE_START(MSM_DT, "Qualcomm MSM (Flattened Device Tree)")
        .map_io = msm8x60_map_io,
        .init_irq = msm8x60_init_irq,
        .init_machine = msm8x60_dt_init,
+       .init_late = msm8x60_init_late,
        .timer = &msm_timer,
        .dt_compat = msm8x60_fluid_match,
 MACHINE_END
index fbaa4ed95a3cf9dc578b53fb7ea511e06b2187ee..c8fe0edb9761961f877378601dad5c3c0a810be4 100644 (file)
@@ -190,11 +190,17 @@ static void __init qsd8x50_init(void)
        qsd8x50_init_mmc();
 }
 
+static void __init qsd8x50_init_late(void)
+{
+       smd_debugfs_init();
+}
+
 MACHINE_START(QSD8X50_SURF, "QCT QSD8X50 SURF")
        .atag_offset = 0x100,
        .map_io = qsd8x50_map_io,
        .init_irq = qsd8x50_init_irq,
        .init_machine = qsd8x50_init,
+       .init_late = qsd8x50_init_late,
        .timer = &msm_timer,
 MACHINE_END
 
@@ -203,5 +209,6 @@ MACHINE_START(QSD8X50A_ST1_5, "QCT QSD8X50A ST1.5")
        .map_io = qsd8x50_map_io,
        .init_irq = qsd8x50_init_irq,
        .init_machine = qsd8x50_init,
+       .init_late = qsd8x50_init_late,
        .timer = &msm_timer,
 MACHINE_END
index 4a8ea0d40b6ffe0b794d95a29f6d9e9003456eab..2e569ab10eef3f253b4f429d5b01b12dddaf933a 100644 (file)
@@ -101,6 +101,11 @@ static void __init sapphire_map_io(void)
        msm_clock_init();
 }
 
+static void __init sapphire_init_late(void)
+{
+       smd_debugfs_init();
+}
+
 MACHINE_START(SAPPHIRE, "sapphire")
 /* Maintainer: Brian Swetland <swetland@google.com> */
        .atag_offset    = 0x100,
@@ -108,5 +113,6 @@ MACHINE_START(SAPPHIRE, "sapphire")
        .map_io         = sapphire_map_io,
        .init_irq       = sapphire_init_irq,
        .init_machine   = sapphire_init,
+       .init_late      = sapphire_init_late,
        .timer          = &msm_timer,
 MACHINE_END
index d4060a37e23d39776456974e97b361ac6522b1e5..bbe13f12fa0197f54d8b63f7ad420fbdd4b8c7c9 100644 (file)
@@ -98,6 +98,11 @@ static void __init trout_map_io(void)
        msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
 }
 
+static void __init trout_init_late(void)
+{
+       smd_debugfs_init();
+}
+
 MACHINE_START(TROUT, "HTC Dream")
        .atag_offset    = 0x100,
        .fixup          = trout_fixup,
@@ -105,5 +110,6 @@ MACHINE_START(TROUT, "HTC Dream")
        .init_early     = trout_init_early,
        .init_irq       = trout_init_irq,
        .init_machine   = trout_init,
+       .init_late      = trout_init_late,
        .timer          = &msm_timer,
 MACHINE_END
index 2ce8f1f2fc4d57c163193229340b7b04d04c7474..435f8edfafd1bb78855a5ba8ece372935e78f481 100644 (file)
@@ -47,4 +47,10 @@ int __init msm_add_sdcc(unsigned int controller,
                        struct msm_mmc_platform_data *plat,
                        unsigned int stat_irq, unsigned long stat_irq_flags);
 
+#if defined(CONFIG_MSM_SMD) && defined(CONFIG_DEBUG_FS)
+int smd_debugfs_init(void);
+#else
+static inline int smd_debugfs_init(void) { return 0; }
+#endif
+
 #endif
index c56df9e932aec303aca92252040fe2d9344b5391..8056b3e5590f9969a2d2928a4efaa47e5c9fc087 100644 (file)
@@ -216,7 +216,7 @@ static void debug_create(const char *name, umode_t mode,
        debugfs_create_file(name, mode, dent, fill, &debug_ops);
 }
 
-static int smd_debugfs_init(void)
+int __init smd_debugfs_init(void)
 {
        struct dentry *dent;
 
@@ -234,7 +234,6 @@ static int smd_debugfs_init(void)
        return 0;
 }
 
-late_initcall(smd_debugfs_init);
 #endif
 
 
index a5dcf766a3f9ca53a050c8f4be1ec2823ac98145..b4c53b846c9caa8402ce764aec9f9f254379031a 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/platform_device.h>
 #include <linux/serial_8250.h>
 #include <linux/ata_platform.h>
+#include <linux/clk-provider.h>
 #include <linux/ethtool.h>
 #include <asm/mach/map.h>
 #include <asm/mach/time.h>
@@ -103,24 +104,24 @@ static void get_pclk_l2clk(int hclk, int core_index, int *pclk, int *l2clk)
 
 static int get_tclk(void)
 {
-       int tclk;
+       int tclk_freq;
 
        /*
         * TCLK tick rate is configured by DEV_A[2:0] strap pins.
         */
        switch ((readl(SAMPLE_AT_RESET_HIGH) >> 6) & 7) {
        case 1:
-               tclk = 166666667;
+               tclk_freq = 166666667;
                break;
        case 3:
-               tclk = 200000000;
+               tclk_freq = 200000000;
                break;
        default:
                panic("unknown TCLK PLL setting: %.8x\n",
                        readl(SAMPLE_AT_RESET_HIGH));
        }
 
-       return tclk;
+       return tclk_freq;
 }
 
 
@@ -165,6 +166,19 @@ void __init mv78xx0_map_io(void)
 }
 
 
+/*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
+static struct clk *tclk;
+
+static void __init clk_init(void)
+{
+       tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
+                                      get_tclk());
+
+       orion_clkdev_init(tclk);
+}
+
 /*****************************************************************************
  * EHCI
  ****************************************************************************/
@@ -199,7 +213,7 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
 {
        orion_ge00_init(eth_data,
                        GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
-                       IRQ_MV78XX0_GE_ERR, get_tclk());
+                       IRQ_MV78XX0_GE_ERR);
 }
 
 
@@ -210,7 +224,7 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
 {
        orion_ge01_init(eth_data,
                        GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
-                       NO_IRQ, get_tclk());
+                       NO_IRQ);
 }
 
 
@@ -234,7 +248,7 @@ void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data)
 
        orion_ge10_init(eth_data,
                        GE10_PHYS_BASE, IRQ_MV78XX0_GE10_SUM,
-                       NO_IRQ, get_tclk());
+                       NO_IRQ);
 }
 
 
@@ -258,7 +272,7 @@ void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data)
 
        orion_ge11_init(eth_data,
                        GE11_PHYS_BASE, IRQ_MV78XX0_GE11_SUM,
-                       NO_IRQ, get_tclk());
+                       NO_IRQ);
 }
 
 /*****************************************************************************
@@ -285,7 +299,7 @@ void __init mv78xx0_sata_init(struct mv_sata_platform_data *sata_data)
 void __init mv78xx0_uart0_init(void)
 {
        orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
-                        IRQ_MV78XX0_UART_0, get_tclk());
+                        IRQ_MV78XX0_UART_0, tclk);
 }
 
 
@@ -295,7 +309,7 @@ void __init mv78xx0_uart0_init(void)
 void __init mv78xx0_uart1_init(void)
 {
        orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
-                        IRQ_MV78XX0_UART_1, get_tclk());
+                        IRQ_MV78XX0_UART_1, tclk);
 }
 
 
@@ -305,7 +319,7 @@ void __init mv78xx0_uart1_init(void)
 void __init mv78xx0_uart2_init(void)
 {
        orion_uart2_init(UART2_VIRT_BASE, UART2_PHYS_BASE,
-                        IRQ_MV78XX0_UART_2, get_tclk());
+                        IRQ_MV78XX0_UART_2, tclk);
 }
 
 /*****************************************************************************
@@ -314,7 +328,7 @@ void __init mv78xx0_uart2_init(void)
 void __init mv78xx0_uart3_init(void)
 {
        orion_uart3_init(UART3_VIRT_BASE, UART3_PHYS_BASE,
-                        IRQ_MV78XX0_UART_3, get_tclk());
+                        IRQ_MV78XX0_UART_3, tclk);
 }
 
 /*****************************************************************************
@@ -378,25 +392,26 @@ void __init mv78xx0_init(void)
        int hclk;
        int pclk;
        int l2clk;
-       int tclk;
 
        core_index = mv78xx0_core_index();
        hclk = get_hclk();
        get_pclk_l2clk(hclk, core_index, &pclk, &l2clk);
-       tclk = get_tclk();
 
        printk(KERN_INFO "%s ", mv78xx0_id());
        printk("core #%d, ", core_index);
        printk("PCLK = %dMHz, ", (pclk + 499999) / 1000000);
        printk("L2 = %dMHz, ", (l2clk + 499999) / 1000000);
        printk("HCLK = %dMHz, ", (hclk + 499999) / 1000000);
-       printk("TCLK = %dMHz\n", (tclk + 499999) / 1000000);
+       printk("TCLK = %dMHz\n", (get_tclk() + 499999) / 1000000);
 
        mv78xx0_setup_cpu_mbus();
 
 #ifdef CONFIG_CACHE_FEROCEON_L2
        feroceon_l2_init(is_l2_writethrough());
 #endif
+
+       /* Setup root of clk tree */
+       clk_init();
 }
 
 void mv78xx0_restart(char mode, const char *cmd)
index 07d5383d68ee69f7e5f9bdd652f339d2f9f8f130..91cf0625819c2f62639cff7216e6563b9813e585 100644 (file)
@@ -7,18 +7,28 @@ config MXS_OCOTP
 
 config SOC_IMX23
        bool
+       select ARM_AMBA
        select CPU_ARM926T
        select HAVE_PWM
        select PINCTRL_IMX23
 
 config SOC_IMX28
        bool
+       select ARM_AMBA
        select CPU_ARM926T
        select HAVE_PWM
        select PINCTRL_IMX28
 
 comment "MXS platforms:"
 
+config MACH_MXS_DT
+       bool "Support MXS platforms from device tree"
+       select SOC_IMX23
+       select SOC_IMX28
+       help
+         Include support for Freescale MXS platforms(i.MX23 and i.MX28)
+         using the device tree for discovery
+
 config MACH_STMP378X_DEVB
        bool "Support STMP378x_devb Platform"
        select SOC_IMX23
index 908bf9a567f18f490a81c708be9ddb9c4744fc40..e41590ccb437feb62a0a9154778d91a700185521 100644 (file)
@@ -1,12 +1,10 @@
 # Common support
-obj-y := clock.o devices.o icoll.o iomux.o system.o timer.o mm.o
+obj-y := devices.o icoll.o iomux.o system.o timer.o mm.o
 
 obj-$(CONFIG_MXS_OCOTP) += ocotp.o
 obj-$(CONFIG_PM) += pm.o
 
-obj-$(CONFIG_SOC_IMX23) += clock-mx23.o
-obj-$(CONFIG_SOC_IMX28) += clock-mx28.o
-
+obj-$(CONFIG_MACH_MXS_DT) += mach-mxs.o
 obj-$(CONFIG_MACH_STMP378X_DEVB) += mach-stmp378x_devb.o
 obj-$(CONFIG_MACH_MX23EVK) += mach-mx23evk.o
 obj-$(CONFIG_MACH_MX28EVK) += mach-mx28evk.o
diff --git a/arch/arm/mach-mxs/clock-mx23.c b/arch/arm/mach-mxs/clock-mx23.c
deleted file mode 100644 (file)
index e3ac52c..0000000
+++ /dev/null
@@ -1,536 +0,0 @@
-/*
- * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/clkdev.h>
-
-#include <asm/clkdev.h>
-#include <asm/div64.h>
-
-#include <mach/mx23.h>
-#include <mach/common.h>
-#include <mach/clock.h>
-
-#include "regs-clkctrl-mx23.h"
-
-#define CLKCTRL_BASE_ADDR      MX23_IO_ADDRESS(MX23_CLKCTRL_BASE_ADDR)
-#define DIGCTRL_BASE_ADDR      MX23_IO_ADDRESS(MX23_DIGCTL_BASE_ADDR)
-
-#define PARENT_RATE_SHIFT      8
-
-static int _raw_clk_enable(struct clk *clk)
-{
-       u32 reg;
-
-       if (clk->enable_reg) {
-               reg = __raw_readl(clk->enable_reg);
-               reg &= ~(1 << clk->enable_shift);
-               __raw_writel(reg, clk->enable_reg);
-       }
-
-       return 0;
-}
-
-static void _raw_clk_disable(struct clk *clk)
-{
-       u32 reg;
-
-       if (clk->enable_reg) {
-               reg = __raw_readl(clk->enable_reg);
-               reg |= 1 << clk->enable_shift;
-               __raw_writel(reg, clk->enable_reg);
-       }
-}
-
-/*
- * ref_xtal_clk
- */
-static unsigned long ref_xtal_clk_get_rate(struct clk *clk)
-{
-       return 24000000;
-}
-
-static struct clk ref_xtal_clk = {
-       .get_rate = ref_xtal_clk_get_rate,
-};
-
-/*
- * pll_clk
- */
-static unsigned long pll_clk_get_rate(struct clk *clk)
-{
-       return 480000000;
-}
-
-static int pll_clk_enable(struct clk *clk)
-{
-       __raw_writel(BM_CLKCTRL_PLLCTRL0_POWER |
-                       BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS,
-                       CLKCTRL_BASE_ADDR + HW_CLKCTRL_PLLCTRL0_SET);
-
-       /* Only a 10us delay is need. PLLCTRL1 LOCK bitfied is only a timer
-        * and is incorrect (excessive). Per definition of the PLLCTRL0
-        * POWER field, waiting at least 10us.
-        */
-       udelay(10);
-
-       return 0;
-}
-
-static void pll_clk_disable(struct clk *clk)
-{
-       __raw_writel(BM_CLKCTRL_PLLCTRL0_POWER |
-                       BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS,
-                       CLKCTRL_BASE_ADDR + HW_CLKCTRL_PLLCTRL0_CLR);
-}
-
-static struct clk pll_clk = {
-        .get_rate = pll_clk_get_rate,
-        .enable = pll_clk_enable,
-        .disable = pll_clk_disable,
-        .parent = &ref_xtal_clk,
-};
-
-/*
- * ref_clk
- */
-#define _CLK_GET_RATE_REF(name, sr, ss)                                        \
-static unsigned long name##_get_rate(struct clk *clk)                  \
-{                                                                      \
-       unsigned long parent_rate;                                      \
-       u32 reg, div;                                                   \
-                                                                       \
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##sr);         \
-       div = (reg >> BP_CLKCTRL_##sr##_##ss##FRAC) & 0x3f;             \
-       parent_rate = clk_get_rate(clk->parent);                        \
-                                                                       \
-       return SH_DIV((parent_rate >> PARENT_RATE_SHIFT) * 18,          \
-                       div, PARENT_RATE_SHIFT);                        \
-}
-
-_CLK_GET_RATE_REF(ref_cpu_clk, FRAC, CPU)
-_CLK_GET_RATE_REF(ref_emi_clk, FRAC, EMI)
-_CLK_GET_RATE_REF(ref_pix_clk, FRAC, PIX)
-_CLK_GET_RATE_REF(ref_io_clk, FRAC, IO)
-
-#define _DEFINE_CLOCK_REF(name, er, es)                                        \
-       static struct clk name = {                                      \
-               .enable_reg     = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er,  \
-               .enable_shift   = BP_CLKCTRL_##er##_CLKGATE##es,        \
-               .get_rate       = name##_get_rate,                      \
-               .enable         = _raw_clk_enable,                      \
-               .disable        = _raw_clk_disable,                     \
-               .parent         = &pll_clk,                             \
-       }
-
-_DEFINE_CLOCK_REF(ref_cpu_clk, FRAC, CPU);
-_DEFINE_CLOCK_REF(ref_emi_clk, FRAC, EMI);
-_DEFINE_CLOCK_REF(ref_pix_clk, FRAC, PIX);
-_DEFINE_CLOCK_REF(ref_io_clk, FRAC, IO);
-
-/*
- * General clocks
- *
- * clk_get_rate
- */
-static unsigned long rtc_clk_get_rate(struct clk *clk)
-{
-       /* ref_xtal_clk is implemented as the only parent */
-       return clk_get_rate(clk->parent) / 768;
-}
-
-static unsigned long clk32k_clk_get_rate(struct clk *clk)
-{
-       return clk->parent->get_rate(clk->parent) / 750;
-}
-
-#define _CLK_GET_RATE(name, rs)                                                \
-static unsigned long name##_get_rate(struct clk *clk)                  \
-{                                                                      \
-       u32 reg, div;                                                   \
-                                                                       \
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs);         \
-                                                                       \
-       if (clk->parent == &ref_xtal_clk)                               \
-               div = (reg & BM_CLKCTRL_##rs##_DIV_XTAL) >>             \
-                       BP_CLKCTRL_##rs##_DIV_XTAL;                     \
-       else                                                            \
-               div = (reg & BM_CLKCTRL_##rs##_DIV_##rs) >>             \
-                       BP_CLKCTRL_##rs##_DIV_##rs;                     \
-                                                                       \
-       if (!div)                                                       \
-               return -EINVAL;                                         \
-                                                                       \
-       return clk_get_rate(clk->parent) / div;                         \
-}
-
-_CLK_GET_RATE(cpu_clk, CPU)
-_CLK_GET_RATE(emi_clk, EMI)
-
-#define _CLK_GET_RATE1(name, rs)                                       \
-static unsigned long name##_get_rate(struct clk *clk)                  \
-{                                                                      \
-       u32 reg, div;                                                   \
-                                                                       \
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs);         \
-       div = (reg & BM_CLKCTRL_##rs##_DIV) >> BP_CLKCTRL_##rs##_DIV;   \
-                                                                       \
-       if (!div)                                                       \
-               return -EINVAL;                                         \
-                                                                       \
-       return clk_get_rate(clk->parent) / div;                         \
-}
-
-_CLK_GET_RATE1(hbus_clk, HBUS)
-_CLK_GET_RATE1(xbus_clk, XBUS)
-_CLK_GET_RATE1(ssp_clk, SSP)
-_CLK_GET_RATE1(gpmi_clk, GPMI)
-_CLK_GET_RATE1(lcdif_clk, PIX)
-
-#define _CLK_GET_RATE_STUB(name)                                       \
-static unsigned long name##_get_rate(struct clk *clk)                  \
-{                                                                      \
-       return clk_get_rate(clk->parent);                               \
-}
-
-_CLK_GET_RATE_STUB(uart_clk)
-_CLK_GET_RATE_STUB(audio_clk)
-_CLK_GET_RATE_STUB(pwm_clk)
-
-/*
- * clk_set_rate
- */
-static int cpu_clk_set_rate(struct clk *clk, unsigned long rate)
-{
-       u32 reg, bm_busy, div_max, d, f, div, frac;
-       unsigned long diff, parent_rate, calc_rate;
-
-       parent_rate = clk_get_rate(clk->parent);
-
-       if (clk->parent == &ref_xtal_clk) {
-               div_max = BM_CLKCTRL_CPU_DIV_XTAL >> BP_CLKCTRL_CPU_DIV_XTAL;
-               bm_busy = BM_CLKCTRL_CPU_BUSY_REF_XTAL;
-               div = DIV_ROUND_UP(parent_rate, rate);
-               if (div == 0 || div > div_max)
-                       return -EINVAL;
-       } else {
-               div_max = BM_CLKCTRL_CPU_DIV_CPU >> BP_CLKCTRL_CPU_DIV_CPU;
-               bm_busy = BM_CLKCTRL_CPU_BUSY_REF_CPU;
-               rate >>= PARENT_RATE_SHIFT;
-               parent_rate >>= PARENT_RATE_SHIFT;
-               diff = parent_rate;
-               div = frac = 1;
-               for (d = 1; d <= div_max; d++) {
-                       f = parent_rate * 18 / d / rate;
-                       if ((parent_rate * 18 / d) % rate)
-                               f++;
-                       if (f < 18 || f > 35)
-                               continue;
-
-                       calc_rate = parent_rate * 18 / f / d;
-                       if (calc_rate > rate)
-                               continue;
-
-                       if (rate - calc_rate < diff) {
-                               frac = f;
-                               div = d;
-                               diff = rate - calc_rate;
-                       }
-
-                       if (diff == 0)
-                               break;
-               }
-
-               if (diff == parent_rate)
-                       return -EINVAL;
-
-               reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
-               reg &= ~BM_CLKCTRL_FRAC_CPUFRAC;
-               reg |= frac;
-               __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
-       }
-
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);
-       reg &= ~BM_CLKCTRL_CPU_DIV_CPU;
-       reg |= div << BP_CLKCTRL_CPU_DIV_CPU;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);
-
-       mxs_clkctrl_timeout(HW_CLKCTRL_CPU, bm_busy);
-
-       return 0;
-}
-
-#define _CLK_SET_RATE(name, dr)                                                \
-static int name##_set_rate(struct clk *clk, unsigned long rate)                \
-{                                                                      \
-       u32 reg, div_max, div;                                          \
-       unsigned long parent_rate;                                      \
-                                                                       \
-       parent_rate = clk_get_rate(clk->parent);                        \
-       div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV;       \
-                                                                       \
-       div = DIV_ROUND_UP(parent_rate, rate);                          \
-       if (div == 0 || div > div_max)                                  \
-               return -EINVAL;                                         \
-                                                                       \
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);         \
-       reg &= ~BM_CLKCTRL_##dr##_DIV;                                  \
-       reg |= div << BP_CLKCTRL_##dr##_DIV;                            \
-       if (reg & (1 << clk->enable_shift)) {                           \
-               pr_err("%s: clock is gated\n", __func__);               \
-               return -EINVAL;                                         \
-       }                                                               \
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);         \
-                                                                       \
-       mxs_clkctrl_timeout(HW_CLKCTRL_##dr, BM_CLKCTRL_##dr##_BUSY);   \
-       return 0;                                                       \
-}
-
-_CLK_SET_RATE(xbus_clk, XBUS)
-_CLK_SET_RATE(ssp_clk, SSP)
-_CLK_SET_RATE(gpmi_clk, GPMI)
-_CLK_SET_RATE(lcdif_clk, PIX)
-
-#define _CLK_SET_RATE_STUB(name)                                       \
-static int name##_set_rate(struct clk *clk, unsigned long rate)                \
-{                                                                      \
-       return -EINVAL;                                                 \
-}
-
-_CLK_SET_RATE_STUB(emi_clk)
-_CLK_SET_RATE_STUB(uart_clk)
-_CLK_SET_RATE_STUB(audio_clk)
-_CLK_SET_RATE_STUB(pwm_clk)
-_CLK_SET_RATE_STUB(clk32k_clk)
-
-/*
- * clk_set_parent
- */
-#define _CLK_SET_PARENT(name, bit)                                     \
-static int name##_set_parent(struct clk *clk, struct clk *parent)      \
-{                                                                      \
-       if (parent != clk->parent) {                                    \
-               __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit,            \
-                        CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG);    \
-               clk->parent = parent;                                   \
-       }                                                               \
-                                                                       \
-       return 0;                                                       \
-}
-
-_CLK_SET_PARENT(cpu_clk, CPU)
-_CLK_SET_PARENT(emi_clk, EMI)
-_CLK_SET_PARENT(ssp_clk, SSP)
-_CLK_SET_PARENT(gpmi_clk, GPMI)
-_CLK_SET_PARENT(lcdif_clk, PIX)
-
-#define _CLK_SET_PARENT_STUB(name)                                     \
-static int name##_set_parent(struct clk *clk, struct clk *parent)      \
-{                                                                      \
-       if (parent != clk->parent)                                      \
-               return -EINVAL;                                         \
-       else                                                            \
-               return 0;                                               \
-}
-
-_CLK_SET_PARENT_STUB(uart_clk)
-_CLK_SET_PARENT_STUB(audio_clk)
-_CLK_SET_PARENT_STUB(pwm_clk)
-_CLK_SET_PARENT_STUB(clk32k_clk)
-
-/*
- * clk definition
- */
-static struct clk cpu_clk = {
-       .get_rate = cpu_clk_get_rate,
-       .set_rate = cpu_clk_set_rate,
-       .set_parent = cpu_clk_set_parent,
-       .parent = &ref_cpu_clk,
-};
-
-static struct clk hbus_clk = {
-       .get_rate = hbus_clk_get_rate,
-       .parent = &cpu_clk,
-};
-
-static struct clk xbus_clk = {
-       .get_rate = xbus_clk_get_rate,
-       .set_rate = xbus_clk_set_rate,
-       .parent = &ref_xtal_clk,
-};
-
-static struct clk rtc_clk = {
-       .get_rate = rtc_clk_get_rate,
-       .parent = &ref_xtal_clk,
-};
-
-/* usb_clk gate is controlled in DIGCTRL other than CLKCTRL */
-static struct clk usb_clk = {
-       .enable_reg = DIGCTRL_BASE_ADDR,
-       .enable_shift = 2,
-       .enable = _raw_clk_enable,
-       .disable = _raw_clk_disable,
-       .parent = &pll_clk,
-};
-
-#define _DEFINE_CLOCK(name, er, es, p)                                 \
-       static struct clk name = {                                      \
-               .enable_reg     = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er,  \
-               .enable_shift   = BP_CLKCTRL_##er##_##es,               \
-               .get_rate       = name##_get_rate,                      \
-               .set_rate       = name##_set_rate,                      \
-               .set_parent     = name##_set_parent,                    \
-               .enable         = _raw_clk_enable,                      \
-               .disable        = _raw_clk_disable,                     \
-               .parent         = p,                                    \
-       }
-
-_DEFINE_CLOCK(emi_clk, EMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp_clk, SSP, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(gpmi_clk, GPMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(lcdif_clk, PIX, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(uart_clk, XTAL, UART_CLK_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(audio_clk, XTAL, FILT_CLK24M_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(pwm_clk, XTAL, PWM_CLK24M_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(clk32k_clk, XTAL, TIMROT_CLK32K_GATE, &ref_xtal_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
-       { \
-               .dev_id = d, \
-               .con_id = n, \
-               .clk = &c, \
-       },
-
-static struct clk_lookup lookups[] = {
-       /* for amba bus driver */
-       _REGISTER_CLOCK("duart", "apb_pclk", xbus_clk)
-       /* for amba-pl011 driver */
-       _REGISTER_CLOCK("duart", NULL, uart_clk)
-       _REGISTER_CLOCK("mxs-auart.0", NULL, uart_clk)
-       _REGISTER_CLOCK("rtc", NULL, rtc_clk)
-       _REGISTER_CLOCK("mxs-dma-apbh", NULL, hbus_clk)
-       _REGISTER_CLOCK("mxs-dma-apbx", NULL, xbus_clk)
-       _REGISTER_CLOCK("mxs-mmc.0", NULL, ssp_clk)
-       _REGISTER_CLOCK("mxs-mmc.1", NULL, ssp_clk)
-       _REGISTER_CLOCK(NULL, "usb", usb_clk)
-       _REGISTER_CLOCK(NULL, "audio", audio_clk)
-       _REGISTER_CLOCK("mxs-pwm.0", NULL, pwm_clk)
-       _REGISTER_CLOCK("mxs-pwm.1", NULL, pwm_clk)
-       _REGISTER_CLOCK("mxs-pwm.2", NULL, pwm_clk)
-       _REGISTER_CLOCK("mxs-pwm.3", NULL, pwm_clk)
-       _REGISTER_CLOCK("mxs-pwm.4", NULL, pwm_clk)
-       _REGISTER_CLOCK("imx23-fb", NULL, lcdif_clk)
-       _REGISTER_CLOCK("imx23-gpmi-nand", NULL, gpmi_clk)
-};
-
-static int clk_misc_init(void)
-{
-       u32 reg;
-       int ret;
-
-       /* Fix up parent per register setting */
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ);
-       cpu_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_CPU) ?
-                       &ref_xtal_clk : &ref_cpu_clk;
-       emi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_EMI) ?
-                       &ref_xtal_clk : &ref_emi_clk;
-       ssp_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP) ?
-                       &ref_xtal_clk : &ref_io_clk;
-       gpmi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_GPMI) ?
-                       &ref_xtal_clk : &ref_io_clk;
-       lcdif_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_PIX) ?
-                       &ref_xtal_clk : &ref_pix_clk;
-
-       /* Use int div over frac when both are available */
-       __raw_writel(BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN,
-                       CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
-       __raw_writel(BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN,
-                       CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
-       __raw_writel(BM_CLKCTRL_HBUS_DIV_FRAC_EN,
-                       CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS_CLR);
-
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
-       reg &= ~BM_CLKCTRL_XBUS_DIV_FRAC_EN;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
-
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP);
-       reg &= ~BM_CLKCTRL_SSP_DIV_FRAC_EN;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP);
-
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
-       reg &= ~BM_CLKCTRL_GPMI_DIV_FRAC_EN;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
-
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_PIX);
-       reg &= ~BM_CLKCTRL_PIX_DIV_FRAC_EN;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_PIX);
-
-       /*
-        * Set safe hbus clock divider. A divider of 3 ensure that
-        * the Vddd voltage required for the cpu clock is sufficiently
-        * high for the hbus clock.
-        */
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
-       reg &= BM_CLKCTRL_HBUS_DIV;
-       reg |= 3 << BP_CLKCTRL_HBUS_DIV;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
-
-       ret = mxs_clkctrl_timeout(HW_CLKCTRL_HBUS, BM_CLKCTRL_HBUS_BUSY);
-
-       /* Gate off cpu clock in WFI for power saving */
-       __raw_writel(BM_CLKCTRL_CPU_INTERRUPT_WAIT,
-                       CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_SET);
-
-       /*
-        * 480 MHz seems too high to be ssp clock source directly,
-        * so set frac to get a 288 MHz ref_io.
-        */
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
-       reg &= ~BM_CLKCTRL_FRAC_IOFRAC;
-       reg |= 30 << BP_CLKCTRL_FRAC_IOFRAC;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
-
-       return ret;
-}
-
-int __init mx23_clocks_init(void)
-{
-       clk_misc_init();
-
-       /*
-        * source ssp clock from ref_io than ref_xtal,
-        * as ref_xtal only provides 24 MHz as maximum.
-        */
-       clk_set_parent(&ssp_clk, &ref_io_clk);
-
-       clk_prepare_enable(&cpu_clk);
-       clk_prepare_enable(&hbus_clk);
-       clk_prepare_enable(&xbus_clk);
-       clk_prepare_enable(&emi_clk);
-       clk_prepare_enable(&uart_clk);
-
-       clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-       mxs_timer_init(&clk32k_clk, MX23_INT_TIMER0);
-
-       return 0;
-}
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
deleted file mode 100644 (file)
index cea29c9..0000000
+++ /dev/null
@@ -1,803 +0,0 @@
-/*
- * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/clkdev.h>
-#include <linux/spinlock.h>
-
-#include <asm/clkdev.h>
-#include <asm/div64.h>
-
-#include <mach/mx28.h>
-#include <mach/common.h>
-#include <mach/clock.h>
-#include <mach/digctl.h>
-
-#include "regs-clkctrl-mx28.h"
-
-#define CLKCTRL_BASE_ADDR      MX28_IO_ADDRESS(MX28_CLKCTRL_BASE_ADDR)
-#define DIGCTRL_BASE_ADDR      MX28_IO_ADDRESS(MX28_DIGCTL_BASE_ADDR)
-
-#define PARENT_RATE_SHIFT      8
-
-static struct clk pll2_clk;
-static struct clk cpu_clk;
-static struct clk emi_clk;
-static struct clk saif0_clk;
-static struct clk saif1_clk;
-static struct clk clk32k_clk;
-static DEFINE_SPINLOCK(clkmux_lock);
-
-/*
- * HW_SAIF_CLKMUX_SEL:
- *  DIRECT(0x0): SAIF0 clock pins selected for SAIF0 input clocks, and SAIF1
- *             clock pins selected for SAIF1 input clocks.
- *  CROSSINPUT(0x1): SAIF1 clock inputs selected for SAIF0 input clocks, and
- *             SAIF0 clock inputs selected for SAIF1 input clocks.
- *  EXTMSTR0(0x2): SAIF0 clock pin selected for both SAIF0 and SAIF1 input
- *             clocks.
- *  EXTMSTR1(0x3): SAIF1 clock pin selected for both SAIF0 and SAIF1 input
- *             clocks.
- */
-int mxs_saif_clkmux_select(unsigned int clkmux)
-{
-       if (clkmux > 0x3)
-               return -EINVAL;
-
-       spin_lock(&clkmux_lock);
-       __raw_writel(BM_DIGCTL_CTRL_SAIF_CLKMUX,
-                       DIGCTRL_BASE_ADDR + HW_DIGCTL_CTRL + MXS_CLR_ADDR);
-       __raw_writel(clkmux << BP_DIGCTL_CTRL_SAIF_CLKMUX,
-                       DIGCTRL_BASE_ADDR + HW_DIGCTL_CTRL + MXS_SET_ADDR);
-       spin_unlock(&clkmux_lock);
-
-       return 0;
-}
-
-static int _raw_clk_enable(struct clk *clk)
-{
-       u32 reg;
-
-       if (clk->enable_reg) {
-               reg = __raw_readl(clk->enable_reg);
-               reg &= ~(1 << clk->enable_shift);
-               __raw_writel(reg, clk->enable_reg);
-       }
-
-       return 0;
-}
-
-static void _raw_clk_disable(struct clk *clk)
-{
-       u32 reg;
-
-       if (clk->enable_reg) {
-               reg = __raw_readl(clk->enable_reg);
-               reg |= 1 << clk->enable_shift;
-               __raw_writel(reg, clk->enable_reg);
-       }
-}
-
-/*
- * ref_xtal_clk
- */
-static unsigned long ref_xtal_clk_get_rate(struct clk *clk)
-{
-       return 24000000;
-}
-
-static struct clk ref_xtal_clk = {
-       .get_rate = ref_xtal_clk_get_rate,
-};
-
-/*
- * pll_clk
- */
-static unsigned long pll0_clk_get_rate(struct clk *clk)
-{
-       return 480000000;
-}
-
-static unsigned long pll1_clk_get_rate(struct clk *clk)
-{
-       return 480000000;
-}
-
-static unsigned long pll2_clk_get_rate(struct clk *clk)
-{
-       return 50000000;
-}
-
-#define _CLK_ENABLE_PLL(name, r, g)                                    \
-static int name##_enable(struct clk *clk)                              \
-{                                                                      \
-       __raw_writel(BM_CLKCTRL_##r##CTRL0_POWER,                       \
-                    CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_SET);    \
-       udelay(10);                                                     \
-                                                                       \
-       if (clk == &pll2_clk)                                           \
-               __raw_writel(BM_CLKCTRL_##r##CTRL0_##g,                 \
-                       CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_CLR); \
-       else                                                            \
-               __raw_writel(BM_CLKCTRL_##r##CTRL0_##g,                 \
-                       CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_SET); \
-                                                                       \
-       return 0;                                                       \
-}
-
-_CLK_ENABLE_PLL(pll0_clk, PLL0, EN_USB_CLKS)
-_CLK_ENABLE_PLL(pll1_clk, PLL1, EN_USB_CLKS)
-_CLK_ENABLE_PLL(pll2_clk, PLL2, CLKGATE)
-
-#define _CLK_DISABLE_PLL(name, r, g)                                   \
-static void name##_disable(struct clk *clk)                            \
-{                                                                      \
-       __raw_writel(BM_CLKCTRL_##r##CTRL0_POWER,                       \
-                    CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_CLR);    \
-                                                                       \
-       if (clk == &pll2_clk)                                           \
-               __raw_writel(BM_CLKCTRL_##r##CTRL0_##g,                 \
-                       CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_SET); \
-       else                                                            \
-               __raw_writel(BM_CLKCTRL_##r##CTRL0_##g,                 \
-                       CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_CLR); \
-                                                                       \
-}
-
-_CLK_DISABLE_PLL(pll0_clk, PLL0, EN_USB_CLKS)
-_CLK_DISABLE_PLL(pll1_clk, PLL1, EN_USB_CLKS)
-_CLK_DISABLE_PLL(pll2_clk, PLL2, CLKGATE)
-
-#define _DEFINE_CLOCK_PLL(name)                                                \
-       static struct clk name = {                                      \
-               .get_rate       = name##_get_rate,                      \
-               .enable         = name##_enable,                        \
-               .disable        = name##_disable,                       \
-               .parent         = &ref_xtal_clk,                        \
-       }
-
-_DEFINE_CLOCK_PLL(pll0_clk);
-_DEFINE_CLOCK_PLL(pll1_clk);
-_DEFINE_CLOCK_PLL(pll2_clk);
-
-/*
- * ref_clk
- */
-#define _CLK_GET_RATE_REF(name, sr, ss)                                        \
-static unsigned long name##_get_rate(struct clk *clk)                  \
-{                                                                      \
-       unsigned long parent_rate;                                      \
-       u32 reg, div;                                                   \
-                                                                       \
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##sr);         \
-       div = (reg >> BP_CLKCTRL_##sr##_##ss##FRAC) & 0x3f;             \
-       parent_rate = clk_get_rate(clk->parent);                        \
-                                                                       \
-       return SH_DIV((parent_rate >> PARENT_RATE_SHIFT) * 18,          \
-                       div, PARENT_RATE_SHIFT);                        \
-}
-
-_CLK_GET_RATE_REF(ref_cpu_clk, FRAC0, CPU)
-_CLK_GET_RATE_REF(ref_emi_clk, FRAC0, EMI)
-_CLK_GET_RATE_REF(ref_io0_clk, FRAC0, IO0)
-_CLK_GET_RATE_REF(ref_io1_clk, FRAC0, IO1)
-_CLK_GET_RATE_REF(ref_pix_clk, FRAC1, PIX)
-_CLK_GET_RATE_REF(ref_gpmi_clk, FRAC1, GPMI)
-
-#define _DEFINE_CLOCK_REF(name, er, es)                                        \
-       static struct clk name = {                                      \
-               .enable_reg     = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er,  \
-               .enable_shift   = BP_CLKCTRL_##er##_CLKGATE##es,        \
-               .get_rate       = name##_get_rate,                      \
-               .enable         = _raw_clk_enable,                      \
-               .disable        = _raw_clk_disable,                     \
-               .parent         = &pll0_clk,                            \
-       }
-
-_DEFINE_CLOCK_REF(ref_cpu_clk, FRAC0, CPU);
-_DEFINE_CLOCK_REF(ref_emi_clk, FRAC0, EMI);
-_DEFINE_CLOCK_REF(ref_io0_clk, FRAC0, IO0);
-_DEFINE_CLOCK_REF(ref_io1_clk, FRAC0, IO1);
-_DEFINE_CLOCK_REF(ref_pix_clk, FRAC1, PIX);
-_DEFINE_CLOCK_REF(ref_gpmi_clk, FRAC1, GPMI);
-
-/*
- * General clocks
- *
- * clk_get_rate
- */
-static unsigned long lradc_clk_get_rate(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) / 16;
-}
-
-static unsigned long rtc_clk_get_rate(struct clk *clk)
-{
-       /* ref_xtal_clk is implemented as the only parent */
-       return clk_get_rate(clk->parent) / 768;
-}
-
-static unsigned long clk32k_clk_get_rate(struct clk *clk)
-{
-       return clk->parent->get_rate(clk->parent) / 750;
-}
-
-static unsigned long spdif_clk_get_rate(struct clk *clk)
-{
-       return clk_get_rate(clk->parent) / 4;
-}
-
-#define _CLK_GET_RATE(name, rs)                                                \
-static unsigned long name##_get_rate(struct clk *clk)                  \
-{                                                                      \
-       u32 reg, div;                                                   \
-                                                                       \
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs);         \
-                                                                       \
-       if (clk->parent == &ref_xtal_clk)                               \
-               div = (reg & BM_CLKCTRL_##rs##_DIV_XTAL) >>             \
-                       BP_CLKCTRL_##rs##_DIV_XTAL;                     \
-       else                                                            \
-               div = (reg & BM_CLKCTRL_##rs##_DIV_##rs) >>             \
-                       BP_CLKCTRL_##rs##_DIV_##rs;                     \
-                                                                       \
-       if (!div)                                                       \
-               return -EINVAL;                                         \
-                                                                       \
-       return clk_get_rate(clk->parent) / div;                         \
-}
-
-_CLK_GET_RATE(cpu_clk, CPU)
-_CLK_GET_RATE(emi_clk, EMI)
-
-#define _CLK_GET_RATE1(name, rs)                                       \
-static unsigned long name##_get_rate(struct clk *clk)                  \
-{                                                                      \
-       u32 reg, div;                                                   \
-                                                                       \
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs);         \
-       div = (reg & BM_CLKCTRL_##rs##_DIV) >> BP_CLKCTRL_##rs##_DIV;   \
-                                                                       \
-       if (!div)                                                       \
-               return -EINVAL;                                         \
-                                                                       \
-       if (clk == &saif0_clk || clk == &saif1_clk)                     \
-               return clk_get_rate(clk->parent) >> 16 * div;           \
-       else                                                            \
-               return clk_get_rate(clk->parent) / div;                 \
-}
-
-_CLK_GET_RATE1(hbus_clk, HBUS)
-_CLK_GET_RATE1(xbus_clk, XBUS)
-_CLK_GET_RATE1(ssp0_clk, SSP0)
-_CLK_GET_RATE1(ssp1_clk, SSP1)
-_CLK_GET_RATE1(ssp2_clk, SSP2)
-_CLK_GET_RATE1(ssp3_clk, SSP3)
-_CLK_GET_RATE1(gpmi_clk, GPMI)
-_CLK_GET_RATE1(lcdif_clk, DIS_LCDIF)
-_CLK_GET_RATE1(saif0_clk, SAIF0)
-_CLK_GET_RATE1(saif1_clk, SAIF1)
-
-#define _CLK_GET_RATE_STUB(name)                                       \
-static unsigned long name##_get_rate(struct clk *clk)                  \
-{                                                                      \
-       return clk_get_rate(clk->parent);                               \
-}
-
-_CLK_GET_RATE_STUB(uart_clk)
-_CLK_GET_RATE_STUB(pwm_clk)
-_CLK_GET_RATE_STUB(can0_clk)
-_CLK_GET_RATE_STUB(can1_clk)
-_CLK_GET_RATE_STUB(fec_clk)
-
-/*
- * clk_set_rate
- */
-/* fool compiler */
-#define BM_CLKCTRL_CPU_DIV     0
-#define BP_CLKCTRL_CPU_DIV     0
-#define BM_CLKCTRL_CPU_BUSY    0
-
-#define _CLK_SET_RATE(name, dr, fr, fs)                                        \
-static int name##_set_rate(struct clk *clk, unsigned long rate)                \
-{                                                                      \
-       u32 reg, bm_busy, div_max, d, f, div, frac;                     \
-       unsigned long diff, parent_rate, calc_rate;                     \
-                                                                       \
-       div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV;       \
-       bm_busy = BM_CLKCTRL_##dr##_BUSY;                               \
-                                                                       \
-       if (clk->parent == &ref_xtal_clk) {                             \
-               parent_rate = clk_get_rate(clk->parent);                \
-               div = DIV_ROUND_UP(parent_rate, rate);                  \
-               if (clk == &cpu_clk) {                                  \
-                       div_max = BM_CLKCTRL_CPU_DIV_XTAL >>            \
-                               BP_CLKCTRL_CPU_DIV_XTAL;                \
-                       bm_busy = BM_CLKCTRL_CPU_BUSY_REF_XTAL;         \
-               }                                                       \
-               if (div == 0 || div > div_max)                          \
-                       return -EINVAL;                                 \
-       } else {                                                        \
-               /*                                                      \
-                * hack alert: this block modifies clk->parent, too,    \
-                * so the base to use it the grand parent.              \
-                */                                                     \
-               parent_rate = clk_get_rate(clk->parent->parent);        \
-               rate >>= PARENT_RATE_SHIFT;                             \
-               parent_rate >>= PARENT_RATE_SHIFT;                      \
-               diff = parent_rate;                                     \
-               div = frac = 1;                                         \
-               if (clk == &cpu_clk) {                                  \
-                       div_max = BM_CLKCTRL_CPU_DIV_CPU >>             \
-                               BP_CLKCTRL_CPU_DIV_CPU;                 \
-                       bm_busy = BM_CLKCTRL_CPU_BUSY_REF_CPU;          \
-               }                                                       \
-               for (d = 1; d <= div_max; d++) {                        \
-                       f = parent_rate * 18 / d / rate;                \
-                       if ((parent_rate * 18 / d) % rate)              \
-                               f++;                                    \
-                       if (f < 18 || f > 35)                           \
-                               continue;                               \
-                                                                       \
-                       calc_rate = parent_rate * 18 / f / d;           \
-                       if (calc_rate > rate)                           \
-                               continue;                               \
-                                                                       \
-                       if (rate - calc_rate < diff) {                  \
-                               frac = f;                               \
-                               div = d;                                \
-                               diff = rate - calc_rate;                \
-                       }                                               \
-                                                                       \
-                       if (diff == 0)                                  \
-                               break;                                  \
-               }                                                       \
-                                                                       \
-               if (diff == parent_rate)                                \
-                       return -EINVAL;                                 \
-                                                                       \
-               reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##fr); \
-               reg &= ~BM_CLKCTRL_##fr##_##fs##FRAC;                   \
-               reg |= frac << BP_CLKCTRL_##fr##_##fs##FRAC;            \
-               __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##fr); \
-       }                                                               \
-                                                                       \
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);         \
-       if (clk == &cpu_clk) {                                          \
-               reg &= ~BM_CLKCTRL_CPU_DIV_CPU;                         \
-               reg |= div << BP_CLKCTRL_CPU_DIV_CPU;                   \
-       } else {                                                        \
-               reg &= ~BM_CLKCTRL_##dr##_DIV;                          \
-               reg |= div << BP_CLKCTRL_##dr##_DIV;                    \
-               if (reg & (1 << clk->enable_shift)) {                   \
-                       pr_err("%s: clock is gated\n", __func__);       \
-                       return -EINVAL;                                 \
-               }                                                       \
-       }                                                               \
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);         \
-                                                                       \
-       return mxs_clkctrl_timeout(HW_CLKCTRL_##dr, bm_busy);           \
-}
-
-_CLK_SET_RATE(cpu_clk, CPU, FRAC0, CPU)
-_CLK_SET_RATE(ssp0_clk, SSP0, FRAC0, IO0)
-_CLK_SET_RATE(ssp1_clk, SSP1, FRAC0, IO0)
-_CLK_SET_RATE(ssp2_clk, SSP2, FRAC0, IO1)
-_CLK_SET_RATE(ssp3_clk, SSP3, FRAC0, IO1)
-_CLK_SET_RATE(lcdif_clk, DIS_LCDIF, FRAC1, PIX)
-_CLK_SET_RATE(gpmi_clk, GPMI, FRAC1, GPMI)
-
-#define _CLK_SET_RATE1(name, dr)                                       \
-static int name##_set_rate(struct clk *clk, unsigned long rate)                \
-{                                                                      \
-       u32 reg, div_max, div;                                          \
-       unsigned long parent_rate;                                      \
-                                                                       \
-       parent_rate = clk_get_rate(clk->parent);                        \
-       div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV;       \
-                                                                       \
-       div = DIV_ROUND_UP(parent_rate, rate);                          \
-       if (div == 0 || div > div_max)                                  \
-               return -EINVAL;                                         \
-                                                                       \
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);         \
-       reg &= ~BM_CLKCTRL_##dr##_DIV;                                  \
-       reg |= div << BP_CLKCTRL_##dr##_DIV;                            \
-       if (reg & (1 << clk->enable_shift)) {                           \
-               pr_err("%s: clock is gated\n", __func__);               \
-               return -EINVAL;                                         \
-       }                                                               \
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);         \
-                                                                       \
-       return mxs_clkctrl_timeout(HW_CLKCTRL_##dr, BM_CLKCTRL_##dr##_BUSY);\
-}
-
-_CLK_SET_RATE1(xbus_clk, XBUS)
-
-/* saif clock uses 16 bits frac div */
-#define _CLK_SET_RATE_SAIF(name, rs)                                   \
-static int name##_set_rate(struct clk *clk, unsigned long rate)                \
-{                                                                      \
-       u16 div;                                                        \
-       u32 reg;                                                        \
-       u64 lrate;                                                      \
-       unsigned long parent_rate;                                      \
-                                                                       \
-       parent_rate = clk_get_rate(clk->parent);                        \
-       if (rate > parent_rate)                                         \
-               return -EINVAL;                                         \
-                                                                       \
-       lrate = (u64)rate << 16;                                        \
-       do_div(lrate, parent_rate);                                     \
-       div = (u16)lrate;                                               \
-                                                                       \
-       if (!div)                                                       \
-               return -EINVAL;                                         \
-                                                                       \
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs);         \
-       reg &= ~BM_CLKCTRL_##rs##_DIV;                                  \
-       reg |= div << BP_CLKCTRL_##rs##_DIV;                            \
-       if (reg & (1 << clk->enable_shift)) {                           \
-               pr_err("%s: clock is gated\n", __func__);               \
-               return -EINVAL;                                         \
-       }                                                               \
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs);         \
-                                                                       \
-       return mxs_clkctrl_timeout(HW_CLKCTRL_##rs, BM_CLKCTRL_##rs##_BUSY);\
-}
-
-_CLK_SET_RATE_SAIF(saif0_clk, SAIF0)
-_CLK_SET_RATE_SAIF(saif1_clk, SAIF1)
-
-#define _CLK_SET_RATE_STUB(name)                                       \
-static int name##_set_rate(struct clk *clk, unsigned long rate)                \
-{                                                                      \
-       return -EINVAL;                                                 \
-}
-
-_CLK_SET_RATE_STUB(emi_clk)
-_CLK_SET_RATE_STUB(uart_clk)
-_CLK_SET_RATE_STUB(pwm_clk)
-_CLK_SET_RATE_STUB(spdif_clk)
-_CLK_SET_RATE_STUB(clk32k_clk)
-_CLK_SET_RATE_STUB(can0_clk)
-_CLK_SET_RATE_STUB(can1_clk)
-_CLK_SET_RATE_STUB(fec_clk)
-
-/*
- * clk_set_parent
- */
-#define _CLK_SET_PARENT(name, bit)                                     \
-static int name##_set_parent(struct clk *clk, struct clk *parent)      \
-{                                                                      \
-       if (parent != clk->parent) {                                    \
-               __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit,            \
-                        CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG);    \
-               clk->parent = parent;                                   \
-       }                                                               \
-                                                                       \
-       return 0;                                                       \
-}
-
-_CLK_SET_PARENT(cpu_clk, CPU)
-_CLK_SET_PARENT(emi_clk, EMI)
-_CLK_SET_PARENT(ssp0_clk, SSP0)
-_CLK_SET_PARENT(ssp1_clk, SSP1)
-_CLK_SET_PARENT(ssp2_clk, SSP2)
-_CLK_SET_PARENT(ssp3_clk, SSP3)
-_CLK_SET_PARENT(lcdif_clk, DIS_LCDIF)
-_CLK_SET_PARENT(gpmi_clk, GPMI)
-_CLK_SET_PARENT(saif0_clk, SAIF0)
-_CLK_SET_PARENT(saif1_clk, SAIF1)
-
-#define _CLK_SET_PARENT_STUB(name)                                     \
-static int name##_set_parent(struct clk *clk, struct clk *parent)      \
-{                                                                      \
-       if (parent != clk->parent)                                      \
-               return -EINVAL;                                         \
-       else                                                            \
-               return 0;                                               \
-}
-
-_CLK_SET_PARENT_STUB(pwm_clk)
-_CLK_SET_PARENT_STUB(uart_clk)
-_CLK_SET_PARENT_STUB(clk32k_clk)
-_CLK_SET_PARENT_STUB(spdif_clk)
-_CLK_SET_PARENT_STUB(fec_clk)
-_CLK_SET_PARENT_STUB(can0_clk)
-_CLK_SET_PARENT_STUB(can1_clk)
-
-/*
- * clk definition
- */
-static struct clk cpu_clk = {
-       .get_rate = cpu_clk_get_rate,
-       .set_rate = cpu_clk_set_rate,
-       .set_parent = cpu_clk_set_parent,
-       .parent = &ref_cpu_clk,
-};
-
-static struct clk hbus_clk = {
-       .get_rate = hbus_clk_get_rate,
-       .parent = &cpu_clk,
-};
-
-static struct clk xbus_clk = {
-       .get_rate = xbus_clk_get_rate,
-       .set_rate = xbus_clk_set_rate,
-       .parent = &ref_xtal_clk,
-};
-
-static struct clk lradc_clk = {
-       .get_rate = lradc_clk_get_rate,
-       .parent = &clk32k_clk,
-};
-
-static struct clk rtc_clk = {
-       .get_rate = rtc_clk_get_rate,
-       .parent = &ref_xtal_clk,
-};
-
-/* usb_clk gate is controlled in DIGCTRL other than CLKCTRL */
-static struct clk usb0_clk = {
-       .enable_reg = DIGCTRL_BASE_ADDR,
-       .enable_shift = 2,
-       .enable = _raw_clk_enable,
-       .disable = _raw_clk_disable,
-       .parent = &pll0_clk,
-};
-
-static struct clk usb1_clk = {
-       .enable_reg = DIGCTRL_BASE_ADDR,
-       .enable_shift = 16,
-       .enable = _raw_clk_enable,
-       .disable = _raw_clk_disable,
-       .parent = &pll1_clk,
-};
-
-#define _DEFINE_CLOCK(name, er, es, p)                                 \
-       static struct clk name = {                                      \
-               .enable_reg     = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er,  \
-               .enable_shift   = BP_CLKCTRL_##er##_##es,               \
-               .get_rate       = name##_get_rate,                      \
-               .set_rate       = name##_set_rate,                      \
-               .set_parent     = name##_set_parent,                    \
-               .enable         = _raw_clk_enable,                      \
-               .disable        = _raw_clk_disable,                     \
-               .parent         = p,                                    \
-       }
-
-_DEFINE_CLOCK(emi_clk, EMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp0_clk, SSP0, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp1_clk, SSP1, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp2_clk, SSP2, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp3_clk, SSP3, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(lcdif_clk, DIS_LCDIF, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(gpmi_clk, GPMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(saif0_clk, SAIF0, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(saif1_clk, SAIF1, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(can0_clk, FLEXCAN, STOP_CAN0, &ref_xtal_clk);
-_DEFINE_CLOCK(can1_clk, FLEXCAN, STOP_CAN1, &ref_xtal_clk);
-_DEFINE_CLOCK(pwm_clk, XTAL, PWM_CLK24M_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(uart_clk, XTAL, UART_CLK_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(clk32k_clk, XTAL, TIMROT_CLK32K_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(spdif_clk, SPDIF, CLKGATE, &pll0_clk);
-_DEFINE_CLOCK(fec_clk, ENET, DISABLE, &hbus_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
-       { \
-               .dev_id = d, \
-               .con_id = n, \
-               .clk = &c, \
-       },
-
-static struct clk_lookup lookups[] = {
-       /* for amba bus driver */
-       _REGISTER_CLOCK("duart", "apb_pclk", xbus_clk)
-       /* for amba-pl011 driver */
-       _REGISTER_CLOCK("duart", NULL, uart_clk)
-       _REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk)
-       _REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk)
-       _REGISTER_CLOCK("imx28-gpmi-nand", NULL, gpmi_clk)
-       _REGISTER_CLOCK("mxs-auart.0", NULL, uart_clk)
-       _REGISTER_CLOCK("mxs-auart.1", NULL, uart_clk)
-       _REGISTER_CLOCK("mxs-auart.2", NULL, uart_clk)
-       _REGISTER_CLOCK("mxs-auart.3", NULL, uart_clk)
-       _REGISTER_CLOCK("mxs-auart.4", NULL, uart_clk)
-       _REGISTER_CLOCK("rtc", NULL, rtc_clk)
-       _REGISTER_CLOCK("pll2", NULL, pll2_clk)
-       _REGISTER_CLOCK("mxs-dma-apbh", NULL, hbus_clk)
-       _REGISTER_CLOCK("mxs-dma-apbx", NULL, xbus_clk)
-       _REGISTER_CLOCK("mxs-mmc.0", NULL, ssp0_clk)
-       _REGISTER_CLOCK("mxs-mmc.1", NULL, ssp1_clk)
-       _REGISTER_CLOCK("mxs-mmc.2", NULL, ssp2_clk)
-       _REGISTER_CLOCK("mxs-mmc.3", NULL, ssp3_clk)
-       _REGISTER_CLOCK("flexcan.0", NULL, can0_clk)
-       _REGISTER_CLOCK("flexcan.1", NULL, can1_clk)
-       _REGISTER_CLOCK(NULL, "usb0", usb0_clk)
-       _REGISTER_CLOCK(NULL, "usb1", usb1_clk)
-       _REGISTER_CLOCK("mxs-pwm.0", NULL, pwm_clk)
-       _REGISTER_CLOCK("mxs-pwm.1", NULL, pwm_clk)
-       _REGISTER_CLOCK("mxs-pwm.2", NULL, pwm_clk)
-       _REGISTER_CLOCK("mxs-pwm.3", NULL, pwm_clk)
-       _REGISTER_CLOCK("mxs-pwm.4", NULL, pwm_clk)
-       _REGISTER_CLOCK("mxs-pwm.5", NULL, pwm_clk)
-       _REGISTER_CLOCK("mxs-pwm.6", NULL, pwm_clk)
-       _REGISTER_CLOCK("mxs-pwm.7", NULL, pwm_clk)
-       _REGISTER_CLOCK(NULL, "lradc", lradc_clk)
-       _REGISTER_CLOCK(NULL, "spdif", spdif_clk)
-       _REGISTER_CLOCK("imx28-fb", NULL, lcdif_clk)
-       _REGISTER_CLOCK("mxs-saif.0", NULL, saif0_clk)
-       _REGISTER_CLOCK("mxs-saif.1", NULL, saif1_clk)
-};
-
-static int clk_misc_init(void)
-{
-       u32 reg;
-       int ret;
-
-       /* Fix up parent per register setting */
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ);
-       cpu_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_CPU) ?
-                       &ref_xtal_clk : &ref_cpu_clk;
-       emi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_EMI) ?
-                       &ref_xtal_clk : &ref_emi_clk;
-       ssp0_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP0) ?
-                       &ref_xtal_clk : &ref_io0_clk;
-       ssp1_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP1) ?
-                       &ref_xtal_clk : &ref_io0_clk;
-       ssp2_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP2) ?
-                       &ref_xtal_clk : &ref_io1_clk;
-       ssp3_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP3) ?
-                       &ref_xtal_clk : &ref_io1_clk;
-       lcdif_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF) ?
-                       &ref_xtal_clk : &ref_pix_clk;
-       gpmi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_GPMI) ?
-                       &ref_xtal_clk : &ref_gpmi_clk;
-       saif0_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SAIF0) ?
-                       &ref_xtal_clk : &pll0_clk;
-       saif1_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SAIF1) ?
-                       &ref_xtal_clk : &pll0_clk;
-
-       /* Use int div over frac when both are available */
-       __raw_writel(BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN,
-                       CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
-       __raw_writel(BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN,
-                       CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
-       __raw_writel(BM_CLKCTRL_HBUS_DIV_FRAC_EN,
-                       CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS_CLR);
-
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
-       reg &= ~BM_CLKCTRL_XBUS_DIV_FRAC_EN;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
-
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP0);
-       reg &= ~BM_CLKCTRL_SSP0_DIV_FRAC_EN;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP0);
-
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP1);
-       reg &= ~BM_CLKCTRL_SSP1_DIV_FRAC_EN;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP1);
-
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP2);
-       reg &= ~BM_CLKCTRL_SSP2_DIV_FRAC_EN;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP2);
-
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP3);
-       reg &= ~BM_CLKCTRL_SSP3_DIV_FRAC_EN;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP3);
-
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
-       reg &= ~BM_CLKCTRL_GPMI_DIV_FRAC_EN;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
-
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_DIS_LCDIF);
-       reg &= ~BM_CLKCTRL_DIS_LCDIF_DIV_FRAC_EN;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_DIS_LCDIF);
-
-       /* SAIF has to use frac div for functional operation */
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF0);
-       reg |= BM_CLKCTRL_SAIF0_DIV_FRAC_EN;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF0);
-
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF1);
-       reg |= BM_CLKCTRL_SAIF1_DIV_FRAC_EN;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF1);
-
-       /*
-        * Set safe hbus clock divider. A divider of 3 ensure that
-        * the Vddd voltage required for the cpu clock is sufficiently
-        * high for the hbus clock.
-        */
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
-       reg &= BM_CLKCTRL_HBUS_DIV;
-       reg |= 3 << BP_CLKCTRL_HBUS_DIV;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
-
-       ret = mxs_clkctrl_timeout(HW_CLKCTRL_HBUS, BM_CLKCTRL_HBUS_ASM_BUSY);
-
-       /* Gate off cpu clock in WFI for power saving */
-       __raw_writel(BM_CLKCTRL_CPU_INTERRUPT_WAIT,
-                       CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_SET);
-
-       /*
-        * Extra fec clock setting
-        * The DENX M28 uses an external clock source
-        * and the clock output must not be enabled
-        */
-       if (!machine_is_m28evk()) {
-               reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET);
-               reg &= ~BM_CLKCTRL_ENET_SLEEP;
-               reg |= BM_CLKCTRL_ENET_CLK_OUT_EN;
-               __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET);
-       }
-
-       /*
-        * 480 MHz seems too high to be ssp clock source directly,
-        * so set frac0 to get a 288 MHz ref_io0.
-        */
-       reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC0);
-       reg &= ~BM_CLKCTRL_FRAC0_IO0FRAC;
-       reg |= 30 << BP_CLKCTRL_FRAC0_IO0FRAC;
-       __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC0);
-
-       return ret;
-}
-
-int __init mx28_clocks_init(void)
-{
-       clk_misc_init();
-
-       /*
-        * source ssp clock from ref_io0 than ref_xtal,
-        * as ref_xtal only provides 24 MHz as maximum.
-        */
-       clk_set_parent(&ssp0_clk, &ref_io0_clk);
-       clk_set_parent(&ssp1_clk, &ref_io0_clk);
-       clk_set_parent(&ssp2_clk, &ref_io1_clk);
-       clk_set_parent(&ssp3_clk, &ref_io1_clk);
-
-       clk_prepare_enable(&cpu_clk);
-       clk_prepare_enable(&hbus_clk);
-       clk_prepare_enable(&xbus_clk);
-       clk_prepare_enable(&emi_clk);
-       clk_prepare_enable(&uart_clk);
-
-       clk_set_parent(&lcdif_clk, &ref_pix_clk);
-       clk_set_parent(&saif0_clk, &pll0_clk);
-       clk_set_parent(&saif1_clk, &pll0_clk);
-
-       /*
-        * Set an initial clock rate for the saif internal logic to work
-        * properly. This is important when working in EXTMASTER mode that
-        * uses the other saif's BITCLK&LRCLK but it still needs a basic
-        * clock which should be fast enough for the internal logic.
-        */
-       clk_set_rate(&saif0_clk, 24000000);
-       clk_set_rate(&saif1_clk, 24000000);
-
-       clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-       mxs_timer_init(&clk32k_clk, MX28_INT_TIMER0);
-
-       return 0;
-}
diff --git a/arch/arm/mach-mxs/clock.c b/arch/arm/mach-mxs/clock.c
deleted file mode 100644 (file)
index 97a6f4a..0000000
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Based on arch/arm/plat-omap/clock.c
- *
- * Copyright (C) 2004 - 2005 Nokia corporation
- * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
- * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
- * Copyright 2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA  02110-1301, USA.
- */
-
-/* #define DEBUG */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/proc_fs.h>
-#include <linux/semaphore.h>
-#include <linux/string.h>
-
-#include <mach/clock.h>
-
-static LIST_HEAD(clocks);
-static DEFINE_MUTEX(clocks_mutex);
-
-/*-------------------------------------------------------------------------
- * Standard clock functions defined in include/linux/clk.h
- *-------------------------------------------------------------------------*/
-
-static void __clk_disable(struct clk *clk)
-{
-       if (clk == NULL || IS_ERR(clk))
-               return;
-       WARN_ON(!clk->usecount);
-
-       if (!(--clk->usecount)) {
-               if (clk->disable)
-                       clk->disable(clk);
-               __clk_disable(clk->parent);
-       }
-}
-
-static int __clk_enable(struct clk *clk)
-{
-       if (clk == NULL || IS_ERR(clk))
-               return -EINVAL;
-
-       if (clk->usecount++ == 0) {
-               __clk_enable(clk->parent);
-
-               if (clk->enable)
-                       clk->enable(clk);
-       }
-       return 0;
-}
-
-/*
- * The clk_enable/clk_disable could be called by drivers in atomic context,
- * so they should not really hold mutex.  Instead, clk_prepare/clk_unprepare
- * can hold a mutex, as the pair will only be called in non-atomic context.
- * Before migrating to common clk framework, we can have __clk_enable and
- * __clk_disable called in clk_prepare/clk_unprepare with mutex held and
- * leave clk_enable/clk_disable as the dummy functions.
- */
-int clk_prepare(struct clk *clk)
-{
-       int ret = 0;
-
-       if (clk == NULL || IS_ERR(clk))
-               return -EINVAL;
-
-       mutex_lock(&clocks_mutex);
-       ret = __clk_enable(clk);
-       mutex_unlock(&clocks_mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL(clk_prepare);
-
-void clk_unprepare(struct clk *clk)
-{
-       if (clk == NULL || IS_ERR(clk))
-               return;
-
-       mutex_lock(&clocks_mutex);
-       __clk_disable(clk);
-       mutex_unlock(&clocks_mutex);
-}
-EXPORT_SYMBOL(clk_unprepare);
-
-int clk_enable(struct clk *clk)
-{
-       return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-       /* nothing to do */
-}
-EXPORT_SYMBOL(clk_disable);
-
-/* Retrieve the *current* clock rate. If the clock itself
- * does not provide a special calculation routine, ask
- * its parent and so on, until one is able to return
- * a valid clock rate
- */
-unsigned long clk_get_rate(struct clk *clk)
-{
-       if (clk == NULL || IS_ERR(clk))
-               return 0UL;
-
-       if (clk->get_rate)
-               return clk->get_rate(clk);
-
-       return clk_get_rate(clk->parent);
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-/* Round the requested clock rate to the nearest supported
- * rate that is less than or equal to the requested rate.
- * This is dependent on the clock's current parent.
- */
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
-       if (clk == NULL || IS_ERR(clk) || !clk->round_rate)
-               return 0;
-
-       return clk->round_rate(clk, rate);
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-/* Set the clock to the requested clock rate. The rate must
- * match a supported rate exactly based on what clk_round_rate returns
- */
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
-       int ret = -EINVAL;
-
-       if (clk == NULL || IS_ERR(clk) || clk->set_rate == NULL || rate == 0)
-               return ret;
-
-       mutex_lock(&clocks_mutex);
-       ret = clk->set_rate(clk, rate);
-       mutex_unlock(&clocks_mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-/* Set the clock's parent to another clock source */
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
-       int ret = -EINVAL;
-       struct clk *old;
-
-       if (clk == NULL || IS_ERR(clk) || parent == NULL ||
-           IS_ERR(parent) || clk->set_parent == NULL)
-               return ret;
-
-       if (clk->usecount)
-               clk_prepare_enable(parent);
-
-       mutex_lock(&clocks_mutex);
-       ret = clk->set_parent(clk, parent);
-       if (ret == 0) {
-               old = clk->parent;
-               clk->parent = parent;
-       } else {
-               old = parent;
-       }
-       mutex_unlock(&clocks_mutex);
-
-       if (clk->usecount)
-               clk_disable(old);
-
-       return ret;
-}
-EXPORT_SYMBOL(clk_set_parent);
-
-/* Retrieve the clock's parent clock source */
-struct clk *clk_get_parent(struct clk *clk)
-{
-       struct clk *ret = NULL;
-
-       if (clk == NULL || IS_ERR(clk))
-               return ret;
-
-       return clk->parent;
-}
-EXPORT_SYMBOL(clk_get_parent);
index b8913df4cfa209402affd945457517f1b1ef8677..19659de1c4e86c131958d50a73c0c7e11c3f5f9e 100644 (file)
@@ -1,6 +1,5 @@
 config MXS_HAVE_AMBA_DUART
        bool
-       select ARM_AMBA
 
 config MXS_HAVE_PLATFORM_AUART
        bool
index 6a0202b1016c7b287ccc04ea1d7e9e7c90116ee9..46824501de003505a6f7332ac819a65d67a5549e 100644 (file)
@@ -14,7 +14,7 @@
 #include <mach/mx28.h>
 #include <mach/devices-common.h>
 
-static struct platform_device *__init mxs_add_dma(const char *devid,
+struct platform_device *__init mxs_add_dma(const char *devid,
                                                resource_size_t base)
 {
        struct resource res[] = {
@@ -29,22 +29,3 @@ static struct platform_device *__init mxs_add_dma(const char *devid,
                                res, ARRAY_SIZE(res), NULL, 0,
                                DMA_BIT_MASK(32));
 }
-
-static int __init mxs_add_mxs_dma(void)
-{
-       char *apbh = "mxs-dma-apbh";
-       char *apbx = "mxs-dma-apbx";
-
-       if (cpu_is_mx23()) {
-               mxs_add_dma(apbh, MX23_APBH_DMA_BASE_ADDR);
-               mxs_add_dma(apbx, MX23_APBX_DMA_BASE_ADDR);
-       }
-
-       if (cpu_is_mx28()) {
-               mxs_add_dma(apbh, MX28_APBH_DMA_BASE_ADDR);
-               mxs_add_dma(apbx, MX28_APBX_DMA_BASE_ADDR);
-       }
-
-       return 0;
-}
-arch_initcall(mxs_add_mxs_dma);
index ed0885e414e09eb12b3f8f71fb50a9e4521bd6ed..cd99f19ec637ac3a6d61065189aa1d294076eccd 100644 (file)
@@ -14,7 +14,7 @@
 #include <mach/devices-common.h>
 
 struct platform_device *__init mxs_add_gpio(
-       int id, resource_size_t iobase, int irq)
+       char *name, int id, resource_size_t iobase, int irq)
 {
        struct resource res[] = {
                {
@@ -29,25 +29,5 @@ struct platform_device *__init mxs_add_gpio(
        };
 
        return platform_device_register_resndata(&mxs_apbh_bus,
-                       "gpio-mxs", id, res, ARRAY_SIZE(res), NULL, 0);
+                       name, id, res, ARRAY_SIZE(res), NULL, 0);
 }
-
-static int __init mxs_add_mxs_gpio(void)
-{
-       if (cpu_is_mx23()) {
-               mxs_add_gpio(0, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO0);
-               mxs_add_gpio(1, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO1);
-               mxs_add_gpio(2, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO2);
-       }
-
-       if (cpu_is_mx28()) {
-               mxs_add_gpio(0, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO0);
-               mxs_add_gpio(1, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO1);
-               mxs_add_gpio(2, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO2);
-               mxs_add_gpio(3, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO3);
-               mxs_add_gpio(4, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO4);
-       }
-
-       return 0;
-}
-postcore_initcall(mxs_add_mxs_gpio);
index bef9d923f54e89e9288366ee2f54fa85c688d7d3..b33c9d05c552de1a3f9bc8a6fde0dc9ca7122802 100644 (file)
@@ -17,8 +17,9 @@
 #include <mach/mx28.h>
 #include <mach/devices-common.h>
 
-#define mxs_mxs_mmc_data_entry_single(soc, _id, hwid)                  \
+#define mxs_mxs_mmc_data_entry_single(soc, _devid, _id, hwid)          \
        {                                                               \
+               .devid = _devid,                                        \
                .id = _id,                                              \
                .iobase = soc ## _SSP ## hwid ## _BASE_ADDR,            \
                .dma = soc ## _DMA_SSP ## hwid,                         \
                .irq_dma = soc ## _INT_SSP ## hwid ## _DMA,             \
        }
 
-#define mxs_mxs_mmc_data_entry(soc, _id, hwid)                         \
-       [_id] = mxs_mxs_mmc_data_entry_single(soc, _id, hwid)
+#define mxs_mxs_mmc_data_entry(soc, _devid, _id, hwid)                 \
+       [_id] = mxs_mxs_mmc_data_entry_single(soc, _devid, _id, hwid)
 
 
 #ifdef CONFIG_SOC_IMX23
 const struct mxs_mxs_mmc_data mx23_mxs_mmc_data[] __initconst = {
-       mxs_mxs_mmc_data_entry(MX23, 0, 1),
-       mxs_mxs_mmc_data_entry(MX23, 1, 2),
+       mxs_mxs_mmc_data_entry(MX23, "imx23-mmc", 0, 1),
+       mxs_mxs_mmc_data_entry(MX23, "imx23-mmc", 1, 2),
 };
 #endif
 
 #ifdef CONFIG_SOC_IMX28
 const struct mxs_mxs_mmc_data mx28_mxs_mmc_data[] __initconst = {
-       mxs_mxs_mmc_data_entry(MX28, 0, 0),
-       mxs_mxs_mmc_data_entry(MX28, 1, 1),
-       mxs_mxs_mmc_data_entry(MX28, 2, 2),
-       mxs_mxs_mmc_data_entry(MX28, 3, 3),
+       mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 0, 0),
+       mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 1, 1),
+       mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 2, 2),
+       mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 3, 3),
 };
 #endif
 
@@ -70,6 +71,6 @@ struct platform_device *__init mxs_add_mxs_mmc(
                },
        };
 
-       return mxs_add_platform_device("mxs-mmc", data->id,
+       return mxs_add_platform_device(data->devid, data->id,
                        res, ARRAY_SIZE(res), pdata, sizeof(*pdata));
 }
diff --git a/arch/arm/mach-mxs/include/mach/clock.h b/arch/arm/mach-mxs/include/mach/clock.h
deleted file mode 100644 (file)
index 592c9ab..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA  02110-1301, USA.
- */
-
-#ifndef __MACH_MXS_CLOCK_H__
-#define __MACH_MXS_CLOCK_H__
-
-#ifndef __ASSEMBLY__
-#include <linux/list.h>
-
-struct module;
-
-struct clk {
-       int id;
-       /* Source clock this clk depends on */
-       struct clk *parent;
-       /* Reference count of clock enable/disable */
-       __s8 usecount;
-       /* Register bit position for clock's enable/disable control. */
-       u8 enable_shift;
-       /* Register address for clock's enable/disable control. */
-       void __iomem *enable_reg;
-       u32 flags;
-       /* get the current clock rate (always a fresh value) */
-       unsigned long (*get_rate) (struct clk *);
-       /* Function ptr to set the clock to a new rate. The rate must match a
-          supported rate returned from round_rate. Leave blank if clock is not
-          programmable */
-       int (*set_rate) (struct clk *, unsigned long);
-       /* Function ptr to round the requested clock rate to the nearest
-          supported rate that is less than or equal to the requested rate. */
-       unsigned long (*round_rate) (struct clk *, unsigned long);
-       /* Function ptr to enable the clock. Leave blank if clock can not
-          be gated. */
-       int (*enable) (struct clk *);
-       /* Function ptr to disable the clock. Leave blank if clock can not
-          be gated. */
-       void (*disable) (struct clk *);
-       /* Function ptr to set the parent clock of the clock. */
-       int (*set_parent) (struct clk *, struct clk *);
-};
-
-int clk_register(struct clk *clk);
-void clk_unregister(struct clk *clk);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __MACH_MXS_CLOCK_H__ */
index 8d88399b73eff893651d829917bc1ec0bc6e07ba..de6c7ba425444ac2f7488c2a45fed2081887c79e 100644 (file)
 #ifndef __MACH_MXS_COMMON_H__
 #define __MACH_MXS_COMMON_H__
 
-struct clk;
-
 extern const u32 *mxs_get_ocotp(void);
 extern int mxs_reset_block(void __iomem *);
-extern void mxs_timer_init(struct clk *, int);
+extern void mxs_timer_init(int);
 extern void mxs_restart(char, const char *);
 extern int mxs_saif_clkmux_select(unsigned int clkmux);
 
 extern void mx23_soc_init(void);
-extern int mx23_register_gpios(void);
 extern int mx23_clocks_init(void);
 extern void mx23_map_io(void);
 extern void mx23_init_irq(void);
 
 extern void mx28_soc_init(void);
-extern int mx28_register_gpios(void);
 extern int mx28_clocks_init(void);
 extern void mx28_map_io(void);
 extern void mx28_init_irq(void);
 
 extern void icoll_init_irq(void);
 
-extern int mxs_clkctrl_timeout(unsigned int reg_offset, unsigned int mask);
+extern struct platform_device *mxs_add_dma(const char *devid,
+                                               resource_size_t base);
+extern struct platform_device *mxs_add_gpio(char *name, int id,
+                                           resource_size_t iobase, int irq);
 
 #endif /* __MACH_MXS_COMMON_H__ */
index 21e45a70d344643c72b61296ad71afa75e215492..e8b1d958240b6a6ca757c0e28cd58847135a6383 100644 (file)
@@ -82,8 +82,9 @@ struct platform_device * __init mxs_add_mxs_i2c(
                const struct mxs_mxs_i2c_data *data);
 
 /* mmc */
-#include <mach/mmc.h>
+#include <linux/mmc/mxs-mmc.h>
 struct mxs_mxs_mmc_data {
+       const char *devid;
        int id;
        resource_size_t iobase;
        resource_size_t dma;
diff --git a/arch/arm/mach-mxs/include/mach/mmc.h b/arch/arm/mach-mxs/include/mach/mmc.h
deleted file mode 100644 (file)
index 211547a..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MACH_MXS_MMC_H__
-#define __MACH_MXS_MMC_H__
-
-struct mxs_mmc_platform_data {
-       int wp_gpio;    /* write protect pin */
-       unsigned int flags;
-#define SLOTF_4_BIT_CAPABLE    (1 << 0)
-#define SLOTF_8_BIT_CAPABLE    (1 << 1)
-};
-#endif /* __MACH_MXS_MMC_H__ */
index da4610ebe9e6deafa4dd6097404c6db14918f960..dafd48e86c8cdabd783f812ac5d0274a9a8cd1b0 100644 (file)
@@ -226,7 +226,7 @@ static void __init mx28evk_fec_reset(void)
        struct clk *clk;
 
        /* Enable fec phy clock */
-       clk = clk_get_sys("pll2", NULL);
+       clk = clk_get_sys("enet_out", NULL);
        if (!IS_ERR(clk))
                clk_prepare_enable(clk);
 
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
new file mode 100644 (file)
index 0000000..8cac94b
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/init.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+#include <mach/common.h>
+
+static int __init mxs_icoll_add_irq_domain(struct device_node *np,
+                               struct device_node *interrupt_parent)
+{
+       irq_domain_add_legacy(np, 128, 0, 0, &irq_domain_simple_ops, NULL);
+
+       return 0;
+}
+
+static int __init mxs_gpio_add_irq_domain(struct device_node *np,
+                               struct device_node *interrupt_parent)
+{
+       static int gpio_irq_base = MXS_GPIO_IRQ_START;
+
+       irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, NULL);
+       gpio_irq_base += 32;
+
+       return 0;
+}
+
+static const struct of_device_id mxs_irq_match[] __initconst = {
+       { .compatible = "fsl,mxs-icoll", .data = mxs_icoll_add_irq_domain, },
+       { .compatible = "fsl,mxs-gpio", .data = mxs_gpio_add_irq_domain, },
+       { /* sentinel */ }
+};
+
+static void __init mxs_dt_init_irq(void)
+{
+       icoll_init_irq();
+       of_irq_init(mxs_irq_match);
+}
+
+static void __init imx23_timer_init(void)
+{
+       mx23_clocks_init();
+}
+
+static struct sys_timer imx23_timer = {
+       .init = imx23_timer_init,
+};
+
+static void __init imx28_timer_init(void)
+{
+       mx28_clocks_init();
+}
+
+static struct sys_timer imx28_timer = {
+       .init = imx28_timer_init,
+};
+
+static void __init imx28_evk_init(void)
+{
+       struct clk *clk;
+
+       /* Enable fec phy clock */
+       clk = clk_get_sys("enet_out", NULL);
+       if (!IS_ERR(clk))
+               clk_prepare_enable(clk);
+}
+
+static void __init mxs_machine_init(void)
+{
+       if (of_machine_is_compatible("fsl,imx28-evk"))
+               imx28_evk_init();
+
+       of_platform_populate(NULL, of_default_bus_match_table,
+                               NULL, NULL);
+}
+
+static const char *imx23_dt_compat[] __initdata = {
+       "fsl,imx23-evk",
+       "fsl,imx23",
+       NULL,
+};
+
+static const char *imx28_dt_compat[] __initdata = {
+       "fsl,imx28-evk",
+       "fsl,imx28",
+       NULL,
+};
+
+DT_MACHINE_START(IMX23, "Freescale i.MX23 (Device Tree)")
+       .map_io         = mx23_map_io,
+       .init_irq       = mxs_dt_init_irq,
+       .timer          = &imx23_timer,
+       .init_machine   = mxs_machine_init,
+       .dt_compat      = imx23_dt_compat,
+       .restart        = mxs_restart,
+MACHINE_END
+
+DT_MACHINE_START(IMX28, "Freescale i.MX28 (Device Tree)")
+       .map_io         = mx28_map_io,
+       .init_irq       = mxs_dt_init_irq,
+       .timer          = &imx28_timer,
+       .init_machine   = mxs_machine_init,
+       .dt_compat      = imx28_dt_compat,
+       .restart        = mxs_restart,
+MACHINE_END
index 67a384edcf5b8f794a6dcf463aef37a48cdce1af..dccb67a9e7c4f5968668380aa55c4fcd2fdacd7b 100644 (file)
@@ -66,9 +66,25 @@ void __init mx28_init_irq(void)
 void __init mx23_soc_init(void)
 {
        pinctrl_provide_dummies();
+
+       mxs_add_dma("imx23-dma-apbh", MX23_APBH_DMA_BASE_ADDR);
+       mxs_add_dma("imx23-dma-apbx", MX23_APBX_DMA_BASE_ADDR);
+
+       mxs_add_gpio("imx23-gpio", 0, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO0);
+       mxs_add_gpio("imx23-gpio", 1, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO1);
+       mxs_add_gpio("imx23-gpio", 2, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO2);
 }
 
 void __init mx28_soc_init(void)
 {
        pinctrl_provide_dummies();
+
+       mxs_add_dma("imx28-dma-apbh", MX23_APBH_DMA_BASE_ADDR);
+       mxs_add_dma("imx28-dma-apbx", MX23_APBX_DMA_BASE_ADDR);
+
+       mxs_add_gpio("imx28-gpio", 0, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO0);
+       mxs_add_gpio("imx28-gpio", 1, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO1);
+       mxs_add_gpio("imx28-gpio", 2, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO2);
+       mxs_add_gpio("imx28-gpio", 3, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO3);
+       mxs_add_gpio("imx28-gpio", 4, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO4);
 }
diff --git a/arch/arm/mach-mxs/regs-clkctrl-mx23.h b/arch/arm/mach-mxs/regs-clkctrl-mx23.h
deleted file mode 100644 (file)
index 0ea5c9d..0000000
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * Freescale CLKCTRL Register Definitions
- *
- * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
- * Copyright 2008-2010 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- *
- * This file is created by xml file. Don't Edit it.
- *
- * Xml Revision: 1.48
- * Template revision: 26195
- */
-
-#ifndef __REGS_CLKCTRL_MX23_H__
-#define __REGS_CLKCTRL_MX23_H__
-
-
-#define HW_CLKCTRL_PLLCTRL0    (0x00000000)
-#define HW_CLKCTRL_PLLCTRL0_SET        (0x00000004)
-#define HW_CLKCTRL_PLLCTRL0_CLR        (0x00000008)
-#define HW_CLKCTRL_PLLCTRL0_TOG        (0x0000000c)
-
-#define BP_CLKCTRL_PLLCTRL0_LFR_SEL    28
-#define BM_CLKCTRL_PLLCTRL0_LFR_SEL    0x30000000
-#define BF_CLKCTRL_PLLCTRL0_LFR_SEL(v)  \
-               (((v) << 28) & BM_CLKCTRL_PLLCTRL0_LFR_SEL)
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__TIMES_2   0x1
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__TIMES_05  0x2
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLLCTRL0_CP_SEL     24
-#define BM_CLKCTRL_PLLCTRL0_CP_SEL     0x03000000
-#define BF_CLKCTRL_PLLCTRL0_CP_SEL(v)  \
-               (((v) << 24) & BM_CLKCTRL_PLLCTRL0_CP_SEL)
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__TIMES_2   0x1
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__TIMES_05  0x2
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLLCTRL0_DIV_SEL    20
-#define BM_CLKCTRL_PLLCTRL0_DIV_SEL    0x00300000
-#define BF_CLKCTRL_PLLCTRL0_DIV_SEL(v)  \
-               (((v) << 20) & BM_CLKCTRL_PLLCTRL0_DIV_SEL)
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__LOWER     0x1
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__LOWEST    0x2
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS        0x00040000
-#define BM_CLKCTRL_PLLCTRL0_POWER      0x00010000
-
-#define HW_CLKCTRL_PLLCTRL1    (0x00000010)
-
-#define BM_CLKCTRL_PLLCTRL1_LOCK       0x80000000
-#define BM_CLKCTRL_PLLCTRL1_FORCE_LOCK 0x40000000
-#define BP_CLKCTRL_PLLCTRL1_LOCK_COUNT 0
-#define BM_CLKCTRL_PLLCTRL1_LOCK_COUNT 0x0000FFFF
-#define BF_CLKCTRL_PLLCTRL1_LOCK_COUNT(v)  \
-               (((v) << 0) & BM_CLKCTRL_PLLCTRL1_LOCK_COUNT)
-
-#define HW_CLKCTRL_CPU (0x00000020)
-#define HW_CLKCTRL_CPU_SET     (0x00000024)
-#define HW_CLKCTRL_CPU_CLR     (0x00000028)
-#define HW_CLKCTRL_CPU_TOG     (0x0000002c)
-
-#define BM_CLKCTRL_CPU_BUSY_REF_XTAL   0x20000000
-#define BM_CLKCTRL_CPU_BUSY_REF_CPU    0x10000000
-#define BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN        0x04000000
-#define BP_CLKCTRL_CPU_DIV_XTAL        16
-#define BM_CLKCTRL_CPU_DIV_XTAL        0x03FF0000
-#define BF_CLKCTRL_CPU_DIV_XTAL(v)  \
-               (((v) << 16) & BM_CLKCTRL_CPU_DIV_XTAL)
-#define BM_CLKCTRL_CPU_INTERRUPT_WAIT  0x00001000
-#define BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN 0x00000400
-#define BP_CLKCTRL_CPU_DIV_CPU 0
-#define BM_CLKCTRL_CPU_DIV_CPU 0x0000003F
-#define BF_CLKCTRL_CPU_DIV_CPU(v)  \
-               (((v) << 0) & BM_CLKCTRL_CPU_DIV_CPU)
-
-#define HW_CLKCTRL_HBUS        (0x00000030)
-#define HW_CLKCTRL_HBUS_SET    (0x00000034)
-#define HW_CLKCTRL_HBUS_CLR    (0x00000038)
-#define HW_CLKCTRL_HBUS_TOG    (0x0000003c)
-
-#define BM_CLKCTRL_HBUS_BUSY   0x20000000
-#define BM_CLKCTRL_HBUS_DCP_AS_ENABLE  0x10000000
-#define BM_CLKCTRL_HBUS_PXP_AS_ENABLE  0x08000000
-#define BM_CLKCTRL_HBUS_APBHDMA_AS_ENABLE      0x04000000
-#define BM_CLKCTRL_HBUS_APBXDMA_AS_ENABLE      0x02000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_JAM_AS_ENABLE  0x01000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_AS_ENABLE      0x00800000
-#define BM_CLKCTRL_HBUS_CPU_DATA_AS_ENABLE     0x00400000
-#define BM_CLKCTRL_HBUS_CPU_INSTR_AS_ENABLE    0x00200000
-#define BM_CLKCTRL_HBUS_AUTO_SLOW_MODE 0x00100000
-#define BP_CLKCTRL_HBUS_SLOW_DIV       16
-#define BM_CLKCTRL_HBUS_SLOW_DIV       0x00070000
-#define BF_CLKCTRL_HBUS_SLOW_DIV(v)  \
-               (((v) << 16) & BM_CLKCTRL_HBUS_SLOW_DIV)
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY1  0x0
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY2  0x1
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY4  0x2
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY8  0x3
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY16 0x4
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY32 0x5
-#define BM_CLKCTRL_HBUS_DIV_FRAC_EN    0x00000020
-#define BP_CLKCTRL_HBUS_DIV    0
-#define BM_CLKCTRL_HBUS_DIV    0x0000001F
-#define BF_CLKCTRL_HBUS_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_HBUS_DIV)
-
-#define HW_CLKCTRL_XBUS        (0x00000040)
-
-#define BM_CLKCTRL_XBUS_BUSY   0x80000000
-#define BM_CLKCTRL_XBUS_DIV_FRAC_EN    0x00000400
-#define BP_CLKCTRL_XBUS_DIV    0
-#define BM_CLKCTRL_XBUS_DIV    0x000003FF
-#define BF_CLKCTRL_XBUS_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_XBUS_DIV)
-
-#define HW_CLKCTRL_XTAL        (0x00000050)
-#define HW_CLKCTRL_XTAL_SET    (0x00000054)
-#define HW_CLKCTRL_XTAL_CLR    (0x00000058)
-#define HW_CLKCTRL_XTAL_TOG    (0x0000005c)
-
-#define BP_CLKCTRL_XTAL_UART_CLK_GATE  31
-#define BM_CLKCTRL_XTAL_UART_CLK_GATE  0x80000000
-#define BP_CLKCTRL_XTAL_FILT_CLK24M_GATE       30
-#define BM_CLKCTRL_XTAL_FILT_CLK24M_GATE       0x40000000
-#define BP_CLKCTRL_XTAL_PWM_CLK24M_GATE        29
-#define BM_CLKCTRL_XTAL_PWM_CLK24M_GATE        0x20000000
-#define BM_CLKCTRL_XTAL_DRI_CLK24M_GATE        0x10000000
-#define BM_CLKCTRL_XTAL_DIGCTRL_CLK1M_GATE     0x08000000
-#define BP_CLKCTRL_XTAL_TIMROT_CLK32K_GATE     26
-#define BM_CLKCTRL_XTAL_TIMROT_CLK32K_GATE     0x04000000
-#define BP_CLKCTRL_XTAL_DIV_UART       0
-#define BM_CLKCTRL_XTAL_DIV_UART       0x00000003
-#define BF_CLKCTRL_XTAL_DIV_UART(v)  \
-               (((v) << 0) & BM_CLKCTRL_XTAL_DIV_UART)
-
-#define HW_CLKCTRL_PIX (0x00000060)
-
-#define BP_CLKCTRL_PIX_CLKGATE 31
-#define BM_CLKCTRL_PIX_CLKGATE 0x80000000
-#define BM_CLKCTRL_PIX_BUSY    0x20000000
-#define BM_CLKCTRL_PIX_DIV_FRAC_EN     0x00001000
-#define BP_CLKCTRL_PIX_DIV     0
-#define BM_CLKCTRL_PIX_DIV     0x00000FFF
-#define BF_CLKCTRL_PIX_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_PIX_DIV)
-
-#define HW_CLKCTRL_SSP (0x00000070)
-
-#define BP_CLKCTRL_SSP_CLKGATE 31
-#define BM_CLKCTRL_SSP_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP_BUSY    0x20000000
-#define BM_CLKCTRL_SSP_DIV_FRAC_EN     0x00000200
-#define BP_CLKCTRL_SSP_DIV     0
-#define BM_CLKCTRL_SSP_DIV     0x000001FF
-#define BF_CLKCTRL_SSP_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_SSP_DIV)
-
-#define HW_CLKCTRL_GPMI        (0x00000080)
-
-#define BP_CLKCTRL_GPMI_CLKGATE        31
-#define BM_CLKCTRL_GPMI_CLKGATE        0x80000000
-#define BM_CLKCTRL_GPMI_BUSY   0x20000000
-#define BM_CLKCTRL_GPMI_DIV_FRAC_EN    0x00000400
-#define BP_CLKCTRL_GPMI_DIV    0
-#define BM_CLKCTRL_GPMI_DIV    0x000003FF
-#define BF_CLKCTRL_GPMI_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_GPMI_DIV)
-
-#define HW_CLKCTRL_SPDIF       (0x00000090)
-
-#define BM_CLKCTRL_SPDIF_CLKGATE       0x80000000
-
-#define HW_CLKCTRL_EMI (0x000000a0)
-
-#define BP_CLKCTRL_EMI_CLKGATE 31
-#define BM_CLKCTRL_EMI_CLKGATE 0x80000000
-#define BM_CLKCTRL_EMI_SYNC_MODE_EN    0x40000000
-#define BM_CLKCTRL_EMI_BUSY_REF_XTAL   0x20000000
-#define BM_CLKCTRL_EMI_BUSY_REF_EMI    0x10000000
-#define BM_CLKCTRL_EMI_BUSY_REF_CPU    0x08000000
-#define BM_CLKCTRL_EMI_BUSY_SYNC_MODE  0x04000000
-#define BM_CLKCTRL_EMI_BUSY_DCC_RESYNC 0x00020000
-#define BM_CLKCTRL_EMI_DCC_RESYNC_ENABLE       0x00010000
-#define BP_CLKCTRL_EMI_DIV_XTAL        8
-#define BM_CLKCTRL_EMI_DIV_XTAL        0x00000F00
-#define BF_CLKCTRL_EMI_DIV_XTAL(v)  \
-               (((v) << 8) & BM_CLKCTRL_EMI_DIV_XTAL)
-#define BP_CLKCTRL_EMI_DIV_EMI 0
-#define BM_CLKCTRL_EMI_DIV_EMI 0x0000003F
-#define BF_CLKCTRL_EMI_DIV_EMI(v)  \
-               (((v) << 0) & BM_CLKCTRL_EMI_DIV_EMI)
-
-#define HW_CLKCTRL_IR  (0x000000b0)
-
-#define BM_CLKCTRL_IR_CLKGATE  0x80000000
-#define BM_CLKCTRL_IR_AUTO_DIV 0x20000000
-#define BM_CLKCTRL_IR_IR_BUSY  0x10000000
-#define BM_CLKCTRL_IR_IROV_BUSY        0x08000000
-#define BP_CLKCTRL_IR_IROV_DIV 16
-#define BM_CLKCTRL_IR_IROV_DIV 0x01FF0000
-#define BF_CLKCTRL_IR_IROV_DIV(v)  \
-               (((v) << 16) & BM_CLKCTRL_IR_IROV_DIV)
-#define BP_CLKCTRL_IR_IR_DIV   0
-#define BM_CLKCTRL_IR_IR_DIV   0x000003FF
-#define BF_CLKCTRL_IR_IR_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_IR_IR_DIV)
-
-#define HW_CLKCTRL_SAIF        (0x000000c0)
-
-#define BM_CLKCTRL_SAIF_CLKGATE        0x80000000
-#define BM_CLKCTRL_SAIF_BUSY   0x20000000
-#define BM_CLKCTRL_SAIF_DIV_FRAC_EN    0x00010000
-#define BP_CLKCTRL_SAIF_DIV    0
-#define BM_CLKCTRL_SAIF_DIV    0x0000FFFF
-#define BF_CLKCTRL_SAIF_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_SAIF_DIV)
-
-#define HW_CLKCTRL_TV  (0x000000d0)
-
-#define BM_CLKCTRL_TV_CLK_TV108M_GATE  0x80000000
-#define BM_CLKCTRL_TV_CLK_TV_GATE      0x40000000
-
-#define HW_CLKCTRL_ETM (0x000000e0)
-
-#define BM_CLKCTRL_ETM_CLKGATE 0x80000000
-#define BM_CLKCTRL_ETM_BUSY    0x20000000
-#define BM_CLKCTRL_ETM_DIV_FRAC_EN     0x00000040
-#define BP_CLKCTRL_ETM_DIV     0
-#define BM_CLKCTRL_ETM_DIV     0x0000003F
-#define BF_CLKCTRL_ETM_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_ETM_DIV)
-
-#define HW_CLKCTRL_FRAC        (0x000000f0)
-#define HW_CLKCTRL_FRAC_SET    (0x000000f4)
-#define HW_CLKCTRL_FRAC_CLR    (0x000000f8)
-#define HW_CLKCTRL_FRAC_TOG    (0x000000fc)
-
-#define BP_CLKCTRL_FRAC_CLKGATEIO      31
-#define BM_CLKCTRL_FRAC_CLKGATEIO      0x80000000
-#define BM_CLKCTRL_FRAC_IO_STABLE      0x40000000
-#define BP_CLKCTRL_FRAC_IOFRAC 24
-#define BM_CLKCTRL_FRAC_IOFRAC 0x3F000000
-#define BF_CLKCTRL_FRAC_IOFRAC(v)  \
-               (((v) << 24) & BM_CLKCTRL_FRAC_IOFRAC)
-#define BP_CLKCTRL_FRAC_CLKGATEPIX     23
-#define BM_CLKCTRL_FRAC_CLKGATEPIX     0x00800000
-#define BM_CLKCTRL_FRAC_PIX_STABLE     0x00400000
-#define BP_CLKCTRL_FRAC_PIXFRAC        16
-#define BM_CLKCTRL_FRAC_PIXFRAC        0x003F0000
-#define BF_CLKCTRL_FRAC_PIXFRAC(v)  \
-               (((v) << 16) & BM_CLKCTRL_FRAC_PIXFRAC)
-#define BP_CLKCTRL_FRAC_CLKGATEEMI     15
-#define BM_CLKCTRL_FRAC_CLKGATEEMI     0x00008000
-#define BM_CLKCTRL_FRAC_EMI_STABLE     0x00004000
-#define BP_CLKCTRL_FRAC_EMIFRAC        8
-#define BM_CLKCTRL_FRAC_EMIFRAC        0x00003F00
-#define BF_CLKCTRL_FRAC_EMIFRAC(v)  \
-               (((v) << 8) & BM_CLKCTRL_FRAC_EMIFRAC)
-#define BP_CLKCTRL_FRAC_CLKGATECPU     7
-#define BM_CLKCTRL_FRAC_CLKGATECPU     0x00000080
-#define BM_CLKCTRL_FRAC_CPU_STABLE     0x00000040
-#define BP_CLKCTRL_FRAC_CPUFRAC        0
-#define BM_CLKCTRL_FRAC_CPUFRAC        0x0000003F
-#define BF_CLKCTRL_FRAC_CPUFRAC(v)  \
-               (((v) << 0) & BM_CLKCTRL_FRAC_CPUFRAC)
-
-#define HW_CLKCTRL_FRAC1       (0x00000100)
-#define HW_CLKCTRL_FRAC1_SET   (0x00000104)
-#define HW_CLKCTRL_FRAC1_CLR   (0x00000108)
-#define HW_CLKCTRL_FRAC1_TOG   (0x0000010c)
-
-#define BM_CLKCTRL_FRAC1_CLKGATEVID    0x80000000
-#define BM_CLKCTRL_FRAC1_VID_STABLE    0x40000000
-
-#define HW_CLKCTRL_CLKSEQ      (0x00000110)
-#define HW_CLKCTRL_CLKSEQ_SET  (0x00000114)
-#define HW_CLKCTRL_CLKSEQ_CLR  (0x00000118)
-#define HW_CLKCTRL_CLKSEQ_TOG  (0x0000011c)
-
-#define BM_CLKCTRL_CLKSEQ_BYPASS_ETM   0x00000100
-#define BM_CLKCTRL_CLKSEQ_BYPASS_CPU   0x00000080
-#define BM_CLKCTRL_CLKSEQ_BYPASS_EMI   0x00000040
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP   0x00000020
-#define BM_CLKCTRL_CLKSEQ_BYPASS_GPMI  0x00000010
-#define BM_CLKCTRL_CLKSEQ_BYPASS_IR    0x00000008
-#define BM_CLKCTRL_CLKSEQ_BYPASS_PIX   0x00000002
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF  0x00000001
-
-#define HW_CLKCTRL_RESET       (0x00000120)
-
-#define BM_CLKCTRL_RESET_CHIP  0x00000002
-#define BM_CLKCTRL_RESET_DIG   0x00000001
-
-#define HW_CLKCTRL_STATUS      (0x00000130)
-
-#define BP_CLKCTRL_STATUS_CPU_LIMIT    30
-#define BM_CLKCTRL_STATUS_CPU_LIMIT    0xC0000000
-#define BF_CLKCTRL_STATUS_CPU_LIMIT(v) \
-               (((v) << 30) & BM_CLKCTRL_STATUS_CPU_LIMIT)
-
-#define HW_CLKCTRL_VERSION     (0x00000140)
-
-#define BP_CLKCTRL_VERSION_MAJOR       24
-#define BM_CLKCTRL_VERSION_MAJOR       0xFF000000
-#define BF_CLKCTRL_VERSION_MAJOR(v) \
-               (((v) << 24) & BM_CLKCTRL_VERSION_MAJOR)
-#define BP_CLKCTRL_VERSION_MINOR       16
-#define BM_CLKCTRL_VERSION_MINOR       0x00FF0000
-#define BF_CLKCTRL_VERSION_MINOR(v)  \
-               (((v) << 16) & BM_CLKCTRL_VERSION_MINOR)
-#define BP_CLKCTRL_VERSION_STEP        0
-#define BM_CLKCTRL_VERSION_STEP        0x0000FFFF
-#define BF_CLKCTRL_VERSION_STEP(v)  \
-               (((v) << 0) & BM_CLKCTRL_VERSION_STEP)
-
-#endif /* __REGS_CLKCTRL_MX23_H__ */
diff --git a/arch/arm/mach-mxs/regs-clkctrl-mx28.h b/arch/arm/mach-mxs/regs-clkctrl-mx28.h
deleted file mode 100644 (file)
index 7d1b061..0000000
+++ /dev/null
@@ -1,486 +0,0 @@
-/*
- * Freescale CLKCTRL Register Definitions
- *
- * Copyright 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- *
- * This file is created by xml file. Don't Edit it.
- *
- * Xml Revision: 1.48
- * Template revision: 26195
- */
-
-#ifndef __REGS_CLKCTRL_MX28_H__
-#define __REGS_CLKCTRL_MX28_H__
-
-#define HW_CLKCTRL_PLL0CTRL0   (0x00000000)
-#define HW_CLKCTRL_PLL0CTRL0_SET       (0x00000004)
-#define HW_CLKCTRL_PLL0CTRL0_CLR       (0x00000008)
-#define HW_CLKCTRL_PLL0CTRL0_TOG       (0x0000000c)
-
-#define BP_CLKCTRL_PLL0CTRL0_LFR_SEL   28
-#define BM_CLKCTRL_PLL0CTRL0_LFR_SEL   0x30000000
-#define BF_CLKCTRL_PLL0CTRL0_LFR_SEL(v)  \
-               (((v) << 28) & BM_CLKCTRL_PLL0CTRL0_LFR_SEL)
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__TIMES_2   0x1
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__TIMES_05  0x2
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL0CTRL0_CP_SEL    24
-#define BM_CLKCTRL_PLL0CTRL0_CP_SEL    0x03000000
-#define BF_CLKCTRL_PLL0CTRL0_CP_SEL(v)  \
-               (((v) << 24) & BM_CLKCTRL_PLL0CTRL0_CP_SEL)
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__TIMES_2   0x1
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__TIMES_05  0x2
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL0CTRL0_DIV_SEL   20
-#define BM_CLKCTRL_PLL0CTRL0_DIV_SEL   0x00300000
-#define BF_CLKCTRL_PLL0CTRL0_DIV_SEL(v)  \
-               (((v) << 20) & BM_CLKCTRL_PLL0CTRL0_DIV_SEL)
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__LOWER     0x1
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__LOWEST    0x2
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_PLL0CTRL0_EN_USB_CLKS       0x00040000
-#define BM_CLKCTRL_PLL0CTRL0_POWER     0x00020000
-
-#define HW_CLKCTRL_PLL0CTRL1   (0x00000010)
-
-#define BM_CLKCTRL_PLL0CTRL1_LOCK      0x80000000
-#define BM_CLKCTRL_PLL0CTRL1_FORCE_LOCK        0x40000000
-#define BP_CLKCTRL_PLL0CTRL1_LOCK_COUNT        0
-#define BM_CLKCTRL_PLL0CTRL1_LOCK_COUNT        0x0000FFFF
-#define BF_CLKCTRL_PLL0CTRL1_LOCK_COUNT(v)  \
-               (((v) << 0) & BM_CLKCTRL_PLL0CTRL1_LOCK_COUNT)
-
-#define HW_CLKCTRL_PLL1CTRL0   (0x00000020)
-#define HW_CLKCTRL_PLL1CTRL0_SET       (0x00000024)
-#define HW_CLKCTRL_PLL1CTRL0_CLR       (0x00000028)
-#define HW_CLKCTRL_PLL1CTRL0_TOG       (0x0000002c)
-
-#define BM_CLKCTRL_PLL1CTRL0_CLKGATEEMI        0x80000000
-#define BP_CLKCTRL_PLL1CTRL0_LFR_SEL   28
-#define BM_CLKCTRL_PLL1CTRL0_LFR_SEL   0x30000000
-#define BF_CLKCTRL_PLL1CTRL0_LFR_SEL(v)  \
-               (((v) << 28) & BM_CLKCTRL_PLL1CTRL0_LFR_SEL)
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__TIMES_2   0x1
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__TIMES_05  0x2
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL1CTRL0_CP_SEL    24
-#define BM_CLKCTRL_PLL1CTRL0_CP_SEL    0x03000000
-#define BF_CLKCTRL_PLL1CTRL0_CP_SEL(v)  \
-               (((v) << 24) & BM_CLKCTRL_PLL1CTRL0_CP_SEL)
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__TIMES_2   0x1
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__TIMES_05  0x2
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL1CTRL0_DIV_SEL   20
-#define BM_CLKCTRL_PLL1CTRL0_DIV_SEL   0x00300000
-#define BF_CLKCTRL_PLL1CTRL0_DIV_SEL(v)  \
-               (((v) << 20) & BM_CLKCTRL_PLL1CTRL0_DIV_SEL)
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__DEFAULT   0x0
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__LOWER     0x1
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__LOWEST    0x2
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_PLL1CTRL0_EN_USB_CLKS       0x00040000
-#define BM_CLKCTRL_PLL1CTRL0_POWER     0x00020000
-
-#define HW_CLKCTRL_PLL1CTRL1   (0x00000030)
-
-#define BM_CLKCTRL_PLL1CTRL1_LOCK      0x80000000
-#define BM_CLKCTRL_PLL1CTRL1_FORCE_LOCK        0x40000000
-#define BP_CLKCTRL_PLL1CTRL1_LOCK_COUNT        0
-#define BM_CLKCTRL_PLL1CTRL1_LOCK_COUNT        0x0000FFFF
-#define BF_CLKCTRL_PLL1CTRL1_LOCK_COUNT(v)  \
-               (((v) << 0) & BM_CLKCTRL_PLL1CTRL1_LOCK_COUNT)
-
-#define HW_CLKCTRL_PLL2CTRL0   (0x00000040)
-#define HW_CLKCTRL_PLL2CTRL0_SET       (0x00000044)
-#define HW_CLKCTRL_PLL2CTRL0_CLR       (0x00000048)
-#define HW_CLKCTRL_PLL2CTRL0_TOG       (0x0000004c)
-
-#define BM_CLKCTRL_PLL2CTRL0_CLKGATE   0x80000000
-#define BP_CLKCTRL_PLL2CTRL0_LFR_SEL   28
-#define BM_CLKCTRL_PLL2CTRL0_LFR_SEL   0x30000000
-#define BF_CLKCTRL_PLL2CTRL0_LFR_SEL(v)  \
-               (((v) << 28) & BM_CLKCTRL_PLL2CTRL0_LFR_SEL)
-#define BM_CLKCTRL_PLL2CTRL0_HOLD_RING_OFF_B   0x04000000
-#define BP_CLKCTRL_PLL2CTRL0_CP_SEL    24
-#define BM_CLKCTRL_PLL2CTRL0_CP_SEL    0x03000000
-#define BF_CLKCTRL_PLL2CTRL0_CP_SEL(v)  \
-               (((v) << 24) & BM_CLKCTRL_PLL2CTRL0_CP_SEL)
-#define BM_CLKCTRL_PLL2CTRL0_POWER     0x00800000
-
-#define HW_CLKCTRL_CPU (0x00000050)
-#define HW_CLKCTRL_CPU_SET     (0x00000054)
-#define HW_CLKCTRL_CPU_CLR     (0x00000058)
-#define HW_CLKCTRL_CPU_TOG     (0x0000005c)
-
-#define BM_CLKCTRL_CPU_BUSY_REF_XTAL   0x20000000
-#define BM_CLKCTRL_CPU_BUSY_REF_CPU    0x10000000
-#define BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN        0x04000000
-#define BP_CLKCTRL_CPU_DIV_XTAL        16
-#define BM_CLKCTRL_CPU_DIV_XTAL        0x03FF0000
-#define BF_CLKCTRL_CPU_DIV_XTAL(v)  \
-               (((v) << 16) & BM_CLKCTRL_CPU_DIV_XTAL)
-#define BM_CLKCTRL_CPU_INTERRUPT_WAIT  0x00001000
-#define BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN 0x00000400
-#define BP_CLKCTRL_CPU_DIV_CPU 0
-#define BM_CLKCTRL_CPU_DIV_CPU 0x0000003F
-#define BF_CLKCTRL_CPU_DIV_CPU(v)  \
-               (((v) << 0) & BM_CLKCTRL_CPU_DIV_CPU)
-
-#define HW_CLKCTRL_HBUS        (0x00000060)
-#define HW_CLKCTRL_HBUS_SET    (0x00000064)
-#define HW_CLKCTRL_HBUS_CLR    (0x00000068)
-#define HW_CLKCTRL_HBUS_TOG    (0x0000006c)
-
-#define BM_CLKCTRL_HBUS_ASM_BUSY       0x80000000
-#define BM_CLKCTRL_HBUS_DCP_AS_ENABLE  0x40000000
-#define BM_CLKCTRL_HBUS_PXP_AS_ENABLE  0x20000000
-#define BM_CLKCTRL_HBUS_ASM_EMIPORT_AS_ENABLE  0x08000000
-#define BM_CLKCTRL_HBUS_APBHDMA_AS_ENABLE      0x04000000
-#define BM_CLKCTRL_HBUS_APBXDMA_AS_ENABLE      0x02000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_JAM_AS_ENABLE  0x01000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_AS_ENABLE      0x00800000
-#define BM_CLKCTRL_HBUS_CPU_DATA_AS_ENABLE     0x00400000
-#define BM_CLKCTRL_HBUS_CPU_INSTR_AS_ENABLE    0x00200000
-#define BM_CLKCTRL_HBUS_ASM_ENABLE     0x00100000
-#define BM_CLKCTRL_HBUS_AUTO_CLEAR_DIV_ENABLE  0x00080000
-#define BP_CLKCTRL_HBUS_SLOW_DIV       16
-#define BM_CLKCTRL_HBUS_SLOW_DIV       0x00070000
-#define BF_CLKCTRL_HBUS_SLOW_DIV(v)  \
-               (((v) << 16) & BM_CLKCTRL_HBUS_SLOW_DIV)
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY1  0x0
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY2  0x1
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY4  0x2
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY8  0x3
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY16 0x4
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY32 0x5
-#define BM_CLKCTRL_HBUS_DIV_FRAC_EN    0x00000020
-#define BP_CLKCTRL_HBUS_DIV    0
-#define BM_CLKCTRL_HBUS_DIV    0x0000001F
-#define BF_CLKCTRL_HBUS_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_HBUS_DIV)
-
-#define HW_CLKCTRL_XBUS        (0x00000070)
-
-#define BM_CLKCTRL_XBUS_BUSY   0x80000000
-#define BM_CLKCTRL_XBUS_AUTO_CLEAR_DIV_ENABLE  0x00000800
-#define BM_CLKCTRL_XBUS_DIV_FRAC_EN    0x00000400
-#define BP_CLKCTRL_XBUS_DIV    0
-#define BM_CLKCTRL_XBUS_DIV    0x000003FF
-#define BF_CLKCTRL_XBUS_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_XBUS_DIV)
-
-#define HW_CLKCTRL_XTAL        (0x00000080)
-#define HW_CLKCTRL_XTAL_SET    (0x00000084)
-#define HW_CLKCTRL_XTAL_CLR    (0x00000088)
-#define HW_CLKCTRL_XTAL_TOG    (0x0000008c)
-
-#define BP_CLKCTRL_XTAL_UART_CLK_GATE  31
-#define BM_CLKCTRL_XTAL_UART_CLK_GATE  0x80000000
-#define BP_CLKCTRL_XTAL_PWM_CLK24M_GATE        29
-#define BM_CLKCTRL_XTAL_PWM_CLK24M_GATE        0x20000000
-#define BP_CLKCTRL_XTAL_TIMROT_CLK32K_GATE     26
-#define BM_CLKCTRL_XTAL_TIMROT_CLK32K_GATE     0x04000000
-#define BP_CLKCTRL_XTAL_DIV_UART       0
-#define BM_CLKCTRL_XTAL_DIV_UART       0x00000003
-#define BF_CLKCTRL_XTAL_DIV_UART(v)  \
-               (((v) << 0) & BM_CLKCTRL_XTAL_DIV_UART)
-
-#define HW_CLKCTRL_SSP0        (0x00000090)
-
-#define BP_CLKCTRL_SSP0_CLKGATE        31
-#define BM_CLKCTRL_SSP0_CLKGATE        0x80000000
-#define BM_CLKCTRL_SSP0_BUSY   0x20000000
-#define BM_CLKCTRL_SSP0_DIV_FRAC_EN    0x00000200
-#define BP_CLKCTRL_SSP0_DIV    0
-#define BM_CLKCTRL_SSP0_DIV    0x000001FF
-#define BF_CLKCTRL_SSP0_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_SSP0_DIV)
-
-#define HW_CLKCTRL_SSP1        (0x000000a0)
-
-#define BP_CLKCTRL_SSP1_CLKGATE        31
-#define BM_CLKCTRL_SSP1_CLKGATE        0x80000000
-#define BM_CLKCTRL_SSP1_BUSY   0x20000000
-#define BM_CLKCTRL_SSP1_DIV_FRAC_EN    0x00000200
-#define BP_CLKCTRL_SSP1_DIV    0
-#define BM_CLKCTRL_SSP1_DIV    0x000001FF
-#define BF_CLKCTRL_SSP1_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_SSP1_DIV)
-
-#define HW_CLKCTRL_SSP2        (0x000000b0)
-
-#define BP_CLKCTRL_SSP2_CLKGATE        31
-#define BM_CLKCTRL_SSP2_CLKGATE        0x80000000
-#define BM_CLKCTRL_SSP2_BUSY   0x20000000
-#define BM_CLKCTRL_SSP2_DIV_FRAC_EN    0x00000200
-#define BP_CLKCTRL_SSP2_DIV    0
-#define BM_CLKCTRL_SSP2_DIV    0x000001FF
-#define BF_CLKCTRL_SSP2_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_SSP2_DIV)
-
-#define HW_CLKCTRL_SSP3        (0x000000c0)
-
-#define BP_CLKCTRL_SSP3_CLKGATE        31
-#define BM_CLKCTRL_SSP3_CLKGATE        0x80000000
-#define BM_CLKCTRL_SSP3_BUSY   0x20000000
-#define BM_CLKCTRL_SSP3_DIV_FRAC_EN    0x00000200
-#define BP_CLKCTRL_SSP3_DIV    0
-#define BM_CLKCTRL_SSP3_DIV    0x000001FF
-#define BF_CLKCTRL_SSP3_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_SSP3_DIV)
-
-#define HW_CLKCTRL_GPMI        (0x000000d0)
-
-#define BP_CLKCTRL_GPMI_CLKGATE        31
-#define BM_CLKCTRL_GPMI_CLKGATE        0x80000000
-#define BM_CLKCTRL_GPMI_BUSY   0x20000000
-#define BM_CLKCTRL_GPMI_DIV_FRAC_EN    0x00000400
-#define BP_CLKCTRL_GPMI_DIV    0
-#define BM_CLKCTRL_GPMI_DIV    0x000003FF
-#define BF_CLKCTRL_GPMI_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_GPMI_DIV)
-
-#define HW_CLKCTRL_SPDIF       (0x000000e0)
-
-#define BP_CLKCTRL_SPDIF_CLKGATE       31
-#define BM_CLKCTRL_SPDIF_CLKGATE       0x80000000
-
-#define HW_CLKCTRL_EMI (0x000000f0)
-
-#define BP_CLKCTRL_EMI_CLKGATE 31
-#define BM_CLKCTRL_EMI_CLKGATE 0x80000000
-#define BM_CLKCTRL_EMI_SYNC_MODE_EN    0x40000000
-#define BM_CLKCTRL_EMI_BUSY_REF_XTAL   0x20000000
-#define BM_CLKCTRL_EMI_BUSY_REF_EMI    0x10000000
-#define BM_CLKCTRL_EMI_BUSY_REF_CPU    0x08000000
-#define BM_CLKCTRL_EMI_BUSY_SYNC_MODE  0x04000000
-#define BM_CLKCTRL_EMI_BUSY_DCC_RESYNC 0x00020000
-#define BM_CLKCTRL_EMI_DCC_RESYNC_ENABLE       0x00010000
-#define BP_CLKCTRL_EMI_DIV_XTAL        8
-#define BM_CLKCTRL_EMI_DIV_XTAL        0x00000F00
-#define BF_CLKCTRL_EMI_DIV_XTAL(v)  \
-               (((v) << 8) & BM_CLKCTRL_EMI_DIV_XTAL)
-#define BP_CLKCTRL_EMI_DIV_EMI 0
-#define BM_CLKCTRL_EMI_DIV_EMI 0x0000003F
-#define BF_CLKCTRL_EMI_DIV_EMI(v)  \
-               (((v) << 0) & BM_CLKCTRL_EMI_DIV_EMI)
-
-#define HW_CLKCTRL_SAIF0       (0x00000100)
-
-#define BP_CLKCTRL_SAIF0_CLKGATE       31
-#define BM_CLKCTRL_SAIF0_CLKGATE       0x80000000
-#define BM_CLKCTRL_SAIF0_BUSY  0x20000000
-#define BM_CLKCTRL_SAIF0_DIV_FRAC_EN   0x00010000
-#define BP_CLKCTRL_SAIF0_DIV   0
-#define BM_CLKCTRL_SAIF0_DIV   0x0000FFFF
-#define BF_CLKCTRL_SAIF0_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_SAIF0_DIV)
-
-#define HW_CLKCTRL_SAIF1       (0x00000110)
-
-#define BP_CLKCTRL_SAIF1_CLKGATE       31
-#define BM_CLKCTRL_SAIF1_CLKGATE       0x80000000
-#define BM_CLKCTRL_SAIF1_BUSY  0x20000000
-#define BM_CLKCTRL_SAIF1_DIV_FRAC_EN   0x00010000
-#define BP_CLKCTRL_SAIF1_DIV   0
-#define BM_CLKCTRL_SAIF1_DIV   0x0000FFFF
-#define BF_CLKCTRL_SAIF1_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_SAIF1_DIV)
-
-#define HW_CLKCTRL_DIS_LCDIF   (0x00000120)
-
-#define BP_CLKCTRL_DIS_LCDIF_CLKGATE   31
-#define BM_CLKCTRL_DIS_LCDIF_CLKGATE   0x80000000
-#define BM_CLKCTRL_DIS_LCDIF_BUSY      0x20000000
-#define BM_CLKCTRL_DIS_LCDIF_DIV_FRAC_EN       0x00002000
-#define BP_CLKCTRL_DIS_LCDIF_DIV       0
-#define BM_CLKCTRL_DIS_LCDIF_DIV       0x00001FFF
-#define BF_CLKCTRL_DIS_LCDIF_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_DIS_LCDIF_DIV)
-
-#define HW_CLKCTRL_ETM (0x00000130)
-
-#define BM_CLKCTRL_ETM_CLKGATE 0x80000000
-#define BM_CLKCTRL_ETM_BUSY    0x20000000
-#define BM_CLKCTRL_ETM_DIV_FRAC_EN     0x00000080
-#define BP_CLKCTRL_ETM_DIV     0
-#define BM_CLKCTRL_ETM_DIV     0x0000007F
-#define BF_CLKCTRL_ETM_DIV(v)  \
-               (((v) << 0) & BM_CLKCTRL_ETM_DIV)
-
-#define HW_CLKCTRL_ENET        (0x00000140)
-
-#define BM_CLKCTRL_ENET_SLEEP  0x80000000
-#define BP_CLKCTRL_ENET_DISABLE        30
-#define BM_CLKCTRL_ENET_DISABLE        0x40000000
-#define BM_CLKCTRL_ENET_STATUS 0x20000000
-#define BM_CLKCTRL_ENET_BUSY_TIME      0x08000000
-#define BP_CLKCTRL_ENET_DIV_TIME       21
-#define BM_CLKCTRL_ENET_DIV_TIME       0x07E00000
-#define BF_CLKCTRL_ENET_DIV_TIME(v)  \
-               (((v) << 21) & BM_CLKCTRL_ENET_DIV_TIME)
-#define BM_CLKCTRL_ENET_BUSY   0x08000000
-#define BP_CLKCTRL_ENET_DIV    21
-#define BM_CLKCTRL_ENET_DIV    0x07E00000
-#define BF_CLKCTRL_ENET_DIV(v)  \
-               (((v) << 21) & BM_CLKCTRL_ENET_DIV)
-#define BP_CLKCTRL_ENET_TIME_SEL       19
-#define BM_CLKCTRL_ENET_TIME_SEL       0x00180000
-#define BF_CLKCTRL_ENET_TIME_SEL(v)  \
-               (((v) << 19) & BM_CLKCTRL_ENET_TIME_SEL)
-#define BV_CLKCTRL_ENET_TIME_SEL__XTAL      0x0
-#define BV_CLKCTRL_ENET_TIME_SEL__PLL       0x1
-#define BV_CLKCTRL_ENET_TIME_SEL__RMII_CLK  0x2
-#define BV_CLKCTRL_ENET_TIME_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_ENET_CLK_OUT_EN     0x00040000
-#define BM_CLKCTRL_ENET_RESET_BY_SW_CHIP       0x00020000
-#define BM_CLKCTRL_ENET_RESET_BY_SW    0x00010000
-
-#define HW_CLKCTRL_HSADC       (0x00000150)
-
-#define BM_CLKCTRL_HSADC_RESETB        0x40000000
-#define BP_CLKCTRL_HSADC_FREQDIV       28
-#define BM_CLKCTRL_HSADC_FREQDIV       0x30000000
-#define BF_CLKCTRL_HSADC_FREQDIV(v)  \
-               (((v) << 28) & BM_CLKCTRL_HSADC_FREQDIV)
-
-#define HW_CLKCTRL_FLEXCAN     (0x00000160)
-
-#define BP_CLKCTRL_FLEXCAN_STOP_CAN0   30
-#define BM_CLKCTRL_FLEXCAN_STOP_CAN0   0x40000000
-#define BM_CLKCTRL_FLEXCAN_CAN0_STATUS 0x20000000
-#define BP_CLKCTRL_FLEXCAN_STOP_CAN1   28
-#define BM_CLKCTRL_FLEXCAN_STOP_CAN1   0x10000000
-#define BM_CLKCTRL_FLEXCAN_CAN1_STATUS 0x08000000
-
-#define HW_CLKCTRL_FRAC0       (0x000001b0)
-#define HW_CLKCTRL_FRAC0_SET   (0x000001b4)
-#define HW_CLKCTRL_FRAC0_CLR   (0x000001b8)
-#define HW_CLKCTRL_FRAC0_TOG   (0x000001bc)
-
-#define BP_CLKCTRL_FRAC0_CLKGATEIO0    31
-#define BM_CLKCTRL_FRAC0_CLKGATEIO0    0x80000000
-#define BM_CLKCTRL_FRAC0_IO0_STABLE    0x40000000
-#define BP_CLKCTRL_FRAC0_IO0FRAC       24
-#define BM_CLKCTRL_FRAC0_IO0FRAC       0x3F000000
-#define BF_CLKCTRL_FRAC0_IO0FRAC(v)  \
-               (((v) << 24) & BM_CLKCTRL_FRAC0_IO0FRAC)
-#define BP_CLKCTRL_FRAC0_CLKGATEIO1    23
-#define BM_CLKCTRL_FRAC0_CLKGATEIO1    0x00800000
-#define BM_CLKCTRL_FRAC0_IO1_STABLE    0x00400000
-#define BP_CLKCTRL_FRAC0_IO1FRAC       16
-#define BM_CLKCTRL_FRAC0_IO1FRAC       0x003F0000
-#define BF_CLKCTRL_FRAC0_IO1FRAC(v)  \
-               (((v) << 16) & BM_CLKCTRL_FRAC0_IO1FRAC)
-#define BP_CLKCTRL_FRAC0_CLKGATEEMI    15
-#define BM_CLKCTRL_FRAC0_CLKGATEEMI    0x00008000
-#define BM_CLKCTRL_FRAC0_EMI_STABLE    0x00004000
-#define BP_CLKCTRL_FRAC0_EMIFRAC       8
-#define BM_CLKCTRL_FRAC0_EMIFRAC       0x00003F00
-#define BF_CLKCTRL_FRAC0_EMIFRAC(v)  \
-               (((v) << 8) & BM_CLKCTRL_FRAC0_EMIFRAC)
-#define BP_CLKCTRL_FRAC0_CLKGATECPU    7
-#define BM_CLKCTRL_FRAC0_CLKGATECPU    0x00000080
-#define BM_CLKCTRL_FRAC0_CPU_STABLE    0x00000040
-#define BP_CLKCTRL_FRAC0_CPUFRAC       0
-#define BM_CLKCTRL_FRAC0_CPUFRAC       0x0000003F
-#define BF_CLKCTRL_FRAC0_CPUFRAC(v)  \
-               (((v) << 0) & BM_CLKCTRL_FRAC0_CPUFRAC)
-
-#define HW_CLKCTRL_FRAC1       (0x000001c0)
-#define HW_CLKCTRL_FRAC1_SET   (0x000001c4)
-#define HW_CLKCTRL_FRAC1_CLR   (0x000001c8)
-#define HW_CLKCTRL_FRAC1_TOG   (0x000001cc)
-
-#define BP_CLKCTRL_FRAC1_CLKGATEGPMI   23
-#define BM_CLKCTRL_FRAC1_CLKGATEGPMI   0x00800000
-#define BM_CLKCTRL_FRAC1_GPMI_STABLE   0x00400000
-#define BP_CLKCTRL_FRAC1_GPMIFRAC      16
-#define BM_CLKCTRL_FRAC1_GPMIFRAC      0x003F0000
-#define BF_CLKCTRL_FRAC1_GPMIFRAC(v)  \
-               (((v) << 16) & BM_CLKCTRL_FRAC1_GPMIFRAC)
-#define BP_CLKCTRL_FRAC1_CLKGATEHSADC  15
-#define BM_CLKCTRL_FRAC1_CLKGATEHSADC  0x00008000
-#define BM_CLKCTRL_FRAC1_HSADC_STABLE  0x00004000
-#define BP_CLKCTRL_FRAC1_HSADCFRAC     8
-#define BM_CLKCTRL_FRAC1_HSADCFRAC     0x00003F00
-#define BF_CLKCTRL_FRAC1_HSADCFRAC(v)  \
-               (((v) << 8) & BM_CLKCTRL_FRAC1_HSADCFRAC)
-#define BP_CLKCTRL_FRAC1_CLKGATEPIX    7
-#define BM_CLKCTRL_FRAC1_CLKGATEPIX    0x00000080
-#define BM_CLKCTRL_FRAC1_PIX_STABLE    0x00000040
-#define BP_CLKCTRL_FRAC1_PIXFRAC       0
-#define BM_CLKCTRL_FRAC1_PIXFRAC       0x0000003F
-#define BF_CLKCTRL_FRAC1_PIXFRAC(v)  \
-               (((v) << 0) & BM_CLKCTRL_FRAC1_PIXFRAC)
-
-#define HW_CLKCTRL_CLKSEQ      (0x000001d0)
-#define HW_CLKCTRL_CLKSEQ_SET  (0x000001d4)
-#define HW_CLKCTRL_CLKSEQ_CLR  (0x000001d8)
-#define HW_CLKCTRL_CLKSEQ_TOG  (0x000001dc)
-
-#define BM_CLKCTRL_CLKSEQ_BYPASS_CPU   0x00040000
-#define BM_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF     0x00004000
-#define BV_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF__BYPASS 0x1
-#define BV_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF__PFD    0x0
-#define BM_CLKCTRL_CLKSEQ_BYPASS_ETM   0x00000100
-#define BM_CLKCTRL_CLKSEQ_BYPASS_EMI   0x00000080
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP3  0x00000040
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP2  0x00000020
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP1  0x00000010
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP0  0x00000008
-#define BM_CLKCTRL_CLKSEQ_BYPASS_GPMI  0x00000004
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF1 0x00000002
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF0 0x00000001
-
-#define HW_CLKCTRL_RESET       (0x000001e0)
-
-#define BM_CLKCTRL_RESET_WDOG_POR_DISABLE      0x00000020
-#define BM_CLKCTRL_RESET_EXTERNAL_RESET_ENABLE 0x00000010
-#define BM_CLKCTRL_RESET_THERMAL_RESET_ENABLE  0x00000008
-#define BM_CLKCTRL_RESET_THERMAL_RESET_DEFAULT 0x00000004
-#define BM_CLKCTRL_RESET_CHIP  0x00000002
-#define BM_CLKCTRL_RESET_DIG   0x00000001
-
-#define HW_CLKCTRL_STATUS      (0x000001f0)
-
-#define BP_CLKCTRL_STATUS_CPU_LIMIT    30
-#define BM_CLKCTRL_STATUS_CPU_LIMIT    0xC0000000
-#define BF_CLKCTRL_STATUS_CPU_LIMIT(v) \
-               (((v) << 30) & BM_CLKCTRL_STATUS_CPU_LIMIT)
-
-#define HW_CLKCTRL_VERSION     (0x00000200)
-
-#define BP_CLKCTRL_VERSION_MAJOR       24
-#define BM_CLKCTRL_VERSION_MAJOR       0xFF000000
-#define BF_CLKCTRL_VERSION_MAJOR(v) \
-               (((v) << 24) & BM_CLKCTRL_VERSION_MAJOR)
-#define BP_CLKCTRL_VERSION_MINOR       16
-#define BM_CLKCTRL_VERSION_MINOR       0x00FF0000
-#define BF_CLKCTRL_VERSION_MINOR(v)  \
-               (((v) << 16) & BM_CLKCTRL_VERSION_MINOR)
-#define BP_CLKCTRL_VERSION_STEP        0
-#define BM_CLKCTRL_VERSION_STEP        0x0000FFFF
-#define BF_CLKCTRL_VERSION_STEP(v)  \
-               (((v) << 0) & BM_CLKCTRL_VERSION_STEP)
-
-#endif /* __REGS_CLKCTRL_MX28_H__ */
index 80ac1fca8a004d91bf7f5be1fa4c7adfaf703841..30042e23bfa7cf6ca7eea1fd2f543ae11f8b4fbf 100644 (file)
@@ -37,8 +37,6 @@
 #define MXS_MODULE_CLKGATE             (1 << 30)
 #define MXS_MODULE_SFTRST              (1 << 31)
 
-#define CLKCTRL_TIMEOUT                10      /* 10 ms */
-
 static void __iomem *mxs_clkctrl_reset_addr;
 
 /*
@@ -139,17 +137,3 @@ error:
        return -ETIMEDOUT;
 }
 EXPORT_SYMBOL(mxs_reset_block);
-
-int mxs_clkctrl_timeout(unsigned int reg_offset, unsigned int mask)
-{
-       unsigned long timeout = jiffies + msecs_to_jiffies(CLKCTRL_TIMEOUT);
-       while (readl_relaxed(MXS_IO_ADDRESS(MXS_CLKCTRL_BASE_ADDR)
-                                               + reg_offset) & mask) {
-               if (time_after(jiffies, timeout)) {
-                       pr_err("Timeout at CLKCTRL + 0x%x\n", reg_offset);
-                       return -ETIMEDOUT;
-               }
-       }
-
-       return 0;
-}
index 564a63279f184544455f091c85a7c9543099a805..02d36de9c4e8c394a2b498fff4fbe877dea2a205 100644 (file)
@@ -20,6 +20,7 @@
  * MA 02110-1301, USA.
  */
 
+#include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/clockchips.h>
@@ -243,8 +244,16 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)
        return 0;
 }
 
-void __init mxs_timer_init(struct clk *timer_clk, int irq)
+void __init mxs_timer_init(int irq)
 {
+       struct clk *timer_clk;
+
+       timer_clk = clk_get_sys("timrot", NULL);
+       if (IS_ERR(timer_clk)) {
+               pr_err("%s: failed to get clk\n", __func__);
+               return;
+       }
+
        clk_prepare_enable(timer_clk);
 
        /*
index 58cacafcf6628758a27623abb8f8be1b1b240317..2e8d3e176bc70b7216b5b8aa818b648ae63ebee0 100644 (file)
@@ -111,7 +111,7 @@ static struct nomadik_nand_platform_data nhk8815_nand_data = {
        .parts          = nhk8815_partitions,
        .nparts         = ARRAY_SIZE(nhk8815_partitions),
        .options        = NAND_COPYBACK | NAND_CACHEPRG | NAND_NO_PADDING \
-                       | NAND_NO_READRDY | NAND_NO_AUTOINCR,
+                       | NAND_NO_READRDY,
        .init           = nhk8815_nand_init,
 };
 
index c1b681ef4cba80870ccec99715b14eb0ddfad07a..f2f8a58470182c5d1d783b1a3496833f1b1dfbc7 100644 (file)
@@ -595,7 +595,12 @@ gpio_free:
        gpio_free(AMS_DELTA_GPIO_PIN_MODEM_IRQ);
        return err;
 }
-late_initcall(late_init);
+
+static void __init ams_delta_init_late(void)
+{
+       omap1_init_late();
+       late_init();
+}
 
 static void __init ams_delta_map_io(void)
 {
@@ -611,6 +616,7 @@ MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
        .init_machine   = ams_delta_init,
+       .init_late      = ams_delta_init_late,
        .timer          = &omap1_timer,
        .restart        = omap1_restart,
 MACHINE_END
index 4a4afb371022e0898d78a1cc673f02d392b5b957..6872f3fd400ffd242029b67f8688fda2853b6899 100644 (file)
@@ -192,14 +192,11 @@ static int nand_dev_ready(struct mtd_info *mtd)
        return gpio_get_value(FSAMPLE_NAND_RB_GPIO_PIN);
 }
 
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
 static struct platform_nand_data nand_data = {
        .chip   = {
                .nr_chips               = 1,
                .chip_offset            = 0,
                .options                = NAND_SAMSUNG_LP_OPTIONS,
-               .part_probe_types       = part_probes,
        },
        .ctrl   = {
                .cmd_ctrl       = omap1_nand_cmd_ctl,
@@ -369,6 +366,7 @@ MACHINE_START(OMAP_FSAMPLE, "OMAP730 F-Sample")
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
        .init_machine   = omap_fsample_init,
+       .init_late      = omap1_init_late,
        .timer          = &omap1_timer,
        .restart        = omap1_restart,
 MACHINE_END
index 9a5fe581bc1c8c0e22264edbe5b8dd17b9215780..e75e2d55a2d72ba133c5e923389c03512058461e 100644 (file)
@@ -88,6 +88,7 @@ MACHINE_START(OMAP_GENERIC, "Generic OMAP1510/1610/1710")
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
        .init_machine   = omap_generic_init,
+       .init_late      = omap1_init_late,
        .timer          = &omap1_timer,
        .restart        = omap1_restart,
 MACHINE_END
index 057ec13f06490bfbdcc5c425fd36f54aa8b1863b..a28e989a63f4369f94cd0081e11cb4f28f407215 100644 (file)
@@ -186,8 +186,6 @@ static int h2_nand_dev_ready(struct mtd_info *mtd)
        return gpio_get_value(H2_NAND_RB_GPIO_PIN);
 }
 
-static const char *h2_part_probes[] = { "cmdlinepart", NULL };
-
 static struct platform_nand_data h2_nand_platdata = {
        .chip   = {
                .nr_chips               = 1,
@@ -195,7 +193,6 @@ static struct platform_nand_data h2_nand_platdata = {
                .nr_partitions          = ARRAY_SIZE(h2_nand_partitions),
                .partitions             = h2_nand_partitions,
                .options                = NAND_SAMSUNG_LP_OPTIONS,
-               .part_probe_types       = h2_part_probes,
        },
        .ctrl   = {
                .cmd_ctrl       = omap1_nand_cmd_ctl,
@@ -431,6 +428,7 @@ MACHINE_START(OMAP_H2, "TI-H2")
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
        .init_machine   = h2_init,
+       .init_late      = omap1_init_late,
        .timer          = &omap1_timer,
        .restart        = omap1_restart,
 MACHINE_END
index f6ddf875965712ebcf05698e589b2ddc3fddae5c..108a8640fc6f04c2c42c0accc0d6602716831ef0 100644 (file)
@@ -188,8 +188,6 @@ static int nand_dev_ready(struct mtd_info *mtd)
        return gpio_get_value(H3_NAND_RB_GPIO_PIN);
 }
 
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
 static struct platform_nand_data nand_platdata = {
        .chip   = {
                .nr_chips               = 1,
@@ -197,7 +195,6 @@ static struct platform_nand_data nand_platdata = {
                .nr_partitions          = ARRAY_SIZE(nand_partitions),
                .partitions             = nand_partitions,
                .options                = NAND_SAMSUNG_LP_OPTIONS,
-               .part_probe_types       = part_probes,
        },
        .ctrl   = {
                .cmd_ctrl       = omap1_nand_cmd_ctl,
@@ -425,6 +422,7 @@ MACHINE_START(OMAP_H3, "TI OMAP1710 H3 board")
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
        .init_machine   = h3_init,
+       .init_late      = omap1_init_late,
        .timer          = &omap1_timer,
        .restart        = omap1_restart,
 MACHINE_END
index 60c06ee23855d018198698dbd74110611447d8e6..118a9d4a4c54a56b2bad8b261c60335b3f3a3f82 100644 (file)
@@ -605,6 +605,7 @@ MACHINE_START(HERALD, "HTC Herald")
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
        .init_machine   = htcherald_init,
+       .init_late      = omap1_init_late,
        .timer          = &omap1_timer,
        .restart        = omap1_restart,
 MACHINE_END
index 67d7fd57a692b7d7c209b01859ad0a97cc8c86f0..7970223a559d552d270953905a2107e3dfb2446f 100644 (file)
@@ -457,6 +457,7 @@ MACHINE_START(OMAP_INNOVATOR, "TI-Innovator")
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
        .init_machine   = innovator_init,
+       .init_late      = omap1_init_late,
        .timer          = &omap1_timer,
        .restart        = omap1_restart,
 MACHINE_END
index d21dcc2fbc5af3a3b53477e8e735aa5bcb428447..7212ae97f44acfc1ff508fbca348a3ff26702a8a 100644 (file)
@@ -255,6 +255,7 @@ MACHINE_START(NOKIA770, "Nokia 770")
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
        .init_machine   = omap_nokia770_init,
+       .init_late      = omap1_init_late,
        .timer          = &omap1_timer,
        .restart        = omap1_restart,
 MACHINE_END
index a5f85dda3f6924ce84dce90270a704edfafd6962..da8d872d3d1cddf00bdab9c3bdec4684ec585989 100644 (file)
@@ -574,6 +574,7 @@ MACHINE_START(OMAP_OSK, "TI-OSK")
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
        .init_machine   = osk_init,
+       .init_late      = omap1_init_late,
        .timer          = &omap1_timer,
        .restart        = omap1_restart,
 MACHINE_END
index a60e6c22f8169ed9027e26b2425b54875d23deba..949b62a736931b0a74158727ca2ca2ecf488c6e8 100644 (file)
@@ -267,6 +267,7 @@ MACHINE_START(OMAP_PALMTE, "OMAP310 based Palm Tungsten E")
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
        .init_machine   = omap_palmte_init,
+       .init_late      = omap1_init_late,
        .timer          = &omap1_timer,
        .restart        = omap1_restart,
 MACHINE_END
index 8d854878547be0fcdcc77e9e539b3d819d33ae97..7f1e1cf2bf46a47e525338ff47398b2226ea997f 100644 (file)
@@ -313,6 +313,7 @@ MACHINE_START(OMAP_PALMTT, "OMAP1510 based Palm Tungsten|T")
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
        .init_machine   = omap_palmtt_init,
+       .init_late      = omap1_init_late,
        .timer          = &omap1_timer,
        .restart        = omap1_restart,
 MACHINE_END
index 61ed4f0247ce37567e8fb0046f4d1cf503e4181b..3c71c6bace2cd09489b358d6569c2652471040aa 100644 (file)
@@ -330,6 +330,7 @@ MACHINE_START(OMAP_PALMZ71, "OMAP310 based Palm Zire71")
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
        .init_machine   = omap_palmz71_init,
+       .init_late      = omap1_init_late,
        .timer          = &omap1_timer,
        .restart        = omap1_restart,
 MACHINE_END
index a2c88890e767e369488c4b5eb16f87f524e0f51a..703d55ecffe2b2411c77af5d262a3372e5431a86 100644 (file)
@@ -150,14 +150,11 @@ static int nand_dev_ready(struct mtd_info *mtd)
        return gpio_get_value(P2_NAND_RB_GPIO_PIN);
 }
 
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
 static struct platform_nand_data nand_data = {
        .chip   = {
                .nr_chips               = 1,
                .chip_offset            = 0,
                .options                = NAND_SAMSUNG_LP_OPTIONS,
-               .part_probe_types       = part_probes,
        },
        .ctrl   = {
                .cmd_ctrl       = omap1_nand_cmd_ctl,
@@ -331,6 +328,7 @@ MACHINE_START(OMAP_PERSEUS2, "OMAP730 Perseus2")
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
        .init_machine   = omap_perseus2_init,
+       .init_late      = omap1_init_late,
        .timer          = &omap1_timer,
        .restart        = omap1_restart,
 MACHINE_END
index f34cb74a9f41d12cbe3983dcd121fa11f4ccb5cd..3b7b82b136840ac05fcedaa1e9ba3da1617df96b 100644 (file)
@@ -407,6 +407,7 @@ MACHINE_START(SX1, "OMAP310 based Siemens SX1")
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
        .init_machine   = omap_sx1_init,
+       .init_late      = omap1_init_late,
        .timer          = &omap1_timer,
        .restart        = omap1_restart,
 MACHINE_END
index 37232d04233ff779bb44e631d4675559ef34de19..afd67f0ec495160d4c5f996f8f81b6832b03e1c2 100644 (file)
@@ -294,6 +294,7 @@ MACHINE_START(VOICEBLUE, "VoiceBlue OMAP5910")
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
        .init_machine   = voiceblue_init,
+       .init_late      = omap1_init_late,
        .timer          = &omap1_timer,
        .restart        = voiceblue_restart,
 MACHINE_END
index bb7779b577958f5c3ea47ed5b82180d5f2fca2ce..c2552b24f9f295381b36132cc4d59b850ade8d3f 100644 (file)
@@ -53,8 +53,18 @@ static inline void omap16xx_map_io(void)
 }
 #endif
 
+#ifdef CONFIG_OMAP_SERIAL_WAKE
+int omap_serial_wakeup_init(void);
+#else
+static inline int omap_serial_wakeup_init(void)
+{
+       return 0;
+}
+#endif
+
 void omap1_init_early(void);
 void omap1_init_irq(void);
+void omap1_init_late(void);
 void omap1_restart(char, const char *);
 
 extern void __init omap_check_revision(void);
@@ -63,7 +73,14 @@ extern void omap1_nand_cmd_ctl(struct mtd_info *mtd, int cmd,
                               unsigned int ctrl);
 
 extern struct sys_timer omap1_timer;
-extern bool omap_32k_timer_init(void);
+#ifdef CONFIG_OMAP_32K_TIMER
+extern int omap_32k_timer_init(void);
+#else
+static inline int __init omap_32k_timer_init(void)
+{
+       return -ENODEV;
+}
+#endif
 
 extern u32 omap_irq_flags;
 
index dcd8ddbec2bbc92b7577bc6f143d4085e3ec426f..fa1fa4deb6aa4ae65c523d7e84686ffb9cba1406 100644 (file)
@@ -22,6 +22,7 @@
 #include <plat/tc.h>
 #include <plat/board.h>
 #include <plat/mux.h>
+#include <plat/dma.h>
 #include <plat/mmc.h>
 #include <plat/omap7xx.h>
 
 #include "common.h"
 #include "clock.h"
 
+#if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE)
+
+static struct platform_device omap_pcm = {
+       .name   = "omap-pcm-audio",
+       .id     = -1,
+};
+
+static void omap_init_audio(void)
+{
+       platform_device_register(&omap_pcm);
+}
+
+#else
+static inline void omap_init_audio(void) {}
+#endif
+
 /*-------------------------------------------------------------------------*/
 
 #if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
@@ -128,6 +145,56 @@ static inline void omap1_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
        }
 }
 
+#define OMAP_MMC_NR_RES                4
+
+/*
+ * Register MMC devices.
+ */
+static int __init omap_mmc_add(const char *name, int id, unsigned long base,
+                               unsigned long size, unsigned int irq,
+                               unsigned rx_req, unsigned tx_req,
+                               struct omap_mmc_platform_data *data)
+{
+       struct platform_device *pdev;
+       struct resource res[OMAP_MMC_NR_RES];
+       int ret;
+
+       pdev = platform_device_alloc(name, id);
+       if (!pdev)
+               return -ENOMEM;
+
+       memset(res, 0, OMAP_MMC_NR_RES * sizeof(struct resource));
+       res[0].start = base;
+       res[0].end = base + size - 1;
+       res[0].flags = IORESOURCE_MEM;
+       res[1].start = res[1].end = irq;
+       res[1].flags = IORESOURCE_IRQ;
+       res[2].start = rx_req;
+       res[2].name = "rx";
+       res[2].flags = IORESOURCE_DMA;
+       res[3].start = tx_req;
+       res[3].name = "tx";
+       res[3].flags = IORESOURCE_DMA;
+
+       ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
+       if (ret == 0)
+               ret = platform_device_add_data(pdev, data, sizeof(*data));
+       if (ret)
+               goto fail;
+
+       ret = platform_device_add(pdev);
+       if (ret)
+               goto fail;
+
+       /* return device handle to board setup code */
+       data->dev = &pdev->dev;
+       return 0;
+
+fail:
+       platform_device_put(pdev);
+       return ret;
+}
+
 void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
                        int nr_controllers)
 {
@@ -135,6 +202,7 @@ void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
 
        for (i = 0; i < nr_controllers; i++) {
                unsigned long base, size;
+               unsigned rx_req, tx_req;
                unsigned int irq = 0;
 
                if (!mmc_data[i])
@@ -146,19 +214,24 @@ void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
                case 0:
                        base = OMAP1_MMC1_BASE;
                        irq = INT_MMC;
+                       rx_req = OMAP_DMA_MMC_RX;
+                       tx_req = OMAP_DMA_MMC_TX;
                        break;
                case 1:
                        if (!cpu_is_omap16xx())
                                return;
                        base = OMAP1_MMC2_BASE;
                        irq = INT_1610_MMC2;
+                       rx_req = OMAP_DMA_MMC2_RX;
+                       tx_req = OMAP_DMA_MMC2_TX;
                        break;
                default:
                        continue;
                }
                size = OMAP1_MMC_SIZE;
 
-               omap_mmc_add("mmci-omap", i, base, size, irq, mmc_data[i]);
+               omap_mmc_add("mmci-omap", i, base, size, irq,
+                               rx_req, tx_req, mmc_data[i]);
        };
 }
 
@@ -242,23 +315,48 @@ void __init omap1_camera_init(void *info)
 
 static inline void omap_init_sti(void) {}
 
-#if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE)
+/* Numbering for the SPI-capable controllers when used for SPI:
+ * spi         = 1
+ * uwire       = 2
+ * mmc1..2     = 3..4
+ * mcbsp1..3   = 5..7
+ */
 
-static struct platform_device omap_pcm = {
-       .name   = "omap-pcm-audio",
-       .id     = -1,
+#if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE)
+
+#define        OMAP_UWIRE_BASE         0xfffb3000
+
+static struct resource uwire_resources[] = {
+       {
+               .start          = OMAP_UWIRE_BASE,
+               .end            = OMAP_UWIRE_BASE + 0x20,
+               .flags          = IORESOURCE_MEM,
+       },
 };
 
-static void omap_init_audio(void)
+static struct platform_device omap_uwire_device = {
+       .name      = "omap_uwire",
+       .id          = -1,
+       .num_resources  = ARRAY_SIZE(uwire_resources),
+       .resource       = uwire_resources,
+};
+
+static void omap_init_uwire(void)
 {
-       platform_device_register(&omap_pcm);
-}
+       /* FIXME define and use a boot tag; not all boards will be hooking
+        * up devices to the microwire controller, and multi-board configs
+        * mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway...
+        */
 
+       /* board-specific code must configure chipselects (only a few
+        * are normally used) and SCLK/SDI/SDO (each has two choices).
+        */
+       (void) platform_device_register(&omap_uwire_device);
+}
 #else
-static inline void omap_init_audio(void) {}
+static inline void omap_init_uwire(void) {}
 #endif
 
-/*-------------------------------------------------------------------------*/
 
 /*
  * This gets called after board-specific INIT_MACHINE, and initializes most
@@ -292,11 +390,12 @@ static int __init omap1_init_devices(void)
         * in alphabetical order so they're easier to sort through.
         */
 
+       omap_init_audio();
        omap_init_mbox();
        omap_init_rtc();
        omap_init_spi100k();
        omap_init_sti();
-       omap_init_audio();
+       omap_init_uwire();
 
        return 0;
 }
index 71ce017bf5d8a895c9ddb8f2b4c1a12dba4ef7d4..6c95a59f0f1648275f2b75db41b11493096ce2d0 100644 (file)
@@ -137,6 +137,11 @@ void __init omap1_init_early(void)
        omap_init_consistent_dma_size();
 }
 
+void __init omap1_init_late(void)
+{
+       omap_serial_wakeup_init();
+}
+
 /*
  * NOTE: Please use ioremap + __raw_read/write where possible instead of these
  */
index 93ae8f29727e61a002bcf705a6d2492f4772023a..6809c9e56c9317dfdbaf8eff92e79d651d164237 100644 (file)
@@ -237,7 +237,7 @@ static void __init omap_serial_set_port_wakeup(int gpio_nr)
        enable_irq_wake(gpio_to_irq(gpio_nr));
 }
 
-static int __init omap_serial_wakeup_init(void)
+int __init omap_serial_wakeup_init(void)
 {
        if (!cpu_is_omap16xx())
                return 0;
@@ -251,7 +251,6 @@ static int __init omap_serial_wakeup_init(void)
 
        return 0;
 }
-late_initcall(omap_serial_wakeup_init);
 
 #endif /* CONFIG_OMAP_SERIAL_WAKE */
 
index 4d8dd9a1b04cedc07fb1a1e0689ae610e3188480..4062480bfec7ca6baf5237d177d77d853ba029bd 100644 (file)
@@ -232,20 +232,6 @@ static inline void omap_mpu_timer_init(void)
 }
 #endif /* CONFIG_OMAP_MPU_TIMER */
 
-static inline int omap_32k_timer_usable(void)
-{
-       int res = false;
-
-       if (cpu_is_omap730() || cpu_is_omap15xx())
-               return res;
-
-#ifdef CONFIG_OMAP_32K_TIMER
-       res = omap_32k_timer_init();
-#endif
-
-       return res;
-}
-
 /*
  * ---------------------------------------------------------------------------
  * Timer initialization
@@ -253,7 +239,7 @@ static inline int omap_32k_timer_usable(void)
  */
 static void __init omap1_timer_init(void)
 {
-       if (!omap_32k_timer_usable())
+       if (omap_32k_timer_init() != 0)
                omap_mpu_timer_init();
 }
 
index 325b9a0aa4a00c9d71bdcf4d3f13dc7732ebe136..eae49c3980c9b22695af9a4e7e3c8db06da460ca 100644 (file)
@@ -71,6 +71,7 @@
 
 /* 16xx specific defines */
 #define OMAP1_32K_TIMER_BASE           0xfffb9000
+#define OMAP1_32KSYNC_TIMER_BASE       0xfffbc400
 #define OMAP1_32K_TIMER_CR             0x08
 #define OMAP1_32K_TIMER_TVR            0x00
 #define OMAP1_32K_TIMER_TCR            0x04
@@ -182,10 +183,29 @@ static __init void omap_init_32k_timer(void)
  * Timer initialization
  * ---------------------------------------------------------------------------
  */
-bool __init omap_32k_timer_init(void)
+int __init omap_32k_timer_init(void)
 {
-       omap_init_clocksource_32k();
-       omap_init_32k_timer();
+       int ret = -ENODEV;
 
-       return true;
+       if (cpu_is_omap16xx()) {
+               void __iomem *base;
+               struct clk *sync32k_ick;
+
+               base = ioremap(OMAP1_32KSYNC_TIMER_BASE, SZ_1K);
+               if (!base) {
+                       pr_err("32k_counter: failed to map base addr\n");
+                       return -ENODEV;
+               }
+
+               sync32k_ick = clk_get(NULL, "omap_32ksync_ick");
+               if (!IS_ERR(sync32k_ick))
+                       clk_enable(sync32k_ick);
+
+               ret = omap_init_clocksource_32k(base);
+       }
+
+       if (!ret)
+               omap_init_32k_timer();
+
+       return ret;
 }
index 964ee67a3b77045df64a02594d3bae5119547789..4cf5142f22ccd9543f6795f491a24a64bc3d12a9 100644 (file)
@@ -78,12 +78,12 @@ config SOC_OMAP3430
        default y
        select ARCH_OMAP_OTG
 
-config SOC_OMAPTI81XX
+config SOC_TI81XX
        bool "TI81XX support"
        depends on ARCH_OMAP3
        default y
 
-config SOC_OMAPAM33XX
+config SOC_AM33XX
        bool "AM33XX support"
        depends on ARCH_OMAP3
        default y
@@ -320,12 +320,12 @@ config MACH_OMAP_3630SDP
 
 config MACH_TI8168EVM
        bool "TI8168 Evaluation Module"
-       depends on SOC_OMAPTI81XX
+       depends on SOC_TI81XX
        default y
 
 config MACH_TI8148EVM
        bool "TI8148 Evaluation Module"
-       depends on SOC_OMAPTI81XX
+       depends on SOC_TI81XX
        default y
 
 config MACH_OMAP_4430SDP
index 385c083d24b2fbfc960a95826719457614a643f8..fa742f3c262947313f23df2b65e8b36521cf71d9 100644 (file)
@@ -24,10 +24,11 @@ endif
 obj-$(CONFIG_TWL4030_CORE) += omap_twl.o
 
 # SMP support ONLY available for OMAP4
+
 obj-$(CONFIG_SMP)                      += omap-smp.o omap-headsmp.o
 obj-$(CONFIG_HOTPLUG_CPU)              += omap-hotplug.o
-obj-$(CONFIG_ARCH_OMAP4)               += omap4-common.o omap-wakeupgen.o \
-                                          sleep44xx.o
+obj-$(CONFIG_ARCH_OMAP4)               += omap4-common.o omap-wakeupgen.o
+obj-$(CONFIG_ARCH_OMAP4)               += sleep44xx.o
 
 plus_sec := $(call as-instr,.arch_extension sec,+sec)
 AFLAGS_omap-headsmp.o                  :=-Wa,-march=armv7-a$(plus_sec)
@@ -64,10 +65,10 @@ endif
 ifeq ($(CONFIG_PM),y)
 obj-$(CONFIG_ARCH_OMAP2)               += pm24xx.o
 obj-$(CONFIG_ARCH_OMAP2)               += sleep24xx.o
-obj-$(CONFIG_ARCH_OMAP3)               += pm34xx.o sleep34xx.o \
-                                          cpuidle34xx.o
-obj-$(CONFIG_ARCH_OMAP4)               += pm44xx.o omap-mpuss-lowpower.o \
-                                          cpuidle44xx.o
+obj-$(CONFIG_ARCH_OMAP3)               += pm34xx.o sleep34xx.o
+obj-$(CONFIG_ARCH_OMAP3)               += cpuidle34xx.o
+obj-$(CONFIG_ARCH_OMAP4)               += pm44xx.o omap-mpuss-lowpower.o
+obj-$(CONFIG_ARCH_OMAP4)               += cpuidle44xx.o
 obj-$(CONFIG_PM_DEBUG)                 += pm-debug.o
 obj-$(CONFIG_OMAP_SMARTREFLEX)          += sr_device.o smartreflex.o
 obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3)  += smartreflex-class3.o
@@ -84,90 +85,86 @@ endif
 # PRCM
 obj-y                                  += prm_common.o
 obj-$(CONFIG_ARCH_OMAP2)               += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
-obj-$(CONFIG_ARCH_OMAP3)               += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o \
-                                          vc3xxx_data.o vp3xxx_data.o
-# XXX The presence of cm2xxx_3xxx.o on the line below is temporary and
-# will be removed once the OMAP4 part of the codebase is converted to
-# use OMAP4-specific PRCM functions.
-obj-$(CONFIG_ARCH_OMAP4)               += prcm.o cm2xxx_3xxx.o cminst44xx.o \
-                                          cm44xx.o prcm_mpu44xx.o \
-                                          prminst44xx.o vc44xx_data.o \
-                                          vp44xx_data.o prm44xx.o
+obj-$(CONFIG_ARCH_OMAP3)               += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP3)               += vc3xxx_data.o vp3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4)               += prcm.o cminst44xx.o cm44xx.o
+obj-$(CONFIG_ARCH_OMAP4)               += prcm_mpu44xx.o prminst44xx.o
+obj-$(CONFIG_ARCH_OMAP4)               += vc44xx_data.o vp44xx_data.o prm44xx.o
 
 # OMAP voltage domains
 voltagedomain-common                   := voltage.o vc.o vp.o
-obj-$(CONFIG_ARCH_OMAP2)               += $(voltagedomain-common) \
-                                          voltagedomains2xxx_data.o
-obj-$(CONFIG_ARCH_OMAP3)               += $(voltagedomain-common) \
-                                          voltagedomains3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP4)               += $(voltagedomain-common) \
-                                          voltagedomains44xx_data.o
+obj-$(CONFIG_ARCH_OMAP2)               += $(voltagedomain-common)
+obj-$(CONFIG_ARCH_OMAP2)               += voltagedomains2xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)               += $(voltagedomain-common)
+obj-$(CONFIG_ARCH_OMAP3)               += voltagedomains3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4)               += $(voltagedomain-common)
+obj-$(CONFIG_ARCH_OMAP4)               += voltagedomains44xx_data.o
 
 # OMAP powerdomain framework
 powerdomain-common                     += powerdomain.o powerdomain-common.o
-obj-$(CONFIG_ARCH_OMAP2)               += $(powerdomain-common) \
-                                          powerdomain2xxx_3xxx.o \
-                                          powerdomains2xxx_data.o \
-                                          powerdomains2xxx_3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP3)               += $(powerdomain-common) \
-                                          powerdomain2xxx_3xxx.o \
-                                          powerdomains3xxx_data.o \
-                                          powerdomains2xxx_3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP4)               += $(powerdomain-common) \
-                                          powerdomain44xx.o \
-                                          powerdomains44xx_data.o
+obj-$(CONFIG_ARCH_OMAP2)               += $(powerdomain-common)
+obj-$(CONFIG_ARCH_OMAP2)               += powerdomains2xxx_data.o
+obj-$(CONFIG_ARCH_OMAP2)               += powerdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP2)               += powerdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)               += $(powerdomain-common)
+obj-$(CONFIG_ARCH_OMAP3)               += powerdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP3)               += powerdomains3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)               += powerdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4)               += $(powerdomain-common)
+obj-$(CONFIG_ARCH_OMAP4)               += powerdomain44xx.o
+obj-$(CONFIG_ARCH_OMAP4)               += powerdomains44xx_data.o
 
 # PRCM clockdomain control
-clockdomain-common                     += clockdomain.o \
-                                          clockdomains_common_data.o
-obj-$(CONFIG_ARCH_OMAP2)               += $(clockdomain-common) \
-                                          clockdomain2xxx_3xxx.o \
-                                          clockdomains2xxx_3xxx_data.o
+clockdomain-common                     += clockdomain.o
+clockdomain-common                     += clockdomains_common_data.o
+obj-$(CONFIG_ARCH_OMAP2)               += $(clockdomain-common)
+obj-$(CONFIG_ARCH_OMAP2)               += clockdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP2)               += clockdomains2xxx_3xxx_data.o
 obj-$(CONFIG_SOC_OMAP2420)             += clockdomains2420_data.o
 obj-$(CONFIG_SOC_OMAP2430)             += clockdomains2430_data.o
-obj-$(CONFIG_ARCH_OMAP3)               += $(clockdomain-common) \
-                                          clockdomain2xxx_3xxx.o \
-                                          clockdomains2xxx_3xxx_data.o \
-                                          clockdomains3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP4)               += $(clockdomain-common) \
-                                          clockdomain44xx.o \
-                                          clockdomains44xx_data.o
+obj-$(CONFIG_ARCH_OMAP3)               += $(clockdomain-common)
+obj-$(CONFIG_ARCH_OMAP3)               += clockdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP3)               += clockdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)               += clockdomains3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4)               += $(clockdomain-common)
+obj-$(CONFIG_ARCH_OMAP4)               += clockdomain44xx.o
+obj-$(CONFIG_ARCH_OMAP4)               += clockdomains44xx_data.o
 
 # Clock framework
-obj-$(CONFIG_ARCH_OMAP2)               += $(clock-common) clock2xxx.o \
-                                          clkt2xxx_sys.o \
-                                          clkt2xxx_dpllcore.o \
-                                          clkt2xxx_virt_prcm_set.o \
-                                          clkt2xxx_apll.o clkt2xxx_osc.o \
-                                          clkt2xxx_dpll.o clkt_iclk.o
+obj-$(CONFIG_ARCH_OMAP2)               += $(clock-common) clock2xxx.o
+obj-$(CONFIG_ARCH_OMAP2)               += clkt2xxx_sys.o
+obj-$(CONFIG_ARCH_OMAP2)               += clkt2xxx_dpllcore.o
+obj-$(CONFIG_ARCH_OMAP2)               += clkt2xxx_virt_prcm_set.o
+obj-$(CONFIG_ARCH_OMAP2)               += clkt2xxx_apll.o clkt2xxx_osc.o
+obj-$(CONFIG_ARCH_OMAP2)               += clkt2xxx_dpll.o clkt_iclk.o
 obj-$(CONFIG_SOC_OMAP2420)             += clock2420_data.o
 obj-$(CONFIG_SOC_OMAP2430)             += clock2430.o clock2430_data.o
-obj-$(CONFIG_ARCH_OMAP3)               += $(clock-common) clock3xxx.o \
-                                          clock34xx.o clkt34xx_dpll3m2.o \
-                                          clock3517.o clock36xx.o \
-                                          dpll3xxx.o clock3xxx_data.o \
-                                          clkt_iclk.o
-obj-$(CONFIG_ARCH_OMAP4)               += $(clock-common) clock44xx_data.o \
-                                          dpll3xxx.o dpll44xx.o
+obj-$(CONFIG_ARCH_OMAP3)               += $(clock-common) clock3xxx.o
+obj-$(CONFIG_ARCH_OMAP3)               += clock34xx.o clkt34xx_dpll3m2.o
+obj-$(CONFIG_ARCH_OMAP3)               += clock3517.o clock36xx.o
+obj-$(CONFIG_ARCH_OMAP3)               += dpll3xxx.o clock3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)               += clkt_iclk.o
+obj-$(CONFIG_ARCH_OMAP4)               += $(clock-common) clock44xx_data.o
+obj-$(CONFIG_ARCH_OMAP4)               += dpll3xxx.o dpll44xx.o
 
 # OMAP2 clock rate set data (old "OPP" data)
 obj-$(CONFIG_SOC_OMAP2420)             += opp2420_data.o
 obj-$(CONFIG_SOC_OMAP2430)             += opp2430_data.o
 
 # hwmod data
-obj-$(CONFIG_SOC_OMAP2420)             += omap_hwmod_2xxx_ipblock_data.o \
-                                          omap_hwmod_2xxx_3xxx_ipblock_data.o \
-                                          omap_hwmod_2xxx_interconnect_data.o \
-                                          omap_hwmod_2xxx_3xxx_interconnect_data.o \
-                                          omap_hwmod_2420_data.o
-obj-$(CONFIG_SOC_OMAP2430)             += omap_hwmod_2xxx_ipblock_data.o \
-                                          omap_hwmod_2xxx_3xxx_ipblock_data.o \
-                                          omap_hwmod_2xxx_interconnect_data.o \
-                                          omap_hwmod_2xxx_3xxx_interconnect_data.o \
-                                          omap_hwmod_2430_data.o
-obj-$(CONFIG_ARCH_OMAP3)               += omap_hwmod_2xxx_3xxx_ipblock_data.o \
-                                          omap_hwmod_2xxx_3xxx_interconnect_data.o \
-                                          omap_hwmod_3xxx_data.o
+obj-$(CONFIG_SOC_OMAP2420)             += omap_hwmod_2xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2420)             += omap_hwmod_2xxx_3xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2420)             += omap_hwmod_2xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2420)             += omap_hwmod_2xxx_3xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2420)             += omap_hwmod_2420_data.o
+obj-$(CONFIG_SOC_OMAP2430)             += omap_hwmod_2xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2430)             += omap_hwmod_2xxx_3xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2430)             += omap_hwmod_2xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2430)             += omap_hwmod_2xxx_3xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2430)             += omap_hwmod_2430_data.o
+obj-$(CONFIG_ARCH_OMAP3)               += omap_hwmod_2xxx_3xxx_ipblock_data.o
+obj-$(CONFIG_ARCH_OMAP3)               += omap_hwmod_2xxx_3xxx_interconnect_data.o
+obj-$(CONFIG_ARCH_OMAP3)               += omap_hwmod_3xxx_data.o
 obj-$(CONFIG_ARCH_OMAP4)               += omap_hwmod_44xx_data.o
 
 # EMU peripherals
@@ -208,23 +205,19 @@ obj-$(CONFIG_MACH_OMAP3EVM)               += board-omap3evm.o
 obj-$(CONFIG_MACH_OMAP3_PANDORA)       += board-omap3pandora.o
 obj-$(CONFIG_MACH_OMAP_3430SDP)                += board-3430sdp.o
 obj-$(CONFIG_MACH_NOKIA_N8X0)          += board-n8x0.o
-obj-$(CONFIG_MACH_NOKIA_RM680)         += board-rm680.o \
-                                          sdram-nokia.o
-obj-$(CONFIG_MACH_NOKIA_RX51)          += board-rx51.o \
-                                          sdram-nokia.o \
-                                          board-rx51-peripherals.o \
-                                          board-rx51-video.o
-obj-$(CONFIG_MACH_OMAP_ZOOM2)          += board-zoom.o \
-                                          board-zoom-peripherals.o \
-                                          board-zoom-display.o \
-                                          board-zoom-debugboard.o
-obj-$(CONFIG_MACH_OMAP_ZOOM3)          += board-zoom.o \
-                                          board-zoom-peripherals.o \
-                                          board-zoom-display.o \
-                                          board-zoom-debugboard.o
-obj-$(CONFIG_MACH_OMAP_3630SDP)                += board-3630sdp.o \
-                                          board-zoom-peripherals.o \
-                                          board-zoom-display.o
+obj-$(CONFIG_MACH_NOKIA_RM680)         += board-rm680.o sdram-nokia.o
+obj-$(CONFIG_MACH_NOKIA_RX51)          += board-rx51.o sdram-nokia.o
+obj-$(CONFIG_MACH_NOKIA_RX51)          += board-rx51-peripherals.o
+obj-$(CONFIG_MACH_NOKIA_RX51)          += board-rx51-video.o
+obj-$(CONFIG_MACH_OMAP_ZOOM2)          += board-zoom.o board-zoom-peripherals.o
+obj-$(CONFIG_MACH_OMAP_ZOOM2)          += board-zoom-display.o
+obj-$(CONFIG_MACH_OMAP_ZOOM2)          += board-zoom-debugboard.o
+obj-$(CONFIG_MACH_OMAP_ZOOM3)          += board-zoom.o board-zoom-peripherals.o
+obj-$(CONFIG_MACH_OMAP_ZOOM3)          += board-zoom-display.o
+obj-$(CONFIG_MACH_OMAP_ZOOM3)          += board-zoom-debugboard.o
+obj-$(CONFIG_MACH_OMAP_3630SDP)                += board-3630sdp.o
+obj-$(CONFIG_MACH_OMAP_3630SDP)                += board-zoom-peripherals.o
+obj-$(CONFIG_MACH_OMAP_3630SDP)                += board-zoom-display.o
 obj-$(CONFIG_MACH_CM_T35)              += board-cm-t35.o
 obj-$(CONFIG_MACH_CM_T3517)            += board-cm-t3517.o
 obj-$(CONFIG_MACH_IGEP0020)            += board-igep0020.o
index e658f835d0de3da65c24a4ccba0ec8252fa65e18..99ca6bad5c30c210c0a6e19d5eae570ef862d632 100644 (file)
@@ -303,6 +303,7 @@ MACHINE_START(OMAP_2430SDP, "OMAP2430 sdp2430 board")
        .init_irq       = omap2_init_irq,
        .handle_irq     = omap2_intc_handle_irq,
        .init_machine   = omap_2430sdp_init,
+       .init_late      = omap2430_init_late,
        .timer          = &omap2_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 37abb0d49b5178efca0a682bb9abcc25a9d861fa..a98c688058a92e8cb9251e289567515631b6443e 100644 (file)
@@ -605,6 +605,7 @@ MACHINE_START(OMAP_3430SDP, "OMAP3430 3430SDP board")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = omap_3430sdp_init,
+       .init_late      = omap3430_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 6ef350d1ae4f40a89a8b1bc54c6858780db1b2a8..2dc9ba523c7a1fcfd8dc8226180328430fb87685 100644 (file)
@@ -217,6 +217,7 @@ MACHINE_START(OMAP_3630SDP, "OMAP 3630SDP board")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = omap_sdp_init,
+       .init_late      = omap3630_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 94af6cde2e36d4868a338ce46644689945bf2f81..8e17284a803ff0cae6ae473aced0a9793418c059 100644 (file)
@@ -912,6 +912,7 @@ MACHINE_START(OMAP_4430SDP, "OMAP4430 4430SDP board")
        .init_irq       = gic_init_irq,
        .handle_irq     = gic_handle_irq,
        .init_machine   = omap_4430sdp_init,
+       .init_late      = omap4430_init_late,
        .timer          = &omap4_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 3b8a53c1f2a897a21e24d510849459709459bc96..92432c28673dfe9a17971bcc8f86b477c4f7cb03 100644 (file)
@@ -102,6 +102,7 @@ MACHINE_START(CRANEBOARD, "AM3517/05 CRANEBOARD")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = am3517_crane_init,
+       .init_late      = am35xx_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 99790eb646e848c8ddd47d72a62554c36b669b45..18f601096ce1807aaf9ba2fa8712f707a7c3bba3 100644 (file)
@@ -385,6 +385,7 @@ MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = am3517_evm_init,
+       .init_late      = am35xx_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 768ece2e9c3b4bcf14175a7068c801e68575e6d6..502c31e123be0308f46b44bb4c40dadb88ef6e4c 100644 (file)
@@ -356,6 +356,7 @@ MACHINE_START(OMAP_APOLLON, "OMAP24xx Apollon")
        .init_irq       = omap2_init_irq,
        .handle_irq     = omap2_intc_handle_irq,
        .init_machine   = omap_apollon_init,
+       .init_late      = omap2420_init_late,
        .timer          = &omap2_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index c03df142ea672ea11c4470a983be031bbef5bffb..ded100c80a91cb55f50245a924efcca310cee621 100644 (file)
@@ -669,6 +669,7 @@ MACHINE_START(CM_T35, "Compulab CM-T35")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = cm_t35_init,
+       .init_late      = omap35xx_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
@@ -681,6 +682,7 @@ MACHINE_START(CM_T3730, "Compulab CM-T3730")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = cm_t3730_init,
+       .init_late     = omap3630_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 9e66e167e4f39f67a75b4a130e6207a3ec7edcf1..a33ad4641d9ad5b1a2a440bca1c1dc65fa09b963 100644 (file)
@@ -303,6 +303,7 @@ MACHINE_START(CM_T3517, "Compulab CM-T3517")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = cm_t3517_init,
+       .init_late      = am35xx_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index b063f0d2faa6811a6aaab8376e17e5e8fe4a319b..6567c1cd55729ce167cf90a48ab18d700ebe545f 100644 (file)
@@ -644,6 +644,7 @@ MACHINE_START(DEVKIT8000, "OMAP3 Devkit8000")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = devkit8000_init,
+       .init_late      = omap35xx_init_late,
        .timer          = &omap3_secure_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 7302ba7ff1b9007f0952566f8588571dfdd53b91..20293465786701f8650e1377ea070c6eee536673 100644 (file)
@@ -125,6 +125,7 @@ DT_MACHINE_START(OMAP4_DT, "Generic OMAP4 (Flattened Device Tree)")
        .init_irq       = omap_init_irq,
        .handle_irq     = gic_handle_irq,
        .init_machine   = omap_generic_init,
+       .init_late      = omap4430_init_late,
        .timer          = &omap4_timer,
        .dt_compat      = omap4_boards_compat,
        .restart        = omap_prcm_restart,
index 0bbbabe28fcc94bc80f9c3851af7a5d5559405ff..876becf8205a475ad6e168c5ca5ac513f737a49f 100644 (file)
@@ -398,6 +398,7 @@ MACHINE_START(OMAP_H4, "OMAP2420 H4 board")
        .init_irq       = omap2_init_irq,
        .handle_irq     = omap2_intc_handle_irq,
        .init_machine   = omap_h4_init,
+       .init_late      = omap2420_init_late,
        .timer          = &omap2_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 7a274098f67bf5d5946e4d3becc4343ed6c5a9a2..74915295482ec849e0d0fefe5fc9a22703ee1f2c 100644 (file)
@@ -650,6 +650,7 @@ MACHINE_START(IGEP0020, "IGEP v2 board")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = igep_init,
+       .init_late      = omap35xx_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
@@ -662,6 +663,7 @@ MACHINE_START(IGEP0030, "IGEP OMAP3 module")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = igep_init,
+       .init_late      = omap35xx_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 1b6049567ab49621d59061d80b34d6cb84a94cf0..ef9e82977499678dabccf463c48ec3396eba84fe 100644 (file)
@@ -442,6 +442,7 @@ MACHINE_START(OMAP_LDP, "OMAP LDP board")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = omap_ldp_init,
+       .init_late      = omap3430_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 518091c5f77c74fe20df08f2714746062a8b004f..8ca14e88a31af12f43d219fe6d8a6b63b61ec4f1 100644 (file)
@@ -694,6 +694,7 @@ MACHINE_START(NOKIA_N800, "Nokia N800")
        .init_irq       = omap2_init_irq,
        .handle_irq     = omap2_intc_handle_irq,
        .init_machine   = n8x0_init_machine,
+       .init_late      = omap2420_init_late,
        .timer          = &omap2_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
@@ -706,6 +707,7 @@ MACHINE_START(NOKIA_N810, "Nokia N810")
        .init_irq       = omap2_init_irq,
        .handle_irq     = omap2_intc_handle_irq,
        .init_machine   = n8x0_init_machine,
+       .init_late      = omap2420_init_late,
        .timer          = &omap2_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
@@ -718,6 +720,7 @@ MACHINE_START(NOKIA_N810_WIMAX, "Nokia N810 WiMAX")
        .init_irq       = omap2_init_irq,
        .handle_irq     = omap2_intc_handle_irq,
        .init_machine   = n8x0_init_machine,
+       .init_late      = omap2420_init_late,
        .timer          = &omap2_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 2a7b9a9da1db58081642b6bd700239e7e3d85079..79c6909eeb785ef24d71be68f076034c0cd476df 100644 (file)
@@ -543,6 +543,7 @@ MACHINE_START(OMAP3_BEAGLE, "OMAP3 Beagle Board")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = omap3_beagle_init,
+       .init_late      = omap3_init_late,
        .timer          = &omap3_secure_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index ace3c675e9c22a15e1712f39a1fd95bdc42c0dc5..639bd07ea38a1cca473ac4c6fa5da29024ad9d77 100644 (file)
@@ -671,6 +671,7 @@ MACHINE_START(OMAP3EVM, "OMAP3 EVM")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = omap3_evm_init,
+       .init_late      = omap35xx_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index c008bf8e1c36612c32595a043b24fd39fa7c4950..932e1778aff94187c4ff2056c11f489747061c27 100644 (file)
@@ -242,6 +242,7 @@ MACHINE_START(OMAP3_TORPEDO, "Logic OMAP3 Torpedo board")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = omap3logic_init,
+       .init_late      = omap35xx_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
@@ -254,6 +255,7 @@ MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = omap3logic_init,
+       .init_late      = omap35xx_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 33d995d0f0755e73f2a920c1af2ea59df4ea2430..57aebee44fd0311a0c115aa1a408227dd8b698d7 100644 (file)
@@ -622,6 +622,7 @@ MACHINE_START(OMAP3_PANDORA, "Pandora Handheld Console")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = omap3pandora_init,
+       .init_late      = omap35xx_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 4396bae9167713dbd951abb28dfad4e7cd9cf27e..b318f5602e36ed15ee51f0a0f94d74ea3802dab6 100644 (file)
@@ -436,6 +436,7 @@ MACHINE_START(SBC3530, "OMAP3 STALKER")
        .init_irq               = omap3_init_irq,
        .handle_irq             = omap3_intc_handle_irq,
        .init_machine           = omap3_stalker_init,
+       .init_late              = omap35xx_init_late,
        .timer                  = &omap3_secure_timer,
        .restart                = omap_prcm_restart,
 MACHINE_END
index ae2251fa4a69346567a816e01aa4c2b2601ed875..485d14d6a8cd0683b0b74bb6ef63194292f6b4ab 100644 (file)
@@ -387,6 +387,7 @@ MACHINE_START(TOUCHBOOK, "OMAP3 touchbook Board")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = omap3_touchbook_init,
+       .init_late      = omap3430_init_late,
        .timer          = &omap3_secure_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 68b8fc9ff0101d0b3a40633dc106d46b8db6c9fe..982fb2622ab83063840b61d867098b8ebf700df5 100644 (file)
@@ -521,6 +521,7 @@ MACHINE_START(OMAP4_PANDA, "OMAP4 Panda board")
        .init_irq       = gic_init_irq,
        .handle_irq     = gic_handle_irq,
        .init_machine   = omap4_panda_init,
+       .init_late      = omap4430_init_late,
        .timer          = &omap4_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 5527c1979a168983a2b0608f5af7229cb4658018..8fa2fc3a4c3c51e973eb07c91f3c687c86aca62a 100644 (file)
@@ -554,6 +554,7 @@ MACHINE_START(OVERO, "Gumstix Overo")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = overo_init,
+       .init_late      = omap35xx_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index ae53d71f0ce076bace77dd6fcf26895083dc0b16..0ad1bb3bdb98dfdde02d9ffdb83f25e151ec5d70 100644 (file)
@@ -151,6 +151,7 @@ MACHINE_START(NOKIA_RM680, "Nokia RM-680 board")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = rm680_init,
+       .init_late      = omap3630_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
@@ -163,6 +164,7 @@ MACHINE_START(NOKIA_RM696, "Nokia RM-696 board")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = rm680_init,
+       .init_late      = omap3630_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 2da92a6ba40ab0fe8f44311f1200e5c0912d517a..345dd931f76fe86bb7ecf6550412a1acf3afb6f0 100644 (file)
@@ -127,6 +127,7 @@ MACHINE_START(NOKIA_RX51, "Nokia RX-51 board")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = rx51_init,
+       .init_late      = omap3430_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index ab9a7a9e9d643a8d7cb347d048ea43eb2dd1df5e..d4c8392cadb67f6fb0e70697965fb4a141e404a7 100644 (file)
@@ -52,6 +52,7 @@ MACHINE_START(TI8168EVM, "ti8168evm")
        .init_irq       = ti81xx_init_irq,
        .timer          = &omap3_timer,
        .init_machine   = ti81xx_evm_init,
+       .init_late      = ti81xx_init_late,
        .restart        = omap_prcm_restart,
 MACHINE_END
 
@@ -63,5 +64,6 @@ MACHINE_START(TI8148EVM, "ti8148evm")
        .init_irq       = ti81xx_init_irq,
        .timer          = &omap3_timer,
        .init_machine   = ti81xx_evm_init,
+       .init_late      = ti81xx_init_late,
        .restart        = omap_prcm_restart,
 MACHINE_END
index 5c20bcc57f2b951d50fe2bef11080b36e9729a36..4e7e56142e6fef6c972196ec2eac8b96f186b4ae 100644 (file)
@@ -137,6 +137,7 @@ MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = omap_zoom_init,
+       .init_late      = omap3430_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
@@ -149,6 +150,7 @@ MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
        .init_irq       = omap3_init_irq,
        .handle_irq     = omap3_intc_handle_irq,
        .init_machine   = omap_zoom_init,
+       .init_late      = omap3630_init_late,
        .timer          = &omap3_timer,
        .restart        = omap_prcm_restart,
 MACHINE_END
index d6c9e6180318c4400b3ffb61d69efaa5152954a9..be9dfd1abe603f247c089f7c0feab81bb3402223 100644 (file)
@@ -55,7 +55,7 @@ static inline void omap34xx_map_common_io(void)
 }
 #endif
 
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
 extern void omapti81xx_map_common_io(void);
 #else
 static inline void omapti81xx_map_common_io(void)
@@ -63,7 +63,7 @@ static inline void omapti81xx_map_common_io(void)
 }
 #endif
 
-#ifdef CONFIG_SOC_OMAPAM33XX
+#ifdef CONFIG_SOC_AM33XX
 extern void omapam33xx_map_common_io(void);
 #else
 static inline void omapam33xx_map_common_io(void)
@@ -79,6 +79,42 @@ static inline void omap44xx_map_common_io(void)
 }
 #endif
 
+#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP2)
+int omap2_pm_init(void);
+#else
+static inline int omap2_pm_init(void)
+{
+       return 0;
+}
+#endif
+
+#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3)
+int omap3_pm_init(void);
+#else
+static inline int omap3_pm_init(void)
+{
+       return 0;
+}
+#endif
+
+#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4)
+int omap4_pm_init(void);
+#else
+static inline int omap4_pm_init(void)
+{
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_OMAP_MUX
+int omap_mux_late_init(void);
+#else
+static inline int omap_mux_late_init(void)
+{
+       return 0;
+}
+#endif
+
 extern void omap2_init_common_infrastructure(void);
 
 extern struct sys_timer omap2_timer;
@@ -95,6 +131,17 @@ void omap3_init_early(void);        /* Do not use this one */
 void am35xx_init_early(void);
 void ti81xx_init_early(void);
 void omap4430_init_early(void);
+void omap3_init_late(void);    /* Do not use this one */
+void omap4430_init_late(void);
+void omap2420_init_late(void);
+void omap2430_init_late(void);
+void omap3430_init_late(void);
+void omap35xx_init_late(void);
+void omap3630_init_late(void);
+void am35xx_init_late(void);
+void ti81xx_init_late(void);
+void omap4430_init_late(void);
+int omap2_common_pm_late_init(void);
 void omap_prcm_restart(char, const char *);
 
 /*
index ae62ece04ef979b56ad559849fff57987ae75d08..7b4b9327e54332e1edc3bacec2a35269169fd6b2 100644 (file)
@@ -645,7 +645,11 @@ static inline void omap242x_mmc_mux(struct omap_mmc_platform_data
 
 void __init omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
 {
-       char *name = "mmci-omap";
+       struct platform_device *pdev;
+       struct omap_hwmod *oh;
+       int id = 0;
+       char *oh_name = "msdi1";
+       char *dev_name = "mmci-omap";
 
        if (!mmc_data[0]) {
                pr_err("%s fails: Incomplete platform data\n", __func__);
@@ -653,8 +657,17 @@ void __init omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
        }
 
        omap242x_mmc_mux(mmc_data[0]);
-       omap_mmc_add(name, 0, OMAP2_MMC1_BASE, OMAP2420_MMC_SIZE,
-                                       INT_24XX_MMC_IRQ, mmc_data[0]);
+
+       oh = omap_hwmod_lookup(oh_name);
+       if (!oh) {
+               pr_err("Could not look up %s\n", oh_name);
+               return;
+       }
+       pdev = omap_device_build(dev_name, id, oh, mmc_data[0],
+                                sizeof(struct omap_mmc_platform_data), NULL, 0, 0);
+       if (IS_ERR(pdev))
+               WARN(1, "Can'd build omap_device for %s:%s.\n",
+                                       dev_name, oh->name);
 }
 
 #endif
index db5a88a36c63418d746e6fbfd6ecb89d97e2c2e5..54d49ddb9b81c9ad8680d7a111995bc3190f0b5e 100644 (file)
@@ -180,16 +180,133 @@ static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
                omap4_dsi_mux_pads(dsi_id, 0);
 }
 
+static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
+{
+       return omap_pm_set_min_bus_tput(dev, OCP_INITIATOR_AGENT, tput);
+}
+
+static struct platform_device *create_dss_pdev(const char *pdev_name,
+               int pdev_id, const char *oh_name, void *pdata, int pdata_len,
+               struct platform_device *parent)
+{
+       struct platform_device *pdev;
+       struct omap_device *od;
+       struct omap_hwmod *ohs[1];
+       struct omap_hwmod *oh;
+       int r;
+
+       oh = omap_hwmod_lookup(oh_name);
+       if (!oh) {
+               pr_err("Could not look up %s\n", oh_name);
+               r = -ENODEV;
+               goto err;
+       }
+
+       pdev = platform_device_alloc(pdev_name, pdev_id);
+       if (!pdev) {
+               pr_err("Could not create pdev for %s\n", pdev_name);
+               r = -ENOMEM;
+               goto err;
+       }
+
+       if (parent != NULL)
+               pdev->dev.parent = &parent->dev;
+
+       if (pdev->id != -1)
+               dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
+       else
+               dev_set_name(&pdev->dev, "%s", pdev->name);
+
+       ohs[0] = oh;
+       od = omap_device_alloc(pdev, ohs, 1, NULL, 0);
+       if (!od) {
+               pr_err("Could not alloc omap_device for %s\n", pdev_name);
+               r = -ENOMEM;
+               goto err;
+       }
+
+       r = platform_device_add_data(pdev, pdata, pdata_len);
+       if (r) {
+               pr_err("Could not set pdata for %s\n", pdev_name);
+               goto err;
+       }
+
+       r = omap_device_register(pdev);
+       if (r) {
+               pr_err("Could not register omap_device for %s\n", pdev_name);
+               goto err;
+       }
+
+       return pdev;
+
+err:
+       return ERR_PTR(r);
+}
+
+static struct platform_device *create_simple_dss_pdev(const char *pdev_name,
+               int pdev_id, void *pdata, int pdata_len,
+               struct platform_device *parent)
+{
+       struct platform_device *pdev;
+       int r;
+
+       pdev = platform_device_alloc(pdev_name, pdev_id);
+       if (!pdev) {
+               pr_err("Could not create pdev for %s\n", pdev_name);
+               r = -ENOMEM;
+               goto err;
+       }
+
+       if (parent != NULL)
+               pdev->dev.parent = &parent->dev;
+
+       if (pdev->id != -1)
+               dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
+       else
+               dev_set_name(&pdev->dev, "%s", pdev->name);
+
+       r = platform_device_add_data(pdev, pdata, pdata_len);
+       if (r) {
+               pr_err("Could not set pdata for %s\n", pdev_name);
+               goto err;
+       }
+
+       r = omap_device_register(pdev);
+       if (r) {
+               pr_err("Could not register omap_device for %s\n", pdev_name);
+               goto err;
+       }
+
+       return pdev;
+
+err:
+       return ERR_PTR(r);
+}
+
 int __init omap_display_init(struct omap_dss_board_info *board_data)
 {
        int r = 0;
-       struct omap_hwmod *oh;
        struct platform_device *pdev;
        int i, oh_count;
-       struct omap_display_platform_data pdata;
        const struct omap_dss_hwmod_data *curr_dss_hwmod;
+       struct platform_device *dss_pdev;
+
+       /* create omapdss device */
+
+       board_data->dsi_enable_pads = omap_dsi_enable_pads;
+       board_data->dsi_disable_pads = omap_dsi_disable_pads;
+       board_data->get_context_loss_count = omap_pm_get_dev_context_loss_count;
+       board_data->set_min_bus_tput = omap_dss_set_min_bus_tput;
+
+       omap_display_device.dev.platform_data = board_data;
+
+       r = platform_device_register(&omap_display_device);
+       if (r < 0) {
+               pr_err("Unable to register omapdss device\n");
+               return r;
+       }
 
-       memset(&pdata, 0, sizeof(pdata));
+       /* create devices for dss hwmods */
 
        if (cpu_is_omap24xx()) {
                curr_dss_hwmod = omap2_dss_hwmod_data;
@@ -202,39 +319,58 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
                oh_count = ARRAY_SIZE(omap4_dss_hwmod_data);
        }
 
-       if (board_data->dsi_enable_pads == NULL)
-               board_data->dsi_enable_pads = omap_dsi_enable_pads;
-       if (board_data->dsi_disable_pads == NULL)
-               board_data->dsi_disable_pads = omap_dsi_disable_pads;
-
-       pdata.board_data = board_data;
-       pdata.board_data->get_context_loss_count =
-               omap_pm_get_dev_context_loss_count;
-
-       for (i = 0; i < oh_count; i++) {
-               oh = omap_hwmod_lookup(curr_dss_hwmod[i].oh_name);
-               if (!oh) {
-                       pr_err("Could not look up %s\n",
-                               curr_dss_hwmod[i].oh_name);
-                       return -ENODEV;
+       /*
+        * First create the pdev for dss_core, which is used as a parent device
+        * by the other dss pdevs. Note: dss_core has to be the first item in
+        * the hwmod list.
+        */
+       dss_pdev = create_dss_pdev(curr_dss_hwmod[0].dev_name,
+                       curr_dss_hwmod[0].id,
+                       curr_dss_hwmod[0].oh_name,
+                       board_data, sizeof(*board_data),
+                       NULL);
+
+       if (IS_ERR(dss_pdev)) {
+               pr_err("Could not build omap_device for %s\n",
+                               curr_dss_hwmod[0].oh_name);
+
+               return PTR_ERR(dss_pdev);
+       }
+
+       for (i = 1; i < oh_count; i++) {
+               pdev = create_dss_pdev(curr_dss_hwmod[i].dev_name,
+                               curr_dss_hwmod[i].id,
+                               curr_dss_hwmod[i].oh_name,
+                               board_data, sizeof(*board_data),
+                               dss_pdev);
+
+               if (IS_ERR(pdev)) {
+                       pr_err("Could not build omap_device for %s\n",
+                                       curr_dss_hwmod[i].oh_name);
+
+                       return PTR_ERR(pdev);
                }
+       }
 
-               pdev = omap_device_build(curr_dss_hwmod[i].dev_name,
-                               curr_dss_hwmod[i].id, oh, &pdata,
-                               sizeof(struct omap_display_platform_data),
-                               NULL, 0, 0);
+       /* Create devices for DPI and SDI */
 
-               if (WARN((IS_ERR(pdev)), "Could not build omap_device for %s\n",
-                               curr_dss_hwmod[i].oh_name))
-                       return -ENODEV;
+       pdev = create_simple_dss_pdev("omapdss_dpi", -1,
+                       board_data, sizeof(*board_data), dss_pdev);
+       if (IS_ERR(pdev)) {
+               pr_err("Could not build platform_device for omapdss_dpi\n");
+               return PTR_ERR(pdev);
        }
-       omap_display_device.dev.platform_data = board_data;
 
-       r = platform_device_register(&omap_display_device);
-       if (r < 0)
-               printk(KERN_ERR "Unable to register OMAP-Display device\n");
+       if (cpu_is_omap34xx()) {
+               pdev = create_simple_dss_pdev("omapdss_sdi", -1,
+                               board_data, sizeof(*board_data), dss_pdev);
+               if (IS_ERR(pdev)) {
+                       pr_err("Could not build platform_device for omapdss_sdi\n");
+                       return PTR_ERR(pdev);
+               }
+       }
 
-       return r;
+       return 0;
 }
 
 static void dispc_disable_outputs(void)
index b19d8496c16ed2dd1e62eb4afb29a08c27052110..ff75abe60af2c25eac226987d022584219cfa9fb 100644 (file)
@@ -227,10 +227,6 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
 
        dma_stride              = OMAP2_DMA_STRIDE;
        dma_common_ch_start     = CSDP;
-       if (cpu_is_omap3630() || cpu_is_omap44xx())
-               dma_common_ch_end = CCDN;
-       else
-               dma_common_ch_end = CCFN;
 
        p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
        if (!p) {
@@ -277,6 +273,13 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
                dev_err(&pdev->dev, "%s: kzalloc fail\n", __func__);
                return -ENOMEM;
        }
+
+       /* Check the capabilities register for descriptor loading feature */
+       if (dma_read(CAPS_0, 0) & DMA_HAS_DESCRIPTOR_CAPS)
+               dma_common_ch_end = CCDN;
+       else
+               dma_common_ch_end = CCFN;
+
        return 0;
 }
 
index 3376388b317a8e597112ddccb855fbc2ce971619..845309f146fe317fd82ec74dcc39a1158ce6d0aa 100644 (file)
@@ -28,8 +28,6 @@
 
 #include <plat/dsp.h>
 
-extern phys_addr_t omap_dsp_get_mempool_base(void);
-
 static struct platform_device *omap_dsp_pdev;
 
 static struct omap_dsp_platform_data omap_dsp_pdata __initdata = {
@@ -47,6 +45,31 @@ static struct omap_dsp_platform_data omap_dsp_pdata __initdata = {
        .dsp_cm_rmw_bits = omap2_cm_rmw_mod_reg_bits,
 };
 
+static phys_addr_t omap_dsp_phys_mempool_base;
+
+void __init omap_dsp_reserve_sdram_memblock(void)
+{
+       phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
+       phys_addr_t paddr;
+
+       if (!size)
+               return;
+
+       paddr = arm_memblock_steal(size, SZ_1M);
+       if (!paddr) {
+               pr_err("%s: failed to reserve %llx bytes\n",
+                               __func__, (unsigned long long)size);
+               return;
+       }
+
+       omap_dsp_phys_mempool_base = paddr;
+}
+
+static phys_addr_t omap_dsp_get_mempool_base(void)
+{
+       return omap_dsp_phys_mempool_base;
+}
+
 static int __init omap_dsp_init(void)
 {
        struct platform_device *pdev;
index 580e684e8825f43b9b081133c579d4ac1d1bc9dc..2286410671e7e6ecded10796c3d268ea92863b8c 100644 (file)
 #define GPMC_ECC_CONTROL       0x1f8
 #define GPMC_ECC_SIZE_CONFIG   0x1fc
 #define GPMC_ECC1_RESULT        0x200
+#define GPMC_ECC_BCH_RESULT_0   0x240   /* not available on OMAP2 */
+
+/* GPMC ECC control settings */
+#define GPMC_ECC_CTRL_ECCCLEAR         0x100
+#define GPMC_ECC_CTRL_ECCDISABLE       0x000
+#define GPMC_ECC_CTRL_ECCREG1          0x001
+#define GPMC_ECC_CTRL_ECCREG2          0x002
+#define GPMC_ECC_CTRL_ECCREG3          0x003
+#define GPMC_ECC_CTRL_ECCREG4          0x004
+#define GPMC_ECC_CTRL_ECCREG5          0x005
+#define GPMC_ECC_CTRL_ECCREG6          0x006
+#define GPMC_ECC_CTRL_ECCREG7          0x007
+#define GPMC_ECC_CTRL_ECCREG8          0x008
+#define GPMC_ECC_CTRL_ECCREG9          0x009
 
 #define GPMC_CS0_OFFSET                0x60
 #define GPMC_CS_SIZE           0x30
@@ -860,8 +874,9 @@ int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size)
        gpmc_ecc_used = cs;
 
        /* clear ecc and enable bits */
-       val = ((0x00000001<<8) | 0x00000001);
-       gpmc_write_reg(GPMC_ECC_CONTROL, val);
+       gpmc_write_reg(GPMC_ECC_CONTROL,
+                       GPMC_ECC_CTRL_ECCCLEAR |
+                       GPMC_ECC_CTRL_ECCREG1);
 
        /* program ecc and result sizes */
        val = ((((ecc_size >> 1) - 1) << 22) | (0x0000000F));
@@ -869,13 +884,15 @@ int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size)
 
        switch (mode) {
        case GPMC_ECC_READ:
-               gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
+       case GPMC_ECC_WRITE:
+               gpmc_write_reg(GPMC_ECC_CONTROL,
+                               GPMC_ECC_CTRL_ECCCLEAR |
+                               GPMC_ECC_CTRL_ECCREG1);
                break;
        case GPMC_ECC_READSYN:
-                gpmc_write_reg(GPMC_ECC_CONTROL, 0x100);
-               break;
-       case GPMC_ECC_WRITE:
-               gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
+               gpmc_write_reg(GPMC_ECC_CONTROL,
+                               GPMC_ECC_CTRL_ECCCLEAR |
+                               GPMC_ECC_CTRL_ECCDISABLE);
                break;
        default:
                printk(KERN_INFO "Error: Unrecognized Mode[%d]!\n", mode);
@@ -919,3 +936,186 @@ int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code)
        return 0;
 }
 EXPORT_SYMBOL_GPL(gpmc_calculate_ecc);
+
+#ifdef CONFIG_ARCH_OMAP3
+
+/**
+ * gpmc_init_hwecc_bch - initialize hardware BCH ecc functionality
+ * @cs: chip select number
+ * @nsectors: how many 512-byte sectors to process
+ * @nerrors: how many errors to correct per sector (4 or 8)
+ *
+ * This function must be executed before any call to gpmc_enable_hwecc_bch.
+ */
+int gpmc_init_hwecc_bch(int cs, int nsectors, int nerrors)
+{
+       /* check if ecc module is in use */
+       if (gpmc_ecc_used != -EINVAL)
+               return -EINVAL;
+
+       /* support only OMAP3 class */
+       if (!cpu_is_omap34xx()) {
+               printk(KERN_ERR "BCH ecc is not supported on this CPU\n");
+               return -EINVAL;
+       }
+
+       /*
+        * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1.
+        * Other chips may be added if confirmed to work.
+        */
+       if ((nerrors == 4) &&
+           (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0))) {
+               printk(KERN_ERR "BCH 4-bit mode is not supported on this CPU\n");
+               return -EINVAL;
+       }
+
+       /* sanity check */
+       if (nsectors > 8) {
+               printk(KERN_ERR "BCH cannot process %d sectors (max is 8)\n",
+                      nsectors);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gpmc_init_hwecc_bch);
+
+/**
+ * gpmc_enable_hwecc_bch - enable hardware BCH ecc functionality
+ * @cs: chip select number
+ * @mode: read/write mode
+ * @dev_width: device bus width(1 for x16, 0 for x8)
+ * @nsectors: how many 512-byte sectors to process
+ * @nerrors: how many errors to correct per sector (4 or 8)
+ */
+int gpmc_enable_hwecc_bch(int cs, int mode, int dev_width, int nsectors,
+                         int nerrors)
+{
+       unsigned int val;
+
+       /* check if ecc module is in use */
+       if (gpmc_ecc_used != -EINVAL)
+               return -EINVAL;
+
+       gpmc_ecc_used = cs;
+
+       /* clear ecc and enable bits */
+       gpmc_write_reg(GPMC_ECC_CONTROL, 0x1);
+
+       /*
+        * When using BCH, sector size is hardcoded to 512 bytes.
+        * Here we are using wrapping mode 6 both for reading and writing, with:
+        *  size0 = 0  (no additional protected byte in spare area)
+        *  size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
+        */
+       gpmc_write_reg(GPMC_ECC_SIZE_CONFIG, (32 << 22) | (0 << 12));
+
+       /* BCH configuration */
+       val = ((1                        << 16) | /* enable BCH */
+              (((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */
+              (0x06                     <<  8) | /* wrap mode = 6 */
+              (dev_width                <<  7) | /* bus width */
+              (((nsectors-1) & 0x7)     <<  4) | /* number of sectors */
+              (cs                       <<  1) | /* ECC CS */
+              (0x1));                            /* enable ECC */
+
+       gpmc_write_reg(GPMC_ECC_CONFIG, val);
+       gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gpmc_enable_hwecc_bch);
+
+/**
+ * gpmc_calculate_ecc_bch4 - Generate 7 ecc bytes per sector of 512 data bytes
+ * @cs:  chip select number
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc: The ecc output buffer
+ */
+int gpmc_calculate_ecc_bch4(int cs, const u_char *dat, u_char *ecc)
+{
+       int i;
+       unsigned long nsectors, reg, val1, val2;
+
+       if (gpmc_ecc_used != cs)
+               return -EINVAL;
+
+       nsectors = ((gpmc_read_reg(GPMC_ECC_CONFIG) >> 4) & 0x7) + 1;
+
+       for (i = 0; i < nsectors; i++) {
+
+               reg = GPMC_ECC_BCH_RESULT_0 + 16*i;
+
+               /* Read hw-computed remainder */
+               val1 = gpmc_read_reg(reg + 0);
+               val2 = gpmc_read_reg(reg + 4);
+
+               /*
+                * Add constant polynomial to remainder, in order to get an ecc
+                * sequence of 0xFFs for a buffer filled with 0xFFs; and
+                * left-justify the resulting polynomial.
+                */
+               *ecc++ = 0x28 ^ ((val2 >> 12) & 0xFF);
+               *ecc++ = 0x13 ^ ((val2 >>  4) & 0xFF);
+               *ecc++ = 0xcc ^ (((val2 & 0xF) << 4)|((val1 >> 28) & 0xF));
+               *ecc++ = 0x39 ^ ((val1 >> 20) & 0xFF);
+               *ecc++ = 0x96 ^ ((val1 >> 12) & 0xFF);
+               *ecc++ = 0xac ^ ((val1 >> 4) & 0xFF);
+               *ecc++ = 0x7f ^ ((val1 & 0xF) << 4);
+       }
+
+       gpmc_ecc_used = -EINVAL;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gpmc_calculate_ecc_bch4);
+
+/**
+ * gpmc_calculate_ecc_bch8 - Generate 13 ecc bytes per block of 512 data bytes
+ * @cs:  chip select number
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc: The ecc output buffer
+ */
+int gpmc_calculate_ecc_bch8(int cs, const u_char *dat, u_char *ecc)
+{
+       int i;
+       unsigned long nsectors, reg, val1, val2, val3, val4;
+
+       if (gpmc_ecc_used != cs)
+               return -EINVAL;
+
+       nsectors = ((gpmc_read_reg(GPMC_ECC_CONFIG) >> 4) & 0x7) + 1;
+
+       for (i = 0; i < nsectors; i++) {
+
+               reg = GPMC_ECC_BCH_RESULT_0 + 16*i;
+
+               /* Read hw-computed remainder */
+               val1 = gpmc_read_reg(reg + 0);
+               val2 = gpmc_read_reg(reg + 4);
+               val3 = gpmc_read_reg(reg + 8);
+               val4 = gpmc_read_reg(reg + 12);
+
+               /*
+                * Add constant polynomial to remainder, in order to get an ecc
+                * sequence of 0xFFs for a buffer filled with 0xFFs.
+                */
+               *ecc++ = 0xef ^ (val4 & 0xFF);
+               *ecc++ = 0x51 ^ ((val3 >> 24) & 0xFF);
+               *ecc++ = 0x2e ^ ((val3 >> 16) & 0xFF);
+               *ecc++ = 0x09 ^ ((val3 >> 8) & 0xFF);
+               *ecc++ = 0xed ^ (val3 & 0xFF);
+               *ecc++ = 0x93 ^ ((val2 >> 24) & 0xFF);
+               *ecc++ = 0x9a ^ ((val2 >> 16) & 0xFF);
+               *ecc++ = 0xc2 ^ ((val2 >> 8) & 0xFF);
+               *ecc++ = 0x97 ^ (val2 & 0xFF);
+               *ecc++ = 0x79 ^ ((val1 >> 24) & 0xFF);
+               *ecc++ = 0xe5 ^ ((val1 >> 16) & 0xFF);
+               *ecc++ = 0x24 ^ ((val1 >> 8) & 0xFF);
+               *ecc++ = 0xb5 ^ (val1 & 0xFF);
+       }
+
+       gpmc_ecc_used = -EINVAL;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gpmc_calculate_ecc_bch8);
+
+#endif /* CONFIG_ARCH_OMAP3 */
index b0268eaffe1353dcba99f46eaf76971ccb94c178..be697d4e084357e3c2a4938a60f2caab03605fbb 100644 (file)
@@ -355,7 +355,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
         *
         * temporary HACK: ocr_mask instead of fixed supply
         */
-       if (cpu_is_omap3505() || cpu_is_omap3517())
+       if (soc_is_am35xx())
                mmc->slots[0].ocr_mask = MMC_VDD_165_195 |
                                         MMC_VDD_26_27 |
                                         MMC_VDD_27_28 |
@@ -365,7 +365,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
        else
                mmc->slots[0].ocr_mask = c->ocr_mask;
 
-       if (!cpu_is_omap3517() && !cpu_is_omap3505())
+       if (!soc_is_am35xx())
                mmc->slots[0].features |= HSMMC_HAS_PBIAS;
 
        if (cpu_is_omap44xx() && (omap_rev() > OMAP4430_REV_ES1_0))
@@ -388,7 +388,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
                        }
                }
 
-               if (cpu_is_omap3517() || cpu_is_omap3505())
+               if (soc_is_am35xx())
                        mmc->slots[0].set_power = nop_mmc_set_power;
 
                /* OMAP3630 HSMMC1 supports only 4-bit */
@@ -400,7 +400,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
                }
                break;
        case 2:
-               if (cpu_is_omap3517() || cpu_is_omap3505())
+               if (soc_is_am35xx())
                        mmc->slots[0].set_power = am35x_hsmmc2_set_power;
 
                if (c->ext_clock)
index f1398171d8a2613e27e93f9507c69a5eebcced8a..0389b3264abe78fa65a978c78223a10574e42614 100644 (file)
@@ -185,8 +185,7 @@ static void __init omap3_cpuinfo(void)
         */
        if (cpu_is_omap3630()) {
                cpu_name = "OMAP3630";
-       } else if (cpu_is_omap3517()) {
-               /* AM35xx devices */
+       } else if (soc_is_am35xx()) {
                cpu_name = (omap3_has_sgx()) ? "AM3517" : "AM3505";
        } else if (cpu_is_ti816x()) {
                cpu_name = "TI816X";
@@ -352,13 +351,13 @@ void __init omap3xxx_check_revision(void)
                 */
                switch (rev) {
                case 0:
-                       omap_revision = OMAP3517_REV_ES1_0;
+                       omap_revision = AM35XX_REV_ES1_0;
                        cpu_rev = "1.0";
                        break;
                case 1:
                /* FALLTHROUGH */
                default:
-                       omap_revision = OMAP3517_REV_ES1_1;
+                       omap_revision = AM35XX_REV_ES1_1;
                        cpu_rev = "1.1";
                }
                break;
index d79321b0f2a26079c9993e1c4866820b3fe71561..548de90b58c2c76de55ea0a6b12382f1e897ee4c 100644 (file)
 #define OMAP_WKG_ENB_B_0                       0x14
 #define OMAP_WKG_ENB_C_0                       0x18
 #define OMAP_WKG_ENB_D_0                       0x1c
-#define OMAP_WKG_ENB_SECURE_A_0                        0x20
-#define OMAP_WKG_ENB_SECURE_B_0                        0x24
-#define OMAP_WKG_ENB_SECURE_C_0                        0x28
-#define OMAP_WKG_ENB_SECURE_D_0                        0x2c
 #define OMAP_WKG_ENB_A_1                       0x410
 #define OMAP_WKG_ENB_B_1                       0x414
 #define OMAP_WKG_ENB_C_1                       0x418
 #define OMAP_WKG_ENB_D_1                       0x41c
-#define OMAP_WKG_ENB_SECURE_A_1                        0x420
-#define OMAP_WKG_ENB_SECURE_B_1                        0x424
-#define OMAP_WKG_ENB_SECURE_C_1                        0x428
-#define OMAP_WKG_ENB_SECURE_D_1                        0x42c
 #define OMAP_AUX_CORE_BOOT_0                   0x800
 #define OMAP_AUX_CORE_BOOT_1                   0x804
 #define OMAP_PTMSYNCREQ_MASK                   0xc00
index 4b9491aa36fab8210696f23dd69ccfdeb26ec06f..8d014ba04abcc7fe0fe0416db50dcb212458bb94 100644 (file)
@@ -173,7 +173,7 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
 };
 #endif
 
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
 static struct map_desc omapti81xx_io_desc[] __initdata = {
        {
                .virtual        = L4_34XX_VIRT,
@@ -184,7 +184,7 @@ static struct map_desc omapti81xx_io_desc[] __initdata = {
 };
 #endif
 
-#ifdef CONFIG_SOC_OMAPAM33XX
+#ifdef CONFIG_SOC_AM33XX
 static struct map_desc omapam33xx_io_desc[] __initdata = {
        {
                .virtual        = L4_34XX_VIRT,
@@ -215,42 +215,12 @@ static struct map_desc omap44xx_io_desc[] __initdata = {
                .length         = L4_44XX_SIZE,
                .type           = MT_DEVICE,
        },
-       {
-               .virtual        = OMAP44XX_GPMC_VIRT,
-               .pfn            = __phys_to_pfn(OMAP44XX_GPMC_PHYS),
-               .length         = OMAP44XX_GPMC_SIZE,
-               .type           = MT_DEVICE,
-       },
-       {
-               .virtual        = OMAP44XX_EMIF1_VIRT,
-               .pfn            = __phys_to_pfn(OMAP44XX_EMIF1_PHYS),
-               .length         = OMAP44XX_EMIF1_SIZE,
-               .type           = MT_DEVICE,
-       },
-       {
-               .virtual        = OMAP44XX_EMIF2_VIRT,
-               .pfn            = __phys_to_pfn(OMAP44XX_EMIF2_PHYS),
-               .length         = OMAP44XX_EMIF2_SIZE,
-               .type           = MT_DEVICE,
-       },
-       {
-               .virtual        = OMAP44XX_DMM_VIRT,
-               .pfn            = __phys_to_pfn(OMAP44XX_DMM_PHYS),
-               .length         = OMAP44XX_DMM_SIZE,
-               .type           = MT_DEVICE,
-       },
        {
                .virtual        = L4_PER_44XX_VIRT,
                .pfn            = __phys_to_pfn(L4_PER_44XX_PHYS),
                .length         = L4_PER_44XX_SIZE,
                .type           = MT_DEVICE,
        },
-       {
-               .virtual        = L4_EMU_44XX_VIRT,
-               .pfn            = __phys_to_pfn(L4_EMU_44XX_PHYS),
-               .length         = L4_EMU_44XX_SIZE,
-               .type           = MT_DEVICE,
-       },
 #ifdef CONFIG_OMAP4_ERRATA_I688
        {
                .virtual        = OMAP4_SRAM_VA,
@@ -286,14 +256,14 @@ void __init omap34xx_map_common_io(void)
 }
 #endif
 
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
 void __init omapti81xx_map_common_io(void)
 {
        iotable_init(omapti81xx_io_desc, ARRAY_SIZE(omapti81xx_io_desc));
 }
 #endif
 
-#ifdef CONFIG_SOC_OMAPAM33XX
+#ifdef CONFIG_SOC_AM33XX
 void __init omapam33xx_map_common_io(void)
 {
        iotable_init(omapam33xx_io_desc, ARRAY_SIZE(omapam33xx_io_desc));
@@ -380,6 +350,13 @@ void __init omap2420_init_early(void)
        omap_hwmod_init_postsetup();
        omap2420_clk_init();
 }
+
+void __init omap2420_init_late(void)
+{
+       omap_mux_late_init();
+       omap2_common_pm_late_init();
+       omap2_pm_init();
+}
 #endif
 
 #ifdef CONFIG_SOC_OMAP2430
@@ -395,6 +372,13 @@ void __init omap2430_init_early(void)
        omap_hwmod_init_postsetup();
        omap2430_clk_init();
 }
+
+void __init omap2430_init_late(void)
+{
+       omap_mux_late_init();
+       omap2_common_pm_late_init();
+       omap2_pm_init();
+}
 #endif
 
 /*
@@ -449,6 +433,48 @@ void __init ti81xx_init_early(void)
        omap_hwmod_init_postsetup();
        omap3xxx_clk_init();
 }
+
+void __init omap3_init_late(void)
+{
+       omap_mux_late_init();
+       omap2_common_pm_late_init();
+       omap3_pm_init();
+}
+
+void __init omap3430_init_late(void)
+{
+       omap_mux_late_init();
+       omap2_common_pm_late_init();
+       omap3_pm_init();
+}
+
+void __init omap35xx_init_late(void)
+{
+       omap_mux_late_init();
+       omap2_common_pm_late_init();
+       omap3_pm_init();
+}
+
+void __init omap3630_init_late(void)
+{
+       omap_mux_late_init();
+       omap2_common_pm_late_init();
+       omap3_pm_init();
+}
+
+void __init am35xx_init_late(void)
+{
+       omap_mux_late_init();
+       omap2_common_pm_late_init();
+       omap3_pm_init();
+}
+
+void __init ti81xx_init_late(void)
+{
+       omap_mux_late_init();
+       omap2_common_pm_late_init();
+       omap3_pm_init();
+}
 #endif
 
 #ifdef CONFIG_ARCH_OMAP4
@@ -465,6 +491,13 @@ void __init omap4430_init_early(void)
        omap_hwmod_init_postsetup();
        omap4xxx_clk_init();
 }
+
+void __init omap4430_init_late(void)
+{
+       omap_mux_late_init();
+       omap2_common_pm_late_init();
+       omap4_pm_init();
+}
 #endif
 
 void __init omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
index 0812b154f5b5aa109ea3c5957e05b111be819793..80b88921faba9cad422f0644847da0433697cd52 100644 (file)
@@ -37,9 +37,6 @@
 #define OMAP4_L3_PER_IO_OFFSET 0xb1100000
 #define OMAP4_L3_PER_IO_ADDRESS(pa)    IOMEM((pa) + OMAP4_L3_PER_IO_OFFSET)
 
-#define OMAP4_GPMC_IO_OFFSET           0xa9000000
-#define OMAP4_GPMC_IO_ADDRESS(pa)      IOMEM((pa) + OMAP4_GPMC_IO_OFFSET)
-
 #define OMAP2_EMU_IO_OFFSET            0xaa800000      /* Emulation */
 #define OMAP2_EMU_IO_ADDRESS(pa)       IOMEM((pa) + OMAP2_EMU_IO_OFFSET)
 
 #define L4_ABE_44XX_VIRT       (L4_ABE_44XX_PHYS + OMAP2_L4_IO_OFFSET)
 #define L4_ABE_44XX_SIZE       SZ_1M
 
-#define L4_EMU_44XX_PHYS       L4_EMU_44XX_BASE
-                                               /* 0x54000000 --> 0xfe800000 */
-#define L4_EMU_44XX_VIRT       (L4_EMU_44XX_PHYS + OMAP2_EMU_IO_OFFSET)
-#define L4_EMU_44XX_SIZE       SZ_8M
-
-#define OMAP44XX_GPMC_PHYS     OMAP44XX_GPMC_BASE
-                                               /* 0x50000000 --> 0xf9000000 */
-#define OMAP44XX_GPMC_VIRT     (OMAP44XX_GPMC_PHYS + OMAP4_GPMC_IO_OFFSET)
-#define OMAP44XX_GPMC_SIZE     SZ_1M
-
-
-#define OMAP44XX_EMIF1_PHYS    OMAP44XX_EMIF1_BASE
-                                               /* 0x4c000000 --> 0xfd100000 */
-#define OMAP44XX_EMIF1_VIRT    (OMAP44XX_EMIF1_PHYS + OMAP4_L3_PER_IO_OFFSET)
-#define OMAP44XX_EMIF1_SIZE    SZ_1M
-
-#define OMAP44XX_EMIF2_PHYS    OMAP44XX_EMIF2_BASE
-                                               /* 0x4d000000 --> 0xfd200000 */
-#define OMAP44XX_EMIF2_SIZE    SZ_1M
-#define OMAP44XX_EMIF2_VIRT    (OMAP44XX_EMIF1_VIRT + OMAP44XX_EMIF1_SIZE)
-
-#define OMAP44XX_DMM_PHYS      OMAP44XX_DMM_BASE
-                                               /* 0x4e000000 --> 0xfd300000 */
-#define OMAP44XX_DMM_SIZE      SZ_1M
-#define OMAP44XX_DMM_VIRT      (OMAP44XX_EMIF2_VIRT + OMAP44XX_EMIF2_SIZE)
index 1ecf54565fe2ac6ead1abb6e8c2980c6186f4ebe..fdc4303be563169dedbd458bd8391a16e43e252b 100644 (file)
@@ -231,7 +231,7 @@ static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs
                        goto out;
 
                irqnr = readl_relaxed(base_addr + 0xd8);
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
                if (irqnr)
                        goto out;
                irqnr = readl_relaxed(base_addr + 0xf8);
index 3268ee24eada87ee74d24ce90bc9ea918260bfd2..80e55c5c99988c2bf63d49a63bf9b562cfaff036 100644 (file)
@@ -788,7 +788,7 @@ static void __init omap_mux_free_names(struct omap_mux *m)
 }
 
 /* Free all data except for GPIO pins unless CONFIG_DEBUG_FS is set */
-static int __init omap_mux_late_init(void)
+int __init omap_mux_late_init(void)
 {
        struct omap_mux_partition *partition;
        int ret;
@@ -823,7 +823,6 @@ static int __init omap_mux_late_init(void)
 
        return 0;
 }
-late_initcall(omap_mux_late_init);
 
 static void __init omap_mux_package_fixup(struct omap_mux *p,
                                        struct omap_mux *superset)
index fd48797fa95ae2778f7e495868966f73bd4ee3e1..b26d3c9bca1621cab9ac87eea39aff6cc489fc24 100644 (file)
@@ -3306,7 +3306,7 @@ int __init omap3xxx_hwmod_init(void)
            rev == OMAP3430_REV_ES2_1 || rev == OMAP3430_REV_ES3_0 ||
            rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2) {
                h = omap34xx_hwmod_ocp_ifs;
-       } else if (rev == OMAP3517_REV_ES1_0 || rev == OMAP3517_REV_ES1_1) {
+       } else if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) {
                h = am35xx_hwmod_ocp_ifs;
        } else if (rev == OMAP3630_REV_ES1_0 || rev == OMAP3630_REV_ES1_1 ||
                   rev == OMAP3630_REV_ES1_2) {
index d0c1c9695996d43f6ba7aa1de95e2cb58cd31048..9cb5cede0f5053632db80f17afecfbc0ecbdbed3 100644 (file)
@@ -295,7 +295,7 @@ static int __init omap2_common_pm_init(void)
 }
 postcore_initcall(omap2_common_pm_init);
 
-static int __init omap2_common_pm_late_init(void)
+int __init omap2_common_pm_late_init(void)
 {
        /*
         * In the case of DT, the PMIC and SR initialization will be done using
@@ -322,4 +322,3 @@ static int __init omap2_common_pm_late_init(void)
 
        return 0;
 }
-late_initcall(omap2_common_pm_late_init);
index facfffca9eacb33471413136e61bea8fd3c9e6cb..2edeffc923a641d3a996e56e7575ba3543b2af55 100644 (file)
@@ -298,13 +298,10 @@ static void __init prcm_setup_regs(void)
                                WKUP_MOD, PM_WKEN);
 }
 
-static int __init omap2_pm_init(void)
+int __init omap2_pm_init(void)
 {
        u32 l;
 
-       if (!cpu_is_omap24xx())
-               return -ENODEV;
-
        printk(KERN_INFO "Power Management for OMAP2 initializing\n");
        l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
        printk(KERN_INFO "PRCM revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
@@ -370,17 +367,13 @@ static int __init omap2_pm_init(void)
         * These routines need to be in SRAM as that's the only
         * memory the MPU can see when it wakes up.
         */
-       if (cpu_is_omap24xx()) {
-               omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
-                                                omap24xx_idle_loop_suspend_sz);
+       omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
+                                        omap24xx_idle_loop_suspend_sz);
 
-               omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
-                                                   omap24xx_cpu_suspend_sz);
-       }
+       omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
+                                           omap24xx_cpu_suspend_sz);
 
        arm_pm_idle = omap2_pm_idle;
 
        return 0;
 }
-
-late_initcall(omap2_pm_init);
index 8b43aefba0eae19b85a574f7324ed672d6670677..a34023d0ca7c665627d96ba4109a8a0e17b4ec6e 100644 (file)
@@ -697,15 +697,12 @@ static void __init pm_errata_configure(void)
        }
 }
 
-static int __init omap3_pm_init(void)
+int __init omap3_pm_init(void)
 {
        struct power_state *pwrst, *tmp;
        struct clockdomain *neon_clkdm, *mpu_clkdm;
        int ret;
 
-       if (!cpu_is_omap34xx())
-               return -ENODEV;
-
        if (!omap3_has_io_chain_ctrl())
                pr_warning("PM: no software I/O chain control; some wakeups may be lost\n");
 
@@ -804,5 +801,3 @@ err2:
 err1:
        return ret;
 }
-
-late_initcall(omap3_pm_init);
index 8856253524292dcfb9ebffca53bcc3f83cf3151d..ea24174f5707177d635d19a7671c3b8d2ed4443e 100644 (file)
@@ -141,15 +141,12 @@ static void omap_default_idle(void)
  * Initializes all powerdomain and clockdomain target states
  * and all PRCM settings.
  */
-static int __init omap4_pm_init(void)
+int __init omap4_pm_init(void)
 {
        int ret;
        struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm, *l4wkup;
        struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm;
 
-       if (!cpu_is_omap44xx())
-               return -ENODEV;
-
        if (omap_rev() == OMAP4430_REV_ES1_0) {
                WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
                return -ENODEV;
@@ -217,4 +214,3 @@ static int __init omap4_pm_init(void)
 err2:
        return ret;
 }
-late_initcall(omap4_pm_init);
index b7ea468eea326aa02537572139b9cc50da183b81..fb0a0a6869d17b783834f93616ba29a2f3430e43 100644 (file)
@@ -311,7 +311,7 @@ void __init omap3xxx_powerdomains_init(void)
                 rev == OMAP3430_REV_ES3_0 || rev == OMAP3630_REV_ES1_0)
                pwrdm_register_pwrdms(powerdomains_omap3430es2_es3_0);
        else if (rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2 ||
-                rev == OMAP3517_REV_ES1_0 || rev == OMAP3517_REV_ES1_1 ||
+                rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1 ||
                 rev == OMAP3630_REV_ES1_1 || rev == OMAP3630_REV_ES1_2)
                pwrdm_register_pwrdms(powerdomains_omap3430es3_1plus);
        else
index 1b7835865c83b59c1e0ca82d7611ef239b3f8211..840929bd9daecce4ef8e1ee95c2d404a2536c9c4 100644 (file)
@@ -90,7 +90,7 @@ static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
 }
 
 static struct irqaction omap2_gp_timer_irq = {
-       .name           = "gp timer",
+       .name           = "gp_timer",
        .flags          = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
        .handler        = omap2_gp_timer_interrupt,
 };
@@ -132,7 +132,7 @@ static void omap2_gp_timer_set_mode(enum clock_event_mode mode,
 }
 
 static struct clock_event_device clockevent_gpt = {
-       .name           = "gp timer",
+       .name           = "gp_timer",
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .shift          = 32,
        .set_next_event = omap2_gp_timer_set_next_event,
@@ -236,22 +236,8 @@ static void __init omap2_gp_clockevent_init(int gptimer_id,
 }
 
 /* Clocksource code */
-
-#ifdef CONFIG_OMAP_32K_TIMER
-/*
- * When 32k-timer is enabled, don't use GPTimer for clocksource
- * instead, just leave default clocksource which uses the 32k
- * sync counter.  See clocksource setup in plat-omap/counter_32k.c
- */
-
-static void __init omap2_gp_clocksource_init(int unused, const char *dummy)
-{
-       omap_init_clocksource_32k();
-}
-
-#else
-
 static struct omap_dm_timer clksrc;
+static bool use_gptimer_clksrc;
 
 /*
  * clocksource
@@ -262,7 +248,7 @@ static cycle_t clocksource_read_cycles(struct clocksource *cs)
 }
 
 static struct clocksource clocksource_gpt = {
-       .name           = "gp timer",
+       .name           = "gp_timer",
        .rating         = 300,
        .read           = clocksource_read_cycles,
        .mask           = CLOCKSOURCE_MASK(32),
@@ -278,7 +264,46 @@ static u32 notrace dmtimer_read_sched_clock(void)
 }
 
 /* Setup free-running counter for clocksource */
-static void __init omap2_gp_clocksource_init(int gptimer_id,
+static int __init omap2_sync32k_clocksource_init(void)
+{
+       int ret;
+       struct omap_hwmod *oh;
+       void __iomem *vbase;
+       const char *oh_name = "counter_32k";
+
+       /*
+        * First check hwmod data is available for sync32k counter
+        */
+       oh = omap_hwmod_lookup(oh_name);
+       if (!oh || oh->slaves_cnt == 0)
+               return -ENODEV;
+
+       omap_hwmod_setup_one(oh_name);
+
+       vbase = omap_hwmod_get_mpu_rt_va(oh);
+       if (!vbase) {
+               pr_warn("%s: failed to get counter_32k resource\n", __func__);
+               return -ENXIO;
+       }
+
+       ret = omap_hwmod_enable(oh);
+       if (ret) {
+               pr_warn("%s: failed to enable counter_32k module (%d)\n",
+                                                       __func__, ret);
+               return ret;
+       }
+
+       ret = omap_init_clocksource_32k(vbase);
+       if (ret) {
+               pr_warn("%s: failed to initialize counter_32k as a clocksource (%d)\n",
+                                                       __func__, ret);
+               omap_hwmod_idle(oh);
+       }
+
+       return ret;
+}
+
+static void __init omap2_gptimer_clocksource_init(int gptimer_id,
                                                const char *fck_source)
 {
        int res;
@@ -286,9 +311,6 @@ static void __init omap2_gp_clocksource_init(int gptimer_id,
        res = omap_dm_timer_init_one(&clksrc, gptimer_id, fck_source);
        BUG_ON(res);
 
-       pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n",
-               gptimer_id, clksrc.rate);
-
        __omap_dm_timer_load_start(&clksrc,
                        OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0, 1);
        setup_sched_clock(dmtimer_read_sched_clock, 32, clksrc.rate);
@@ -296,15 +318,36 @@ static void __init omap2_gp_clocksource_init(int gptimer_id,
        if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))
                pr_err("Could not register clocksource %s\n",
                        clocksource_gpt.name);
+       else
+               pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n",
+                       gptimer_id, clksrc.rate);
+}
+
+static void __init omap2_clocksource_init(int gptimer_id,
+                                               const char *fck_source)
+{
+       /*
+        * First give preference to kernel parameter configuration
+        * by user (clocksource="gp_timer").
+        *
+        * In case of missing kernel parameter for clocksource,
+        * first check for availability for 32k-sync timer, in case
+        * of failure in finding 32k_counter module or registering
+        * it as clocksource, execution will fallback to gp-timer.
+        */
+       if (use_gptimer_clksrc == true)
+               omap2_gptimer_clocksource_init(gptimer_id, fck_source);
+       else if (omap2_sync32k_clocksource_init())
+               /* Fall back to gp-timer code */
+               omap2_gptimer_clocksource_init(gptimer_id, fck_source);
 }
-#endif
 
 #define OMAP_SYS_TIMER_INIT(name, clkev_nr, clkev_src,                 \
                                clksrc_nr, clksrc_src)                  \
 static void __init omap##name##_timer_init(void)                       \
 {                                                                      \
        omap2_gp_clockevent_init((clkev_nr), clkev_src);                \
-       omap2_gp_clocksource_init((clksrc_nr), clksrc_src);             \
+       omap2_clocksource_init((clksrc_nr), clksrc_src);                \
 }
 
 #define OMAP_SYS_TIMER(name)                                           \
@@ -335,7 +378,7 @@ static DEFINE_TWD_LOCAL_TIMER(twd_local_timer,
 static void __init omap4_timer_init(void)
 {
        omap2_gp_clockevent_init(1, OMAP4_CLKEV_SOURCE);
-       omap2_gp_clocksource_init(2, OMAP4_MPU_SOURCE);
+       omap2_clocksource_init(2, OMAP4_MPU_SOURCE);
 #ifdef CONFIG_LOCAL_TIMERS
        /* Local timers are not supprted on OMAP4430 ES1.0 */
        if (omap_rev() != OMAP4430_REV_ES1_0) {
@@ -503,3 +546,28 @@ static int __init omap2_dm_timer_init(void)
        return 0;
 }
 arch_initcall(omap2_dm_timer_init);
+
+/**
+ * omap2_override_clocksource - clocksource override with user configuration
+ *
+ * Allows user to override default clocksource, using kernel parameter
+ *   clocksource="gp_timer"    (For all OMAP2PLUS architectures)
+ *
+ * Note that, here we are using same standard kernel parameter "clocksource=",
+ * and not introducing any OMAP specific interface.
+ */
+static int __init omap2_override_clocksource(char *str)
+{
+       if (!str)
+               return 0;
+       /*
+        * For OMAP architecture, we only have two options
+        *    - sync_32k (default)
+        *    - gp_timer (sys_clk based)
+        */
+       if (!strcmp(str, "gp_timer"))
+               use_gptimer_clksrc = true;
+
+       return 0;
+}
+early_param("clocksource", omap2_override_clocksource);
index 8d5ed775dd567c0c55c1450c4465b91e1ee1efc9..b19d1b43c12e59ef678debd195225c52f97e1e5a 100644 (file)
@@ -90,7 +90,7 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
        musb_plat.mode = board_data->mode;
        musb_plat.extvbus = board_data->extvbus;
 
-       if (cpu_is_omap3517() || cpu_is_omap3505()) {
+       if (soc_is_am35xx()) {
                oh_name = "am35x_otg_hs";
                name = "musb-am35x";
        } else if (cpu_is_ti81xx()) {
index 57db2038b23c59c307e480a4872ee6ebebec30d3..d0103c80d04085376a41d77d97995d4c558831c6 100644 (file)
@@ -118,7 +118,7 @@ void __init omap3xxx_voltagedomains_init(void)
        }
 #endif
 
-       if (cpu_is_omap3517() || cpu_is_omap3505())
+       if (soc_is_am35xx())
                voltdms = voltagedomains_am35xx;
        else
                voltdms = voltagedomains_omap3;
index e2e9db492d0c3622e20a21a3832d6b485508353d..9148b229d0de925b4f95fea5154421fa462331ba 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/mv643xx_i2c.h>
 #include <linux/ata_platform.h>
 #include <linux/delay.h>
+#include <linux/clk-provider.h>
 #include <net/dsa.h>
 #include <asm/page.h>
 #include <asm/setup.h>
@@ -69,6 +70,19 @@ void __init orion5x_map_io(void)
 }
 
 
+/*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
+static struct clk *tclk;
+
+static void __init clk_init(void)
+{
+       tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
+                                      orion5x_tclk);
+
+       orion_clkdev_init(tclk);
+}
+
 /*****************************************************************************
  * EHCI0
  ****************************************************************************/
@@ -95,7 +109,7 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
 {
        orion_ge00_init(eth_data,
                        ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
-                       IRQ_ORION5X_ETH_ERR, orion5x_tclk);
+                       IRQ_ORION5X_ETH_ERR);
 }
 
 
@@ -132,7 +146,7 @@ void __init orion5x_sata_init(struct mv_sata_platform_data *sata_data)
  ****************************************************************************/
 void __init orion5x_spi_init()
 {
-       orion_spi_init(SPI_PHYS_BASE, orion5x_tclk);
+       orion_spi_init(SPI_PHYS_BASE);
 }
 
 
@@ -142,7 +156,7 @@ void __init orion5x_spi_init()
 void __init orion5x_uart0_init(void)
 {
        orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
-                        IRQ_ORION5X_UART0, orion5x_tclk);
+                        IRQ_ORION5X_UART0, tclk);
 }
 
 /*****************************************************************************
@@ -151,7 +165,7 @@ void __init orion5x_uart0_init(void)
 void __init orion5x_uart1_init(void)
 {
        orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
-                        IRQ_ORION5X_UART1, orion5x_tclk);
+                        IRQ_ORION5X_UART1, tclk);
 }
 
 /*****************************************************************************
@@ -179,7 +193,7 @@ static void __init orion5x_crypto_init(void)
  ****************************************************************************/
 void __init orion5x_wdt_init(void)
 {
-       orion_wdt_init(orion5x_tclk);
+       orion_wdt_init();
 }
 
 
@@ -276,6 +290,9 @@ void __init orion5x_init(void)
         */
        orion5x_setup_cpu_mbus_bridge();
 
+       /* Setup root of clk tree */
+       clk_init();
+
        /*
         * Don't issue "Wait for Interrupt" instruction if we are
         * running on D0 5281 silicon.
index e91bf0ba4e8ef32323adbebdc14f273b93af1c7f..92df49c1b62ad4c75cc510034411dc5722b1c34c 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/mtd/physmap.h>
 #include <linux/mv643xx_eth.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <linux/spi/flash.h>
 #include <linux/ethtool.h>
 #include <net/dsa.h>
index a74f3cf54cc56b8ae0c1ecf1caefbd0d2dccc138..b4203277f3cd5b732c746a481ef435be1d3bdbad 100644 (file)
@@ -251,8 +251,6 @@ static void ts78xx_ts_nand_read_buf(struct mtd_info *mtd,
                readsb(io_base, buf, len);
 }
 
-const char *ts_nand_part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition ts78xx_ts_nand_parts[] = {
        {
                .name           = "mbr",
@@ -277,7 +275,6 @@ static struct mtd_partition ts78xx_ts_nand_parts[] = {
 static struct platform_nand_data ts78xx_ts_nand_data = {
        .chip   = {
                .nr_chips               = 1,
-               .part_probe_types       = ts_nand_part_probes,
                .partitions             = ts78xx_ts_nand_parts,
                .nr_partitions          = ARRAY_SIZE(ts78xx_ts_nand_parts),
                .chip_delay             = 15,
index be4c92858509f60975efadc3fd02c6d00084e996..a00d2f1254ed5194f31788a0776f84a231f7a743 100644 (file)
@@ -265,6 +265,17 @@ static void pnx4008_restart(char mode, const char *cmd)
        soft_restart(0);
 }
 
+#ifdef CONFIG_PM
+extern int pnx4008_pm_init(void);
+#else
+static inline int pnx4008_pm_init(void) { return 0; }
+#endif
+
+void __init pnx4008_init_late(void)
+{
+       pnx4008_pm_init();
+}
+
 extern struct sys_timer pnx4008_timer;
 
 MACHINE_START(PNX4008, "Philips PNX4008")
@@ -273,6 +284,7 @@ MACHINE_START(PNX4008, "Philips PNX4008")
        .map_io                 = pnx4008_map_io,
        .init_irq               = pnx4008_init_irq,
        .init_machine           = pnx4008_init,
+       .init_late              = pnx4008_init_late,
        .timer                  = &pnx4008_timer,
        .restart                = pnx4008_restart,
 MACHINE_END
index f3e60a049f983c5eb79c20646137f3b2c17f2806..26f8d06b142ade13d14116f51c8001dfea3ad898 100644 (file)
@@ -124,7 +124,7 @@ static const struct platform_suspend_ops pnx4008_pm_ops = {
        .valid = pnx4008_pm_valid,
 };
 
-static int __init pnx4008_pm_init(void)
+int __init pnx4008_pm_init(void)
 {
        u32 sram_size_to_allocate;
 
@@ -151,5 +151,3 @@ static int __init pnx4008_pm_init(void)
        suspend_set_ops(&pnx4008_pm_ops);
        return 0;
 }
-
-late_initcall(pnx4008_pm_init);
index b28a930d4f8ad0e0cf2f54deec014d4233d00a16..60d826fc2185143c5d0edfdef5fd08755ead758f 100644 (file)
@@ -24,4 +24,10 @@ static inline void sirfsoc_map_lluart(void)  {}
 extern void __init sirfsoc_map_lluart(void);
 #endif
 
+#ifdef CONFIG_SUSPEND
+extern int sirfsoc_pm_init(void);
+#else
+static inline int sirfsoc_pm_init(void) { return 0; }
+#endif
+
 #endif
index 26ebb57719df5d3fbf389e46cb723b92029b8191..fb5a7910af35b7a86534116f53b821aa2d36755c 100644 (file)
@@ -85,12 +85,11 @@ static const struct platform_suspend_ops sirfsoc_pm_ops = {
        .valid = suspend_valid_only_mem,
 };
 
-static int __init sirfsoc_pm_init(void)
+int __init sirfsoc_pm_init(void)
 {
        suspend_set_ops(&sirfsoc_pm_ops);
        return 0;
 }
-late_initcall(sirfsoc_pm_init);
 
 static const struct of_device_id pwrc_ids[] = {
        { .compatible = "sirf,prima2-pwrc" },
index 02b9c05ff9905b5ca94d1f5672709fe68d6b8c5c..8f0429d4b79f698d5c0e77117a75a3210f49c34b 100644 (file)
@@ -25,6 +25,11 @@ void __init sirfsoc_mach_init(void)
        of_platform_bus_probe(NULL, sirfsoc_of_bus_ids, NULL);
 }
 
+void __init sirfsoc_init_late(void)
+{
+       sirfsoc_pm_init();
+}
+
 static const char *prima2cb_dt_match[] __initdata = {
        "sirf,prima2-cb",
        NULL
@@ -39,6 +44,7 @@ MACHINE_START(PRIMA2_EVB, "prima2cb")
        .timer          = &sirfsoc_timer,
        .dma_zone_size  = SZ_256M,
        .init_machine   = sirfsoc_mach_init,
+       .init_late      = sirfsoc_init_late,
        .dt_compat      = prima2cb_dt_match,
        .restart        = sirfsoc_restart,
 MACHINE_END
index 56e8cebeb7d5240dab37761ddc25978d997a6ece..9244493dbcb7dc402ca1f69eb34b120c01487a15 100644 (file)
@@ -679,8 +679,6 @@ static struct mtd_partition balloon3_partition_info[] = {
        },
 };
 
-static const char *balloon3_part_probes[] = { "cmdlinepart", NULL };
-
 struct platform_nand_data balloon3_nand_pdata = {
        .chip = {
                .nr_chips       = 4,
@@ -688,7 +686,6 @@ struct platform_nand_data balloon3_nand_pdata = {
                .nr_partitions  = ARRAY_SIZE(balloon3_partition_info),
                .partitions     = balloon3_partition_info,
                .chip_delay     = 50,
-               .part_probe_types = balloon3_part_probes,
        },
        .ctrl = {
                .hwcontrol      = 0,
index a3a4a38d49727a818e2228043f4194a8046eb6c6..97f82ad341bfbcbb98123499856b8811106a223e 100644 (file)
@@ -338,8 +338,6 @@ static struct mtd_partition em_x270_partition_info[] = {
        },
 };
 
-static const char *em_x270_part_probes[] = { "cmdlinepart", NULL };
-
 struct platform_nand_data em_x270_nand_platdata = {
        .chip = {
                .nr_chips = 1,
@@ -347,7 +345,6 @@ struct platform_nand_data em_x270_nand_platdata = {
                .nr_partitions = ARRAY_SIZE(em_x270_partition_info),
                .partitions = em_x270_partition_info,
                .chip_delay = 20,
-               .part_probe_types = em_x270_part_probes,
        },
        .ctrl = {
                .hwcontrol = 0,
index 9507605ed547a5366abf75cb704ee7c23a50ee62..0da35dccfd8932aa4e8ed82209fe07d593deb5bd 100644 (file)
@@ -268,8 +268,6 @@ static struct mtd_partition palmtx_partition_info[] = {
        },
 };
 
-static const char *palmtx_part_probes[] = { "cmdlinepart", NULL };
-
 struct platform_nand_data palmtx_nand_platdata = {
        .chip   = {
                .nr_chips               = 1,
@@ -277,7 +275,6 @@ struct platform_nand_data palmtx_nand_platdata = {
                .nr_partitions          = ARRAY_SIZE(palmtx_partition_info),
                .partitions             = palmtx_partition_info,
                .chip_delay             = 20,
-               .part_probe_types       = palmtx_part_probes,
        },
        .ctrl   = {
                .cmd_ctrl       = palmtx_nand_cmd_ctl,
index b34287ab5afd93502e9b5779918085c3b8eb68ec..e24961109b70286655c36c46a3083809511870c5 100644 (file)
@@ -518,6 +518,11 @@ config S3C2443_DMA
        help
          Internal config node for S3C2443 DMA support
 
+config S3C2443_SETUP_SPI
+       bool
+       help
+         Common setup code for SPI GPIO configurations
+
 endif  # CPU_S3C2443 || CPU_S3C2416
 
 if CPU_S3C2443
index 3518fe812d5f73b183d1fae156a821a0b947eb82..0ab6ab15da4ca11fdb79c5144051333e4d10f00a 100644 (file)
@@ -14,6 +14,8 @@ obj-                          :=
 
 # core
 
+obj-y                          += common.o
+
 obj-$(CONFIG_CPU_S3C2410)      += s3c2410.o
 obj-$(CONFIG_S3C2410_DMA)      += dma-s3c2410.o
 obj-$(CONFIG_S3C2410_PM)       += pm-s3c2410.o sleep-s3c2410.o
@@ -33,6 +35,10 @@ obj-$(CONFIG_S3C2440_DMA)    += dma-s3c2440.o
 
 obj-$(CONFIG_CPU_S3C2443)      += s3c2443.o irq-s3c2443.o clock-s3c2443.o
 
+# PM
+
+obj-$(CONFIG_PM)               += pm.o irq-pm.o sleep.o
+
 # common code
 
 obj-$(CONFIG_S3C2443_COMMON)   += common-s3c2443.o
@@ -91,5 +97,6 @@ obj-$(CONFIG_MACH_OSIRIS_DVS)         += mach-osiris-dvs.o
 # device setup
 
 obj-$(CONFIG_S3C2416_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
+obj-$(CONFIG_S3C2443_SETUP_SPI)                += setup-spi.o
 obj-$(CONFIG_ARCH_S3C24XX)             += setup-i2c.o
 obj-$(CONFIG_S3C24XX_SETUP_TS)         += setup-ts.o
index dbc9ab4aaca233485dfaa9f8dc414e85e6fdc931..8702ecfaab3098f3987cd4bc6c23a2bc88a07a8e 100644 (file)
@@ -144,6 +144,7 @@ static struct clk_lookup s3c2416_clk_lookup[] = {
        CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &hsmmc0_clk),
        CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &hsmmc_mux0.clk),
        CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &hsmmc_mux1.clk),
+       CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &hsspi_mux.clk),
 };
 
 void __init s3c2416_init_clocks(int xtal)
index efb3ac359566de3b141f5ff0e3eaabed565f0cbc..a4c5a520d9942abe648af870f978e79ebf15bab4 100644 (file)
@@ -179,6 +179,11 @@ static struct clk *clks[] __initdata = {
        &clk_hsmmc,
 };
 
+static struct clk_lookup s3c2443_clk_lookup[] = {
+       CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_hsmmc),
+       CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &clk_hsspi.clk),
+};
+
 void __init s3c2443_init_clocks(int xtal)
 {
        unsigned long epllcon = __raw_readl(S3C2443_EPLLCON);
@@ -210,6 +215,7 @@ void __init s3c2443_init_clocks(int xtal)
 
        s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
        s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+       clkdev_add_table(s3c2443_clk_lookup, ARRAY_SIZE(s3c2443_clk_lookup));
 
        s3c_pwmclk_init();
 }
index 460431589f393898c15c351a60c41524ccb313e0..aeeb2be283fae341492bbe08efb3a44298182ebb 100644 (file)
@@ -423,11 +423,6 @@ static struct clk init_clocks_off[] = {
                .parent         = &clk_p,
                .enable         = s3c2443_clkcon_enable_p,
                .ctrlbit        = S3C2443_PCLKCON_IIS,
-       }, {
-               .name           = "hsspi",
-               .parent         = &clk_p,
-               .enable         = s3c2443_clkcon_enable_p,
-               .ctrlbit        = S3C2443_PCLKCON_HSSPI,
        }, {
                .name           = "adc",
                .parent         = &clk_p,
@@ -562,6 +557,14 @@ static struct clk hsmmc1_clk = {
        .ctrlbit        = S3C2443_HCLKCON_HSMMC,
 };
 
+static struct clk hsspi_clk = {
+       .name           = "spi",
+       .devname        = "s3c64xx-spi.0",
+       .parent         = &clk_p,
+       .enable         = s3c2443_clkcon_enable_p,
+       .ctrlbit        = S3C2443_PCLKCON_HSSPI,
+};
+
 /* EPLLCON compatible enough to get on/off information */
 
 void __init_or_cpufreq s3c2443_common_setup_clocks(pll_fn get_mpll)
@@ -612,6 +615,7 @@ static struct clk *clks[] __initdata = {
        &clk_usb_bus,
        &clk_armdiv,
        &hsmmc1_clk,
+       &hsspi_clk,
 };
 
 static struct clksrc_clk *clksrcs[] __initdata = {
@@ -629,6 +633,7 @@ static struct clk_lookup s3c2443_clk_lookup[] = {
        CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
        CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_esys_uart.clk),
        CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &hsmmc1_clk),
+       CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk0", &hsspi_clk),
 };
 
 void __init s3c2443_common_init_clocks(int xtal, pll_fn get_mpll,
diff --git a/arch/arm/mach-s3c24xx/common.c b/arch/arm/mach-s3c24xx/common.c
new file mode 100644 (file)
index 0000000..56cdd34
--- /dev/null
@@ -0,0 +1,303 @@
+/* linux/arch/arm/plat-s3c24xx/cpu.c
+ *
+ * Copyright (c) 2004-2005 Simtec Electronics
+ *     http://www.simtec.co.uk/products/SWLINUX/
+ *     Ben Dooks <ben@simtec.co.uk>
+ *
+ * Common code for S3C24XX machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/regs-clock.h>
+#include <asm/irq.h>
+#include <asm/cacheflush.h>
+#include <asm/system_info.h>
+#include <asm/system_misc.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/regs-clock.h>
+#include <mach/regs-gpio.h>
+#include <plat/regs-serial.h>
+
+#include <plat/cpu.h>
+#include <plat/devs.h>
+#include <plat/clock.h>
+#include <plat/s3c2410.h>
+#include <plat/s3c2412.h>
+#include <plat/s3c2416.h>
+#include <plat/s3c244x.h>
+#include <plat/s3c2443.h>
+#include <plat/cpu-freq.h>
+#include <plat/pll.h>
+
+/* table of supported CPUs */
+
+static const char name_s3c2410[]  = "S3C2410";
+static const char name_s3c2412[]  = "S3C2412";
+static const char name_s3c2416[]  = "S3C2416/S3C2450";
+static const char name_s3c2440[]  = "S3C2440";
+static const char name_s3c2442[]  = "S3C2442";
+static const char name_s3c2442b[]  = "S3C2442B";
+static const char name_s3c2443[]  = "S3C2443";
+static const char name_s3c2410a[] = "S3C2410A";
+static const char name_s3c2440a[] = "S3C2440A";
+
+static struct cpu_table cpu_ids[] __initdata = {
+       {
+               .idcode         = 0x32410000,
+               .idmask         = 0xffffffff,
+               .map_io         = s3c2410_map_io,
+               .init_clocks    = s3c2410_init_clocks,
+               .init_uarts     = s3c2410_init_uarts,
+               .init           = s3c2410_init,
+               .name           = name_s3c2410
+       },
+       {
+               .idcode         = 0x32410002,
+               .idmask         = 0xffffffff,
+               .map_io         = s3c2410_map_io,
+               .init_clocks    = s3c2410_init_clocks,
+               .init_uarts     = s3c2410_init_uarts,
+               .init           = s3c2410a_init,
+               .name           = name_s3c2410a
+       },
+       {
+               .idcode         = 0x32440000,
+               .idmask         = 0xffffffff,
+               .map_io         = s3c2440_map_io,
+               .init_clocks    = s3c244x_init_clocks,
+               .init_uarts     = s3c244x_init_uarts,
+               .init           = s3c2440_init,
+               .name           = name_s3c2440
+       },
+       {
+               .idcode         = 0x32440001,
+               .idmask         = 0xffffffff,
+               .map_io         = s3c2440_map_io,
+               .init_clocks    = s3c244x_init_clocks,
+               .init_uarts     = s3c244x_init_uarts,
+               .init           = s3c2440_init,
+               .name           = name_s3c2440a
+       },
+       {
+               .idcode         = 0x32440aaa,
+               .idmask         = 0xffffffff,
+               .map_io         = s3c2442_map_io,
+               .init_clocks    = s3c244x_init_clocks,
+               .init_uarts     = s3c244x_init_uarts,
+               .init           = s3c2442_init,
+               .name           = name_s3c2442
+       },
+       {
+               .idcode         = 0x32440aab,
+               .idmask         = 0xffffffff,
+               .map_io         = s3c2442_map_io,
+               .init_clocks    = s3c244x_init_clocks,
+               .init_uarts     = s3c244x_init_uarts,
+               .init           = s3c2442_init,
+               .name           = name_s3c2442b
+       },
+       {
+               .idcode         = 0x32412001,
+               .idmask         = 0xffffffff,
+               .map_io         = s3c2412_map_io,
+               .init_clocks    = s3c2412_init_clocks,
+               .init_uarts     = s3c2412_init_uarts,
+               .init           = s3c2412_init,
+               .name           = name_s3c2412,
+       },
+       {                       /* a newer version of the s3c2412 */
+               .idcode         = 0x32412003,
+               .idmask         = 0xffffffff,
+               .map_io         = s3c2412_map_io,
+               .init_clocks    = s3c2412_init_clocks,
+               .init_uarts     = s3c2412_init_uarts,
+               .init           = s3c2412_init,
+               .name           = name_s3c2412,
+       },
+       {                       /* a strange version of the s3c2416 */
+               .idcode         = 0x32450003,
+               .idmask         = 0xffffffff,
+               .map_io         = s3c2416_map_io,
+               .init_clocks    = s3c2416_init_clocks,
+               .init_uarts     = s3c2416_init_uarts,
+               .init           = s3c2416_init,
+               .name           = name_s3c2416,
+       },
+       {
+               .idcode         = 0x32443001,
+               .idmask         = 0xffffffff,
+               .map_io         = s3c2443_map_io,
+               .init_clocks    = s3c2443_init_clocks,
+               .init_uarts     = s3c2443_init_uarts,
+               .init           = s3c2443_init,
+               .name           = name_s3c2443,
+       },
+};
+
+/* minimal IO mapping */
+
+static struct map_desc s3c_iodesc[] __initdata = {
+       IODESC_ENT(GPIO),
+       IODESC_ENT(IRQ),
+       IODESC_ENT(MEMCTRL),
+       IODESC_ENT(UART)
+};
+
+/* read cpu identificaiton code */
+
+static unsigned long s3c24xx_read_idcode_v5(void)
+{
+#if defined(CONFIG_CPU_S3C2416)
+       /* s3c2416 is v5, with S3C24XX_GSTATUS1 instead of S3C2412_GSTATUS1 */
+
+       u32 gs = __raw_readl(S3C24XX_GSTATUS1);
+
+       /* test for s3c2416 or similar device */
+       if ((gs >> 16) == 0x3245)
+               return gs;
+#endif
+
+#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413)
+       return __raw_readl(S3C2412_GSTATUS1);
+#else
+       return 1UL;     /* don't look like an 2400 */
+#endif
+}
+
+static unsigned long s3c24xx_read_idcode_v4(void)
+{
+       return __raw_readl(S3C2410_GSTATUS1);
+}
+
+static void s3c24xx_default_idle(void)
+{
+       unsigned long tmp;
+       int i;
+
+       /* idle the system by using the idle mode which will wait for an
+        * interrupt to happen before restarting the system.
+        */
+
+       /* Warning: going into idle state upsets jtag scanning */
+
+       __raw_writel(__raw_readl(S3C2410_CLKCON) | S3C2410_CLKCON_IDLE,
+                    S3C2410_CLKCON);
+
+       /* the samsung port seems to do a loop and then unset idle.. */
+       for (i = 0; i < 50; i++)
+               tmp += __raw_readl(S3C2410_CLKCON); /* ensure loop not optimised out */
+
+       /* this bit is not cleared on re-start... */
+
+       __raw_writel(__raw_readl(S3C2410_CLKCON) & ~S3C2410_CLKCON_IDLE,
+                    S3C2410_CLKCON);
+}
+
+void __init s3c24xx_init_io(struct map_desc *mach_desc, int size)
+{
+       arm_pm_idle = s3c24xx_default_idle;
+
+       /* initialise the io descriptors we need for initialisation */
+       iotable_init(mach_desc, size);
+       iotable_init(s3c_iodesc, ARRAY_SIZE(s3c_iodesc));
+
+       if (cpu_architecture() >= CPU_ARCH_ARMv5) {
+               samsung_cpu_id = s3c24xx_read_idcode_v5();
+       } else {
+               samsung_cpu_id = s3c24xx_read_idcode_v4();
+       }
+       s3c24xx_init_cpu();
+
+       s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
+}
+
+/* Serial port registrations */
+
+static struct resource s3c2410_uart0_resource[] = {
+       [0] = DEFINE_RES_MEM(S3C2410_PA_UART0, SZ_16K),
+       [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX0, \
+                       IRQ_S3CUART_ERR0 - IRQ_S3CUART_RX0 + 1, \
+                       NULL, IORESOURCE_IRQ)
+};
+
+static struct resource s3c2410_uart1_resource[] = {
+       [0] = DEFINE_RES_MEM(S3C2410_PA_UART1, SZ_16K),
+       [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX1, \
+                       IRQ_S3CUART_ERR1 - IRQ_S3CUART_RX1 + 1, \
+                       NULL, IORESOURCE_IRQ)
+};
+
+static struct resource s3c2410_uart2_resource[] = {
+       [0] = DEFINE_RES_MEM(S3C2410_PA_UART2, SZ_16K),
+       [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX2, \
+                       IRQ_S3CUART_ERR2 - IRQ_S3CUART_RX2 + 1, \
+                       NULL, IORESOURCE_IRQ)
+};
+
+static struct resource s3c2410_uart3_resource[] = {
+       [0] = DEFINE_RES_MEM(S3C2443_PA_UART3, SZ_16K),
+       [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX3, \
+                       IRQ_S3CUART_ERR3 - IRQ_S3CUART_RX3 + 1, \
+                       NULL, IORESOURCE_IRQ)
+};
+
+struct s3c24xx_uart_resources s3c2410_uart_resources[] __initdata = {
+       [0] = {
+               .resources      = s3c2410_uart0_resource,
+               .nr_resources   = ARRAY_SIZE(s3c2410_uart0_resource),
+       },
+       [1] = {
+               .resources      = s3c2410_uart1_resource,
+               .nr_resources   = ARRAY_SIZE(s3c2410_uart1_resource),
+       },
+       [2] = {
+               .resources      = s3c2410_uart2_resource,
+               .nr_resources   = ARRAY_SIZE(s3c2410_uart2_resource),
+       },
+       [3] = {
+               .resources      = s3c2410_uart3_resource,
+               .nr_resources   = ARRAY_SIZE(s3c2410_uart3_resource),
+       },
+};
+
+/* initialise all the clocks */
+
+void __init_or_cpufreq s3c24xx_setup_clocks(unsigned long fclk,
+                                          unsigned long hclk,
+                                          unsigned long pclk)
+{
+       clk_upll.rate = s3c24xx_get_pll(__raw_readl(S3C2410_UPLLCON),
+                                       clk_xtal.rate);
+
+       clk_mpll.rate = fclk;
+       clk_h.rate = hclk;
+       clk_p.rate = pclk;
+       clk_f.rate = fclk;
+}
index e227c472a40ab45530e69ad4dc57a9f1055d9139..2d94228d2866eef4bc56408a641b1f3084492d2a 100644 (file)
@@ -55,12 +55,20 @@ static struct s3c24xx_dma_map __initdata s3c2443_dma_mappings[] = {
                .name           = "sdi",
                .channels       = MAP(S3C2443_DMAREQSEL_SDI),
        },
-       [DMACH_SPI0] = {
-               .name           = "spi0",
+       [DMACH_SPI0_RX] = {
+               .name           = "spi0-rx",
+               .channels       = MAP(S3C2443_DMAREQSEL_SPI0RX),
+       },
+       [DMACH_SPI0_TX] = {
+               .name           = "spi0-tx",
                .channels       = MAP(S3C2443_DMAREQSEL_SPI0TX),
        },
-       [DMACH_SPI1] = { /* only on S3C2443/S3C2450 */
-               .name           = "spi1",
+       [DMACH_SPI1_RX] = { /* only on S3C2443/S3C2450 */
+               .name           = "spi1-rx",
+               .channels       = MAP(S3C2443_DMAREQSEL_SPI1RX),
+       },
+       [DMACH_SPI1_TX] = { /* only on S3C2443/S3C2450 */
+               .name           = "spi1-tx",
                .channels       = MAP(S3C2443_DMAREQSEL_SPI1TX),
        },
        [DMACH_UART0] = {
index acbdfecd4186f6aa0f309315069f13a28f1625e5..454831b66037f9c64d37521bf14e118dcc92bc9a 100644 (file)
@@ -47,6 +47,10 @@ enum dma_ch {
        DMACH_UART2_SRC2,
        DMACH_UART3,            /* s3c2443 has extra uart */
        DMACH_UART3_SRC2,
+       DMACH_SPI0_TX,          /* s3c2443/2416/2450 hsspi0 */
+       DMACH_SPI0_RX,          /* s3c2443/2416/2450 hsspi0 */
+       DMACH_SPI1_TX,          /* s3c2443/2450 hsspi1 */
+       DMACH_SPI1_RX,          /* s3c2443/2450 hsspi1 */
        DMACH_MAX,              /* the end entry */
 };
 
index e53b2177319e701cdb44e8f479e2de62282e826d..b7a9f4d469e816bf6ca1f389f31d9280d06ecc46 100644 (file)
 #define IRQ_S32416_WDT         S3C2410_IRQSUB(27)
 #define IRQ_S32416_AC97                S3C2410_IRQSUB(28)
 
+/* second interrupt-register of s3c2416/s3c2450 */
+
+#define S3C2416_IRQ(x)         S3C2410_IRQ((x) + 54 + 29)
+#define IRQ_S3C2416_2D         S3C2416_IRQ(0)
+#define IRQ_S3C2416_IIC1       S3C2416_IRQ(1)
+#define IRQ_S3C2416_RESERVED2  S3C2416_IRQ(2)
+#define IRQ_S3C2416_RESERVED3  S3C2416_IRQ(3)
+#define IRQ_S3C2416_PCM0       S3C2416_IRQ(4)
+#define IRQ_S3C2416_PCM1       S3C2416_IRQ(5)
+#define IRQ_S3C2416_I2S0       S3C2416_IRQ(6)
+#define IRQ_S3C2416_I2S1       S3C2416_IRQ(7)
 
 /* extra irqs for s3c2440 */
 
 #define IRQ_S3C2443_WDT                S3C2410_IRQSUB(27)
 #define IRQ_S3C2443_AC97       S3C2410_IRQSUB(28)
 
-#if defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2416)
+#if defined(CONFIG_CPU_S3C2416)
+#define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
+#elif defined(CONFIG_CPU_S3C2443)
 #define NR_IRQS (IRQ_S3C2443_AC97+1)
 #else
 #define NR_IRQS (IRQ_S3C2440_AC97+1)
index 78ae807f1281e6a250cfab6b440428c4283ed7fa..8ba381f2dbe1b306d07943eccc1f3e550302c3d6 100644 (file)
@@ -98,6 +98,8 @@
 
 /* SPI */
 #define S3C2410_PA_SPI    (0x59000000)
+#define S3C2443_PA_SPI0                (0x52000000)
+#define S3C2443_PA_SPI1                S3C2410_PA_SPI
 
 /* SDI */
 #define S3C2410_PA_SDI    (0x5A000000)
 #define S3C_PA_WDT         S3C2410_PA_WATCHDOG
 #define S3C_PA_NAND        S3C24XX_PA_NAND
 
+#define S3C_PA_SPI0            S3C2443_PA_SPI0
+#define S3C_PA_SPI1            S3C2443_PA_SPI1
+
 #endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s3c24xx/irq-pm.c b/arch/arm/mach-s3c24xx/irq-pm.c
new file mode 100644 (file)
index 0000000..0efb2e2
--- /dev/null
@@ -0,0 +1,95 @@
+/* linux/arch/arm/plat-s3c24xx/irq-om.c
+ *
+ * Copyright (c) 2003-2004 Simtec Electronics
+ *     Ben Dooks <ben@simtec.co.uk>
+ *     http://armlinux.simtec.co.uk/
+ *
+ * S3C24XX - IRQ PM code
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <plat/cpu.h>
+#include <plat/pm.h>
+#include <plat/irq.h>
+
+#include <asm/irq.h>
+
+/* state for IRQs over sleep */
+
+/* default is to allow for EINT0..EINT15, and IRQ_RTC as wakeup sources
+ *
+ * set bit to 1 in allow bitfield to enable the wakeup settings on it
+*/
+
+unsigned long s3c_irqwake_intallow     = 1L << (IRQ_RTC - IRQ_EINT0) | 0xfL;
+unsigned long s3c_irqwake_eintallow    = 0x0000fff0L;
+
+int s3c_irq_wake(struct irq_data *data, unsigned int state)
+{
+       unsigned long irqbit = 1 << (data->irq - IRQ_EINT0);
+
+       if (!(s3c_irqwake_intallow & irqbit))
+               return -ENOENT;
+
+       printk(KERN_INFO "wake %s for irq %d\n",
+              state ? "enabled" : "disabled", data->irq);
+
+       if (!state)
+               s3c_irqwake_intmask |= irqbit;
+       else
+               s3c_irqwake_intmask &= ~irqbit;
+
+       return 0;
+}
+
+static struct sleep_save irq_save[] = {
+       SAVE_ITEM(S3C2410_INTMSK),
+       SAVE_ITEM(S3C2410_INTSUBMSK),
+};
+
+/* the extint values move between the s3c2410/s3c2440 and the s3c2412
+ * so we use an array to hold them, and to calculate the address of
+ * the register at run-time
+*/
+
+static unsigned long save_extint[3];
+static unsigned long save_eintflt[4];
+static unsigned long save_eintmask;
+
+int s3c24xx_irq_suspend(void)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(save_extint); i++)
+               save_extint[i] = __raw_readl(S3C24XX_EXTINT0 + (i*4));
+
+       for (i = 0; i < ARRAY_SIZE(save_eintflt); i++)
+               save_eintflt[i] = __raw_readl(S3C24XX_EINFLT0 + (i*4));
+
+       s3c_pm_do_save(irq_save, ARRAY_SIZE(irq_save));
+       save_eintmask = __raw_readl(S3C24XX_EINTMASK);
+
+       return 0;
+}
+
+void s3c24xx_irq_resume(void)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(save_extint); i++)
+               __raw_writel(save_extint[i], S3C24XX_EXTINT0 + (i*4));
+
+       for (i = 0; i < ARRAY_SIZE(save_eintflt); i++)
+               __raw_writel(save_eintflt[i], S3C24XX_EINFLT0 + (i*4));
+
+       s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save));
+       __raw_writel(save_eintmask, S3C24XX_EINTMASK);
+}
index fd49f35e448ec7090098a4293493df7ee8a32f86..23ec97370f3272ea21ad6f745b859c5105ff04e2 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/ioport.h>
 #include <linux/device.h>
 #include <linux/io.h>
+#include <linux/syscore_ops.h>
 
 #include <mach/hardware.h>
 #include <asm/irq.h>
@@ -192,6 +193,43 @@ static struct irq_chip s3c2416_irq_uart3 = {
        .irq_ack        = s3c2416_irq_uart3_ack,
 };
 
+/* second interrupt register */
+
+static inline void s3c2416_irq_ack_second(struct irq_data *data)
+{
+       unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
+
+       __raw_writel(bitval, S3C2416_SRCPND2);
+       __raw_writel(bitval, S3C2416_INTPND2);
+}
+
+static void s3c2416_irq_mask_second(struct irq_data *data)
+{
+       unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
+       unsigned long mask;
+
+       mask = __raw_readl(S3C2416_INTMSK2);
+       mask |= bitval;
+       __raw_writel(mask, S3C2416_INTMSK2);
+}
+
+static void s3c2416_irq_unmask_second(struct irq_data *data)
+{
+       unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
+       unsigned long mask;
+
+       mask = __raw_readl(S3C2416_INTMSK2);
+       mask &= ~bitval;
+       __raw_writel(mask, S3C2416_INTMSK2);
+}
+
+struct irq_chip s3c2416_irq_second = {
+       .irq_ack        = s3c2416_irq_ack_second,
+       .irq_mask       = s3c2416_irq_mask_second,
+       .irq_unmask     = s3c2416_irq_unmask_second,
+};
+
+
 /* IRQ initialisation code */
 
 static int __init s3c2416_add_sub(unsigned int base,
@@ -213,6 +251,42 @@ static int __init s3c2416_add_sub(unsigned int base,
        return 0;
 }
 
+static void __init s3c2416_irq_add_second(void)
+{
+       unsigned long pend;
+       unsigned long last;
+       int irqno;
+       int i;
+
+       /* first, clear all interrupts pending... */
+       last = 0;
+       for (i = 0; i < 4; i++) {
+               pend = __raw_readl(S3C2416_INTPND2);
+
+               if (pend == 0 || pend == last)
+                       break;
+
+               __raw_writel(pend, S3C2416_SRCPND2);
+               __raw_writel(pend, S3C2416_INTPND2);
+               printk(KERN_INFO "irq: clearing pending status %08x\n",
+                      (int)pend);
+               last = pend;
+       }
+
+       for (irqno = IRQ_S3C2416_2D; irqno <= IRQ_S3C2416_I2S1; irqno++) {
+               switch (irqno) {
+               case IRQ_S3C2416_RESERVED2:
+               case IRQ_S3C2416_RESERVED3:
+                       /* no IRQ here */
+                       break;
+               default:
+                       irq_set_chip_and_handler(irqno, &s3c2416_irq_second,
+                                                handle_edge_irq);
+                       set_irq_flags(irqno, IRQF_VALID);
+               }
+       }
+}
+
 static int __init s3c2416_irq_add(struct device *dev,
                                  struct subsys_interface *sif)
 {
@@ -232,6 +306,8 @@ static int __init s3c2416_irq_add(struct device *dev,
                        &s3c2416_irq_wdtac97,
                        IRQ_S3C2443_WDT, IRQ_S3C2443_AC97);
 
+       s3c2416_irq_add_second();
+
        return 0;
 }
 
@@ -248,3 +324,25 @@ static int __init s3c2416_irq_init(void)
 
 arch_initcall(s3c2416_irq_init);
 
+#ifdef CONFIG_PM
+static struct sleep_save irq_save[] = {
+       SAVE_ITEM(S3C2416_INTMSK2),
+};
+
+int s3c2416_irq_suspend(void)
+{
+       s3c_pm_do_save(irq_save, ARRAY_SIZE(irq_save));
+
+       return 0;
+}
+
+void s3c2416_irq_resume(void)
+{
+       s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save));
+}
+
+struct syscore_ops s3c2416_irq_syscore_ops = {
+       .suspend        = s3c2416_irq_suspend,
+       .resume         = s3c2416_irq_resume,
+};
+#endif
index 30a44f806e0154b90bb658287c734b6f9a5126cd..c3100a044fbe9b632d364e8adc1125da464a0b15 100644 (file)
@@ -148,23 +148,25 @@ static struct s3c24xx_hsudc_platdata smdk2416_hsudc_platdata = {
 
 static struct s3c_fb_pd_win smdk2416_fb_win[] = {
        [0] = {
-               /* think this is the same as the smdk6410 */
-               .win_mode       = {
-                       .pixclock       = 41094,
-                       .left_margin    = 8,
-                       .right_margin   = 13,
-                       .upper_margin   = 7,
-                       .lower_margin   = 5,
-                       .hsync_len      = 3,
-                       .vsync_len      = 1,
-                       .xres           = 800,
-                       .yres           = 480,
-               },
                .default_bpp    = 16,
                .max_bpp        = 32,
+               .xres           = 800,
+               .yres           = 480,
        },
 };
 
+static struct fb_videomode smdk2416_lcd_timing = {
+       .pixclock       = 41094,
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
+};
+
 static void s3c2416_fb_gpio_setup_24bpp(void)
 {
        unsigned int gpio;
@@ -187,6 +189,7 @@ static void s3c2416_fb_gpio_setup_24bpp(void)
 
 static struct s3c_fb_platdata smdk2416_fb_platdata = {
        .win[0]         = &smdk2416_fb_win[0],
+       .vtiming        = &smdk2416_lcd_timing,
        .setup_gpio     = s3c2416_fb_gpio_setup_24bpp,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
diff --git a/arch/arm/mach-s3c24xx/pm.c b/arch/arm/mach-s3c24xx/pm.c
new file mode 100644 (file)
index 0000000..60627e6
--- /dev/null
@@ -0,0 +1,149 @@
+/* linux/arch/arm/plat-s3c24xx/pm.c
+ *
+ * Copyright (c) 2004-2006 Simtec Electronics
+ *     Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C24XX Power Manager (Suspend-To-RAM) support
+ *
+ * See Documentation/arm/Samsung-S3C24XX/Suspend.txt for more information
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * Parts based on arch/arm/mach-pxa/pm.c
+ *
+ * Thanks to Dimitry Andric for debugging
+*/
+
+#include <linux/init.h>
+#include <linux/suspend.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/serial_core.h>
+#include <linux/io.h>
+
+#include <plat/regs-serial.h>
+#include <mach/regs-clock.h>
+#include <mach/regs-gpio.h>
+#include <mach/regs-mem.h>
+#include <mach/regs-irq.h>
+
+#include <asm/mach/time.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/pm.h>
+
+#define PFX "s3c24xx-pm: "
+
+static struct sleep_save core_save[] = {
+       SAVE_ITEM(S3C2410_LOCKTIME),
+       SAVE_ITEM(S3C2410_CLKCON),
+
+       /* we restore the timings here, with the proviso that the board
+        * brings the system up in an slower, or equal frequency setting
+        * to the original system.
+        *
+        * if we cannot guarantee this, then things are going to go very
+        * wrong here, as we modify the refresh and both pll settings.
+        */
+
+       SAVE_ITEM(S3C2410_BWSCON),
+       SAVE_ITEM(S3C2410_BANKCON0),
+       SAVE_ITEM(S3C2410_BANKCON1),
+       SAVE_ITEM(S3C2410_BANKCON2),
+       SAVE_ITEM(S3C2410_BANKCON3),
+       SAVE_ITEM(S3C2410_BANKCON4),
+       SAVE_ITEM(S3C2410_BANKCON5),
+
+#ifndef CONFIG_CPU_FREQ
+       SAVE_ITEM(S3C2410_CLKDIVN),
+       SAVE_ITEM(S3C2410_MPLLCON),
+       SAVE_ITEM(S3C2410_REFRESH),
+#endif
+       SAVE_ITEM(S3C2410_UPLLCON),
+       SAVE_ITEM(S3C2410_CLKSLOW),
+};
+
+static struct sleep_save misc_save[] = {
+       SAVE_ITEM(S3C2410_DCLKCON),
+};
+
+/* s3c_pm_check_resume_pin
+ *
+ * check to see if the pin is configured correctly for sleep mode, and
+ * make any necessary adjustments if it is not
+*/
+
+static void s3c_pm_check_resume_pin(unsigned int pin, unsigned int irqoffs)
+{
+       unsigned long irqstate;
+       unsigned long pinstate;
+       int irq = gpio_to_irq(pin);
+
+       if (irqoffs < 4)
+               irqstate = s3c_irqwake_intmask & (1L<<irqoffs);
+       else
+               irqstate = s3c_irqwake_eintmask & (1L<<irqoffs);
+
+       pinstate = s3c_gpio_getcfg(pin);
+
+       if (!irqstate) {
+               if (pinstate == S3C2410_GPIO_IRQ)
+                       S3C_PMDBG("Leaving IRQ %d (pin %d) as is\n", irq, pin);
+       } else {
+               if (pinstate == S3C2410_GPIO_IRQ) {
+                       S3C_PMDBG("Disabling IRQ %d (pin %d)\n", irq, pin);
+                       s3c_gpio_cfgpin(pin, S3C2410_GPIO_INPUT);
+               }
+       }
+}
+
+/* s3c_pm_configure_extint
+ *
+ * configure all external interrupt pins
+*/
+
+void s3c_pm_configure_extint(void)
+{
+       int pin;
+
+       /* for each of the external interrupts (EINT0..EINT15) we
+        * need to check wether it is an external interrupt source,
+        * and then configure it as an input if it is not
+       */
+
+       for (pin = S3C2410_GPF(0); pin <= S3C2410_GPF(7); pin++) {
+               s3c_pm_check_resume_pin(pin, pin - S3C2410_GPF(0));
+       }
+
+       for (pin = S3C2410_GPG(0); pin <= S3C2410_GPG(7); pin++) {
+               s3c_pm_check_resume_pin(pin, (pin - S3C2410_GPG(0))+8);
+       }
+}
+
+
+void s3c_pm_restore_core(void)
+{
+       s3c_pm_do_restore_core(core_save, ARRAY_SIZE(core_save));
+       s3c_pm_do_restore(misc_save, ARRAY_SIZE(misc_save));
+}
+
+void s3c_pm_save_core(void)
+{
+       s3c_pm_do_save(misc_save, ARRAY_SIZE(misc_save));
+       s3c_pm_do_save(core_save, ARRAY_SIZE(core_save));
+}
+
index 7743fade50dfa59e1c3d669d5aa55478adb993e1..ed5a95ece9eb2d365b7152cdb4bd5cfe508e672b 100644 (file)
@@ -106,6 +106,7 @@ int __init s3c2416_init(void)
        register_syscore_ops(&s3c2416_pm_syscore_ops);
 #endif
        register_syscore_ops(&s3c24xx_irq_syscore_ops);
+       register_syscore_ops(&s3c2416_irq_syscore_ops);
 
        return device_register(&s3c2416_dev);
 }
diff --git a/arch/arm/mach-s3c24xx/setup-spi.c b/arch/arm/mach-s3c24xx/setup-spi.c
new file mode 100644 (file)
index 0000000..5712c85
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * HS-SPI device setup for S3C2443/S3C2416
+ *
+ * Copyright (C) 2011 Samsung Electronics Ltd.
+ *             http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/s3c64xx-spi.h>
+
+#include <mach/hardware.h>
+#include <mach/regs-gpio.h>
+
+#ifdef CONFIG_S3C64XX_DEV_SPI0
+struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = {
+       .fifo_lvl_mask  = 0x7f,
+       .rx_lvl_offset  = 13,
+       .tx_st_done     = 21,
+       .high_speed     = 1,
+};
+
+int s3c64xx_spi0_cfg_gpio(struct platform_device *pdev)
+{
+       /* enable hsspi bit in misccr */
+       s3c2410_modify_misccr(S3C2416_MISCCR_HSSPI_EN2, 1);
+
+       s3c_gpio_cfgall_range(S3C2410_GPE(11), 3,
+                             S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+
+       return 0;
+}
+#endif
diff --git a/arch/arm/mach-s3c24xx/sleep.S b/arch/arm/mach-s3c24xx/sleep.S
new file mode 100644 (file)
index 0000000..c566125
--- /dev/null
@@ -0,0 +1,84 @@
+/* linux/arch/arm/plat-s3c24xx/sleep.S
+ *
+ * Copyright (c) 2004 Simtec Electronics
+ *     Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C2410 Power Manager (Suspend-To-RAM) support
+ *
+ * Based on PXA/SA1100 sleep code by:
+ *     Nicolas Pitre, (c) 2002 Monta Vista Software Inc
+ *     Cliff Brake, (c) 2001
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <mach/hardware.h>
+#include <mach/map.h>
+
+#include <mach/regs-gpio.h>
+#include <mach/regs-clock.h>
+#include <mach/regs-mem.h>
+#include <plat/regs-serial.h>
+
+/* CONFIG_DEBUG_RESUME is dangerous if your bootloader does not
+ * reset the UART configuration, only enable if you really need this!
+*/
+//#define CONFIG_DEBUG_RESUME
+
+       .text
+
+       /* sleep magic, to allow the bootloader to check for an valid
+        * image to resume to. Must be the first word before the
+        * s3c_cpu_resume entry.
+       */
+
+       .word   0x2bedf00d
+
+       /* s3c_cpu_resume
+        *
+        * resume code entry for bootloader to call
+       */
+
+ENTRY(s3c_cpu_resume)
+       mov     r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
+       msr     cpsr_c, r0
+
+       @@ load UART to allow us to print the two characters for
+       @@ resume debug
+
+       mov     r2, #S3C24XX_PA_UART & 0xff000000
+       orr     r2, r2, #S3C24XX_PA_UART & 0xff000
+
+#if 0
+       /* SMDK2440 LED set */
+       mov     r14, #S3C24XX_PA_GPIO
+       ldr     r12, [ r14, #0x54 ]
+       bic     r12, r12, #3<<4
+       orr     r12, r12, #1<<7
+       str     r12, [ r14, #0x54 ]
+#endif
+
+#ifdef CONFIG_DEBUG_RESUME
+       mov     r3, #'L'
+       strb    r3, [ r2, #S3C2410_UTXH ]
+1001:
+       ldrb    r14, [ r3, #S3C2410_UTRSTAT ]
+       tst     r14, #S3C2410_UTRSTAT_TXE
+       beq     1001b
+#endif /* CONFIG_DEBUG_RESUME */
+
+       b       cpu_resume
index b313380342a5790efbcb62104353e54f44672dbd..be746e33e86c08a6d91fdb371c9cae2f27c8a91d 100644 (file)
@@ -384,3 +384,8 @@ void s3c64xx_restart(char mode, const char *cmd)
        /* if all else fails, or mode was for soft, jump to 0 */
        soft_restart(0);
 }
+
+void __init s3c64xx_init_late(void)
+{
+       s3c64xx_pm_late_initcall();
+}
index 7a10be629aba26723cb51bc28d460a0a5dc4ef53..6cfc99bdfb3733e3e311c0872a4d3df82558f9b0 100644 (file)
@@ -24,6 +24,7 @@ void s3c64xx_register_clocks(unsigned long xtal, unsigned armclk_limit);
 void s3c64xx_setup_clocks(void);
 
 void s3c64xx_restart(char mode, const char *cmd);
+void s3c64xx_init_late(void);
 
 #ifdef CONFIG_CPU_S3C6400
 
@@ -51,4 +52,10 @@ extern void s3c6410_init_clocks(int xtal);
 #define s3c6410_init NULL
 #endif
 
+#ifdef CONFIG_PM
+int __init s3c64xx_pm_late_initcall(void);
+#else
+static inline int s3c64xx_pm_late_initcall(void) { return 0; }
+#endif
+
 #endif /* __ARCH_ARM_MACH_S3C64XX_COMMON_H */
index 179460f38db7587c94321ca2c401173cf174da77..acb197ccf3f7e7635ccbba7a3426303701e0fbf8 100644 (file)
@@ -27,12 +27,7 @@ static int s3c64xx_enter_idle(struct cpuidle_device *dev,
                              struct cpuidle_driver *drv,
                              int index)
 {
-       struct timeval before, after;
        unsigned long tmp;
-       int idle_time;
-
-       local_irq_disable();
-       do_gettimeofday(&before);
 
        /* Setup PWRCFG to enter idle mode */
        tmp = __raw_readl(S3C64XX_PWR_CFG);
@@ -42,42 +37,32 @@ static int s3c64xx_enter_idle(struct cpuidle_device *dev,
 
        cpu_do_idle();
 
-       do_gettimeofday(&after);
-       local_irq_enable();
-       idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
-                   (after.tv_usec - before.tv_usec);
-
-       dev->last_residency = idle_time;
        return index;
 }
 
-static struct cpuidle_state s3c64xx_cpuidle_set[] = {
-       [0] = {
-               .enter                  = s3c64xx_enter_idle,
-               .exit_latency           = 1,
-               .target_residency       = 1,
-               .flags                  = CPUIDLE_FLAG_TIME_VALID,
-               .name                   = "IDLE",
-               .desc                   = "System active, ARM gated",
-       },
-};
+static DEFINE_PER_CPU(struct cpuidle_device, s3c64xx_cpuidle_device);
 
 static struct cpuidle_driver s3c64xx_cpuidle_driver = {
-       .name           = "s3c64xx_cpuidle",
-       .owner          = THIS_MODULE,
-       .state_count    = ARRAY_SIZE(s3c64xx_cpuidle_set),
-};
-
-static struct cpuidle_device s3c64xx_cpuidle_device = {
-       .state_count    = ARRAY_SIZE(s3c64xx_cpuidle_set),
+       .name   = "s3c64xx_cpuidle",
+       .owner  = THIS_MODULE,
+       .en_core_tk_irqen = 1,
+       .states = {
+               {
+                       .enter            = s3c64xx_enter_idle,
+                       .exit_latency     = 1,
+                       .target_residency = 1,
+                       .flags            = CPUIDLE_FLAG_TIME_VALID,
+                       .name             = "IDLE",
+                       .desc             = "System active, ARM gated",
+               },
+       },
+       .state_count = 1,
 };
 
 static int __init s3c64xx_init_cpuidle(void)
 {
        int ret;
 
-       memcpy(s3c64xx_cpuidle_driver.states, s3c64xx_cpuidle_set,
-              sizeof(s3c64xx_cpuidle_set));
        cpuidle_register_driver(&s3c64xx_cpuidle_driver);
 
        ret = cpuidle_register_device(&s3c64xx_cpuidle_device);
index f252691fb209d73fd316882014e12653c56cc2d8..ffa29ddfdfced9084d16b49c509b79fa342208b5 100644 (file)
@@ -134,24 +134,27 @@ static struct platform_device anw6410_lcd_powerdev = {
 };
 
 static struct s3c_fb_pd_win anw6410_fb_win0 = {
-       /* this is to ensure we use win0 */
-       .win_mode       = {
-               .left_margin    = 8,
-               .right_margin   = 13,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode anw6410_lcd_timing = {
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
 };
 
 /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
 static struct s3c_fb_platdata anw6410_lcd_pdata __initdata = {
        .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+       .vtiming        = &anw6410_lcd_timing,
        .win[0]         = &anw6410_fb_win0,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
@@ -230,6 +233,7 @@ MACHINE_START(ANW6410, "A&W6410")
        .handle_irq     = vic_handle_irq,
        .map_io         = anw6410_map_io,
        .init_machine   = anw6410_machine_init,
+       .init_late      = s3c64xx_init_late,
        .timer          = &s3c24xx_timer,
        .restart        = s3c64xx_restart,
 MACHINE_END
index 0ace108c3e3d710264f4c88dfa5676702723fbfc..7a27f5603c7405fca49409c33ee9af79413de701 100644 (file)
@@ -182,6 +182,11 @@ static const struct i2c_board_info wm1277_devs[] = {
        },
 };
 
+static const struct i2c_board_info wm6230_i2c_devs[] = {
+       { I2C_BOARD_INFO("wm9081", 0x6c),
+         .platform_data = &wm9081_pdata, },
+};
+
 static __devinitdata const struct {
        u8 id;
        const char *name;
@@ -195,7 +200,9 @@ static __devinitdata const struct {
        { .id = 0x03, .name = "1252-EV1 Glenlivet" },
        { .id = 0x11, .name = "6249-EV2 Glenfarclas", },
        { .id = 0x14, .name = "6271-EV1 Lochnagar" },
-       { .id = 0x15, .name = "XXXX-EV1 Bells" },
+       { .id = 0x15, .name = "6320-EV1 Bells",
+         .i2c_devs = wm6230_i2c_devs,
+         .num_i2c_devs = ARRAY_SIZE(wm6230_i2c_devs) },
        { .id = 0x21, .name = "1275-EV1 Mortlach" },
        { .id = 0x25, .name = "1274-EV1 Glencadam" },
        { .id = 0x31, .name = "1253-EV1 Tomatin",
index aa1137fb47e616b3a43d569553ca0cd1db9e5a6e..d0c352d861f8f341fd93ac2da561b7c2bab35805 100644 (file)
@@ -151,26 +151,29 @@ static struct platform_device crag6410_lcd_powerdev = {
 
 /* 640x480 URT */
 static struct s3c_fb_pd_win crag6410_fb_win0 = {
-       /* this is to ensure we use win0 */
-       .win_mode       = {
-               .left_margin    = 150,
-               .right_margin   = 80,
-               .upper_margin   = 40,
-               .lower_margin   = 5,
-               .hsync_len      = 40,
-               .vsync_len      = 5,
-               .xres           = 640,
-               .yres           = 480,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 640,
+       .yres           = 480,
        .virtual_y      = 480 * 2,
        .virtual_x      = 640,
 };
 
+static struct fb_videomode crag6410_lcd_timing = {
+       .left_margin    = 150,
+       .right_margin   = 80,
+       .upper_margin   = 40,
+       .lower_margin   = 5,
+       .hsync_len      = 40,
+       .vsync_len      = 5,
+       .xres           = 640,
+       .yres           = 480,
+};
+
 /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
 static struct s3c_fb_platdata crag6410_lcd_pdata __initdata = {
        .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+       .vtiming        = &crag6410_lcd_timing,
        .win[0]         = &crag6410_fb_win0,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
@@ -671,6 +674,7 @@ static struct i2c_board_info i2c_devs1[] __initdata = {
          .irq = S3C_EINT(0),
          .platform_data = &glenfarclas_pmic_pdata },
 
+       { I2C_BOARD_INFO("wlf-gf-module", 0x22) },
        { I2C_BOARD_INFO("wlf-gf-module", 0x24) },
        { I2C_BOARD_INFO("wlf-gf-module", 0x25) },
        { I2C_BOARD_INFO("wlf-gf-module", 0x26) },
@@ -813,6 +817,7 @@ MACHINE_START(WLF_CRAGG_6410, "Wolfson Cragganmore 6410")
        .handle_irq     = vic_handle_irq,
        .map_io         = crag6410_map_io,
        .init_machine   = crag6410_machine_init,
+       .init_late      = s3c64xx_init_late,
        .timer          = &s3c24xx_timer,
        .restart        = s3c64xx_restart,
 MACHINE_END
index 521e07b8501b18780d335f0162bac4cd140fdb03..689088162f77e4eaab4868cb519889e976ab4401 100644 (file)
@@ -129,23 +129,27 @@ static struct platform_device hmt_backlight_device = {
 };
 
 static struct s3c_fb_pd_win hmt_fb_win0 = {
-       .win_mode       = {
-               .left_margin    = 8,
-               .right_margin   = 13,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode hmt_lcd_timing = {
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
 };
 
 /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
 static struct s3c_fb_platdata hmt_lcd_pdata __initdata = {
        .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+       .vtiming        = &hmt_lcd_timing,
        .win[0]         = &hmt_fb_win0,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
@@ -272,6 +276,7 @@ MACHINE_START(HMT, "Airgoo-HMT")
        .handle_irq     = vic_handle_irq,
        .map_io         = hmt_map_io,
        .init_machine   = hmt_machine_init,
+       .init_late      = s3c64xx_init_late,
        .timer          = &s3c24xx_timer,
        .restart        = s3c64xx_restart,
 MACHINE_END
index b2166d4a5538caa33f0e9acd74b64aaa24a0a2a6..5539a255a70446a8e89f3c2aa6e3735fadef08c2 100644 (file)
@@ -140,41 +140,59 @@ static struct s3c2410_platform_nand mini6410_nand_info = {
        .sets           = mini6410_nand_sets,
 };
 
-static struct s3c_fb_pd_win mini6410_fb_win[] = {
+static struct s3c_fb_pd_win mini6410_lcd_type0_fb_win = {
+       .max_bpp        = 32,
+       .default_bpp    = 16,
+       .xres           = 480,
+       .yres           = 272,
+};
+
+static struct fb_videomode mini6410_lcd_type0_timing = {
+       /* 4.3" 480x272 */
+       .left_margin    = 3,
+       .right_margin   = 2,
+       .upper_margin   = 1,
+       .lower_margin   = 1,
+       .hsync_len      = 40,
+       .vsync_len      = 1,
+       .xres           = 480,
+       .yres           = 272,
+};
+
+static struct s3c_fb_pd_win mini6410_lcd_type1_fb_win = {
+       .max_bpp        = 32,
+       .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode mini6410_lcd_type1_timing = {
+       /* 7.0" 800x480 */
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct s3c_fb_platdata mini6410_lcd_pdata[] __initdata = {
        {
-               .win_mode       = {     /* 4.3" 480x272 */
-                       .left_margin    = 3,
-                       .right_margin   = 2,
-                       .upper_margin   = 1,
-                       .lower_margin   = 1,
-                       .hsync_len      = 40,
-                       .vsync_len      = 1,
-                       .xres           = 480,
-                       .yres           = 272,
-               },
-               .max_bpp        = 32,
-               .default_bpp    = 16,
+               .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+               .vtiming        = &mini6410_lcd_type0_timing,
+               .win[0]         = &mini6410_lcd_type0_fb_win,
+               .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+               .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        }, {
-               .win_mode       = {     /* 7.0" 800x480 */
-                       .left_margin    = 8,
-                       .right_margin   = 13,
-                       .upper_margin   = 7,
-                       .lower_margin   = 5,
-                       .hsync_len      = 3,
-                       .vsync_len      = 1,
-                       .xres           = 800,
-                       .yres           = 480,
-               },
-               .max_bpp        = 32,
-               .default_bpp    = 16,
+               .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+               .vtiming        = &mini6410_lcd_type1_timing,
+               .win[0]         = &mini6410_lcd_type1_fb_win,
+               .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+               .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        },
-};
-
-static struct s3c_fb_platdata mini6410_lcd_pdata __initdata = {
-       .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
-       .win[0]         = &mini6410_fb_win[0],
-       .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
-       .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+       { },
 };
 
 static void mini6410_lcd_power_set(struct plat_lcd_data *pd,
@@ -272,7 +290,7 @@ static void mini6410_parse_features(
                                        "screen type already set\n", f);
                        } else {
                                int li = f - '0';
-                               if (li >= ARRAY_SIZE(mini6410_fb_win))
+                               if (li >= ARRAY_SIZE(mini6410_lcd_pdata))
                                        printk(KERN_INFO "MINI6410: '%c' out "
                                                "of range LCD mode\n", f);
                                else {
@@ -296,14 +314,12 @@ static void __init mini6410_machine_init(void)
        /* Parse the feature string */
        mini6410_parse_features(&features, mini6410_features_str);
 
-       mini6410_lcd_pdata.win[0] = &mini6410_fb_win[features.lcd_index];
-
        printk(KERN_INFO "MINI6410: selected LCD display is %dx%d\n",
-               mini6410_lcd_pdata.win[0]->win_mode.xres,
-               mini6410_lcd_pdata.win[0]->win_mode.yres);
+               mini6410_lcd_pdata[features.lcd_index].win[0]->xres,
+               mini6410_lcd_pdata[features.lcd_index].win[0]->yres);
 
        s3c_nand_set_platdata(&mini6410_nand_info);
-       s3c_fb_set_platdata(&mini6410_lcd_pdata);
+       s3c_fb_set_platdata(&mini6410_lcd_pdata[features.lcd_index]);
        s3c24xx_ts_set_platdata(NULL);
 
        /* configure nCS1 width to 16 bits */
@@ -339,6 +355,7 @@ MACHINE_START(MINI6410, "MINI6410")
        .handle_irq     = vic_handle_irq,
        .map_io         = mini6410_map_io,
        .init_machine   = mini6410_machine_init,
+       .init_late      = s3c64xx_init_late,
        .timer          = &s3c24xx_timer,
        .restart        = s3c64xx_restart,
 MACHINE_END
index 0efa2ba783b269f2c8824974f34c2e33ae7935dd..cad2e05eddf781e2429c2afc143626ce563d7f59 100644 (file)
@@ -104,6 +104,7 @@ MACHINE_START(NCP, "NCP")
        .handle_irq     = vic_handle_irq,
        .map_io         = ncp_map_io,
        .init_machine   = ncp_machine_init,
+       .init_late      = s3c64xx_init_late,
        .timer          = &s3c24xx_timer,
        .restart        = s3c64xx_restart,
 MACHINE_END
index 5c08266cea216957aa2b4051f82c4bd47356eb79..326b21604bc332a940a7056537b86aaf6d950872 100644 (file)
@@ -106,41 +106,57 @@ static struct platform_device real6410_device_eth = {
        },
 };
 
-static struct s3c_fb_pd_win real6410_fb_win[] = {
+static struct s3c_fb_pd_win real6410_lcd_type0_fb_win = {
+       .max_bpp        = 32,
+       .default_bpp    = 16,
+       .xres           = 480,
+       .yres           = 272,
+};
+
+static struct fb_videomode real6410_lcd_type0_timing = {
+       /* 4.3" 480x272 */
+       .left_margin    = 3,
+       .right_margin   = 2,
+       .upper_margin   = 1,
+       .lower_margin   = 1,
+       .hsync_len      = 40,
+       .vsync_len      = 1,
+};
+
+static struct s3c_fb_pd_win real6410_lcd_type1_fb_win = {
+       .max_bpp        = 32,
+       .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode real6410_lcd_type1_timing = {
+       /* 7.0" 800x480 */
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct s3c_fb_platdata real6410_lcd_pdata[] __initdata = {
        {
-               .win_mode       = {     /* 4.3" 480x272 */
-                       .left_margin    = 3,
-                       .right_margin   = 2,
-                       .upper_margin   = 1,
-                       .lower_margin   = 1,
-                       .hsync_len      = 40,
-                       .vsync_len      = 1,
-                       .xres           = 480,
-                       .yres           = 272,
-               },
-               .max_bpp        = 32,
-               .default_bpp    = 16,
+               .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+               .vtiming        = &real6410_lcd_type0_timing,
+               .win[0]         = &real6410_lcd_type0_fb_win,
+               .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+               .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        }, {
-               .win_mode       = {     /* 7.0" 800x480 */
-                       .left_margin    = 8,
-                       .right_margin   = 13,
-                       .upper_margin   = 7,
-                       .lower_margin   = 5,
-                       .hsync_len      = 3,
-                       .vsync_len      = 1,
-                       .xres           = 800,
-                       .yres           = 480,
-               },
-               .max_bpp        = 32,
-               .default_bpp    = 16,
+               .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+               .vtiming        = &real6410_lcd_type1_timing,
+               .win[0]         = &real6410_lcd_type1_fb_win,
+               .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+               .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        },
-};
-
-static struct s3c_fb_platdata real6410_lcd_pdata __initdata = {
-       .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
-       .win[0]         = &real6410_fb_win[0],
-       .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
-       .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+       { },
 };
 
 static struct mtd_partition real6410_nand_part[] = {
@@ -253,7 +269,7 @@ static void real6410_parse_features(
                                        "screen type already set\n", f);
                        } else {
                                int li = f - '0';
-                               if (li >= ARRAY_SIZE(real6410_fb_win))
+                               if (li >= ARRAY_SIZE(real6410_lcd_pdata))
                                        printk(KERN_INFO "REAL6410: '%c' out "
                                                "of range LCD mode\n", f);
                                else {
@@ -277,13 +293,11 @@ static void __init real6410_machine_init(void)
        /* Parse the feature string */
        real6410_parse_features(&features, real6410_features_str);
 
-       real6410_lcd_pdata.win[0] = &real6410_fb_win[features.lcd_index];
-
        printk(KERN_INFO "REAL6410: selected LCD display is %dx%d\n",
-               real6410_lcd_pdata.win[0]->win_mode.xres,
-               real6410_lcd_pdata.win[0]->win_mode.yres);
+               real6410_lcd_pdata[features.lcd_index].win[0]->xres,
+               real6410_lcd_pdata[features.lcd_index].win[0]->yres);
 
-       s3c_fb_set_platdata(&real6410_lcd_pdata);
+       s3c_fb_set_platdata(&real6410_lcd_pdata[features.lcd_index]);
        s3c_nand_set_platdata(&real6410_nand_info);
        s3c24xx_ts_set_platdata(NULL);
 
@@ -320,6 +334,7 @@ MACHINE_START(REAL6410, "REAL6410")
        .handle_irq     = vic_handle_irq,
        .map_io         = real6410_map_io,
        .init_machine   = real6410_machine_init,
+       .init_late      = s3c64xx_init_late,
        .timer          = &s3c24xx_timer,
        .restart        = s3c64xx_restart,
 MACHINE_END
index 3f42431d4ddaf5ce47c186fd977d59e1d4f87623..d6266d8b43c91c049d2961e24dbdfd2b9ba7feac 100644 (file)
@@ -108,23 +108,27 @@ static struct platform_device smartq5_buttons_device  = {
 };
 
 static struct s3c_fb_pd_win smartq5_fb_win0 = {
-       .win_mode       = {
-               .left_margin    = 216,
-               .right_margin   = 40,
-               .upper_margin   = 35,
-               .lower_margin   = 10,
-               .hsync_len      = 1,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-               .refresh        = 80,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode smartq5_lcd_timing = {
+       .left_margin    = 216,
+       .right_margin   = 40,
+       .upper_margin   = 35,
+       .lower_margin   = 10,
+       .hsync_len      = 1,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
+       .refresh        = 80,
 };
 
 static struct s3c_fb_platdata smartq5_lcd_pdata __initdata = {
        .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+       .vtiming        = &smartq5_lcd_timing,
        .win[0]         = &smartq5_fb_win0,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
@@ -152,6 +156,7 @@ MACHINE_START(SMARTQ5, "SmartQ 5")
        .handle_irq     = vic_handle_irq,
        .map_io         = smartq_map_io,
        .init_machine   = smartq5_machine_init,
+       .init_late      = s3c64xx_init_late,
        .timer          = &s3c24xx_timer,
        .restart        = s3c64xx_restart,
 MACHINE_END
index e5c09b6db9677c06f625fba604e8b19fe1662a49..0957d2a980e13f239e6deec8965e8c9939473e65 100644 (file)
@@ -124,23 +124,27 @@ static struct platform_device smartq7_buttons_device  = {
 };
 
 static struct s3c_fb_pd_win smartq7_fb_win0 = {
-       .win_mode       = {
-               .left_margin    = 3,
-               .right_margin   = 5,
-               .upper_margin   = 1,
-               .lower_margin   = 20,
-               .hsync_len      = 10,
-               .vsync_len      = 3,
-               .xres           = 800,
-               .yres           = 480,
-               .refresh        = 80,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode smartq7_lcd_timing = {
+       .left_margin    = 3,
+       .right_margin   = 5,
+       .upper_margin   = 1,
+       .lower_margin   = 20,
+       .hsync_len      = 10,
+       .vsync_len      = 3,
+       .xres           = 800,
+       .yres           = 480,
+       .refresh        = 80,
 };
 
 static struct s3c_fb_platdata smartq7_lcd_pdata __initdata = {
        .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+       .vtiming        = &smartq7_lcd_timing,
        .win[0]         = &smartq7_fb_win0,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
@@ -168,6 +172,7 @@ MACHINE_START(SMARTQ7, "SmartQ 7")
        .handle_irq     = vic_handle_irq,
        .map_io         = smartq_map_io,
        .init_machine   = smartq7_machine_init,
+       .init_late      = s3c64xx_init_late,
        .timer          = &s3c24xx_timer,
        .restart        = s3c64xx_restart,
 MACHINE_END
index 5f096534f4c4daf02c337a1371ba9d0393bac6e4..b0f4525c66bdf4549f0276bc4b085c9c7b1c898c 100644 (file)
@@ -93,6 +93,7 @@ MACHINE_START(SMDK6400, "SMDK6400")
        .handle_irq     = vic_handle_irq,
        .map_io         = smdk6400_map_io,
        .init_machine   = smdk6400_machine_init,
+       .init_late      = s3c64xx_init_late,
        .timer          = &s3c24xx_timer,
        .restart        = s3c64xx_restart,
 MACHINE_END
index 7da044f738acfaa28533f51cfa34a9a42d3500e5..df3103d450e22c93f55976634ea003a03c6881d9 100644 (file)
@@ -146,26 +146,29 @@ static struct platform_device smdk6410_lcd_powerdev = {
 };
 
 static struct s3c_fb_pd_win smdk6410_fb_win0 = {
-       /* this is to ensure we use win0 */
-       .win_mode       = {
-               .left_margin    = 8,
-               .right_margin   = 13,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
        .virtual_y      = 480 * 2,
        .virtual_x      = 800,
 };
 
+static struct fb_videomode smdk6410_lcd_timing = {
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
+};
+
 /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
 static struct s3c_fb_platdata smdk6410_lcd_pdata __initdata = {
        .setup_gpio     = s3c64xx_fb_gpio_setup_24bpp,
+       .vtiming        = &smdk6410_lcd_timing,
        .win[0]         = &smdk6410_fb_win0,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
@@ -702,6 +705,7 @@ MACHINE_START(SMDK6410, "SMDK6410")
        .handle_irq     = vic_handle_irq,
        .map_io         = smdk6410_map_io,
        .init_machine   = smdk6410_machine_init,
+       .init_late      = s3c64xx_init_late,
        .timer          = &s3c24xx_timer,
        .restart        = s3c64xx_restart,
 MACHINE_END
index 7d3e81b9dd06229034603aa260064384d8d44a5d..7feb426fc202d10be8f74a8992b5de212b6e4ba0 100644 (file)
@@ -365,10 +365,9 @@ static __init int s3c64xx_pm_initcall(void)
 }
 arch_initcall(s3c64xx_pm_initcall);
 
-static __init int s3c64xx_pm_late_initcall(void)
+int __init s3c64xx_pm_late_initcall(void)
 {
        pm_genpd_poweroff_unused();
 
        return 0;
 }
-late_initcall(s3c64xx_pm_late_initcall);
index a40e325d62c88d51adee0e6f0449535508042751..92fefad505cc3972207ea7d3ec70f156148e30a8 100644 (file)
@@ -103,22 +103,26 @@ static struct s3c2410_uartcfg smdk6440_uartcfgs[] __initdata = {
 
 /* Frame Buffer */
 static struct s3c_fb_pd_win smdk6440_fb_win0 = {
-       .win_mode = {
-               .left_margin    = 8,
-               .right_margin   = 13,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-       },
        .max_bpp        = 32,
        .default_bpp    = 24,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode smdk6440_lcd_timing = {
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
 };
 
 static struct s3c_fb_platdata smdk6440_lcd_pdata __initdata = {
        .win[0]         = &smdk6440_fb_win0,
+       .vtiming        = &smdk6440_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        .setup_gpio     = s5p64x0_fb_gpio_setup_24bpp,
index efb69e2f2afe7a460b4a820f071b499eb7d33d32..e2335ecf6eae793d04dbbf1ef0f37bd8832d0940 100644 (file)
@@ -121,22 +121,26 @@ static struct s3c2410_uartcfg smdk6450_uartcfgs[] __initdata = {
 
 /* Frame Buffer */
 static struct s3c_fb_pd_win smdk6450_fb_win0 = {
-       .win_mode       = {
-               .left_margin    = 8,
-               .right_margin   = 13,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-       },
        .max_bpp        = 32,
        .default_bpp    = 24,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode smdk6450_lcd_timing = {
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
 };
 
 static struct s3c_fb_platdata smdk6450_lcd_pdata __initdata = {
        .win[0]         = &smdk6450_fb_win0,
+       .vtiming        = &smdk6450_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        .setup_gpio     = s5p64x0_fb_gpio_setup_24bpp,
index 674d22992f3c10a0e6b160b78a099a7cde98e5fc..0c3ae38d27ca0e8b78d03a58833107a2329d9914 100644 (file)
@@ -136,24 +136,27 @@ static struct platform_device smdkc100_lcd_powerdev = {
 
 /* Frame Buffer */
 static struct s3c_fb_pd_win smdkc100_fb_win0 = {
-       /* this is to ensure we use win0 */
-       .win_mode       = {
-               .left_margin    = 8,
-               .right_margin   = 13,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-               .refresh        = 80,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode smdkc100_lcd_timing = {
+       .left_margin    = 8,
+       .right_margin   = 13,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
+       .refresh        = 80,
 };
 
 static struct s3c_fb_platdata smdkc100_lcd_pdata __initdata = {
        .win[0]         = &smdkc100_fb_win0,
+       .vtiming        = &smdkc100_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        .setup_gpio     = s5pc100_fb_gpio_setup_24bpp,
index 48d018f2332bc020239ca49f7d42986dd30134f8..af528f9e97f976ea0bb9efe654ef46a843280194 100644 (file)
@@ -96,38 +96,34 @@ static struct s3c2410_uartcfg aquila_uartcfgs[] __initdata = {
 
 /* Frame Buffer */
 static struct s3c_fb_pd_win aquila_fb_win0 = {
-       .win_mode = {
-               .left_margin = 16,
-               .right_margin = 16,
-               .upper_margin = 3,
-               .lower_margin = 28,
-               .hsync_len = 2,
-               .vsync_len = 2,
-               .xres = 480,
-               .yres = 800,
-       },
        .max_bpp = 32,
        .default_bpp = 16,
+       .xres = 480,
+       .yres = 800,
 };
 
 static struct s3c_fb_pd_win aquila_fb_win1 = {
-       .win_mode = {
-               .left_margin = 16,
-               .right_margin = 16,
-               .upper_margin = 3,
-               .lower_margin = 28,
-               .hsync_len = 2,
-               .vsync_len = 2,
-               .xres = 480,
-               .yres = 800,
-       },
        .max_bpp = 32,
        .default_bpp = 16,
+       .xres = 480,
+       .yres = 800,
+};
+
+static struct fb_videomode aquila_lcd_timing = {
+       .left_margin = 16,
+       .right_margin = 16,
+       .upper_margin = 3,
+       .lower_margin = 28,
+       .hsync_len = 2,
+       .vsync_len = 2,
+       .xres = 480,
+       .yres = 800,
 };
 
 static struct s3c_fb_platdata aquila_lcd_pdata __initdata = {
        .win[0]         = &aquila_fb_win0,
        .win[1]         = &aquila_fb_win1,
+       .vtiming        = &aquila_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
                          VIDCON1_INV_VCLK | VIDCON1_INV_VDEN,
index f20a97c8e4117d02462c7033d6b098cfe985a13b..bf5087c2b7fe6078e40837374fe1933a4c143cf3 100644 (file)
@@ -107,25 +107,29 @@ static struct s3c2410_uartcfg goni_uartcfgs[] __initdata = {
 
 /* Frame Buffer */
 static struct s3c_fb_pd_win goni_fb_win0 = {
-       .win_mode = {
-               .left_margin    = 16,
-               .right_margin   = 16,
-               .upper_margin   = 2,
-               .lower_margin   = 28,
-               .hsync_len      = 2,
-               .vsync_len      = 1,
-               .xres           = 480,
-               .yres           = 800,
-               .refresh        = 55,
-       },
        .max_bpp        = 32,
        .default_bpp    = 16,
+       .xres           = 480,
+       .yres           = 800,
        .virtual_x      = 480,
        .virtual_y      = 2 * 800,
 };
 
+static struct fb_videomode goni_lcd_timing = {
+       .left_margin    = 16,
+       .right_margin   = 16,
+       .upper_margin   = 2,
+       .lower_margin   = 28,
+       .hsync_len      = 2,
+       .vsync_len      = 1,
+       .xres           = 480,
+       .yres           = 800,
+       .refresh        = 55,
+};
+
 static struct s3c_fb_platdata goni_lcd_pdata __initdata = {
        .win[0]         = &goni_fb_win0,
+       .vtiming        = &goni_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB |
                          VIDCON0_CLKSEL_LCD,
        .vidcon1        = VIDCON1_INV_VCLK | VIDCON1_INV_VDEN
index fa1b61209fd93e11d9283f7513739e9184adeb60..0d7ddec88eb74ade7505bca367a855e714752b2b 100644 (file)
@@ -178,22 +178,26 @@ static struct platform_device smdkv210_lcd_lte480wv = {
 };
 
 static struct s3c_fb_pd_win smdkv210_fb_win0 = {
-       .win_mode = {
-               .left_margin    = 13,
-               .right_margin   = 8,
-               .upper_margin   = 7,
-               .lower_margin   = 5,
-               .hsync_len      = 3,
-               .vsync_len      = 1,
-               .xres           = 800,
-               .yres           = 480,
-       },
        .max_bpp        = 32,
        .default_bpp    = 24,
+       .xres           = 800,
+       .yres           = 480,
+};
+
+static struct fb_videomode smdkv210_lcd_timing = {
+       .left_margin    = 13,
+       .right_margin   = 8,
+       .upper_margin   = 7,
+       .lower_margin   = 5,
+       .hsync_len      = 3,
+       .vsync_len      = 1,
+       .xres           = 800,
+       .yres           = 480,
 };
 
 static struct s3c_fb_platdata smdkv210_lcd0_pdata __initdata = {
        .win[0]         = &smdkv210_fb_win0,
+       .vtiming        = &smdkv210_lcd_timing,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
        .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
        .setup_gpio     = s5pv210_fb_gpio_setup_24bpp,
index 375d3f779a88a6f213aa296fd12aef7fd0631d0f..d1dc7f1a239ca057a5264eef71e05df77e885eda 100644 (file)
@@ -538,6 +538,7 @@ MACHINE_START(ASSABET, "Intel-Assabet")
        .init_irq       = sa1100_init_irq,
        .timer          = &sa1100_timer,
        .init_machine   = assabet_init,
+       .init_late      = sa11x0_init_late,
 #ifdef CONFIG_SA1111
        .dma_zone_size  = SZ_1M,
 #endif
index e0f0c030258c5614bedf2abc8bd5d05e946e4233..b30fb99b587c9fe1025def432f7c7c0a6eb9bbf2 100644 (file)
@@ -305,6 +305,7 @@ MACHINE_START(BADGE4, "Hewlett-Packard Laboratories BadgePAD 4")
        .map_io         = badge4_map_io,
        .nr_irqs        = SA1100_NR_IRQS,
        .init_irq       = sa1100_init_irq,
+       .init_late      = sa11x0_init_late,
        .timer          = &sa1100_timer,
 #ifdef CONFIG_SA1111
        .dma_zone_size  = SZ_1M,
index 4a61f60e0502dc595dd54dc0a0a2e46a149227da..09d7f4b4b35487509840fe72ad0dd2d313c0e285 100644 (file)
@@ -134,5 +134,6 @@ MACHINE_START(CERF, "Intrinsyc CerfBoard/CerfCube")
        .init_irq       = cerf_init_irq,
        .timer          = &sa1100_timer,
        .init_machine   = cerf_init,
+       .init_late      = sa11x0_init_late,
        .restart        = sa11x0_restart,
 MACHINE_END
index c7f418b0cde9bde3ba23c8d1ea73533fa775b700..ea5cff38745c63ab666051695b5299497ae9b580 100644 (file)
@@ -401,5 +401,6 @@ MACHINE_START(COLLIE, "Sharp-Collie")
        .init_irq       = sa1100_init_irq,
        .timer          = &sa1100_timer,
        .init_machine   = collie_init,
+       .init_late      = sa11x0_init_late,
        .restart        = sa11x0_restart,
 MACHINE_END
index 16be4c56abe3ff37a153807b93d066c49c0d94ab..9db3e98e8b85dbf8f4c27106e8c3e5ea76d89ccd 100644 (file)
@@ -359,6 +359,10 @@ static int __init sa1100_init(void)
 
 arch_initcall(sa1100_init);
 
+void __init sa11x0_init_late(void)
+{
+       sa11x0_pm_init();
+}
 
 /*
  * Common I/O mapping:
index 9eb3b3cd5a63501f18297676fc8c32dbdca21b57..a5b7c13da3e31bc7d331068f72c32420f95759b5 100644 (file)
@@ -11,6 +11,7 @@ extern void __init sa1100_map_io(void);
 extern void __init sa1100_init_irq(void);
 extern void __init sa1100_init_gpio(void);
 extern void sa11x0_restart(char, const char *);
+extern void sa11x0_init_late(void);
 
 #define SET_BANK(__nr,__start,__size) \
        mi->bank[__nr].start = (__start), \
@@ -41,3 +42,9 @@ void sa11x0_register_mcp(struct mcp_plat_data *data);
 
 struct sa1100fb_mach_info;
 void sa11x0_register_lcd(struct sa1100fb_mach_info *inf);
+
+#ifdef CONFIG_PM
+int sa11x0_pm_init(void);
+#else
+static inline int sa11x0_pm_init(void) { return 0; }
+#endif
index b2e8d0f418e09ef08ce2529d25defc7f9ef6aa9e..e1571eab08aeb3b5f5d48d5c62b2b7cea206c0cf 100644 (file)
@@ -110,6 +110,7 @@ MACHINE_START(H3100, "Compaq iPAQ H3100")
        .init_irq       = sa1100_init_irq,
        .timer          = &sa1100_timer,
        .init_machine   = h3100_mach_init,
+       .init_late      = sa11x0_init_late,
        .restart        = sa11x0_restart,
 MACHINE_END
 
index cb6659f294fe37b35687fefd448d5d572a3ed274..ba7a2901ab88232da9bea4df8abc38266cc4544d 100644 (file)
@@ -160,6 +160,7 @@ MACHINE_START(H3600, "Compaq iPAQ H3600")
        .init_irq       = sa1100_init_irq,
        .timer          = &sa1100_timer,
        .init_machine   = h3600_mach_init,
+       .init_late      = sa11x0_init_late,
        .restart        = sa11x0_restart,
 MACHINE_END
 
index 5535475bf58334b222733bd5fd6d16b76360aa66..7f86bd911826a4327462d577d43c53031c2fe185 100644 (file)
@@ -199,5 +199,6 @@ MACHINE_START(HACKKIT, "HackKit Cpu Board")
        .init_irq       = sa1100_init_irq,
        .timer          = &sa1100_timer,
        .init_machine   = hackkit_init,
+       .init_late      = sa11x0_init_late,
        .restart        = sa11x0_restart,
 MACHINE_END
index ca7a7e834720a1cb3ef692f35f1d1b09ec2cf5f0..e3084f47027d8bf32c90af399825617cc4b62fa3 100644 (file)
@@ -348,6 +348,7 @@ MACHINE_START(JORNADA720, "HP Jornada 720")
        .init_irq       = sa1100_init_irq,
        .timer          = &sa1100_timer,
        .init_machine   = jornada720_mach_init,
+       .init_late      = sa11x0_init_late,
 #ifdef CONFIG_SA1111
        .dma_zone_size  = SZ_1M,
 #endif
index eb6534e0b0d01c64e1ee88fa65bd23fbadaea99d..b775a0abec0af7575dafebe8673eef8758c14300 100644 (file)
@@ -147,6 +147,7 @@ MACHINE_START(LART, "LART")
        .nr_irqs        = SA1100_NR_IRQS,
        .init_irq       = sa1100_init_irq,
        .init_machine   = lart_init,
+       .init_late      = sa11x0_init_late,
        .timer          = &sa1100_timer,
        .restart        = sa11x0_restart,
 MACHINE_END
index 8f6446b9f025231669c19aa84ad2fed17d057e62..41f69d97066f5c0a5e1783d42b6be9dfaf3bbd9d 100644 (file)
@@ -112,5 +112,6 @@ MACHINE_START(NANOENGINE, "BSE nanoEngine")
        .init_irq       = sa1100_init_irq,
        .timer          = &sa1100_timer,
        .init_machine   = nanoengine_init,
+       .init_late      = sa11x0_init_late,
        .restart        = sa11x0_restart,
 MACHINE_END
index 6c58f01b358a1064db6a869ee2cbac52b4b8ba0a..266db873a4e4c6c328a19b038eea18d29cab8997 100644 (file)
@@ -89,6 +89,7 @@ void neponset_ncr_frob(unsigned int mask, unsigned int val)
                WARN(1, "nep_base unset\n");
        }
 }
+EXPORT_SYMBOL(neponset_ncr_frob);
 
 static void neponset_set_mctrl(struct uart_port *port, u_int mctrl)
 {
index 1602575a0d5c47f8c3be09387f1fff63c737e41e..37fe0a0a53692951db7335c05eb5249419e1d010 100644 (file)
@@ -135,5 +135,6 @@ MACHINE_START(PLEB, "PLEB")
        .init_irq       = sa1100_init_irq,
        .timer          = &sa1100_timer,
        .init_machine   = pleb_init,
+       .init_late      = sa11x0_init_late,
        .restart        = sa11x0_restart,
 MACHINE_END
index 2fa499ec6afebf52cad16e2641192029a1cf44ef..690cf0ce5c0caa8f06ca8b10d0b31d0a36cc3032 100644 (file)
@@ -117,10 +117,8 @@ static const struct platform_suspend_ops sa11x0_pm_ops = {
        .valid          = suspend_valid_only_mem,
 };
 
-static int __init sa11x0_pm_init(void)
+int __init sa11x0_pm_init(void)
 {
        suspend_set_ops(&sa11x0_pm_ops);
        return 0;
 }
-
-late_initcall(sa11x0_pm_init);
index ca8bf59b9047d0fe07a31d4fc9dda277271c4b07..5d33fc3108ef7c3d4c291f00b2199b429c90ef21 100644 (file)
@@ -104,5 +104,6 @@ MACHINE_START(SHANNON, "Shannon (AKA: Tuxscreen)")
        .init_irq       = sa1100_init_irq,
        .timer          = &sa1100_timer,
        .init_machine   = shannon_init,
+       .init_late      = sa11x0_init_late,
        .restart        = sa11x0_restart,
 MACHINE_END
index 3efae03cb3d7defef1f3f88e7112b09addb3a1cf..fbd53593be54ff37e409c9909c684682e5ce7dd0 100644 (file)
@@ -395,6 +395,7 @@ MACHINE_START(SIMPAD, "Simpad")
        .map_io         = simpad_map_io,
        .nr_irqs        = SA1100_NR_IRQS,
        .init_irq       = sa1100_init_irq,
+       .init_late      = sa11x0_init_late,
        .timer          = &sa1100_timer,
        .restart        = sa11x0_restart,
 MACHINE_END
index e6b177bc94107683808ac734ebe132a7c951f863..8aa1962c22a278dd233022b0886b9a77bba39af4 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 # Common objects
-obj-y                          := timer.o console.o clock.o
+obj-y                          := timer.o console.o clock.o common.o
 
 # CPU objects
 obj-$(CONFIG_ARCH_SH7367)      += setup-sh7367.o clock-sh7367.o intc-sh7367.o
index 0891ec6e27f502b8fe220a3bff3b97dfbbe44c69..5a6f22f05e99bc3a166a92d968f48e3d43c56e5b 100644 (file)
@@ -580,5 +580,6 @@ MACHINE_START(AG5EVM, "ag5evm")
        .init_irq       = sh73a0_init_irq,
        .handle_irq     = gic_handle_irq,
        .init_machine   = ag5evm_init,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
 MACHINE_END
index b540b8eb20ca91d860fdf75bab7ce97d4a675434..ace60246a5dfd429d3ffd98c778c6910cd839d39 100644 (file)
@@ -1469,5 +1469,6 @@ MACHINE_START(AP4EVB, "ap4evb")
        .init_irq       = sh7372_init_irq,
        .handle_irq     = shmobile_handle_irq_intc,
        .init_machine   = ap4evb_init,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
 MACHINE_END
index 63ab7062bee33746e1ab439cd4475a13e596b7b6..e9b32cfbf741061f378fedf05b556553cdd6ceb3 100644 (file)
@@ -500,5 +500,6 @@ MACHINE_START(BONITO, "bonito")
        .init_irq       = r8a7740_init_irq,
        .handle_irq     = shmobile_handle_irq_intc,
        .init_machine   = bonito_init,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
 MACHINE_END
index 39b6cf85ced6cc2d98d3132c56629ff0a7398874..796fa00ad3c459cc3432a522446592bc2e18f33a 100644 (file)
@@ -338,5 +338,6 @@ MACHINE_START(G3EVM, "g3evm")
        .init_irq       = sh7367_init_irq,
        .handle_irq     = shmobile_handle_irq_intc,
        .init_machine   = g3evm_init,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
 MACHINE_END
index 0e5a39c670bc259731790e7f275bcfeb47ead4c9..f1257321999a3ca585b15706d413077339552f46 100644 (file)
@@ -381,5 +381,6 @@ MACHINE_START(G4EVM, "g4evm")
        .init_irq       = sh7377_init_irq,
        .handle_irq     = shmobile_handle_irq_intc,
        .init_machine   = g4evm_init,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
 MACHINE_END
index 200dcd42a3a0f819bc4581b8f9b82e0da9a3f962..f60f1b281cc46117c2cd2a03585ac8b21a8359dd 100644 (file)
@@ -521,5 +521,6 @@ MACHINE_START(KOTA2, "kota2")
        .init_irq       = sh73a0_init_irq,
        .handle_irq     = gic_handle_irq,
        .init_machine   = kota2_init,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
 MACHINE_END
index 50c67b22d08749eab3b985da19b6706906127729..b577f7c44678ade9a79753742e6606b3eb120600 100644 (file)
@@ -1638,5 +1638,6 @@ MACHINE_START(MACKEREL, "mackerel")
        .init_irq       = sh7372_init_irq,
        .handle_irq     = shmobile_handle_irq_intc,
        .init_machine   = mackerel_init,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
 MACHINE_END
index ef0e13bf0b3a46911564e1ef7515740a77028574..14de3787cafcc7193abefe6e4c1c589988a76acc 100644 (file)
@@ -98,5 +98,6 @@ MACHINE_START(MARZEN, "marzen")
        .init_irq       = r8a7779_init_irq,
        .handle_irq     = gic_handle_irq,
        .init_machine   = marzen_init,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/common.c b/arch/arm/mach-shmobile/common.c
new file mode 100644 (file)
index 0000000..608aba9
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <mach/common.h>
+
+void __init shmobile_init_late(void)
+{
+       shmobile_suspend_init();
+       shmobile_cpuidle_init();
+}
index 7e6559105d40f27be8ffbdbc394cc377cec65ade..7b541e911ab4aebe99262ef74c8ec0fb6084c01c 100644 (file)
@@ -46,7 +46,7 @@ static struct cpuidle_driver shmobile_cpuidle_driver = {
 
 void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
 
-static int shmobile_cpuidle_init(void)
+int shmobile_cpuidle_init(void)
 {
        struct cpuidle_device *dev = &shmobile_cpuidle_dev;
        struct cpuidle_driver *drv = &shmobile_cpuidle_driver;
@@ -65,4 +65,3 @@ static int shmobile_cpuidle_init(void)
 
        return 0;
 }
-late_initcall(shmobile_cpuidle_init);
index ff5f12fd742fb737c923eb2210cebcdafed5a537..01e2bc014f1501f41f2f2a9b18a7596bb686ac27 100644 (file)
@@ -85,4 +85,18 @@ extern int r8a7779_boot_secondary(unsigned int cpu);
 extern void r8a7779_smp_prepare_cpus(void);
 extern void r8a7779_register_twd(void);
 
+extern void shmobile_init_late(void);
+
+#ifdef CONFIG_SUSPEND
+int shmobile_suspend_init(void);
+#else
+static inline int shmobile_suspend_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_CPU_IDLE
+int shmobile_cpuidle_init(void);
+#else
+static inline int shmobile_cpuidle_init(void) { return 0; }
+#endif
+
 #endif /* __ARCH_MACH_COMMON_H */
index 4d1b86a49923bf7f28f004ad4bb8c4a3f902095f..47d83f7a70b6065d2053730ad14237622fcdad7f 100644 (file)
@@ -39,9 +39,8 @@ struct platform_suspend_ops shmobile_suspend_ops = {
        .valid          = suspend_valid_only_mem,
 };
 
-static int __init shmobile_suspend_init(void)
+int __init shmobile_suspend_init(void)
 {
        suspend_set_ops(&shmobile_suspend_ops);
        return 0;
 }
-late_initcall(shmobile_suspend_init);
diff --git a/arch/arm/mach-spear13xx/Kconfig b/arch/arm/mach-spear13xx/Kconfig
new file mode 100644 (file)
index 0000000..eaadc66
--- /dev/null
@@ -0,0 +1,20 @@
+#
+# SPEAr13XX Machine configuration file
+#
+
+if ARCH_SPEAR13XX
+
+menu "SPEAr13xx Implementations"
+config MACH_SPEAR1310
+       bool "SPEAr1310 Machine support with Device Tree"
+       select PINCTRL_SPEAR1310
+       help
+         Supports ST SPEAr1310 machine configured via the device-tree
+
+config MACH_SPEAR1340
+       bool "SPEAr1340 Machine support with Device Tree"
+       select PINCTRL_SPEAR1340
+       help
+         Supports ST SPEAr1340 machine configured via the device-tree
+endmenu
+endif #ARCH_SPEAR13XX
diff --git a/arch/arm/mach-spear13xx/Makefile b/arch/arm/mach-spear13xx/Makefile
new file mode 100644 (file)
index 0000000..3435ea7
--- /dev/null
@@ -0,0 +1,10 @@
+#
+# Makefile for SPEAr13XX machine series
+#
+
+obj-$(CONFIG_SMP)              += headsmp.o platsmp.o
+obj-$(CONFIG_HOTPLUG_CPU)      += hotplug.o
+
+obj-$(CONFIG_ARCH_SPEAR13XX)   += spear13xx.o
+obj-$(CONFIG_MACH_SPEAR1310)   += spear1310.o
+obj-$(CONFIG_MACH_SPEAR1340)   += spear1340.o
diff --git a/arch/arm/mach-spear13xx/Makefile.boot b/arch/arm/mach-spear13xx/Makefile.boot
new file mode 100644 (file)
index 0000000..403efd7
--- /dev/null
@@ -0,0 +1,6 @@
+zreladdr-y     += 0x00008000
+params_phys-y  := 0x00000100
+initrd_phys-y  := 0x00800000
+
+dtb-$(CONFIG_MACH_SPEAR1310)   += spear1310-evb.dtb
+dtb-$(CONFIG_MACH_SPEAR1340)   += spear1340-evb.dtb
diff --git a/arch/arm/mach-spear13xx/headsmp.S b/arch/arm/mach-spear13xx/headsmp.S
new file mode 100644 (file)
index 0000000..ed85473
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * arch/arm/mach-spear13XX/headsmp.S
+ *
+ * Picked from realview
+ * Copyright (c) 2012 ST Microelectronics Limited
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+       __INIT
+
+/*
+ * spear13xx specific entry point for secondary CPUs. This provides
+ * a "holding pen" into which all secondary cores are held until we're
+ * ready for them to initialise.
+ */
+ENTRY(spear13xx_secondary_startup)
+       mrc     p15, 0, r0, c0, c0, 5
+       and     r0, r0, #15
+       adr     r4, 1f
+       ldmia   r4, {r5, r6}
+       sub     r4, r4, r5
+       add     r6, r6, r4
+pen:   ldr     r7, [r6]
+       cmp     r7, r0
+       bne     pen
+
+       /* re-enable coherency */
+       mrc     p15, 0, r0, c1, c0, 1
+       orr     r0, r0, #(1 << 6) | (1 << 0)
+       mcr     p15, 0, r0, c1, c0, 1
+       /*
+        * we've been released from the holding pen: secondary_stack
+        * should now contain the SVC stack for this core
+        */
+       b       secondary_startup
+
+       .align
+1:     .long   .
+       .long   pen_release
+ENDPROC(spear13xx_secondary_startup)
diff --git a/arch/arm/mach-spear13xx/hotplug.c b/arch/arm/mach-spear13xx/hotplug.c
new file mode 100644 (file)
index 0000000..5c6867b
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+ * linux/arch/arm/mach-spear13xx/hotplug.c
+ *
+ * Copyright (C) 2012 ST Microelectronics Ltd.
+ * Deepak Sikri <deepak.sikri@st.com>
+ *
+ * based upon linux/arch/arm/mach-realview/hotplug.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+#include <asm/smp_plat.h>
+
+extern volatile int pen_release;
+
+static inline void cpu_enter_lowpower(void)
+{
+       unsigned int v;
+
+       flush_cache_all();
+       asm volatile(
+       "       mcr     p15, 0, %1, c7, c5, 0\n"
+       "       dsb\n"
+       /*
+        * Turn off coherency
+        */
+       "       mrc     p15, 0, %0, c1, c0, 1\n"
+       "       bic     %0, %0, #0x20\n"
+       "       mcr     p15, 0, %0, c1, c0, 1\n"
+       "       mrc     p15, 0, %0, c1, c0, 0\n"
+       "       bic     %0, %0, %2\n"
+       "       mcr     p15, 0, %0, c1, c0, 0\n"
+       : "=&r" (v)
+       : "r" (0), "Ir" (CR_C)
+       : "cc", "memory");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+       unsigned int v;
+
+       asm volatile("mrc       p15, 0, %0, c1, c0, 0\n"
+       "       orr     %0, %0, %1\n"
+       "       mcr     p15, 0, %0, c1, c0, 0\n"
+       "       mrc     p15, 0, %0, c1, c0, 1\n"
+       "       orr     %0, %0, #0x20\n"
+       "       mcr     p15, 0, %0, c1, c0, 1\n"
+       : "=&r" (v)
+       : "Ir" (CR_C)
+       : "cc");
+}
+
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+       for (;;) {
+               wfi();
+
+               if (pen_release == cpu) {
+                       /*
+                        * OK, proper wakeup, we're done
+                        */
+                       break;
+               }
+
+               /*
+                * Getting here, means that we have come out of WFI without
+                * having been woken up - this shouldn't happen
+                *
+                * Just note it happening - when we're woken, we can report
+                * its occurrence.
+                */
+               (*spurious)++;
+       }
+}
+
+int platform_cpu_kill(unsigned int cpu)
+{
+       return 1;
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void __cpuinit platform_cpu_die(unsigned int cpu)
+{
+       int spurious = 0;
+
+       /*
+        * we're ready for shutdown now, so do it
+        */
+       cpu_enter_lowpower();
+       platform_do_lowpower(cpu, &spurious);
+
+       /*
+        * bring this CPU back into the world of cache
+        * coherency, and then restore interrupts
+        */
+       cpu_leave_lowpower();
+
+       if (spurious)
+               pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+}
+
+int platform_cpu_disable(unsigned int cpu)
+{
+       /*
+        * we don't allow CPU 0 to be shutdown (it is still too special
+        * e.g. clock tick interrupts)
+        */
+       return cpu == 0 ? -EPERM : 0;
+}
diff --git a/arch/arm/mach-spear13xx/include/mach/debug-macro.S b/arch/arm/mach-spear13xx/include/mach/debug-macro.S
new file mode 100644 (file)
index 0000000..ea15646
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/debug-macro.S
+ *
+ * Debugging macro include header spear13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <plat/debug-macro.S>
diff --git a/arch/arm/mach-spear13xx/include/mach/dma.h b/arch/arm/mach-spear13xx/include/mach/dma.h
new file mode 100644 (file)
index 0000000..383ab04
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/dma.h
+ *
+ * DMA information for SPEAr13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_DMA_H
+#define __MACH_DMA_H
+
+/* request id of all the peripherals */
+enum dma_master_info {
+       /* Accessible from only one master */
+       DMA_MASTER_MCIF = 0,
+       DMA_MASTER_FSMC = 1,
+       /* Accessible from both 0 & 1 */
+       DMA_MASTER_MEMORY = 0,
+       DMA_MASTER_ADC = 0,
+       DMA_MASTER_UART0 = 0,
+       DMA_MASTER_SSP0 = 0,
+       DMA_MASTER_I2C0 = 0,
+
+#ifdef CONFIG_MACH_SPEAR1310
+       /* Accessible from only one master */
+       SPEAR1310_DMA_MASTER_JPEG = 1,
+
+       /* Accessible from both 0 & 1 */
+       SPEAR1310_DMA_MASTER_I2S = 0,
+       SPEAR1310_DMA_MASTER_UART1 = 0,
+       SPEAR1310_DMA_MASTER_UART2 = 0,
+       SPEAR1310_DMA_MASTER_UART3 = 0,
+       SPEAR1310_DMA_MASTER_UART4 = 0,
+       SPEAR1310_DMA_MASTER_UART5 = 0,
+       SPEAR1310_DMA_MASTER_I2C1 = 0,
+       SPEAR1310_DMA_MASTER_I2C2 = 0,
+       SPEAR1310_DMA_MASTER_I2C3 = 0,
+       SPEAR1310_DMA_MASTER_I2C4 = 0,
+       SPEAR1310_DMA_MASTER_I2C5 = 0,
+       SPEAR1310_DMA_MASTER_I2C6 = 0,
+       SPEAR1310_DMA_MASTER_I2C7 = 0,
+       SPEAR1310_DMA_MASTER_SSP1 = 0,
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+       /* Accessible from only one master */
+       SPEAR1340_DMA_MASTER_I2S_PLAY = 1,
+       SPEAR1340_DMA_MASTER_I2S_REC = 1,
+       SPEAR1340_DMA_MASTER_I2C1 = 1,
+       SPEAR1340_DMA_MASTER_UART1 = 1,
+
+       /* following are accessible from both master 0 & 1 */
+       SPEAR1340_DMA_MASTER_SPDIF = 0,
+       SPEAR1340_DMA_MASTER_CAM = 1,
+       SPEAR1340_DMA_MASTER_VIDEO_IN = 0,
+       SPEAR1340_DMA_MASTER_MALI = 0,
+#endif
+};
+
+enum request_id {
+       DMA_REQ_ADC = 0,
+       DMA_REQ_SSP0_TX = 4,
+       DMA_REQ_SSP0_RX = 5,
+       DMA_REQ_UART0_TX = 6,
+       DMA_REQ_UART0_RX = 7,
+       DMA_REQ_I2C0_TX = 8,
+       DMA_REQ_I2C0_RX = 9,
+
+#ifdef CONFIG_MACH_SPEAR1310
+       SPEAR1310_DMA_REQ_FROM_JPEG = 2,
+       SPEAR1310_DMA_REQ_TO_JPEG = 3,
+       SPEAR1310_DMA_REQ_I2S_TX = 10,
+       SPEAR1310_DMA_REQ_I2S_RX = 11,
+
+       SPEAR1310_DMA_REQ_I2C1_RX = 0,
+       SPEAR1310_DMA_REQ_I2C1_TX = 1,
+       SPEAR1310_DMA_REQ_I2C2_RX = 2,
+       SPEAR1310_DMA_REQ_I2C2_TX = 3,
+       SPEAR1310_DMA_REQ_I2C3_RX = 4,
+       SPEAR1310_DMA_REQ_I2C3_TX = 5,
+       SPEAR1310_DMA_REQ_I2C4_RX = 6,
+       SPEAR1310_DMA_REQ_I2C4_TX = 7,
+       SPEAR1310_DMA_REQ_I2C5_RX = 8,
+       SPEAR1310_DMA_REQ_I2C5_TX = 9,
+       SPEAR1310_DMA_REQ_I2C6_RX = 10,
+       SPEAR1310_DMA_REQ_I2C6_TX = 11,
+       SPEAR1310_DMA_REQ_UART1_RX = 12,
+       SPEAR1310_DMA_REQ_UART1_TX = 13,
+       SPEAR1310_DMA_REQ_UART2_RX = 14,
+       SPEAR1310_DMA_REQ_UART2_TX = 15,
+       SPEAR1310_DMA_REQ_UART5_RX = 16,
+       SPEAR1310_DMA_REQ_UART5_TX = 17,
+       SPEAR1310_DMA_REQ_SSP1_RX = 18,
+       SPEAR1310_DMA_REQ_SSP1_TX = 19,
+       SPEAR1310_DMA_REQ_I2C7_RX = 20,
+       SPEAR1310_DMA_REQ_I2C7_TX = 21,
+       SPEAR1310_DMA_REQ_UART3_RX = 28,
+       SPEAR1310_DMA_REQ_UART3_TX = 29,
+       SPEAR1310_DMA_REQ_UART4_RX = 30,
+       SPEAR1310_DMA_REQ_UART4_TX = 31,
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+       SPEAR1340_DMA_REQ_SPDIF_TX = 2,
+       SPEAR1340_DMA_REQ_SPDIF_RX = 3,
+       SPEAR1340_DMA_REQ_I2S_TX = 10,
+       SPEAR1340_DMA_REQ_I2S_RX = 11,
+       SPEAR1340_DMA_REQ_UART1_TX = 12,
+       SPEAR1340_DMA_REQ_UART1_RX = 13,
+       SPEAR1340_DMA_REQ_I2C1_TX = 14,
+       SPEAR1340_DMA_REQ_I2C1_RX = 15,
+       SPEAR1340_DMA_REQ_CAM0_EVEN = 0,
+       SPEAR1340_DMA_REQ_CAM0_ODD = 1,
+       SPEAR1340_DMA_REQ_CAM1_EVEN = 2,
+       SPEAR1340_DMA_REQ_CAM1_ODD = 3,
+       SPEAR1340_DMA_REQ_CAM2_EVEN = 4,
+       SPEAR1340_DMA_REQ_CAM2_ODD = 5,
+       SPEAR1340_DMA_REQ_CAM3_EVEN = 6,
+       SPEAR1340_DMA_REQ_CAM3_ODD = 7,
+#endif
+};
+
+#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/generic.h b/arch/arm/mach-spear13xx/include/mach/generic.h
new file mode 100644 (file)
index 0000000..6d8c45b
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/generic.h
+ *
+ * spear13xx machine family generic header file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GENERIC_H
+#define __MACH_GENERIC_H
+
+#include <linux/dmaengine.h>
+#include <asm/mach/time.h>
+
+/* Add spear13xx structure declarations here */
+extern struct sys_timer spear13xx_timer;
+extern struct pl022_ssp_controller pl022_plat_data;
+extern struct dw_dma_platform_data dmac_plat_data;
+extern struct dw_dma_slave cf_dma_priv;
+extern struct dw_dma_slave nand_read_dma_priv;
+extern struct dw_dma_slave nand_write_dma_priv;
+
+/* Add spear13xx family function declarations here */
+void __init spear_setup_of_timer(void);
+void __init spear13xx_map_io(void);
+void __init spear13xx_dt_init_irq(void);
+void __init spear13xx_l2x0_init(void);
+bool dw_dma_filter(struct dma_chan *chan, void *slave);
+void spear_restart(char, const char *);
+void spear13xx_secondary_startup(void);
+
+#ifdef CONFIG_MACH_SPEAR1310
+void __init spear1310_clk_init(void);
+#else
+static inline void spear1310_clk_init(void) {}
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+void __init spear1340_clk_init(void);
+#else
+static inline void spear1340_clk_init(void) {}
+#endif
+
+#endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/gpio.h b/arch/arm/mach-spear13xx/include/mach/gpio.h
new file mode 100644 (file)
index 0000000..cd6f4f8
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/gpio.h
+ *
+ * GPIO macros for SPEAr13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GPIO_H
+#define __MACH_GPIO_H
+
+#include <plat/gpio.h>
+
+#endif /* __MACH_GPIO_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/hardware.h b/arch/arm/mach-spear13xx/include/mach/hardware.h
new file mode 100644 (file)
index 0000000..40a8c17
--- /dev/null
@@ -0,0 +1 @@
+/* empty */
diff --git a/arch/arm/mach-spear13xx/include/mach/irqs.h b/arch/arm/mach-spear13xx/include/mach/irqs.h
new file mode 100644 (file)
index 0000000..f542a24
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/irqs.h
+ *
+ * IRQ helper macros for spear13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_IRQS_H
+#define __MACH_IRQS_H
+
+#define IRQ_GIC_END                    160
+#define NR_IRQS                                IRQ_GIC_END
+
+#endif /* __MACH_IRQS_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/spear.h b/arch/arm/mach-spear13xx/include/mach/spear.h
new file mode 100644 (file)
index 0000000..30c57ef
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/spear.h
+ *
+ * spear13xx Machine family specific definition
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_SPEAR13XX_H
+#define __MACH_SPEAR13XX_H
+
+#include <asm/memory.h>
+
+#define PERIP_GRP2_BASE                                UL(0xB3000000)
+#define VA_PERIP_GRP2_BASE                     UL(0xFE000000)
+#define MCIF_SDHCI_BASE                                UL(0xB3000000)
+#define SYSRAM0_BASE                           UL(0xB3800000)
+#define VA_SYSRAM0_BASE                                UL(0xFE800000)
+#define SYS_LOCATION                           (VA_SYSRAM0_BASE + 0x600)
+
+#define PERIP_GRP1_BASE                                UL(0xE0000000)
+#define VA_PERIP_GRP1_BASE                     UL(0xFD000000)
+#define UART_BASE                              UL(0xE0000000)
+#define VA_UART_BASE                           UL(0xFD000000)
+#define SSP_BASE                               UL(0xE0100000)
+#define MISC_BASE                              UL(0xE0700000)
+#define VA_MISC_BASE                           IOMEM(UL(0xFD700000))
+
+#define A9SM_AND_MPMC_BASE                     UL(0xEC000000)
+#define VA_A9SM_AND_MPMC_BASE                  UL(0xFC000000)
+
+/* A9SM peripheral offsets */
+#define A9SM_PERIP_BASE                                UL(0xEC800000)
+#define VA_A9SM_PERIP_BASE                     UL(0xFC800000)
+#define VA_SCU_BASE                            (VA_A9SM_PERIP_BASE + 0x00)
+
+#define L2CC_BASE                              UL(0xED000000)
+#define VA_L2CC_BASE                           IOMEM(UL(0xFB000000))
+
+/* others */
+#define DMAC0_BASE                             UL(0xEA800000)
+#define DMAC1_BASE                             UL(0xEB000000)
+#define MCIF_CF_BASE                           UL(0xB2800000)
+
+/* Devices present in SPEAr1310 */
+#ifdef CONFIG_MACH_SPEAR1310
+#define SPEAR1310_RAS_GRP1_BASE                        UL(0xD8000000)
+#define VA_SPEAR1310_RAS_GRP1_BASE             UL(0xFA000000)
+#define SPEAR1310_RAS_BASE                     UL(0xD8400000)
+#define VA_SPEAR1310_RAS_BASE                  IOMEM(UL(0xFA400000))
+#endif /* CONFIG_MACH_SPEAR1310 */
+
+/* Debug uart for linux, will be used for debug and uncompress messages */
+#define SPEAR_DBG_UART_BASE                    UART_BASE
+#define VA_SPEAR_DBG_UART_BASE                 VA_UART_BASE
+
+#endif /* __MACH_SPEAR13XX_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h b/arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h b/arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/arch/arm/mach-spear13xx/include/mach/timex.h b/arch/arm/mach-spear13xx/include/mach/timex.h
new file mode 100644 (file)
index 0000000..31af3e8
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/timex.h
+ *
+ * SPEAr13XX machine family specific timex definitions
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_TIMEX_H
+#define __MACH_TIMEX_H
+
+#include <plat/timex.h>
+
+#endif /* __MACH_TIMEX_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/uncompress.h b/arch/arm/mach-spear13xx/include/mach/uncompress.h
new file mode 100644 (file)
index 0000000..c784089
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/uncompress.h
+ *
+ * Serial port stubs for kernel decompress status messages
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_UNCOMPRESS_H
+#define __MACH_UNCOMPRESS_H
+
+#include <plat/uncompress.h>
+
+#endif /* __MACH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-spear13xx/platsmp.c b/arch/arm/mach-spear13xx/platsmp.c
new file mode 100644 (file)
index 0000000..f5d07f2
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * arch/arm/mach-spear13xx/platsmp.c
+ *
+ * based upon linux/arch/arm/mach-realview/platsmp.c
+ *
+ * Copyright (C) 2012 ST Microelectronics Ltd.
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/hardware/gic.h>
+#include <asm/smp_scu.h>
+#include <mach/spear.h>
+
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen"
+ */
+volatile int __cpuinitdata pen_release = -1;
+static DEFINE_SPINLOCK(boot_lock);
+
+static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
+extern void spear13xx_secondary_startup(void);
+
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+       /*
+        * if any interrupts are already enabled for the primary
+        * core (e.g. timer irq), then they will not have been enabled
+        * for us: do so
+        */
+       gic_secondary_init(0);
+
+       /*
+        * let the primary processor know we're out of the
+        * pen, then head off into the C entry point
+        */
+       pen_release = -1;
+       smp_wmb();
+
+       /*
+        * Synchronise with the boot thread.
+        */
+       spin_lock(&boot_lock);
+       spin_unlock(&boot_lock);
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+       unsigned long timeout;
+
+       /*
+        * set synchronisation state between this boot processor
+        * and the secondary one
+        */
+       spin_lock(&boot_lock);
+
+       /*
+        * The secondary processor is waiting to be released from
+        * the holding pen - release it, then wait for it to flag
+        * that it has been released by resetting pen_release.
+        *
+        * Note that "pen_release" is the hardware CPU ID, whereas
+        * "cpu" is Linux's internal ID.
+        */
+       pen_release = cpu;
+       flush_cache_all();
+       outer_flush_all();
+
+       timeout = jiffies + (1 * HZ);
+       while (time_before(jiffies, timeout)) {
+               smp_rmb();
+               if (pen_release == -1)
+                       break;
+
+               udelay(10);
+       }
+
+       /*
+        * now the secondary core is starting up let it run its
+        * calibrations, then wait for it to finish
+        */
+       spin_unlock(&boot_lock);
+
+       return pen_release != -1 ? -ENOSYS : 0;
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+       unsigned int i, ncores = scu_get_core_count(scu_base);
+
+       if (ncores > nr_cpu_ids) {
+               pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+                       ncores, nr_cpu_ids);
+               ncores = nr_cpu_ids;
+       }
+
+       for (i = 0; i < ncores; i++)
+               set_cpu_possible(i, true);
+
+       set_smp_cross_call(gic_raise_softirq);
+}
+
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+
+       scu_enable(scu_base);
+
+       /*
+        * Write the address of secondary startup into the system-wide location
+        * (presently it is in SRAM). The BootMonitor waits until it receives a
+        * soft interrupt, and then the secondary CPU branches to this address.
+        */
+       __raw_writel(virt_to_phys(spear13xx_secondary_startup), SYS_LOCATION);
+}
diff --git a/arch/arm/mach-spear13xx/spear1310.c b/arch/arm/mach-spear13xx/spear1310.c
new file mode 100644 (file)
index 0000000..fefd15b
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * arch/arm/mach-spear13xx/spear1310.c
+ *
+ * SPEAr1310 machine source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "SPEAr1310: " fmt
+
+#include <linux/amba/pl022.h>
+#include <linux/of_platform.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* Base addresses */
+#define SPEAR1310_SSP1_BASE                    UL(0x5D400000)
+#define SPEAR1310_SATA0_BASE                   UL(0xB1000000)
+#define SPEAR1310_SATA1_BASE                   UL(0xB1800000)
+#define SPEAR1310_SATA2_BASE                   UL(0xB4000000)
+
+/* ssp device registration */
+static struct pl022_ssp_controller ssp1_plat_data = {
+       .bus_id = 0,
+       .enable_dma = 0,
+       .num_chipselect = 3,
+};
+
+/* Add SPEAr1310 auxdata to pass platform data */
+static struct of_dev_auxdata spear1310_auxdata_lookup[] __initdata = {
+       OF_DEV_AUXDATA("arasan,cf-spear1340", MCIF_CF_BASE, NULL, &cf_dma_priv),
+       OF_DEV_AUXDATA("snps,dma-spear1340", DMAC0_BASE, NULL, &dmac_plat_data),
+       OF_DEV_AUXDATA("snps,dma-spear1340", DMAC1_BASE, NULL, &dmac_plat_data),
+       OF_DEV_AUXDATA("arm,pl022", SSP_BASE, NULL, &pl022_plat_data),
+
+       OF_DEV_AUXDATA("arm,pl022", SPEAR1310_SSP1_BASE, NULL, &ssp1_plat_data),
+       {}
+};
+
+static void __init spear1310_dt_init(void)
+{
+       of_platform_populate(NULL, of_default_bus_match_table,
+                       spear1310_auxdata_lookup, NULL);
+}
+
+static const char * const spear1310_dt_board_compat[] = {
+       "st,spear1310",
+       "st,spear1310-evb",
+       NULL,
+};
+
+/*
+ * Following will create 16MB static virtual/physical mappings
+ * PHYSICAL            VIRTUAL
+ * 0xD8000000          0xFA000000
+ */
+struct map_desc spear1310_io_desc[] __initdata = {
+       {
+               .virtual        = VA_SPEAR1310_RAS_GRP1_BASE,
+               .pfn            = __phys_to_pfn(SPEAR1310_RAS_GRP1_BASE),
+               .length         = SZ_16M,
+               .type           = MT_DEVICE
+       },
+};
+
+static void __init spear1310_map_io(void)
+{
+       iotable_init(spear1310_io_desc, ARRAY_SIZE(spear1310_io_desc));
+       spear13xx_map_io();
+}
+
+DT_MACHINE_START(SPEAR1310_DT, "ST SPEAr1310 SoC with Flattened Device Tree")
+       .map_io         =       spear1310_map_io,
+       .init_irq       =       spear13xx_dt_init_irq,
+       .handle_irq     =       gic_handle_irq,
+       .timer          =       &spear13xx_timer,
+       .init_machine   =       spear1310_dt_init,
+       .restart        =       spear_restart,
+       .dt_compat      =       spear1310_dt_board_compat,
+MACHINE_END
diff --git a/arch/arm/mach-spear13xx/spear1340.c b/arch/arm/mach-spear13xx/spear1340.c
new file mode 100644 (file)
index 0000000..ee38cbc
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+ * arch/arm/mach-spear13xx/spear1340.c
+ *
+ * SPEAr1340 machine source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "SPEAr1340: " fmt
+
+#include <linux/ahci_platform.h>
+#include <linux/amba/serial.h>
+#include <linux/delay.h>
+#include <linux/dw_dmac.h>
+#include <linux/of_platform.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/arch.h>
+#include <mach/dma.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* Base addresses */
+#define SPEAR1340_SATA_BASE                    UL(0xB1000000)
+#define SPEAR1340_UART1_BASE                   UL(0xB4100000)
+
+/* Power Management Registers */
+#define SPEAR1340_PCM_CFG                      (VA_MISC_BASE + 0x100)
+#define SPEAR1340_PCM_WKUP_CFG                 (VA_MISC_BASE + 0x104)
+#define SPEAR1340_SWITCH_CTR                   (VA_MISC_BASE + 0x108)
+
+#define SPEAR1340_PERIP1_SW_RST                        (VA_MISC_BASE + 0x318)
+#define SPEAR1340_PERIP2_SW_RST                        (VA_MISC_BASE + 0x31C)
+#define SPEAR1340_PERIP3_SW_RST                        (VA_MISC_BASE + 0x320)
+
+/* PCIE - SATA configuration registers */
+#define SPEAR1340_PCIE_SATA_CFG                        (VA_MISC_BASE + 0x424)
+       /* PCIE CFG MASks */
+       #define SPEAR1340_PCIE_CFG_DEVICE_PRESENT       (1 << 11)
+       #define SPEAR1340_PCIE_CFG_POWERUP_RESET        (1 << 10)
+       #define SPEAR1340_PCIE_CFG_CORE_CLK_EN          (1 << 9)
+       #define SPEAR1340_PCIE_CFG_AUX_CLK_EN           (1 << 8)
+       #define SPEAR1340_SATA_CFG_TX_CLK_EN            (1 << 4)
+       #define SPEAR1340_SATA_CFG_RX_CLK_EN            (1 << 3)
+       #define SPEAR1340_SATA_CFG_POWERUP_RESET        (1 << 2)
+       #define SPEAR1340_SATA_CFG_PM_CLK_EN            (1 << 1)
+       #define SPEAR1340_PCIE_SATA_SEL_PCIE            (0)
+       #define SPEAR1340_PCIE_SATA_SEL_SATA            (1)
+       #define SPEAR1340_SATA_PCIE_CFG_MASK            0xF1F
+       #define SPEAR1340_PCIE_CFG_VAL  (SPEAR1340_PCIE_SATA_SEL_PCIE | \
+                       SPEAR1340_PCIE_CFG_AUX_CLK_EN | \
+                       SPEAR1340_PCIE_CFG_CORE_CLK_EN | \
+                       SPEAR1340_PCIE_CFG_POWERUP_RESET | \
+                       SPEAR1340_PCIE_CFG_DEVICE_PRESENT)
+       #define SPEAR1340_SATA_CFG_VAL  (SPEAR1340_PCIE_SATA_SEL_SATA | \
+                       SPEAR1340_SATA_CFG_PM_CLK_EN | \
+                       SPEAR1340_SATA_CFG_POWERUP_RESET | \
+                       SPEAR1340_SATA_CFG_RX_CLK_EN | \
+                       SPEAR1340_SATA_CFG_TX_CLK_EN)
+
+#define SPEAR1340_PCIE_MIPHY_CFG               (VA_MISC_BASE + 0x428)
+       #define SPEAR1340_MIPHY_OSC_BYPASS_EXT          (1 << 31)
+       #define SPEAR1340_MIPHY_CLK_REF_DIV2            (1 << 27)
+       #define SPEAR1340_MIPHY_CLK_REF_DIV4            (2 << 27)
+       #define SPEAR1340_MIPHY_CLK_REF_DIV8            (3 << 27)
+       #define SPEAR1340_MIPHY_PLL_RATIO_TOP(x)        (x << 0)
+       #define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA \
+                       (SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
+                       SPEAR1340_MIPHY_CLK_REF_DIV2 | \
+                       SPEAR1340_MIPHY_PLL_RATIO_TOP(60))
+       #define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK \
+                       (SPEAR1340_MIPHY_PLL_RATIO_TOP(120))
+       #define SPEAR1340_PCIE_SATA_MIPHY_CFG_PCIE \
+                       (SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
+                       SPEAR1340_MIPHY_PLL_RATIO_TOP(25))
+
+static struct dw_dma_slave uart1_dma_param[] = {
+       {
+               /* Tx */
+               .cfg_hi = DWC_CFGH_DST_PER(SPEAR1340_DMA_REQ_UART1_TX),
+               .cfg_lo = 0,
+               .src_master = DMA_MASTER_MEMORY,
+               .dst_master = SPEAR1340_DMA_MASTER_UART1,
+       }, {
+               /* Rx */
+               .cfg_hi = DWC_CFGH_SRC_PER(SPEAR1340_DMA_REQ_UART1_RX),
+               .cfg_lo = 0,
+               .src_master = SPEAR1340_DMA_MASTER_UART1,
+               .dst_master = DMA_MASTER_MEMORY,
+       }
+};
+
+static struct amba_pl011_data uart1_data = {
+       .dma_filter = dw_dma_filter,
+       .dma_tx_param = &uart1_dma_param[0],
+       .dma_rx_param = &uart1_dma_param[1],
+};
+
+/* SATA device registration */
+static int sata_miphy_init(struct device *dev, void __iomem *addr)
+{
+       writel(SPEAR1340_SATA_CFG_VAL, SPEAR1340_PCIE_SATA_CFG);
+       writel(SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK,
+                       SPEAR1340_PCIE_MIPHY_CFG);
+       /* Switch on sata power domain */
+       writel((readl(SPEAR1340_PCM_CFG) | (0x800)), SPEAR1340_PCM_CFG);
+       msleep(20);
+       /* Disable PCIE SATA Controller reset */
+       writel((readl(SPEAR1340_PERIP1_SW_RST) & (~0x1000)),
+                       SPEAR1340_PERIP1_SW_RST);
+       msleep(20);
+
+       return 0;
+}
+
+void sata_miphy_exit(struct device *dev)
+{
+       writel(0, SPEAR1340_PCIE_SATA_CFG);
+       writel(0, SPEAR1340_PCIE_MIPHY_CFG);
+
+       /* Enable PCIE SATA Controller reset */
+       writel((readl(SPEAR1340_PERIP1_SW_RST) | (0x1000)),
+                       SPEAR1340_PERIP1_SW_RST);
+       msleep(20);
+       /* Switch off sata power domain */
+       writel((readl(SPEAR1340_PCM_CFG) & (~0x800)), SPEAR1340_PCM_CFG);
+       msleep(20);
+}
+
+int sata_suspend(struct device *dev)
+{
+       if (dev->power.power_state.event == PM_EVENT_FREEZE)
+               return 0;
+
+       sata_miphy_exit(dev);
+
+       return 0;
+}
+
+int sata_resume(struct device *dev)
+{
+       if (dev->power.power_state.event == PM_EVENT_THAW)
+               return 0;
+
+       return sata_miphy_init(dev, NULL);
+}
+
+static struct ahci_platform_data sata_pdata = {
+       .init = sata_miphy_init,
+       .exit = sata_miphy_exit,
+       .suspend = sata_suspend,
+       .resume = sata_resume,
+};
+
+/* Add SPEAr1340 auxdata to pass platform data */
+static struct of_dev_auxdata spear1340_auxdata_lookup[] __initdata = {
+       OF_DEV_AUXDATA("arasan,cf-spear1340", MCIF_CF_BASE, NULL, &cf_dma_priv),
+       OF_DEV_AUXDATA("snps,dma-spear1340", DMAC0_BASE, NULL, &dmac_plat_data),
+       OF_DEV_AUXDATA("snps,dma-spear1340", DMAC1_BASE, NULL, &dmac_plat_data),
+       OF_DEV_AUXDATA("arm,pl022", SSP_BASE, NULL, &pl022_plat_data),
+
+       OF_DEV_AUXDATA("snps,spear-ahci", SPEAR1340_SATA_BASE, NULL,
+                       &sata_pdata),
+       OF_DEV_AUXDATA("arm,pl011", SPEAR1340_UART1_BASE, NULL, &uart1_data),
+       {}
+};
+
+static void __init spear1340_dt_init(void)
+{
+       of_platform_populate(NULL, of_default_bus_match_table,
+                       spear1340_auxdata_lookup, NULL);
+}
+
+static const char * const spear1340_dt_board_compat[] = {
+       "st,spear1340",
+       "st,spear1340-evb",
+       NULL,
+};
+
+DT_MACHINE_START(SPEAR1340_DT, "ST SPEAr1340 SoC with Flattened Device Tree")
+       .map_io         =       spear13xx_map_io,
+       .init_irq       =       spear13xx_dt_init_irq,
+       .handle_irq     =       gic_handle_irq,
+       .timer          =       &spear13xx_timer,
+       .init_machine   =       spear1340_dt_init,
+       .restart        =       spear_restart,
+       .dt_compat      =       spear1340_dt_board_compat,
+MACHINE_END
diff --git a/arch/arm/mach-spear13xx/spear13xx.c b/arch/arm/mach-spear13xx/spear13xx.c
new file mode 100644 (file)
index 0000000..50b349a
--- /dev/null
@@ -0,0 +1,197 @@
+/*
+ * arch/arm/mach-spear13xx/spear13xx.c
+ *
+ * SPEAr13XX machines common source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "SPEAr13xx: " fmt
+
+#include <linux/amba/pl022.h>
+#include <linux/clk.h>
+#include <linux/dw_dmac.h>
+#include <linux/err.h>
+#include <linux/of_irq.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/map.h>
+#include <asm/smp_twd.h>
+#include <mach/dma.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* common dw_dma filter routine to be used by peripherals */
+bool dw_dma_filter(struct dma_chan *chan, void *slave)
+{
+       struct dw_dma_slave *dws = (struct dw_dma_slave *)slave;
+
+       if (chan->device->dev == dws->dma_dev) {
+               chan->private = slave;
+               return true;
+       } else {
+               return false;
+       }
+}
+
+/* ssp device registration */
+static struct dw_dma_slave ssp_dma_param[] = {
+       {
+               /* Tx */
+               .cfg_hi = DWC_CFGH_DST_PER(DMA_REQ_SSP0_TX),
+               .cfg_lo = 0,
+               .src_master = DMA_MASTER_MEMORY,
+               .dst_master = DMA_MASTER_SSP0,
+       }, {
+               /* Rx */
+               .cfg_hi = DWC_CFGH_SRC_PER(DMA_REQ_SSP0_RX),
+               .cfg_lo = 0,
+               .src_master = DMA_MASTER_SSP0,
+               .dst_master = DMA_MASTER_MEMORY,
+       }
+};
+
+struct pl022_ssp_controller pl022_plat_data = {
+       .bus_id = 0,
+       .enable_dma = 1,
+       .dma_filter = dw_dma_filter,
+       .dma_rx_param = &ssp_dma_param[1],
+       .dma_tx_param = &ssp_dma_param[0],
+       .num_chipselect = 3,
+};
+
+/* CF device registration */
+struct dw_dma_slave cf_dma_priv = {
+       .cfg_hi = 0,
+       .cfg_lo = 0,
+       .src_master = 0,
+       .dst_master = 0,
+};
+
+/* dmac device registration */
+struct dw_dma_platform_data dmac_plat_data = {
+       .nr_channels = 8,
+       .chan_allocation_order = CHAN_ALLOCATION_DESCENDING,
+       .chan_priority = CHAN_PRIORITY_DESCENDING,
+};
+
+void __init spear13xx_l2x0_init(void)
+{
+       /*
+        * 512KB (64KB/way), 8-way associativity, parity supported
+        *
+        * FIXME: 9th bit of the Auxiliary Control register must be set
+        * for some spear13xx devices for stable L2 operation.
+        *
+        * Enable Early BRESP, L2 prefetch for Instruction and Data,
+        * write alloc and 'Full line of zero' options
+        *
+        */
+
+       writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL);
+
+       /*
+        * Program following latencies in order to make
+        * SPEAr1340 work at 600 MHz
+        */
+       writel_relaxed(0x221, VA_L2CC_BASE + L2X0_TAG_LATENCY_CTRL);
+       writel_relaxed(0x441, VA_L2CC_BASE + L2X0_DATA_LATENCY_CTRL);
+       l2x0_init(VA_L2CC_BASE, 0x70A60001, 0xfe00ffff);
+}
+
+/*
+ * Following will create 16MB static virtual/physical mappings
+ * PHYSICAL            VIRTUAL
+ * 0xB3000000          0xFE000000
+ * 0xE0000000          0xFD000000
+ * 0xEC000000          0xFC000000
+ * 0xED000000          0xFB000000
+ */
+struct map_desc spear13xx_io_desc[] __initdata = {
+       {
+               .virtual        = VA_PERIP_GRP2_BASE,
+               .pfn            = __phys_to_pfn(PERIP_GRP2_BASE),
+               .length         = SZ_16M,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = VA_PERIP_GRP1_BASE,
+               .pfn            = __phys_to_pfn(PERIP_GRP1_BASE),
+               .length         = SZ_16M,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = VA_A9SM_AND_MPMC_BASE,
+               .pfn            = __phys_to_pfn(A9SM_AND_MPMC_BASE),
+               .length         = SZ_16M,
+               .type           = MT_DEVICE
+       }, {
+               .virtual        = (unsigned long)VA_L2CC_BASE,
+               .pfn            = __phys_to_pfn(L2CC_BASE),
+               .length         = SZ_4K,
+               .type           = MT_DEVICE
+       },
+};
+
+/* This will create static memory mapping for selected devices */
+void __init spear13xx_map_io(void)
+{
+       iotable_init(spear13xx_io_desc, ARRAY_SIZE(spear13xx_io_desc));
+}
+
+static void __init spear13xx_clk_init(void)
+{
+       if (of_machine_is_compatible("st,spear1310"))
+               spear1310_clk_init();
+       else if (of_machine_is_compatible("st,spear1340"))
+               spear1340_clk_init();
+       else
+               pr_err("%s: Unknown machine\n", __func__);
+}
+
+static void __init spear13xx_timer_init(void)
+{
+       char pclk_name[] = "osc_24m_clk";
+       struct clk *gpt_clk, *pclk;
+
+       spear13xx_clk_init();
+
+       /* get the system timer clock */
+       gpt_clk = clk_get_sys("gpt0", NULL);
+       if (IS_ERR(gpt_clk)) {
+               pr_err("%s:couldn't get clk for gpt\n", __func__);
+               BUG();
+       }
+
+       /* get the suitable parent clock for timer */
+       pclk = clk_get(NULL, pclk_name);
+       if (IS_ERR(pclk)) {
+               pr_err("%s:couldn't get %s as parent for gpt\n", __func__,
+                               pclk_name);
+               BUG();
+       }
+
+       clk_set_parent(gpt_clk, pclk);
+       clk_put(gpt_clk);
+       clk_put(pclk);
+
+       spear_setup_of_timer();
+       twd_local_timer_of_register();
+}
+
+struct sys_timer spear13xx_timer = {
+       .init = spear13xx_timer_init,
+};
+
+static const struct of_device_id gic_of_match[] __initconst = {
+       { .compatible = "arm,cortex-a9-gic", .data = gic_of_init },
+       { /* Sentinel */ }
+};
+
+void __init spear13xx_dt_init_irq(void)
+{
+       of_irq_init(gic_of_match);
+}
index 17b5d83cf2d5459457c80841fef4b5d3838d2964..8d12faa178fd9dc82472f065f4549631c4861e49 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 # common files
-obj-$(CONFIG_ARCH_SPEAR3XX)    += spear3xx.o clock.o
+obj-$(CONFIG_ARCH_SPEAR3XX)    += spear3xx.o
 
 # spear300 specific files
 obj-$(CONFIG_MACH_SPEAR300) += spear300.o
diff --git a/arch/arm/mach-spear3xx/clock.c b/arch/arm/mach-spear3xx/clock.c
deleted file mode 100644 (file)
index cd6c110..0000000
+++ /dev/null
@@ -1,892 +0,0 @@
-/*
- * arch/arm/mach-spear3xx/clock.c
- *
- * SPEAr3xx machines clock framework source file
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/of_platform.h>
-#include <asm/mach-types.h>
-#include <plat/clock.h>
-#include <mach/misc_regs.h>
-#include <mach/spear.h>
-
-#define PLL1_CTR               (MISC_BASE + 0x008)
-#define PLL1_FRQ               (MISC_BASE + 0x00C)
-#define PLL1_MOD               (MISC_BASE + 0x010)
-#define PLL2_CTR               (MISC_BASE + 0x014)
-/* PLL_CTR register masks */
-#define PLL_ENABLE             2
-#define PLL_MODE_SHIFT         4
-#define PLL_MODE_MASK          0x3
-#define PLL_MODE_NORMAL                0
-#define PLL_MODE_FRACTION      1
-#define PLL_MODE_DITH_DSB      2
-#define PLL_MODE_DITH_SSB      3
-
-#define PLL2_FRQ               (MISC_BASE + 0x018)
-/* PLL FRQ register masks */
-#define PLL_DIV_N_SHIFT                0
-#define PLL_DIV_N_MASK         0xFF
-#define PLL_DIV_P_SHIFT                8
-#define PLL_DIV_P_MASK         0x7
-#define PLL_NORM_FDBK_M_SHIFT  24
-#define PLL_NORM_FDBK_M_MASK   0xFF
-#define PLL_DITH_FDBK_M_SHIFT  16
-#define PLL_DITH_FDBK_M_MASK   0xFFFF
-
-#define PLL2_MOD               (MISC_BASE + 0x01C)
-#define PLL_CLK_CFG            (MISC_BASE + 0x020)
-#define CORE_CLK_CFG           (MISC_BASE + 0x024)
-/* CORE CLK CFG register masks */
-#define PLL_HCLK_RATIO_SHIFT   10
-#define PLL_HCLK_RATIO_MASK    0x3
-#define HCLK_PCLK_RATIO_SHIFT  8
-#define HCLK_PCLK_RATIO_MASK   0x3
-
-#define PERIP_CLK_CFG          (MISC_BASE + 0x028)
-/* PERIP_CLK_CFG register masks */
-#define UART_CLK_SHIFT         4
-#define UART_CLK_MASK          0x1
-#define FIRDA_CLK_SHIFT                5
-#define FIRDA_CLK_MASK         0x3
-#define GPT0_CLK_SHIFT         8
-#define GPT1_CLK_SHIFT         11
-#define GPT2_CLK_SHIFT         12
-#define GPT_CLK_MASK           0x1
-#define AUX_CLK_PLL3_VAL       0
-#define AUX_CLK_PLL1_VAL       1
-
-#define PERIP1_CLK_ENB         (MISC_BASE + 0x02C)
-/* PERIP1_CLK_ENB register masks */
-#define UART_CLK_ENB           3
-#define SSP_CLK_ENB            5
-#define I2C_CLK_ENB            7
-#define JPEG_CLK_ENB           8
-#define FIRDA_CLK_ENB          10
-#define GPT1_CLK_ENB           11
-#define GPT2_CLK_ENB           12
-#define ADC_CLK_ENB            15
-#define RTC_CLK_ENB            17
-#define GPIO_CLK_ENB           18
-#define DMA_CLK_ENB            19
-#define SMI_CLK_ENB            21
-#define GMAC_CLK_ENB           23
-#define USBD_CLK_ENB           24
-#define USBH_CLK_ENB           25
-#define C3_CLK_ENB             31
-
-#define RAS_CLK_ENB            (MISC_BASE + 0x034)
-
-#define PRSC1_CLK_CFG          (MISC_BASE + 0x044)
-#define PRSC2_CLK_CFG          (MISC_BASE + 0x048)
-#define PRSC3_CLK_CFG          (MISC_BASE + 0x04C)
-/* gpt synthesizer register masks */
-#define GPT_MSCALE_SHIFT       0
-#define GPT_MSCALE_MASK                0xFFF
-#define GPT_NSCALE_SHIFT       12
-#define GPT_NSCALE_MASK                0xF
-
-#define AMEM_CLK_CFG           (MISC_BASE + 0x050)
-#define EXPI_CLK_CFG           (MISC_BASE + 0x054)
-#define CLCD_CLK_SYNT          (MISC_BASE + 0x05C)
-#define FIRDA_CLK_SYNT         (MISC_BASE + 0x060)
-#define UART_CLK_SYNT          (MISC_BASE + 0x064)
-#define GMAC_CLK_SYNT          (MISC_BASE + 0x068)
-#define RAS1_CLK_SYNT          (MISC_BASE + 0x06C)
-#define RAS2_CLK_SYNT          (MISC_BASE + 0x070)
-#define RAS3_CLK_SYNT          (MISC_BASE + 0x074)
-#define RAS4_CLK_SYNT          (MISC_BASE + 0x078)
-/* aux clk synthesiser register masks for irda to ras4 */
-#define AUX_SYNT_ENB           31
-#define AUX_EQ_SEL_SHIFT       30
-#define AUX_EQ_SEL_MASK                1
-#define AUX_EQ1_SEL            0
-#define AUX_EQ2_SEL            1
-#define AUX_XSCALE_SHIFT       16
-#define AUX_XSCALE_MASK                0xFFF
-#define AUX_YSCALE_SHIFT       0
-#define AUX_YSCALE_MASK                0xFFF
-
-/* root clks */
-/* 32 KHz oscillator clock */
-static struct clk osc_32k_clk = {
-       .flags = ALWAYS_ENABLED,
-       .rate = 32000,
-};
-
-/* 24 MHz oscillator clock */
-static struct clk osc_24m_clk = {
-       .flags = ALWAYS_ENABLED,
-       .rate = 24000000,
-};
-
-/* clock derived from 32 KHz osc clk */
-/* rtc clock */
-static struct clk rtc_clk = {
-       .pclk = &osc_32k_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = RTC_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* clock derived from 24 MHz osc clk */
-/* pll masks structure */
-static struct pll_clk_masks pll1_masks = {
-       .mode_mask = PLL_MODE_MASK,
-       .mode_shift = PLL_MODE_SHIFT,
-       .norm_fdbk_m_mask = PLL_NORM_FDBK_M_MASK,
-       .norm_fdbk_m_shift = PLL_NORM_FDBK_M_SHIFT,
-       .dith_fdbk_m_mask = PLL_DITH_FDBK_M_MASK,
-       .dith_fdbk_m_shift = PLL_DITH_FDBK_M_SHIFT,
-       .div_p_mask = PLL_DIV_P_MASK,
-       .div_p_shift = PLL_DIV_P_SHIFT,
-       .div_n_mask = PLL_DIV_N_MASK,
-       .div_n_shift = PLL_DIV_N_SHIFT,
-};
-
-/* pll1 configuration structure */
-static struct pll_clk_config pll1_config = {
-       .mode_reg = PLL1_CTR,
-       .cfg_reg = PLL1_FRQ,
-       .masks = &pll1_masks,
-};
-
-/* pll rate configuration table, in ascending order of rates */
-struct pll_rate_tbl pll_rtbl[] = {
-       {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* 266 MHz */
-       {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* 332 MHz */
-};
-
-/* PLL1 clock */
-static struct clk pll1_clk = {
-       .flags = ENABLED_ON_INIT,
-       .pclk = &osc_24m_clk,
-       .en_reg = PLL1_CTR,
-       .en_reg_bit = PLL_ENABLE,
-       .calc_rate = &pll_calc_rate,
-       .recalc = &pll_clk_recalc,
-       .set_rate = &pll_clk_set_rate,
-       .rate_config = {pll_rtbl, ARRAY_SIZE(pll_rtbl), 1},
-       .private_data = &pll1_config,
-};
-
-/* PLL3 48 MHz clock */
-static struct clk pll3_48m_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &osc_24m_clk,
-       .rate = 48000000,
-};
-
-/* watch dog timer clock */
-static struct clk wdt_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &osc_24m_clk,
-       .recalc = &follow_parent,
-};
-
-/* clock derived from pll1 clk */
-/* cpu clock */
-static struct clk cpu_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &pll1_clk,
-       .recalc = &follow_parent,
-};
-
-/* ahb masks structure */
-static struct bus_clk_masks ahb_masks = {
-       .mask = PLL_HCLK_RATIO_MASK,
-       .shift = PLL_HCLK_RATIO_SHIFT,
-};
-
-/* ahb configuration structure */
-static struct bus_clk_config ahb_config = {
-       .reg = CORE_CLK_CFG,
-       .masks = &ahb_masks,
-};
-
-/* ahb rate configuration table, in ascending order of rates */
-struct bus_rate_tbl bus_rtbl[] = {
-       {.div = 3}, /* == parent divided by 4 */
-       {.div = 2}, /* == parent divided by 3 */
-       {.div = 1}, /* == parent divided by 2 */
-       {.div = 0}, /* == parent divided by 1 */
-};
-
-/* ahb clock */
-static struct clk ahb_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &pll1_clk,
-       .calc_rate = &bus_calc_rate,
-       .recalc = &bus_clk_recalc,
-       .set_rate = &bus_clk_set_rate,
-       .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
-       .private_data = &ahb_config,
-};
-
-/* auxiliary synthesizers masks */
-static struct aux_clk_masks aux_masks = {
-       .eq_sel_mask = AUX_EQ_SEL_MASK,
-       .eq_sel_shift = AUX_EQ_SEL_SHIFT,
-       .eq1_mask = AUX_EQ1_SEL,
-       .eq2_mask = AUX_EQ2_SEL,
-       .xscale_sel_mask = AUX_XSCALE_MASK,
-       .xscale_sel_shift = AUX_XSCALE_SHIFT,
-       .yscale_sel_mask = AUX_YSCALE_MASK,
-       .yscale_sel_shift = AUX_YSCALE_SHIFT,
-};
-
-/* uart synth configurations */
-static struct aux_clk_config uart_synth_config = {
-       .synth_reg = UART_CLK_SYNT,
-       .masks = &aux_masks,
-};
-
-/* aux rate configuration table, in ascending order of rates */
-struct aux_rate_tbl aux_rtbl[] = {
-       /* For PLL1 = 332 MHz */
-       {.xscale = 1, .yscale = 8, .eq = 1}, /* 41.5 MHz */
-       {.xscale = 1, .yscale = 4, .eq = 1}, /* 83 MHz */
-       {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
-};
-
-/* uart synth clock */
-static struct clk uart_synth_clk = {
-       .en_reg = UART_CLK_SYNT,
-       .en_reg_bit = AUX_SYNT_ENB,
-       .pclk = &pll1_clk,
-       .calc_rate = &aux_calc_rate,
-       .recalc = &aux_clk_recalc,
-       .set_rate = &aux_clk_set_rate,
-       .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 1},
-       .private_data = &uart_synth_config,
-};
-
-/* uart parents */
-static struct pclk_info uart_pclk_info[] = {
-       {
-               .pclk = &uart_synth_clk,
-               .pclk_val = AUX_CLK_PLL1_VAL,
-       }, {
-               .pclk = &pll3_48m_clk,
-               .pclk_val = AUX_CLK_PLL3_VAL,
-       },
-};
-
-/* uart parent select structure */
-static struct pclk_sel uart_pclk_sel = {
-       .pclk_info = uart_pclk_info,
-       .pclk_count = ARRAY_SIZE(uart_pclk_info),
-       .pclk_sel_reg = PERIP_CLK_CFG,
-       .pclk_sel_mask = UART_CLK_MASK,
-};
-
-/* uart clock */
-static struct clk uart_clk = {
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = UART_CLK_ENB,
-       .pclk_sel = &uart_pclk_sel,
-       .pclk_sel_shift = UART_CLK_SHIFT,
-       .recalc = &follow_parent,
-};
-
-/* firda configurations */
-static struct aux_clk_config firda_synth_config = {
-       .synth_reg = FIRDA_CLK_SYNT,
-       .masks = &aux_masks,
-};
-
-/* firda synth clock */
-static struct clk firda_synth_clk = {
-       .en_reg = FIRDA_CLK_SYNT,
-       .en_reg_bit = AUX_SYNT_ENB,
-       .pclk = &pll1_clk,
-       .calc_rate = &aux_calc_rate,
-       .recalc = &aux_clk_recalc,
-       .set_rate = &aux_clk_set_rate,
-       .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 1},
-       .private_data = &firda_synth_config,
-};
-
-/* firda parents */
-static struct pclk_info firda_pclk_info[] = {
-       {
-               .pclk = &firda_synth_clk,
-               .pclk_val = AUX_CLK_PLL1_VAL,
-       }, {
-               .pclk = &pll3_48m_clk,
-               .pclk_val = AUX_CLK_PLL3_VAL,
-       },
-};
-
-/* firda parent select structure */
-static struct pclk_sel firda_pclk_sel = {
-       .pclk_info = firda_pclk_info,
-       .pclk_count = ARRAY_SIZE(firda_pclk_info),
-       .pclk_sel_reg = PERIP_CLK_CFG,
-       .pclk_sel_mask = FIRDA_CLK_MASK,
-};
-
-/* firda clock */
-static struct clk firda_clk = {
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = FIRDA_CLK_ENB,
-       .pclk_sel = &firda_pclk_sel,
-       .pclk_sel_shift = FIRDA_CLK_SHIFT,
-       .recalc = &follow_parent,
-};
-
-/* gpt synthesizer masks */
-static struct gpt_clk_masks gpt_masks = {
-       .mscale_sel_mask = GPT_MSCALE_MASK,
-       .mscale_sel_shift = GPT_MSCALE_SHIFT,
-       .nscale_sel_mask = GPT_NSCALE_MASK,
-       .nscale_sel_shift = GPT_NSCALE_SHIFT,
-};
-
-/* gpt rate configuration table, in ascending order of rates */
-struct gpt_rate_tbl gpt_rtbl[] = {
-       /* For pll1 = 332 MHz */
-       {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
-       {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
-       {.mscale = 1, .nscale = 0}, /* 83 MHz */
-};
-
-/* gpt0 synth clk config*/
-static struct gpt_clk_config gpt0_synth_config = {
-       .synth_reg = PRSC1_CLK_CFG,
-       .masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt0_synth_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &pll1_clk,
-       .calc_rate = &gpt_calc_rate,
-       .recalc = &gpt_clk_recalc,
-       .set_rate = &gpt_clk_set_rate,
-       .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
-       .private_data = &gpt0_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt0_pclk_info[] = {
-       {
-               .pclk = &gpt0_synth_clk,
-               .pclk_val = AUX_CLK_PLL1_VAL,
-       }, {
-               .pclk = &pll3_48m_clk,
-               .pclk_val = AUX_CLK_PLL3_VAL,
-       },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt0_pclk_sel = {
-       .pclk_info = gpt0_pclk_info,
-       .pclk_count = ARRAY_SIZE(gpt0_pclk_info),
-       .pclk_sel_reg = PERIP_CLK_CFG,
-       .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt0 timer clock */
-static struct clk gpt0_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk_sel = &gpt0_pclk_sel,
-       .pclk_sel_shift = GPT0_CLK_SHIFT,
-       .recalc = &follow_parent,
-};
-
-/* gpt1 synth clk configurations */
-static struct gpt_clk_config gpt1_synth_config = {
-       .synth_reg = PRSC2_CLK_CFG,
-       .masks = &gpt_masks,
-};
-
-/* gpt1 synth clock */
-static struct clk gpt1_synth_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &pll1_clk,
-       .calc_rate = &gpt_calc_rate,
-       .recalc = &gpt_clk_recalc,
-       .set_rate = &gpt_clk_set_rate,
-       .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
-       .private_data = &gpt1_synth_config,
-};
-
-static struct pclk_info gpt1_pclk_info[] = {
-       {
-               .pclk = &gpt1_synth_clk,
-               .pclk_val = AUX_CLK_PLL1_VAL,
-       }, {
-               .pclk = &pll3_48m_clk,
-               .pclk_val = AUX_CLK_PLL3_VAL,
-       },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt1_pclk_sel = {
-       .pclk_info = gpt1_pclk_info,
-       .pclk_count = ARRAY_SIZE(gpt1_pclk_info),
-       .pclk_sel_reg = PERIP_CLK_CFG,
-       .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt1 timer clock */
-static struct clk gpt1_clk = {
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = GPT1_CLK_ENB,
-       .pclk_sel = &gpt1_pclk_sel,
-       .pclk_sel_shift = GPT1_CLK_SHIFT,
-       .recalc = &follow_parent,
-};
-
-/* gpt2 synth clk configurations */
-static struct gpt_clk_config gpt2_synth_config = {
-       .synth_reg = PRSC3_CLK_CFG,
-       .masks = &gpt_masks,
-};
-
-/* gpt1 synth clock */
-static struct clk gpt2_synth_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &pll1_clk,
-       .calc_rate = &gpt_calc_rate,
-       .recalc = &gpt_clk_recalc,
-       .set_rate = &gpt_clk_set_rate,
-       .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
-       .private_data = &gpt2_synth_config,
-};
-
-static struct pclk_info gpt2_pclk_info[] = {
-       {
-               .pclk = &gpt2_synth_clk,
-               .pclk_val = AUX_CLK_PLL1_VAL,
-       }, {
-               .pclk = &pll3_48m_clk,
-               .pclk_val = AUX_CLK_PLL3_VAL,
-       },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt2_pclk_sel = {
-       .pclk_info = gpt2_pclk_info,
-       .pclk_count = ARRAY_SIZE(gpt2_pclk_info),
-       .pclk_sel_reg = PERIP_CLK_CFG,
-       .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt2 timer clock */
-static struct clk gpt2_clk = {
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = GPT2_CLK_ENB,
-       .pclk_sel = &gpt2_pclk_sel,
-       .pclk_sel_shift = GPT2_CLK_SHIFT,
-       .recalc = &follow_parent,
-};
-
-/* clock derived from pll3 clk */
-/* usbh clock */
-static struct clk usbh_clk = {
-       .pclk = &pll3_48m_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = USBH_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* usbd clock */
-static struct clk usbd_clk = {
-       .pclk = &pll3_48m_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = USBD_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* clock derived from usbh clk */
-/* usbh0 clock */
-static struct clk usbh0_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &usbh_clk,
-       .recalc = &follow_parent,
-};
-
-/* usbh1 clock */
-static struct clk usbh1_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &usbh_clk,
-       .recalc = &follow_parent,
-};
-
-/* clock derived from ahb clk */
-/* apb masks structure */
-static struct bus_clk_masks apb_masks = {
-       .mask = HCLK_PCLK_RATIO_MASK,
-       .shift = HCLK_PCLK_RATIO_SHIFT,
-};
-
-/* apb configuration structure */
-static struct bus_clk_config apb_config = {
-       .reg = CORE_CLK_CFG,
-       .masks = &apb_masks,
-};
-
-/* apb clock */
-static struct clk apb_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &ahb_clk,
-       .calc_rate = &bus_calc_rate,
-       .recalc = &bus_clk_recalc,
-       .set_rate = &bus_clk_set_rate,
-       .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
-       .private_data = &apb_config,
-};
-
-/* i2c clock */
-static struct clk i2c_clk = {
-       .pclk = &ahb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = I2C_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* dma clock */
-static struct clk dma_clk = {
-       .pclk = &ahb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = DMA_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* jpeg clock */
-static struct clk jpeg_clk = {
-       .pclk = &ahb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = JPEG_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* gmac clock */
-static struct clk gmac_clk = {
-       .pclk = &ahb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = GMAC_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* smi clock */
-static struct clk smi_clk = {
-       .pclk = &ahb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = SMI_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* c3 clock */
-static struct clk c3_clk = {
-       .pclk = &ahb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = C3_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* clock derived from apb clk */
-/* adc clock */
-static struct clk adc_clk = {
-       .pclk = &apb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = ADC_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
-/* emi clock */
-static struct clk emi_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &ahb_clk,
-       .recalc = &follow_parent,
-};
-#endif
-
-/* ssp clock */
-static struct clk ssp0_clk = {
-       .pclk = &apb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = SSP_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* gpio clock */
-static struct clk gpio_clk = {
-       .pclk = &apb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = GPIO_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-static struct clk dummy_apb_pclk;
-
-#if defined(CONFIG_MACH_SPEAR300) || defined(CONFIG_MACH_SPEAR310) || \
-       defined(CONFIG_MACH_SPEAR320)
-/* fsmc clock */
-static struct clk fsmc_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &ahb_clk,
-       .recalc = &follow_parent,
-};
-#endif
-
-/* common clocks to spear310 and spear320 */
-#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
-/* uart1 clock */
-static struct clk uart1_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &apb_clk,
-       .recalc = &follow_parent,
-};
-
-/* uart2 clock */
-static struct clk uart2_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &apb_clk,
-       .recalc = &follow_parent,
-};
-#endif /* CONFIG_MACH_SPEAR310 || CONFIG_MACH_SPEAR320 */
-
-/* common clocks to spear300 and spear320 */
-#if defined(CONFIG_MACH_SPEAR300) || defined(CONFIG_MACH_SPEAR320)
-/* clcd clock */
-static struct clk clcd_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &pll3_48m_clk,
-       .recalc = &follow_parent,
-};
-
-/* sdhci clock */
-static struct clk sdhci_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &ahb_clk,
-       .recalc = &follow_parent,
-};
-#endif /* CONFIG_MACH_SPEAR300 || CONFIG_MACH_SPEAR320 */
-
-/* spear300 machine specific clock structures */
-#ifdef CONFIG_MACH_SPEAR300
-/* gpio1 clock */
-static struct clk gpio1_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &apb_clk,
-       .recalc = &follow_parent,
-};
-
-/* keyboard clock */
-static struct clk kbd_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &apb_clk,
-       .recalc = &follow_parent,
-};
-
-#endif
-
-/* spear310 machine specific clock structures */
-#ifdef CONFIG_MACH_SPEAR310
-/* uart3 clock */
-static struct clk uart3_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &apb_clk,
-       .recalc = &follow_parent,
-};
-
-/* uart4 clock */
-static struct clk uart4_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &apb_clk,
-       .recalc = &follow_parent,
-};
-
-/* uart5 clock */
-static struct clk uart5_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &apb_clk,
-       .recalc = &follow_parent,
-};
-#endif
-
-/* spear320 machine specific clock structures */
-#ifdef CONFIG_MACH_SPEAR320
-/* can0 clock */
-static struct clk can0_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &apb_clk,
-       .recalc = &follow_parent,
-};
-
-/* can1 clock */
-static struct clk can1_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &apb_clk,
-       .recalc = &follow_parent,
-};
-
-/* i2c1 clock */
-static struct clk i2c1_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &ahb_clk,
-       .recalc = &follow_parent,
-};
-
-/* ssp1 clock */
-static struct clk ssp1_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &apb_clk,
-       .recalc = &follow_parent,
-};
-
-/* ssp2 clock */
-static struct clk ssp2_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &apb_clk,
-       .recalc = &follow_parent,
-};
-
-/* pwm clock */
-static struct clk pwm_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &apb_clk,
-       .recalc = &follow_parent,
-};
-#endif
-
-/* array of all spear 3xx clock lookups */
-static struct clk_lookup spear_clk_lookups[] = {
-       CLKDEV_INIT(NULL, "apb_pclk", &dummy_apb_pclk),
-       /* root clks */
-       CLKDEV_INIT(NULL, "osc_32k_clk", &osc_32k_clk),
-       CLKDEV_INIT(NULL, "osc_24m_clk", &osc_24m_clk),
-       /* clock derived from 32 KHz osc clk */
-       CLKDEV_INIT("fc900000.rtc", NULL, &rtc_clk),
-       /* clock derived from 24 MHz osc clk */
-       CLKDEV_INIT(NULL, "pll1_clk", &pll1_clk),
-       CLKDEV_INIT(NULL, "pll3_48m_clk", &pll3_48m_clk),
-       CLKDEV_INIT("fc880000.wdt", NULL, &wdt_clk),
-       /* clock derived from pll1 clk */
-       CLKDEV_INIT(NULL, "cpu_clk", &cpu_clk),
-       CLKDEV_INIT(NULL, "ahb_clk", &ahb_clk),
-       CLKDEV_INIT(NULL, "uart_synth_clk", &uart_synth_clk),
-       CLKDEV_INIT(NULL, "firda_synth_clk", &firda_synth_clk),
-       CLKDEV_INIT(NULL, "gpt0_synth_clk", &gpt0_synth_clk),
-       CLKDEV_INIT(NULL, "gpt1_synth_clk", &gpt1_synth_clk),
-       CLKDEV_INIT(NULL, "gpt2_synth_clk", &gpt2_synth_clk),
-       CLKDEV_INIT("d0000000.serial", NULL, &uart_clk),
-       CLKDEV_INIT("firda", NULL, &firda_clk),
-       CLKDEV_INIT("gpt0", NULL, &gpt0_clk),
-       CLKDEV_INIT("gpt1", NULL, &gpt1_clk),
-       CLKDEV_INIT("gpt2", NULL, &gpt2_clk),
-       /* clock derived from pll3 clk */
-       CLKDEV_INIT("designware_udc", NULL, &usbd_clk),
-       CLKDEV_INIT(NULL, "usbh_clk", &usbh_clk),
-       /* clock derived from usbh clk */
-       CLKDEV_INIT(NULL, "usbh.0_clk", &usbh0_clk),
-       CLKDEV_INIT(NULL, "usbh.1_clk", &usbh1_clk),
-       /* clock derived from ahb clk */
-       CLKDEV_INIT(NULL, "apb_clk", &apb_clk),
-       CLKDEV_INIT("d0180000.i2c", NULL, &i2c_clk),
-       CLKDEV_INIT("fc400000.dma", NULL, &dma_clk),
-       CLKDEV_INIT("jpeg", NULL, &jpeg_clk),
-       CLKDEV_INIT("e0800000.eth", NULL, &gmac_clk),
-       CLKDEV_INIT("fc000000.flash", NULL, &smi_clk),
-       CLKDEV_INIT("c3", NULL, &c3_clk),
-       /* clock derived from apb clk */
-       CLKDEV_INIT("adc", NULL, &adc_clk),
-       CLKDEV_INIT("d0100000.spi", NULL, &ssp0_clk),
-       CLKDEV_INIT("fc980000.gpio", NULL, &gpio_clk),
-};
-
-/* array of all spear 300 clock lookups */
-#ifdef CONFIG_MACH_SPEAR300
-static struct clk_lookup spear300_clk_lookups[] = {
-       CLKDEV_INIT("60000000.clcd", NULL, &clcd_clk),
-       CLKDEV_INIT("94000000.flash", NULL, &fsmc_clk),
-       CLKDEV_INIT("a9000000.gpio", NULL, &gpio1_clk),
-       CLKDEV_INIT("a0000000.kbd", NULL, &kbd_clk),
-       CLKDEV_INIT("70000000.sdhci", NULL, &sdhci_clk),
-};
-
-void __init spear300_clk_init(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
-               clk_register(&spear_clk_lookups[i]);
-
-       for (i = 0; i < ARRAY_SIZE(spear300_clk_lookups); i++)
-               clk_register(&spear300_clk_lookups[i]);
-
-       clk_init();
-}
-#endif
-
-/* array of all spear 310 clock lookups */
-#ifdef CONFIG_MACH_SPEAR310
-static struct clk_lookup spear310_clk_lookups[] = {
-       CLKDEV_INIT("44000000.flash", NULL, &fsmc_clk),
-       CLKDEV_INIT(NULL, "emi", &emi_clk),
-       CLKDEV_INIT("b2000000.serial", NULL, &uart1_clk),
-       CLKDEV_INIT("b2080000.serial", NULL, &uart2_clk),
-       CLKDEV_INIT("b2100000.serial", NULL, &uart3_clk),
-       CLKDEV_INIT("b2180000.serial", NULL, &uart4_clk),
-       CLKDEV_INIT("b2200000.serial", NULL, &uart5_clk),
-};
-
-void __init spear310_clk_init(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
-               clk_register(&spear_clk_lookups[i]);
-
-       for (i = 0; i < ARRAY_SIZE(spear310_clk_lookups); i++)
-               clk_register(&spear310_clk_lookups[i]);
-
-       clk_init();
-}
-#endif
-
-/* array of all spear 320 clock lookups */
-#ifdef CONFIG_MACH_SPEAR320
-static struct clk_lookup spear320_clk_lookups[] = {
-       CLKDEV_INIT("90000000.clcd", NULL, &clcd_clk),
-       CLKDEV_INIT("4c000000.flash", NULL, &fsmc_clk),
-       CLKDEV_INIT("a7000000.i2c", NULL, &i2c1_clk),
-       CLKDEV_INIT(NULL, "emi", &emi_clk),
-       CLKDEV_INIT("pwm", NULL, &pwm_clk),
-       CLKDEV_INIT("70000000.sdhci", NULL, &sdhci_clk),
-       CLKDEV_INIT("c_can_platform.0", NULL, &can0_clk),
-       CLKDEV_INIT("c_can_platform.1", NULL, &can1_clk),
-       CLKDEV_INIT("a5000000.spi", NULL, &ssp1_clk),
-       CLKDEV_INIT("a6000000.spi", NULL, &ssp2_clk),
-       CLKDEV_INIT("a3000000.serial", NULL, &uart1_clk),
-       CLKDEV_INIT("a4000000.serial", NULL, &uart2_clk),
-};
-
-void __init spear320_clk_init(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
-               clk_register(&spear_clk_lookups[i]);
-
-       for (i = 0; i < ARRAY_SIZE(spear320_clk_lookups); i++)
-               clk_register(&spear320_clk_lookups[i]);
-
-       clk_init();
-}
-#endif
index bdb304551cafe3a90679bb3a9e8ca5e6ce54c713..4a95b9453c2a5ce67bb3f636c016d9c6c4e246bc 100644 (file)
@@ -27,28 +27,11 @@ extern struct pl022_ssp_controller pl022_plat_data;
 extern struct pl08x_platform_data pl080_plat_data;
 
 /* Add spear3xx family function declarations here */
-void __init spear_setup_timer(resource_size_t base, int irq);
+void __init spear_setup_of_timer(void);
+void __init spear3xx_clk_init(void);
 void __init spear3xx_map_io(void);
 void __init spear3xx_dt_init_irq(void);
 
 void spear_restart(char, const char *);
 
-/* spear300 declarations */
-#ifdef CONFIG_MACH_SPEAR300
-void __init spear300_clk_init(void);
-
-#endif /* CONFIG_MACH_SPEAR300 */
-
-/* spear310 declarations */
-#ifdef CONFIG_MACH_SPEAR310
-void __init spear310_clk_init(void);
-
-#endif /* CONFIG_MACH_SPEAR310 */
-
-/* spear320 declarations */
-#ifdef CONFIG_MACH_SPEAR320
-void __init spear320_clk_init(void);
-
-#endif /* CONFIG_MACH_SPEAR320 */
-
 #endif /* __MACH_GENERIC_H */
index 319620a1afb4ea517654c7861caaac8668566bc0..51bd62a0254c814acca905b44696e834700c9cb2 100644 (file)
@@ -16,7 +16,6 @@
 
 /* FIXME: probe all these from DT */
 #define SPEAR3XX_IRQ_INTRCOMM_RAS_ARM          1
-#define SPEAR3XX_IRQ_CPU_GPT1_1                        2
 #define SPEAR3XX_IRQ_GEN_RAS_1                 28
 #define SPEAR3XX_IRQ_GEN_RAS_2                 29
 #define SPEAR3XX_IRQ_GEN_RAS_3                 30
index e0ab72e61507d27254bd016703106a54544536f7..18e2ac576f25cc209880e4f239f40b92fcadee89 100644 (file)
@@ -14,6 +14,8 @@
 #ifndef __MACH_MISC_REGS_H
 #define __MACH_MISC_REGS_H
 
+#include <mach/spear.h>
+
 #define MISC_BASE              IOMEM(VA_SPEAR3XX_ICM3_MISC_REG_BASE)
 #define DMA_CHN_CFG            (MISC_BASE + 0x0A0)
 
index 6d4dadc67633bd119602839d5ac502de0afab79c..51eb953148a998733f656670cb24705b03f0fbc9 100644 (file)
@@ -26,7 +26,6 @@
 /* ML1 - Multi Layer CPU Subsystem */
 #define SPEAR3XX_ICM3_ML1_2_BASE       UL(0xF0000000)
 #define VA_SPEAR6XX_ML_CPU_BASE                UL(0xF0000000)
-#define SPEAR3XX_CPU_TMR_BASE          UL(0xF0000000)
 
 /* ICM3 - Basic Subsystem */
 #define SPEAR3XX_ICM3_SMI_CTRL_BASE    UL(0xFC000000)
 #define SPEAR_SYS_CTRL_BASE            SPEAR3XX_ICM3_SYS_CTRL_BASE
 #define VA_SPEAR_SYS_CTRL_BASE         VA_SPEAR3XX_ICM3_SYS_CTRL_BASE
 
+/* SPEAr320 Macros */
+#define SPEAR320_SOC_CONFIG_BASE       UL(0xB3000000)
+#define VA_SPEAR320_SOC_CONFIG_BASE    UL(0xFE000000)
+#define SPEAR320_CONTROL_REG           IOMEM(VA_SPEAR320_SOC_CONFIG_BASE)
+#define SPEAR320_EXT_CTRL_REG          IOMEM(VA_SPEAR320_SOC_CONFIG_BASE + 0x0018)
+       #define SPEAR320_UARTX_PCLK_MASK                0x1
+       #define SPEAR320_UART2_PCLK_SHIFT               8
+       #define SPEAR320_UART3_PCLK_SHIFT               9
+       #define SPEAR320_UART4_PCLK_SHIFT               10
+       #define SPEAR320_UART5_PCLK_SHIFT               11
+       #define SPEAR320_UART6_PCLK_SHIFT               12
+       #define SPEAR320_RS485_PCLK_SHIFT               13
+
 #endif /* __MACH_SPEAR3XX_H */
index f75fe25a620ce5256222bb873b9701f41406eb50..f74a05bdb829d5edb37c76bbb6395d39d661a431 100644 (file)
@@ -337,7 +337,6 @@ static const char * const spear300_dt_board_compat[] = {
 static void __init spear300_map_io(void)
 {
        spear3xx_map_io();
-       spear300_clk_init();
 }
 
 DT_MACHINE_START(SPEAR300_DT, "ST SPEAr300 SoC with Flattened Device Tree")
index f0842a58dc024c585dda40ec4aab283d61983046..84dfb09007470062cc6f14633c1d455e98562ca0 100644 (file)
@@ -478,7 +478,6 @@ static const char * const spear310_dt_board_compat[] = {
 static void __init spear310_map_io(void)
 {
        spear3xx_map_io();
-       spear310_clk_init();
 }
 
 DT_MACHINE_START(SPEAR310_DT, "ST SPEAr310 SoC with Flattened Device Tree")
index e8caeef50a5ccec4018dbe5d68930855b5f7cf13..a88fa841d29d021068d644a16d7c60eb7d7df93f 100644 (file)
@@ -27,7 +27,6 @@
 #define SPEAR320_UART2_BASE            UL(0xA4000000)
 #define SPEAR320_SSP0_BASE             UL(0xA5000000)
 #define SPEAR320_SSP1_BASE             UL(0xA6000000)
-#define SPEAR320_SOC_CONFIG_BASE       UL(0xB3000000)
 
 /* Interrupt registers offsets and masks */
 #define SPEAR320_INT_STS_MASK_REG              0x04
@@ -481,10 +480,19 @@ static const char * const spear320_dt_board_compat[] = {
        NULL,
 };
 
+struct map_desc spear320_io_desc[] __initdata = {
+       {
+               .virtual        = VA_SPEAR320_SOC_CONFIG_BASE,
+               .pfn            = __phys_to_pfn(SPEAR320_SOC_CONFIG_BASE),
+               .length         = SZ_16M,
+               .type           = MT_DEVICE
+       },
+};
+
 static void __init spear320_map_io(void)
 {
+       iotable_init(spear320_io_desc, ARRAY_SIZE(spear320_io_desc));
        spear3xx_map_io();
-       spear320_clk_init();
 }
 
 DT_MACHINE_START(SPEAR320_DT, "ST SPEAr320 SoC with Flattened Device Tree")
index 826ac20ef1e7a3e5202fded05a8e5653c5b439b9..f22419ed74a82cf394c69d0e5d51d2e53fad447d 100644 (file)
@@ -90,6 +90,8 @@ static void __init spear3xx_timer_init(void)
        char pclk_name[] = "pll3_48m_clk";
        struct clk *gpt_clk, *pclk;
 
+       spear3xx_clk_init();
+
        /* get the system timer clock */
        gpt_clk = clk_get_sys("gpt0", NULL);
        if (IS_ERR(gpt_clk)) {
@@ -109,7 +111,7 @@ static void __init spear3xx_timer_init(void)
        clk_put(gpt_clk);
        clk_put(pclk);
 
-       spear_setup_timer(SPEAR3XX_CPU_TMR_BASE, SPEAR3XX_IRQ_CPU_GPT1_1);
+       spear_setup_of_timer();
 }
 
 struct sys_timer spear3xx_timer = {
index 76e5750552fcef01705786da3c0db2f7e2088151..898831d93f370c2014f830d4d16aa398408fb2d0 100644 (file)
@@ -3,4 +3,4 @@
 #
 
 # common files
-obj-y  += clock.o spear6xx.o
+obj-y  += spear6xx.o
diff --git a/arch/arm/mach-spear6xx/clock.c b/arch/arm/mach-spear6xx/clock.c
deleted file mode 100644 (file)
index bef77d4..0000000
+++ /dev/null
@@ -1,789 +0,0 @@
-/*
- * arch/arm/mach-spear6xx/clock.c
- *
- * SPEAr6xx machines clock framework source file
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <plat/clock.h>
-#include <mach/misc_regs.h>
-#include <mach/spear.h>
-
-#define PLL1_CTR               (MISC_BASE + 0x008)
-#define PLL1_FRQ               (MISC_BASE + 0x00C)
-#define PLL1_MOD               (MISC_BASE + 0x010)
-#define PLL2_CTR               (MISC_BASE + 0x014)
-/* PLL_CTR register masks */
-#define PLL_ENABLE             2
-#define PLL_MODE_SHIFT         4
-#define PLL_MODE_MASK          0x3
-#define PLL_MODE_NORMAL                0
-#define PLL_MODE_FRACTION      1
-#define PLL_MODE_DITH_DSB      2
-#define PLL_MODE_DITH_SSB      3
-
-#define PLL2_FRQ               (MISC_BASE + 0x018)
-/* PLL FRQ register masks */
-#define PLL_DIV_N_SHIFT                0
-#define PLL_DIV_N_MASK         0xFF
-#define PLL_DIV_P_SHIFT                8
-#define PLL_DIV_P_MASK         0x7
-#define PLL_NORM_FDBK_M_SHIFT  24
-#define PLL_NORM_FDBK_M_MASK   0xFF
-#define PLL_DITH_FDBK_M_SHIFT  16
-#define PLL_DITH_FDBK_M_MASK   0xFFFF
-
-#define PLL2_MOD               (MISC_BASE + 0x01C)
-#define PLL_CLK_CFG            (MISC_BASE + 0x020)
-#define CORE_CLK_CFG           (MISC_BASE + 0x024)
-/* CORE CLK CFG register masks */
-#define PLL_HCLK_RATIO_SHIFT   10
-#define PLL_HCLK_RATIO_MASK    0x3
-#define HCLK_PCLK_RATIO_SHIFT  8
-#define HCLK_PCLK_RATIO_MASK   0x3
-
-#define PERIP_CLK_CFG          (MISC_BASE + 0x028)
-/* PERIP_CLK_CFG register masks */
-#define CLCD_CLK_SHIFT         2
-#define CLCD_CLK_MASK          0x3
-#define UART_CLK_SHIFT         4
-#define UART_CLK_MASK          0x1
-#define FIRDA_CLK_SHIFT                5
-#define FIRDA_CLK_MASK         0x3
-#define GPT0_CLK_SHIFT         8
-#define GPT1_CLK_SHIFT         10
-#define GPT2_CLK_SHIFT         11
-#define GPT3_CLK_SHIFT         12
-#define GPT_CLK_MASK           0x1
-#define AUX_CLK_PLL3_VAL       0
-#define AUX_CLK_PLL1_VAL       1
-
-#define PERIP1_CLK_ENB         (MISC_BASE + 0x02C)
-/* PERIP1_CLK_ENB register masks */
-#define UART0_CLK_ENB          3
-#define UART1_CLK_ENB          4
-#define SSP0_CLK_ENB           5
-#define SSP1_CLK_ENB           6
-#define I2C_CLK_ENB            7
-#define JPEG_CLK_ENB           8
-#define FSMC_CLK_ENB           9
-#define FIRDA_CLK_ENB          10
-#define GPT2_CLK_ENB           11
-#define GPT3_CLK_ENB           12
-#define GPIO2_CLK_ENB          13
-#define SSP2_CLK_ENB           14
-#define ADC_CLK_ENB            15
-#define GPT1_CLK_ENB           11
-#define RTC_CLK_ENB            17
-#define GPIO1_CLK_ENB          18
-#define DMA_CLK_ENB            19
-#define SMI_CLK_ENB            21
-#define CLCD_CLK_ENB           22
-#define GMAC_CLK_ENB           23
-#define USBD_CLK_ENB           24
-#define USBH0_CLK_ENB          25
-#define USBH1_CLK_ENB          26
-
-#define PRSC1_CLK_CFG          (MISC_BASE + 0x044)
-#define PRSC2_CLK_CFG          (MISC_BASE + 0x048)
-#define PRSC3_CLK_CFG          (MISC_BASE + 0x04C)
-/* gpt synthesizer register masks */
-#define GPT_MSCALE_SHIFT       0
-#define GPT_MSCALE_MASK                0xFFF
-#define GPT_NSCALE_SHIFT       12
-#define GPT_NSCALE_MASK                0xF
-
-#define AMEM_CLK_CFG           (MISC_BASE + 0x050)
-#define EXPI_CLK_CFG           (MISC_BASE + 0x054)
-#define CLCD_CLK_SYNT          (MISC_BASE + 0x05C)
-#define FIRDA_CLK_SYNT         (MISC_BASE + 0x060)
-#define UART_CLK_SYNT          (MISC_BASE + 0x064)
-#define GMAC_CLK_SYNT          (MISC_BASE + 0x068)
-#define RAS1_CLK_SYNT          (MISC_BASE + 0x06C)
-#define RAS2_CLK_SYNT          (MISC_BASE + 0x070)
-#define RAS3_CLK_SYNT          (MISC_BASE + 0x074)
-#define RAS4_CLK_SYNT          (MISC_BASE + 0x078)
-/* aux clk synthesiser register masks for irda to ras4 */
-#define AUX_SYNT_ENB           31
-#define AUX_EQ_SEL_SHIFT       30
-#define AUX_EQ_SEL_MASK                1
-#define AUX_EQ1_SEL            0
-#define AUX_EQ2_SEL            1
-#define AUX_XSCALE_SHIFT       16
-#define AUX_XSCALE_MASK                0xFFF
-#define AUX_YSCALE_SHIFT       0
-#define AUX_YSCALE_MASK                0xFFF
-
-/* root clks */
-/* 32 KHz oscillator clock */
-static struct clk osc_32k_clk = {
-       .flags = ALWAYS_ENABLED,
-       .rate = 32000,
-};
-
-/* 30 MHz oscillator clock */
-static struct clk osc_30m_clk = {
-       .flags = ALWAYS_ENABLED,
-       .rate = 30000000,
-};
-
-/* clock derived from 32 KHz osc clk */
-/* rtc clock */
-static struct clk rtc_clk = {
-       .pclk = &osc_32k_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = RTC_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* clock derived from 30 MHz osc clk */
-/* pll masks structure */
-static struct pll_clk_masks pll1_masks = {
-       .mode_mask = PLL_MODE_MASK,
-       .mode_shift = PLL_MODE_SHIFT,
-       .norm_fdbk_m_mask = PLL_NORM_FDBK_M_MASK,
-       .norm_fdbk_m_shift = PLL_NORM_FDBK_M_SHIFT,
-       .dith_fdbk_m_mask = PLL_DITH_FDBK_M_MASK,
-       .dith_fdbk_m_shift = PLL_DITH_FDBK_M_SHIFT,
-       .div_p_mask = PLL_DIV_P_MASK,
-       .div_p_shift = PLL_DIV_P_SHIFT,
-       .div_n_mask = PLL_DIV_N_MASK,
-       .div_n_shift = PLL_DIV_N_SHIFT,
-};
-
-/* pll1 configuration structure */
-static struct pll_clk_config pll1_config = {
-       .mode_reg = PLL1_CTR,
-       .cfg_reg = PLL1_FRQ,
-       .masks = &pll1_masks,
-};
-
-/* pll rate configuration table, in ascending order of rates */
-struct pll_rate_tbl pll_rtbl[] = {
-       {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* 266 MHz */
-       {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* 332 MHz */
-};
-
-/* PLL1 clock */
-static struct clk pll1_clk = {
-       .flags = ENABLED_ON_INIT,
-       .pclk = &osc_30m_clk,
-       .en_reg = PLL1_CTR,
-       .en_reg_bit = PLL_ENABLE,
-       .calc_rate = &pll_calc_rate,
-       .recalc = &pll_clk_recalc,
-       .set_rate = &pll_clk_set_rate,
-       .rate_config = {pll_rtbl, ARRAY_SIZE(pll_rtbl), 1},
-       .private_data = &pll1_config,
-};
-
-/* PLL3 48 MHz clock */
-static struct clk pll3_48m_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &osc_30m_clk,
-       .rate = 48000000,
-};
-
-/* watch dog timer clock */
-static struct clk wdt_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &osc_30m_clk,
-       .recalc = &follow_parent,
-};
-
-/* clock derived from pll1 clk */
-/* cpu clock */
-static struct clk cpu_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &pll1_clk,
-       .recalc = &follow_parent,
-};
-
-/* ahb masks structure */
-static struct bus_clk_masks ahb_masks = {
-       .mask = PLL_HCLK_RATIO_MASK,
-       .shift = PLL_HCLK_RATIO_SHIFT,
-};
-
-/* ahb configuration structure */
-static struct bus_clk_config ahb_config = {
-       .reg = CORE_CLK_CFG,
-       .masks = &ahb_masks,
-};
-
-/* ahb rate configuration table, in ascending order of rates */
-struct bus_rate_tbl bus_rtbl[] = {
-       {.div = 3}, /* == parent divided by 4 */
-       {.div = 2}, /* == parent divided by 3 */
-       {.div = 1}, /* == parent divided by 2 */
-       {.div = 0}, /* == parent divided by 1 */
-};
-
-/* ahb clock */
-static struct clk ahb_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &pll1_clk,
-       .calc_rate = &bus_calc_rate,
-       .recalc = &bus_clk_recalc,
-       .set_rate = &bus_clk_set_rate,
-       .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
-       .private_data = &ahb_config,
-};
-
-/* auxiliary synthesizers masks */
-static struct aux_clk_masks aux_masks = {
-       .eq_sel_mask = AUX_EQ_SEL_MASK,
-       .eq_sel_shift = AUX_EQ_SEL_SHIFT,
-       .eq1_mask = AUX_EQ1_SEL,
-       .eq2_mask = AUX_EQ2_SEL,
-       .xscale_sel_mask = AUX_XSCALE_MASK,
-       .xscale_sel_shift = AUX_XSCALE_SHIFT,
-       .yscale_sel_mask = AUX_YSCALE_MASK,
-       .yscale_sel_shift = AUX_YSCALE_SHIFT,
-};
-
-/* uart configurations */
-static struct aux_clk_config uart_synth_config = {
-       .synth_reg = UART_CLK_SYNT,
-       .masks = &aux_masks,
-};
-
-/* aux rate configuration table, in ascending order of rates */
-struct aux_rate_tbl aux_rtbl[] = {
-       /* For PLL1 = 332 MHz */
-       {.xscale = 1, .yscale = 8, .eq = 1}, /* 41.5 MHz */
-       {.xscale = 1, .yscale = 4, .eq = 1}, /* 83 MHz */
-       {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
-};
-
-/* uart synth clock */
-static struct clk uart_synth_clk = {
-       .en_reg = UART_CLK_SYNT,
-       .en_reg_bit = AUX_SYNT_ENB,
-       .pclk = &pll1_clk,
-       .calc_rate = &aux_calc_rate,
-       .recalc = &aux_clk_recalc,
-       .set_rate = &aux_clk_set_rate,
-       .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
-       .private_data = &uart_synth_config,
-};
-
-/* uart parents */
-static struct pclk_info uart_pclk_info[] = {
-       {
-               .pclk = &uart_synth_clk,
-               .pclk_val = AUX_CLK_PLL1_VAL,
-       }, {
-               .pclk = &pll3_48m_clk,
-               .pclk_val = AUX_CLK_PLL3_VAL,
-       },
-};
-
-/* uart parent select structure */
-static struct pclk_sel uart_pclk_sel = {
-       .pclk_info = uart_pclk_info,
-       .pclk_count = ARRAY_SIZE(uart_pclk_info),
-       .pclk_sel_reg = PERIP_CLK_CFG,
-       .pclk_sel_mask = UART_CLK_MASK,
-};
-
-/* uart0 clock */
-static struct clk uart0_clk = {
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = UART0_CLK_ENB,
-       .pclk_sel = &uart_pclk_sel,
-       .pclk_sel_shift = UART_CLK_SHIFT,
-       .recalc = &follow_parent,
-};
-
-/* uart1 clock */
-static struct clk uart1_clk = {
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = UART1_CLK_ENB,
-       .pclk_sel = &uart_pclk_sel,
-       .pclk_sel_shift = UART_CLK_SHIFT,
-       .recalc = &follow_parent,
-};
-
-/* firda configurations */
-static struct aux_clk_config firda_synth_config = {
-       .synth_reg = FIRDA_CLK_SYNT,
-       .masks = &aux_masks,
-};
-
-/* firda synth clock */
-static struct clk firda_synth_clk = {
-       .en_reg = FIRDA_CLK_SYNT,
-       .en_reg_bit = AUX_SYNT_ENB,
-       .pclk = &pll1_clk,
-       .calc_rate = &aux_calc_rate,
-       .recalc = &aux_clk_recalc,
-       .set_rate = &aux_clk_set_rate,
-       .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
-       .private_data = &firda_synth_config,
-};
-
-/* firda parents */
-static struct pclk_info firda_pclk_info[] = {
-       {
-               .pclk = &firda_synth_clk,
-               .pclk_val = AUX_CLK_PLL1_VAL,
-       }, {
-               .pclk = &pll3_48m_clk,
-               .pclk_val = AUX_CLK_PLL3_VAL,
-       },
-};
-
-/* firda parent select structure */
-static struct pclk_sel firda_pclk_sel = {
-       .pclk_info = firda_pclk_info,
-       .pclk_count = ARRAY_SIZE(firda_pclk_info),
-       .pclk_sel_reg = PERIP_CLK_CFG,
-       .pclk_sel_mask = FIRDA_CLK_MASK,
-};
-
-/* firda clock */
-static struct clk firda_clk = {
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = FIRDA_CLK_ENB,
-       .pclk_sel = &firda_pclk_sel,
-       .pclk_sel_shift = FIRDA_CLK_SHIFT,
-       .recalc = &follow_parent,
-};
-
-/* clcd configurations */
-static struct aux_clk_config clcd_synth_config = {
-       .synth_reg = CLCD_CLK_SYNT,
-       .masks = &aux_masks,
-};
-
-/* firda synth clock */
-static struct clk clcd_synth_clk = {
-       .en_reg = CLCD_CLK_SYNT,
-       .en_reg_bit = AUX_SYNT_ENB,
-       .pclk = &pll1_clk,
-       .calc_rate = &aux_calc_rate,
-       .recalc = &aux_clk_recalc,
-       .set_rate = &aux_clk_set_rate,
-       .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
-       .private_data = &clcd_synth_config,
-};
-
-/* clcd parents */
-static struct pclk_info clcd_pclk_info[] = {
-       {
-               .pclk = &clcd_synth_clk,
-               .pclk_val = AUX_CLK_PLL1_VAL,
-       }, {
-               .pclk = &pll3_48m_clk,
-               .pclk_val = AUX_CLK_PLL3_VAL,
-       },
-};
-
-/* clcd parent select structure */
-static struct pclk_sel clcd_pclk_sel = {
-       .pclk_info = clcd_pclk_info,
-       .pclk_count = ARRAY_SIZE(clcd_pclk_info),
-       .pclk_sel_reg = PERIP_CLK_CFG,
-       .pclk_sel_mask = CLCD_CLK_MASK,
-};
-
-/* clcd clock */
-static struct clk clcd_clk = {
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = CLCD_CLK_ENB,
-       .pclk_sel = &clcd_pclk_sel,
-       .pclk_sel_shift = CLCD_CLK_SHIFT,
-       .recalc = &follow_parent,
-};
-
-/* gpt synthesizer masks */
-static struct gpt_clk_masks gpt_masks = {
-       .mscale_sel_mask = GPT_MSCALE_MASK,
-       .mscale_sel_shift = GPT_MSCALE_SHIFT,
-       .nscale_sel_mask = GPT_NSCALE_MASK,
-       .nscale_sel_shift = GPT_NSCALE_SHIFT,
-};
-
-/* gpt rate configuration table, in ascending order of rates */
-struct gpt_rate_tbl gpt_rtbl[] = {
-       /* For pll1 = 332 MHz */
-       {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
-       {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
-       {.mscale = 1, .nscale = 0}, /* 83 MHz */
-};
-
-/* gpt0 synth clk config*/
-static struct gpt_clk_config gpt0_synth_config = {
-       .synth_reg = PRSC1_CLK_CFG,
-       .masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt0_synth_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &pll1_clk,
-       .calc_rate = &gpt_calc_rate,
-       .recalc = &gpt_clk_recalc,
-       .set_rate = &gpt_clk_set_rate,
-       .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
-       .private_data = &gpt0_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt0_pclk_info[] = {
-       {
-               .pclk = &gpt0_synth_clk,
-               .pclk_val = AUX_CLK_PLL1_VAL,
-       }, {
-               .pclk = &pll3_48m_clk,
-               .pclk_val = AUX_CLK_PLL3_VAL,
-       },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt0_pclk_sel = {
-       .pclk_info = gpt0_pclk_info,
-       .pclk_count = ARRAY_SIZE(gpt0_pclk_info),
-       .pclk_sel_reg = PERIP_CLK_CFG,
-       .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt0 ARM1 subsystem timer clock */
-static struct clk gpt0_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk_sel = &gpt0_pclk_sel,
-       .pclk_sel_shift = GPT0_CLK_SHIFT,
-       .recalc = &follow_parent,
-};
-
-
-/* Note: gpt0 and gpt1 share same parent clocks */
-/* gpt parent select structure */
-static struct pclk_sel gpt1_pclk_sel = {
-       .pclk_info = gpt0_pclk_info,
-       .pclk_count = ARRAY_SIZE(gpt0_pclk_info),
-       .pclk_sel_reg = PERIP_CLK_CFG,
-       .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt1 timer clock */
-static struct clk gpt1_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk_sel = &gpt1_pclk_sel,
-       .pclk_sel_shift = GPT1_CLK_SHIFT,
-       .recalc = &follow_parent,
-};
-
-/* gpt2 synth clk config*/
-static struct gpt_clk_config gpt2_synth_config = {
-       .synth_reg = PRSC2_CLK_CFG,
-       .masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt2_synth_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &pll1_clk,
-       .calc_rate = &gpt_calc_rate,
-       .recalc = &gpt_clk_recalc,
-       .set_rate = &gpt_clk_set_rate,
-       .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
-       .private_data = &gpt2_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt2_pclk_info[] = {
-       {
-               .pclk = &gpt2_synth_clk,
-               .pclk_val = AUX_CLK_PLL1_VAL,
-       }, {
-               .pclk = &pll3_48m_clk,
-               .pclk_val = AUX_CLK_PLL3_VAL,
-       },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt2_pclk_sel = {
-       .pclk_info = gpt2_pclk_info,
-       .pclk_count = ARRAY_SIZE(gpt2_pclk_info),
-       .pclk_sel_reg = PERIP_CLK_CFG,
-       .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt2 timer clock */
-static struct clk gpt2_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk_sel = &gpt2_pclk_sel,
-       .pclk_sel_shift = GPT2_CLK_SHIFT,
-       .recalc = &follow_parent,
-};
-
-/* gpt3 synth clk config*/
-static struct gpt_clk_config gpt3_synth_config = {
-       .synth_reg = PRSC3_CLK_CFG,
-       .masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt3_synth_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &pll1_clk,
-       .calc_rate = &gpt_calc_rate,
-       .recalc = &gpt_clk_recalc,
-       .set_rate = &gpt_clk_set_rate,
-       .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
-       .private_data = &gpt3_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt3_pclk_info[] = {
-       {
-               .pclk = &gpt3_synth_clk,
-               .pclk_val = AUX_CLK_PLL1_VAL,
-       }, {
-               .pclk = &pll3_48m_clk,
-               .pclk_val = AUX_CLK_PLL3_VAL,
-       },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt3_pclk_sel = {
-       .pclk_info = gpt3_pclk_info,
-       .pclk_count = ARRAY_SIZE(gpt3_pclk_info),
-       .pclk_sel_reg = PERIP_CLK_CFG,
-       .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt3 timer clock */
-static struct clk gpt3_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk_sel = &gpt3_pclk_sel,
-       .pclk_sel_shift = GPT3_CLK_SHIFT,
-       .recalc = &follow_parent,
-};
-
-/* clock derived from pll3 clk */
-/* usbh0 clock */
-static struct clk usbh0_clk = {
-       .pclk = &pll3_48m_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = USBH0_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* usbh1 clock */
-static struct clk usbh1_clk = {
-       .pclk = &pll3_48m_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = USBH1_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* usbd clock */
-static struct clk usbd_clk = {
-       .pclk = &pll3_48m_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = USBD_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* clock derived from ahb clk */
-/* apb masks structure */
-static struct bus_clk_masks apb_masks = {
-       .mask = HCLK_PCLK_RATIO_MASK,
-       .shift = HCLK_PCLK_RATIO_SHIFT,
-};
-
-/* apb configuration structure */
-static struct bus_clk_config apb_config = {
-       .reg = CORE_CLK_CFG,
-       .masks = &apb_masks,
-};
-
-/* apb clock */
-static struct clk apb_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &ahb_clk,
-       .calc_rate = &bus_calc_rate,
-       .recalc = &bus_clk_recalc,
-       .set_rate = &bus_clk_set_rate,
-       .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
-       .private_data = &apb_config,
-};
-
-/* i2c clock */
-static struct clk i2c_clk = {
-       .pclk = &ahb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = I2C_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* dma clock */
-static struct clk dma_clk = {
-       .pclk = &ahb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = DMA_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* jpeg clock */
-static struct clk jpeg_clk = {
-       .pclk = &ahb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = JPEG_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* gmac clock */
-static struct clk gmac_clk = {
-       .pclk = &ahb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = GMAC_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* smi clock */
-static struct clk smi_clk = {
-       .pclk = &ahb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = SMI_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* fsmc clock */
-static struct clk fsmc_clk = {
-       .pclk = &ahb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = FSMC_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* clock derived from apb clk */
-/* adc clock */
-static struct clk adc_clk = {
-       .pclk = &apb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = ADC_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* ssp0 clock */
-static struct clk ssp0_clk = {
-       .pclk = &apb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = SSP0_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* ssp1 clock */
-static struct clk ssp1_clk = {
-       .pclk = &apb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = SSP1_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* ssp2 clock */
-static struct clk ssp2_clk = {
-       .pclk = &apb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = SSP2_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* gpio0 ARM subsystem clock */
-static struct clk gpio0_clk = {
-       .flags = ALWAYS_ENABLED,
-       .pclk = &apb_clk,
-       .recalc = &follow_parent,
-};
-
-/* gpio1 clock */
-static struct clk gpio1_clk = {
-       .pclk = &apb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = GPIO1_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-/* gpio2 clock */
-static struct clk gpio2_clk = {
-       .pclk = &apb_clk,
-       .en_reg = PERIP1_CLK_ENB,
-       .en_reg_bit = GPIO2_CLK_ENB,
-       .recalc = &follow_parent,
-};
-
-static struct clk dummy_apb_pclk;
-
-/* array of all spear 6xx clock lookups */
-static struct clk_lookup spear_clk_lookups[] = {
-       CLKDEV_INIT(NULL, "apb_pclk", &dummy_apb_pclk),
-       /* root clks */
-       CLKDEV_INIT(NULL, "osc_32k_clk", &osc_32k_clk),
-       CLKDEV_INIT(NULL, "osc_30m_clk", &osc_30m_clk),
-       /* clock derived from 32 KHz os          clk */
-       CLKDEV_INIT("rtc-spear", NULL, &rtc_clk),
-       /* clock derived from 30 MHz os          clk */
-       CLKDEV_INIT(NULL, "pll1_clk", &pll1_clk),
-       CLKDEV_INIT(NULL, "pll3_48m_clk", &pll3_48m_clk),
-       CLKDEV_INIT("wdt", NULL, &wdt_clk),
-       /* clock derived from pll1 clk */
-       CLKDEV_INIT(NULL, "cpu_clk", &cpu_clk),
-       CLKDEV_INIT(NULL, "ahb_clk", &ahb_clk),
-       CLKDEV_INIT(NULL, "uart_synth_clk", &uart_synth_clk),
-       CLKDEV_INIT(NULL, "firda_synth_clk", &firda_synth_clk),
-       CLKDEV_INIT(NULL, "clcd_synth_clk", &clcd_synth_clk),
-       CLKDEV_INIT(NULL, "gpt0_synth_clk", &gpt0_synth_clk),
-       CLKDEV_INIT(NULL, "gpt2_synth_clk", &gpt2_synth_clk),
-       CLKDEV_INIT(NULL, "gpt3_synth_clk", &gpt3_synth_clk),
-       CLKDEV_INIT("d0000000.serial", NULL, &uart0_clk),
-       CLKDEV_INIT("d0080000.serial", NULL, &uart1_clk),
-       CLKDEV_INIT("firda", NULL, &firda_clk),
-       CLKDEV_INIT("clcd", NULL, &clcd_clk),
-       CLKDEV_INIT("gpt0", NULL, &gpt0_clk),
-       CLKDEV_INIT("gpt1", NULL, &gpt1_clk),
-       CLKDEV_INIT("gpt2", NULL, &gpt2_clk),
-       CLKDEV_INIT("gpt3", NULL, &gpt3_clk),
-       /* clock derived from pll3 clk */
-       CLKDEV_INIT("designware_udc", NULL, &usbd_clk),
-       CLKDEV_INIT(NULL, "usbh.0_clk", &usbh0_clk),
-       CLKDEV_INIT(NULL, "usbh.1_clk", &usbh1_clk),
-       /* clock derived from ahb clk */
-       CLKDEV_INIT(NULL, "apb_clk", &apb_clk),
-       CLKDEV_INIT("d0200000.i2c", NULL, &i2c_clk),
-       CLKDEV_INIT("fc400000.dma", NULL, &dma_clk),
-       CLKDEV_INIT("jpeg", NULL, &jpeg_clk),
-       CLKDEV_INIT("gmac", NULL, &gmac_clk),
-       CLKDEV_INIT("fc000000.flash", NULL, &smi_clk),
-       CLKDEV_INIT("d1800000.flash", NULL, &fsmc_clk),
-       /* clock derived from apb clk */
-       CLKDEV_INIT("adc", NULL, &adc_clk),
-       CLKDEV_INIT("ssp-pl022.0", NULL, &ssp0_clk),
-       CLKDEV_INIT("ssp-pl022.1", NULL, &ssp1_clk),
-       CLKDEV_INIT("ssp-pl022.2", NULL, &ssp2_clk),
-       CLKDEV_INIT("f0100000.gpio", NULL, &gpio0_clk),
-       CLKDEV_INIT("fc980000.gpio", NULL, &gpio1_clk),
-       CLKDEV_INIT("d8100000.gpio", NULL, &gpio2_clk),
-};
-
-void __init spear6xx_clk_init(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
-               clk_register(&spear_clk_lookups[i]);
-
-       clk_init();
-}
index 7167fd331d86e8e6a05546e7d1ecb3dd4774f94f..65514b159370d909a89a2b9fc9d36897cb4f28f1 100644 (file)
@@ -16,7 +16,7 @@
 
 #include <linux/init.h>
 
-void __init spear_setup_timer(resource_size_t base, int irq);
+void __init spear_setup_of_timer(void);
 void spear_restart(char, const char *);
 void __init spear6xx_clk_init(void);
 
index 2b735389e74b341c76bd75f8a140ae8bc7163cf3..37a5c411a8665808fc910acac2811f05486068c6 100644 (file)
@@ -16,9 +16,6 @@
 
 /* IRQ definitions */
 /* VIC 1 */
-/* FIXME: probe this from DT */
-#define IRQ_CPU_GPT1_1                         16
-
 #define IRQ_VIC_END                            64
 
 /* GPIO pins virtual irqs */
index 2b9aaa6cdd113a88dc3548352d53c0559533cdf6..179e45774b3a9722182b02140ff52566e76bdd53 100644 (file)
@@ -14,6 +14,8 @@
 #ifndef __MACH_MISC_REGS_H
 #define __MACH_MISC_REGS_H
 
+#include <mach/spear.h>
+
 #define MISC_BASE              IOMEM(VA_SPEAR6XX_ICM3_MISC_REG_BASE)
 #define DMA_CHN_CFG            (MISC_BASE + 0x0A0)
 
index d278ed047a53024b0da18e0c09ce029034b6e60b..cb8ed2f4dc85fd5db84df13fceb674664304c6cf 100644 (file)
@@ -25,7 +25,6 @@
 /* ML-1, 2 - Multi Layer CPU Subsystem */
 #define SPEAR6XX_ML_CPU_BASE           UL(0xF0000000)
 #define VA_SPEAR6XX_ML_CPU_BASE                UL(0xF0000000)
-#define SPEAR6XX_CPU_TMR_BASE          UL(0xF0000000)
 
 /* ICM3 - Basic Subsystem */
 #define SPEAR6XX_ICM3_SMI_CTRL_BASE    UL(0xFC000000)
index de194dbb83715803887309dad8c2377db2a1a4ca..2e2e3596583e9072318dbf895644c5aa67a90e38 100644 (file)
@@ -419,9 +419,6 @@ struct map_desc spear6xx_io_desc[] __initdata = {
 void __init spear6xx_map_io(void)
 {
        iotable_init(spear6xx_io_desc, ARRAY_SIZE(spear6xx_io_desc));
-
-       /* This will initialize clock framework */
-       spear6xx_clk_init();
 }
 
 static void __init spear6xx_timer_init(void)
@@ -429,6 +426,8 @@ static void __init spear6xx_timer_init(void)
        char pclk_name[] = "pll3_48m_clk";
        struct clk *gpt_clk, *pclk;
 
+       spear6xx_clk_init();
+
        /* get the system timer clock */
        gpt_clk = clk_get_sys("gpt0", NULL);
        if (IS_ERR(gpt_clk)) {
@@ -448,7 +447,7 @@ static void __init spear6xx_timer_init(void)
        clk_put(gpt_clk);
        clk_put(pclk);
 
-       spear_setup_timer(SPEAR6XX_CPU_TMR_BASE, IRQ_CPU_GPT1_1);
+       spear_setup_of_timer();
 }
 
 struct sys_timer spear6xx_timer = {
index d0f2546706ca1b53d6cae8d3eecb84dc89f79a17..6a113a9bb87a73b17368267715aec791127fcd83 100644 (file)
@@ -50,6 +50,14 @@ config TEGRA_PCI
        depends on ARCH_TEGRA_2x_SOC
        select PCI
 
+config TEGRA_AHB
+       bool "Enable AHB driver for NVIDIA Tegra SoCs"
+       default y
+       help
+         Adds AHB configuration functionality for NVIDIA Tegra SoCs,
+         which controls AHB bus master arbitration and some
+         performance parameters (priority, prefetch size).
+
 comment "Tegra board type"
 
 config MACH_HARMONY
@@ -111,7 +119,7 @@ config MACH_VENTANA
          Support for the nVidia Ventana development platform
 
 choice
-        prompt "Low-level debug console UART"
+        prompt "Default low-level debug console UART"
         default TEGRA_DEBUG_UART_NONE
 
 config TEGRA_DEBUG_UART_NONE
@@ -134,6 +142,33 @@ config TEGRA_DEBUG_UARTE
 
 endchoice
 
+choice
+       prompt "Automatic low-level debug console UART"
+       default TEGRA_DEBUG_UART_AUTO_NONE
+
+config TEGRA_DEBUG_UART_AUTO_NONE
+       bool "None"
+
+config TEGRA_DEBUG_UART_AUTO_ODMDATA
+       bool "Via ODMDATA"
+       help
+         Automatically determines which UART to use for low-level debug based
+         on the ODMDATA value. This value is part of the BCT, and is written
+         to the boot memory device using nvflash, or other flashing tool.
+         When bits 19:18 are 3, then bits 17:15 indicate which UART to use;
+         0/1/2/3/4 are UART A/B/C/D/E.
+
+config TEGRA_DEBUG_UART_AUTO_SCRATCH
+       bool "Via UART scratch register"
+       help
+         Automatically determines which UART to use for low-level debug based
+         on the UART scratch register value. Some bootloaders put ASCII 'D'
+         in this register when they initialize their own console UART output.
+         Using this option allows the kernel to automatically pick the same
+         UART.
+
+endchoice
+
 config TEGRA_SYSTEM_DMA
        bool "Enable system DMA driver for NVIDIA Tegra SoCs"
        default y
index fac3eb1af17e0257c043b9cbb91181b35caf1f84..eb7249db50a59cada041847515313f60cd05eb59 100644 (file)
@@ -110,6 +110,7 @@ DT_MACHINE_START(TEGRA_DT, "nVidia Tegra20 (Flattened Device Tree)")
        .handle_irq     = gic_handle_irq,
        .timer          = &tegra_timer,
        .init_machine   = tegra_dt_init,
+       .init_late      = tegra_init_late,
        .restart        = tegra_assert_system_reset,
        .dt_compat      = tegra20_dt_board_compat,
 MACHINE_END
index 5f7c03e972f3dc2654f82022d81129bb5982b0d7..4f76fa7a5da396860f9bf3a5a4d672c037b52a47 100644 (file)
@@ -51,12 +51,22 @@ struct of_dev_auxdata tegra30_auxdata_lookup[] __initdata = {
        OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000C500, "tegra-i2c.2", NULL),
        OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000C700, "tegra-i2c.3", NULL),
        OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000D000, "tegra-i2c.4", NULL),
+       OF_DEV_AUXDATA("nvidia,tegra30-ahub", 0x70080000, "tegra30-ahub", NULL),
        {}
 };
 
 static __initdata struct tegra_clk_init_table tegra_dt_clk_init_table[] = {
        /* name         parent          rate            enabled */
        { "uarta",      "pll_p",        408000000,      true },
+       { "pll_a",      "pll_p_out1",   564480000,      true },
+       { "pll_a_out0", "pll_a",        11289600,       true },
+       { "extern1",    "pll_a_out0",   0,              true },
+       { "clk_out_1",  "extern1",      0,              true },
+       { "i2s0",       "pll_a_out0",   11289600,       false},
+       { "i2s1",       "pll_a_out0",   11289600,       false},
+       { "i2s2",       "pll_a_out0",   11289600,       false},
+       { "i2s3",       "pll_a_out0",   11289600,       false},
+       { "i2s4",       "pll_a_out0",   11289600,       false},
        { NULL,         NULL,           0,              0},
 };
 
@@ -80,6 +90,7 @@ DT_MACHINE_START(TEGRA30_DT, "NVIDIA Tegra30 (Flattened Device Tree)")
        .handle_irq     = gic_handle_irq,
        .timer          = &tegra_timer,
        .init_machine   = tegra30_dt_init,
+       .init_late      = tegra_init_late,
        .restart        = tegra_assert_system_reset,
        .dt_compat      = tegra30_dt_board_compat,
 MACHINE_END
index b906b3b6077b97a85a0e058605e4a6ccdfabb89a..e65e837f40136bba659b5ce2ae3b640fc70e9a0b 100644 (file)
@@ -192,5 +192,6 @@ MACHINE_START(HARMONY, "harmony")
        .handle_irq     = gic_handle_irq,
        .timer          = &tegra_timer,
        .init_machine   = tegra_harmony_init,
+       .init_late      = tegra_init_late,
        .restart        = tegra_assert_system_reset,
 MACHINE_END
index d0735c70d688e8054fc7aa5e16f1731eb0e00c9b..bbc1907e98a69875144b32e367629a9f8b2739f9 100644 (file)
@@ -162,6 +162,8 @@ static void paz00_i2c_init(void)
 
 static void paz00_usb_init(void)
 {
+       tegra_ehci2_ulpi_phy_config.reset_gpio = TEGRA_ULPI_RST;
+
        platform_device_register(&tegra_ehci2_device);
        platform_device_register(&tegra_ehci3_device);
 }
@@ -179,7 +181,6 @@ static __initdata struct tegra_clk_init_table paz00_clk_init_table[] = {
        { "uarta",      "pll_p",        216000000,      true },
        { "uartc",      "pll_p",        216000000,      true },
 
-       { "pll_p_out4", "pll_p",        24000000,       true },
        { "usbd",       "clk_m",        12000000,       false },
        { "usb2",       "clk_m",        12000000,       false },
        { "usb3",       "clk_m",        12000000,       false },
@@ -224,5 +225,6 @@ MACHINE_START(PAZ00, "Toshiba AC100 / Dynabook AZ")
        .handle_irq     = gic_handle_irq,
        .timer          = &tegra_timer,
        .init_machine   = tegra_paz00_init,
+       .init_late      = tegra_init_late,
        .restart        = tegra_assert_system_reset,
 MACHINE_END
index 79064c7a7907023eb1c4ffb82fc84aeddd44ca8c..71e9f3fc7fba28ffbd893c978342d3c7ad7c53e5 100644 (file)
@@ -277,6 +277,7 @@ MACHINE_START(SEABOARD, "seaboard")
        .handle_irq     = gic_handle_irq,
        .timer          = &tegra_timer,
        .init_machine   = tegra_seaboard_init,
+       .init_late      = tegra_init_late,
        .restart        = tegra_assert_system_reset,
 MACHINE_END
 
@@ -288,6 +289,7 @@ MACHINE_START(KAEN, "kaen")
        .handle_irq     = gic_handle_irq,
        .timer          = &tegra_timer,
        .init_machine   = tegra_kaen_init,
+       .init_late      = tegra_init_late,
        .restart        = tegra_assert_system_reset,
 MACHINE_END
 
@@ -299,5 +301,6 @@ MACHINE_START(WARIO, "wario")
        .handle_irq     = gic_handle_irq,
        .timer          = &tegra_timer,
        .init_machine   = tegra_wario_init,
+       .init_late      = tegra_init_late,
        .restart        = tegra_assert_system_reset,
 MACHINE_END
index bc59b379c6fe609db1d413c874b24e54e1b378a2..776aa9564d5d61e3c803f701c2ed0bf87cca8dbc 100644 (file)
@@ -118,6 +118,8 @@ static void trimslice_usb_init(void)
        pdata = tegra_ehci1_device.dev.platform_data;
        pdata->vbus_gpio = TRIMSLICE_GPIO_USB1_MODE;
 
+       tegra_ehci2_ulpi_phy_config.reset_gpio = TEGRA_GPIO_PV0;
+
        platform_device_register(&tegra_ehci3_device);
        platform_device_register(&tegra_ehci2_device);
        platform_device_register(&tegra_ehci1_device);
@@ -176,5 +178,6 @@ MACHINE_START(TRIMSLICE, "trimslice")
        .handle_irq     = gic_handle_irq,
        .timer          = &tegra_timer,
        .init_machine   = tegra_trimslice_init,
+       .init_late      = tegra_init_late,
        .restart        = tegra_assert_system_reset,
 MACHINE_END
index 75d1543d77c0d0f2029191f96fabc07d98c4e8fe..65014968fc6c05ad1144c421856034b40756f037 100644 (file)
@@ -32,5 +32,19 @@ void __init tegra_init_irq(void);
 void __init tegra_dt_init_irq(void);
 int __init tegra_pcie_init(bool init_port0, bool init_port1);
 
+void tegra_init_late(void);
+
+#ifdef CONFIG_DEBUG_FS
+int tegra_clk_debugfs_init(void);
+#else
+static inline int tegra_clk_debugfs_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC) && defined(CONFIG_DEBUG_FS)
+int __init tegra_powergate_debugfs_init(void);
+#else
+static inline int tegra_powergate_debugfs_init(void) { return 0; }
+#endif
+
 extern struct sys_timer tegra_timer;
 #endif
index 8dad8d18cb49cd26e12212718de9295a4f4e550a..58f981c0819c717883ae99c8d9d7d4c825ea1cae 100644 (file)
@@ -642,7 +642,7 @@ static int clk_debugfs_register(struct clk *c)
        return 0;
 }
 
-static int __init clk_debugfs_init(void)
+int __init tegra_clk_debugfs_init(void)
 {
        struct clk *c;
        struct dentry *d;
@@ -669,5 +669,4 @@ err_out:
        return err;
 }
 
-late_initcall(clk_debugfs_init);
 #endif
index 22df10fb9972877069f92637ff5849ba990ad9f7..204a5c8b0b574bd0cf2b46f5b0d4d1ccde87dd61 100644 (file)
@@ -82,10 +82,12 @@ static __initdata struct tegra_clk_init_table tegra20_clk_init_table[] = {
        { "pll_p_out1", "pll_p",        28800000,       true },
        { "pll_p_out2", "pll_p",        48000000,       true },
        { "pll_p_out3", "pll_p",        72000000,       true },
-       { "pll_p_out4", "pll_p",        108000000,      true },
-       { "sclk",       "pll_p_out4",   108000000,      true },
-       { "hclk",       "sclk",         108000000,      true },
-       { "pclk",       "hclk",         54000000,       true },
+       { "pll_p_out4", "pll_p",        24000000,       true },
+       { "pll_c",      "clk_m",        600000000,      true },
+       { "pll_c_out1", "pll_c",        120000000,      true },
+       { "sclk",       "pll_c_out1",   120000000,      true },
+       { "hclk",       "sclk",         120000000,      true },
+       { "pclk",       "hclk",         60000000,       true },
        { "csite",      NULL,           0,              true },
        { "emc",        NULL,           0,              true },
        { "cpu",        NULL,           0,              true },
@@ -93,6 +95,17 @@ static __initdata struct tegra_clk_init_table tegra20_clk_init_table[] = {
 };
 #endif
 
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+static __initdata struct tegra_clk_init_table tegra30_clk_init_table[] = {
+       /* name         parent          rate            enabled */
+       { "clk_m",      NULL,           0,              true },
+       { "pll_p",      "clk_m",        408000000,      true },
+       { "pll_p_out1", "pll_p",        9600000,        true },
+       { NULL,         NULL,           0,              0},
+};
+#endif
+
+
 static void __init tegra_init_cache(u32 tag_latency, u32 data_latency)
 {
 #ifdef CONFIG_CACHE_L2X0
@@ -127,8 +140,15 @@ void __init tegra30_init_early(void)
 {
        tegra_init_fuse();
        tegra30_init_clocks();
+       tegra_clk_init_from_table(tegra30_clk_init_table);
        tegra_init_cache(0x441, 0x551);
        tegra_pmc_init();
        tegra_powergate_init();
 }
 #endif
+
+void __init tegra_init_late(void)
+{
+       tegra_clk_debugfs_init();
+       tegra_powergate_debugfs_init();
+}
index 2d8dfa2faf8f09d0dcdfb8f301713912ca553782..c70e65ffa36ba8a91e16b372c8c616abd4414c20 100644 (file)
@@ -439,9 +439,8 @@ static struct resource tegra_usb3_resources[] = {
        },
 };
 
-static struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config = {
-       /* All existing boards use GPIO PV0 for phy reset */
-       .reset_gpio = TEGRA_GPIO_PV0,
+struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config = {
+       .reset_gpio = -1,
        .clk = "cdev2",
 };
 
index 138c642e59f4cec93143845d8e072df332b083e3..4f50527264956b0f28413e462b9afef024b00322 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/platform_data/tegra_usb.h>
 
+#include <mach/usb_phy.h>
+
+extern struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config;
+
 extern struct tegra_ehci_platform_data tegra_ehci1_pdata;
 extern struct tegra_ehci_platform_data tegra_ehci2_pdata;
 extern struct tegra_ehci_platform_data tegra_ehci3_pdata;
diff --git a/arch/arm/mach-tegra/include/mach/tegra-ahb.h b/arch/arm/mach-tegra/include/mach/tegra-ahb.h
new file mode 100644 (file)
index 0000000..e0f8c84
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MACH_TEGRA_AHB_H__
+#define __MACH_TEGRA_AHB_H__
+
+extern int tegra_ahb_enable_smmu(struct device_node *ahb);
+
+#endif /* __MACH_TEGRA_AHB_H__ */
index 5a440f315e575d29c43958fd1c84226843056447..937c4c50219e1a818c89b32f965598318fbd55cc 100644 (file)
@@ -63,52 +63,86 @@ static inline void save_uart_address(void)
                buf[0] = 0;
 }
 
-/*
- * Setup before decompression.  This is where we do UART selection for
- * earlyprintk and init the uart_base register.
- */
-static inline void arch_decomp_setup(void)
+static const struct {
+       u32 base;
+       u32 reset_reg;
+       u32 clock_reg;
+       u32 bit;
+} uarts[] = {
+       {
+               TEGRA_UARTA_BASE,
+               TEGRA_CLK_RESET_BASE + 0x04,
+               TEGRA_CLK_RESET_BASE + 0x10,
+               6,
+       },
+       {
+               TEGRA_UARTB_BASE,
+               TEGRA_CLK_RESET_BASE + 0x04,
+               TEGRA_CLK_RESET_BASE + 0x10,
+               7,
+       },
+       {
+               TEGRA_UARTC_BASE,
+               TEGRA_CLK_RESET_BASE + 0x08,
+               TEGRA_CLK_RESET_BASE + 0x14,
+               23,
+       },
+       {
+               TEGRA_UARTD_BASE,
+               TEGRA_CLK_RESET_BASE + 0x0c,
+               TEGRA_CLK_RESET_BASE + 0x18,
+               1,
+       },
+       {
+               TEGRA_UARTE_BASE,
+               TEGRA_CLK_RESET_BASE + 0x0c,
+               TEGRA_CLK_RESET_BASE + 0x18,
+               2,
+       },
+};
+
+static inline bool uart_clocked(int i)
+{
+       if (*(u8 *)uarts[i].reset_reg & BIT(uarts[i].bit))
+               return false;
+
+       if (!(*(u8 *)uarts[i].clock_reg & BIT(uarts[i].bit)))
+               return false;
+
+       return true;
+}
+
+#ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA
+int auto_odmdata(void)
+{
+       volatile u32 *pmc = (volatile u32 *)TEGRA_PMC_BASE;
+       u32 odmdata = pmc[0xa0 / 4];
+
+       /*
+        * Bits 19:18 are the console type: 0=default, 1=none, 2==DCC, 3==UART
+        * Some boards apparently swap the last two values, but we don't have
+        * any way of catering for that here, so we just accept either. If this
+        * doesn't make sense for your board, just don't enable this feature.
+        *
+        * Bits 17:15 indicate the UART to use, 0/1/2/3/4 are UART A/B/C/D/E.
+        */
+
+       switch  ((odmdata >> 18) & 3) {
+       case 2:
+       case 3:
+               break;
+       default:
+               return -1;
+       }
+
+       return (odmdata >> 15) & 7;
+}
+#endif
+
+#ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_SCRATCH
+int auto_scratch(void)
 {
-       static const struct {
-               u32 base;
-               u32 reset_reg;
-               u32 clock_reg;
-               u32 bit;
-       } uarts[] = {
-               {
-                       TEGRA_UARTA_BASE,
-                       TEGRA_CLK_RESET_BASE + 0x04,
-                       TEGRA_CLK_RESET_BASE + 0x10,
-                       6,
-               },
-               {
-                       TEGRA_UARTB_BASE,
-                       TEGRA_CLK_RESET_BASE + 0x04,
-                       TEGRA_CLK_RESET_BASE + 0x10,
-                       7,
-               },
-               {
-                       TEGRA_UARTC_BASE,
-                       TEGRA_CLK_RESET_BASE + 0x08,
-                       TEGRA_CLK_RESET_BASE + 0x14,
-                       23,
-               },
-               {
-                       TEGRA_UARTD_BASE,
-                       TEGRA_CLK_RESET_BASE + 0x0c,
-                       TEGRA_CLK_RESET_BASE + 0x18,
-                       1,
-               },
-               {
-                       TEGRA_UARTE_BASE,
-                       TEGRA_CLK_RESET_BASE + 0x0c,
-                       TEGRA_CLK_RESET_BASE + 0x18,
-                       2,
-               },
-       };
        int i;
-       volatile u32 *apb_misc = (volatile u32 *)TEGRA_APB_MISC_BASE;
-       u32 chip, div;
 
        /*
         * Look for the first UART that:
@@ -125,20 +159,60 @@ static inline void arch_decomp_setup(void)
         * back to what's specified in TEGRA_DEBUG_UART_BASE.
         */
        for (i = 0; i < ARRAY_SIZE(uarts); i++) {
-               if (*(u8 *)uarts[i].reset_reg & BIT(uarts[i].bit))
-                       continue;
-
-               if (!(*(u8 *)uarts[i].clock_reg & BIT(uarts[i].bit)))
+               if (!uart_clocked(i))
                        continue;
 
                uart = (volatile u8 *)uarts[i].base;
                if (uart[UART_SCR << DEBUG_UART_SHIFT] != 'D')
                        continue;
 
-               break;
+               return i;
        }
-       if (i == ARRAY_SIZE(uarts))
-               uart = (volatile u8 *)TEGRA_DEBUG_UART_BASE;
+
+       return -1;
+}
+#endif
+
+/*
+ * Setup before decompression.  This is where we do UART selection for
+ * earlyprintk and init the uart_base register.
+ */
+static inline void arch_decomp_setup(void)
+{
+       int uart_id, auto_uart_id;
+       volatile u32 *apb_misc = (volatile u32 *)TEGRA_APB_MISC_BASE;
+       u32 chip, div;
+
+#if defined(CONFIG_TEGRA_DEBUG_UARTA)
+       uart_id = 0;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTB)
+       uart_id = 1;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTC)
+       uart_id = 2;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTD)
+       uart_id = 3;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTE)
+       uart_id = 4;
+#else
+       uart_id = -1;
+#endif
+
+#if defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
+       auto_uart_id = auto_odmdata();
+#elif defined(CONFIG_TEGRA_DEBUG_UART_AUTO_SCRATCH)
+       auto_uart_id = auto_scratch();
+#else
+       auto_uart_id = -1;
+#endif
+       if (auto_uart_id != -1)
+               uart_id = auto_uart_id;
+
+       if (uart_id < 0 || uart_id >= ARRAY_SIZE(uarts) ||
+           !uart_clocked(uart_id))
+               uart = NULL;
+       else
+               uart = (volatile u8 *)uarts[uart_id].base;
+
        save_uart_address();
        if (uart == NULL)
                return;
index de1a0f602b2891c5dfd2eeb3b0322c1ba84f3adf..935ce9f6559031b79102eac935376dd3fea3e1a8 100644 (file)
@@ -61,8 +61,8 @@ struct tegra_usb_phy {
        struct usb_phy *ulpi;
 };
 
-struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
-                       void *config, enum tegra_usb_phy_mode phy_mode);
+struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
+       void __iomem *regs, void *config, enum tegra_usb_phy_mode phy_mode);
 
 int tegra_usb_phy_power_on(struct tegra_usb_phy *phy);
 
index c238699ae86f5021defde301911bcde0c0696a0f..f5b12fb4ff12306563fd5da0152230424118a843 100644 (file)
@@ -234,7 +234,7 @@ static const struct file_operations powergate_fops = {
        .release        = single_release,
 };
 
-static int __init powergate_debugfs_init(void)
+int __init tegra_powergate_debugfs_init(void)
 {
        struct dentry *d;
        int err = -ENOMEM;
@@ -247,6 +247,4 @@ static int __init powergate_debugfs_init(void)
        return err;
 }
 
-late_initcall(powergate_debugfs_init);
-
 #endif
index bae09b8598912b2f310ea854401457bcba9983da..b59315ce3691c8c73fefdccd9e91328b14d07dab 100644 (file)
@@ -1486,6 +1486,10 @@ static struct clk tegra_clk_m = {
 };
 
 static struct clk_pll_freq_table tegra_pll_c_freq_table[] = {
+       { 12000000, 600000000, 600, 12, 1, 8 },
+       { 13000000, 600000000, 600, 13, 1, 8 },
+       { 19200000, 600000000, 500, 16, 1, 6 },
+       { 26000000, 600000000, 600, 26, 1, 8 },
        { 0, 0, 0, 0, 0, 0 },
 };
 
index 6d08b53f92d21cde44c0834aa80ac5cda3f42bf9..e33fe4b14a2a5ab86f674c1848320740448bfa6c 100644 (file)
@@ -3015,6 +3015,15 @@ struct clk_duplicate tegra_clk_duplicates[] = {
        CLK_DUPLICATE("sbc6", "spi_slave_tegra.5", NULL),
        CLK_DUPLICATE("twd", "smp_twd", NULL),
        CLK_DUPLICATE("vcp", "nvavp", "vcp"),
+       CLK_DUPLICATE("i2s0", NULL, "i2s0"),
+       CLK_DUPLICATE("i2s1", NULL, "i2s1"),
+       CLK_DUPLICATE("i2s2", NULL, "i2s2"),
+       CLK_DUPLICATE("i2s3", NULL, "i2s3"),
+       CLK_DUPLICATE("i2s4", NULL, "i2s4"),
+       CLK_DUPLICATE("dam0", NULL, "dam0"),
+       CLK_DUPLICATE("dam1", NULL, "dam1"),
+       CLK_DUPLICATE("dam2", NULL, "dam2"),
+       CLK_DUPLICATE("spdif_in", NULL, "spdif_in"),
 };
 
 struct clk *tegra_ptr_clks[] = {
index d71d2fed67216227f1958dfa187be7e7fa5a57ad..54e353c8e3042f61e7c487c16f1cd29d22758fb6 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
+#include <linux/of_gpio.h>
 #include <linux/usb/otg.h>
 #include <linux/usb/ulpi.h>
 #include <asm/mach-types.h>
@@ -654,8 +655,8 @@ static void ulpi_phy_power_off(struct tegra_usb_phy *phy)
        clk_disable(phy->clk);
 }
 
-struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
-                       void *config, enum tegra_usb_phy_mode phy_mode)
+struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
+       void __iomem *regs, void *config, enum tegra_usb_phy_mode phy_mode)
 {
        struct tegra_usb_phy *phy;
        struct tegra_ulpi_config *ulpi_config;
@@ -711,6 +712,16 @@ struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
                        err = -ENXIO;
                        goto err1;
                }
+               if (!gpio_is_valid(ulpi_config->reset_gpio))
+                       ulpi_config->reset_gpio =
+                               of_get_named_gpio(dev->of_node,
+                                                 "nvidia,phy-reset-gpio", 0);
+               if (!gpio_is_valid(ulpi_config->reset_gpio)) {
+                       pr_err("%s: invalid reset gpio: %d\n", __func__,
+                              ulpi_config->reset_gpio);
+                       err = -EINVAL;
+                       goto err1;
+               }
                gpio_request(ulpi_config->reset_gpio, "ulpi_phy_reset_b");
                gpio_direction_output(ulpi_config->reset_gpio, 0);
                phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0);
index b29a788f498cbcd93e96f7c21329785bab4dd094..1f47d962e3a12f6212a7e9450075bd94e3a49193 100644 (file)
@@ -96,7 +96,7 @@ static void __init __mop500_uib_init(struct uib *uib, const char *why)
 /*
  * Detect the UIB attached based on the presence or absence of i2c devices.
  */
-static int __init mop500_uib_init(void)
+int __init mop500_uib_init(void)
 {
        struct uib *uib = mop500_uib;
        struct i2c_adapter *i2c0;
@@ -131,5 +131,3 @@ static int __init mop500_uib_init(void)
 
        return 0;
 }
-
-module_init(mop500_uib_init);
index f943687acaf0e214d1bebc4ed317976c0b640cb5..9c74ac54584955be16798a69c1a997247d5c70b3 100644 (file)
@@ -206,7 +206,7 @@ static struct resource ab8500_resources[] = {
 };
 
 struct platform_device ab8500_device = {
-       .name = "ab8500-i2c",
+       .name = "ab8500-core",
        .id = 0,
        .dev = {
                .platform_data = &ab8500_platdata,
@@ -673,9 +673,15 @@ static void __init u8500_cryp1_hash1_init(struct device *parent)
 static struct platform_device *snowball_platform_devs[] __initdata = {
        &snowball_led_dev,
        &snowball_key_dev,
+       &snowball_sbnet_dev,
        &ab8500_device,
 };
 
+static struct platform_device *snowball_of_platform_devs[] __initdata = {
+       &snowball_led_dev,
+       &snowball_key_dev,
+};
+
 static void __init mop500_init_machine(void)
 {
        struct device *parent = NULL;
@@ -710,6 +716,8 @@ static void __init mop500_init_machine(void)
 
        /* This board has full regulator constraints */
        regulator_has_full_constraints();
+
+       mop500_uib_init();
 }
 
 static void __init snowball_init_machine(void)
@@ -774,6 +782,8 @@ static void __init hrefv60_init_machine(void)
 
        /* This board has full regulator constraints */
        regulator_has_full_constraints();
+
+       mop500_uib_init();
 }
 
 MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
@@ -785,6 +795,7 @@ MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
        .timer          = &ux500_timer,
        .handle_irq     = gic_handle_irq,
        .init_machine   = mop500_init_machine,
+       .init_late      = ux500_init_late,
 MACHINE_END
 
 MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
@@ -794,6 +805,7 @@ MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
        .timer          = &ux500_timer,
        .handle_irq     = gic_handle_irq,
        .init_machine   = hrefv60_init_machine,
+       .init_late      = ux500_init_late,
 MACHINE_END
 
 MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
@@ -804,6 +816,7 @@ MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
        .timer          = &ux500_timer,
        .handle_irq     = gic_handle_irq,
        .init_machine   = snowball_init_machine,
+       .init_late      = ux500_init_late,
 MACHINE_END
 
 #ifdef CONFIG_MACH_UX500_DT
@@ -831,6 +844,10 @@ struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
 static const struct of_device_id u8500_local_bus_nodes[] = {
        /* only create devices below soc node */
        { .compatible = "stericsson,db8500", },
+       { .compatible = "stericsson,db8500-prcmu", },
+       { .compatible = "stericsson,db8500-prcmu-regulator", },
+       { .compatible = "stericsson,ab8500", },
+       { .compatible = "stericsson,ab8500-regulator", },
        { .compatible = "simple-bus"},
        { },
 };
@@ -849,7 +866,7 @@ static void __init u8500_init_machine(void)
        else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
                hrefv60_pinmaps_init();
 
-       parent = u8500_init_devices();
+       parent = u8500_of_init_devices();
 
        for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
                mop500_platform_devs[i]->dev.parent = parent;
@@ -866,15 +883,23 @@ static void __init u8500_init_machine(void)
                                ARRAY_SIZE(mop500_platform_devs));
 
                mop500_sdi_init(parent);
-
                i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
                i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
                i2c_register_board_info(2, mop500_i2c2_devices,
                                        ARRAY_SIZE(mop500_i2c2_devices));
 
+               mop500_uib_init();
+
        } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
-               platform_add_devices(snowball_platform_devs,
-                               ARRAY_SIZE(snowball_platform_devs));
+               /*
+                * Devices to be DT:ed:
+                *   snowball_led_dev   = todo
+                *   snowball_key_dev   = todo
+                *   snowball_sbnet_dev = done
+                *   ab8500_device      = done
+                */
+               platform_add_devices(snowball_of_platform_devs,
+                               ARRAY_SIZE(snowball_of_platform_devs));
 
                snowball_sdi_init(parent);
        } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) {
@@ -895,6 +920,8 @@ static void __init u8500_init_machine(void)
                i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
                i2c_register_board_info(2, mop500_i2c2_devices,
                                        ARRAY_SIZE(mop500_i2c2_devices));
+
+               mop500_uib_init();
        }
        mop500_i2c_init(parent);
 
@@ -918,6 +945,7 @@ DT_MACHINE_START(U8500_DT, "ST-Ericsson U8500 platform (Device Tree Support)")
        .timer          = &ux500_timer,
        .handle_irq     = gic_handle_irq,
        .init_machine   = u8500_init_machine,
+       .init_late      = ux500_init_late,
        .dt_compat      = u8500_dt_board_compat,
 MACHINE_END
 #endif
index bc44c07c71a99a6fcf4f66ba7b3193f1ac0dc911..2f87b25a908a31c74264ac0b7a29eb34f85f2c58 100644 (file)
@@ -89,7 +89,11 @@ void __init mop500_pinmaps_init(void);
 void __init snowball_pinmaps_init(void);
 void __init hrefv60_pinmaps_init(void);
 
+int __init mop500_uib_init(void);
 void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
                unsigned n);
 
+/* TODO: Once all pieces are DT:ed, remove completely. */
+struct device * __init u8500_of_init_devices(void);
+
 #endif
index 1762c4728f1e24b3dd41881d1d145cbac31c7c49..8d73b066a18d31dda628af949f7c84c260bd56a7 100644 (file)
@@ -635,7 +635,7 @@ static int clk_debugfs_register(struct clk *c)
        return 0;
 }
 
-static int __init clk_debugfs_init(void)
+int __init clk_debugfs_init(void)
 {
        struct clk *c;
        struct dentry *d;
@@ -657,7 +657,6 @@ err_out:
        return err;
 }
 
-late_initcall(clk_debugfs_init);
 #endif /* defined(CONFIG_DEBUG_FS) */
 
 unsigned long clk_smp_twd_rate = 500000000;
@@ -696,12 +695,11 @@ static struct notifier_block clk_twd_cpufreq_nb = {
        .notifier_call = clk_twd_cpufreq_transition,
 };
 
-static int clk_init_smp_twd_cpufreq(void)
+int clk_init_smp_twd_cpufreq(void)
 {
        return cpufreq_register_notifier(&clk_twd_cpufreq_nb,
                                  CPUFREQ_TRANSITION_NOTIFIER);
 }
-late_initcall(clk_init_smp_twd_cpufreq);
 
 #endif
 
index d776ada08dbf1062b0cb8d5733a31c00dcc7a78b..65d27a13f46d5073abbe4e7cc201ee4493247c74 100644 (file)
@@ -150,3 +150,15 @@ struct clk clk_##_name = {                                         \
 
 int __init clk_db8500_ed_fixup(void);
 int __init clk_init(void);
+
+#ifdef CONFIG_DEBUG_FS
+int clk_debugfs_init(void);
+#else
+static inline int clk_debugfs_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_CPU_FREQ
+int clk_init_smp_twd_cpufreq(void);
+#else
+static inline int clk_init_smp_twd_cpufreq(void) { return 0; }
+#endif
index 16169c4bf6ca5226b748bee3d50d6f6b76f8138e..33275eb4c6890ab7745bbf0d2b2601ecca5e413f 100644 (file)
@@ -140,7 +140,6 @@ static struct platform_device *platform_devs[] __initdata = {
 static struct platform_device *of_platform_devs[] __initdata = {
        &u8500_dma40_device,
        &db8500_pmu_device,
-       &db8500_prcmu_device,
 };
 
 static resource_size_t __initdata db8500_gpio_base[] = {
@@ -219,6 +218,28 @@ struct device * __init u8500_init_devices(void)
        db8500_add_gpios(parent);
        db8500_add_usb(parent, usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg);
 
+       platform_device_register_data(parent,
+               "cpufreq-u8500", -1, NULL, 0);
+
+       for (i = 0; i < ARRAY_SIZE(platform_devs); i++)
+               platform_devs[i]->dev.parent = parent;
+
+       platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
+
+       return parent;
+}
+
+/* TODO: Once all pieces are DT:ed, remove completely. */
+struct device * __init u8500_of_init_devices(void)
+{
+       struct device *parent;
+       int i;
+
+       parent = db8500_soc_device_init();
+
+       db8500_add_rtc(parent);
+       db8500_add_usb(parent, usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg);
+
        platform_device_register_data(parent,
                "cpufreq-u8500", -1, NULL, 0);
 
@@ -229,7 +250,7 @@ struct device * __init u8500_init_devices(void)
         * Devices to be DT:ed:
         *   u8500_dma40_device  = todo
         *   db8500_pmu_device   = todo
-        *   db8500_prcmu_device = todo
+        *   db8500_prcmu_device = done
         */
        platform_add_devices(of_platform_devs, ARRAY_SIZE(of_platform_devs));
 
index a29a0e3adcf9da3e3aebca1905a3f7764bee4fdd..e2360e7c770d3a6c99b8abedfc38810503f290a7 100644 (file)
@@ -73,6 +73,12 @@ void __init ux500_init_irq(void)
        clk_init();
 }
 
+void __init ux500_init_late(void)
+{
+       clk_debugfs_init();
+       clk_init_smp_twd_cpufreq();
+}
+
 static const char * __init ux500_get_machine(void)
 {
        return kasprintf(GFP_KERNEL, "DB%4x", dbx500_partnumber());
index 4e369f1645ec9bbbfdc3ee3a28927a7f0e01ff2d..8b7ed82a286665961f04d548a0ab1137027fc7d8 100644 (file)
@@ -20,6 +20,7 @@ extern void __init u8500_map_io(void);
 extern struct device * __init u8500_init_devices(void);
 
 extern void __init ux500_init_irq(void);
+extern void __init ux500_init_late(void);
 
 extern struct device *ux500_soc_device_init(const char *soc_id);
 
index 04dd092211b893271fc65df344ef42adb1e3f754..fde26adaef32d964a539a12a97f4596af27b1fe1 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/ata_platform.h>
 #include <linux/smsc911x.h>
 #include <linux/spinlock.h>
-#include <linux/device.h>
 #include <linux/usb/isp1760.h>
 #include <linux/clkdev.h>
 #include <linux/mtd/physmap.h>
@@ -31,7 +30,6 @@
 #include <asm/hardware/gic.h>
 #include <asm/hardware/timer-sp.h>
 #include <asm/hardware/sp810.h>
-#include <asm/hardware/gic.h>
 
 #include <mach/ct-ca9x4.h>
 #include <mach/motherboard.h>
index db23ae4aaaaba3384d000181e4dfb1c9ac476e60..ea6b43154090a2af00cbb168c7ef2d3a0589b2b2 100644 (file)
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 #include <asm/mach/arch.h>
+#include <asm/dma-iommu.h>
+#include <asm/mach/map.h>
+#include <asm/system_info.h>
+#include <asm/dma-contiguous.h>
 
 #include "mm.h"
 
+/*
+ * The DMA API is built upon the notion of "buffer ownership".  A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device.  These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches.  We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ */
+static void __dma_page_cpu_to_dev(struct page *, unsigned long,
+               size_t, enum dma_data_direction);
+static void __dma_page_dev_to_cpu(struct page *, unsigned long,
+               size_t, enum dma_data_direction);
+
+/**
+ * arm_dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed.  The CPU
+ * can regain ownership by calling dma_unmap_page().
+ */
+static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+            unsigned long offset, size_t size, enum dma_data_direction dir,
+            struct dma_attrs *attrs)
+{
+       if (!arch_is_coherent())
+               __dma_page_cpu_to_dev(page, offset, size, dir);
+       return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+/**
+ * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Unmap a page streaming mode DMA translation.  The handle and size
+ * must match what was provided in the previous dma_map_page() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+{
+       if (!arch_is_coherent())
+               __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+                                     handle & ~PAGE_MASK, size, dir);
+}
+
+static void arm_dma_sync_single_for_cpu(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       unsigned int offset = handle & (PAGE_SIZE - 1);
+       struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+       if (!arch_is_coherent())
+               __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       unsigned int offset = handle & (PAGE_SIZE - 1);
+       struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+       if (!arch_is_coherent())
+               __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
+struct dma_map_ops arm_dma_ops = {
+       .alloc                  = arm_dma_alloc,
+       .free                   = arm_dma_free,
+       .mmap                   = arm_dma_mmap,
+       .map_page               = arm_dma_map_page,
+       .unmap_page             = arm_dma_unmap_page,
+       .map_sg                 = arm_dma_map_sg,
+       .unmap_sg               = arm_dma_unmap_sg,
+       .sync_single_for_cpu    = arm_dma_sync_single_for_cpu,
+       .sync_single_for_device = arm_dma_sync_single_for_device,
+       .sync_sg_for_cpu        = arm_dma_sync_sg_for_cpu,
+       .sync_sg_for_device     = arm_dma_sync_sg_for_device,
+       .set_dma_mask           = arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_dma_ops);
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
        u64 mask = (u64)arm_dma_limit;
@@ -56,6 +163,21 @@ static u64 get_coherent_dma_mask(struct device *dev)
        return mask;
 }
 
+static void __dma_clear_buffer(struct page *page, size_t size)
+{
+       void *ptr;
+       /*
+        * Ensure that the allocated pages are zeroed, and that any data
+        * lurking in the kernel direct-mapped region is invalidated.
+        */
+       ptr = page_address(page);
+       if (ptr) {
+               memset(ptr, 0, size);
+               dmac_flush_range(ptr, ptr + size);
+               outer_flush_range(__pa(ptr), __pa(ptr) + size);
+       }
+}
+
 /*
  * Allocate a DMA buffer for 'dev' of size 'size' using the
  * specified gfp mask.  Note that 'size' must be page aligned.
@@ -64,23 +186,6 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
 {
        unsigned long order = get_order(size);
        struct page *page, *p, *e;
-       void *ptr;
-       u64 mask = get_coherent_dma_mask(dev);
-
-#ifdef CONFIG_DMA_API_DEBUG
-       u64 limit = (mask + 1) & ~mask;
-       if (limit && size >= limit) {
-               dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
-                       size, mask);
-               return NULL;
-       }
-#endif
-
-       if (!mask)
-               return NULL;
-
-       if (mask < 0xffffffffULL)
-               gfp |= GFP_DMA;
 
        page = alloc_pages(gfp, order);
        if (!page)
@@ -93,14 +198,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
        for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
                __free_page(p);
 
-       /*
-        * Ensure that the allocated pages are zeroed, and that any data
-        * lurking in the kernel direct-mapped region is invalidated.
-        */
-       ptr = page_address(page);
-       memset(ptr, 0, size);
-       dmac_flush_range(ptr, ptr + size);
-       outer_flush_range(__pa(ptr), __pa(ptr) + size);
+       __dma_clear_buffer(page, size);
 
        return page;
 }
@@ -170,6 +268,11 @@ static int __init consistent_init(void)
        unsigned long base = consistent_base;
        unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
 
+#ifndef CONFIG_ARM_DMA_USE_IOMMU
+       if (cpu_architecture() >= CPU_ARCH_ARMv6)
+               return 0;
+#endif
+
        consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
        if (!consistent_pte) {
                pr_err("%s: no memory\n", __func__);
@@ -184,14 +287,14 @@ static int __init consistent_init(void)
 
                pud = pud_alloc(&init_mm, pgd, base);
                if (!pud) {
-                       printk(KERN_ERR "%s: no pud tables\n", __func__);
+                       pr_err("%s: no pud tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }
 
                pmd = pmd_alloc(&init_mm, pud, base);
                if (!pmd) {
-                       printk(KERN_ERR "%s: no pmd tables\n", __func__);
+                       pr_err("%s: no pmd tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }
@@ -199,7 +302,7 @@ static int __init consistent_init(void)
 
                pte = pte_alloc_kernel(pmd, base);
                if (!pte) {
-                       printk(KERN_ERR "%s: no pte tables\n", __func__);
+                       pr_err("%s: no pte tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }
@@ -210,9 +313,101 @@ static int __init consistent_init(void)
 
        return ret;
 }
-
 core_initcall(consistent_init);
 
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+                                    pgprot_t prot, struct page **ret_page);
+
+static struct arm_vmregion_head coherent_head = {
+       .vm_lock        = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
+       .vm_list        = LIST_HEAD_INIT(coherent_head.vm_list),
+};
+
+size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+
+static int __init early_coherent_pool(char *p)
+{
+       coherent_pool_size = memparse(p, &p);
+       return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+/*
+ * Initialise the coherent pool for atomic allocations.
+ */
+static int __init coherent_init(void)
+{
+       pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+       size_t size = coherent_pool_size;
+       struct page *page;
+       void *ptr;
+
+       if (cpu_architecture() < CPU_ARCH_ARMv6)
+               return 0;
+
+       ptr = __alloc_from_contiguous(NULL, size, prot, &page);
+       if (ptr) {
+               coherent_head.vm_start = (unsigned long) ptr;
+               coherent_head.vm_end = (unsigned long) ptr + size;
+               printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
+                      (unsigned)size / 1024);
+               return 0;
+       }
+       printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
+              (unsigned)size / 1024);
+       return -ENOMEM;
+}
+/*
+ * CMA is activated by core_initcall, so we must be called after it.
+ */
+postcore_initcall(coherent_init);
+
+struct dma_contig_early_reserve {
+       phys_addr_t base;
+       unsigned long size;
+};
+
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
+
+static int dma_mmu_remap_num __initdata;
+
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+       dma_mmu_remap[dma_mmu_remap_num].base = base;
+       dma_mmu_remap[dma_mmu_remap_num].size = size;
+       dma_mmu_remap_num++;
+}
+
+void __init dma_contiguous_remap(void)
+{
+       int i;
+       for (i = 0; i < dma_mmu_remap_num; i++) {
+               phys_addr_t start = dma_mmu_remap[i].base;
+               phys_addr_t end = start + dma_mmu_remap[i].size;
+               struct map_desc map;
+               unsigned long addr;
+
+               if (end > arm_lowmem_limit)
+                       end = arm_lowmem_limit;
+               if (start >= end)
+                       return;
+
+               map.pfn = __phys_to_pfn(start);
+               map.virtual = __phys_to_virt(start);
+               map.length = end - start;
+               map.type = MT_MEMORY_DMA_READY;
+
+               /*
+                * Clear previous low-memory mapping
+                */
+               for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+                    addr += PMD_SIZE)
+                       pmd_clear(pmd_off_k(addr));
+
+               iotable_init(&map, 1);
+       }
+}
+
 static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
        const void *caller)
@@ -222,7 +417,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
        int bit;
 
        if (!consistent_pte) {
-               printk(KERN_ERR "%s: not initialised\n", __func__);
+               pr_err("%s: not initialised\n", __func__);
                dump_stack();
                return NULL;
        }
@@ -249,7 +444,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
                u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
 
                pte = consistent_pte[idx] + off;
-               c->vm_pages = page;
+               c->priv = page;
 
                do {
                        BUG_ON(!pte_none(*pte));
@@ -281,14 +476,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 
        c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
        if (!c) {
-               printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
+               pr_err("%s: trying to free invalid coherent area: %p\n",
                       __func__, cpu_addr);
                dump_stack();
                return;
        }
 
        if ((c->vm_end - c->vm_start) != size) {
-               printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+               pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
                       __func__, c->vm_end - c->vm_start, size);
                dump_stack();
                size = c->vm_end - c->vm_start;
@@ -310,8 +505,8 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
                }
 
                if (pte_none(pte) || !pte_present(pte))
-                       printk(KERN_CRIT "%s: bad page in kernel page table\n",
-                              __func__);
+                       pr_crit("%s: bad page in kernel page table\n",
+                               __func__);
        } while (size -= PAGE_SIZE);
 
        flush_tlb_kernel_range(c->vm_start, c->vm_end);
@@ -319,20 +514,182 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
        arm_vmregion_free(&consistent_head, c);
 }
 
+static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+                           void *data)
+{
+       struct page *page = virt_to_page(addr);
+       pgprot_t prot = *(pgprot_t *)data;
+
+       set_pte_ext(pte, mk_pte(page, prot), 0);
+       return 0;
+}
+
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+{
+       unsigned long start = (unsigned long) page_address(page);
+       unsigned end = start + size;
+
+       apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
+       dsb();
+       flush_tlb_kernel_range(start, end);
+}
+
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+                                pgprot_t prot, struct page **ret_page,
+                                const void *caller)
+{
+       struct page *page;
+       void *ptr;
+       page = __dma_alloc_buffer(dev, size, gfp);
+       if (!page)
+               return NULL;
+
+       ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
+       if (!ptr) {
+               __dma_free_buffer(page, size);
+               return NULL;
+       }
+
+       *ret_page = page;
+       return ptr;
+}
+
+static void *__alloc_from_pool(struct device *dev, size_t size,
+                              struct page **ret_page, const void *caller)
+{
+       struct arm_vmregion *c;
+       size_t align;
+
+       if (!coherent_head.vm_start) {
+               printk(KERN_ERR "%s: coherent pool not initialised!\n",
+                      __func__);
+               dump_stack();
+               return NULL;
+       }
+
+       /*
+        * Align the region allocation - allocations from pool are rather
+        * small, so align them to their order in pages, minimum is a page
+        * size. This helps reduce fragmentation of the DMA space.
+        */
+       align = PAGE_SIZE << get_order(size);
+       c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
+       if (c) {
+               void *ptr = (void *)c->vm_start;
+               struct page *page = virt_to_page(ptr);
+               *ret_page = page;
+               return ptr;
+       }
+       return NULL;
+}
+
+static int __free_from_pool(void *cpu_addr, size_t size)
+{
+       unsigned long start = (unsigned long)cpu_addr;
+       unsigned long end = start + size;
+       struct arm_vmregion *c;
+
+       if (start < coherent_head.vm_start || end > coherent_head.vm_end)
+               return 0;
+
+       c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start);
+
+       if ((c->vm_end - c->vm_start) != size) {
+               printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+                      __func__, c->vm_end - c->vm_start, size);
+               dump_stack();
+               size = c->vm_end - c->vm_start;
+       }
+
+       arm_vmregion_free(&coherent_head, c);
+       return 1;
+}
+
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+                                    pgprot_t prot, struct page **ret_page)
+{
+       unsigned long order = get_order(size);
+       size_t count = size >> PAGE_SHIFT;
+       struct page *page;
+
+       page = dma_alloc_from_contiguous(dev, count, order);
+       if (!page)
+               return NULL;
+
+       __dma_clear_buffer(page, size);
+       __dma_remap(page, size, prot);
+
+       *ret_page = page;
+       return page_address(page);
+}
+
+static void __free_from_contiguous(struct device *dev, struct page *page,
+                                  size_t size)
+{
+       __dma_remap(page, size, pgprot_kernel);
+       dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+}
+
+static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+{
+       prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
+                           pgprot_writecombine(prot) :
+                           pgprot_dmacoherent(prot);
+       return prot;
+}
+
+#define nommu() 0
+
 #else  /* !CONFIG_MMU */
 
-#define __dma_alloc_remap(page, size, gfp, prot, c)    page_address(page)
-#define __dma_free_remap(addr, size)                   do { } while (0)
+#define nommu() 1
+
+#define __get_dma_pgprot(attrs, prot)  __pgprot(0)
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)     NULL
+#define __alloc_from_pool(dev, size, ret_page, c)              NULL
+#define __alloc_from_contiguous(dev, size, prot, ret)          NULL
+#define __free_from_pool(cpu_addr, size)                       0
+#define __free_from_contiguous(dev, page, size)                        do { } while (0)
+#define __dma_free_remap(cpu_addr, size)                       do { } while (0)
 
 #endif /* CONFIG_MMU */
 
-static void *
-__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
-           pgprot_t prot, const void *caller)
+static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
+                                  struct page **ret_page)
+{
+       struct page *page;
+       page = __dma_alloc_buffer(dev, size, gfp);
+       if (!page)
+               return NULL;
+
+       *ret_page = page;
+       return page_address(page);
+}
+
+
+
+static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+                        gfp_t gfp, pgprot_t prot, const void *caller)
 {
+       u64 mask = get_coherent_dma_mask(dev);
        struct page *page;
        void *addr;
 
+#ifdef CONFIG_DMA_API_DEBUG
+       u64 limit = (mask + 1) & ~mask;
+       if (limit && size >= limit) {
+               dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
+                       size, mask);
+               return NULL;
+       }
+#endif
+
+       if (!mask)
+               return NULL;
+
+       if (mask < 0xffffffffULL)
+               gfp |= GFP_DMA;
+
        /*
         * Following is a work-around (a.k.a. hack) to prevent pages
         * with __GFP_COMP being passed to split_page() which cannot
@@ -342,22 +699,20 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
         */
        gfp &= ~(__GFP_COMP);
 
-       *handle = ~0;
+       *handle = DMA_ERROR_CODE;
        size = PAGE_ALIGN(size);
 
-       page = __dma_alloc_buffer(dev, size, gfp);
-       if (!page)
-               return NULL;
-
-       if (!arch_is_coherent())
-               addr = __dma_alloc_remap(page, size, gfp, prot, caller);
+       if (arch_is_coherent() || nommu())
+               addr = __alloc_simple_buffer(dev, size, gfp, &page);
+       else if (cpu_architecture() < CPU_ARCH_ARMv6)
+               addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+       else if (gfp & GFP_ATOMIC)
+               addr = __alloc_from_pool(dev, size, &page, caller);
        else
-               addr = page_address(page);
+               addr = __alloc_from_contiguous(dev, size, prot, &page);
 
        if (addr)
                *handle = pfn_to_dma(dev, page_to_pfn(page));
-       else
-               __dma_free_buffer(page, size);
 
        return addr;
 }
@@ -366,138 +721,71 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
  * Allocate DMA-coherent memory space and return both the kernel remapped
  * virtual and bus address for that space.
  */
-void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+                   gfp_t gfp, struct dma_attrs *attrs)
 {
+       pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
        void *memory;
 
        if (dma_alloc_from_coherent(dev, size, handle, &memory))
                return memory;
 
-       return __dma_alloc(dev, size, handle, gfp,
-                          pgprot_dmacoherent(pgprot_kernel),
+       return __dma_alloc(dev, size, handle, gfp, prot,
                           __builtin_return_address(0));
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
 /*
- * Allocate a writecombining region, in much the same way as
- * dma_alloc_coherent above.
+ * Create userspace mapping for the DMA-coherent memory.
  */
-void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
-{
-       return __dma_alloc(dev, size, handle, gfp,
-                          pgprot_writecombine(pgprot_kernel),
-                          __builtin_return_address(0));
-}
-EXPORT_SYMBOL(dma_alloc_writecombine);
-
-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
-                   void *cpu_addr, dma_addr_t dma_addr, size_t size)
+int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+                void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                struct dma_attrs *attrs)
 {
        int ret = -ENXIO;
 #ifdef CONFIG_MMU
-       unsigned long user_size, kern_size;
-       struct arm_vmregion *c;
-
-       user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+       unsigned long pfn = dma_to_pfn(dev, dma_addr);
+       vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
 
-       c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
-       if (c) {
-               unsigned long off = vma->vm_pgoff;
+       if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+               return ret;
 
-               kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
-
-               if (off < kern_size &&
-                   user_size <= (kern_size - off)) {
-                       ret = remap_pfn_range(vma, vma->vm_start,
-                                             page_to_pfn(c->vm_pages) + off,
-                                             user_size << PAGE_SHIFT,
-                                             vma->vm_page_prot);
-               }
-       }
+       ret = remap_pfn_range(vma, vma->vm_start,
+                             pfn + vma->vm_pgoff,
+                             vma->vm_end - vma->vm_start,
+                             vma->vm_page_prot);
 #endif /* CONFIG_MMU */
 
        return ret;
 }
 
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-                     void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-       vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
-       return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_coherent);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-                         void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-       return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_writecombine);
-
 /*
- * free a page as defined by the above mapping.
- * Must not be called with IRQs disabled.
+ * Free a buffer as defined by the above mapping.
  */
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+                 dma_addr_t handle, struct dma_attrs *attrs)
 {
-       WARN_ON(irqs_disabled());
+       struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 
        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                return;
 
        size = PAGE_ALIGN(size);
 
-       if (!arch_is_coherent())
+       if (arch_is_coherent() || nommu()) {
+               __dma_free_buffer(page, size);
+       } else if (cpu_architecture() < CPU_ARCH_ARMv6) {
                __dma_free_remap(cpu_addr, size);
-
-       __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
-}
-EXPORT_SYMBOL(dma_free_coherent);
-
-/*
- * Make an area consistent for devices.
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
- */
-void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
-       enum dma_data_direction dir)
-{
-       unsigned long paddr;
-
-       BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
-       dmac_map_area(kaddr, size, dir);
-
-       paddr = __pa(kaddr);
-       if (dir == DMA_FROM_DEVICE) {
-               outer_inv_range(paddr, paddr + size);
+               __dma_free_buffer(page, size);
        } else {
-               outer_clean_range(paddr, paddr + size);
+               if (__free_from_pool(cpu_addr, size))
+                       return;
+               /*
+                * Non-atomic allocations cannot be freed with IRQs disabled
+                */
+               WARN_ON(irqs_disabled());
+               __free_from_contiguous(dev, page, size);
        }
-       /* FIXME: non-speculating: flush on bidirectional mappings? */
 }
-EXPORT_SYMBOL(___dma_single_cpu_to_dev);
-
-void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
-       enum dma_data_direction dir)
-{
-       BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
-       /* FIXME: non-speculating: not required */
-       /* don't bother invalidating if DMA to device */
-       if (dir != DMA_TO_DEVICE) {
-               unsigned long paddr = __pa(kaddr);
-               outer_inv_range(paddr, paddr + size);
-       }
-
-       dmac_unmap_area(kaddr, size, dir);
-}
-EXPORT_SYMBOL(___dma_single_dev_to_cpu);
 
 static void dma_cache_maint_page(struct page *page, unsigned long offset,
        size_t size, enum dma_data_direction dir,
@@ -543,7 +831,13 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
        } while (left);
 }
 
-void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
 {
        unsigned long paddr;
@@ -558,9 +852,8 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
        }
        /* FIXME: non-speculating: flush on bidirectional mappings? */
 }
-EXPORT_SYMBOL(___dma_page_cpu_to_dev);
 
-void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
 {
        unsigned long paddr = page_to_phys(page) + off;
@@ -578,10 +871,9 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
        if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
                set_bit(PG_dcache_clean, &page->flags);
 }
-EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
 /**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map
@@ -596,32 +888,32 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
  * Device ownership issues as mentioned for dma_map_single are the same
  * here.
  */
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir)
+int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+               enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+       struct dma_map_ops *ops = get_dma_ops(dev);
        struct scatterlist *s;
        int i, j;
 
-       BUG_ON(!valid_dma_direction(dir));
-
        for_each_sg(sg, s, nents, i) {
-               s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
-                                               s->length, dir);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+               s->dma_length = s->length;
+#endif
+               s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
+                                               s->length, dir, attrs);
                if (dma_mapping_error(dev, s->dma_address))
                        goto bad_mapping;
        }
-       debug_dma_map_sg(dev, sg, nents, nents, dir);
        return nents;
 
  bad_mapping:
        for_each_sg(sg, s, i, j)
-               __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+               ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
        return 0;
 }
-EXPORT_SYMBOL(dma_map_sg);
 
 /**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
@@ -630,70 +922,55 @@ EXPORT_SYMBOL(dma_map_sg);
  * Unmap a set of streaming mode DMA translations.  Again, CPU access
  * rules concerning calls here are the same as for dma_unmap_single().
  */
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir)
+void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+               enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+       struct dma_map_ops *ops = get_dma_ops(dev);
        struct scatterlist *s;
-       int i;
 
-       debug_dma_unmap_sg(dev, sg, nents, dir);
+       int i;
 
        for_each_sg(sg, s, nents, i)
-               __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+               ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
 }
-EXPORT_SYMBOL(dma_unmap_sg);
 
 /**
- * dma_sync_sg_for_cpu
+ * arm_dma_sync_sg_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map (returned from dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  */
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir)
 {
+       struct dma_map_ops *ops = get_dma_ops(dev);
        struct scatterlist *s;
        int i;
 
-       for_each_sg(sg, s, nents, i) {
-               if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
-                                           sg_dma_len(s), dir))
-                       continue;
-
-               __dma_page_dev_to_cpu(sg_page(s), s->offset,
-                                     s->length, dir);
-       }
-
-       debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+       for_each_sg(sg, s, nents, i)
+               ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
+                                        dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
 /**
- * dma_sync_sg_for_device
+ * arm_dma_sync_sg_for_device
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
  * @nents: number of buffers to map (returned from dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  */
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir)
 {
+       struct dma_map_ops *ops = get_dma_ops(dev);
        struct scatterlist *s;
        int i;
 
-       for_each_sg(sg, s, nents, i) {
-               if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
-                                       sg_dma_len(s), dir))
-                       continue;
-
-               __dma_page_cpu_to_dev(sg_page(s), s->offset,
-                                     s->length, dir);
-       }
-
-       debug_dma_sync_sg_for_device(dev, sg, nents, dir);
+       for_each_sg(sg, s, nents, i)
+               ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
+                                           dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_device);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -709,18 +986,15 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);
 
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
 {
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
 
-#ifndef CONFIG_DMABOUNCE
        *dev->dma_mask = dma_mask;
-#endif
 
        return 0;
 }
-EXPORT_SYMBOL(dma_set_mask);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES     4096
 
@@ -733,3 +1007,679 @@ static int __init dma_debug_do_init(void)
        return 0;
 }
 fs_initcall(dma_debug_do_init);
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+
+/* IOMMU */
+
+/*
+ * __alloc_iova - reserve a contiguous range of IO virtual addresses
+ * @mapping: iova space to allocate from
+ * @size: number of bytes required (rounded up to whole pages)
+ *
+ * The bitmap tracks chunks of (1 << mapping->order) pages each.
+ * Returns the iova of the start of the range, or DMA_ERROR_CODE if
+ * no sufficiently large free area exists.
+ */
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
+                                     size_t size)
+{
+       unsigned int order = get_order(size);
+       unsigned int align = 0;
+       unsigned int count, start;
+       unsigned long flags;
+
+       /* number of allocation-granularity chunks needed to cover size */
+       count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+                (1 << mapping->order) - 1) >> mapping->order;
+
+       /* align requests larger than one chunk to their own size */
+       if (order > mapping->order)
+               align = (1 << (order - mapping->order)) - 1;
+
+       spin_lock_irqsave(&mapping->lock, flags);
+       start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+                                          count, align);
+       if (start > mapping->bits) {
+               spin_unlock_irqrestore(&mapping->lock, flags);
+               return DMA_ERROR_CODE;
+       }
+
+       bitmap_set(mapping->bitmap, start, count);
+       spin_unlock_irqrestore(&mapping->lock, flags);
+
+       return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+}
+
+/*
+ * __free_iova - release a range previously reserved by __alloc_iova()
+ * @mapping: iova space the range belongs to
+ * @addr: start iova of the range
+ * @size: size in bytes (same value as passed to __alloc_iova())
+ */
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
+                              dma_addr_t addr, size_t size)
+{
+       /* convert iova back into a chunk index into the bitmap */
+       unsigned int start = (addr - mapping->base) >>
+                            (mapping->order + PAGE_SHIFT);
+       unsigned int count = ((size >> PAGE_SHIFT) +
+                             (1 << mapping->order) - 1) >> mapping->order;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mapping->lock, flags);
+       bitmap_clear(mapping->bitmap, start, count);
+       spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+/*
+ * __iommu_alloc_buffer - allocate @size bytes as an array of individual
+ * pages (not necessarily physically contiguous; the IOMMU makes them
+ * appear contiguous in device address space later).
+ *
+ * Pages are grabbed in the largest power-of-two groups that still fit
+ * the remaining count, falling back to smaller orders on failure, and
+ * each group is split into order-0 pages.  Returns the page-pointer
+ * array (kzalloc'd or vzalloc'd depending on its size) or NULL.
+ */
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+{
+       struct page **pages;
+       int count = size >> PAGE_SHIFT;
+       int array_size = count * sizeof(struct page *);
+       int i = 0;
+
+       if (array_size <= PAGE_SIZE)
+               pages = kzalloc(array_size, gfp);
+       else
+               pages = vzalloc(array_size);
+       if (!pages)
+               return NULL;
+
+       while (count) {
+               int j, order = __ffs(count);
+
+               pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+               while (!pages[i] && order)
+                       pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+               if (!pages[i])
+                       goto error;
+
+               if (order)
+                       split_page(pages[i], order);
+               j = 1 << order;
+               while (--j)
+                       pages[i + j] = pages[i] + j;
+
+               __dma_clear_buffer(pages[i], PAGE_SIZE << order);
+               i += 1 << order;
+               count -= 1 << order;
+       }
+
+       return pages;
+error:
+       /*
+        * Use "i--" (post-decrement) so pages[0] is freed as well;
+        * "--i" would skip it and leak the first page.
+        */
+       while (i--)
+               if (pages[i])
+                       __free_pages(pages[i], 0);
+       /* must mirror the allocation above: <= PAGE_SIZE means kzalloc'd */
+       if (array_size <= PAGE_SIZE)
+               kfree(pages);
+       else
+               vfree(pages);
+       return NULL;
+}
+
+/*
+ * __iommu_free_buffer - free all pages in a buffer allocated by
+ * __iommu_alloc_buffer(), then free the page-pointer array itself.
+ *
+ * The array was kzalloc'd when array_size <= PAGE_SIZE and vzalloc'd
+ * otherwise, so the free path must use the matching condition
+ * (the boundary case array_size == PAGE_SIZE must go to kfree).
+ */
+static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+{
+       int count = size >> PAGE_SHIFT;
+       int array_size = count * sizeof(struct page *);
+       int i;
+       for (i = 0; i < count; i++)
+               if (pages[i])
+                       __free_pages(pages[i], 0);
+       if (array_size <= PAGE_SIZE)
+               kfree(pages);
+       else
+               vfree(pages);
+       return 0;
+}
+
+/*
+ * Create a CPU mapping for the specified pages
+ */
+/*
+ * __iommu_alloc_remap - build a virtually-contiguous CPU mapping for the
+ * given page array in the consistent (DMA) mapping region.
+ *
+ * @pages: array of order-0 pages from __iommu_alloc_buffer()
+ * @size: total size in bytes (page aligned)
+ * @gfp: allocation flags for the vmregion bookkeeping
+ * @prot: page protection for the new PTEs
+ *
+ * Returns the new virtual address, or NULL on failure.  The page array
+ * pointer is stashed in c->priv so free/mmap paths can retrieve it.
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
+{
+       struct arm_vmregion *c;
+       size_t align;
+       size_t count = size >> PAGE_SHIFT;
+       int bit;
+
+       /* consistent_pte[] is set up at init; bail loudly if called early */
+       if (!consistent_pte[0]) {
+               pr_err("%s: not initialised\n", __func__);
+               dump_stack();
+               return NULL;
+       }
+
+       /*
+        * Align the virtual region allocation - maximum alignment is
+        * a section size, minimum is a page size.  This helps reduce
+        * fragmentation of the DMA space, and also prevents allocations
+        * smaller than a section from crossing a section boundary.
+        */
+       bit = fls(size - 1);
+       if (bit > SECTION_SHIFT)
+               bit = SECTION_SHIFT;
+       align = 1 << bit;
+
+       /*
+        * Allocate a virtual address in the consistent mapping region.
+        */
+       c = arm_vmregion_alloc(&consistent_head, align, size,
+                           gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
+       if (c) {
+               pte_t *pte;
+               int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+               int i = 0;
+               u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+
+               pte = consistent_pte[idx] + off;
+               c->priv = pages;
+
+               /* install one PTE per page, walking to the next PTE table
+                * whenever the current one is exhausted */
+               do {
+                       BUG_ON(!pte_none(*pte));
+
+                       set_pte_ext(pte, mk_pte(pages[i], prot), 0);
+                       pte++;
+                       off++;
+                       i++;
+                       if (off >= PTRS_PER_PTE) {
+                               off = 0;
+                               pte = consistent_pte[++idx];
+                       }
+               } while (i < count);
+
+               /* ensure the PTE writes are visible before use */
+               dsb();
+
+               return (void *)c->vm_start;
+       }
+       return NULL;
+}
+
+/*
+ * Create a mapping in device IO address space for specified pages
+ */
+/*
+ * __iommu_create_mapping - map the page array into the device's IO
+ * address space, returning the base iova (or DMA_ERROR_CODE).
+ *
+ * Physically contiguous runs of pages are detected and mapped with a
+ * single iommu_map() call to minimise the number of IOMMU operations.
+ * On failure everything mapped so far is torn down.
+ */
+static dma_addr_t
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       dma_addr_t dma_addr, iova;
+       int i, ret = DMA_ERROR_CODE;
+
+       dma_addr = __alloc_iova(mapping, size);
+       if (dma_addr == DMA_ERROR_CODE)
+               return dma_addr;
+
+       iova = dma_addr;
+       for (i = 0; i < count; ) {
+               unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+               phys_addr_t phys = page_to_phys(pages[i]);
+               unsigned int len, j;
+
+               /* extend the run while page frames stay contiguous */
+               for (j = i + 1; j < count; j++, next_pfn++)
+                       if (page_to_pfn(pages[j]) != next_pfn)
+                               break;
+
+               len = (j - i) << PAGE_SHIFT;
+               ret = iommu_map(mapping->domain, iova, phys, len, 0);
+               if (ret < 0)
+                       goto fail;
+               iova += len;
+               i = j;
+       }
+       return dma_addr;
+fail:
+       /* unmap only the part that was successfully mapped */
+       iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
+       __free_iova(mapping, dma_addr, size);
+       return DMA_ERROR_CODE;
+}
+
+/*
+ * __iommu_remove_mapping - undo __iommu_create_mapping()/__map_sg_chunk():
+ * unmap the iova range from the IOMMU domain and return it to the
+ * allocator.  Always returns 0.
+ */
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+       /*
+        * add optional in-page offset from iova to size and align
+        * result to page size
+        */
+       size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+       iova &= PAGE_MASK;
+
+       iommu_unmap(mapping->domain, iova, size);
+       __free_iova(mapping, iova, size);
+       return 0;
+}
+
+/*
+ * arm_iommu_alloc_attrs - IOMMU-aware dma_alloc_coherent() backend.
+ *
+ * Three stages, each unwound on failure of a later one:
+ *  1. allocate the backing pages,
+ *  2. map them contiguously into the device's IO address space (*handle),
+ *  3. remap them contiguously into the kernel's virtual space (return).
+ * Returns the CPU virtual address, or NULL (with *handle set to
+ * DMA_ERROR_CODE) on failure.
+ */
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+           dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+       pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+       struct page **pages;
+       void *addr = NULL;
+
+       *handle = DMA_ERROR_CODE;
+       size = PAGE_ALIGN(size);
+
+       pages = __iommu_alloc_buffer(dev, size, gfp);
+       if (!pages)
+               return NULL;
+
+       *handle = __iommu_create_mapping(dev, pages, size);
+       if (*handle == DMA_ERROR_CODE)
+               goto err_buffer;
+
+       addr = __iommu_alloc_remap(pages, size, gfp, prot);
+       if (!addr)
+               goto err_mapping;
+
+       return addr;
+
+err_mapping:
+       __iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+       __iommu_free_buffer(dev, pages, size);
+       return NULL;
+}
+
+/*
+ * arm_iommu_mmap_attrs - mmap() a buffer from arm_iommu_alloc_attrs()
+ * into userspace by inserting its individual pages into the vma.
+ *
+ * The page array is recovered from the vmregion's priv pointer (stored
+ * by __iommu_alloc_remap()).  If the cpu_addr is not a known consistent
+ * mapping, the function quietly returns 0 without mapping anything.
+ */
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+                   void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                   struct dma_attrs *attrs)
+{
+       struct arm_vmregion *c;
+
+       vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+       c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+
+       if (c) {
+               struct page **pages = c->priv;
+
+               unsigned long uaddr = vma->vm_start;
+               unsigned long usize = vma->vm_end - vma->vm_start;
+               int i = 0;
+
+               /* insert pages one by one until the whole vma is covered */
+               do {
+                       int ret;
+
+                       ret = vm_insert_page(vma, uaddr, pages[i++]);
+                       if (ret) {
+                               pr_err("Remapping memory, error: %d\n", ret);
+                               return ret;
+                       }
+
+                       uaddr += PAGE_SIZE;
+                       usize -= PAGE_SIZE;
+               } while (usize > 0);
+       }
+       return 0;
+}
+
+/*
+ * Free a buffer allocated by arm_iommu_alloc_attrs() above.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+                         dma_addr_t handle, struct dma_attrs *attrs)
+{
+       struct arm_vmregion *c;
+       size = PAGE_ALIGN(size);
+
+       /* locate the consistent mapping to recover the page array;
+        * an unknown cpu_addr is silently ignored */
+       c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+       if (c) {
+               struct page **pages = c->priv;
+               /* tear down in reverse order of arm_iommu_alloc_attrs():
+                * CPU mapping, then IO mapping, then the pages themselves */
+               __dma_free_remap(cpu_addr, size);
+               __iommu_remove_mapping(dev, handle, size);
+               __iommu_free_buffer(dev, pages, size);
+       }
+}
+
+/*
+ * Map a part of the scatter-gather list into contiguous io address space
+ */
+/*
+ * __map_sg_chunk - map one run of scatterlist entries into a single
+ * contiguous iova range of @size bytes, starting at *handle.
+ *
+ * Each entry is mapped page-aligned (offset + length rounded up), and
+ * CPU caches are maintained for non-coherent systems before the device
+ * gains access.  Returns 0 on success or a negative errno, with any
+ * partial mapping torn down.
+ */
+static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
+                         size_t size, dma_addr_t *handle,
+                         enum dma_data_direction dir)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova, iova_base;
+       int ret = 0;
+       unsigned int count;
+       struct scatterlist *s;
+
+       size = PAGE_ALIGN(size);
+       *handle = DMA_ERROR_CODE;
+
+       iova_base = iova = __alloc_iova(mapping, size);
+       if (iova == DMA_ERROR_CODE)
+               return -ENOMEM;
+
+       /* walk entries until the requested number of pages is mapped */
+       for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
+               phys_addr_t phys = page_to_phys(sg_page(s));
+               unsigned int len = PAGE_ALIGN(s->offset + s->length);
+
+               if (!arch_is_coherent())
+                       __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+
+               ret = iommu_map(mapping->domain, iova, phys, len, 0);
+               if (ret < 0)
+                       goto fail;
+               count += len >> PAGE_SHIFT;
+               iova += len;
+       }
+       *handle = iova_base;
+
+       return 0;
+fail:
+       iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
+       __free_iova(mapping, iova_base, size);
+       return ret;
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+                    enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       /* s walks the input entries; dma walks the output (merged) entries;
+        * start marks the first entry of the chunk being accumulated */
+       struct scatterlist *s = sg, *dma = sg, *start = sg;
+       int i, count = 0;
+       unsigned int offset = s->offset;
+       unsigned int size = s->offset + s->length;
+       unsigned int max = dma_get_max_seg_size(dev);
+
+       for (i = 1; i < nents; i++) {
+               s = sg_next(s);
+
+               s->dma_address = DMA_ERROR_CODE;
+               s->dma_length = 0;
+
+               /* flush the accumulated chunk when this entry cannot be
+                * merged: it has an in-page offset, the running size is
+                * not page aligned, or the device's max segment size
+                * would be exceeded */
+               if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
+                       if (__map_sg_chunk(dev, start, size, &dma->dma_address,
+                           dir) < 0)
+                               goto bad_mapping;
+
+                       dma->dma_address += offset;
+                       dma->dma_length = size - offset;
+
+                       size = offset = s->offset;
+                       start = s;
+                       dma = sg_next(dma);
+                       count += 1;
+               }
+               size += s->length;
+       }
+       /* map the final (or only) chunk */
+       if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
+               goto bad_mapping;
+
+       dma->dma_address += offset;
+       dma->dma_length = size - offset;
+
+       return count+1;
+
+bad_mapping:
+       /* unwind every chunk that was successfully mapped */
+       for_each_sg(sg, s, count, i)
+               __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
+       return 0;
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+                       enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i) {
+               /* entries merged away by arm_iommu_map_sg() have
+                * dma_length == 0 and carry no mapping of their own */
+               if (sg_dma_len(s))
+                       __iommu_remove_mapping(dev, sg_dma_address(s),
+                                              sg_dma_len(s));
+               /* hand the buffer back to the CPU on non-coherent systems */
+               if (!arch_is_coherent())
+                       __dma_page_dev_to_cpu(sg_page(s), s->offset,
+                                             s->length, dir);
+       }
+}
+
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+                       int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *s;
+       int i;
+
+       /* cache maintenance only; the IOMMU mapping itself is untouched */
+       for_each_sg(sg, s, nents, i)
+               if (!arch_is_coherent())
+                       __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+
+}
+
+/**
+ * arm_iommu_sync_sg_for_device
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+                       int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *s;
+       int i;
+
+       /* cache maintenance only; the IOMMU mapping itself is untouched */
+       for_each_sg(sg, s, nents, i)
+               if (!arch_is_coherent())
+                       __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+}
+
+
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+            unsigned long offset, size_t size, enum dma_data_direction dir,
+            struct dma_attrs *attrs)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t dma_addr;
+       /* map whole pages; the in-page offset is re-added to the result */
+       int ret, len = PAGE_ALIGN(size + offset);
+
+       /* make the buffer visible to the device on non-coherent systems */
+       if (!arch_is_coherent())
+               __dma_page_cpu_to_dev(page, offset, size, dir);
+
+       dma_addr = __alloc_iova(mapping, len);
+       if (dma_addr == DMA_ERROR_CODE)
+               return dma_addr;
+
+       ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
+       if (ret < 0)
+               goto fail;
+
+       return dma_addr + offset;
+fail:
+       __free_iova(mapping, dma_addr, len);
+       return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova = handle & PAGE_MASK;
+       /* resolve the backing page through the IOMMU to do cache upkeep */
+       struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+       int offset = handle & ~PAGE_MASK;
+       int len = PAGE_ALIGN(size + offset);
+
+       if (!iova)
+               return;
+
+       /* hand the buffer back to the CPU on non-coherent systems */
+       if (!arch_is_coherent())
+               __dma_page_dev_to_cpu(page, offset, size, dir);
+
+       iommu_unmap(mapping->domain, iova, len);
+       __free_iova(mapping, iova, len);
+}
+
+/*
+ * arm_iommu_sync_single_for_cpu - cache maintenance before CPU access
+ * to a buffer mapped with arm_iommu_map_page().
+ */
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova = handle & PAGE_MASK;
+       /* look up the backing page via the IOMMU's iova->phys translation */
+       struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+       unsigned int offset = handle & ~PAGE_MASK;
+
+       if (!iova)
+               return;
+
+       if (!arch_is_coherent())
+               __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+/*
+ * arm_iommu_sync_single_for_device - cache maintenance before device
+ * access to a buffer mapped with arm_iommu_map_page().
+ *
+ * NOTE(review): unlike arm_iommu_sync_single_for_cpu() above, this
+ * variant does not guard the flush with arch_is_coherent() — confirm
+ * whether that asymmetry is intentional.
+ */
+static void arm_iommu_sync_single_for_device(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova = handle & PAGE_MASK;
+       struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+       unsigned int offset = handle & ~PAGE_MASK;
+
+       if (!iova)
+               return;
+
+       __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+/*
+ * IOMMU-aware dma_map_ops, installed per device by
+ * arm_iommu_attach_device() via set_dma_ops().
+ */
+struct dma_map_ops iommu_ops = {
+       .alloc          = arm_iommu_alloc_attrs,
+       .free           = arm_iommu_free_attrs,
+       .mmap           = arm_iommu_mmap_attrs,
+
+       .map_page               = arm_iommu_map_page,
+       .unmap_page             = arm_iommu_unmap_page,
+       .sync_single_for_cpu    = arm_iommu_sync_single_for_cpu,
+       .sync_single_for_device = arm_iommu_sync_single_for_device,
+
+       .map_sg                 = arm_iommu_map_sg,
+       .unmap_sg               = arm_iommu_unmap_sg,
+       .sync_sg_for_cpu        = arm_iommu_sync_sg_for_cpu,
+       .sync_sg_for_device     = arm_iommu_sync_sg_for_device,
+};
+
+/**
+ * arm_iommu_create_mapping
+ * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @base: start address of the valid IO address space
+ * @size: size of the valid IO address space
+ * @order: accuracy of the IO addresses allocations
+ *
+ * Creates a mapping structure which holds information about used/unused
+ * IO address ranges, which is required to perform memory allocation and
+ * mapping with IOMMU aware functions.
+ *
+ * The client device need to be attached to the mapping with
+ * arm_iommu_attach_device function.
+ */
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+                        int order)
+{
+       /* iova space is managed in chunks of (1 << order) pages each */
+       unsigned int count = size >> (PAGE_SHIFT + order);
+       unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+       struct dma_iommu_mapping *mapping;
+       int err = -ENOMEM;
+
+       /* reject a size too small to hold even one chunk */
+       if (!count)
+               return ERR_PTR(-EINVAL);
+
+       mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+       if (!mapping)
+               goto err;
+
+       mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+       if (!mapping->bitmap)
+               goto err2;
+
+       mapping->base = base;
+       mapping->bits = BITS_PER_BYTE * bitmap_size;
+       mapping->order = order;
+       spin_lock_init(&mapping->lock);
+
+       mapping->domain = iommu_domain_alloc(bus);
+       if (!mapping->domain)
+               goto err3;
+
+       /* refcounted: dropped by arm_iommu_release_mapping() */
+       kref_init(&mapping->kref);
+       return mapping;
+err3:
+       kfree(mapping->bitmap);
+err2:
+       kfree(mapping);
+err:
+       return ERR_PTR(err);
+}
+
+/* kref release callback: destroy the domain and free all bookkeeping */
+static void release_iommu_mapping(struct kref *kref)
+{
+       struct dma_iommu_mapping *mapping =
+               container_of(kref, struct dma_iommu_mapping, kref);
+
+       iommu_domain_free(mapping->domain);
+       kfree(mapping->bitmap);
+       kfree(mapping);
+}
+
+/*
+ * arm_iommu_release_mapping - drop one reference to a mapping created
+ * by arm_iommu_create_mapping(); the last put frees it.  NULL is a no-op.
+ */
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+       if (mapping)
+               kref_put(&mapping->kref, release_iommu_mapping);
+}
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ *     arm_iommu_create_mapping)
+ *
+ * Attaches specified io address space mapping to the provided device,
+ * this replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version. More than one client might be attached to
+ * the same io address space mapping.
+ */
+int arm_iommu_attach_device(struct device *dev,
+                           struct dma_iommu_mapping *mapping)
+{
+       int err;
+
+       err = iommu_attach_device(mapping->domain, dev);
+       if (err)
+               return err;
+
+       /* the device holds a reference to the mapping while attached */
+       kref_get(&mapping->kref);
+       dev->archdata.mapping = mapping;
+       /* switch the device over to the IOMMU-aware dma_map_ops */
+       set_dma_ops(dev, &iommu_ops);
+
+       pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
+       return 0;
+}
+
+#endif
index 8f5813bbffb560b15b44974ff3543f0b5457e026..c21d06c7dd7ec0ae4bdf036b22b74983aa2e5c6f 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
+#include <linux/dma-contiguous.h>
 
 #include <asm/mach-types.h>
 #include <asm/memblock.h>
@@ -226,6 +227,17 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 }
 #endif
 
+/*
+ * setup_dma_zone - record the machine's DMA zone size and derive the
+ * highest DMA-able physical address (arm_dma_limit).  With no
+ * machine-specific dma_zone_size, all 32-bit addresses are DMA-able.
+ * No-op unless CONFIG_ZONE_DMA is enabled.
+ */
+void __init setup_dma_zone(struct machine_desc *mdesc)
+{
+#ifdef CONFIG_ZONE_DMA
+       if (mdesc->dma_zone_size) {
+               arm_dma_zone_size = mdesc->dma_zone_size;
+               arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+       } else
+               arm_dma_limit = 0xffffffff;
+#endif
+}
+
 static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
        unsigned long max_high)
 {
@@ -273,12 +285,9 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
-       if (arm_dma_zone_size) {
+       if (arm_dma_zone_size)
                arm_adjust_dma_zone(zone_size, zhole_size,
                        arm_dma_zone_size >> PAGE_SHIFT);
-               arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
-       } else
-               arm_dma_limit = 0xffffffff;
 #endif
 
        free_area_init_node(0, zone_size, min, zhole_size);
@@ -364,6 +373,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
        if (mdesc->reserve)
                mdesc->reserve();
 
+       /*
+        * reserve memory for DMA contiguous allocations,
+        * must come from DMA area inside low memory
+        */
+       dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
+
        arm_memblock_steal_permitted = false;
        memblock_allow_resize();
        memblock_dump_all();
index 27f4a619b35d1c0039d7883c7804994c0540cdc6..93dc0c17cdcbddf0f5294be8ea6c022aa693ca1f 100644 (file)
@@ -67,5 +67,8 @@ extern u32 arm_dma_limit;
 #define arm_dma_limit ((u32)~0)
 #endif
 
+extern phys_addr_t arm_lowmem_limit;
+
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);
+void dma_contiguous_remap(void);
index aa78de8bfdd3b86e6b34d7cbecd2de3393e50ba5..e5dad60b558b468315294b2c8b95c70193b6f74d 100644 (file)
@@ -288,6 +288,11 @@ static struct mem_type mem_types[] = {
                                PMD_SECT_UNCACHED | PMD_SECT_XN,
                .domain    = DOMAIN_KERNEL,
        },
+       [MT_MEMORY_DMA_READY] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+               .prot_l1   = PMD_TYPE_TABLE,
+               .domain    = DOMAIN_KERNEL,
+       },
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -429,6 +434,7 @@ static void __init build_mem_type_table(void)
        if (arch_is_coherent() && cpu_is_xsc3()) {
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
                mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+               mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
                mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
                mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
        }
@@ -460,6 +466,7 @@ static void __init build_mem_type_table(void)
                        mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
                        mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+                       mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
                        mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
                }
@@ -512,6 +519,7 @@ static void __init build_mem_type_table(void)
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+       mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
        mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
        mem_types[MT_ROM].prot_sect |= cp->pmd;
 
@@ -596,7 +604,7 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
         * L1 entries, whereas PGDs refer to a group of L1 entries making
         * up one logical pointer to an L2 table.
         */
-       if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+       if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
                pmd_t *p = pmd;
 
 #ifndef CONFIG_ARM_LPAE
@@ -814,7 +822,7 @@ static int __init early_vmalloc(char *arg)
 }
 early_param("vmalloc", early_vmalloc);
 
-static phys_addr_t lowmem_limit __initdata = 0;
+phys_addr_t arm_lowmem_limit __initdata = 0;
 
 void __init sanity_check_meminfo(void)
 {
@@ -897,8 +905,8 @@ void __init sanity_check_meminfo(void)
                        bank->size = newsize;
                }
 #endif
-               if (!bank->highmem && bank->start + bank->size > lowmem_limit)
-                       lowmem_limit = bank->start + bank->size;
+               if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
+                       arm_lowmem_limit = bank->start + bank->size;
 
                j++;
        }
@@ -923,8 +931,8 @@ void __init sanity_check_meminfo(void)
        }
 #endif
        meminfo.nr_banks = j;
-       high_memory = __va(lowmem_limit - 1) + 1;
-       memblock_set_current_limit(lowmem_limit);
+       high_memory = __va(arm_lowmem_limit - 1) + 1;
+       memblock_set_current_limit(arm_lowmem_limit);
 }
 
 static inline void prepare_page_table(void)
@@ -949,8 +957,8 @@ static inline void prepare_page_table(void)
         * Find the end of the first block of lowmem.
         */
        end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
-       if (end >= lowmem_limit)
-               end = lowmem_limit;
+       if (end >= arm_lowmem_limit)
+               end = arm_lowmem_limit;
 
        /*
         * Clear out all the kernel space mappings, except for the first
@@ -1093,8 +1101,8 @@ static void __init map_lowmem(void)
                phys_addr_t end = start + reg->size;
                struct map_desc map;
 
-               if (end > lowmem_limit)
-                       end = lowmem_limit;
+               if (end > arm_lowmem_limit)
+                       end = arm_lowmem_limit;
                if (start >= end)
                        break;
 
@@ -1115,11 +1123,12 @@ void __init paging_init(struct machine_desc *mdesc)
 {
        void *zero_page;
 
-       memblock_set_current_limit(lowmem_limit);
+       memblock_set_current_limit(arm_lowmem_limit);
 
        build_mem_type_table();
        prepare_page_table();
        map_lowmem();
+       dma_contiguous_remap();
        devicemaps_init(mdesc);
        kmap_init();
 
index 162be662c0888c87b492bd2e13a8b51d1e2288e7..bf312c354a214761646a3a3738fb6080b84fff33 100644 (file)
@@ -17,7 +17,7 @@ struct arm_vmregion {
        struct list_head        vm_list;
        unsigned long           vm_start;
        unsigned long           vm_end;
-       struct page             *vm_pages;
+       void                    *priv;
        int                     vm_active;
        const void              *caller;
 };
index 2ed3ab173addcae894a1c52190e7cc9419dddc90..5079787273d27a4ca0b54cd63bcf62008120847a 100644 (file)
@@ -41,6 +41,7 @@
 #include <mach/clock.h>
 #include <mach/hardware.h>
 
+#ifndef CONFIG_COMMON_CLK
 static LIST_HEAD(clocks);
 static DEFINE_MUTEX(clocks_mutex);
 
@@ -200,6 +201,16 @@ struct clk *clk_get_parent(struct clk *clk)
 }
 EXPORT_SYMBOL(clk_get_parent);
 
+#else
+
+/*
+ * Lock to protect the clock module (ccm) registers. Used
+ * on all i.MXs
+ */
+DEFINE_SPINLOCK(imx_ccm_lock);
+
+#endif /* CONFIG_COMMON_CLK */
+
 /*
  * Get the resulting clock rate from a PLL register value and the input
  * frequency. PLLs with this register layout can at least be found on
index 753a5988d85c16e9c28181f632462d9e47e60c74..bd940c795cbbff0c3a1785247e43aac1a43e7acf 100644 (file)
@@ -23,6 +23,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/list.h>
 
+#ifndef CONFIG_COMMON_CLK
 struct module;
 
 struct clk {
@@ -59,6 +60,9 @@ struct clk {
 
 int clk_register(struct clk *clk);
 void clk_unregister(struct clk *clk);
+#endif /* CONFIG_COMMON_CLK */
+
+extern spinlock_t imx_ccm_lock;
 
 unsigned long mxc_decode_pll(unsigned int pll, u32 f_ref);
 
index 0319c4a0cafaffc0869e2b74d632340ce2a05867..cf663d84e7c1d1397291e4667c526087c3aaca56 100644 (file)
@@ -53,6 +53,7 @@ extern void imx35_soc_init(void);
 extern void imx50_soc_init(void);
 extern void imx51_soc_init(void);
 extern void imx53_soc_init(void);
+extern void imx51_init_late(void);
 extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq);
 extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int);
 extern int mx1_clocks_init(unsigned long fref);
@@ -149,4 +150,10 @@ extern void imx6q_pm_init(void);
 static inline void imx6q_pm_init(void) {}
 #endif
 
+#ifdef CONFIG_NEON
+extern int mx51_neon_fixup(void);
+#else
+static inline int mx51_neon_fixup(void) { return 0; }
+#endif
+
 #endif
index 8ddda365f1a0c3637b73e456312740dd3de13126..761e45f9456f1a8adfe3806b2b157dcc186e21ce 100644 (file)
@@ -24,6 +24,8 @@
 #define UART_PADDR     MX51_UART1_BASE_ADDR
 #elif defined (CONFIG_DEBUG_IMX50_IMX53_UART)
 #define UART_PADDR     MX53_UART1_BASE_ADDR
+#elif defined (CONFIG_DEBUG_IMX6Q_UART2)
+#define UART_PADDR     MX6Q_UART2_BASE_ADDR
 #elif defined (CONFIG_DEBUG_IMX6Q_UART4)
 #define UART_PADDR     MX6Q_UART4_BASE_ADDR
 #endif
index 254a561a2799aa2dee8b4a0d2461a80fb8199f2c..f7e7dbac8f4be64464d70aca1d621d7bef52c682 100644 (file)
@@ -27,6 +27,8 @@
 #define MX6Q_CCM_SIZE                  0x4000
 #define MX6Q_ANATOP_BASE_ADDR          0x020c8000
 #define MX6Q_ANATOP_SIZE               0x1000
+#define MX6Q_UART2_BASE_ADDR           0x021e8000
+#define MX6Q_UART2_SIZE                        0x4000
 #define MX6Q_UART4_BASE_ADDR           0x021f0000
 #define MX6Q_UART4_SIZE                        0x4000
 
index 7daf7c9a413bf62eb9a7ad86f659a0eadf016d44..99f958ca6cb8c34ac650896418d1dc8e1a4cd247 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/irq.h>
 #include <linux/clockchips.h>
 #include <linux/clk.h>
+#include <linux/err.h>
 
 #include <mach/hardware.h>
 #include <asm/sched_clock.h>
@@ -282,6 +283,19 @@ static int __init mxc_clockevent_init(struct clk *timer_clk)
 void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
 {
        uint32_t tctl_val;
+       struct clk *timer_ipg_clk;
+
+       if (!timer_clk) {
+               timer_clk = clk_get_sys("imx-gpt.0", "per");
+               if (IS_ERR(timer_clk)) {
+                       pr_err("i.MX timer: unable to get clk\n");
+                       return;
+               }
+
+               timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg");
+               if (!IS_ERR(timer_ipg_clk))
+                       clk_prepare_enable(timer_ipg_clk);
+       }
 
        clk_prepare_enable(timer_clk);
 
index 44ae077dbc28ae965d1f220c87d5cd7a85446df4..2132c4f389e1a799db97a95c78ee4924d57caa16 100644 (file)
 
 #include <plat/clock.h>
 
+/* OMAP2_32KSYNCNT_CR_OFF: offset of 32ksync counter register */
+#define OMAP2_32KSYNCNT_CR_OFF         0x10
+
 /*
  * 32KHz clocksource ... always available, on pretty most chips except
  * OMAP 730 and 1510.  Other timers could be used as clocksources, with
  * higher resolution in free-running counter modes (e.g. 12 MHz xtal),
  * but systems won't necessarily want to spend resources that way.
  */
-static void __iomem *timer_32k_base;
-
-#define OMAP16XX_TIMER_32K_SYNCHRONIZED                0xfffbc410
+static void __iomem *sync32k_cnt_reg;
 
 static u32 notrace omap_32k_read_sched_clock(void)
 {
-       return timer_32k_base ? __raw_readl(timer_32k_base) : 0;
+       return sync32k_cnt_reg ? __raw_readl(sync32k_cnt_reg) : 0;
 }
 
 /**
@@ -60,7 +61,7 @@ static void omap_read_persistent_clock(struct timespec *ts)
        struct timespec *tsp = &persistent_ts;
 
        last_cycles = cycles;
-       cycles = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
+       cycles = sync32k_cnt_reg ? __raw_readl(sync32k_cnt_reg) : 0;
        delta = cycles - last_cycles;
 
        nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift);
@@ -69,55 +70,41 @@ static void omap_read_persistent_clock(struct timespec *ts)
        *ts = *tsp;
 }
 
-int __init omap_init_clocksource_32k(void)
+/**
+ * omap_init_clocksource_32k - setup and register counter 32k as a
+ * kernel clocksource
+ * @vbase: mapped (virtual) base address of the counter_32k module;
+ * the caller must have ioremapped the region already
+ *
+ * Returns 0 upon success or negative error code upon failure.
+ *
+ */
+int __init omap_init_clocksource_32k(void __iomem *vbase)
 {
-       static char err[] __initdata = KERN_ERR
-                       "%s: can't register clocksource!\n";
-
-       if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
-               u32 pbase;
-               unsigned long size = SZ_4K;
-               void __iomem *base;
-               struct clk *sync_32k_ick;
-
-               if (cpu_is_omap16xx()) {
-                       pbase = OMAP16XX_TIMER_32K_SYNCHRONIZED;
-                       size = SZ_1K;
-               } else if (cpu_is_omap2420())
-                       pbase = OMAP2420_32KSYNCT_BASE + 0x10;
-               else if (cpu_is_omap2430())
-                       pbase = OMAP2430_32KSYNCT_BASE + 0x10;
-               else if (cpu_is_omap34xx())
-                       pbase = OMAP3430_32KSYNCT_BASE + 0x10;
-               else if (cpu_is_omap44xx())
-                       pbase = OMAP4430_32KSYNCT_BASE + 0x10;
-               else
-                       return -ENODEV;
-
-               /* For this to work we must have a static mapping in io.c for this area */
-               base = ioremap(pbase, size);
-               if (!base)
-                       return -ENODEV;
-
-               sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
-               if (!IS_ERR(sync_32k_ick))
-                       clk_enable(sync_32k_ick);
-
-               timer_32k_base = base;
-
-               /*
-                * 120000 rough estimate from the calculations in
-                * __clocksource_updatefreq_scale.
-                */
-               clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
-                               32768, NSEC_PER_SEC, 120000);
-
-               if (clocksource_mmio_init(base, "32k_counter", 32768, 250, 32,
-                                         clocksource_mmio_readl_up))
-                       printk(err, "32k_counter");
-
-               setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
-               register_persistent_clock(NULL, omap_read_persistent_clock);
+       int ret;
+
+       /*
+        * 32k sync Counter register offset is at 0x10
+        */
+       sync32k_cnt_reg = vbase + OMAP2_32KSYNCNT_CR_OFF;
+
+       /*
+        * 120000 rough estimate from the calculations in
+        * __clocksource_updatefreq_scale.
+        */
+       clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
+                       32768, NSEC_PER_SEC, 120000);
+
+       ret = clocksource_mmio_init(sync32k_cnt_reg, "32k_counter", 32768,
+                               250, 32, clocksource_mmio_readl_up);
+       if (ret) {
+               pr_err("32k_counter: can't register clocksource\n");
+               return ret;
        }
+
+       setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
+       register_persistent_clock(NULL, omap_read_persistent_clock);
+       pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
+
        return 0;
 }
index 09b07d25289273223d20287b3a4ca35099eec8f2..1cba9273d2cb4ff68832937b2e450a140020e6ca 100644 (file)
 #include <plat/menelaus.h>
 #include <plat/omap44xx.h>
 
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
-       defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
-
-#define OMAP_MMC_NR_RES                2
-
-/*
- * Register MMC devices. Called from mach-omap1 and mach-omap2 device init.
- */
-int __init omap_mmc_add(const char *name, int id, unsigned long base,
-                               unsigned long size, unsigned int irq,
-                               struct omap_mmc_platform_data *data)
-{
-       struct platform_device *pdev;
-       struct resource res[OMAP_MMC_NR_RES];
-       int ret;
-
-       pdev = platform_device_alloc(name, id);
-       if (!pdev)
-               return -ENOMEM;
-
-       memset(res, 0, OMAP_MMC_NR_RES * sizeof(struct resource));
-       res[0].start = base;
-       res[0].end = base + size - 1;
-       res[0].flags = IORESOURCE_MEM;
-       res[1].start = res[1].end = irq;
-       res[1].flags = IORESOURCE_IRQ;
-
-       ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
-       if (ret == 0)
-               ret = platform_device_add_data(pdev, data, sizeof(*data));
-       if (ret)
-               goto fail;
-
-       ret = platform_device_add(pdev);
-       if (ret)
-               goto fail;
-
-       /* return device handle to board setup code */
-       data->dev = &pdev->dev;
-       return 0;
-
-fail:
-       platform_device_put(pdev);
-       return ret;
-}
-
-#endif
-
 /*-------------------------------------------------------------------------*/
 
 #if defined(CONFIG_HW_RANDOM_OMAP) || defined(CONFIG_HW_RANDOM_OMAP_MODULE)
@@ -109,79 +61,6 @@ static void omap_init_rng(void)
 static inline void omap_init_rng(void) {}
 #endif
 
-/*-------------------------------------------------------------------------*/
-
-/* Numbering for the SPI-capable controllers when used for SPI:
- * spi         = 1
- * uwire       = 2
- * mmc1..2     = 3..4
- * mcbsp1..3   = 5..7
- */
-
-#if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE)
-
-#define        OMAP_UWIRE_BASE         0xfffb3000
-
-static struct resource uwire_resources[] = {
-       {
-               .start          = OMAP_UWIRE_BASE,
-               .end            = OMAP_UWIRE_BASE + 0x20,
-               .flags          = IORESOURCE_MEM,
-       },
-};
-
-static struct platform_device omap_uwire_device = {
-       .name      = "omap_uwire",
-       .id          = -1,
-       .num_resources  = ARRAY_SIZE(uwire_resources),
-       .resource       = uwire_resources,
-};
-
-static void omap_init_uwire(void)
-{
-       /* FIXME define and use a boot tag; not all boards will be hooking
-        * up devices to the microwire controller, and multi-board configs
-        * mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway...
-        */
-
-       /* board-specific code must configure chipselects (only a few
-        * are normally used) and SCLK/SDI/SDO (each has two choices).
-        */
-       (void) platform_device_register(&omap_uwire_device);
-}
-#else
-static inline void omap_init_uwire(void) {}
-#endif
-
-#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
-
-static phys_addr_t omap_dsp_phys_mempool_base;
-
-void __init omap_dsp_reserve_sdram_memblock(void)
-{
-       phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
-       phys_addr_t paddr;
-
-       if (!size)
-               return;
-
-       paddr = arm_memblock_steal(size, SZ_1M);
-       if (!paddr) {
-               pr_err("%s: failed to reserve %llx bytes\n",
-                               __func__, (unsigned long long)size);
-               return;
-       }
-
-       omap_dsp_phys_mempool_base = paddr;
-}
-
-phys_addr_t omap_dsp_get_mempool_base(void)
-{
-       return omap_dsp_phys_mempool_base;
-}
-EXPORT_SYMBOL(omap_dsp_get_mempool_base);
-#endif
-
 /*
  * This gets called after board-specific INIT_MACHINE, and initializes most
  * on-chip peripherals accessible on this board (except for few like USB):
@@ -208,7 +87,6 @@ static int __init omap_init_devices(void)
         * in alphabetical order so they're easier to sort through.
         */
        omap_init_rng();
-       omap_init_uwire();
        return 0;
 }
 arch_initcall(omap_init_devices);
index 987e6101267df6d76eca0623115e61a05916933c..cb16ade437cb6d94fc608c4e2060dfa512514e58 100644 (file)
@@ -852,7 +852,7 @@ omap_dma_set_prio_lch(int lch, unsigned char read_prio,
        }
        l = p->dma_read(CCR, lch);
        l &= ~((1 << 6) | (1 << 26));
-       if (cpu_is_omap2430() || cpu_is_omap34xx() ||  cpu_is_omap44xx())
+       if (cpu_class_is_omap2() && !cpu_is_omap242x())
                l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
        else
                l |= ((read_prio & 0x1) << 6);
@@ -2080,7 +2080,7 @@ static int __devinit omap_system_dma_probe(struct platform_device *pdev)
                }
        }
 
-       if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
+       if (cpu_class_is_omap2() && !cpu_is_omap242x())
                omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
                                DMA_DEFAULT_FIFO_DEPTH, 0);
 
index c4ed35e89fbde45ee252263b47846d70c11d1f7a..3b0cfeb33d05a21c4b58ab477a2d02dd8f481024 100644 (file)
@@ -82,8 +82,6 @@ static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
 
 static void omap_timer_restore_context(struct omap_dm_timer *timer)
 {
-       __raw_writel(timer->context.tiocp_cfg,
-                       timer->io_base + OMAP_TIMER_OCP_CFG_OFFSET);
        if (timer->revision == 1)
                __raw_writel(timer->context.tistat, timer->sys_stat);
 
index a557b8484e6cfada3d20ccd65ded958b8df3d7a7..d1cb6f527b7e8195afc43e797567bb3adcce41a3 100644 (file)
@@ -30,7 +30,7 @@
 #include <plat/i2c.h>
 #include <plat/omap_hwmod.h>
 
-extern int __init omap_init_clocksource_32k(void);
+extern int __init omap_init_clocksource_32k(void __iomem *vbase);
 
 extern void __init omap_check_revision(void);
 
index 4bdf14ec6747777ead47438745cea6b83769f327..297245dba66e4c286a73f08b90e39aa062739155 100644 (file)
@@ -121,6 +121,7 @@ IS_OMAP_CLASS(16xx, 0x16)
 IS_OMAP_CLASS(24xx, 0x24)
 IS_OMAP_CLASS(34xx, 0x34)
 IS_OMAP_CLASS(44xx, 0x44)
+IS_AM_CLASS(35xx, 0x35)
 IS_AM_CLASS(33xx, 0x33)
 
 IS_TI_CLASS(81xx, 0x81)
@@ -148,6 +149,7 @@ IS_AM_SUBCLASS(335x, 0x335)
 #define cpu_is_ti81xx()                        0
 #define cpu_is_ti816x()                        0
 #define cpu_is_ti814x()                        0
+#define soc_is_am35xx()                        0
 #define cpu_is_am33xx()                        0
 #define cpu_is_am335x()                        0
 #define cpu_is_omap44xx()              0
@@ -357,6 +359,7 @@ IS_OMAP_TYPE(3517, 0x3517)
 # undef cpu_is_ti81xx
 # undef cpu_is_ti816x
 # undef cpu_is_ti814x
+# undef soc_is_am35xx
 # undef cpu_is_am33xx
 # undef cpu_is_am335x
 # define cpu_is_omap3430()             is_omap3430()
@@ -378,6 +381,7 @@ IS_OMAP_TYPE(3517, 0x3517)
 # define cpu_is_ti81xx()               is_ti81xx()
 # define cpu_is_ti816x()               is_ti816x()
 # define cpu_is_ti814x()               is_ti814x()
+# define soc_is_am35xx()               is_am35xx()
 # define cpu_is_am33xx()               is_am33xx()
 # define cpu_is_am335x()               is_am335x()
 #endif
@@ -433,6 +437,10 @@ IS_OMAP_TYPE(3517, 0x3517)
 #define TI8148_REV_ES2_0       (TI814X_CLASS | (0x1 << 8))
 #define TI8148_REV_ES2_1       (TI814X_CLASS | (0x2 << 8))
 
+#define AM35XX_CLASS           0x35170034
+#define AM35XX_REV_ES1_0       AM35XX_CLASS
+#define AM35XX_REV_ES1_1       (AM35XX_CLASS | (0x1 << 8))
+
 #define AM335X_CLASS           0x33500034
 #define AM335X_REV_ES1_0       AM335X_CLASS
 
index 42afb4c45517f54d9b54c088b1e76e2b218e01ee..c5811d4409b0438ab5c37c3338b6822d88a1228c 100644 (file)
 #define CLEAR_CSR_ON_READ              BIT(0xC)
 #define IS_WORD_16                     BIT(0xD)
 
+/* Defines for DMA Capabilities */
+#define DMA_HAS_TRANSPARENT_CAPS       (0x1 << 18)
+#define DMA_HAS_CONSTANT_FILL_CAPS     (0x1 << 19)
+#define DMA_HAS_DESCRIPTOR_CAPS                (0x3 << 20)
+
 enum omap_reg_offsets {
 
 GCR,           GSCR,           GRST1,          HW_ID,
index bdf871a84d62c67f8ec7feb326d4c27b991a8a49..5da73562e4867ce732a9ca9b3aeb2db33eebee45 100644 (file)
@@ -75,7 +75,6 @@ struct clk;
 
 struct timer_regs {
        u32 tidr;
-       u32 tiocp_cfg;
        u32 tistat;
        u32 tisr;
        u32 tier;
index 1527929b445a44a75ee8433fb26afce169048ae1..f37764a36072e4b5cbdd632e0cc6369f9de5e6fc 100644 (file)
@@ -92,6 +92,8 @@ enum omap_ecc {
        OMAP_ECC_HAMMING_CODE_HW, /* gpmc to detect the error */
                /* 1-bit ecc: stored at beginning of spare area as romcode */
        OMAP_ECC_HAMMING_CODE_HW_ROMCODE, /* gpmc method & romcode layout */
+       OMAP_ECC_BCH4_CODE_HW, /* 4-bit BCH ecc code */
+       OMAP_ECC_BCH8_CODE_HW, /* 8-bit BCH ecc code */
 };
 
 /*
@@ -157,4 +159,13 @@ extern int gpmc_nand_write(int cs, int cmd, int wval);
 
 int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size);
 int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code);
+
+#ifdef CONFIG_ARCH_OMAP3
+int gpmc_init_hwecc_bch(int cs, int nsectors, int nerrors);
+int gpmc_enable_hwecc_bch(int cs, int mode, int dev_width, int nsectors,
+                         int nerrors);
+int gpmc_calculate_ecc_bch4(int cs, const u_char *dat, u_char *ecc);
+int gpmc_calculate_ecc_bch8(int cs, const u_char *dat, u_char *ecc);
+#endif /* CONFIG_ARCH_OMAP3 */
+
 #endif
index 3e7ae0f0215feeeb130d292bd1c5c828ab2729af..a7754a886d428af5599c00c4fefdcb4d2d058d12 100644 (file)
@@ -177,9 +177,6 @@ extern void omap_mmc_notify_cover_event(struct device *dev, int slot,
 void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
                                int nr_controllers);
 void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data);
-int omap_mmc_add(const char *name, int id, unsigned long base,
-                               unsigned long size, unsigned int irq,
-                               struct omap_mmc_platform_data *data);
 #else
 static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
                                int nr_controllers)
@@ -188,12 +185,6 @@ static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
 static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
 {
 }
-static inline int omap_mmc_add(const char *name, int id, unsigned long base,
-                               unsigned long size, unsigned int irq,
-                               struct omap_mmc_platform_data *data)
-{
-       return 0;
-}
 
 #endif
 
index 74daf5ed1432726ca7fc7d69c3fddd174d310ae6..61fd837624a8532dc38e13a078d1d83941afc1d8 100644 (file)
 #include <linux/dma-mapping.h>
 #include <linux/serial_8250.h>
 #include <linux/ata_platform.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
 #include <linux/mv643xx_eth.h>
 #include <linux/mv643xx_i2c.h>
 #include <net/dsa.h>
-#include <linux/spi/orion_spi.h>
-#include <plat/orion_wdt.h>
 #include <plat/mv_xor.h>
 #include <plat/ehci-orion.h>
 #include <mach/bridge-regs.h>
 
+/* Create a clkdev entry for a given device/clk */
+void __init orion_clkdev_add(const char *con_id, const char *dev_id,
+                            struct clk *clk)
+{
+       struct clk_lookup *cl;
+
+       cl = clkdev_alloc(clk, con_id, dev_id);
+       if (cl)
+               clkdev_add(cl);
+}
+
+/* Create clkdev entries for all orion platforms except kirkwood.
+   Kirkwood has gated clocks for some of its peripherals, so creates
+   its own clkdev entries. For all the other orion devices, create
+   clkdev entries to the tclk. */
+void __init orion_clkdev_init(struct clk *tclk)
+{
+       orion_clkdev_add(NULL, "orion_spi.0", tclk);
+       orion_clkdev_add(NULL, "orion_spi.1", tclk);
+       orion_clkdev_add(NULL, MV643XX_ETH_NAME ".0", tclk);
+       orion_clkdev_add(NULL, MV643XX_ETH_NAME ".1", tclk);
+       orion_clkdev_add(NULL, MV643XX_ETH_NAME ".2", tclk);
+       orion_clkdev_add(NULL, MV643XX_ETH_NAME ".3", tclk);
+       orion_clkdev_add(NULL, "orion_wdt", tclk);
+}
+
 /* Fill in the resources structure and link it into the platform
    device structure. There is always a memory region, and nearly
    always an interrupt.*/
@@ -49,6 +75,12 @@ static void fill_resources(struct platform_device *device,
 /*****************************************************************************
  * UART
  ****************************************************************************/
+static unsigned long __init uart_get_clk_rate(struct clk *clk)
+{
+       clk_prepare_enable(clk);
+       return clk_get_rate(clk);
+}
+
 static void __init uart_complete(
        struct platform_device *orion_uart,
        struct plat_serial8250_port *data,
@@ -56,12 +88,12 @@ static void __init uart_complete(
        unsigned int membase,
        resource_size_t mapbase,
        unsigned int irq,
-       unsigned int uartclk)
+       struct clk *clk)
 {
        data->mapbase = mapbase;
        data->membase = (void __iomem *)membase;
        data->irq = irq;
-       data->uartclk = uartclk;
+       data->uartclk = uart_get_clk_rate(clk);
        orion_uart->dev.platform_data = data;
 
        fill_resources(orion_uart, resources, mapbase, 0xff, irq);
@@ -90,10 +122,10 @@ static struct platform_device orion_uart0 = {
 void __init orion_uart0_init(unsigned int membase,
                             resource_size_t mapbase,
                             unsigned int irq,
-                            unsigned int uartclk)
+                            struct clk *clk)
 {
        uart_complete(&orion_uart0, orion_uart0_data, orion_uart0_resources,
-                     membase, mapbase, irq, uartclk);
+                     membase, mapbase, irq, clk);
 }
 
 /*****************************************************************************
@@ -118,10 +150,10 @@ static struct platform_device orion_uart1 = {
 void __init orion_uart1_init(unsigned int membase,
                             resource_size_t mapbase,
                             unsigned int irq,
-                            unsigned int uartclk)
+                            struct clk *clk)
 {
        uart_complete(&orion_uart1, orion_uart1_data, orion_uart1_resources,
-                     membase, mapbase, irq, uartclk);
+                     membase, mapbase, irq, clk);
 }
 
 /*****************************************************************************
@@ -146,10 +178,10 @@ static struct platform_device orion_uart2 = {
 void __init orion_uart2_init(unsigned int membase,
                             resource_size_t mapbase,
                             unsigned int irq,
-                            unsigned int uartclk)
+                            struct clk *clk)
 {
        uart_complete(&orion_uart2, orion_uart2_data, orion_uart2_resources,
-                     membase, mapbase, irq, uartclk);
+                     membase, mapbase, irq, clk);
 }
 
 /*****************************************************************************
@@ -174,10 +206,10 @@ static struct platform_device orion_uart3 = {
 void __init orion_uart3_init(unsigned int membase,
                             resource_size_t mapbase,
                             unsigned int irq,
-                            unsigned int uartclk)
+                            struct clk *clk)
 {
        uart_complete(&orion_uart3, orion_uart3_data, orion_uart3_resources,
-                     membase, mapbase, irq, uartclk);
+                     membase, mapbase, irq, clk);
 }
 
 /*****************************************************************************
@@ -203,13 +235,11 @@ void __init orion_rtc_init(unsigned long mapbase,
  ****************************************************************************/
 static __init void ge_complete(
        struct mv643xx_eth_shared_platform_data *orion_ge_shared_data,
-       int tclk,
        struct resource *orion_ge_resource, unsigned long irq,
        struct platform_device *orion_ge_shared,
        struct mv643xx_eth_platform_data *eth_data,
        struct platform_device *orion_ge)
 {
-       orion_ge_shared_data->t_clk = tclk;
        orion_ge_resource->start = irq;
        orion_ge_resource->end = irq;
        eth_data->shared = orion_ge_shared;
@@ -260,12 +290,11 @@ static struct platform_device orion_ge00 = {
 void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq,
-                           unsigned long irq_err,
-                           int tclk)
+                           unsigned long irq_err)
 {
        fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
                       mapbase + 0x2000, SZ_16K - 1, irq_err);
-       ge_complete(&orion_ge00_shared_data, tclk,
+       ge_complete(&orion_ge00_shared_data,
                    orion_ge00_resources, irq, &orion_ge00_shared,
                    eth_data, &orion_ge00);
 }
@@ -313,12 +342,11 @@ static struct platform_device orion_ge01 = {
 void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq,
-                           unsigned long irq_err,
-                           int tclk)
+                           unsigned long irq_err)
 {
        fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
                       mapbase + 0x2000, SZ_16K - 1, irq_err);
-       ge_complete(&orion_ge01_shared_data, tclk,
+       ge_complete(&orion_ge01_shared_data,
                    orion_ge01_resources, irq, &orion_ge01_shared,
                    eth_data, &orion_ge01);
 }
@@ -366,12 +394,11 @@ static struct platform_device orion_ge10 = {
 void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq,
-                           unsigned long irq_err,
-                           int tclk)
+                           unsigned long irq_err)
 {
        fill_resources(&orion_ge10_shared, orion_ge10_shared_resources,
                       mapbase + 0x2000, SZ_16K - 1, irq_err);
-       ge_complete(&orion_ge10_shared_data, tclk,
+       ge_complete(&orion_ge10_shared_data,
                    orion_ge10_resources, irq, &orion_ge10_shared,
                    eth_data, &orion_ge10);
 }
@@ -419,12 +446,11 @@ static struct platform_device orion_ge11 = {
 void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq,
-                           unsigned long irq_err,
-                           int tclk)
+                           unsigned long irq_err)
 {
        fill_resources(&orion_ge11_shared, orion_ge11_shared_resources,
                       mapbase + 0x2000, SZ_16K - 1, irq_err);
-       ge_complete(&orion_ge11_shared_data, tclk,
+       ge_complete(&orion_ge11_shared_data,
                    orion_ge11_resources, irq, &orion_ge11_shared,
                    eth_data, &orion_ge11);
 }
@@ -521,44 +547,32 @@ void __init orion_i2c_1_init(unsigned long mapbase,
 /*****************************************************************************
  * SPI
  ****************************************************************************/
-static struct orion_spi_info orion_spi_plat_data;
 static struct resource orion_spi_resources;
 
 static struct platform_device orion_spi = {
        .name           = "orion_spi",
        .id             = 0,
-       .dev            = {
-               .platform_data  = &orion_spi_plat_data,
-       },
 };
 
-static struct orion_spi_info orion_spi_1_plat_data;
 static struct resource orion_spi_1_resources;
 
 static struct platform_device orion_spi_1 = {
        .name           = "orion_spi",
        .id             = 1,
-       .dev            = {
-               .platform_data  = &orion_spi_1_plat_data,
-       },
 };
 
 /* Note: The SPI silicon core does have interrupts. However the
  * current Linux software driver does not use interrupts. */
 
-void __init orion_spi_init(unsigned long mapbase,
-                          unsigned long tclk)
+void __init orion_spi_init(unsigned long mapbase)
 {
-       orion_spi_plat_data.tclk = tclk;
        fill_resources(&orion_spi, &orion_spi_resources,
                       mapbase, SZ_512 - 1, NO_IRQ);
        platform_device_register(&orion_spi);
 }
 
-void __init orion_spi_1_init(unsigned long mapbase,
-                            unsigned long tclk)
+void __init orion_spi_1_init(unsigned long mapbase)
 {
-       orion_spi_1_plat_data.tclk = tclk;
        fill_resources(&orion_spi_1, &orion_spi_1_resources,
                       mapbase, SZ_512 - 1, NO_IRQ);
        platform_device_register(&orion_spi_1);
@@ -567,24 +581,18 @@ void __init orion_spi_1_init(unsigned long mapbase,
 /*****************************************************************************
  * Watchdog
  ****************************************************************************/
-static struct orion_wdt_platform_data orion_wdt_data;
-
 static struct resource orion_wdt_resource =
                DEFINE_RES_MEM(TIMER_VIRT_BASE, 0x28);
 
 static struct platform_device orion_wdt_device = {
        .name           = "orion_wdt",
        .id             = -1,
-       .dev            = {
-               .platform_data  = &orion_wdt_data,
-       },
-       .resource       = &orion_wdt_resource,
        .num_resources  = 1,
+       .resource       = &orion_wdt_resource,
 };
 
-void __init orion_wdt_init(unsigned long tclk)
+void __init orion_wdt_init(void)
 {
-       orion_wdt_data.tclk = tclk;
        platform_device_register(&orion_wdt_device);
 }
 
index a7fa005a5a0eb335e6b0267a090664a5a9d86ae7..e00fdb2136090154ea930c9f224365677644444e 100644 (file)
@@ -16,22 +16,22 @@ struct dsa_platform_data;
 void __init orion_uart0_init(unsigned int membase,
                             resource_size_t mapbase,
                             unsigned int irq,
-                            unsigned int uartclk);
+                            struct clk *clk);
 
 void __init orion_uart1_init(unsigned int membase,
                             resource_size_t mapbase,
                             unsigned int irq,
-                            unsigned int uartclk);
+                            struct clk *clk);
 
 void __init orion_uart2_init(unsigned int membase,
                             resource_size_t mapbase,
                             unsigned int irq,
-                            unsigned int uartclk);
+                            struct clk *clk);
 
 void __init orion_uart3_init(unsigned int membase,
                             resource_size_t mapbase,
                             unsigned int irq,
-                            unsigned int uartclk);
+                            struct clk *clk);
 
 void __init orion_rtc_init(unsigned long mapbase,
                           unsigned long irq);
@@ -39,29 +39,26 @@ void __init orion_rtc_init(unsigned long mapbase,
 void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq,
-                           unsigned long irq_err,
-                           int tclk);
+                           unsigned long irq_err);
 
 void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq,
-                           unsigned long irq_err,
-                           int tclk);
+                           unsigned long irq_err);
 
 void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq,
-                           unsigned long irq_err,
-                           int tclk);
+                           unsigned long irq_err);
 
 void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq,
-                           unsigned long irq_err,
-                           int tclk);
+                           unsigned long irq_err);
 
 void __init orion_ge00_switch_init(struct dsa_platform_data *d,
                                   int irq);
+
 void __init orion_i2c_init(unsigned long mapbase,
                           unsigned long irq,
                           unsigned long freq_m);
@@ -70,13 +67,11 @@ void __init orion_i2c_1_init(unsigned long mapbase,
                             unsigned long irq,
                             unsigned long freq_m);
 
-void __init orion_spi_init(unsigned long mapbase,
-                          unsigned long tclk);
+void __init orion_spi_init(unsigned long mapbase);
 
-void __init orion_spi_1_init(unsigned long mapbase,
-                            unsigned long tclk);
+void __init orion_spi_1_init(unsigned long mapbase);
 
-void __init orion_wdt_init(unsigned long tclk);
+void __init orion_wdt_init(void);
 
 void __init orion_xor0_init(unsigned long mapbase_low,
                            unsigned long mapbase_high,
@@ -106,4 +101,9 @@ void __init orion_crypto_init(unsigned long mapbase,
                              unsigned long srambase,
                              unsigned long sram_size,
                              unsigned long irq);
+
+void __init orion_clkdev_add(const char *con_id, const char *dev_id,
+                            struct clk *clk);
+
+void __init orion_clkdev_init(struct clk *tclk);
 #endif
diff --git a/arch/arm/plat-orion/include/plat/orion_wdt.h b/arch/arm/plat-orion/include/plat/orion_wdt.h
deleted file mode 100644 (file)
index 665c362..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * arch/arm/plat-orion/include/plat/orion_wdt.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __PLAT_ORION_WDT_H
-#define __PLAT_ORION_WDT_H
-
-struct orion_wdt_platform_data {
-       u32     tclk;           /* no <linux/clk.h> support yet */
-};
-
-
-#endif
-
index 86dbb5bdb1722339b5f86683897c7b4c2f7c2a60..f20a321088a243bb847269c56bc43fb4c1b7ad90 100644 (file)
 #define  PCIE_DEBUG_SOFT_RESET         (1<<20)
 
 
-u32 __init orion_pcie_dev_id(void __iomem *base)
+u32 orion_pcie_dev_id(void __iomem *base)
 {
        return readl(base + PCIE_DEV_ID_OFF) >> 16;
 }
 
-u32 __init orion_pcie_rev(void __iomem *base)
+u32 orion_pcie_rev(void __iomem *base)
 {
        return readl(base + PCIE_DEV_REV_OFF) & 0xff;
 }
index abcc36eb12425ac04a28da7e61f6e9fa171ddac0..5ce8d5e6ea518e7f4d0303b4d4ac066dcd294224 100644 (file)
@@ -44,6 +44,10 @@ struct pxa27x_keypad_platform_data {
        /* direct keys */
        int             direct_key_num;
        unsigned int    direct_key_map[MAX_DIRECT_KEY_NUM];
+       /* the key output may be low active */
+       int             direct_key_low_active;
+       /* give board a chance to choose the start direct key */
+       unsigned int    direct_key_mask;
 
        /* rotary encoders 0 */
        int             enable_rotary0;
index 2467b800cc768416de3e0f93eba09b799421ee8d..9f60549c8da1c2318a209c4b503cfe8cb9bc86d3 100644 (file)
@@ -12,10 +12,7 @@ obj-                         :=
 
 # Core files
 
-obj-y                          += cpu.o
 obj-y                          += irq.o
-obj-y                          += dev-uart.o
-obj-y                          += clock.o
 obj-$(CONFIG_S3C24XX_DCLK)     += clock-dclk.o
 
 obj-$(CONFIG_CPU_FREQ_S3C24XX) += cpu-freq.o
@@ -23,9 +20,6 @@ obj-$(CONFIG_CPU_FREQ_S3C24XX_DEBUGFS) += cpu-freq-debugfs.o
 
 # Architecture dependent builds
 
-obj-$(CONFIG_PM)               += pm.o
-obj-$(CONFIG_PM)               += irq-pm.o
-obj-$(CONFIG_PM)               += sleep.o
 obj-$(CONFIG_S3C2410_CLOCK)    += s3c2410-clock.o
 obj-$(CONFIG_S3C24XX_DMA)      += dma.o
 obj-$(CONFIG_S3C2410_IOTIMING) += s3c2410-iotiming.o
diff --git a/arch/arm/plat-s3c24xx/clock.c b/arch/arm/plat-s3c24xx/clock.c
deleted file mode 100644 (file)
index 931d26d..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/* linux/arch/arm/plat-s3c24xx/clock.c
- *
- * Copyright (c) 2004-2005 Simtec Electronics
- *     Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX Core clock control support
- *
- * Based on, and code from linux/arch/arm/mach-versatile/clock.c
- **
- **  Copyright (C) 2004 ARM Limited.
- **  Written by Deep Blue Solutions Limited.
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-*/
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <asm/irq.h>
-
-#include <mach/regs-clock.h>
-#include <mach/regs-gpio.h>
-
-#include <plat/cpu-freq.h>
-
-#include <plat/clock.h>
-#include <plat/cpu.h>
-#include <plat/pll.h>
-
-/* initialise all the clocks */
-
-void __init_or_cpufreq s3c24xx_setup_clocks(unsigned long fclk,
-                                          unsigned long hclk,
-                                          unsigned long pclk)
-{
-       clk_upll.rate = s3c24xx_get_pll(__raw_readl(S3C2410_UPLLCON),
-                                       clk_xtal.rate);
-
-       clk_mpll.rate = fclk;
-       clk_h.rate = hclk;
-       clk_p.rate = pclk;
-       clk_f.rate = fclk;
-}
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/plat-s3c24xx/cpu.c
deleted file mode 100644 (file)
index 290942d..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-/* linux/arch/arm/plat-s3c24xx/cpu.c
- *
- * Copyright (c) 2004-2005 Simtec Electronics
- *     http://www.simtec.co.uk/products/SWLINUX/
- *     Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX CPU Support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-*/
-
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/serial_core.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <mach/regs-clock.h>
-#include <asm/irq.h>
-#include <asm/cacheflush.h>
-#include <asm/system_info.h>
-#include <asm/system_misc.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include <mach/regs-gpio.h>
-#include <plat/regs-serial.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/clock.h>
-#include <plat/s3c2410.h>
-#include <plat/s3c2412.h>
-#include <plat/s3c2416.h>
-#include <plat/s3c244x.h>
-#include <plat/s3c2443.h>
-
-/* table of supported CPUs */
-
-static const char name_s3c2410[]  = "S3C2410";
-static const char name_s3c2412[]  = "S3C2412";
-static const char name_s3c2416[]  = "S3C2416/S3C2450";
-static const char name_s3c2440[]  = "S3C2440";
-static const char name_s3c2442[]  = "S3C2442";
-static const char name_s3c2442b[]  = "S3C2442B";
-static const char name_s3c2443[]  = "S3C2443";
-static const char name_s3c2410a[] = "S3C2410A";
-static const char name_s3c2440a[] = "S3C2440A";
-
-static struct cpu_table cpu_ids[] __initdata = {
-       {
-               .idcode         = 0x32410000,
-               .idmask         = 0xffffffff,
-               .map_io         = s3c2410_map_io,
-               .init_clocks    = s3c2410_init_clocks,
-               .init_uarts     = s3c2410_init_uarts,
-               .init           = s3c2410_init,
-               .name           = name_s3c2410
-       },
-       {
-               .idcode         = 0x32410002,
-               .idmask         = 0xffffffff,
-               .map_io         = s3c2410_map_io,
-               .init_clocks    = s3c2410_init_clocks,
-               .init_uarts     = s3c2410_init_uarts,
-               .init           = s3c2410a_init,
-               .name           = name_s3c2410a
-       },
-       {
-               .idcode         = 0x32440000,
-               .idmask         = 0xffffffff,
-               .map_io         = s3c2440_map_io,
-               .init_clocks    = s3c244x_init_clocks,
-               .init_uarts     = s3c244x_init_uarts,
-               .init           = s3c2440_init,
-               .name           = name_s3c2440
-       },
-       {
-               .idcode         = 0x32440001,
-               .idmask         = 0xffffffff,
-               .map_io         = s3c2440_map_io,
-               .init_clocks    = s3c244x_init_clocks,
-               .init_uarts     = s3c244x_init_uarts,
-               .init           = s3c2440_init,
-               .name           = name_s3c2440a
-       },
-       {
-               .idcode         = 0x32440aaa,
-               .idmask         = 0xffffffff,
-               .map_io         = s3c2442_map_io,
-               .init_clocks    = s3c244x_init_clocks,
-               .init_uarts     = s3c244x_init_uarts,
-               .init           = s3c2442_init,
-               .name           = name_s3c2442
-       },
-       {
-               .idcode         = 0x32440aab,
-               .idmask         = 0xffffffff,
-               .map_io         = s3c2442_map_io,
-               .init_clocks    = s3c244x_init_clocks,
-               .init_uarts     = s3c244x_init_uarts,
-               .init           = s3c2442_init,
-               .name           = name_s3c2442b
-       },
-       {
-               .idcode         = 0x32412001,
-               .idmask         = 0xffffffff,
-               .map_io         = s3c2412_map_io,
-               .init_clocks    = s3c2412_init_clocks,
-               .init_uarts     = s3c2412_init_uarts,
-               .init           = s3c2412_init,
-               .name           = name_s3c2412,
-       },
-       {                       /* a newer version of the s3c2412 */
-               .idcode         = 0x32412003,
-               .idmask         = 0xffffffff,
-               .map_io         = s3c2412_map_io,
-               .init_clocks    = s3c2412_init_clocks,
-               .init_uarts     = s3c2412_init_uarts,
-               .init           = s3c2412_init,
-               .name           = name_s3c2412,
-       },
-       {                       /* a strange version of the s3c2416 */
-               .idcode         = 0x32450003,
-               .idmask         = 0xffffffff,
-               .map_io         = s3c2416_map_io,
-               .init_clocks    = s3c2416_init_clocks,
-               .init_uarts     = s3c2416_init_uarts,
-               .init           = s3c2416_init,
-               .name           = name_s3c2416,
-       },
-       {
-               .idcode         = 0x32443001,
-               .idmask         = 0xffffffff,
-               .map_io         = s3c2443_map_io,
-               .init_clocks    = s3c2443_init_clocks,
-               .init_uarts     = s3c2443_init_uarts,
-               .init           = s3c2443_init,
-               .name           = name_s3c2443,
-       },
-};
-
-/* minimal IO mapping */
-
-static struct map_desc s3c_iodesc[] __initdata = {
-       IODESC_ENT(GPIO),
-       IODESC_ENT(IRQ),
-       IODESC_ENT(MEMCTRL),
-       IODESC_ENT(UART)
-};
-
-/* read cpu identificaiton code */
-
-static unsigned long s3c24xx_read_idcode_v5(void)
-{
-#if defined(CONFIG_CPU_S3C2416)
-       /* s3c2416 is v5, with S3C24XX_GSTATUS1 instead of S3C2412_GSTATUS1 */
-
-       u32 gs = __raw_readl(S3C24XX_GSTATUS1);
-
-       /* test for s3c2416 or similar device */
-       if ((gs >> 16) == 0x3245)
-               return gs;
-#endif
-
-#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413)
-       return __raw_readl(S3C2412_GSTATUS1);
-#else
-       return 1UL;     /* don't look like an 2400 */
-#endif
-}
-
-static unsigned long s3c24xx_read_idcode_v4(void)
-{
-       return __raw_readl(S3C2410_GSTATUS1);
-}
-
-static void s3c24xx_default_idle(void)
-{
-       unsigned long tmp;
-       int i;
-
-       /* idle the system by using the idle mode which will wait for an
-        * interrupt to happen before restarting the system.
-        */
-
-       /* Warning: going into idle state upsets jtag scanning */
-
-       __raw_writel(__raw_readl(S3C2410_CLKCON) | S3C2410_CLKCON_IDLE,
-                    S3C2410_CLKCON);
-
-       /* the samsung port seems to do a loop and then unset idle.. */
-       for (i = 0; i < 50; i++)
-               tmp += __raw_readl(S3C2410_CLKCON); /* ensure loop not optimised out */
-
-       /* this bit is not cleared on re-start... */
-
-       __raw_writel(__raw_readl(S3C2410_CLKCON) & ~S3C2410_CLKCON_IDLE,
-                    S3C2410_CLKCON);
-}
-
-void __init s3c24xx_init_io(struct map_desc *mach_desc, int size)
-{
-       arm_pm_idle = s3c24xx_default_idle;
-
-       /* initialise the io descriptors we need for initialisation */
-       iotable_init(mach_desc, size);
-       iotable_init(s3c_iodesc, ARRAY_SIZE(s3c_iodesc));
-
-       if (cpu_architecture() >= CPU_ARCH_ARMv5) {
-               samsung_cpu_id = s3c24xx_read_idcode_v5();
-       } else {
-               samsung_cpu_id = s3c24xx_read_idcode_v4();
-       }
-       s3c24xx_init_cpu();
-
-       s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
-}
diff --git a/arch/arm/plat-s3c24xx/dev-uart.c b/arch/arm/plat-s3c24xx/dev-uart.c
deleted file mode 100644 (file)
index 9ab22e6..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-/* linux/arch/arm/plat-s3c24xx/dev-uart.c
- *
- * Copyright (c) 2004 Simtec Electronics
- *     Ben Dooks <ben@simtec.co.uk>
- *
- * Base S3C24XX UART resource and platform device definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/serial_core.h>
-#include <linux/platform_device.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
-#include <mach/hardware.h>
-#include <mach/map.h>
-
-#include <plat/devs.h>
-#include <plat/regs-serial.h>
-
-/* Serial port registrations */
-
-static struct resource s3c2410_uart0_resource[] = {
-       [0] = {
-               .start = S3C2410_PA_UART0,
-               .end   = S3C2410_PA_UART0 + 0x3fff,
-               .flags = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start = IRQ_S3CUART_RX0,
-               .end   = IRQ_S3CUART_ERR0,
-               .flags = IORESOURCE_IRQ,
-       }
-};
-
-static struct resource s3c2410_uart1_resource[] = {
-       [0] = {
-               .start = S3C2410_PA_UART1,
-               .end   = S3C2410_PA_UART1 + 0x3fff,
-               .flags = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start = IRQ_S3CUART_RX1,
-               .end   = IRQ_S3CUART_ERR1,
-               .flags = IORESOURCE_IRQ,
-       }
-};
-
-static struct resource s3c2410_uart2_resource[] = {
-       [0] = {
-               .start = S3C2410_PA_UART2,
-               .end   = S3C2410_PA_UART2 + 0x3fff,
-               .flags = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start = IRQ_S3CUART_RX2,
-               .end   = IRQ_S3CUART_ERR2,
-               .flags = IORESOURCE_IRQ,
-       }
-};
-
-static struct resource s3c2410_uart3_resource[] = {
-       [0] = {
-               .start = S3C2443_PA_UART3,
-               .end   = S3C2443_PA_UART3 + 0x3fff,
-               .flags = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start = IRQ_S3CUART_RX3,
-               .end   = IRQ_S3CUART_ERR3,
-               .flags = IORESOURCE_IRQ,
-       },
-};
-
-struct s3c24xx_uart_resources s3c2410_uart_resources[] __initdata = {
-       [0] = {
-               .resources      = s3c2410_uart0_resource,
-               .nr_resources   = ARRAY_SIZE(s3c2410_uart0_resource),
-       },
-       [1] = {
-               .resources      = s3c2410_uart1_resource,
-               .nr_resources   = ARRAY_SIZE(s3c2410_uart1_resource),
-       },
-       [2] = {
-               .resources      = s3c2410_uart2_resource,
-               .nr_resources   = ARRAY_SIZE(s3c2410_uart2_resource),
-       },
-       [3] = {
-               .resources      = s3c2410_uart3_resource,
-               .nr_resources   = ARRAY_SIZE(s3c2410_uart3_resource),
-       },
-};
diff --git a/arch/arm/plat-s3c24xx/irq-pm.c b/arch/arm/plat-s3c24xx/irq-pm.c
deleted file mode 100644 (file)
index 0efb2e2..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-/* linux/arch/arm/plat-s3c24xx/irq-om.c
- *
- * Copyright (c) 2003-2004 Simtec Electronics
- *     Ben Dooks <ben@simtec.co.uk>
- *     http://armlinux.simtec.co.uk/
- *
- * S3C24XX - IRQ PM code
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-
-#include <plat/cpu.h>
-#include <plat/pm.h>
-#include <plat/irq.h>
-
-#include <asm/irq.h>
-
-/* state for IRQs over sleep */
-
-/* default is to allow for EINT0..EINT15, and IRQ_RTC as wakeup sources
- *
- * set bit to 1 in allow bitfield to enable the wakeup settings on it
-*/
-
-unsigned long s3c_irqwake_intallow     = 1L << (IRQ_RTC - IRQ_EINT0) | 0xfL;
-unsigned long s3c_irqwake_eintallow    = 0x0000fff0L;
-
-int s3c_irq_wake(struct irq_data *data, unsigned int state)
-{
-       unsigned long irqbit = 1 << (data->irq - IRQ_EINT0);
-
-       if (!(s3c_irqwake_intallow & irqbit))
-               return -ENOENT;
-
-       printk(KERN_INFO "wake %s for irq %d\n",
-              state ? "enabled" : "disabled", data->irq);
-
-       if (!state)
-               s3c_irqwake_intmask |= irqbit;
-       else
-               s3c_irqwake_intmask &= ~irqbit;
-
-       return 0;
-}
-
-static struct sleep_save irq_save[] = {
-       SAVE_ITEM(S3C2410_INTMSK),
-       SAVE_ITEM(S3C2410_INTSUBMSK),
-};
-
-/* the extint values move between the s3c2410/s3c2440 and the s3c2412
- * so we use an array to hold them, and to calculate the address of
- * the register at run-time
-*/
-
-static unsigned long save_extint[3];
-static unsigned long save_eintflt[4];
-static unsigned long save_eintmask;
-
-int s3c24xx_irq_suspend(void)
-{
-       unsigned int i;
-
-       for (i = 0; i < ARRAY_SIZE(save_extint); i++)
-               save_extint[i] = __raw_readl(S3C24XX_EXTINT0 + (i*4));
-
-       for (i = 0; i < ARRAY_SIZE(save_eintflt); i++)
-               save_eintflt[i] = __raw_readl(S3C24XX_EINFLT0 + (i*4));
-
-       s3c_pm_do_save(irq_save, ARRAY_SIZE(irq_save));
-       save_eintmask = __raw_readl(S3C24XX_EINTMASK);
-
-       return 0;
-}
-
-void s3c24xx_irq_resume(void)
-{
-       unsigned int i;
-
-       for (i = 0; i < ARRAY_SIZE(save_extint); i++)
-               __raw_writel(save_extint[i], S3C24XX_EXTINT0 + (i*4));
-
-       for (i = 0; i < ARRAY_SIZE(save_eintflt); i++)
-               __raw_writel(save_eintflt[i], S3C24XX_EINFLT0 + (i*4));
-
-       s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save));
-       __raw_writel(save_eintmask, S3C24XX_EINTMASK);
-}
diff --git a/arch/arm/plat-s3c24xx/pm.c b/arch/arm/plat-s3c24xx/pm.c
deleted file mode 100644 (file)
index 60627e6..0000000
+++ /dev/null
@@ -1,149 +0,0 @@
-/* linux/arch/arm/plat-s3c24xx/pm.c
- *
- * Copyright (c) 2004-2006 Simtec Electronics
- *     Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX Power Manager (Suspend-To-RAM) support
- *
- * See Documentation/arm/Samsung-S3C24XX/Suspend.txt for more information
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- * Parts based on arch/arm/mach-pxa/pm.c
- *
- * Thanks to Dimitry Andric for debugging
-*/
-
-#include <linux/init.h>
-#include <linux/suspend.h>
-#include <linux/errno.h>
-#include <linux/time.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/serial_core.h>
-#include <linux/io.h>
-
-#include <plat/regs-serial.h>
-#include <mach/regs-clock.h>
-#include <mach/regs-gpio.h>
-#include <mach/regs-mem.h>
-#include <mach/regs-irq.h>
-
-#include <asm/mach/time.h>
-
-#include <plat/gpio-cfg.h>
-#include <plat/pm.h>
-
-#define PFX "s3c24xx-pm: "
-
-static struct sleep_save core_save[] = {
-       SAVE_ITEM(S3C2410_LOCKTIME),
-       SAVE_ITEM(S3C2410_CLKCON),
-
-       /* we restore the timings here, with the proviso that the board
-        * brings the system up in an slower, or equal frequency setting
-        * to the original system.
-        *
-        * if we cannot guarantee this, then things are going to go very
-        * wrong here, as we modify the refresh and both pll settings.
-        */
-
-       SAVE_ITEM(S3C2410_BWSCON),
-       SAVE_ITEM(S3C2410_BANKCON0),
-       SAVE_ITEM(S3C2410_BANKCON1),
-       SAVE_ITEM(S3C2410_BANKCON2),
-       SAVE_ITEM(S3C2410_BANKCON3),
-       SAVE_ITEM(S3C2410_BANKCON4),
-       SAVE_ITEM(S3C2410_BANKCON5),
-
-#ifndef CONFIG_CPU_FREQ
-       SAVE_ITEM(S3C2410_CLKDIVN),
-       SAVE_ITEM(S3C2410_MPLLCON),
-       SAVE_ITEM(S3C2410_REFRESH),
-#endif
-       SAVE_ITEM(S3C2410_UPLLCON),
-       SAVE_ITEM(S3C2410_CLKSLOW),
-};
-
-static struct sleep_save misc_save[] = {
-       SAVE_ITEM(S3C2410_DCLKCON),
-};
-
-/* s3c_pm_check_resume_pin
- *
- * check to see if the pin is configured correctly for sleep mode, and
- * make any necessary adjustments if it is not
-*/
-
-static void s3c_pm_check_resume_pin(unsigned int pin, unsigned int irqoffs)
-{
-       unsigned long irqstate;
-       unsigned long pinstate;
-       int irq = gpio_to_irq(pin);
-
-       if (irqoffs < 4)
-               irqstate = s3c_irqwake_intmask & (1L<<irqoffs);
-       else
-               irqstate = s3c_irqwake_eintmask & (1L<<irqoffs);
-
-       pinstate = s3c_gpio_getcfg(pin);
-
-       if (!irqstate) {
-               if (pinstate == S3C2410_GPIO_IRQ)
-                       S3C_PMDBG("Leaving IRQ %d (pin %d) as is\n", irq, pin);
-       } else {
-               if (pinstate == S3C2410_GPIO_IRQ) {
-                       S3C_PMDBG("Disabling IRQ %d (pin %d)\n", irq, pin);
-                       s3c_gpio_cfgpin(pin, S3C2410_GPIO_INPUT);
-               }
-       }
-}
-
-/* s3c_pm_configure_extint
- *
- * configure all external interrupt pins
-*/
-
-void s3c_pm_configure_extint(void)
-{
-       int pin;
-
-       /* for each of the external interrupts (EINT0..EINT15) we
-        * need to check wether it is an external interrupt source,
-        * and then configure it as an input if it is not
-       */
-
-       for (pin = S3C2410_GPF(0); pin <= S3C2410_GPF(7); pin++) {
-               s3c_pm_check_resume_pin(pin, pin - S3C2410_GPF(0));
-       }
-
-       for (pin = S3C2410_GPG(0); pin <= S3C2410_GPG(7); pin++) {
-               s3c_pm_check_resume_pin(pin, (pin - S3C2410_GPG(0))+8);
-       }
-}
-
-
-void s3c_pm_restore_core(void)
-{
-       s3c_pm_do_restore_core(core_save, ARRAY_SIZE(core_save));
-       s3c_pm_do_restore(misc_save, ARRAY_SIZE(misc_save));
-}
-
-void s3c_pm_save_core(void)
-{
-       s3c_pm_do_save(misc_save, ARRAY_SIZE(misc_save));
-       s3c_pm_do_save(core_save, ARRAY_SIZE(core_save));
-}
-
diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/plat-s3c24xx/sleep.S
deleted file mode 100644 (file)
index c566125..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-/* linux/arch/arm/plat-s3c24xx/sleep.S
- *
- * Copyright (c) 2004 Simtec Electronics
- *     Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 Power Manager (Suspend-To-RAM) support
- *
- * Based on PXA/SA1100 sleep code by:
- *     Nicolas Pitre, (c) 2002 Monta Vista Software Inc
- *     Cliff Brake, (c) 2001
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-*/
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <mach/hardware.h>
-#include <mach/map.h>
-
-#include <mach/regs-gpio.h>
-#include <mach/regs-clock.h>
-#include <mach/regs-mem.h>
-#include <plat/regs-serial.h>
-
-/* CONFIG_DEBUG_RESUME is dangerous if your bootloader does not
- * reset the UART configuration, only enable if you really need this!
-*/
-//#define CONFIG_DEBUG_RESUME
-
-       .text
-
-       /* sleep magic, to allow the bootloader to check for an valid
-        * image to resume to. Must be the first word before the
-        * s3c_cpu_resume entry.
-       */
-
-       .word   0x2bedf00d
-
-       /* s3c_cpu_resume
-        *
-        * resume code entry for bootloader to call
-       */
-
-ENTRY(s3c_cpu_resume)
-       mov     r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
-       msr     cpsr_c, r0
-
-       @@ load UART to allow us to print the two characters for
-       @@ resume debug
-
-       mov     r2, #S3C24XX_PA_UART & 0xff000000
-       orr     r2, r2, #S3C24XX_PA_UART & 0xff000
-
-#if 0
-       /* SMDK2440 LED set */
-       mov     r14, #S3C24XX_PA_GPIO
-       ldr     r12, [ r14, #0x54 ]
-       bic     r12, r12, #3<<4
-       orr     r12, r12, #1<<7
-       str     r12, [ r14, #0x54 ]
-#endif
-
-#ifdef CONFIG_DEBUG_RESUME
-       mov     r3, #'L'
-       strb    r3, [ r2, #S3C2410_UTXH ]
-1001:
-       ldrb    r14, [ r3, #S3C2410_UTRSTAT ]
-       tst     r14, #S3C2410_UTRSTAT_TXE
-       beq     1001b
-#endif /* CONFIG_DEBUG_RESUME */
-
-       b       cpu_resume
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
deleted file mode 100644 (file)
index 96bea32..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-# arch/arm/plat-s5p/Kconfig
-#
-# Copyright (c) 2009 Samsung Electronics Co., Ltd.
-#              http://www.samsung.com/
-#
-# Licensed under GPLv2
-
-config PLAT_S5P
-       bool
-       depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
-       default y
-       select ARM_VIC if !ARCH_EXYNOS
-       select ARM_GIC if ARCH_EXYNOS
-       select GIC_NON_BANKED if ARCH_EXYNOS4
-       select NO_IOPORT
-       select ARCH_REQUIRE_GPIOLIB
-       select S3C_GPIO_TRACK
-       select S5P_GPIO_DRVSTR
-       select SAMSUNG_GPIOLIB_4BIT
-       select PLAT_SAMSUNG
-       select SAMSUNG_CLKSRC
-       select SAMSUNG_IRQ_VIC_TIMER
-       help
-         Base platform code for Samsung's S5P series SoC.
-
-config S5P_EXT_INT
-       bool
-       help
-         Use the external interrupts (other than GPIO interrupts.)
-         Note: Do not choose this for S5P6440 and S5P6450.
-
-config S5P_GPIO_INT
-       bool
-       help
-         Common code for the GPIO interrupts (other than external interrupts.)
-
-config S5P_HRT
-       bool
-       select SAMSUNG_DEV_PWM
-       help
-         Use the High Resolution timer support
-
-config S5P_DEV_UART
-       def_bool y
-       depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210)
-
-config S5P_PM
-       bool
-       help
-         Common code for power management support on S5P and newer SoCs
-         Note: Do not select this for S5P6440 and S5P6450.
-
-comment "System MMU"
-
-config S5P_SYSTEM_MMU
-       bool "S5P SYSTEM MMU"
-       depends on ARCH_EXYNOS4
-       help
-         Say Y here if you want to enable System MMU
-
-config S5P_SLEEP
-       bool
-       help
-         Internal config node to apply common S5P sleep management code.
-         Can be selected by S5P and newer SoCs with similar sleep procedure.
-
-config S5P_DEV_FIMC0
-       bool
-       help
-         Compile in platform device definitions for FIMC controller 0
-
-config S5P_DEV_FIMC1
-       bool
-       help
-         Compile in platform device definitions for FIMC controller 1
-
-config S5P_DEV_FIMC2
-       bool
-       help
-         Compile in platform device definitions for FIMC controller 2
-
-config S5P_DEV_FIMC3
-       bool
-       help
-         Compile in platform device definitions for FIMC controller 3
-
-config S5P_DEV_JPEG
-       bool
-       help
-         Compile in platform device definitions for JPEG codec
-
-config S5P_DEV_G2D
-       bool
-       help
-         Compile in platform device definitions for G2D device
-
-config S5P_DEV_FIMD0
-       bool
-       help
-         Compile in platform device definitions for FIMD controller 0
-
-config S5P_DEV_I2C_HDMIPHY
-       bool
-       help
-         Compile in platform device definitions for I2C HDMIPHY controller
-
-config S5P_DEV_MFC
-       bool
-       help
-         Compile in platform device definitions for MFC
-
-config S5P_DEV_ONENAND
-       bool
-       help
-         Compile in platform device definition for OneNAND controller
-
-config S5P_DEV_CSIS0
-       bool
-       help
-         Compile in platform device definitions for MIPI-CSIS channel 0
-
-config S5P_DEV_CSIS1
-       bool
-       help
-         Compile in platform device definitions for MIPI-CSIS channel 1
-
-config S5P_DEV_TV
-       bool
-       help
-         Compile in platform device definition for TV interface
-
-config S5P_DEV_USB_EHCI
-       bool
-       help
-         Compile in platform device definition for USB EHCI
-
-config S5P_SETUP_MIPIPHY
-       bool
-       help
-         Compile in common setup code for MIPI-CSIS and MIPI-DSIM devices
diff --git a/arch/arm/plat-s5p/Makefile b/arch/arm/plat-s5p/Makefile
deleted file mode 100644 (file)
index 4bd8241..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-# arch/arm/plat-s5p/Makefile
-#
-# Copyright (c) 2009 Samsung Electronics Co., Ltd.
-#              http://www.samsung.com/
-#
-# Licensed under GPLv2
-
-obj-y                          :=
-obj-m                          :=
-obj-n                          := dummy.o
-obj-                           :=
-
-# Core files
-
-obj-y                          += clock.o
-obj-y                          += irq.o
-obj-$(CONFIG_S5P_EXT_INT)      += irq-eint.o
-obj-$(CONFIG_S5P_GPIO_INT)     += irq-gpioint.o
-obj-$(CONFIG_S5P_SYSTEM_MMU)   += sysmmu.o
-obj-$(CONFIG_S5P_PM)           += pm.o irq-pm.o
-obj-$(CONFIG_S5P_SLEEP)                += sleep.o
-obj-$(CONFIG_S5P_HRT)          += s5p-time.o
-
-# devices
-
-obj-$(CONFIG_S5P_DEV_UART)     += dev-uart.o
-obj-$(CONFIG_S5P_DEV_MFC)      += dev-mfc.o
-obj-$(CONFIG_S5P_SETUP_MIPIPHY)        += setup-mipiphy.o
diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-s5p/clock.c
deleted file mode 100644 (file)
index f68a9bb..0000000
+++ /dev/null
@@ -1,264 +0,0 @@
-/* linux/arch/arm/plat-s5p/clock.c
- *
- * Copyright 2009 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com/
- *
- * S5P - Common clock support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/io.h>
-#include <asm/div64.h>
-
-#include <mach/regs-clock.h>
-
-#include <plat/clock.h>
-#include <plat/clock-clksrc.h>
-#include <plat/s5p-clock.h>
-
-/* fin_apll, fin_mpll and fin_epll are all the same clock, which we call
- * clk_ext_xtal_mux.
-*/
-struct clk clk_ext_xtal_mux = {
-       .name           = "ext_xtal",
-       .id             = -1,
-};
-
-struct clk clk_xusbxti = {
-       .name           = "xusbxti",
-       .id             = -1,
-};
-
-struct clk s5p_clk_27m = {
-       .name           = "clk_27m",
-       .id             = -1,
-       .rate           = 27000000,
-};
-
-/* 48MHz USB Phy clock output */
-struct clk clk_48m = {
-       .name           = "clk_48m",
-       .id             = -1,
-       .rate           = 48000000,
-};
-
-/* APLL clock output
- * No need .ctrlbit, this is always on
-*/
-struct clk clk_fout_apll = {
-       .name           = "fout_apll",
-       .id             = -1,
-};
-
-/* BPLL clock output */
-
-struct clk clk_fout_bpll = {
-       .name           = "fout_bpll",
-       .id             = -1,
-};
-
-/* CPLL clock output */
-
-struct clk clk_fout_cpll = {
-       .name           = "fout_cpll",
-       .id             = -1,
-};
-
-/* MPLL clock output
- * No need .ctrlbit, this is always on
-*/
-struct clk clk_fout_mpll = {
-       .name           = "fout_mpll",
-       .id             = -1,
-};
-
-/* EPLL clock output */
-struct clk clk_fout_epll = {
-       .name           = "fout_epll",
-       .id             = -1,
-       .ctrlbit        = (1 << 31),
-};
-
-/* DPLL clock output */
-struct clk clk_fout_dpll = {
-       .name           = "fout_dpll",
-       .id             = -1,
-       .ctrlbit        = (1 << 31),
-};
-
-/* VPLL clock output */
-struct clk clk_fout_vpll = {
-       .name           = "fout_vpll",
-       .id             = -1,
-       .ctrlbit        = (1 << 31),
-};
-
-/* Possible clock sources for APLL Mux */
-static struct clk *clk_src_apll_list[] = {
-       [0] = &clk_fin_apll,
-       [1] = &clk_fout_apll,
-};
-
-struct clksrc_sources clk_src_apll = {
-       .sources        = clk_src_apll_list,
-       .nr_sources     = ARRAY_SIZE(clk_src_apll_list),
-};
-
-/* Possible clock sources for BPLL Mux */
-static struct clk *clk_src_bpll_list[] = {
-       [0] = &clk_fin_bpll,
-       [1] = &clk_fout_bpll,
-};
-
-struct clksrc_sources clk_src_bpll = {
-       .sources        = clk_src_bpll_list,
-       .nr_sources     = ARRAY_SIZE(clk_src_bpll_list),
-};
-
-/* Possible clock sources for CPLL Mux */
-static struct clk *clk_src_cpll_list[] = {
-       [0] = &clk_fin_cpll,
-       [1] = &clk_fout_cpll,
-};
-
-struct clksrc_sources clk_src_cpll = {
-       .sources        = clk_src_cpll_list,
-       .nr_sources     = ARRAY_SIZE(clk_src_cpll_list),
-};
-
-/* Possible clock sources for MPLL Mux */
-static struct clk *clk_src_mpll_list[] = {
-       [0] = &clk_fin_mpll,
-       [1] = &clk_fout_mpll,
-};
-
-struct clksrc_sources clk_src_mpll = {
-       .sources        = clk_src_mpll_list,
-       .nr_sources     = ARRAY_SIZE(clk_src_mpll_list),
-};
-
-/* Possible clock sources for EPLL Mux */
-static struct clk *clk_src_epll_list[] = {
-       [0] = &clk_fin_epll,
-       [1] = &clk_fout_epll,
-};
-
-struct clksrc_sources clk_src_epll = {
-       .sources        = clk_src_epll_list,
-       .nr_sources     = ARRAY_SIZE(clk_src_epll_list),
-};
-
-/* Possible clock sources for DPLL Mux */
-static struct clk *clk_src_dpll_list[] = {
-       [0] = &clk_fin_dpll,
-       [1] = &clk_fout_dpll,
-};
-
-struct clksrc_sources clk_src_dpll = {
-       .sources        = clk_src_dpll_list,
-       .nr_sources     = ARRAY_SIZE(clk_src_dpll_list),
-};
-
-struct clk clk_vpll = {
-       .name           = "vpll",
-       .id             = -1,
-};
-
-int s5p_gatectrl(void __iomem *reg, struct clk *clk, int enable)
-{
-       unsigned int ctrlbit = clk->ctrlbit;
-       u32 con;
-
-       con = __raw_readl(reg);
-       con = enable ? (con | ctrlbit) : (con & ~ctrlbit);
-       __raw_writel(con, reg);
-       return 0;
-}
-
-int s5p_epll_enable(struct clk *clk, int enable)
-{
-       unsigned int ctrlbit = clk->ctrlbit;
-       unsigned int epll_con = __raw_readl(S5P_EPLL_CON) & ~ctrlbit;
-
-       if (enable)
-               __raw_writel(epll_con | ctrlbit, S5P_EPLL_CON);
-       else
-               __raw_writel(epll_con, S5P_EPLL_CON);
-
-       return 0;
-}
-
-unsigned long s5p_epll_get_rate(struct clk *clk)
-{
-       return clk->rate;
-}
-
-int s5p_spdif_set_rate(struct clk *clk, unsigned long rate)
-{
-       struct clk *pclk;
-       int ret;
-
-       pclk = clk_get_parent(clk);
-       if (IS_ERR(pclk))
-               return -EINVAL;
-
-       ret = pclk->ops->set_rate(pclk, rate);
-       clk_put(pclk);
-
-       return ret;
-}
-
-unsigned long s5p_spdif_get_rate(struct clk *clk)
-{
-       struct clk *pclk;
-       int rate;
-
-       pclk = clk_get_parent(clk);
-       if (IS_ERR(pclk))
-               return -EINVAL;
-
-       rate = pclk->ops->get_rate(pclk);
-       clk_put(pclk);
-
-       return rate;
-}
-
-struct clk_ops s5p_sclk_spdif_ops = {
-       .set_rate       = s5p_spdif_set_rate,
-       .get_rate       = s5p_spdif_get_rate,
-};
-
-static struct clk *s5p_clks[] __initdata = {
-       &clk_ext_xtal_mux,
-       &clk_48m,
-       &s5p_clk_27m,
-       &clk_fout_apll,
-       &clk_fout_mpll,
-       &clk_fout_epll,
-       &clk_fout_dpll,
-       &clk_fout_vpll,
-       &clk_vpll,
-       &clk_xusbxti,
-};
-
-void __init s5p_register_clocks(unsigned long xtal_freq)
-{
-       int ret;
-
-       clk_ext_xtal_mux.rate = xtal_freq;
-
-       ret = s3c24xx_register_clocks(s5p_clks, ARRAY_SIZE(s5p_clks));
-       if (ret > 0)
-               printk(KERN_ERR "Failed to register s5p clocks\n");
-}
diff --git a/arch/arm/plat-s5p/dev-mfc.c b/arch/arm/plat-s5p/dev-mfc.c
deleted file mode 100644 (file)
index a30d36b..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/* linux/arch/arm/plat-s5p/dev-mfc.c
- *
- * Copyright (C) 2010-2011 Samsung Electronics Co.Ltd
- *
- * Base S5P MFC resource and device definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/memblock.h>
-#include <linux/ioport.h>
-
-#include <mach/map.h>
-#include <plat/devs.h>
-#include <plat/irqs.h>
-#include <plat/mfc.h>
-
-struct s5p_mfc_reserved_mem {
-       phys_addr_t     base;
-       unsigned long   size;
-       struct device   *dev;
-};
-
-static struct s5p_mfc_reserved_mem s5p_mfc_mem[2] __initdata;
-
-void __init s5p_mfc_reserve_mem(phys_addr_t rbase, unsigned int rsize,
-                               phys_addr_t lbase, unsigned int lsize)
-{
-       int i;
-
-       s5p_mfc_mem[0].dev = &s5p_device_mfc_r.dev;
-       s5p_mfc_mem[0].base = rbase;
-       s5p_mfc_mem[0].size = rsize;
-
-       s5p_mfc_mem[1].dev = &s5p_device_mfc_l.dev;
-       s5p_mfc_mem[1].base = lbase;
-       s5p_mfc_mem[1].size = lsize;
-
-       for (i = 0; i < ARRAY_SIZE(s5p_mfc_mem); i++) {
-               struct s5p_mfc_reserved_mem *area = &s5p_mfc_mem[i];
-               if (memblock_remove(area->base, area->size)) {
-                       printk(KERN_ERR "Failed to reserve memory for MFC device (%ld bytes at 0x%08lx)\n",
-                              area->size, (unsigned long) area->base);
-                       area->base = 0;
-               }
-       }
-}
-
-static int __init s5p_mfc_memory_init(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(s5p_mfc_mem); i++) {
-               struct s5p_mfc_reserved_mem *area = &s5p_mfc_mem[i];
-               if (!area->base)
-                       continue;
-
-               if (dma_declare_coherent_memory(area->dev, area->base,
-                               area->base, area->size,
-                               DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0)
-                       printk(KERN_ERR "Failed to declare coherent memory for MFC device (%ld bytes at 0x%08lx)\n",
-                              area->size, (unsigned long) area->base);
-       }
-       return 0;
-}
-device_initcall(s5p_mfc_memory_init);
diff --git a/arch/arm/plat-s5p/dev-uart.c b/arch/arm/plat-s5p/dev-uart.c
deleted file mode 100644 (file)
index c9308db..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-/* linux/arch/arm/plat-s5p/dev-uart.c
- *
- * Copyright (c) 2009 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com/
- *
- * Base S5P UART resource and device definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/platform_device.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/irq.h>
-#include <mach/hardware.h>
-#include <mach/map.h>
-
-#include <plat/devs.h>
-
- /* Serial port registrations */
-
-static struct resource s5p_uart0_resource[] = {
-       [0] = {
-               .start  = S5P_PA_UART0,
-               .end    = S5P_PA_UART0 + S5P_SZ_UART - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = IRQ_UART0,
-               .end    = IRQ_UART0,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-
-static struct resource s5p_uart1_resource[] = {
-       [0] = {
-               .start  = S5P_PA_UART1,
-               .end    = S5P_PA_UART1 + S5P_SZ_UART - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = IRQ_UART1,
-               .end    = IRQ_UART1,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-
-static struct resource s5p_uart2_resource[] = {
-       [0] = {
-               .start  = S5P_PA_UART2,
-               .end    = S5P_PA_UART2 + S5P_SZ_UART - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = IRQ_UART2,
-               .end    = IRQ_UART2,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-
-static struct resource s5p_uart3_resource[] = {
-#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
-       [0] = {
-               .start  = S5P_PA_UART3,
-               .end    = S5P_PA_UART3 + S5P_SZ_UART - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = IRQ_UART3,
-               .end    = IRQ_UART3,
-               .flags  = IORESOURCE_IRQ,
-       },
-#endif
-};
-
-static struct resource s5p_uart4_resource[] = {
-#if CONFIG_SERIAL_SAMSUNG_UARTS > 4
-       [0] = {
-               .start  = S5P_PA_UART4,
-               .end    = S5P_PA_UART4 + S5P_SZ_UART - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = IRQ_UART4,
-               .end    = IRQ_UART4,
-               .flags  = IORESOURCE_IRQ,
-       },
-#endif
-};
-
-static struct resource s5p_uart5_resource[] = {
-#if CONFIG_SERIAL_SAMSUNG_UARTS > 5
-       [0] = {
-               .start  = S5P_PA_UART5,
-               .end    = S5P_PA_UART5 + S5P_SZ_UART - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = IRQ_UART5,
-               .end    = IRQ_UART5,
-               .flags  = IORESOURCE_IRQ,
-       },
-#endif
-};
-
-struct s3c24xx_uart_resources s5p_uart_resources[] __initdata = {
-       [0] = {
-               .resources      = s5p_uart0_resource,
-               .nr_resources   = ARRAY_SIZE(s5p_uart0_resource),
-       },
-       [1] = {
-               .resources      = s5p_uart1_resource,
-               .nr_resources   = ARRAY_SIZE(s5p_uart1_resource),
-       },
-       [2] = {
-               .resources      = s5p_uart2_resource,
-               .nr_resources   = ARRAY_SIZE(s5p_uart2_resource),
-       },
-       [3] = {
-               .resources      = s5p_uart3_resource,
-               .nr_resources   = ARRAY_SIZE(s5p_uart3_resource),
-       },
-       [4] = {
-               .resources      = s5p_uart4_resource,
-               .nr_resources   = ARRAY_SIZE(s5p_uart4_resource),
-       },
-       [5] = {
-               .resources      = s5p_uart5_resource,
-               .nr_resources   = ARRAY_SIZE(s5p_uart5_resource),
-       },
-};
diff --git a/arch/arm/plat-s5p/irq-eint.c b/arch/arm/plat-s5p/irq-eint.c
deleted file mode 100644 (file)
index 139c050..0000000
+++ /dev/null
@@ -1,219 +0,0 @@
-/* linux/arch/arm/plat-s5p/irq-eint.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * S5P - IRQ EINT support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/device.h>
-#include <linux/gpio.h>
-
-#include <asm/hardware/vic.h>
-
-#include <plat/regs-irqtype.h>
-
-#include <mach/map.h>
-#include <plat/cpu.h>
-#include <plat/pm.h>
-
-#include <plat/gpio-cfg.h>
-#include <mach/regs-gpio.h>
-
-static inline void s5p_irq_eint_mask(struct irq_data *data)
-{
-       u32 mask;
-
-       mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
-       mask |= eint_irq_to_bit(data->irq);
-       __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
-}
-
-static void s5p_irq_eint_unmask(struct irq_data *data)
-{
-       u32 mask;
-
-       mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
-       mask &= ~(eint_irq_to_bit(data->irq));
-       __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
-}
-
-static inline void s5p_irq_eint_ack(struct irq_data *data)
-{
-       __raw_writel(eint_irq_to_bit(data->irq),
-                    S5P_EINT_PEND(EINT_REG_NR(data->irq)));
-}
-
-static void s5p_irq_eint_maskack(struct irq_data *data)
-{
-       /* compiler should in-line these */
-       s5p_irq_eint_mask(data);
-       s5p_irq_eint_ack(data);
-}
-
-static int s5p_irq_eint_set_type(struct irq_data *data, unsigned int type)
-{
-       int offs = EINT_OFFSET(data->irq);
-       int shift;
-       u32 ctrl, mask;
-       u32 newvalue = 0;
-
-       switch (type) {
-       case IRQ_TYPE_EDGE_RISING:
-               newvalue = S5P_IRQ_TYPE_EDGE_RISING;
-               break;
-
-       case IRQ_TYPE_EDGE_FALLING:
-               newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
-               break;
-
-       case IRQ_TYPE_EDGE_BOTH:
-               newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
-               break;
-
-       case IRQ_TYPE_LEVEL_LOW:
-               newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
-               break;
-
-       case IRQ_TYPE_LEVEL_HIGH:
-               newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
-               break;
-
-       default:
-               printk(KERN_ERR "No such irq type %d", type);
-               return -EINVAL;
-       }
-
-       shift = (offs & 0x7) * 4;
-       mask = 0x7 << shift;
-
-       ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
-       ctrl &= ~mask;
-       ctrl |= newvalue << shift;
-       __raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
-
-       if ((0 <= offs) && (offs < 8))
-               s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
-
-       else if ((8 <= offs) && (offs < 16))
-               s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
-
-       else if ((16 <= offs) && (offs < 24))
-               s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
-
-       else if ((24 <= offs) && (offs < 32))
-               s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
-
-       else
-               printk(KERN_ERR "No such irq number %d", offs);
-
-       return 0;
-}
-
-static struct irq_chip s5p_irq_eint = {
-       .name           = "s5p-eint",
-       .irq_mask       = s5p_irq_eint_mask,
-       .irq_unmask     = s5p_irq_eint_unmask,
-       .irq_mask_ack   = s5p_irq_eint_maskack,
-       .irq_ack        = s5p_irq_eint_ack,
-       .irq_set_type   = s5p_irq_eint_set_type,
-#ifdef CONFIG_PM
-       .irq_set_wake   = s3c_irqext_wake,
-#endif
-};
-
-/* s5p_irq_demux_eint
- *
- * This function demuxes the IRQ from the group0 external interrupts,
- * from EINTs 16 to 31. It is designed to be inlined into the specific
- * handler s5p_irq_demux_eintX_Y.
- *
- * Each EINT pend/mask registers handle eight of them.
- */
-static inline void s5p_irq_demux_eint(unsigned int start)
-{
-       u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
-       u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
-       unsigned int irq;
-
-       status &= ~mask;
-       status &= 0xff;
-
-       while (status) {
-               irq = fls(status) - 1;
-               generic_handle_irq(irq + start);
-               status &= ~(1 << irq);
-       }
-}
-
-static void s5p_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
-{
-       s5p_irq_demux_eint(IRQ_EINT(16));
-       s5p_irq_demux_eint(IRQ_EINT(24));
-}
-
-static inline void s5p_irq_vic_eint_mask(struct irq_data *data)
-{
-       void __iomem *base = irq_data_get_irq_chip_data(data);
-
-       s5p_irq_eint_mask(data);
-       writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE_CLEAR);
-}
-
-static void s5p_irq_vic_eint_unmask(struct irq_data *data)
-{
-       void __iomem *base = irq_data_get_irq_chip_data(data);
-
-       s5p_irq_eint_unmask(data);
-       writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE);
-}
-
-static inline void s5p_irq_vic_eint_ack(struct irq_data *data)
-{
-       __raw_writel(eint_irq_to_bit(data->irq),
-                    S5P_EINT_PEND(EINT_REG_NR(data->irq)));
-}
-
-static void s5p_irq_vic_eint_maskack(struct irq_data *data)
-{
-       s5p_irq_vic_eint_mask(data);
-       s5p_irq_vic_eint_ack(data);
-}
-
-static struct irq_chip s5p_irq_vic_eint = {
-       .name           = "s5p_vic_eint",
-       .irq_mask       = s5p_irq_vic_eint_mask,
-       .irq_unmask     = s5p_irq_vic_eint_unmask,
-       .irq_mask_ack   = s5p_irq_vic_eint_maskack,
-       .irq_ack        = s5p_irq_vic_eint_ack,
-       .irq_set_type   = s5p_irq_eint_set_type,
-#ifdef CONFIG_PM
-       .irq_set_wake   = s3c_irqext_wake,
-#endif
-};
-
-static int __init s5p_init_irq_eint(void)
-{
-       int irq;
-
-       for (irq = IRQ_EINT(0); irq <= IRQ_EINT(15); irq++)
-               irq_set_chip(irq, &s5p_irq_vic_eint);
-
-       for (irq = IRQ_EINT(16); irq <= IRQ_EINT(31); irq++) {
-               irq_set_chip_and_handler(irq, &s5p_irq_eint, handle_level_irq);
-               set_irq_flags(irq, IRQF_VALID);
-       }
-
-       irq_set_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31);
-       return 0;
-}
-
-arch_initcall(s5p_init_irq_eint);
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
deleted file mode 100644 (file)
index 82c7311..0000000
+++ /dev/null
@@ -1,216 +0,0 @@
-/* linux/arch/arm/plat-s5p/irq-gpioint.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * Author: Kyungmin Park <kyungmin.park@samsung.com>
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- * Author: Marek Szyprowski <m.szyprowski@samsung.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/slab.h>
-
-#include <mach/map.h>
-#include <plat/gpio-core.h>
-#include <plat/gpio-cfg.h>
-
-#include <asm/mach/irq.h>
-
-#define GPIO_BASE(chip)                (((unsigned long)(chip)->base) & 0xFFFFF000u)
-
-#define CON_OFFSET             0x700
-#define MASK_OFFSET            0x900
-#define PEND_OFFSET            0xA00
-#define REG_OFFSET(x)          ((x) << 2)
-
-struct s5p_gpioint_bank {
-       struct list_head        list;
-       int                     start;
-       int                     nr_groups;
-       int                     irq;
-       struct samsung_gpio_chip        **chips;
-       void                    (*handler)(unsigned int, struct irq_desc *);
-};
-
-static LIST_HEAD(banks);
-
-static int s5p_gpioint_set_type(struct irq_data *d, unsigned int type)
-{
-       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-       struct irq_chip_type *ct = gc->chip_types;
-       unsigned int shift = (d->irq - gc->irq_base) << 2;
-
-       switch (type) {
-       case IRQ_TYPE_EDGE_RISING:
-               type = S5P_IRQ_TYPE_EDGE_RISING;
-               break;
-       case IRQ_TYPE_EDGE_FALLING:
-               type = S5P_IRQ_TYPE_EDGE_FALLING;
-               break;
-       case IRQ_TYPE_EDGE_BOTH:
-               type = S5P_IRQ_TYPE_EDGE_BOTH;
-               break;
-       case IRQ_TYPE_LEVEL_HIGH:
-               type = S5P_IRQ_TYPE_LEVEL_HIGH;
-               break;
-       case IRQ_TYPE_LEVEL_LOW:
-               type = S5P_IRQ_TYPE_LEVEL_LOW;
-               break;
-       case IRQ_TYPE_NONE:
-       default:
-               printk(KERN_WARNING "No irq type\n");
-               return -EINVAL;
-       }
-
-       gc->type_cache &= ~(0x7 << shift);
-       gc->type_cache |= type << shift;
-       writel(gc->type_cache, gc->reg_base + ct->regs.type);
-       return 0;
-}
-
-static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
-{
-       struct s5p_gpioint_bank *bank = irq_get_handler_data(irq);
-       int group, pend_offset, mask_offset;
-       unsigned int pend, mask;
-
-       struct irq_chip *chip = irq_get_chip(irq);
-       chained_irq_enter(chip, desc);
-
-       for (group = 0; group < bank->nr_groups; group++) {
-               struct samsung_gpio_chip *chip = bank->chips[group];
-               if (!chip)
-                       continue;
-
-               pend_offset = REG_OFFSET(group);
-               pend = __raw_readl(GPIO_BASE(chip) + PEND_OFFSET + pend_offset);
-               if (!pend)
-                       continue;
-
-               mask_offset = REG_OFFSET(group);
-               mask = __raw_readl(GPIO_BASE(chip) + MASK_OFFSET + mask_offset);
-               pend &= ~mask;
-
-               while (pend) {
-                       int offset = fls(pend) - 1;
-                       int real_irq = chip->irq_base + offset;
-                       generic_handle_irq(real_irq);
-                       pend &= ~BIT(offset);
-               }
-       }
-       chained_irq_exit(chip, desc);
-}
-
-static __init int s5p_gpioint_add(struct samsung_gpio_chip *chip)
-{
-       static int used_gpioint_groups = 0;
-       int group = chip->group;
-       struct s5p_gpioint_bank *b, *bank = NULL;
-       struct irq_chip_generic *gc;
-       struct irq_chip_type *ct;
-
-       if (used_gpioint_groups >= S5P_GPIOINT_GROUP_COUNT)
-               return -ENOMEM;
-
-       list_for_each_entry(b, &banks, list) {
-               if (group >= b->start && group < b->start + b->nr_groups) {
-                       bank = b;
-                       break;
-               }
-       }
-       if (!bank)
-               return -EINVAL;
-
-       if (!bank->handler) {
-               bank->chips = kzalloc(sizeof(struct samsung_gpio_chip *) *
-                                     bank->nr_groups, GFP_KERNEL);
-               if (!bank->chips)
-                       return -ENOMEM;
-
-               irq_set_chained_handler(bank->irq, s5p_gpioint_handler);
-               irq_set_handler_data(bank->irq, bank);
-               bank->handler = s5p_gpioint_handler;
-               printk(KERN_INFO "Registered chained gpio int handler for interrupt %d.\n",
-                      bank->irq);
-       }
-
-       /*
-        * chained GPIO irq has been successfully registered, allocate new gpio
-        * int group and assign irq nubmers
-        */
-       chip->irq_base = S5P_GPIOINT_BASE +
-                        used_gpioint_groups * S5P_GPIOINT_GROUP_SIZE;
-       used_gpioint_groups++;
-
-       bank->chips[group - bank->start] = chip;
-
-       gc = irq_alloc_generic_chip("s5p_gpioint", 1, chip->irq_base,
-                                   (void __iomem *)GPIO_BASE(chip),
-                                   handle_level_irq);
-       if (!gc)
-               return -ENOMEM;
-       ct = gc->chip_types;
-       ct->chip.irq_ack = irq_gc_ack_set_bit;
-       ct->chip.irq_mask = irq_gc_mask_set_bit;
-       ct->chip.irq_unmask = irq_gc_mask_clr_bit;
-       ct->chip.irq_set_type = s5p_gpioint_set_type,
-       ct->regs.ack = PEND_OFFSET + REG_OFFSET(group - bank->start);
-       ct->regs.mask = MASK_OFFSET + REG_OFFSET(group - bank->start);
-       ct->regs.type = CON_OFFSET + REG_OFFSET(group - bank->start);
-       irq_setup_generic_chip(gc, IRQ_MSK(chip->chip.ngpio),
-                              IRQ_GC_INIT_MASK_CACHE,
-                              IRQ_NOREQUEST | IRQ_NOPROBE, 0);
-       return 0;
-}
-
-int __init s5p_register_gpio_interrupt(int pin)
-{
-       struct samsung_gpio_chip *my_chip = samsung_gpiolib_getchip(pin);
-       int offset, group;
-       int ret;
-
-       if (!my_chip)
-               return -EINVAL;
-
-       offset = pin - my_chip->chip.base;
-       group = my_chip->group;
-
-       /* check if the group has been already registered */
-       if (my_chip->irq_base)
-               return my_chip->irq_base + offset;
-
-       /* register gpio group */
-       ret = s5p_gpioint_add(my_chip);
-       if (ret == 0) {
-               my_chip->chip.to_irq = samsung_gpiolib_to_irq;
-               printk(KERN_INFO "Registered interrupt support for gpio group %d.\n",
-                      group);
-               return my_chip->irq_base + offset;
-       }
-       return ret;
-}
-
-int __init s5p_register_gpioint_bank(int chain_irq, int start, int nr_groups)
-{
-       struct s5p_gpioint_bank *bank;
-
-       bank = kzalloc(sizeof(*bank), GFP_KERNEL);
-       if (!bank)
-               return -ENOMEM;
-
-       bank->start = start;
-       bank->nr_groups = nr_groups;
-       bank->irq = chain_irq;
-
-       list_add_tail(&bank->list, &banks);
-       return 0;
-}
diff --git a/arch/arm/plat-s5p/irq-pm.c b/arch/arm/plat-s5p/irq-pm.c
deleted file mode 100644 (file)
index d1bfeca..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-/* linux/arch/arm/plat-s5p/irq-pm.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * Based on arch/arm/plat-s3c24xx/irq-pm.c,
- * Copyright (c) 2003,2004 Simtec Electronics
- *     Ben Dooks <ben@simtec.co.uk>
- *     http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include <plat/cpu.h>
-#include <plat/irqs.h>
-#include <plat/pm.h>
-#include <mach/map.h>
-
-#include <mach/regs-gpio.h>
-#include <mach/regs-irq.h>
-
-/* state for IRQs over sleep */
-
-/* default is to allow for EINT0..EINT31, and IRQ_RTC_TIC, IRQ_RTC_ALARM,
- * as wakeup sources
- *
- * set bit to 1 in allow bitfield to enable the wakeup settings on it
-*/
-
-unsigned long s3c_irqwake_intallow     = 0x00000006L;
-unsigned long s3c_irqwake_eintallow    = 0xffffffffL;
-
-int s3c_irq_wake(struct irq_data *data, unsigned int state)
-{
-       unsigned long irqbit;
-       unsigned int irq_rtc_tic, irq_rtc_alarm;
-
-#ifdef CONFIG_ARCH_EXYNOS
-       if (soc_is_exynos5250()) {
-               irq_rtc_tic = EXYNOS5_IRQ_RTC_TIC;
-               irq_rtc_alarm = EXYNOS5_IRQ_RTC_ALARM;
-       } else {
-               irq_rtc_tic = EXYNOS4_IRQ_RTC_TIC;
-               irq_rtc_alarm = EXYNOS4_IRQ_RTC_ALARM;
-       }
-#else
-       irq_rtc_tic = IRQ_RTC_TIC;
-       irq_rtc_alarm = IRQ_RTC_ALARM;
-#endif
-
-       if (data->irq == irq_rtc_tic || data->irq == irq_rtc_alarm) {
-               irqbit = 1 << (data->irq + 1 - irq_rtc_alarm);
-
-               if (!state)
-                       s3c_irqwake_intmask |= irqbit;
-               else
-                       s3c_irqwake_intmask &= ~irqbit;
-       } else {
-               return -ENOENT;
-       }
-
-       return 0;
-}
-
-static struct sleep_save eint_save[] = {
-       SAVE_ITEM(S5P_EINT_CON(0)),
-       SAVE_ITEM(S5P_EINT_CON(1)),
-       SAVE_ITEM(S5P_EINT_CON(2)),
-       SAVE_ITEM(S5P_EINT_CON(3)),
-
-       SAVE_ITEM(S5P_EINT_FLTCON(0)),
-       SAVE_ITEM(S5P_EINT_FLTCON(1)),
-       SAVE_ITEM(S5P_EINT_FLTCON(2)),
-       SAVE_ITEM(S5P_EINT_FLTCON(3)),
-       SAVE_ITEM(S5P_EINT_FLTCON(4)),
-       SAVE_ITEM(S5P_EINT_FLTCON(5)),
-       SAVE_ITEM(S5P_EINT_FLTCON(6)),
-       SAVE_ITEM(S5P_EINT_FLTCON(7)),
-
-       SAVE_ITEM(S5P_EINT_MASK(0)),
-       SAVE_ITEM(S5P_EINT_MASK(1)),
-       SAVE_ITEM(S5P_EINT_MASK(2)),
-       SAVE_ITEM(S5P_EINT_MASK(3)),
-};
-
-int s3c24xx_irq_suspend(void)
-{
-       s3c_pm_do_save(eint_save, ARRAY_SIZE(eint_save));
-
-       return 0;
-}
-
-void s3c24xx_irq_resume(void)
-{
-       s3c_pm_do_restore(eint_save, ARRAY_SIZE(eint_save));
-}
-
diff --git a/arch/arm/plat-s5p/irq.c b/arch/arm/plat-s5p/irq.c
deleted file mode 100644 (file)
index afdaa10..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/* arch/arm/plat-s5p/irq.c
- *
- * Copyright (c) 2009 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com/
- *
- * S5P - Interrupt handling
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#include <asm/hardware/vic.h>
-
-#include <mach/map.h>
-#include <plat/regs-timer.h>
-#include <plat/cpu.h>
-#include <plat/irq-vic-timer.h>
-
-void __init s5p_init_irq(u32 *vic, u32 num_vic)
-{
-#ifdef CONFIG_ARM_VIC
-       int irq;
-
-       /* initialize the VICs */
-       for (irq = 0; irq < num_vic; irq++)
-               vic_init(VA_VIC(irq), VIC_BASE(irq), vic[irq], 0);
-#endif
-
-       s3c_init_vic_timer_irq(5, IRQ_TIMER0);
-}
diff --git a/arch/arm/plat-s5p/pm.c b/arch/arm/plat-s5p/pm.c
deleted file mode 100644 (file)
index d15dc47..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/* linux/arch/arm/plat-s5p/pm.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * S5P Power Manager (Suspend-To-RAM) support
- *
- * Based on arch/arm/plat-s3c24xx/pm.c
- * Copyright (c) 2004,2006 Simtec Electronics
- *     Ben Dooks <ben@simtec.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/suspend.h>
-#include <plat/pm.h>
-
-#define PFX "s5p pm: "
-
-/* s3c_pm_configure_extint
- *
- * configure all external interrupt pins
-*/
-
-void s3c_pm_configure_extint(void)
-{
-       /* nothing here yet */
-}
-
-void s3c_pm_restore_core(void)
-{
-       /* nothing here yet */
-}
-
-void s3c_pm_save_core(void)
-{
-       /* nothing here yet */
-}
-
diff --git a/arch/arm/plat-s5p/s5p-time.c b/arch/arm/plat-s5p/s5p-time.c
deleted file mode 100644 (file)
index 17c0a2c..0000000
+++ /dev/null
@@ -1,406 +0,0 @@
-/* linux/arch/arm/plat-s5p/s5p-time.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com/
- *
- * S5P - Common hr-timer support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/clockchips.h>
-#include <linux/platform_device.h>
-
-#include <asm/smp_twd.h>
-#include <asm/mach/time.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/sched_clock.h>
-
-#include <mach/map.h>
-#include <plat/devs.h>
-#include <plat/regs-timer.h>
-#include <plat/s5p-time.h>
-
-static struct clk *tin_event;
-static struct clk *tin_source;
-static struct clk *tdiv_event;
-static struct clk *tdiv_source;
-static struct clk *timerclk;
-static struct s5p_timer_source timer_source;
-static unsigned long clock_count_per_tick;
-static void s5p_timer_resume(void);
-
-static void s5p_time_stop(enum s5p_timer_mode mode)
-{
-       unsigned long tcon;
-
-       tcon = __raw_readl(S3C2410_TCON);
-
-       switch (mode) {
-       case S5P_PWM0:
-               tcon &= ~S3C2410_TCON_T0START;
-               break;
-
-       case S5P_PWM1:
-               tcon &= ~S3C2410_TCON_T1START;
-               break;
-
-       case S5P_PWM2:
-               tcon &= ~S3C2410_TCON_T2START;
-               break;
-
-       case S5P_PWM3:
-               tcon &= ~S3C2410_TCON_T3START;
-               break;
-
-       case S5P_PWM4:
-               tcon &= ~S3C2410_TCON_T4START;
-               break;
-
-       default:
-               printk(KERN_ERR "Invalid Timer %d\n", mode);
-               break;
-       }
-       __raw_writel(tcon, S3C2410_TCON);
-}
-
-static void s5p_time_setup(enum s5p_timer_mode mode, unsigned long tcnt)
-{
-       unsigned long tcon;
-
-       tcon = __raw_readl(S3C2410_TCON);
-
-       tcnt--;
-
-       switch (mode) {
-       case S5P_PWM0:
-               tcon &= ~(0x0f << 0);
-               tcon |= S3C2410_TCON_T0MANUALUPD;
-               break;
-
-       case S5P_PWM1:
-               tcon &= ~(0x0f << 8);
-               tcon |= S3C2410_TCON_T1MANUALUPD;
-               break;
-
-       case S5P_PWM2:
-               tcon &= ~(0x0f << 12);
-               tcon |= S3C2410_TCON_T2MANUALUPD;
-               break;
-
-       case S5P_PWM3:
-               tcon &= ~(0x0f << 16);
-               tcon |= S3C2410_TCON_T3MANUALUPD;
-               break;
-
-       case S5P_PWM4:
-               tcon &= ~(0x07 << 20);
-               tcon |= S3C2410_TCON_T4MANUALUPD;
-               break;
-
-       default:
-               printk(KERN_ERR "Invalid Timer %d\n", mode);
-               break;
-       }
-
-       __raw_writel(tcnt, S3C2410_TCNTB(mode));
-       __raw_writel(tcnt, S3C2410_TCMPB(mode));
-       __raw_writel(tcon, S3C2410_TCON);
-}
-
-static void s5p_time_start(enum s5p_timer_mode mode, bool periodic)
-{
-       unsigned long tcon;
-
-       tcon  = __raw_readl(S3C2410_TCON);
-
-       switch (mode) {
-       case S5P_PWM0:
-               tcon |= S3C2410_TCON_T0START;
-               tcon &= ~S3C2410_TCON_T0MANUALUPD;
-
-               if (periodic)
-                       tcon |= S3C2410_TCON_T0RELOAD;
-               else
-                       tcon &= ~S3C2410_TCON_T0RELOAD;
-               break;
-
-       case S5P_PWM1:
-               tcon |= S3C2410_TCON_T1START;
-               tcon &= ~S3C2410_TCON_T1MANUALUPD;
-
-               if (periodic)
-                       tcon |= S3C2410_TCON_T1RELOAD;
-               else
-                       tcon &= ~S3C2410_TCON_T1RELOAD;
-               break;
-
-       case S5P_PWM2:
-               tcon |= S3C2410_TCON_T2START;
-               tcon &= ~S3C2410_TCON_T2MANUALUPD;
-
-               if (periodic)
-                       tcon |= S3C2410_TCON_T2RELOAD;
-               else
-                       tcon &= ~S3C2410_TCON_T2RELOAD;
-               break;
-
-       case S5P_PWM3:
-               tcon |= S3C2410_TCON_T3START;
-               tcon &= ~S3C2410_TCON_T3MANUALUPD;
-
-               if (periodic)
-                       tcon |= S3C2410_TCON_T3RELOAD;
-               else
-                       tcon &= ~S3C2410_TCON_T3RELOAD;
-               break;
-
-       case S5P_PWM4:
-               tcon |= S3C2410_TCON_T4START;
-               tcon &= ~S3C2410_TCON_T4MANUALUPD;
-
-               if (periodic)
-                       tcon |= S3C2410_TCON_T4RELOAD;
-               else
-                       tcon &= ~S3C2410_TCON_T4RELOAD;
-               break;
-
-       default:
-               printk(KERN_ERR "Invalid Timer %d\n", mode);
-               break;
-       }
-       __raw_writel(tcon, S3C2410_TCON);
-}
-
-static int s5p_set_next_event(unsigned long cycles,
-                               struct clock_event_device *evt)
-{
-       s5p_time_setup(timer_source.event_id, cycles);
-       s5p_time_start(timer_source.event_id, NON_PERIODIC);
-
-       return 0;
-}
-
-static void s5p_set_mode(enum clock_event_mode mode,
-                               struct clock_event_device *evt)
-{
-       s5p_time_stop(timer_source.event_id);
-
-       switch (mode) {
-       case CLOCK_EVT_MODE_PERIODIC:
-               s5p_time_setup(timer_source.event_id, clock_count_per_tick);
-               s5p_time_start(timer_source.event_id, PERIODIC);
-               break;
-
-       case CLOCK_EVT_MODE_ONESHOT:
-               break;
-
-       case CLOCK_EVT_MODE_UNUSED:
-       case CLOCK_EVT_MODE_SHUTDOWN:
-               break;
-
-       case CLOCK_EVT_MODE_RESUME:
-               s5p_timer_resume();
-               break;
-       }
-}
-
-static void s5p_timer_resume(void)
-{
-       /* event timer restart */
-       s5p_time_setup(timer_source.event_id, clock_count_per_tick);
-       s5p_time_start(timer_source.event_id, PERIODIC);
-
-       /* source timer restart */
-       s5p_time_setup(timer_source.source_id, TCNT_MAX);
-       s5p_time_start(timer_source.source_id, PERIODIC);
-}
-
-void __init s5p_set_timer_source(enum s5p_timer_mode event,
-                                enum s5p_timer_mode source)
-{
-       s3c_device_timer[event].dev.bus = &platform_bus_type;
-       s3c_device_timer[source].dev.bus = &platform_bus_type;
-
-       timer_source.event_id = event;
-       timer_source.source_id = source;
-}
-
-static struct clock_event_device time_event_device = {
-       .name           = "s5p_event_timer",
-       .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-       .rating         = 200,
-       .set_next_event = s5p_set_next_event,
-       .set_mode       = s5p_set_mode,
-};
-
-static irqreturn_t s5p_clock_event_isr(int irq, void *dev_id)
-{
-       struct clock_event_device *evt = dev_id;
-
-       evt->event_handler(evt);
-
-       return IRQ_HANDLED;
-}
-
-static struct irqaction s5p_clock_event_irq = {
-       .name           = "s5p_time_irq",
-       .flags          = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
-       .handler        = s5p_clock_event_isr,
-       .dev_id         = &time_event_device,
-};
-
-static void __init s5p_clockevent_init(void)
-{
-       unsigned long pclk;
-       unsigned long clock_rate;
-       unsigned int irq_number;
-       struct clk *tscaler;
-
-       pclk = clk_get_rate(timerclk);
-
-       tscaler = clk_get_parent(tdiv_event);
-
-       clk_set_rate(tscaler, pclk / 2);
-       clk_set_rate(tdiv_event, pclk / 2);
-       clk_set_parent(tin_event, tdiv_event);
-
-       clock_rate = clk_get_rate(tin_event);
-       clock_count_per_tick = clock_rate / HZ;
-
-       clockevents_calc_mult_shift(&time_event_device,
-                                   clock_rate, S5PTIMER_MIN_RANGE);
-       time_event_device.max_delta_ns =
-               clockevent_delta2ns(-1, &time_event_device);
-       time_event_device.min_delta_ns =
-               clockevent_delta2ns(1, &time_event_device);
-
-       time_event_device.cpumask = cpumask_of(0);
-       clockevents_register_device(&time_event_device);
-
-       irq_number = timer_source.event_id + IRQ_TIMER0;
-       setup_irq(irq_number, &s5p_clock_event_irq);
-}
-
-static void __iomem *s5p_timer_reg(void)
-{
-       unsigned long offset = 0;
-
-       switch (timer_source.source_id) {
-       case S5P_PWM0:
-       case S5P_PWM1:
-       case S5P_PWM2:
-       case S5P_PWM3:
-               offset = (timer_source.source_id * 0x0c) + 0x14;
-               break;
-
-       case S5P_PWM4:
-               offset = 0x40;
-               break;
-
-       default:
-               printk(KERN_ERR "Invalid Timer %d\n", timer_source.source_id);
-               return NULL;
-       }
-
-       return S3C_TIMERREG(offset);
-}
-
-/*
- * Override the global weak sched_clock symbol with this
- * local implementation which uses the clocksource to get some
- * better resolution when scheduling the kernel. We accept that
- * this wraps around for now, since it is just a relative time
- * stamp. (Inspired by U300 implementation.)
- */
-static u32 notrace s5p_read_sched_clock(void)
-{
-       void __iomem *reg = s5p_timer_reg();
-
-       if (!reg)
-               return 0;
-
-       return ~__raw_readl(reg);
-}
-
-static void __init s5p_clocksource_init(void)
-{
-       unsigned long pclk;
-       unsigned long clock_rate;
-
-       pclk = clk_get_rate(timerclk);
-
-       clk_set_rate(tdiv_source, pclk / 2);
-       clk_set_parent(tin_source, tdiv_source);
-
-       clock_rate = clk_get_rate(tin_source);
-
-       s5p_time_setup(timer_source.source_id, TCNT_MAX);
-       s5p_time_start(timer_source.source_id, PERIODIC);
-
-       setup_sched_clock(s5p_read_sched_clock, 32, clock_rate);
-
-       if (clocksource_mmio_init(s5p_timer_reg(), "s5p_clocksource_timer",
-                       clock_rate, 250, 32, clocksource_mmio_readl_down))
-               panic("s5p_clocksource_timer: can't register clocksource\n");
-}
-
-static void __init s5p_timer_resources(void)
-{
-
-       unsigned long event_id = timer_source.event_id;
-       unsigned long source_id = timer_source.source_id;
-       char devname[15];
-
-       timerclk = clk_get(NULL, "timers");
-       if (IS_ERR(timerclk))
-               panic("failed to get timers clock for timer");
-
-       clk_enable(timerclk);
-
-       sprintf(devname, "s3c24xx-pwm.%lu", event_id);
-       s3c_device_timer[event_id].id = event_id;
-       s3c_device_timer[event_id].dev.init_name = devname;
-
-       tin_event = clk_get(&s3c_device_timer[event_id].dev, "pwm-tin");
-       if (IS_ERR(tin_event))
-               panic("failed to get pwm-tin clock for event timer");
-
-       tdiv_event = clk_get(&s3c_device_timer[event_id].dev, "pwm-tdiv");
-       if (IS_ERR(tdiv_event))
-               panic("failed to get pwm-tdiv clock for event timer");
-
-       clk_enable(tin_event);
-
-       sprintf(devname, "s3c24xx-pwm.%lu", source_id);
-       s3c_device_timer[source_id].id = source_id;
-       s3c_device_timer[source_id].dev.init_name = devname;
-
-       tin_source = clk_get(&s3c_device_timer[source_id].dev, "pwm-tin");
-       if (IS_ERR(tin_source))
-               panic("failed to get pwm-tin clock for source timer");
-
-       tdiv_source = clk_get(&s3c_device_timer[source_id].dev, "pwm-tdiv");
-       if (IS_ERR(tdiv_source))
-               panic("failed to get pwm-tdiv clock for source timer");
-
-       clk_enable(tin_source);
-}
-
-static void __init s5p_timer_init(void)
-{
-       s5p_timer_resources();
-       s5p_clockevent_init();
-       s5p_clocksource_init();
-}
-
-struct sys_timer s5p_timer = {
-       .init           = s5p_timer_init,
-};
diff --git a/arch/arm/plat-s5p/setup-mipiphy.c b/arch/arm/plat-s5p/setup-mipiphy.c
deleted file mode 100644 (file)
index 683c466..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- *
- * S5P - Helper functions for MIPI-CSIS and MIPI-DSIM D-PHY control
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/spinlock.h>
-#include <mach/regs-clock.h>
-
-static int __s5p_mipi_phy_control(struct platform_device *pdev,
-                                 bool on, u32 reset)
-{
-       static DEFINE_SPINLOCK(lock);
-       void __iomem *addr;
-       unsigned long flags;
-       int pid;
-       u32 cfg;
-
-       if (!pdev)
-               return -EINVAL;
-
-       pid = (pdev->id == -1) ? 0 : pdev->id;
-
-       if (pid != 0 && pid != 1)
-               return -EINVAL;
-
-       addr = S5P_MIPI_DPHY_CONTROL(pid);
-
-       spin_lock_irqsave(&lock, flags);
-
-       cfg = __raw_readl(addr);
-       cfg = on ? (cfg | reset) : (cfg & ~reset);
-       __raw_writel(cfg, addr);
-
-       if (on) {
-               cfg |= S5P_MIPI_DPHY_ENABLE;
-       } else if (!(cfg & (S5P_MIPI_DPHY_SRESETN |
-                           S5P_MIPI_DPHY_MRESETN) & ~reset)) {
-               cfg &= ~S5P_MIPI_DPHY_ENABLE;
-       }
-
-       __raw_writel(cfg, addr);
-       spin_unlock_irqrestore(&lock, flags);
-
-       return 0;
-}
-
-int s5p_csis_phy_enable(struct platform_device *pdev, bool on)
-{
-       return __s5p_mipi_phy_control(pdev, on, S5P_MIPI_DPHY_SRESETN);
-}
-
-int s5p_dsim_phy_enable(struct platform_device *pdev, bool on)
-{
-       return __s5p_mipi_phy_control(pdev, on, S5P_MIPI_DPHY_MRESETN);
-}
diff --git a/arch/arm/plat-s5p/sleep.S b/arch/arm/plat-s5p/sleep.S
deleted file mode 100644 (file)
index 006bd01..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/* linux/arch/arm/plat-s5p/sleep.S
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * Common S5P Sleep Code
- * Based on S3C64XX sleep code by:
- *     Ben Dooks, (c) 2008 Simtec Electronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-*/
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/hardware/cache-l2x0.h>
-
-/*
- *      The following code is located into the .data section. This is to
- *      allow l2x0_regs_phys to be accessed with a relative load while we
- *      can't rely on any MMU translation. We could have put l2x0_regs_phys
- *      in the .text section as well, but some setups might insist on it to
- *      be truly read-only. (Reference from: arch/arm/kernel/sleep.S)
- */
-       .data
-       .align
-
-       /*
-        * sleep magic, to allow the bootloader to check for an valid
-        * image to resume to. Must be the first word before the
-        * s3c_cpu_resume entry.
-        */
-
-       .word   0x2bedf00d
-
-       /*
-        * s3c_cpu_resume
-        *
-        * resume code entry for bootloader to call
-        */
-
-ENTRY(s3c_cpu_resume)
-#ifdef CONFIG_CACHE_L2X0
-       adr     r0, l2x0_regs_phys
-       ldr     r0, [r0]
-       ldr     r1, [r0, #L2X0_R_PHY_BASE]
-       ldr     r2, [r1, #L2X0_CTRL]
-       tst     r2, #0x1
-       bne     resume_l2on
-       ldr     r2, [r0, #L2X0_R_AUX_CTRL]
-       str     r2, [r1, #L2X0_AUX_CTRL]
-       ldr     r2, [r0, #L2X0_R_TAG_LATENCY]
-       str     r2, [r1, #L2X0_TAG_LATENCY_CTRL]
-       ldr     r2, [r0, #L2X0_R_DATA_LATENCY]
-       str     r2, [r1, #L2X0_DATA_LATENCY_CTRL]
-       ldr     r2, [r0, #L2X0_R_PREFETCH_CTRL]
-       str     r2, [r1, #L2X0_PREFETCH_CTRL]
-       ldr     r2, [r0, #L2X0_R_PWR_CTRL]
-       str     r2, [r1, #L2X0_POWER_CTRL]
-       mov     r2, #1
-       str     r2, [r1, #L2X0_CTRL]
-resume_l2on:
-#endif
-       b       cpu_resume
-ENDPROC(s3c_cpu_resume)
-#ifdef CONFIG_CACHE_L2X0
-       .globl l2x0_regs_phys
-l2x0_regs_phys:
-       .long   0
-#endif
diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c
deleted file mode 100644 (file)
index c8bec9c..0000000
+++ /dev/null
@@ -1,313 +0,0 @@
-/* linux/arch/arm/plat-s5p/sysmmu.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/export.h>
-
-#include <asm/pgtable.h>
-
-#include <mach/map.h>
-#include <mach/regs-sysmmu.h>
-#include <plat/sysmmu.h>
-
-#define CTRL_ENABLE    0x5
-#define CTRL_BLOCK     0x7
-#define CTRL_DISABLE   0x0
-
-static struct device *dev;
-
-static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
-       S5P_PAGE_FAULT_ADDR,
-       S5P_AR_FAULT_ADDR,
-       S5P_AW_FAULT_ADDR,
-       S5P_DEFAULT_SLAVE_ADDR,
-       S5P_AR_FAULT_ADDR,
-       S5P_AR_FAULT_ADDR,
-       S5P_AW_FAULT_ADDR,
-       S5P_AW_FAULT_ADDR
-};
-
-static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
-       "PAGE FAULT",
-       "AR MULTI-HIT FAULT",
-       "AW MULTI-HIT FAULT",
-       "BUS ERROR",
-       "AR SECURITY PROTECTION FAULT",
-       "AR ACCESS PROTECTION FAULT",
-       "AW SECURITY PROTECTION FAULT",
-       "AW ACCESS PROTECTION FAULT"
-};
-
-static int (*fault_handlers[S5P_SYSMMU_TOTAL_IPNUM])(
-               enum S5P_SYSMMU_INTERRUPT_TYPE itype,
-               unsigned long pgtable_base,
-               unsigned long fault_addr);
-
-/*
- * If adjacent 2 bits are true, the system MMU is enabled.
- * The system MMU is disabled, otherwise.
- */
-static unsigned long sysmmu_states;
-
-static inline void set_sysmmu_active(sysmmu_ips ips)
-{
-       sysmmu_states |= 3 << (ips * 2);
-}
-
-static inline void set_sysmmu_inactive(sysmmu_ips ips)
-{
-       sysmmu_states &= ~(3 << (ips * 2));
-}
-
-static inline int is_sysmmu_active(sysmmu_ips ips)
-{
-       return sysmmu_states & (3 << (ips * 2));
-}
-
-static void __iomem *sysmmusfrs[S5P_SYSMMU_TOTAL_IPNUM];
-
-static inline void sysmmu_block(sysmmu_ips ips)
-{
-       __raw_writel(CTRL_BLOCK, sysmmusfrs[ips] + S5P_MMU_CTRL);
-       dev_dbg(dev, "%s is blocked.\n", sysmmu_ips_name[ips]);
-}
-
-static inline void sysmmu_unblock(sysmmu_ips ips)
-{
-       __raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
-       dev_dbg(dev, "%s is unblocked.\n", sysmmu_ips_name[ips]);
-}
-
-static inline void __sysmmu_tlb_invalidate(sysmmu_ips ips)
-{
-       __raw_writel(0x1, sysmmusfrs[ips] + S5P_MMU_FLUSH);
-       dev_dbg(dev, "TLB of %s is invalidated.\n", sysmmu_ips_name[ips]);
-}
-
-static inline void __sysmmu_set_ptbase(sysmmu_ips ips, unsigned long pgd)
-{
-       if (unlikely(pgd == 0)) {
-               pgd = (unsigned long)ZERO_PAGE(0);
-               __raw_writel(0x20, sysmmusfrs[ips] + S5P_MMU_CFG); /* 4KB LV1 */
-       } else {
-               __raw_writel(0x0, sysmmusfrs[ips] + S5P_MMU_CFG); /* 16KB LV1 */
-       }
-
-       __raw_writel(pgd, sysmmusfrs[ips] + S5P_PT_BASE_ADDR);
-
-       dev_dbg(dev, "Page table base of %s is initialized with 0x%08lX.\n",
-                                               sysmmu_ips_name[ips], pgd);
-       __sysmmu_tlb_invalidate(ips);
-}
-
-void sysmmu_set_fault_handler(sysmmu_ips ips,
-                       int (*handler)(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
-                                       unsigned long pgtable_base,
-                                       unsigned long fault_addr))
-{
-       BUG_ON(!((ips >= SYSMMU_MDMA) && (ips < S5P_SYSMMU_TOTAL_IPNUM)));
-       fault_handlers[ips] = handler;
-}
-
-static irqreturn_t s5p_sysmmu_irq(int irq, void *dev_id)
-{
-       /* SYSMMU is in blocked when interrupt occurred. */
-       unsigned long base = 0;
-       sysmmu_ips ips = (sysmmu_ips)dev_id;
-       enum S5P_SYSMMU_INTERRUPT_TYPE itype;
-
-       itype = (enum S5P_SYSMMU_INTERRUPT_TYPE)
-               __ffs(__raw_readl(sysmmusfrs[ips] + S5P_INT_STATUS));
-
-       BUG_ON(!((itype >= 0) && (itype < 8)));
-
-       dev_alert(dev, "%s occurred by %s.\n", sysmmu_fault_name[itype],
-                                                       sysmmu_ips_name[ips]);
-
-       if (fault_handlers[ips]) {
-               unsigned long addr;
-
-               base = __raw_readl(sysmmusfrs[ips] + S5P_PT_BASE_ADDR);
-               addr = __raw_readl(sysmmusfrs[ips] + fault_reg_offset[itype]);
-
-               if (fault_handlers[ips](itype, base, addr)) {
-                       __raw_writel(1 << itype,
-                                       sysmmusfrs[ips] + S5P_INT_CLEAR);
-                       dev_notice(dev, "%s from %s is resolved."
-                                       " Retrying translation.\n",
-                               sysmmu_fault_name[itype], sysmmu_ips_name[ips]);
-               } else {
-                       base = 0;
-               }
-       }
-
-       sysmmu_unblock(ips);
-
-       if (!base)
-               dev_notice(dev, "%s from %s is not handled.\n",
-                       sysmmu_fault_name[itype], sysmmu_ips_name[ips]);
-
-       return IRQ_HANDLED;
-}
-
-void s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd)
-{
-       if (is_sysmmu_active(ips)) {
-               sysmmu_block(ips);
-               __sysmmu_set_ptbase(ips, pgd);
-               sysmmu_unblock(ips);
-       } else {
-               dev_dbg(dev, "%s is disabled. "
-                       "Skipping initializing page table base.\n",
-                                               sysmmu_ips_name[ips]);
-       }
-}
-
-void s5p_sysmmu_enable(sysmmu_ips ips, unsigned long pgd)
-{
-       if (!is_sysmmu_active(ips)) {
-               sysmmu_clk_enable(ips);
-
-               __sysmmu_set_ptbase(ips, pgd);
-
-               __raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
-
-               set_sysmmu_active(ips);
-               dev_dbg(dev, "%s is enabled.\n", sysmmu_ips_name[ips]);
-       } else {
-               dev_dbg(dev, "%s is already enabled.\n", sysmmu_ips_name[ips]);
-       }
-}
-
-void s5p_sysmmu_disable(sysmmu_ips ips)
-{
-       if (is_sysmmu_active(ips)) {
-               __raw_writel(CTRL_DISABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
-               set_sysmmu_inactive(ips);
-               sysmmu_clk_disable(ips);
-               dev_dbg(dev, "%s is disabled.\n", sysmmu_ips_name[ips]);
-       } else {
-               dev_dbg(dev, "%s is already disabled.\n", sysmmu_ips_name[ips]);
-       }
-}
-
-void s5p_sysmmu_tlb_invalidate(sysmmu_ips ips)
-{
-       if (is_sysmmu_active(ips)) {
-               sysmmu_block(ips);
-               __sysmmu_tlb_invalidate(ips);
-               sysmmu_unblock(ips);
-       } else {
-               dev_dbg(dev, "%s is disabled. "
-                       "Skipping invalidating TLB.\n", sysmmu_ips_name[ips]);
-       }
-}
-
-static int s5p_sysmmu_probe(struct platform_device *pdev)
-{
-       int i, ret;
-       struct resource *res, *mem;
-
-       dev = &pdev->dev;
-
-       for (i = 0; i < S5P_SYSMMU_TOTAL_IPNUM; i++) {
-               int irq;
-
-               sysmmu_clk_init(dev, i);
-               sysmmu_clk_disable(i);
-
-               res = platform_get_resource(pdev, IORESOURCE_MEM, i);
-               if (!res) {
-                       dev_err(dev, "Failed to get the resource of %s.\n",
-                                                       sysmmu_ips_name[i]);
-                       ret = -ENODEV;
-                       goto err_res;
-               }
-
-               mem = request_mem_region(res->start, resource_size(res),
-                                        pdev->name);
-               if (!mem) {
-                       dev_err(dev, "Failed to request the memory region of %s.\n",
-                                                       sysmmu_ips_name[i]);
-                       ret = -EBUSY;
-                       goto err_res;
-               }
-
-               sysmmusfrs[i] = ioremap(res->start, resource_size(res));
-               if (!sysmmusfrs[i]) {
-                       dev_err(dev, "Failed to ioremap() for %s.\n",
-                                                       sysmmu_ips_name[i]);
-                       ret = -ENXIO;
-                       goto err_reg;
-               }
-
-               irq = platform_get_irq(pdev, i);
-               if (irq <= 0) {
-                       dev_err(dev, "Failed to get the IRQ resource of %s.\n",
-                                                       sysmmu_ips_name[i]);
-                       ret = -ENOENT;
-                       goto err_map;
-               }
-
-               if (request_irq(irq, s5p_sysmmu_irq, IRQF_DISABLED,
-                                               pdev->name, (void *)i)) {
-                       dev_err(dev, "Failed to request IRQ for %s.\n",
-                                                       sysmmu_ips_name[i]);
-                       ret = -ENOENT;
-                       goto err_map;
-               }
-       }
-
-       return 0;
-
-err_map:
-       iounmap(sysmmusfrs[i]);
-err_reg:
-       release_mem_region(mem->start, resource_size(mem));
-err_res:
-       return ret;
-}
-
-static int s5p_sysmmu_remove(struct platform_device *pdev)
-{
-       return 0;
-}
-int s5p_sysmmu_runtime_suspend(struct device *dev)
-{
-       return 0;
-}
-
-int s5p_sysmmu_runtime_resume(struct device *dev)
-{
-       return 0;
-}
-
-const struct dev_pm_ops s5p_sysmmu_pm_ops = {
-       .runtime_suspend        = s5p_sysmmu_runtime_suspend,
-       .runtime_resume         = s5p_sysmmu_runtime_resume,
-};
-
-static struct platform_driver s5p_sysmmu_driver = {
-       .probe          = s5p_sysmmu_probe,
-       .remove         = s5p_sysmmu_remove,
-       .driver         = {
-               .owner          = THIS_MODULE,
-               .name           = "s5p-sysmmu",
-               .pm             = &s5p_sysmmu_pm_ops,
-       }
-};
-
-static int __init s5p_sysmmu_init(void)
-{
-       return platform_driver_register(&s5p_sysmmu_driver);
-}
-arch_initcall(s5p_sysmmu_init);
index a0ffc77da8091a3ab962b8570a7592e9d254bfb5..a2fae4ea0936655b0f3b0131c1be748c943c71ee 100644 (file)
@@ -13,6 +13,24 @@ config PLAT_SAMSUNG
        help
          Base platform code for all Samsung SoC based systems
 
+config PLAT_S5P
+       bool
+       depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
+       default y
+       select ARM_VIC if !ARCH_EXYNOS
+       select ARM_GIC if ARCH_EXYNOS
+       select GIC_NON_BANKED if ARCH_EXYNOS4
+       select NO_IOPORT
+       select ARCH_REQUIRE_GPIOLIB
+       select S3C_GPIO_TRACK
+       select S5P_GPIO_DRVSTR
+       select SAMSUNG_GPIOLIB_4BIT
+       select PLAT_SAMSUNG
+       select SAMSUNG_CLKSRC
+       select SAMSUNG_IRQ_VIC_TIMER
+       help
+         Base platform code for Samsung's S5P series SoC.
+
 if PLAT_SAMSUNG
 
 # boot configurations
@@ -50,6 +68,14 @@ config S3C_LOWLEVEL_UART_PORT
          this configuration should be between zero and two. The port
          must have been initialised by the boot-loader before use.
 
+# timer options
+
+config S5P_HRT
+       bool
+       select SAMSUNG_DEV_PWM
+       help
+         Use the High Resolution timer support
+
 # clock options
 
 config SAMSUNG_CLKSRC
@@ -58,6 +84,11 @@ config SAMSUNG_CLKSRC
          Select the clock code for the clksrc implementation
          used by newer systems such as the S3C64XX.
 
+config S5P_CLOCK
+       def_bool (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
+       help
+         Support common clock part for ARCH_S5P and ARCH_EXYNOS SoCs
+
 # options for IRQ support
 
 config SAMSUNG_IRQ_VIC_TIMER
@@ -65,6 +96,22 @@ config SAMSUNG_IRQ_VIC_TIMER
        help
          Internal configuration to build the VIC timer interrupt code.
 
+config S5P_IRQ
+       def_bool (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
+       help
+         Support common interrup part for ARCH_S5P and ARCH_EXYNOS SoCs
+
+config S5P_EXT_INT
+       bool
+       help
+         Use the external interrupts (other than GPIO interrupts.)
+         Note: Do not choose this for S5P6440 and S5P6450.
+
+config S5P_GPIO_INT
+       bool
+       help
+         Common code for the GPIO interrupts (other than external interrupts.)
+
 # options for gpio configuration support
 
 config SAMSUNG_GPIOLIB_4BIT
@@ -117,6 +164,12 @@ config S3C_GPIO_TRACK
          Internal configuration option to enable the s3c specific gpio
          chip tracking if the platform requires it.
 
+# uart options
+
+config S5P_DEV_UART
+       def_bool y
+       depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210)
+
 # ADC driver
 
 config S3C_ADC
@@ -274,6 +327,76 @@ config SAMSUNG_DEV_BACKLIGHT
        help
          Compile in platform device definition LCD backlight with PWM Timer
 
+config S5P_DEV_CSIS0
+       bool
+       help
+         Compile in platform device definitions for MIPI-CSIS channel 0
+
+config S5P_DEV_CSIS1
+       bool
+       help
+         Compile in platform device definitions for MIPI-CSIS channel 1
+
+config S5P_DEV_FIMC0
+       bool
+       help
+         Compile in platform device definitions for FIMC controller 0
+
+config S5P_DEV_FIMC1
+       bool
+       help
+         Compile in platform device definitions for FIMC controller 1
+
+config S5P_DEV_FIMC2
+       bool
+       help
+         Compile in platform device definitions for FIMC controller 2
+
+config S5P_DEV_FIMC3
+       bool
+       help
+         Compile in platform device definitions for FIMC controller 3
+
+config S5P_DEV_FIMD0
+       bool
+       help
+         Compile in platform device definitions for FIMD controller 0
+
+config S5P_DEV_G2D
+       bool
+       help
+         Compile in platform device definitions for G2D device
+
+config S5P_DEV_I2C_HDMIPHY
+       bool
+       help
+         Compile in platform device definitions for I2C HDMIPHY controller
+
+config S5P_DEV_JPEG
+       bool
+       help
+         Compile in platform device definitions for JPEG codec
+
+config S5P_DEV_MFC
+       bool
+       help
+         Compile in setup memory (init) code for MFC
+
+config S5P_DEV_ONENAND
+       bool
+       help
+         Compile in platform device definition for OneNAND controller
+
+config S5P_DEV_TV
+       bool
+       help
+         Compile in platform device definition for TV interface
+
+config S5P_DEV_USB_EHCI
+       bool
+       help
+         Compile in platform device definition for USB EHCI
+
 config S3C24XX_PWM
        bool "PWM device support"
        select HAVE_PWM
@@ -281,6 +404,11 @@ config S3C24XX_PWM
          Support for exporting the PWM timer blocks via the pwm device
          system
 
+config S5P_SETUP_MIPIPHY
+       bool
+       help
+         Compile in common setup code for MIPI-CSIS and MIPI-DSIM devices
+
 # DMA
 
 config S3C_DMA
@@ -291,7 +419,7 @@ config S3C_DMA
 config SAMSUNG_DMADEV
        bool
        select DMADEVICES
-       select PL330_DMA if (CPU_EXYNOS4210 || CPU_S5PV210 || CPU_S5PC100 || \
+       select PL330_DMA if (ARCH_EXYNOS5 || ARCH_EXYNOS4 || CPU_S5PV210 || CPU_S5PC100 || \
                                        CPU_S5P6450 || CPU_S5P6440)
        select ARM_AMBA
        help
@@ -351,6 +479,18 @@ config SAMSUNG_WAKEMASK
          and above. This code allows a set of interrupt to wakeup-mask
          mappings. See <plat/wakeup-mask.h>
 
+config S5P_PM
+       bool
+       help
+         Common code for power management support on S5P and newer SoCs
+         Note: Do not select this for S5P6440 and S5P6450.
+
+config S5P_SLEEP
+       bool
+       help
+         Internal config node to apply common S5P sleep management code.
+         Can be selected by S5P and newer SoCs with similar sleep procedure.
+
 comment "Power Domain"
 
 config SAMSUNG_PD
index 6012366f33cb5ebf42ed978a6e424059a4d62f26..860b2db4db155062b66361ce6688a11d6b86ff49 100644 (file)
@@ -13,12 +13,18 @@ obj-                                :=
 
 obj-y                          += init.o cpu.o
 obj-$(CONFIG_ARCH_USES_GETTIMEOFFSET)   += time.o
+obj-$(CONFIG_S5P_HRT)          += s5p-time.o
+
 obj-y                          += clock.o
 obj-y                          += pwm-clock.o
 
 obj-$(CONFIG_SAMSUNG_CLKSRC)   += clock-clksrc.o
+obj-$(CONFIG_S5P_CLOCK)                += s5p-clock.o
 
 obj-$(CONFIG_SAMSUNG_IRQ_VIC_TIMER) += irq-vic-timer.o
+obj-$(CONFIG_S5P_IRQ)          += s5p-irq.o
+obj-$(CONFIG_S5P_EXT_INT)      += s5p-irq-eint.o
+obj-$(CONFIG_S5P_GPIO_INT)     += s5p-irq-gpioint.o
 
 # ADC
 
@@ -30,9 +36,13 @@ obj-y                                += platformdata.o
 
 obj-y                          += devs.o
 obj-y                          += dev-uart.o
+obj-$(CONFIG_S5P_DEV_MFC)      += s5p-dev-mfc.o
+obj-$(CONFIG_S5P_DEV_UART)     += s5p-dev-uart.o
 
 obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT)    += dev-backlight.o
 
+obj-$(CONFIG_S5P_SETUP_MIPIPHY)        += setup-mipiphy.o
+
 # DMA support
 
 obj-$(CONFIG_S3C_DMA)          += dma.o s3c-dma-ops.o
@@ -47,6 +57,9 @@ obj-$(CONFIG_SAMSUNG_PM_CHECK)        += pm-check.o
 
 obj-$(CONFIG_SAMSUNG_WAKEMASK) += wakeup-mask.o
 
+obj-$(CONFIG_S5P_PM)           += s5p-pm.o s5p-irq-pm.o
+obj-$(CONFIG_S5P_SLEEP)                += s5p-sleep.o
+
 # PD support
 
 obj-$(CONFIG_SAMSUNG_PD)       += pd.o
index 787ceaca0be8aeb74122df8270ee995398da6f24..0721293fad635b913a3a07f082f13aba32ea0179 100644 (file)
@@ -202,7 +202,7 @@ extern struct bus_type s3c2443_subsys;
 extern struct bus_type s3c6410_subsys;
 extern struct bus_type s5p64x0_subsys;
 extern struct bus_type s5pv210_subsys;
-extern struct bus_type exynos4_subsys;
+extern struct bus_type exynos_subsys;
 
 extern void (*s5pc1xx_idle)(void);
 
index 2155d4af62a30ce2d83c016e097690ba1b0ae316..61ca2f356c52d6d4ea3fac036788cc422aa02235 100644 (file)
@@ -133,7 +133,8 @@ extern struct platform_device exynos4_device_pcm1;
 extern struct platform_device exynos4_device_pcm2;
 extern struct platform_device exynos4_device_pd[];
 extern struct platform_device exynos4_device_spdif;
-extern struct platform_device exynos4_device_sysmmu;
+
+extern struct platform_device exynos_device_drm;
 
 extern struct platform_device samsung_asoc_dma;
 extern struct platform_device samsung_asoc_idma;
index 0670f37aaaedcfe7990d359df010db7ced9c935a..d384a8016b47d2463218748289cfbac3c8b272cc 100644 (file)
@@ -90,6 +90,7 @@ enum dma_ch {
        DMACH_MIPI_HSI5,
        DMACH_MIPI_HSI6,
        DMACH_MIPI_HSI7,
+       DMACH_DISP1,
        DMACH_MTOM_0,
        DMACH_MTOM_1,
        DMACH_MTOM_2,
index 0fedf47fa502482a117dc9427d5067d11d8e3a3d..536002ff2ab8d665993b1f554a90ac74642917ad 100644 (file)
 
 /**
  * struct s3c_fb_pd_win - per window setup data
- * @win_mode: The display parameters to initialise (not for window 0)
+ * @xres     : The window X size.
+ * @yres     : The window Y size.
  * @virtual_x: The virtual X size.
  * @virtual_y: The virtual Y size.
  */
 struct s3c_fb_pd_win {
-       struct fb_videomode     win_mode;
-
        unsigned short          default_bpp;
        unsigned short          max_bpp;
+       unsigned short          xres;
+       unsigned short          yres;
        unsigned short          virtual_x;
        unsigned short          virtual_y;
 };
@@ -45,6 +46,7 @@ struct s3c_fb_pd_win {
  * @default_win: default window layer number to be used for UI layer.
  * @vidcon0: The base vidcon0 values to control the panel data format.
  * @vidcon1: The base vidcon1 values to control the panel data output.
+ * @vtiming: Video timing when connected to a RGB type panel.
  * @win: The setup data for each hardware window, or NULL for unused.
  * @display_mode: The LCD output display mode.
  *
@@ -58,8 +60,7 @@ struct s3c_fb_platdata {
        void    (*setup_gpio)(void);
 
        struct s3c_fb_pd_win    *win[S3C_FB_MAX_WIN];
-
-       u32                      default_win;
+       struct fb_videomode     *vtiming;
 
        u32                      vidcon0;
        u32                      vidcon1;
index de2b5bdc5ebd860a3e65020c6c63dfc75fb97dbb..7178e338e25ed8e7981e9b134dd7508aa8f4f128 100644 (file)
@@ -24,6 +24,9 @@ extern void s3c2416_init_clocks(int xtal);
 extern  int s3c2416_baseclk_add(void);
 
 extern void s3c2416_restart(char mode, const char *cmd);
+
+extern struct syscore_ops s3c2416_irq_syscore_ops;
+
 #else
 #define s3c2416_init_clocks NULL
 #define s3c2416_init_uarts NULL
index 1de4b32f98e9a81ad55b2f921f87c174d0707365..8364b4bea8b8f5e3add2e8a3fa454a0314ec8d9f 100644 (file)
@@ -32,8 +32,10 @@ extern struct clk clk_48m;
 extern struct clk s5p_clk_27m;
 extern struct clk clk_fout_apll;
 extern struct clk clk_fout_bpll;
+extern struct clk clk_fout_bpll_div2;
 extern struct clk clk_fout_cpll;
 extern struct clk clk_fout_mpll;
+extern struct clk clk_fout_mpll_div2;
 extern struct clk clk_fout_epll;
 extern struct clk clk_fout_dpll;
 extern struct clk clk_fout_vpll;
@@ -42,8 +44,10 @@ extern struct clk clk_vpll;
 
 extern struct clksrc_sources clk_src_apll;
 extern struct clksrc_sources clk_src_bpll;
+extern struct clksrc_sources clk_src_bpll_fout;
 extern struct clksrc_sources clk_src_cpll;
 extern struct clksrc_sources clk_src_mpll;
+extern struct clksrc_sources clk_src_mpll_fout;
 extern struct clksrc_sources clk_src_epll;
 extern struct clksrc_sources clk_src_dpll;
 
diff --git a/arch/arm/plat-samsung/include/plat/sysmmu.h b/arch/arm/plat-samsung/include/plat/sysmmu.h
deleted file mode 100644 (file)
index 5fe8ee0..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/sysmmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * Samsung System MMU driver for S5P platform
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __PLAT_SAMSUNG_SYSMMU_H
-#define __PLAT_SAMSUNG_SYSMMU_H __FILE__
-
-enum S5P_SYSMMU_INTERRUPT_TYPE {
-       SYSMMU_PAGEFAULT,
-       SYSMMU_AR_MULTIHIT,
-       SYSMMU_AW_MULTIHIT,
-       SYSMMU_BUSERROR,
-       SYSMMU_AR_SECURITY,
-       SYSMMU_AR_ACCESS,
-       SYSMMU_AW_SECURITY,
-       SYSMMU_AW_PROTECTION, /* 7 */
-       SYSMMU_FAULTS_NUM
-};
-
-#ifdef CONFIG_S5P_SYSTEM_MMU
-
-#include <mach/sysmmu.h>
-
-/**
- * s5p_sysmmu_enable() - enable system mmu of ip
- * @ips: The ip connected system mmu.
- * #pgd: Base physical address of the 1st level page table
- *
- * This function enable system mmu to transfer address
- * from virtual address to physical address
- */
-void s5p_sysmmu_enable(sysmmu_ips ips, unsigned long pgd);
-
-/**
- * s5p_sysmmu_disable() - disable sysmmu mmu of ip
- * @ips: The ip connected system mmu.
- *
- * This function disable system mmu to transfer address
- * from virtual address to physical address
- */
-void s5p_sysmmu_disable(sysmmu_ips ips);
-
-/**
- * s5p_sysmmu_set_tablebase_pgd() - set page table base address to refer page table
- * @ips: The ip connected system mmu.
- * @pgd: The page table base address.
- *
- * This function set page table base address
- * When system mmu transfer address from virtaul address to physical address,
- * system mmu refer address information from page table
- */
-void s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd);
-
-/**
- * s5p_sysmmu_tlb_invalidate() - flush all TLB entry in system mmu
- * @ips: The ip connected system mmu.
- *
- * This function flush all TLB entry in system mmu
- */
-void s5p_sysmmu_tlb_invalidate(sysmmu_ips ips);
-
-/** s5p_sysmmu_set_fault_handler() - Fault handler for System MMUs
- * @itype: type of fault.
- * @pgtable_base: the physical address of page table base. This is 0 if @ips is
- *               SYSMMU_BUSERROR.
- * @fault_addr: the device (virtual) address that the System MMU tried to
- *             translated. This is 0 if @ips is SYSMMU_BUSERROR.
- * Called when interrupt occurred by the System MMUs
- * The device drivers of peripheral devices that has a System MMU can implement
- * a fault handler to resolve address translation fault by System MMU.
- * The meanings of return value and parameters are described below.
-
- * return value: non-zero if the fault is correctly resolved.
- *         zero if the fault is not handled.
- */
-void s5p_sysmmu_set_fault_handler(sysmmu_ips ips,
-                       int (*handler)(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
-                                       unsigned long pgtable_base,
-                                       unsigned long fault_addr));
-#else
-#define s5p_sysmmu_enable(ips, pgd) do { } while (0)
-#define s5p_sysmmu_disable(ips) do { } while (0)
-#define s5p_sysmmu_set_tablebase_pgd(ips, pgd) do { } while (0)
-#define s5p_sysmmu_tlb_invalidate(ips) do { } while (0)
-#define s5p_sysmmu_set_fault_handler(ips, handler) do { } while (0)
-#endif
-#endif /* __ASM_PLAT_SYSMMU_H */
diff --git a/arch/arm/plat-samsung/s5p-clock.c b/arch/arm/plat-samsung/s5p-clock.c
new file mode 100644 (file)
index 0000000..031a618
--- /dev/null
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2009 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * S5P - Common clock support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <asm/div64.h>
+
+#include <mach/regs-clock.h>
+
+#include <plat/clock.h>
+#include <plat/clock-clksrc.h>
+#include <plat/s5p-clock.h>
+
+/* fin_apll, fin_mpll and fin_epll are all the same clock, which we call
+ * clk_ext_xtal_mux.
+*/
+struct clk clk_ext_xtal_mux = {
+       .name           = "ext_xtal",
+       .id             = -1,
+};
+
+struct clk clk_xusbxti = {
+       .name           = "xusbxti",
+       .id             = -1,
+};
+
+struct clk s5p_clk_27m = {
+       .name           = "clk_27m",
+       .id             = -1,
+       .rate           = 27000000,
+};
+
+/* 48MHz USB Phy clock output */
+struct clk clk_48m = {
+       .name           = "clk_48m",
+       .id             = -1,
+       .rate           = 48000000,
+};
+
+/* APLL clock output
+ * No need .ctrlbit, this is always on
+*/
+struct clk clk_fout_apll = {
+       .name           = "fout_apll",
+       .id             = -1,
+};
+
+/* BPLL clock output */
+
+struct clk clk_fout_bpll = {
+       .name           = "fout_bpll",
+       .id             = -1,
+};
+
+struct clk clk_fout_bpll_div2 = {
+       .name           = "fout_bpll_div2",
+       .id             = -1,
+};
+
+/* CPLL clock output */
+
+struct clk clk_fout_cpll = {
+       .name           = "fout_cpll",
+       .id             = -1,
+};
+
+/* MPLL clock output
+ * No need .ctrlbit, this is always on
+*/
+struct clk clk_fout_mpll = {
+       .name           = "fout_mpll",
+       .id             = -1,
+};
+
+struct clk clk_fout_mpll_div2 = {
+       .name           = "fout_mpll_div2",
+       .id             = -1,
+};
+
+/* EPLL clock output */
+struct clk clk_fout_epll = {
+       .name           = "fout_epll",
+       .id             = -1,
+       .ctrlbit        = (1 << 31),
+};
+
+/* DPLL clock output */
+struct clk clk_fout_dpll = {
+       .name           = "fout_dpll",
+       .id             = -1,
+       .ctrlbit        = (1 << 31),
+};
+
+/* VPLL clock output */
+struct clk clk_fout_vpll = {
+       .name           = "fout_vpll",
+       .id             = -1,
+       .ctrlbit        = (1 << 31),
+};
+
+/* Possible clock sources for APLL Mux */
+static struct clk *clk_src_apll_list[] = {
+       [0] = &clk_fin_apll,
+       [1] = &clk_fout_apll,
+};
+
+struct clksrc_sources clk_src_apll = {
+       .sources        = clk_src_apll_list,
+       .nr_sources     = ARRAY_SIZE(clk_src_apll_list),
+};
+
+/* Possible clock sources for BPLL Mux */
+static struct clk *clk_src_bpll_list[] = {
+       [0] = &clk_fin_bpll,
+       [1] = &clk_fout_bpll,
+};
+
+struct clksrc_sources clk_src_bpll = {
+       .sources        = clk_src_bpll_list,
+       .nr_sources     = ARRAY_SIZE(clk_src_bpll_list),
+};
+
+static struct clk *clk_src_bpll_fout_list[] = {
+       [0] = &clk_fout_bpll_div2,
+       [1] = &clk_fout_bpll,
+};
+
+struct clksrc_sources clk_src_bpll_fout = {
+       .sources        = clk_src_bpll_fout_list,
+       .nr_sources     = ARRAY_SIZE(clk_src_bpll_fout_list),
+};
+
+/* Possible clock sources for CPLL Mux */
+static struct clk *clk_src_cpll_list[] = {
+       [0] = &clk_fin_cpll,
+       [1] = &clk_fout_cpll,
+};
+
+struct clksrc_sources clk_src_cpll = {
+       .sources        = clk_src_cpll_list,
+       .nr_sources     = ARRAY_SIZE(clk_src_cpll_list),
+};
+
+/* Possible clock sources for MPLL Mux */
+static struct clk *clk_src_mpll_list[] = {
+       [0] = &clk_fin_mpll,
+       [1] = &clk_fout_mpll,
+};
+
+struct clksrc_sources clk_src_mpll = {
+       .sources        = clk_src_mpll_list,
+       .nr_sources     = ARRAY_SIZE(clk_src_mpll_list),
+};
+
+static struct clk *clk_src_mpll_fout_list[] = {
+       [0] = &clk_fout_mpll_div2,
+       [1] = &clk_fout_mpll,
+};
+
+struct clksrc_sources clk_src_mpll_fout = {
+       .sources        = clk_src_mpll_fout_list,
+       .nr_sources     = ARRAY_SIZE(clk_src_mpll_fout_list),
+};
+
+/* Possible clock sources for EPLL Mux */
+static struct clk *clk_src_epll_list[] = {
+       [0] = &clk_fin_epll,
+       [1] = &clk_fout_epll,
+};
+
+struct clksrc_sources clk_src_epll = {
+       .sources        = clk_src_epll_list,
+       .nr_sources     = ARRAY_SIZE(clk_src_epll_list),
+};
+
+/* Possible clock sources for DPLL Mux */
+static struct clk *clk_src_dpll_list[] = {
+       [0] = &clk_fin_dpll,
+       [1] = &clk_fout_dpll,
+};
+
+struct clksrc_sources clk_src_dpll = {
+       .sources        = clk_src_dpll_list,
+       .nr_sources     = ARRAY_SIZE(clk_src_dpll_list),
+};
+
+struct clk clk_vpll = {
+       .name           = "vpll",
+       .id             = -1,
+};
+
+int s5p_gatectrl(void __iomem *reg, struct clk *clk, int enable)
+{
+       unsigned int ctrlbit = clk->ctrlbit;
+       u32 con;
+
+       con = __raw_readl(reg);
+       con = enable ? (con | ctrlbit) : (con & ~ctrlbit);
+       __raw_writel(con, reg);
+       return 0;
+}
+
+int s5p_epll_enable(struct clk *clk, int enable)
+{
+       unsigned int ctrlbit = clk->ctrlbit;
+       unsigned int epll_con = __raw_readl(S5P_EPLL_CON) & ~ctrlbit;
+
+       if (enable)
+               __raw_writel(epll_con | ctrlbit, S5P_EPLL_CON);
+       else
+               __raw_writel(epll_con, S5P_EPLL_CON);
+
+       return 0;
+}
+
+unsigned long s5p_epll_get_rate(struct clk *clk)
+{
+       return clk->rate;
+}
+
+int s5p_spdif_set_rate(struct clk *clk, unsigned long rate)
+{
+       struct clk *pclk;
+       int ret;
+
+       pclk = clk_get_parent(clk);
+       if (IS_ERR(pclk))
+               return -EINVAL;
+
+       ret = pclk->ops->set_rate(pclk, rate);
+       clk_put(pclk);
+
+       return ret;
+}
+
+unsigned long s5p_spdif_get_rate(struct clk *clk)
+{
+       struct clk *pclk;
+       int rate;
+
+       pclk = clk_get_parent(clk);
+       if (IS_ERR(pclk))
+               return -EINVAL;
+
+       rate = pclk->ops->get_rate(pclk);
+       clk_put(pclk);
+
+       return rate;
+}
+
+struct clk_ops s5p_sclk_spdif_ops = {
+       .set_rate       = s5p_spdif_set_rate,
+       .get_rate       = s5p_spdif_get_rate,
+};
+
+static struct clk *s5p_clks[] __initdata = {
+       &clk_ext_xtal_mux,
+       &clk_48m,
+       &s5p_clk_27m,
+       &clk_fout_apll,
+       &clk_fout_mpll,
+       &clk_fout_epll,
+       &clk_fout_dpll,
+       &clk_fout_vpll,
+       &clk_vpll,
+       &clk_xusbxti,
+};
+
+void __init s5p_register_clocks(unsigned long xtal_freq)
+{
+       int ret;
+
+       clk_ext_xtal_mux.rate = xtal_freq;
+
+       ret = s3c24xx_register_clocks(s5p_clks, ARRAY_SIZE(s5p_clks));
+       if (ret > 0)
+               printk(KERN_ERR "Failed to register s5p clocks\n");
+}
diff --git a/arch/arm/plat-samsung/s5p-dev-mfc.c b/arch/arm/plat-samsung/s5p-dev-mfc.c
new file mode 100644 (file)
index 0000000..ad60894
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2010-2011 Samsung Electronics Co.Ltd
+ *
+ * Base S5P MFC resource and device definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/memblock.h>
+#include <linux/ioport.h>
+
+#include <mach/map.h>
+#include <plat/devs.h>
+#include <plat/irqs.h>
+#include <plat/mfc.h>
+
+struct s5p_mfc_reserved_mem {
+       phys_addr_t     base;
+       unsigned long   size;
+       struct device   *dev;
+};
+
+static struct s5p_mfc_reserved_mem s5p_mfc_mem[2] __initdata;
+
+void __init s5p_mfc_reserve_mem(phys_addr_t rbase, unsigned int rsize,
+                               phys_addr_t lbase, unsigned int lsize)
+{
+       int i;
+
+       s5p_mfc_mem[0].dev = &s5p_device_mfc_r.dev;
+       s5p_mfc_mem[0].base = rbase;
+       s5p_mfc_mem[0].size = rsize;
+
+       s5p_mfc_mem[1].dev = &s5p_device_mfc_l.dev;
+       s5p_mfc_mem[1].base = lbase;
+       s5p_mfc_mem[1].size = lsize;
+
+       for (i = 0; i < ARRAY_SIZE(s5p_mfc_mem); i++) {
+               struct s5p_mfc_reserved_mem *area = &s5p_mfc_mem[i];
+               if (memblock_remove(area->base, area->size)) {
+                       printk(KERN_ERR "Failed to reserve memory for MFC device (%ld bytes at 0x%08lx)\n",
+                              area->size, (unsigned long) area->base);
+                       area->base = 0;
+               }
+       }
+}
+
+static int __init s5p_mfc_memory_init(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(s5p_mfc_mem); i++) {
+               struct s5p_mfc_reserved_mem *area = &s5p_mfc_mem[i];
+               if (!area->base)
+                       continue;
+
+               if (dma_declare_coherent_memory(area->dev, area->base,
+                               area->base, area->size,
+                               DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0)
+                       printk(KERN_ERR "Failed to declare coherent memory for MFC device (%ld bytes at 0x%08lx)\n",
+                              area->size, (unsigned long) area->base);
+       }
+       return 0;
+}
+device_initcall(s5p_mfc_memory_init);
diff --git a/arch/arm/plat-samsung/s5p-dev-uart.c b/arch/arm/plat-samsung/s5p-dev-uart.c
new file mode 100644 (file)
index 0000000..cafa3de
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2009,2012 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * Base S5P UART resource and device definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/irq.h>
+#include <mach/hardware.h>
+#include <mach/map.h>
+
+#include <plat/devs.h>
+
+ /* Serial port registrations */
+
+static struct resource s5p_uart0_resource[] = {
+       [0] = DEFINE_RES_MEM(S5P_PA_UART0, S5P_SZ_UART),
+       [1] = DEFINE_RES_IRQ(IRQ_UART0),
+};
+
+static struct resource s5p_uart1_resource[] = {
+       [0] = DEFINE_RES_MEM(S5P_PA_UART1, S5P_SZ_UART),
+       [1] = DEFINE_RES_IRQ(IRQ_UART1),
+};
+
+static struct resource s5p_uart2_resource[] = {
+       [0] = DEFINE_RES_MEM(S5P_PA_UART2, S5P_SZ_UART),
+       [1] = DEFINE_RES_IRQ(IRQ_UART2),
+};
+
+static struct resource s5p_uart3_resource[] = {
+#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
+       [0] = DEFINE_RES_MEM(S5P_PA_UART3, S5P_SZ_UART),
+       [1] = DEFINE_RES_IRQ(IRQ_UART3),
+#endif
+};
+
+static struct resource s5p_uart4_resource[] = {
+#if CONFIG_SERIAL_SAMSUNG_UARTS > 4
+       [0] = DEFINE_RES_MEM(S5P_PA_UART4, S5P_SZ_UART),
+       [1] = DEFINE_RES_IRQ(IRQ_UART4),
+#endif
+};
+
+static struct resource s5p_uart5_resource[] = {
+#if CONFIG_SERIAL_SAMSUNG_UARTS > 5
+       [0] = DEFINE_RES_MEM(S5P_PA_UART5, S5P_SZ_UART),
+       [1] = DEFINE_RES_IRQ(IRQ_UART5),
+#endif
+};
+
+struct s3c24xx_uart_resources s5p_uart_resources[] __initdata = {
+       [0] = {
+               .resources      = s5p_uart0_resource,
+               .nr_resources   = ARRAY_SIZE(s5p_uart0_resource),
+       },
+       [1] = {
+               .resources      = s5p_uart1_resource,
+               .nr_resources   = ARRAY_SIZE(s5p_uart1_resource),
+       },
+       [2] = {
+               .resources      = s5p_uart2_resource,
+               .nr_resources   = ARRAY_SIZE(s5p_uart2_resource),
+       },
+       [3] = {
+               .resources      = s5p_uart3_resource,
+               .nr_resources   = ARRAY_SIZE(s5p_uart3_resource),
+       },
+       [4] = {
+               .resources      = s5p_uart4_resource,
+               .nr_resources   = ARRAY_SIZE(s5p_uart4_resource),
+       },
+       [5] = {
+               .resources      = s5p_uart5_resource,
+               .nr_resources   = ARRAY_SIZE(s5p_uart5_resource),
+       },
+};
diff --git a/arch/arm/plat-samsung/s5p-irq-eint.c b/arch/arm/plat-samsung/s5p-irq-eint.c
new file mode 100644 (file)
index 0000000..33bd3f3
--- /dev/null
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * S5P - IRQ EINT support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+
+#include <asm/hardware/vic.h>
+
+#include <plat/regs-irqtype.h>
+
+#include <mach/map.h>
+#include <plat/cpu.h>
+#include <plat/pm.h>
+
+#include <plat/gpio-cfg.h>
+#include <mach/regs-gpio.h>
+
+static inline void s5p_irq_eint_mask(struct irq_data *data)
+{
+       u32 mask;
+
+       mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+       mask |= eint_irq_to_bit(data->irq);
+       __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+}
+
+static void s5p_irq_eint_unmask(struct irq_data *data)
+{
+       u32 mask;
+
+       mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+       mask &= ~(eint_irq_to_bit(data->irq));
+       __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+}
+
+static inline void s5p_irq_eint_ack(struct irq_data *data)
+{
+       __raw_writel(eint_irq_to_bit(data->irq),
+                    S5P_EINT_PEND(EINT_REG_NR(data->irq)));
+}
+
+static void s5p_irq_eint_maskack(struct irq_data *data)
+{
+       /* compiler should in-line these */
+       s5p_irq_eint_mask(data);
+       s5p_irq_eint_ack(data);
+}
+
+static int s5p_irq_eint_set_type(struct irq_data *data, unsigned int type)
+{
+       int offs = EINT_OFFSET(data->irq);
+       int shift;
+       u32 ctrl, mask;
+       u32 newvalue = 0;
+
+       switch (type) {
+       case IRQ_TYPE_EDGE_RISING:
+               newvalue = S5P_IRQ_TYPE_EDGE_RISING;
+               break;
+
+       case IRQ_TYPE_EDGE_FALLING:
+               newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
+               break;
+
+       case IRQ_TYPE_EDGE_BOTH:
+               newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
+               break;
+
+       case IRQ_TYPE_LEVEL_LOW:
+               newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
+               break;
+
+       case IRQ_TYPE_LEVEL_HIGH:
+               newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
+               break;
+
+       default:
+               printk(KERN_ERR "No such irq type %d", type);
+               return -EINVAL;
+       }
+
+       shift = (offs & 0x7) * 4;
+       mask = 0x7 << shift;
+
+       ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
+       ctrl &= ~mask;
+       ctrl |= newvalue << shift;
+       __raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
+
+       if ((0 <= offs) && (offs < 8))
+               s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
+
+       else if ((8 <= offs) && (offs < 16))
+               s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
+
+       else if ((16 <= offs) && (offs < 24))
+               s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
+
+       else if ((24 <= offs) && (offs < 32))
+               s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
+
+       else
+               printk(KERN_ERR "No such irq number %d", offs);
+
+       return 0;
+}
+
+static struct irq_chip s5p_irq_eint = {
+       .name           = "s5p-eint",
+       .irq_mask       = s5p_irq_eint_mask,
+       .irq_unmask     = s5p_irq_eint_unmask,
+       .irq_mask_ack   = s5p_irq_eint_maskack,
+       .irq_ack        = s5p_irq_eint_ack,
+       .irq_set_type   = s5p_irq_eint_set_type,
+#ifdef CONFIG_PM
+       .irq_set_wake   = s3c_irqext_wake,
+#endif
+};
+
+/* s5p_irq_demux_eint
+ *
+ * This function demuxes the IRQ from the group0 external interrupts,
+ * from EINTs 16 to 31. It is designed to be inlined into the specific
+ * handler s5p_irq_demux_eintX_Y.
+ *
+ * Each EINT pend/mask registers handle eight of them.
+ */
+static inline void s5p_irq_demux_eint(unsigned int start)
+{
+       u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
+       u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
+       unsigned int irq;
+
+       status &= ~mask;
+       status &= 0xff;
+
+       while (status) {
+               irq = fls(status) - 1;
+               generic_handle_irq(irq + start);
+               status &= ~(1 << irq);
+       }
+}
+
+static void s5p_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
+{
+       s5p_irq_demux_eint(IRQ_EINT(16));
+       s5p_irq_demux_eint(IRQ_EINT(24));
+}
+
+static inline void s5p_irq_vic_eint_mask(struct irq_data *data)
+{
+       void __iomem *base = irq_data_get_irq_chip_data(data);
+
+       s5p_irq_eint_mask(data);
+       writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE_CLEAR);
+}
+
+static void s5p_irq_vic_eint_unmask(struct irq_data *data)
+{
+       void __iomem *base = irq_data_get_irq_chip_data(data);
+
+       s5p_irq_eint_unmask(data);
+       writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE);
+}
+
+static inline void s5p_irq_vic_eint_ack(struct irq_data *data)
+{
+       __raw_writel(eint_irq_to_bit(data->irq),
+                    S5P_EINT_PEND(EINT_REG_NR(data->irq)));
+}
+
+static void s5p_irq_vic_eint_maskack(struct irq_data *data)
+{
+       s5p_irq_vic_eint_mask(data);
+       s5p_irq_vic_eint_ack(data);
+}
+
+static struct irq_chip s5p_irq_vic_eint = {
+       .name           = "s5p_vic_eint",
+       .irq_mask       = s5p_irq_vic_eint_mask,
+       .irq_unmask     = s5p_irq_vic_eint_unmask,
+       .irq_mask_ack   = s5p_irq_vic_eint_maskack,
+       .irq_ack        = s5p_irq_vic_eint_ack,
+       .irq_set_type   = s5p_irq_eint_set_type,
+#ifdef CONFIG_PM
+       .irq_set_wake   = s3c_irqext_wake,
+#endif
+};
+
+static int __init s5p_init_irq_eint(void)
+{
+       int irq;
+
+       for (irq = IRQ_EINT(0); irq <= IRQ_EINT(15); irq++)
+               irq_set_chip(irq, &s5p_irq_vic_eint);
+
+       for (irq = IRQ_EINT(16); irq <= IRQ_EINT(31); irq++) {
+               irq_set_chip_and_handler(irq, &s5p_irq_eint, handle_level_irq);
+               set_irq_flags(irq, IRQF_VALID);
+       }
+
+       irq_set_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31);
+       return 0;
+}
+
+arch_initcall(s5p_init_irq_eint);
diff --git a/arch/arm/plat-samsung/s5p-irq-gpioint.c b/arch/arm/plat-samsung/s5p-irq-gpioint.c
new file mode 100644 (file)
index 0000000..f9431fe
--- /dev/null
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Author: Kyungmin Park <kyungmin.park@samsung.com>
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include <mach/map.h>
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+
+#include <asm/mach/irq.h>
+
+#define GPIO_BASE(chip)                (((unsigned long)(chip)->base) & 0xFFFFF000u)
+
+#define CON_OFFSET             0x700
+#define MASK_OFFSET            0x900
+#define PEND_OFFSET            0xA00
+#define REG_OFFSET(x)          ((x) << 2)
+
+struct s5p_gpioint_bank {
+       struct list_head        list;
+       int                     start;
+       int                     nr_groups;
+       int                     irq;
+       struct samsung_gpio_chip        **chips;
+       void                    (*handler)(unsigned int, struct irq_desc *);
+};
+
+static LIST_HEAD(banks);
+
+static int s5p_gpioint_set_type(struct irq_data *d, unsigned int type)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct irq_chip_type *ct = gc->chip_types;
+       unsigned int shift = (d->irq - gc->irq_base) << 2;
+
+       switch (type) {
+       case IRQ_TYPE_EDGE_RISING:
+               type = S5P_IRQ_TYPE_EDGE_RISING;
+               break;
+       case IRQ_TYPE_EDGE_FALLING:
+               type = S5P_IRQ_TYPE_EDGE_FALLING;
+               break;
+       case IRQ_TYPE_EDGE_BOTH:
+               type = S5P_IRQ_TYPE_EDGE_BOTH;
+               break;
+       case IRQ_TYPE_LEVEL_HIGH:
+               type = S5P_IRQ_TYPE_LEVEL_HIGH;
+               break;
+       case IRQ_TYPE_LEVEL_LOW:
+               type = S5P_IRQ_TYPE_LEVEL_LOW;
+               break;
+       case IRQ_TYPE_NONE:
+       default:
+               printk(KERN_WARNING "No irq type\n");
+               return -EINVAL;
+       }
+
+       gc->type_cache &= ~(0x7 << shift);
+       gc->type_cache |= type << shift;
+       writel(gc->type_cache, gc->reg_base + ct->regs.type);
+       return 0;
+}
+
+static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
+{
+       struct s5p_gpioint_bank *bank = irq_get_handler_data(irq);
+       int group, pend_offset, mask_offset;
+       unsigned int pend, mask;
+
+       struct irq_chip *chip = irq_get_chip(irq);
+       chained_irq_enter(chip, desc);
+
+       for (group = 0; group < bank->nr_groups; group++) {
+               struct samsung_gpio_chip *chip = bank->chips[group];
+               if (!chip)
+                       continue;
+
+               pend_offset = REG_OFFSET(group);
+               pend = __raw_readl(GPIO_BASE(chip) + PEND_OFFSET + pend_offset);
+               if (!pend)
+                       continue;
+
+               mask_offset = REG_OFFSET(group);
+               mask = __raw_readl(GPIO_BASE(chip) + MASK_OFFSET + mask_offset);
+               pend &= ~mask;
+
+               while (pend) {
+                       int offset = fls(pend) - 1;
+                       int real_irq = chip->irq_base + offset;
+                       generic_handle_irq(real_irq);
+                       pend &= ~BIT(offset);
+               }
+       }
+       chained_irq_exit(chip, desc);
+}
+
+static __init int s5p_gpioint_add(struct samsung_gpio_chip *chip)
+{
+       static int used_gpioint_groups = 0;
+       int group = chip->group;
+       struct s5p_gpioint_bank *b, *bank = NULL;
+       struct irq_chip_generic *gc;
+       struct irq_chip_type *ct;
+
+       if (used_gpioint_groups >= S5P_GPIOINT_GROUP_COUNT)
+               return -ENOMEM;
+
+       list_for_each_entry(b, &banks, list) {
+               if (group >= b->start && group < b->start + b->nr_groups) {
+                       bank = b;
+                       break;
+               }
+       }
+       if (!bank)
+               return -EINVAL;
+
+       if (!bank->handler) {
+               bank->chips = kzalloc(sizeof(struct samsung_gpio_chip *) *
+                                     bank->nr_groups, GFP_KERNEL);
+               if (!bank->chips)
+                       return -ENOMEM;
+
+               irq_set_chained_handler(bank->irq, s5p_gpioint_handler);
+               irq_set_handler_data(bank->irq, bank);
+               bank->handler = s5p_gpioint_handler;
+               printk(KERN_INFO "Registered chained gpio int handler for interrupt %d.\n",
+                      bank->irq);
+       }
+
+       /*
+        * chained GPIO irq has been successfully registered, allocate new gpio
+        * int group and assign irq numbers
+        */
+       chip->irq_base = S5P_GPIOINT_BASE +
+                        used_gpioint_groups * S5P_GPIOINT_GROUP_SIZE;
+       used_gpioint_groups++;
+
+       bank->chips[group - bank->start] = chip;
+
+       gc = irq_alloc_generic_chip("s5p_gpioint", 1, chip->irq_base,
+                                   (void __iomem *)GPIO_BASE(chip),
+                                   handle_level_irq);
+       if (!gc)
+               return -ENOMEM;
+       ct = gc->chip_types;
+       ct->chip.irq_ack = irq_gc_ack_set_bit;
+       ct->chip.irq_mask = irq_gc_mask_set_bit;
+       ct->chip.irq_unmask = irq_gc_mask_clr_bit;
+       ct->chip.irq_set_type = s5p_gpioint_set_type,
+       ct->regs.ack = PEND_OFFSET + REG_OFFSET(group - bank->start);
+       ct->regs.mask = MASK_OFFSET + REG_OFFSET(group - bank->start);
+       ct->regs.type = CON_OFFSET + REG_OFFSET(group - bank->start);
+       irq_setup_generic_chip(gc, IRQ_MSK(chip->chip.ngpio),
+                              IRQ_GC_INIT_MASK_CACHE,
+                              IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+       return 0;
+}
+
+int __init s5p_register_gpio_interrupt(int pin)
+{
+       struct samsung_gpio_chip *my_chip = samsung_gpiolib_getchip(pin);
+       int offset, group;
+       int ret;
+
+       if (!my_chip)
+               return -EINVAL;
+
+       offset = pin - my_chip->chip.base;
+       group = my_chip->group;
+
+       /* check if the group has been already registered */
+       if (my_chip->irq_base)
+               return my_chip->irq_base + offset;
+
+       /* register gpio group */
+       ret = s5p_gpioint_add(my_chip);
+       if (ret == 0) {
+               my_chip->chip.to_irq = samsung_gpiolib_to_irq;
+               printk(KERN_INFO "Registered interrupt support for gpio group %d.\n",
+                      group);
+               return my_chip->irq_base + offset;
+       }
+       return ret;
+}
+
+int __init s5p_register_gpioint_bank(int chain_irq, int start, int nr_groups)
+{
+       struct s5p_gpioint_bank *bank;
+
+       bank = kzalloc(sizeof(*bank), GFP_KERNEL);
+       if (!bank)
+               return -ENOMEM;
+
+       bank->start = start;
+       bank->nr_groups = nr_groups;
+       bank->irq = chain_irq;
+
+       list_add_tail(&bank->list, &banks);
+       return 0;
+}
diff --git a/arch/arm/plat-samsung/s5p-irq-pm.c b/arch/arm/plat-samsung/s5p-irq-pm.c
new file mode 100644 (file)
index 0000000..7c1e3b7
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Based on arch/arm/plat-s3c24xx/irq-pm.c,
+ * Copyright (c) 2003,2004 Simtec Electronics
+ *     Ben Dooks <ben@simtec.co.uk>
+ *     http://armlinux.simtec.co.uk/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+
+#include <plat/cpu.h>
+#include <plat/irqs.h>
+#include <plat/pm.h>
+#include <mach/map.h>
+
+#include <mach/regs-gpio.h>
+#include <mach/regs-irq.h>
+
+/* state for IRQs over sleep */
+
+/* default is to allow for EINT0..EINT31, and IRQ_RTC_TIC, IRQ_RTC_ALARM,
+ * as wakeup sources
+ *
+ * set bit to 1 in allow bitfield to enable the wakeup settings on it
+*/
+
+unsigned long s3c_irqwake_intallow     = 0x00000006L;
+unsigned long s3c_irqwake_eintallow    = 0xffffffffL;
+
+int s3c_irq_wake(struct irq_data *data, unsigned int state)
+{
+       unsigned long irqbit;
+       unsigned int irq_rtc_tic, irq_rtc_alarm;
+
+#ifdef CONFIG_ARCH_EXYNOS
+       if (soc_is_exynos5250()) {
+               irq_rtc_tic = EXYNOS5_IRQ_RTC_TIC;
+               irq_rtc_alarm = EXYNOS5_IRQ_RTC_ALARM;
+       } else {
+               irq_rtc_tic = EXYNOS4_IRQ_RTC_TIC;
+               irq_rtc_alarm = EXYNOS4_IRQ_RTC_ALARM;
+       }
+#else
+       irq_rtc_tic = IRQ_RTC_TIC;
+       irq_rtc_alarm = IRQ_RTC_ALARM;
+#endif
+
+       if (data->irq == irq_rtc_tic || data->irq == irq_rtc_alarm) {
+               irqbit = 1 << (data->irq + 1 - irq_rtc_alarm);
+
+               if (!state)
+                       s3c_irqwake_intmask |= irqbit;
+               else
+                       s3c_irqwake_intmask &= ~irqbit;
+       } else {
+               return -ENOENT;
+       }
+
+       return 0;
+}
+
+static struct sleep_save eint_save[] = {
+       SAVE_ITEM(S5P_EINT_CON(0)),
+       SAVE_ITEM(S5P_EINT_CON(1)),
+       SAVE_ITEM(S5P_EINT_CON(2)),
+       SAVE_ITEM(S5P_EINT_CON(3)),
+
+       SAVE_ITEM(S5P_EINT_FLTCON(0)),
+       SAVE_ITEM(S5P_EINT_FLTCON(1)),
+       SAVE_ITEM(S5P_EINT_FLTCON(2)),
+       SAVE_ITEM(S5P_EINT_FLTCON(3)),
+       SAVE_ITEM(S5P_EINT_FLTCON(4)),
+       SAVE_ITEM(S5P_EINT_FLTCON(5)),
+       SAVE_ITEM(S5P_EINT_FLTCON(6)),
+       SAVE_ITEM(S5P_EINT_FLTCON(7)),
+
+       SAVE_ITEM(S5P_EINT_MASK(0)),
+       SAVE_ITEM(S5P_EINT_MASK(1)),
+       SAVE_ITEM(S5P_EINT_MASK(2)),
+       SAVE_ITEM(S5P_EINT_MASK(3)),
+};
+
+int s3c24xx_irq_suspend(void)
+{
+       s3c_pm_do_save(eint_save, ARRAY_SIZE(eint_save));
+
+       return 0;
+}
+
+void s3c24xx_irq_resume(void)
+{
+       s3c_pm_do_restore(eint_save, ARRAY_SIZE(eint_save));
+}
+
diff --git a/arch/arm/plat-samsung/s5p-irq.c b/arch/arm/plat-samsung/s5p-irq.c
new file mode 100644 (file)
index 0000000..dfb47d6
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2009 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * S5P - Interrupt handling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/hardware/vic.h>
+
+#include <mach/map.h>
+#include <plat/regs-timer.h>
+#include <plat/cpu.h>
+#include <plat/irq-vic-timer.h>
+
+void __init s5p_init_irq(u32 *vic, u32 num_vic)
+{
+#ifdef CONFIG_ARM_VIC
+       int irq;
+
+       /* initialize the VICs */
+       for (irq = 0; irq < num_vic; irq++)
+               vic_init(VA_VIC(irq), VIC_BASE(irq), vic[irq], 0);
+#endif
+
+       s3c_init_vic_timer_irq(5, IRQ_TIMER0);
+}
diff --git a/arch/arm/plat-samsung/s5p-pm.c b/arch/arm/plat-samsung/s5p-pm.c
new file mode 100644 (file)
index 0000000..0747468
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * S5P Power Manager (Suspend-To-RAM) support
+ *
+ * Based on arch/arm/plat-s3c24xx/pm.c
+ * Copyright (c) 2004,2006 Simtec Electronics
+ *     Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/suspend.h>
+#include <plat/pm.h>
+
+#define PFX "s5p pm: "
+
+/* s3c_pm_configure_extint
+ *
+ * configure all external interrupt pins
+*/
+
+void s3c_pm_configure_extint(void)
+{
+       /* nothing here yet */
+}
+
+void s3c_pm_restore_core(void)
+{
+       /* nothing here yet */
+}
+
+void s3c_pm_save_core(void)
+{
+       /* nothing here yet */
+}
+
diff --git a/arch/arm/plat-samsung/s5p-sleep.S b/arch/arm/plat-samsung/s5p-sleep.S
new file mode 100644 (file)
index 0000000..bdf6dad
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Common S5P Sleep Code
+ * Based on S3C64XX sleep code by:
+ *     Ben Dooks, (c) 2008 Simtec Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/hardware/cache-l2x0.h>
+
+/*
+ *      The following code is located into the .data section. This is to
+ *      allow l2x0_regs_phys to be accessed with a relative load while we
+ *      can't rely on any MMU translation. We could have put l2x0_regs_phys
+ *      in the .text section as well, but some setups might insist on it to
+ *      be truly read-only. (Reference from: arch/arm/kernel/sleep.S)
+ */
+       .data
+       .align
+
+       /*
+        * sleep magic, to allow the bootloader to check for a valid
+        * image to resume to. Must be the first word before the
+        * s3c_cpu_resume entry.
+        */
+
+       .word   0x2bedf00d
+
+       /*
+        * s3c_cpu_resume
+        *
+        * resume code entry for bootloader to call
+        */
+
+ENTRY(s3c_cpu_resume)
+#ifdef CONFIG_CACHE_L2X0
+       adr     r0, l2x0_regs_phys
+       ldr     r0, [r0]
+       ldr     r1, [r0, #L2X0_R_PHY_BASE]
+       ldr     r2, [r1, #L2X0_CTRL]
+       tst     r2, #0x1
+       bne     resume_l2on
+       ldr     r2, [r0, #L2X0_R_AUX_CTRL]
+       str     r2, [r1, #L2X0_AUX_CTRL]
+       ldr     r2, [r0, #L2X0_R_TAG_LATENCY]
+       str     r2, [r1, #L2X0_TAG_LATENCY_CTRL]
+       ldr     r2, [r0, #L2X0_R_DATA_LATENCY]
+       str     r2, [r1, #L2X0_DATA_LATENCY_CTRL]
+       ldr     r2, [r0, #L2X0_R_PREFETCH_CTRL]
+       str     r2, [r1, #L2X0_PREFETCH_CTRL]
+       ldr     r2, [r0, #L2X0_R_PWR_CTRL]
+       str     r2, [r1, #L2X0_POWER_CTRL]
+       mov     r2, #1
+       str     r2, [r1, #L2X0_CTRL]
+resume_l2on:
+#endif
+       b       cpu_resume
+ENDPROC(s3c_cpu_resume)
+#ifdef CONFIG_CACHE_L2X0
+       .globl l2x0_regs_phys
+l2x0_regs_phys:
+       .long   0
+#endif
diff --git a/arch/arm/plat-samsung/s5p-time.c b/arch/arm/plat-samsung/s5p-time.c
new file mode 100644 (file)
index 0000000..028b6e8
--- /dev/null
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * S5P - Common hr-timer support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/platform_device.h>
+
+#include <asm/smp_twd.h>
+#include <asm/mach/time.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/sched_clock.h>
+
+#include <mach/map.h>
+#include <plat/devs.h>
+#include <plat/regs-timer.h>
+#include <plat/s5p-time.h>
+
+static struct clk *tin_event;
+static struct clk *tin_source;
+static struct clk *tdiv_event;
+static struct clk *tdiv_source;
+static struct clk *timerclk;
+static struct s5p_timer_source timer_source;
+static unsigned long clock_count_per_tick;
+static void s5p_timer_resume(void);
+
+static void s5p_time_stop(enum s5p_timer_mode mode)
+{
+       unsigned long tcon;
+
+       tcon = __raw_readl(S3C2410_TCON);
+
+       switch (mode) {
+       case S5P_PWM0:
+               tcon &= ~S3C2410_TCON_T0START;
+               break;
+
+       case S5P_PWM1:
+               tcon &= ~S3C2410_TCON_T1START;
+               break;
+
+       case S5P_PWM2:
+               tcon &= ~S3C2410_TCON_T2START;
+               break;
+
+       case S5P_PWM3:
+               tcon &= ~S3C2410_TCON_T3START;
+               break;
+
+       case S5P_PWM4:
+               tcon &= ~S3C2410_TCON_T4START;
+               break;
+
+       default:
+               printk(KERN_ERR "Invalid Timer %d\n", mode);
+               break;
+       }
+       __raw_writel(tcon, S3C2410_TCON);
+}
+
+static void s5p_time_setup(enum s5p_timer_mode mode, unsigned long tcnt)
+{
+       unsigned long tcon;
+
+       tcon = __raw_readl(S3C2410_TCON);
+
+       tcnt--;
+
+       switch (mode) {
+       case S5P_PWM0:
+               tcon &= ~(0x0f << 0);
+               tcon |= S3C2410_TCON_T0MANUALUPD;
+               break;
+
+       case S5P_PWM1:
+               tcon &= ~(0x0f << 8);
+               tcon |= S3C2410_TCON_T1MANUALUPD;
+               break;
+
+       case S5P_PWM2:
+               tcon &= ~(0x0f << 12);
+               tcon |= S3C2410_TCON_T2MANUALUPD;
+               break;
+
+       case S5P_PWM3:
+               tcon &= ~(0x0f << 16);
+               tcon |= S3C2410_TCON_T3MANUALUPD;
+               break;
+
+       case S5P_PWM4:
+               tcon &= ~(0x07 << 20);
+               tcon |= S3C2410_TCON_T4MANUALUPD;
+               break;
+
+       default:
+               printk(KERN_ERR "Invalid Timer %d\n", mode);
+               break;
+       }
+
+       __raw_writel(tcnt, S3C2410_TCNTB(mode));
+       __raw_writel(tcnt, S3C2410_TCMPB(mode));
+       __raw_writel(tcon, S3C2410_TCON);
+}
+
+static void s5p_time_start(enum s5p_timer_mode mode, bool periodic)
+{
+       unsigned long tcon;
+
+       tcon  = __raw_readl(S3C2410_TCON);
+
+       switch (mode) {
+       case S5P_PWM0:
+               tcon |= S3C2410_TCON_T0START;
+               tcon &= ~S3C2410_TCON_T0MANUALUPD;
+
+               if (periodic)
+                       tcon |= S3C2410_TCON_T0RELOAD;
+               else
+                       tcon &= ~S3C2410_TCON_T0RELOAD;
+               break;
+
+       case S5P_PWM1:
+               tcon |= S3C2410_TCON_T1START;
+               tcon &= ~S3C2410_TCON_T1MANUALUPD;
+
+               if (periodic)
+                       tcon |= S3C2410_TCON_T1RELOAD;
+               else
+                       tcon &= ~S3C2410_TCON_T1RELOAD;
+               break;
+
+       case S5P_PWM2:
+               tcon |= S3C2410_TCON_T2START;
+               tcon &= ~S3C2410_TCON_T2MANUALUPD;
+
+               if (periodic)
+                       tcon |= S3C2410_TCON_T2RELOAD;
+               else
+                       tcon &= ~S3C2410_TCON_T2RELOAD;
+               break;
+
+       case S5P_PWM3:
+               tcon |= S3C2410_TCON_T3START;
+               tcon &= ~S3C2410_TCON_T3MANUALUPD;
+
+               if (periodic)
+                       tcon |= S3C2410_TCON_T3RELOAD;
+               else
+                       tcon &= ~S3C2410_TCON_T3RELOAD;
+               break;
+
+       case S5P_PWM4:
+               tcon |= S3C2410_TCON_T4START;
+               tcon &= ~S3C2410_TCON_T4MANUALUPD;
+
+               if (periodic)
+                       tcon |= S3C2410_TCON_T4RELOAD;
+               else
+                       tcon &= ~S3C2410_TCON_T4RELOAD;
+               break;
+
+       default:
+               printk(KERN_ERR "Invalid Timer %d\n", mode);
+               break;
+       }
+       __raw_writel(tcon, S3C2410_TCON);
+}
+
+static int s5p_set_next_event(unsigned long cycles,
+                               struct clock_event_device *evt)
+{
+       s5p_time_setup(timer_source.event_id, cycles);
+       s5p_time_start(timer_source.event_id, NON_PERIODIC);
+
+       return 0;
+}
+
+static void s5p_set_mode(enum clock_event_mode mode,
+                               struct clock_event_device *evt)
+{
+       s5p_time_stop(timer_source.event_id);
+
+       switch (mode) {
+       case CLOCK_EVT_MODE_PERIODIC:
+               s5p_time_setup(timer_source.event_id, clock_count_per_tick);
+               s5p_time_start(timer_source.event_id, PERIODIC);
+               break;
+
+       case CLOCK_EVT_MODE_ONESHOT:
+               break;
+
+       case CLOCK_EVT_MODE_UNUSED:
+       case CLOCK_EVT_MODE_SHUTDOWN:
+               break;
+
+       case CLOCK_EVT_MODE_RESUME:
+               s5p_timer_resume();
+               break;
+       }
+}
+
+static void s5p_timer_resume(void)
+{
+       /* event timer restart */
+       s5p_time_setup(timer_source.event_id, clock_count_per_tick);
+       s5p_time_start(timer_source.event_id, PERIODIC);
+
+       /* source timer restart */
+       s5p_time_setup(timer_source.source_id, TCNT_MAX);
+       s5p_time_start(timer_source.source_id, PERIODIC);
+}
+
+void __init s5p_set_timer_source(enum s5p_timer_mode event,
+                                enum s5p_timer_mode source)
+{
+       s3c_device_timer[event].dev.bus = &platform_bus_type;
+       s3c_device_timer[source].dev.bus = &platform_bus_type;
+
+       timer_source.event_id = event;
+       timer_source.source_id = source;
+}
+
+static struct clock_event_device time_event_device = {
+       .name           = "s5p_event_timer",
+       .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+       .rating         = 200,
+       .set_next_event = s5p_set_next_event,
+       .set_mode       = s5p_set_mode,
+};
+
+static irqreturn_t s5p_clock_event_isr(int irq, void *dev_id)
+{
+       struct clock_event_device *evt = dev_id;
+
+       evt->event_handler(evt);
+
+       return IRQ_HANDLED;
+}
+
+static struct irqaction s5p_clock_event_irq = {
+       .name           = "s5p_time_irq",
+       .flags          = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+       .handler        = s5p_clock_event_isr,
+       .dev_id         = &time_event_device,
+};
+
+static void __init s5p_clockevent_init(void)
+{
+       unsigned long pclk;
+       unsigned long clock_rate;
+       unsigned int irq_number;
+       struct clk *tscaler;
+
+       pclk = clk_get_rate(timerclk);
+
+       tscaler = clk_get_parent(tdiv_event);
+
+       clk_set_rate(tscaler, pclk / 2);
+       clk_set_rate(tdiv_event, pclk / 2);
+       clk_set_parent(tin_event, tdiv_event);
+
+       clock_rate = clk_get_rate(tin_event);
+       clock_count_per_tick = clock_rate / HZ;
+
+       clockevents_calc_mult_shift(&time_event_device,
+                                   clock_rate, S5PTIMER_MIN_RANGE);
+       time_event_device.max_delta_ns =
+               clockevent_delta2ns(-1, &time_event_device);
+       time_event_device.min_delta_ns =
+               clockevent_delta2ns(1, &time_event_device);
+
+       time_event_device.cpumask = cpumask_of(0);
+       clockevents_register_device(&time_event_device);
+
+       irq_number = timer_source.event_id + IRQ_TIMER0;
+       setup_irq(irq_number, &s5p_clock_event_irq);
+}
+
+static void __iomem *s5p_timer_reg(void)
+{
+       unsigned long offset = 0;
+
+       switch (timer_source.source_id) {
+       case S5P_PWM0:
+       case S5P_PWM1:
+       case S5P_PWM2:
+       case S5P_PWM3:
+               offset = (timer_source.source_id * 0x0c) + 0x14;
+               break;
+
+       case S5P_PWM4:
+               offset = 0x40;
+               break;
+
+       default:
+               printk(KERN_ERR "Invalid Timer %d\n", timer_source.source_id);
+               return NULL;
+       }
+
+       return S3C_TIMERREG(offset);
+}
+
+/*
+ * Override the global weak sched_clock symbol with this
+ * local implementation which uses the clocksource to get some
+ * better resolution when scheduling the kernel. We accept that
+ * this wraps around for now, since it is just a relative time
+ * stamp. (Inspired by U300 implementation.)
+ */
+static u32 notrace s5p_read_sched_clock(void)
+{
+       void __iomem *reg = s5p_timer_reg();
+
+       if (!reg)
+               return 0;
+
+       return ~__raw_readl(reg);
+}
+
+static void __init s5p_clocksource_init(void)
+{
+       unsigned long pclk;
+       unsigned long clock_rate;
+
+       pclk = clk_get_rate(timerclk);
+
+       clk_set_rate(tdiv_source, pclk / 2);
+       clk_set_parent(tin_source, tdiv_source);
+
+       clock_rate = clk_get_rate(tin_source);
+
+       s5p_time_setup(timer_source.source_id, TCNT_MAX);
+       s5p_time_start(timer_source.source_id, PERIODIC);
+
+       setup_sched_clock(s5p_read_sched_clock, 32, clock_rate);
+
+       if (clocksource_mmio_init(s5p_timer_reg(), "s5p_clocksource_timer",
+                       clock_rate, 250, 32, clocksource_mmio_readl_down))
+               panic("s5p_clocksource_timer: can't register clocksource\n");
+}
+
+static void __init s5p_timer_resources(void)
+{
+
+       unsigned long event_id = timer_source.event_id;
+       unsigned long source_id = timer_source.source_id;
+       char devname[15];
+
+       timerclk = clk_get(NULL, "timers");
+       if (IS_ERR(timerclk))
+               panic("failed to get timers clock for timer");
+
+       clk_enable(timerclk);
+
+       sprintf(devname, "s3c24xx-pwm.%lu", event_id);
+       s3c_device_timer[event_id].id = event_id;
+       s3c_device_timer[event_id].dev.init_name = devname;
+
+       tin_event = clk_get(&s3c_device_timer[event_id].dev, "pwm-tin");
+       if (IS_ERR(tin_event))
+               panic("failed to get pwm-tin clock for event timer");
+
+       tdiv_event = clk_get(&s3c_device_timer[event_id].dev, "pwm-tdiv");
+       if (IS_ERR(tdiv_event))
+               panic("failed to get pwm-tdiv clock for event timer");
+
+       clk_enable(tin_event);
+
+       sprintf(devname, "s3c24xx-pwm.%lu", source_id);
+       s3c_device_timer[source_id].id = source_id;
+       s3c_device_timer[source_id].dev.init_name = devname;
+
+       tin_source = clk_get(&s3c_device_timer[source_id].dev, "pwm-tin");
+       if (IS_ERR(tin_source))
+               panic("failed to get pwm-tin clock for source timer");
+
+       tdiv_source = clk_get(&s3c_device_timer[source_id].dev, "pwm-tdiv");
+       if (IS_ERR(tdiv_source))
+               panic("failed to get pwm-tdiv clock for source timer");
+
+       clk_enable(tin_source);
+}
+
+static void __init s5p_timer_init(void)
+{
+       s5p_timer_resources();
+       s5p_clockevent_init();
+       s5p_clocksource_init();
+}
+
+struct sys_timer s5p_timer = {
+       .init           = s5p_timer_init,
+};
diff --git a/arch/arm/plat-samsung/setup-mipiphy.c b/arch/arm/plat-samsung/setup-mipiphy.c
new file mode 100644 (file)
index 0000000..683c466
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ *
+ * S5P - Helper functions for MIPI-CSIS and MIPI-DSIM D-PHY control
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <mach/regs-clock.h>
+
+static int __s5p_mipi_phy_control(struct platform_device *pdev,
+                                 bool on, u32 reset)
+{
+       static DEFINE_SPINLOCK(lock);
+       void __iomem *addr;
+       unsigned long flags;
+       int pid;
+       u32 cfg;
+
+       if (!pdev)
+               return -EINVAL;
+
+       pid = (pdev->id == -1) ? 0 : pdev->id;
+
+       if (pid != 0 && pid != 1)
+               return -EINVAL;
+
+       addr = S5P_MIPI_DPHY_CONTROL(pid);
+
+       spin_lock_irqsave(&lock, flags);
+
+       cfg = __raw_readl(addr);
+       cfg = on ? (cfg | reset) : (cfg & ~reset);
+       __raw_writel(cfg, addr);
+
+       if (on) {
+               cfg |= S5P_MIPI_DPHY_ENABLE;
+       } else if (!(cfg & (S5P_MIPI_DPHY_SRESETN |
+                           S5P_MIPI_DPHY_MRESETN) & ~reset)) {
+               cfg &= ~S5P_MIPI_DPHY_ENABLE;
+       }
+
+       __raw_writel(cfg, addr);
+       spin_unlock_irqrestore(&lock, flags);
+
+       return 0;
+}
+
+int s5p_csis_phy_enable(struct platform_device *pdev, bool on)
+{
+       return __s5p_mipi_phy_control(pdev, on, S5P_MIPI_DPHY_SRESETN);
+}
+
+int s5p_dsim_phy_enable(struct platform_device *pdev, bool on)
+{
+       return __s5p_mipi_phy_control(pdev, on, S5P_MIPI_DPHY_MRESETN);
+}
index 387655b5ce0593025e4a5b73194fa2033dc3965c..4404f82d59793d1227e2c6c0474941801b36807f 100644 (file)
@@ -8,6 +8,17 @@ choice
        prompt "ST SPEAr Family"
        default ARCH_SPEAR3XX
 
+config ARCH_SPEAR13XX
+       bool "ST SPEAr13xx with Device Tree"
+       select ARM_GIC
+       select CPU_V7
+       select USE_OF
+       select HAVE_SMP
+       select MIGHT_HAVE_CACHE_L2X0
+       select PINCTRL
+       help
+         Support for ARM's SPEAr13xx family
+
 config ARCH_SPEAR3XX
        bool "ST SPEAr3xx with Device Tree"
        select ARM_VIC
@@ -27,6 +38,7 @@ config ARCH_SPEAR6XX
 endchoice
 
 # Adding SPEAr machine specific configuration files
+source "arch/arm/mach-spear13xx/Kconfig"
 source "arch/arm/mach-spear3xx/Kconfig"
 source "arch/arm/mach-spear6xx/Kconfig"
 
index 7744802c83e733d8c564b2ae63a3013126afb6ef..2607bd05c525ed12f4821064529269a7ae4169f2 100644 (file)
@@ -3,6 +3,7 @@
 #
 
 # Common support
-obj-y  := clock.o restart.o time.o pl080.o
+obj-y  := restart.o time.o
 
-obj-$(CONFIG_ARCH_SPEAR3XX)    += shirq.o
+obj-$(CONFIG_ARCH_SPEAR3XX)    += pl080.o shirq.o
+obj-$(CONFIG_ARCH_SPEAR6XX)    += pl080.o
diff --git a/arch/arm/plat-spear/clock.c b/arch/arm/plat-spear/clock.c
deleted file mode 100644 (file)
index 67dd003..0000000
+++ /dev/null
@@ -1,1005 +0,0 @@
-/*
- * arch/arm/plat-spear/clock.c
- *
- * Clock framework for SPEAr platform
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/bug.h>
-#include <linux/clk.h>
-#include <linux/debugfs.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <plat/clock.h>
-
-static DEFINE_SPINLOCK(clocks_lock);
-static LIST_HEAD(root_clks);
-#ifdef CONFIG_DEBUG_FS
-static LIST_HEAD(clocks);
-#endif
-
-static void propagate_rate(struct clk *, int on_init);
-#ifdef CONFIG_DEBUG_FS
-static int clk_debugfs_reparent(struct clk *);
-#endif
-
-static int generic_clk_enable(struct clk *clk)
-{
-       unsigned int val;
-
-       if (!clk->en_reg)
-               return -EFAULT;
-
-       val = readl(clk->en_reg);
-       if (unlikely(clk->flags & RESET_TO_ENABLE))
-               val &= ~(1 << clk->en_reg_bit);
-       else
-               val |= 1 << clk->en_reg_bit;
-
-       writel(val, clk->en_reg);
-
-       return 0;
-}
-
-static void generic_clk_disable(struct clk *clk)
-{
-       unsigned int val;
-
-       if (!clk->en_reg)
-               return;
-
-       val = readl(clk->en_reg);
-       if (unlikely(clk->flags & RESET_TO_ENABLE))
-               val |= 1 << clk->en_reg_bit;
-       else
-               val &= ~(1 << clk->en_reg_bit);
-
-       writel(val, clk->en_reg);
-}
-
-/* generic clk ops */
-static struct clkops generic_clkops = {
-       .enable = generic_clk_enable,
-       .disable = generic_clk_disable,
-};
-
-/* returns current programmed clocks clock info structure */
-static struct pclk_info *pclk_info_get(struct clk *clk)
-{
-       unsigned int val, i;
-       struct pclk_info *info = NULL;
-
-       val = (readl(clk->pclk_sel->pclk_sel_reg) >> clk->pclk_sel_shift)
-               & clk->pclk_sel->pclk_sel_mask;
-
-       for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
-               if (clk->pclk_sel->pclk_info[i].pclk_val == val)
-                       info = &clk->pclk_sel->pclk_info[i];
-       }
-
-       return info;
-}
-
-/*
- * Set Update pclk, and pclk_info of clk and add clock sibling node to current
- * parents children list
- */
-static void clk_reparent(struct clk *clk, struct pclk_info *pclk_info)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&clocks_lock, flags);
-       list_del(&clk->sibling);
-       list_add(&clk->sibling, &pclk_info->pclk->children);
-
-       clk->pclk = pclk_info->pclk;
-       spin_unlock_irqrestore(&clocks_lock, flags);
-
-#ifdef CONFIG_DEBUG_FS
-       clk_debugfs_reparent(clk);
-#endif
-}
-
-static void do_clk_disable(struct clk *clk)
-{
-       if (!clk)
-               return;
-
-       if (!clk->usage_count) {
-               WARN_ON(1);
-               return;
-       }
-
-       clk->usage_count--;
-
-       if (clk->usage_count == 0) {
-               /*
-                * Surely, there are no active childrens or direct users
-                * of this clock
-                */
-               if (clk->pclk)
-                       do_clk_disable(clk->pclk);
-
-               if (clk->ops && clk->ops->disable)
-                       clk->ops->disable(clk);
-       }
-}
-
-static int do_clk_enable(struct clk *clk)
-{
-       int ret = 0;
-
-       if (!clk)
-               return -EFAULT;
-
-       if (clk->usage_count == 0) {
-               if (clk->pclk) {
-                       ret = do_clk_enable(clk->pclk);
-                       if (ret)
-                               goto err;
-               }
-               if (clk->ops && clk->ops->enable) {
-                       ret = clk->ops->enable(clk);
-                       if (ret) {
-                               if (clk->pclk)
-                                       do_clk_disable(clk->pclk);
-                               goto err;
-                       }
-               }
-               /*
-                * Since the clock is going to be used for the first
-                * time please reclac
-                */
-               if (clk->recalc) {
-                       ret = clk->recalc(clk);
-                       if (ret)
-                               goto err;
-               }
-       }
-       clk->usage_count++;
-err:
-       return ret;
-}
-
-/*
- * clk_enable - inform the system when the clock source should be running.
- * @clk: clock source
- *
- * If the clock can not be enabled/disabled, this should return success.
- *
- * Returns success (0) or negative errno.
- */
-int clk_enable(struct clk *clk)
-{
-       unsigned long flags;
-       int ret = 0;
-
-       spin_lock_irqsave(&clocks_lock, flags);
-       ret = do_clk_enable(clk);
-       spin_unlock_irqrestore(&clocks_lock, flags);
-       return ret;
-}
-EXPORT_SYMBOL(clk_enable);
-
-/*
- * clk_disable - inform the system when the clock source is no longer required.
- * @clk: clock source
- *
- * Inform the system that a clock source is no longer required by
- * a driver and may be shut down.
- *
- * Implementation detail: if the clock source is shared between
- * multiple drivers, clk_enable() calls must be balanced by the
- * same number of clk_disable() calls for the clock source to be
- * disabled.
- */
-void clk_disable(struct clk *clk)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&clocks_lock, flags);
-       do_clk_disable(clk);
-       spin_unlock_irqrestore(&clocks_lock, flags);
-}
-EXPORT_SYMBOL(clk_disable);
-
-/**
- * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
- *              This is only valid once the clock source has been enabled.
- * @clk: clock source
- */
-unsigned long clk_get_rate(struct clk *clk)
-{
-       unsigned long flags, rate;
-
-       spin_lock_irqsave(&clocks_lock, flags);
-       rate = clk->rate;
-       spin_unlock_irqrestore(&clocks_lock, flags);
-
-       return rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-/**
- * clk_set_parent - set the parent clock source for this clock
- * @clk: clock source
- * @parent: parent clock source
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
-       int i, found = 0, val = 0;
-       unsigned long flags;
-
-       if (!clk || !parent)
-               return -EFAULT;
-       if (clk->pclk == parent)
-               return 0;
-       if (!clk->pclk_sel)
-               return -EPERM;
-
-       /* check if requested parent is in clk parent list */
-       for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
-               if (clk->pclk_sel->pclk_info[i].pclk == parent) {
-                       found = 1;
-                       break;
-               }
-       }
-
-       if (!found)
-               return -EINVAL;
-
-       spin_lock_irqsave(&clocks_lock, flags);
-       /* reflect parent change in hardware */
-       val = readl(clk->pclk_sel->pclk_sel_reg);
-       val &= ~(clk->pclk_sel->pclk_sel_mask << clk->pclk_sel_shift);
-       val |= clk->pclk_sel->pclk_info[i].pclk_val << clk->pclk_sel_shift;
-       writel(val, clk->pclk_sel->pclk_sel_reg);
-       spin_unlock_irqrestore(&clocks_lock, flags);
-
-       /* reflect parent change in software */
-       clk_reparent(clk, &clk->pclk_sel->pclk_info[i]);
-
-       propagate_rate(clk, 0);
-       return 0;
-}
-EXPORT_SYMBOL(clk_set_parent);
-
-/**
- * clk_set_rate - set the clock rate for a clock source
- * @clk: clock source
- * @rate: desired clock rate in Hz
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
-       unsigned long flags;
-       int ret = -EINVAL;
-
-       if (!clk || !rate)
-               return -EFAULT;
-
-       if (clk->set_rate) {
-               spin_lock_irqsave(&clocks_lock, flags);
-               ret = clk->set_rate(clk, rate);
-               if (!ret)
-                       /* if successful -> propagate */
-                       propagate_rate(clk, 0);
-               spin_unlock_irqrestore(&clocks_lock, flags);
-       } else if (clk->pclk) {
-               u32 mult = clk->div_factor ? clk->div_factor : 1;
-               ret = clk_set_rate(clk->pclk, mult * rate);
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-/* registers clock in platform clock framework */
-void clk_register(struct clk_lookup *cl)
-{
-       struct clk *clk;
-       unsigned long flags;
-
-       if (!cl || !cl->clk)
-               return;
-       clk = cl->clk;
-
-       spin_lock_irqsave(&clocks_lock, flags);
-
-       INIT_LIST_HEAD(&clk->children);
-       if (clk->flags & ALWAYS_ENABLED)
-               clk->ops = NULL;
-       else if (!clk->ops)
-               clk->ops = &generic_clkops;
-
-       /* root clock don't have any parents */
-       if (!clk->pclk && !clk->pclk_sel) {
-               list_add(&clk->sibling, &root_clks);
-       } else if (clk->pclk && !clk->pclk_sel) {
-               /* add clocks with only one parent to parent's children list */
-               list_add(&clk->sibling, &clk->pclk->children);
-       } else {
-               /* clocks with more than one parent */
-               struct pclk_info *pclk_info;
-
-               pclk_info = pclk_info_get(clk);
-               if (!pclk_info) {
-                       pr_err("CLKDEV: invalid pclk info of clk with"
-                                       " %s dev_id and %s con_id\n",
-                                       cl->dev_id, cl->con_id);
-               } else {
-                       clk->pclk = pclk_info->pclk;
-                       list_add(&clk->sibling, &pclk_info->pclk->children);
-               }
-       }
-
-       spin_unlock_irqrestore(&clocks_lock, flags);
-
-       /* debugfs specific */
-#ifdef CONFIG_DEBUG_FS
-       list_add(&clk->node, &clocks);
-       clk->cl = cl;
-#endif
-
-       /* add clock to arm clockdev framework */
-       clkdev_add(cl);
-}
-
-/**
- * propagate_rate - recalculate and propagate all clocks to children
- * @pclk: parent clock required to be propogated
- * @on_init: flag for enabling clocks which are ENABLED_ON_INIT.
- *
- * Recalculates all children clocks
- */
-void propagate_rate(struct clk *pclk, int on_init)
-{
-       struct clk *clk, *_temp;
-       int ret = 0;
-
-       list_for_each_entry_safe(clk, _temp, &pclk->children, sibling) {
-               if (clk->recalc) {
-                       ret = clk->recalc(clk);
-                       /*
-                        * recalc will return error if clk out is not programmed
-                        * In this case configure default rate.
-                        */
-                       if (ret && clk->set_rate)
-                               clk->set_rate(clk, 0);
-               }
-               propagate_rate(clk, on_init);
-
-               if (!on_init)
-                       continue;
-
-               /* Enable clks enabled on init, in software view */
-               if (clk->flags & ENABLED_ON_INIT)
-                       do_clk_enable(clk);
-       }
-}
-
-/**
- * round_rate_index - return closest programmable rate index in rate_config tbl
- * @clk: ptr to clock structure
- * @drate: desired rate
- * @rate: final rate will be returned in this variable only.
- *
- * Finds index in rate_config for highest clk rate which is less than
- * requested rate. If there is no clk rate lesser than requested rate then
- * -EINVAL is returned. This routine assumes that rate_config is written
- * in incrementing order of clk rates.
- * If drate passed is zero then default rate is programmed.
- */
-static int
-round_rate_index(struct clk *clk, unsigned long drate, unsigned long *rate)
-{
-       unsigned long tmp = 0, prev_rate = 0;
-       int index;
-
-       if (!clk->calc_rate)
-               return -EFAULT;
-
-       if (!drate)
-               return -EINVAL;
-
-       /*
-        * This loops ends on two conditions:
-        * - as soon as clk is found with rate greater than requested rate.
-        * - if all clks in rate_config are smaller than requested rate.
-        */
-       for (index = 0; index < clk->rate_config.count; index++) {
-               prev_rate = tmp;
-               tmp = clk->calc_rate(clk, index);
-               if (drate < tmp) {
-                       index--;
-                       break;
-               }
-       }
-       /* return if can't find suitable clock */
-       if (index < 0) {
-               index = -EINVAL;
-               *rate = 0;
-       } else if (index == clk->rate_config.count) {
-               /* program with highest clk rate possible */
-               index = clk->rate_config.count - 1;
-               *rate = tmp;
-       } else
-               *rate = prev_rate;
-
-       return index;
-}
-
-/**
- * clk_round_rate - adjust a rate to the exact rate a clock can provide
- * @clk: clock source
- * @rate: desired clock rate in Hz
- *
- * Returns rounded clock rate in Hz, or negative errno.
- */
-long clk_round_rate(struct clk *clk, unsigned long drate)
-{
-       long rate = 0;
-       int index;
-
-       /*
-        * propagate call to parent who supports calc_rate. Similar approach is
-        * used in clk_set_rate.
-        */
-       if (!clk->calc_rate) {
-               u32 mult;
-               if (!clk->pclk)
-                       return clk->rate;
-
-               mult = clk->div_factor ? clk->div_factor : 1;
-               return clk_round_rate(clk->pclk, mult * drate) / mult;
-       }
-
-       index = round_rate_index(clk, drate, &rate);
-       if (index >= 0)
-               return rate;
-       else
-               return index;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-/*All below functions are called with lock held */
-
-/*
- * Calculates pll clk rate for specific value of mode, m, n and p
- *
- * In normal mode
- * rate = (2 * M[15:8] * Fin)/(N * 2^P)
- *
- * In Dithered mode
- * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P)
- */
-unsigned long pll_calc_rate(struct clk *clk, int index)
-{
-       unsigned long rate = clk->pclk->rate;
-       struct pll_rate_tbl *tbls = clk->rate_config.tbls;
-       unsigned int mode;
-
-       mode = tbls[index].mode ? 256 : 1;
-       return (((2 * rate / 10000) * tbls[index].m) /
-                       (mode * tbls[index].n * (1 << tbls[index].p))) * 10000;
-}
-
-/*
- * calculates current programmed rate of pll1
- *
- * In normal mode
- * rate = (2 * M[15:8] * Fin)/(N * 2^P)
- *
- * In Dithered mode
- * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P)
- */
-int pll_clk_recalc(struct clk *clk)
-{
-       struct pll_clk_config *config = clk->private_data;
-       unsigned int num = 2, den = 0, val, mode = 0;
-
-       mode = (readl(config->mode_reg) >> config->masks->mode_shift) &
-               config->masks->mode_mask;
-
-       val = readl(config->cfg_reg);
-       /* calculate denominator */
-       den = (val >> config->masks->div_p_shift) & config->masks->div_p_mask;
-       den = 1 << den;
-       den *= (val >> config->masks->div_n_shift) & config->masks->div_n_mask;
-
-       /* calculate numerator & denominator */
-       if (!mode) {
-               /* Normal mode */
-               num *= (val >> config->masks->norm_fdbk_m_shift) &
-                       config->masks->norm_fdbk_m_mask;
-       } else {
-               /* Dithered mode */
-               num *= (val >> config->masks->dith_fdbk_m_shift) &
-                       config->masks->dith_fdbk_m_mask;
-               den *= 256;
-       }
-
-       if (!den)
-               return -EINVAL;
-
-       clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000;
-       return 0;
-}
-
-/*
- * Configures new clock rate of pll
- */
-int pll_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
-       struct pll_rate_tbl *tbls = clk->rate_config.tbls;
-       struct pll_clk_config *config = clk->private_data;
-       unsigned long val, rate;
-       int i;
-
-       i = round_rate_index(clk, desired_rate, &rate);
-       if (i < 0)
-               return i;
-
-       val = readl(config->mode_reg) &
-               ~(config->masks->mode_mask << config->masks->mode_shift);
-       val |= (tbls[i].mode & config->masks->mode_mask) <<
-               config->masks->mode_shift;
-       writel(val, config->mode_reg);
-
-       val = readl(config->cfg_reg) &
-               ~(config->masks->div_p_mask << config->masks->div_p_shift);
-       val |= (tbls[i].p & config->masks->div_p_mask) <<
-               config->masks->div_p_shift;
-       val &= ~(config->masks->div_n_mask << config->masks->div_n_shift);
-       val |= (tbls[i].n & config->masks->div_n_mask) <<
-               config->masks->div_n_shift;
-       val &= ~(config->masks->dith_fdbk_m_mask <<
-                       config->masks->dith_fdbk_m_shift);
-       if (tbls[i].mode)
-               val |= (tbls[i].m & config->masks->dith_fdbk_m_mask) <<
-                       config->masks->dith_fdbk_m_shift;
-       else
-               val |= (tbls[i].m & config->masks->norm_fdbk_m_mask) <<
-                       config->masks->norm_fdbk_m_shift;
-
-       writel(val, config->cfg_reg);
-
-       clk->rate = rate;
-
-       return 0;
-}
-
-/*
- * Calculates ahb, apb clk rate for specific value of div
- */
-unsigned long bus_calc_rate(struct clk *clk, int index)
-{
-       unsigned long rate = clk->pclk->rate;
-       struct bus_rate_tbl *tbls = clk->rate_config.tbls;
-
-       return rate / (tbls[index].div + 1);
-}
-
-/* calculates current programmed rate of ahb or apb bus */
-int bus_clk_recalc(struct clk *clk)
-{
-       struct bus_clk_config *config = clk->private_data;
-       unsigned int div;
-
-       div = ((readl(config->reg) >> config->masks->shift) &
-                       config->masks->mask) + 1;
-
-       if (!div)
-               return -EINVAL;
-
-       clk->rate = (unsigned long)clk->pclk->rate / div;
-       return 0;
-}
-
-/* Configures new clock rate of AHB OR APB bus */
-int bus_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
-       struct bus_rate_tbl *tbls = clk->rate_config.tbls;
-       struct bus_clk_config *config = clk->private_data;
-       unsigned long val, rate;
-       int i;
-
-       i = round_rate_index(clk, desired_rate, &rate);
-       if (i < 0)
-               return i;
-
-       val = readl(config->reg) &
-               ~(config->masks->mask << config->masks->shift);
-       val |= (tbls[i].div & config->masks->mask) << config->masks->shift;
-       writel(val, config->reg);
-
-       clk->rate = rate;
-
-       return 0;
-}
-
-/*
- * gives rate for different values of eq, x and y
- *
- * Fout from synthesizer can be given from two equations:
- * Fout1 = (Fin * X/Y)/2               EQ1
- * Fout2 = Fin * X/Y                   EQ2
- */
-unsigned long aux_calc_rate(struct clk *clk, int index)
-{
-       unsigned long rate = clk->pclk->rate;
-       struct aux_rate_tbl *tbls = clk->rate_config.tbls;
-       u8 eq = tbls[index].eq ? 1 : 2;
-
-       return (((rate/10000) * tbls[index].xscale) /
-                       (tbls[index].yscale * eq)) * 10000;
-}
-
-/*
- * calculates current programmed rate of auxiliary synthesizers
- * used by: UART, FIRDA
- *
- * Fout from synthesizer can be given from two equations:
- * Fout1 = (Fin * X/Y)/2
- * Fout2 = Fin * X/Y
- *
- * Selection of eqn 1 or 2 is programmed in register
- */
-int aux_clk_recalc(struct clk *clk)
-{
-       struct aux_clk_config *config = clk->private_data;
-       unsigned int num = 1, den = 1, val, eqn;
-
-       val = readl(config->synth_reg);
-
-       eqn = (val >> config->masks->eq_sel_shift) &
-               config->masks->eq_sel_mask;
-       if (eqn == config->masks->eq1_mask)
-               den *= 2;
-
-       /* calculate numerator */
-       num = (val >> config->masks->xscale_sel_shift) &
-               config->masks->xscale_sel_mask;
-
-       /* calculate denominator */
-       den *= (val >> config->masks->yscale_sel_shift) &
-               config->masks->yscale_sel_mask;
-
-       if (!den)
-               return -EINVAL;
-
-       clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000;
-       return 0;
-}
-
-/* Configures new clock rate of auxiliary synthesizers used by: UART, FIRDA*/
-int aux_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
-       struct aux_rate_tbl *tbls = clk->rate_config.tbls;
-       struct aux_clk_config *config = clk->private_data;
-       unsigned long val, rate;
-       int i;
-
-       i = round_rate_index(clk, desired_rate, &rate);
-       if (i < 0)
-               return i;
-
-       val = readl(config->synth_reg) &
-               ~(config->masks->eq_sel_mask << config->masks->eq_sel_shift);
-       val |= (tbls[i].eq & config->masks->eq_sel_mask) <<
-               config->masks->eq_sel_shift;
-       val &= ~(config->masks->xscale_sel_mask <<
-                       config->masks->xscale_sel_shift);
-       val |= (tbls[i].xscale & config->masks->xscale_sel_mask) <<
-               config->masks->xscale_sel_shift;
-       val &= ~(config->masks->yscale_sel_mask <<
-                       config->masks->yscale_sel_shift);
-       val |= (tbls[i].yscale & config->masks->yscale_sel_mask) <<
-               config->masks->yscale_sel_shift;
-       writel(val, config->synth_reg);
-
-       clk->rate = rate;
-
-       return 0;
-}
-
-/*
- * Calculates gpt clk rate for different values of mscale and nscale
- *
- * Fout= Fin/((2 ^ (N+1)) * (M+1))
- */
-unsigned long gpt_calc_rate(struct clk *clk, int index)
-{
-       unsigned long rate = clk->pclk->rate;
-       struct gpt_rate_tbl *tbls = clk->rate_config.tbls;
-
-       return rate / ((1 << (tbls[index].nscale + 1)) *
-                       (tbls[index].mscale + 1));
-}
-
-/*
- * calculates current programmed rate of gpt synthesizers
- * Fout from synthesizer can be given from below equations:
- * Fout= Fin/((2 ^ (N+1)) * (M+1))
- */
-int gpt_clk_recalc(struct clk *clk)
-{
-       struct gpt_clk_config *config = clk->private_data;
-       unsigned int div = 1, val;
-
-       val = readl(config->synth_reg);
-       div += (val >> config->masks->mscale_sel_shift) &
-               config->masks->mscale_sel_mask;
-       div *= 1 << (((val >> config->masks->nscale_sel_shift) &
-                               config->masks->nscale_sel_mask) + 1);
-
-       if (!div)
-               return -EINVAL;
-
-       clk->rate = (unsigned long)clk->pclk->rate / div;
-       return 0;
-}
-
-/* Configures new clock rate of gptiliary synthesizers used by: UART, FIRDA*/
-int gpt_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
-       struct gpt_rate_tbl *tbls = clk->rate_config.tbls;
-       struct gpt_clk_config *config = clk->private_data;
-       unsigned long val, rate;
-       int i;
-
-       i = round_rate_index(clk, desired_rate, &rate);
-       if (i < 0)
-               return i;
-
-       val = readl(config->synth_reg) & ~(config->masks->mscale_sel_mask <<
-                       config->masks->mscale_sel_shift);
-       val |= (tbls[i].mscale & config->masks->mscale_sel_mask) <<
-               config->masks->mscale_sel_shift;
-       val &= ~(config->masks->nscale_sel_mask <<
-                       config->masks->nscale_sel_shift);
-       val |= (tbls[i].nscale & config->masks->nscale_sel_mask) <<
-               config->masks->nscale_sel_shift;
-       writel(val, config->synth_reg);
-
-       clk->rate = rate;
-
-       return 0;
-}
-
-/*
- * Calculates clcd clk rate for different values of div
- *
- * Fout from synthesizer can be given from below equation:
- * Fout= Fin/2*div (division factor)
- * div is 17 bits:-
- *     0-13 (fractional part)
- *     14-16 (integer part)
- * To calculate Fout we left shift val by 14 bits and divide Fin by
- * complete div (including fractional part) and then right shift the
- * result by 14 places.
- */
-unsigned long clcd_calc_rate(struct clk *clk, int index)
-{
-       unsigned long rate = clk->pclk->rate;
-       struct clcd_rate_tbl *tbls = clk->rate_config.tbls;
-
-       rate /= 1000;
-       rate <<= 12;
-       rate /= (2 * tbls[index].div);
-       rate >>= 12;
-       rate *= 1000;
-
-       return rate;
-}
-
-/*
- * calculates current programmed rate of clcd synthesizer
- * Fout from synthesizer can be given from below equation:
- * Fout= Fin/2*div (division factor)
- * div is 17 bits:-
- *     0-13 (fractional part)
- *     14-16 (integer part)
- * To calculate Fout we left shift val by 14 bits and divide Fin by
- * complete div (including fractional part) and then right shift the
- * result by 14 places.
- */
-int clcd_clk_recalc(struct clk *clk)
-{
-       struct clcd_clk_config *config = clk->private_data;
-       unsigned int div = 1;
-       unsigned long prate;
-       unsigned int val;
-
-       val = readl(config->synth_reg);
-       div = (val >> config->masks->div_factor_shift) &
-               config->masks->div_factor_mask;
-
-       if (!div)
-               return -EINVAL;
-
-       prate = clk->pclk->rate / 1000; /* first level division, make it KHz */
-
-       clk->rate = (((unsigned long)prate << 12) / (2 * div)) >> 12;
-       clk->rate *= 1000;
-       return 0;
-}
-
-/* Configures new clock rate of auxiliary synthesizers used by: UART, FIRDA*/
-int clcd_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
-       struct clcd_rate_tbl *tbls = clk->rate_config.tbls;
-       struct clcd_clk_config *config = clk->private_data;
-       unsigned long val, rate;
-       int i;
-
-       i = round_rate_index(clk, desired_rate, &rate);
-       if (i < 0)
-               return i;
-
-       val = readl(config->synth_reg) & ~(config->masks->div_factor_mask <<
-                       config->masks->div_factor_shift);
-       val |= (tbls[i].div & config->masks->div_factor_mask) <<
-               config->masks->div_factor_shift;
-       writel(val, config->synth_reg);
-
-       clk->rate = rate;
-
-       return 0;
-}
-
-/*
- * Used for clocks that always have value as the parent clock divided by a
- * fixed divisor
- */
-int follow_parent(struct clk *clk)
-{
-       unsigned int div_factor = (clk->div_factor < 1) ? 1 : clk->div_factor;
-
-       clk->rate = clk->pclk->rate/div_factor;
-       return 0;
-}
-
-/**
- * recalc_root_clocks - recalculate and propagate all root clocks
- *
- * Recalculates all root clocks (clocks with no parent), which if the
- * clock's .recalc is set correctly, should also propagate their rates.
- */
-void recalc_root_clocks(void)
-{
-       struct clk *pclk;
-       unsigned long flags;
-       int ret = 0;
-
-       spin_lock_irqsave(&clocks_lock, flags);
-       list_for_each_entry(pclk, &root_clks, sibling) {
-               if (pclk->recalc) {
-                       ret = pclk->recalc(pclk);
-                       /*
-                        * recalc will return error if clk out is not programmed
-                        * In this case configure default clock.
-                        */
-                       if (ret && pclk->set_rate)
-                               pclk->set_rate(pclk, 0);
-               }
-               propagate_rate(pclk, 1);
-               /* Enable clks enabled on init, in software view */
-               if (pclk->flags & ENABLED_ON_INIT)
-                       do_clk_enable(pclk);
-       }
-       spin_unlock_irqrestore(&clocks_lock, flags);
-}
-
-void __init clk_init(void)
-{
-       recalc_root_clocks();
-}
-
-#ifdef CONFIG_DEBUG_FS
-/*
- *     debugfs support to trace clock tree hierarchy and attributes
- */
-static struct dentry *clk_debugfs_root;
-static int clk_debugfs_register_one(struct clk *c)
-{
-       int err;
-       struct dentry *d;
-       struct clk *pa = c->pclk;
-       char s[255];
-       char *p = s;
-
-       if (c) {
-               if (c->cl->con_id)
-                       p += sprintf(p, "%s", c->cl->con_id);
-               if (c->cl->dev_id)
-                       p += sprintf(p, "%s", c->cl->dev_id);
-       }
-       d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
-       if (!d)
-               return -ENOMEM;
-       c->dent = d;
-
-       d = debugfs_create_u32("usage_count", S_IRUGO, c->dent,
-                       (u32 *)&c->usage_count);
-       if (!d) {
-               err = -ENOMEM;
-               goto err_out;
-       }
-       d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
-       if (!d) {
-               err = -ENOMEM;
-               goto err_out;
-       }
-       d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
-       if (!d) {
-               err = -ENOMEM;
-               goto err_out;
-       }
-       return 0;
-
-err_out:
-       debugfs_remove_recursive(c->dent);
-       return err;
-}
-
-static int clk_debugfs_register(struct clk *c)
-{
-       int err;
-       struct clk *pa = c->pclk;
-
-       if (pa && !pa->dent) {
-               err = clk_debugfs_register(pa);
-               if (err)
-                       return err;
-       }
-
-       if (!c->dent) {
-               err = clk_debugfs_register_one(c);
-               if (err)
-                       return err;
-       }
-       return 0;
-}
-
-static int __init clk_debugfs_init(void)
-{
-       struct clk *c;
-       struct dentry *d;
-       int err;
-
-       d = debugfs_create_dir("clock", NULL);
-       if (!d)
-               return -ENOMEM;
-       clk_debugfs_root = d;
-
-       list_for_each_entry(c, &clocks, node) {
-               err = clk_debugfs_register(c);
-               if (err)
-                       goto err_out;
-       }
-       return 0;
-err_out:
-       debugfs_remove_recursive(clk_debugfs_root);
-       return err;
-}
-late_initcall(clk_debugfs_init);
-
-static int clk_debugfs_reparent(struct clk *c)
-{
-       debugfs_remove(c->dent);
-       return clk_debugfs_register_one(c);
-}
-#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/arm/plat-spear/include/plat/clock.h b/arch/arm/plat-spear/include/plat/clock.h
deleted file mode 100644 (file)
index 0062baf..0000000
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * arch/arm/plat-spear/include/plat/clock.h
- *
- * Clock framework definitions for SPEAr platform
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __PLAT_CLOCK_H
-#define __PLAT_CLOCK_H
-
-#include <linux/list.h>
-#include <linux/clkdev.h>
-#include <linux/types.h>
-
-/* clk structure flags */
-#define        ALWAYS_ENABLED          (1 << 0) /* clock always enabled */
-#define        RESET_TO_ENABLE         (1 << 1) /* reset register bit to enable clk */
-#define        ENABLED_ON_INIT         (1 << 2) /* clocks enabled at init */
-
-/**
- * struct clkops - clock operations
- * @enable: pointer to clock enable function
- * @disable: pointer to clock disable function
- */
-struct clkops {
-       int (*enable) (struct clk *);
-       void (*disable) (struct clk *);
-};
-
-/**
- * struct pclk_info - parents info
- * @pclk: pointer to parent clk
- * @pclk_val: value to be written for selecting this parent
- */
-struct pclk_info {
-       struct clk *pclk;
-       u8 pclk_val;
-};
-
-/**
- * struct pclk_sel - parents selection configuration
- * @pclk_info: pointer to array of parent clock info
- * @pclk_count: number of parents
- * @pclk_sel_reg: register for selecting a parent
- * @pclk_sel_mask: mask for selecting parent (can be used to clear bits also)
- */
-struct pclk_sel {
-       struct pclk_info *pclk_info;
-       u8 pclk_count;
-       void __iomem *pclk_sel_reg;
-       unsigned int pclk_sel_mask;
-};
-
-/**
- * struct rate_config - clk rate configurations
- * @tbls: array of device specific clk rate tables, in ascending order of rates
- * @count: size of tbls array
- * @default_index: default setting when originally disabled
- */
-struct rate_config {
-       void *tbls;
-       u8 count;
-       u8 default_index;
-};
-
-/**
- * struct clk - clock structure
- * @usage_count: num of users who enabled this clock
- * @flags: flags for clock properties
- * @rate: programmed clock rate in Hz
- * @en_reg: clk enable/disable reg
- * @en_reg_bit: clk enable/disable bit
- * @ops: clk enable/disable ops - generic_clkops selected if NULL
- * @recalc: pointer to clock rate recalculate function
- * @set_rate: pointer to clock set rate function
- * @calc_rate: pointer to clock get rate function for index
- * @rate_config: rate configuration information, used by set_rate
- * @div_factor: division factor to parent clock.
- * @pclk: current parent clk
- * @pclk_sel: pointer to parent selection structure
- * @pclk_sel_shift: register shift for selecting parent of this clock
- * @children: list for childrens or this clock
- * @sibling: node for list of clocks having same parents
- * @private_data: clock specific private data
- * @node: list to maintain clocks linearly
- * @cl: clocklook up associated with this clock
- * @dent: object for debugfs
- */
-struct clk {
-       unsigned int usage_count;
-       unsigned int flags;
-       unsigned long rate;
-       void __iomem *en_reg;
-       u8 en_reg_bit;
-       const struct clkops *ops;
-       int (*recalc) (struct clk *);
-       int (*set_rate) (struct clk *, unsigned long rate);
-       unsigned long (*calc_rate)(struct clk *, int index);
-       struct rate_config rate_config;
-       unsigned int div_factor;
-
-       struct clk *pclk;
-       struct pclk_sel *pclk_sel;
-       unsigned int pclk_sel_shift;
-
-       struct list_head children;
-       struct list_head sibling;
-       void *private_data;
-#ifdef CONFIG_DEBUG_FS
-       struct list_head node;
-       struct clk_lookup *cl;
-       struct dentry *dent;
-#endif
-};
-
-/* pll configuration structure */
-struct pll_clk_masks {
-       u32 mode_mask;
-       u32 mode_shift;
-
-       u32 norm_fdbk_m_mask;
-       u32 norm_fdbk_m_shift;
-       u32 dith_fdbk_m_mask;
-       u32 dith_fdbk_m_shift;
-       u32 div_p_mask;
-       u32 div_p_shift;
-       u32 div_n_mask;
-       u32 div_n_shift;
-};
-
-struct pll_clk_config {
-       void __iomem *mode_reg;
-       void __iomem *cfg_reg;
-       struct pll_clk_masks *masks;
-};
-
-/* pll clk rate config structure */
-struct pll_rate_tbl {
-       u8 mode;
-       u16 m;
-       u8 n;
-       u8 p;
-};
-
-/* ahb and apb bus configuration structure */
-struct bus_clk_masks {
-       u32 mask;
-       u32 shift;
-};
-
-struct bus_clk_config {
-       void __iomem *reg;
-       struct bus_clk_masks *masks;
-};
-
-/* ahb and apb clk bus rate config structure */
-struct bus_rate_tbl {
-       u8 div;
-};
-
-/* Aux clk configuration structure: applicable to UART and FIRDA */
-struct aux_clk_masks {
-       u32 eq_sel_mask;
-       u32 eq_sel_shift;
-       u32 eq1_mask;
-       u32 eq2_mask;
-       u32 xscale_sel_mask;
-       u32 xscale_sel_shift;
-       u32 yscale_sel_mask;
-       u32 yscale_sel_shift;
-};
-
-struct aux_clk_config {
-       void __iomem *synth_reg;
-       struct aux_clk_masks *masks;
-};
-
-/* aux clk rate config structure */
-struct aux_rate_tbl {
-       u16 xscale;
-       u16 yscale;
-       u8 eq;
-};
-
-/* GPT clk configuration structure */
-struct gpt_clk_masks {
-       u32 mscale_sel_mask;
-       u32 mscale_sel_shift;
-       u32 nscale_sel_mask;
-       u32 nscale_sel_shift;
-};
-
-struct gpt_clk_config {
-       void __iomem *synth_reg;
-       struct gpt_clk_masks *masks;
-};
-
-/* gpt clk rate config structure */
-struct gpt_rate_tbl {
-       u16 mscale;
-       u16 nscale;
-};
-
-/* clcd clk configuration structure */
-struct clcd_synth_masks {
-       u32 div_factor_mask;
-       u32 div_factor_shift;
-};
-
-struct clcd_clk_config {
-       void __iomem *synth_reg;
-       struct clcd_synth_masks *masks;
-};
-
-/* clcd clk rate config structure */
-struct clcd_rate_tbl {
-       u16 div;
-};
-
-/* platform specific clock functions */
-void __init clk_init(void);
-void clk_register(struct clk_lookup *cl);
-void recalc_root_clocks(void);
-
-/* clock recalc & set rate functions */
-int follow_parent(struct clk *clk);
-unsigned long pll_calc_rate(struct clk *clk, int index);
-int pll_clk_recalc(struct clk *clk);
-int pll_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long bus_calc_rate(struct clk *clk, int index);
-int bus_clk_recalc(struct clk *clk);
-int bus_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long gpt_calc_rate(struct clk *clk, int index);
-int gpt_clk_recalc(struct clk *clk);
-int gpt_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long aux_calc_rate(struct clk *clk, int index);
-int aux_clk_recalc(struct clk *clk);
-int aux_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long clcd_calc_rate(struct clk *clk, int index);
-int clcd_clk_recalc(struct clk *clk);
-int clcd_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-
-#endif /* __PLAT_CLOCK_H */
index 4471a232713a8927195ab5d8748ad08e9ec6b58b..ea0a61302b7ef56ff1650d2758dbe449d34deac9 100644 (file)
@@ -16,6 +16,7 @@
 #include <mach/spear.h>
 #include <mach/generic.h>
 
+#define SPEAR13XX_SYS_SW_RES                   (VA_MISC_BASE + 0x204)
 void spear_restart(char mode, const char *cmd)
 {
        if (mode == 's') {
@@ -23,6 +24,10 @@ void spear_restart(char mode, const char *cmd)
                soft_restart(0);
        } else {
                /* hardware reset, Use on-chip reset capability */
+#ifdef CONFIG_ARCH_SPEAR13XX
+               writel_relaxed(0x01, SPEAR13XX_SYS_SW_RES);
+#else
                sysctl_soft_reset((void __iomem *)VA_SPEAR_SYS_CTRL_BASE);
+#endif
        }
 }
index a3164d1647fda73bb905d746336d0493b748dffb..03321af5de9f853c57c8772d4d182d6a95a6594f 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/ioport.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
 #include <linux/time.h>
 #include <linux/irq.h>
 #include <asm/mach/time.h>
@@ -197,19 +199,32 @@ static void __init spear_clockevent_init(int irq)
        setup_irq(irq, &spear_timer_irq);
 }
 
-void __init spear_setup_timer(resource_size_t base, int irq)
+const static struct of_device_id timer_of_match[] __initconst = {
+       { .compatible = "st,spear-timer", },
+       { },
+};
+
+void __init spear_setup_of_timer(void)
 {
-       int ret;
+       struct device_node *np;
+       int irq, ret;
+
+       np = of_find_matching_node(NULL, timer_of_match);
+       if (!np) {
+               pr_err("%s: No timer passed via DT\n", __func__);
+               return;
+       }
 
-       if (!request_mem_region(base, SZ_1K, "gpt0")) {
-               pr_err("%s:cannot get IO addr\n", __func__);
+       irq = irq_of_parse_and_map(np, 0);
+       if (!irq) {
+               pr_err("%s: No irq passed for timer via DT\n", __func__);
                return;
        }
 
-       gpt_base = ioremap(base, SZ_1K);
+       gpt_base = of_iomap(np, 0);
        if (!gpt_base) {
-               pr_err("%s:ioremap failed for gpt\n", __func__);
-               goto err_mem;
+               pr_err("%s: of iomap failed\n", __func__);
+               return;
        }
 
        gpt_clk = clk_get_sys("gpt0", NULL);
@@ -218,10 +233,10 @@ void __init spear_setup_timer(resource_size_t base, int irq)
                goto err_iomap;
        }
 
-       ret = clk_enable(gpt_clk);
+       ret = clk_prepare_enable(gpt_clk);
        if (ret < 0) {
-               pr_err("%s:couldn't enable gpt clock\n", __func__);
-               goto err_clk;
+               pr_err("%s:couldn't prepare-enable gpt clock\n", __func__);
+               goto err_prepare_enable_clk;
        }
 
        spear_clockevent_init(irq);
@@ -229,10 +244,8 @@ void __init spear_setup_timer(resource_size_t base, int irq)
 
        return;
 
-err_clk:
+err_prepare_enable_clk:
        clk_put(gpt_clk);
 err_iomap:
        iounmap(gpt_base);
-err_mem:
-       release_mem_region(base, SZ_1K);
 }
diff --git a/arch/avr32/include/asm/kvm_para.h b/arch/avr32/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index 74667bfc88cc7676e4c43332b40705908c747021..9ba9e749b3f34d7c2760d1d9784ff9ede9528ef3 100644 (file)
@@ -17,9 +17,6 @@
 typedef unsigned short  __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short  __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short  __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index 169268c40ae2552df9430128e4ae6e6964249989..df28841813139f658a3511daaacb5aa14012dd80 100644 (file)
@@ -281,7 +281,7 @@ syscall_exit_work:
        ld.w    r1, r0[TI_flags]
        rjmp    1b
 
-2:     mov     r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NOTIFY_RESUME
+2:     mov     r2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
        tst     r1, r2
        breq    3f
        unmask_interrupts
@@ -587,7 +587,7 @@ fault_exit_work:
        ld.w    r1, r0[TI_flags]
        rjmp    fault_exit_work
 
-1:     mov     r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
+1:     mov     r2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
        tst     r1, r2
        breq    2f
        unmask_interrupts
index ae386c304beefe153fb162e2b6909edc84dd5ef4..c140f9b41dce48fed05240ff531dc22e42f29095 100644 (file)
@@ -22,8 +22,6 @@
 #include <asm/ucontext.h>
 #include <asm/syscalls.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
                               struct pt_regs *regs)
 {
@@ -89,7 +87,6 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -224,30 +221,27 @@ static inline void setup_syscall_restart(struct pt_regs *regs)
 
 static inline void
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
-             sigset_t *oldset, struct pt_regs *regs, int syscall)
+             struct pt_regs *regs, int syscall)
 {
        int ret;
 
        /*
         * Set up the stack frame
         */
-       ret = setup_rt_frame(sig, ka, info, oldset, regs);
+       ret = setup_rt_frame(sig, ka, info, sigmask_to_save(), regs);
 
        /*
         * Check that the resulting registers are sane
         */
        ret |= !valid_user_regs(regs);
 
-       if (ret != 0) {
-               force_sigsegv(sig, current);
-               return;
-       }
-
        /*
         * Block the signal if we were successful.
         */
-       block_sigmask(ka, sig);
-       clear_thread_flag(TIF_RESTORE_SIGMASK);
+       if (ret != 0)
+               force_sigsegv(sig, current);
+       else
+               signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -255,7 +249,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
  * doesn't want to handle. Thus you cannot kill init even with a
  * SIGKILL even by mistake.
  */
-int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall)
+static void do_signal(struct pt_regs *regs, int syscall)
 {
        siginfo_t info;
        int signr;
@@ -267,12 +261,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall)
         * without doing anything if so.
         */
        if (!user_mode(regs))
-               return 0;
-
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else if (!oldset)
-               oldset = &current->blocked;
+               return;
 
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (syscall) {
@@ -297,15 +286,11 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall)
 
        if (signr == 0) {
                /* No signal to deliver -- put the saved sigmask back */
-               if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-                       clear_thread_flag(TIF_RESTORE_SIGMASK);
-                       sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-               }
-               return 0;
+               restore_saved_sigmask();
+               return;
        }
 
-       handle_signal(signr, &ka, &info, oldset, regs, syscall);
-       return 1;
+       handle_signal(signr, &ka, &info, regs, syscall);
 }
 
 asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
@@ -315,13 +300,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
        if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR)
                syscall = 1;
 
-       if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
-               do_signal(regs, &current->blocked, syscall);
+       if (ti->flags & _TIF_SIGPENDING)
+               do_signal(regs, syscall);
 
        if (ti->flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
diff --git a/arch/blackfin/include/asm/kvm_para.h b/arch/blackfin/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index 41bc1875c4d7fd367bbbea5432b9332a2821557f..1bd3436db6a7b7d080bdf4bcb1a09db621fde1b0 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned int __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index 02560fd8a12182f019802e2f7b4c392110c2eb66..53ad10005ae37db4fbf6c3d6cb21eee6485b8db6 100644 (file)
@@ -100,7 +100,6 @@ static inline struct thread_info *current_thread_info(void)
                                           TIF_NEED_RESCHED */
 #define TIF_MEMDIE             4       /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK    5       /* restore signal mask in do_signal() */
-#define TIF_FREEZE             6       /* is freezing for suspend */
 #define TIF_IRQ_SYNC           7       /* sync pipeline stage */
 #define TIF_NOTIFY_RESUME      8       /* callback before returning to user */
 #define TIF_SINGLESTEP         9
@@ -111,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK   (1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE            (1<<TIF_FREEZE)
 #define _TIF_IRQ_SYNC          (1<<TIF_IRQ_SYNC)
 #define _TIF_NOTIFY_RESUME     (1<<TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP                (1<<TIF_SINGLESTEP)
index e5bbc1a5edc242dae7221168f771bbad704872d3..6682b73a8523f789bf2e3fa722f068f1ef4ebf79 100644 (file)
@@ -19,8 +19,6 @@
 #include <asm/fixed_code.h>
 #include <asm/syscall.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* Location of the trace bit in SYSCFG. */
 #define TRACE_BITS 0x0001
 
@@ -98,7 +96,6 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (rt_restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
@@ -190,17 +187,22 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info,
        err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
        if (err)
-               goto give_sigsegv;
+               return -EFAULT;
 
        /* Set up registers for signal handler */
-       wrusp((unsigned long)frame);
        if (current->personality & FDPIC_FUNCPTRS) {
                struct fdpic_func_descriptor __user *funcptr =
                        (struct fdpic_func_descriptor *) ka->sa.sa_handler;
-               __get_user(regs->pc, &funcptr->text);
-               __get_user(regs->p3, &funcptr->GOT);
+               u32 pc, p3;
+               err |= __get_user(pc, &funcptr->text);
+               err |= __get_user(p3, &funcptr->GOT);
+               if (err)
+                       return -EFAULT;
+               regs->pc = pc;
+               regs->p3 = p3;
        } else
                regs->pc = (unsigned long)ka->sa.sa_handler;
+       wrusp((unsigned long)frame);
        regs->rets = SIGRETURN_STUB;
 
        regs->r0 = frame->sig;
@@ -208,10 +210,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info,
        regs->r2 = (unsigned long)(&frame->uc);
 
        return 0;
-
- give_sigsegv:
-       force_sigsegv(sig, current);
-       return -EFAULT;
 }
 
 static inline void
@@ -247,24 +245,21 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
 /*
  * OK, we're invoking a handler
  */
-static int
+static void
 handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
-             sigset_t *oldset, struct pt_regs *regs)
+             struct pt_regs *regs)
 {
-       int ret;
-
        /* are we from a system call? to see pt_regs->orig_p0 */
        if (regs->orig_p0 >= 0)
                /* If so, check system call restarting.. */
                handle_restart(regs, ka, 1);
 
        /* set up the stack frame */
-       ret = setup_rt_frame(sig, ka, info, oldset, regs);
-
-       if (ret == 0)
-               block_sigmask(ka, sig);
-
-       return ret;
+       if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0)
+               force_sigsegv(sig, current);
+       else 
+               signal_delivered(sig, info, ka, regs,
+                               test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -281,37 +276,16 @@ asmlinkage void do_signal(struct pt_regs *regs)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
 
        current->thread.esp0 = (unsigned long)regs;
 
-       if (try_to_freeze())
-               goto no_signal;
-
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
-               if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-                       tracehook_signal_handler(signr, &info, &ka, regs,
-                               test_thread_flag(TIF_SINGLESTEP));
-               }
-
+               handle_signal(signr, &info, &ka, regs);
                return;
        }
 
- no_signal:
        /* Did we come from a system call? */
        if (regs->orig_p0 >= 0)
                /* Restart the system call - no handlers present */
@@ -319,10 +293,7 @@ asmlinkage void do_signal(struct pt_regs *regs)
 
        /* if there's no signal to deliver, we just put the saved sigmask
         * back */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 /*
@@ -330,14 +301,12 @@ asmlinkage void do_signal(struct pt_regs *regs)
  */
 asmlinkage void do_notify_resume(struct pt_regs *regs)
 {
-       if (test_thread_flag(TIF_SIGPENDING) || test_thread_flag(TIF_RESTORE_SIGMASK))
+       if (test_thread_flag(TIF_SIGPENDING))
                do_signal(regs);
 
        if (test_thread_flag(TIF_NOTIFY_RESUME)) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
 
index 44bbf2f564cb7953d076a63b3940b400a2b330d8..f7f7a18abca915773ad291bd74a8ef192ffde333 100644 (file)
@@ -10,6 +10,8 @@
 #include <linux/hardirq.h>
 #include <linux/thread_info.h>
 #include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
@@ -27,8 +29,7 @@ void decode_address(char *buf, unsigned long address)
 {
        struct task_struct *p;
        struct mm_struct *mm;
-       unsigned long flags, offset;
-       unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
+       unsigned long offset;
        struct rb_node *n;
 
 #ifdef CONFIG_KALLSYMS
@@ -112,17 +113,17 @@ void decode_address(char *buf, unsigned long address)
         * mappings of all our processes and see if we can't be a whee
         * bit more specific
         */
-       write_lock_irqsave(&tasklist_lock, flags);
+       read_lock(&tasklist_lock);
        for_each_process(p) {
-               mm = (in_atomic ? p->mm : get_task_mm(p));
-               if (!mm)
-                       continue;
+               struct task_struct *t;
 
-               if (!down_read_trylock(&mm->mmap_sem)) {
-                       if (!in_atomic)
-                               mmput(mm);
+               t = find_lock_task_mm(p);
+               if (!t)
                        continue;
-               }
+
+               mm = t->mm;
+               if (!down_read_trylock(&mm->mmap_sem))
+                       goto __continue;
 
                for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
                        struct vm_area_struct *vma;
@@ -131,7 +132,7 @@ void decode_address(char *buf, unsigned long address)
 
                        if (address >= vma->vm_start && address < vma->vm_end) {
                                char _tmpbuf[256];
-                               char *name = p->comm;
+                               char *name = t->comm;
                                struct file *file = vma->vm_file;
 
                                if (file) {
@@ -164,8 +165,7 @@ void decode_address(char *buf, unsigned long address)
                                                name, vma->vm_start, vma->vm_end);
 
                                up_read(&mm->mmap_sem);
-                               if (!in_atomic)
-                                       mmput(mm);
+                               task_unlock(t);
 
                                if (buf[0] == '\0')
                                        sprintf(buf, "[ %s ] dynamic memory", name);
@@ -175,8 +175,8 @@ void decode_address(char *buf, unsigned long address)
                }
 
                up_read(&mm->mmap_sem);
-               if (!in_atomic)
-                       mmput(mm);
+__continue:
+               task_unlock(t);
        }
 
        /*
@@ -186,7 +186,7 @@ void decode_address(char *buf, unsigned long address)
        sprintf(buf, "/* kernel dynamic memory */");
 
 done:
-       write_unlock_irqrestore(&tasklist_lock, flags);
+       read_unlock(&tasklist_lock);
 }
 
 #define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
index f6ffd6f054c398201dfea4e21aaf4c3b6d044719..0b74218fdd3a89ec5ee00cf4e74c09ee4ddbc0ed 100644 (file)
@@ -248,8 +248,6 @@ static struct platform_device bfin_uart0_device = {
 
 #if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
 
-const char *part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition bfin_plat_nand_partitions[] = {
        {
         .name = "params(nand)",
@@ -289,7 +287,6 @@ static struct platform_nand_data bfin_plat_nand_data = {
        .chip = {
                 .nr_chips = 1,
                 .chip_delay = 30,
-                .part_probe_types = part_probes,
                 .partitions = bfin_plat_nand_partitions,
                 .nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions),
                 },
index 80aa2535e2c9f2be80c6f78d6a49c832f7e8be3d..04c2fbe41a7ff3e7193532b1e809b8dbcb7474ee 100644 (file)
@@ -711,8 +711,6 @@ ENTRY(_system_call)
        jump .Lresume_userspace_1;
 
 .Lsyscall_sigpending:
-       cc = BITTST(r7, TIF_RESTORE_SIGMASK);
-       if cc jump .Lsyscall_do_signals;
        cc = BITTST(r7, TIF_SIGPENDING);
        if cc jump .Lsyscall_do_signals;
        cc = BITTST(r7, TIF_NOTIFY_RESUME);
diff --git a/arch/c6x/include/asm/kvm_para.h b/arch/c6x/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index cf37478c1169c59411bc6926142c020a6348ec26..3d8f3c22a94fa0adf7e66fe2a793a65326759990 100644 (file)
@@ -20,8 +20,6 @@
 #include <asm/cacheflush.h>
 
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * Do a signal return, undo the signal stack.
  */
@@ -87,7 +85,6 @@ asmlinkage int do_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -248,10 +245,9 @@ do_restart:
 /*
  * handle the actual delivery of a signal to userspace
  */
-static int handle_signal(int sig,
+static void handle_signal(int sig,
                         siginfo_t *info, struct k_sigaction *ka,
-                        sigset_t *oldset, struct pt_regs *regs,
-                        int syscall)
+                        struct pt_regs *regs, int syscall)
 {
        int ret;
 
@@ -278,11 +274,9 @@ static int handle_signal(int sig,
        }
 
        /* Set up the stack frame */
-       ret = setup_rt_frame(sig, ka, info, oldset, regs);
-       if (ret == 0)
-               block_sigmask(ka, sig);
-
-       return ret;
+       if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0)
+               return;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -292,7 +286,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
 {
        struct k_sigaction ka;
        siginfo_t info;
-       sigset_t *oldset;
        int signr;
 
        /* we want the common case to go fast, which is why we may in certain
@@ -300,25 +293,9 @@ static void do_signal(struct pt_regs *regs, int syscall)
        if (!user_mode(regs))
                return;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
-               if (handle_signal(signr, &info, &ka, oldset,
-                                 regs, syscall) == 0) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-                       tracehook_signal_handler(signr, &info, &ka, regs, 0);
-               }
-
+               handle_signal(signr, &info, &ka, regs, syscall);
                return;
        }
 
@@ -343,10 +320,7 @@ static void do_signal(struct pt_regs *regs, int syscall)
 
        /* if there's no signal to deliver, we just put the saved sigmask
         * back */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 /*
@@ -357,14 +331,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags,
                                 int syscall)
 {
        /* deal with pending signal delivery */
-       if (thread_info_flags & ((1 << TIF_SIGPENDING) |
-                                (1 << TIF_RESTORE_SIGMASK)))
+       if (thread_info_flags & (1 << TIF_SIGPENDING))
                do_signal(regs, syscall);
 
        if (thread_info_flags & (1 << TIF_NOTIFY_RESUME)) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index 22d34d64cc81d014ef39e793f14dc1b28d105655..bb344650a14f255517487a3a06342992801e8237 100644 (file)
@@ -40,6 +40,7 @@ config CRIS
        bool
        default y
        select HAVE_IDE
+       select GENERIC_ATOMIC64
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_IRQ_SHOW
        select GENERIC_IOMAP
diff --git a/arch/cris/arch-v10/drivers/ds1302.c b/arch/cris/arch-v10/drivers/ds1302.c
deleted file mode 100644 (file)
index 74f99c6..0000000
+++ /dev/null
@@ -1,515 +0,0 @@
-/*!***************************************************************************
-*!
-*! FILE NAME  : ds1302.c
-*!
-*! DESCRIPTION: Implements an interface for the DS1302 RTC through Etrax I/O
-*!
-*! Functions exported: ds1302_readreg, ds1302_writereg, ds1302_init
-*!
-*! ---------------------------------------------------------------------------
-*!
-*! (C) Copyright 1999-2007 Axis Communications AB, LUND, SWEDEN
-*!
-*!***************************************************************************/
-
-
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/miscdevice.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/bcd.h>
-#include <linux/capability.h>
-
-#include <asm/uaccess.h>
-#include <arch/svinto.h>
-#include <asm/io.h>
-#include <asm/rtc.h>
-#include <arch/io_interface_mux.h>
-
-#include "i2c.h"
-
-#define RTC_MAJOR_NR 121 /* local major, change later */
-
-static DEFINE_MUTEX(ds1302_mutex);
-static const char ds1302_name[] = "ds1302";
-
-/* The DS1302 might be connected to different bits on different products. 
- * It has three signals - SDA, SCL and RST. RST and SCL are always outputs,
- * but SDA can have a selected direction.
- * For now, only PORT_PB is hardcoded.
- */
-
-/* The RST bit may be on either the Generic Port or Port PB. */
-#ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT
-#define TK_RST_OUT(x) REG_SHADOW_SET(R_PORT_G_DATA,  port_g_data_shadow,  CONFIG_ETRAX_DS1302_RSTBIT, x)
-#define TK_RST_DIR(x)
-#else
-#define TK_RST_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x)
-#define TK_RST_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR,  port_pb_dir_shadow,  CONFIG_ETRAX_DS1302_RSTBIT, x)
-#endif
-
-
-#define TK_SDA_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_SDABIT, x)
-#define TK_SCL_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_SCLBIT, x)
-
-#define TK_SDA_IN()   ((*R_PORT_PB_READ >> CONFIG_ETRAX_DS1302_SDABIT) & 1)
-/* 1 is out, 0 is in */
-#define TK_SDA_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR,  port_pb_dir_shadow,  CONFIG_ETRAX_DS1302_SDABIT, x)
-#define TK_SCL_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR,  port_pb_dir_shadow,  CONFIG_ETRAX_DS1302_SCLBIT, x)
-
-
-/*
- * The reason for tempudelay and not udelay is that loops_per_usec
- * (used in udelay) is not set when functions here are called from time.c 
- */
-
-static void tempudelay(int usecs) 
-{
-       volatile int loops;
-
-       for(loops = usecs * 12; loops > 0; loops--)
-               /* nothing */;  
-}
-
-
-/* Send 8 bits. */
-static void
-out_byte(unsigned char x) 
-{
-       int i;
-       TK_SDA_DIR(1);
-       for (i = 8; i--;) {
-               /* The chip latches incoming bits on the rising edge of SCL. */
-               TK_SCL_OUT(0);
-               TK_SDA_OUT(x & 1);
-               tempudelay(1);
-               TK_SCL_OUT(1);
-               tempudelay(1);
-               x >>= 1;
-       }
-       TK_SDA_DIR(0);
-}
-
-static unsigned char
-in_byte(void) 
-{
-       unsigned char x = 0;
-       int i;
-
-       /* Read byte. Bits come LSB first, on the falling edge of SCL.
-        * Assume SDA is in input direction already.
-        */
-       TK_SDA_DIR(0);
-
-       for (i = 8; i--;) {
-               TK_SCL_OUT(0);
-               tempudelay(1);
-               x >>= 1;
-               x |= (TK_SDA_IN() << 7);
-               TK_SCL_OUT(1);
-               tempudelay(1);
-       }
-
-       return x;
-}
-
-/* Prepares for a transaction by de-activating RST (active-low). */
-
-static void
-start(void) 
-{
-       TK_SCL_OUT(0);
-       tempudelay(1);
-       TK_RST_OUT(0);
-       tempudelay(5);
-       TK_RST_OUT(1);  
-}
-
-/* Ends a transaction by taking RST active again. */
-
-static void
-stop(void) 
-{
-       tempudelay(2);
-       TK_RST_OUT(0);
-}
-
-/* Enable writing. */
-
-static void
-ds1302_wenable(void) 
-{
-       start();        
-       out_byte(0x8e); /* Write control register  */
-       out_byte(0x00); /* Disable write protect bit 7 = 0 */
-       stop();
-}
-
-/* Disable writing. */
-
-static void
-ds1302_wdisable(void) 
-{
-       start();
-       out_byte(0x8e); /* Write control register  */
-       out_byte(0x80); /* Disable write protect bit 7 = 0 */
-       stop();
-}
-
-
-
-/* Read a byte from the selected register in the DS1302. */
-
-unsigned char
-ds1302_readreg(int reg) 
-{
-       unsigned char x;
-
-       start();
-       out_byte(0x81 | (reg << 1)); /* read register */
-       x = in_byte();
-       stop();
-
-       return x;
-}
-
-/* Write a byte to the selected register. */
-
-void
-ds1302_writereg(int reg, unsigned char val) 
-{
-#ifndef CONFIG_ETRAX_RTC_READONLY
-       int do_writereg = 1;
-#else
-       int do_writereg = 0;
-
-       if (reg == RTC_TRICKLECHARGER)
-               do_writereg = 1;
-#endif
-
-       if (do_writereg) {
-               ds1302_wenable();
-               start();
-               out_byte(0x80 | (reg << 1)); /* write register */
-               out_byte(val);
-               stop();
-               ds1302_wdisable();
-       }
-}
-
-void
-get_rtc_time(struct rtc_time *rtc_tm) 
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-
-       rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS);
-       rtc_tm->tm_min = CMOS_READ(RTC_MINUTES);
-       rtc_tm->tm_hour = CMOS_READ(RTC_HOURS);
-       rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
-       rtc_tm->tm_mon = CMOS_READ(RTC_MONTH);
-       rtc_tm->tm_year = CMOS_READ(RTC_YEAR);
-
-       local_irq_restore(flags);
-       
-       rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
-       rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
-       rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
-       rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
-       rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
-       rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
-
-       /*
-        * Account for differences between how the RTC uses the values
-        * and how they are defined in a struct rtc_time;
-        */
-
-       if (rtc_tm->tm_year <= 69)
-               rtc_tm->tm_year += 100;
-
-       rtc_tm->tm_mon--;
-}
-
-static unsigned char days_in_mo[] = 
-    {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-
-/* ioctl that supports RTC_RD_TIME and RTC_SET_TIME (read and set time/date). */
-
-static int rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       unsigned long flags;
-
-       switch(cmd) {
-               case RTC_RD_TIME:       /* read the time/date from RTC  */
-               {
-                       struct rtc_time rtc_tm;
-                                               
-                       memset(&rtc_tm, 0, sizeof (struct rtc_time));
-                       get_rtc_time(&rtc_tm);                                          
-                       if (copy_to_user((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time)))
-                               return -EFAULT; 
-                       return 0;
-               }
-
-               case RTC_SET_TIME:      /* set the RTC */
-               {
-                       struct rtc_time rtc_tm;
-                       unsigned char mon, day, hrs, min, sec, leap_yr;
-                       unsigned int yrs;
-
-                       if (!capable(CAP_SYS_TIME))
-                               return -EPERM;
-
-                       if (copy_from_user(&rtc_tm, (struct rtc_time*)arg, sizeof(struct rtc_time)))
-                               return -EFAULT;
-
-                       yrs = rtc_tm.tm_year + 1900;
-                       mon = rtc_tm.tm_mon + 1;   /* tm_mon starts at zero */
-                       day = rtc_tm.tm_mday;
-                       hrs = rtc_tm.tm_hour;
-                       min = rtc_tm.tm_min;
-                       sec = rtc_tm.tm_sec;
-                       
-                       
-                       if ((yrs < 1970) || (yrs > 2069))
-                               return -EINVAL;
-
-                       leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
-
-                       if ((mon > 12) || (day == 0))
-                               return -EINVAL;
-
-                       if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
-                               return -EINVAL;
-                       
-                       if ((hrs >= 24) || (min >= 60) || (sec >= 60))
-                               return -EINVAL;
-
-                       if (yrs >= 2000)
-                               yrs -= 2000;    /* RTC (0, 1, ... 69) */
-                       else
-                               yrs -= 1900;    /* RTC (70, 71, ... 99) */
-
-                       sec = bin2bcd(sec);
-                       min = bin2bcd(min);
-                       hrs = bin2bcd(hrs);
-                       day = bin2bcd(day);
-                       mon = bin2bcd(mon);
-                       yrs = bin2bcd(yrs);
-
-                       local_irq_save(flags);
-                       CMOS_WRITE(yrs, RTC_YEAR);
-                       CMOS_WRITE(mon, RTC_MONTH);
-                       CMOS_WRITE(day, RTC_DAY_OF_MONTH);
-                       CMOS_WRITE(hrs, RTC_HOURS);
-                       CMOS_WRITE(min, RTC_MINUTES);
-                       CMOS_WRITE(sec, RTC_SECONDS);
-                       local_irq_restore(flags);
-
-                       /* Notice that at this point, the RTC is updated but
-                        * the kernel is still running with the old time.
-                        * You need to set that separately with settimeofday
-                        * or adjtimex.
-                        */
-                       return 0;
-               }
-
-               case RTC_SET_CHARGE: /* set the RTC TRICKLE CHARGE register */
-               {
-                       int tcs_val;
-
-                       if (!capable(CAP_SYS_TIME))
-                               return -EPERM;
-                       
-                       if(copy_from_user(&tcs_val, (int*)arg, sizeof(int)))
-                               return -EFAULT;
-
-                       tcs_val = RTC_TCR_PATTERN | (tcs_val & 0x0F);
-                       ds1302_writereg(RTC_TRICKLECHARGER, tcs_val);
-                       return 0;
-               }
-               case RTC_VL_READ:
-               {
-                       /* TODO:
-                        * Implement voltage low detection support
-                        */
-                       printk(KERN_WARNING "DS1302: RTC Voltage Low detection"
-                              " is not supported\n");
-                       return 0;
-               }
-               case RTC_VL_CLR:
-               {
-                       /* TODO:
-                        * Nothing to do since Voltage Low detection is not supported
-                        */
-                       return 0;
-               }
-               default:
-                       return -ENOIOCTLCMD;
-       }
-}
-
-static long rtc_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       int ret;
-
-       mutex_lock(&ds1302_mutex);
-       ret = rtc_ioctl(file, cmd, arg);
-       mutex_unlock(&ds1302_mutex);
-
-       return ret;
-}
-
-static void
-print_rtc_status(void)
-{
-       struct rtc_time tm;
-
-       get_rtc_time(&tm);
-
-       /*
-        * There is no way to tell if the luser has the RTC set for local
-        * time or for Universal Standard Time (GMT). Probably local though.
-        */
-
-       printk(KERN_INFO "rtc_time\t: %02d:%02d:%02d\n",
-              tm.tm_hour, tm.tm_min, tm.tm_sec);
-       printk(KERN_INFO "rtc_date\t: %04d-%02d-%02d\n",
-              tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
-}
-
-/* The various file operations we support. */
-
-static const struct file_operations rtc_fops = {
-       .owner          = THIS_MODULE,
-       .unlocked_ioctl = rtc_unlocked_ioctl,
-       .llseek         = noop_llseek,
-}; 
-
-/* Probe for the chip by writing something to its RAM and try reading it back. */
-
-#define MAGIC_PATTERN 0x42
-
-static int __init
-ds1302_probe(void) 
-{
-       int retval, res; 
-
-       TK_RST_DIR(1);
-       TK_SCL_DIR(1);
-       TK_SDA_DIR(0);
-       
-       /* Try to talk to timekeeper. */
-
-       ds1302_wenable();  
-       start();
-       out_byte(0xc0); /* write RAM byte 0 */  
-       out_byte(MAGIC_PATTERN); /* write something magic */
-       start();
-       out_byte(0xc1); /* read RAM byte 0 */
-
-       if((res = in_byte()) == MAGIC_PATTERN) {
-               stop();
-               ds1302_wdisable();
-               printk(KERN_INFO "%s: RTC found.\n", ds1302_name);
-               printk(KERN_INFO "%s: SDA, SCL, RST on PB%i, PB%i, %s%i\n",
-                      ds1302_name,
-                      CONFIG_ETRAX_DS1302_SDABIT,
-                      CONFIG_ETRAX_DS1302_SCLBIT,
-#ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT
-                      "GENIO",
-#else
-                      "PB",
-#endif
-                      CONFIG_ETRAX_DS1302_RSTBIT);
-                      print_rtc_status();
-               retval = 1;
-       } else {
-               stop();
-               retval = 0;
-       }
-
-       return retval;
-}
-
-
-/* Just probe for the RTC and register the device to handle the ioctl needed. */
-
-int __init
-ds1302_init(void) 
-{
-#ifdef CONFIG_ETRAX_I2C
-       i2c_init();
-#endif
-
-       if (!ds1302_probe()) {
-#ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT
-#if CONFIG_ETRAX_DS1302_RSTBIT == 27
-               /*
-                * The only way to set g27 to output is to enable ATA.
-                *
-                * Make sure that R_GEN_CONFIG is setup correct.
-                */
-               /* Allocating the ATA interface will grab almost all
-                * pins in I/O groups a, b, c and d.  A consequence of
-                * allocating the ATA interface is that the fixed
-                * interfaces shared RAM, parallel port 0, parallel
-                * port 1, parallel port W, SCSI-8 port 0, SCSI-8 port
-                * 1, SCSI-W, serial port 2, serial port 3,
-                * synchronous serial port 3 and USB port 2 and almost
-                * all GPIO pins on port g cannot be used.
-                */
-               if (cris_request_io_interface(if_ata, "ds1302/ATA")) {
-                       printk(KERN_WARNING "ds1302: Failed to get IO interface\n");
-                       return -1;
-               }
-
-#elif CONFIG_ETRAX_DS1302_RSTBIT == 0
-               if (cris_io_interface_allocate_pins(if_gpio_grp_a,
-                                                   'g',
-                                                   CONFIG_ETRAX_DS1302_RSTBIT,
-                                                   CONFIG_ETRAX_DS1302_RSTBIT)) {
-                       printk(KERN_WARNING "ds1302: Failed to get IO interface\n");
-                       return -1;
-               }
-
-               /* Set the direction of this bit to out. */
-               genconfig_shadow = ((genconfig_shadow &
-                                    ~IO_MASK(R_GEN_CONFIG, g0dir)) |
-                                  (IO_STATE(R_GEN_CONFIG, g0dir, out)));
-               *R_GEN_CONFIG = genconfig_shadow;
-#endif
-               if (!ds1302_probe()) {
-                       printk(KERN_WARNING "%s: RTC not found.\n", ds1302_name);
-                       return -1;
-               }
-#else
-               printk(KERN_WARNING "%s: RTC not found.\n", ds1302_name);
-               return -1;
-#endif
-       }
-       /* Initialise trickle charger */
-       ds1302_writereg(RTC_TRICKLECHARGER,
-                       RTC_TCR_PATTERN |(CONFIG_ETRAX_DS1302_TRICKLE_CHARGE & 0x0F));
-        /* Start clock by resetting CLOCK_HALT */
-       ds1302_writereg(RTC_SECONDS, (ds1302_readreg(RTC_SECONDS) & 0x7F));
-       return 0;
-}
-
-static int __init ds1302_register(void)
-{
-       ds1302_init();
-       if (register_chrdev(RTC_MAJOR_NR, ds1302_name, &rtc_fops)) {
-               printk(KERN_INFO "%s: unable to get major %d for rtc\n", 
-                      ds1302_name, RTC_MAJOR_NR);
-               return -1;
-       }
-        return 0;
-
-}
-
-module_init(ds1302_register);
diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c
deleted file mode 100644 (file)
index 9da0568..0000000
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * PCF8563 RTC
- *
- * From Phillips' datasheet:
- *
- * The PCF8563 is a CMOS real-time clock/calendar optimized for low power
- * consumption. A programmable clock output, interrupt output and voltage
- * low detector are also provided. All address and data are transferred
- * serially via two-line bidirectional I2C-bus. Maximum bus speed is
- * 400 kbits/s. The built-in word address register is incremented
- * automatically after each written or read byte.
- *
- * Copyright (c) 2002-2007, Axis Communications AB
- * All rights reserved.
- *
- * Author: Tobias Anderberg <tobiasa@axis.com>.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/ioctl.h>
-#include <linux/delay.h>
-#include <linux/bcd.h>
-#include <linux/mutex.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/rtc.h>
-
-#include "i2c.h"
-
-#define PCF8563_MAJOR 121      /* Local major number. */
-#define DEVICE_NAME "rtc"      /* Name which is registered in /proc/devices. */
-#define PCF8563_NAME "PCF8563"
-#define DRIVER_VERSION "$Revision: 1.24 $"
-
-/* I2C bus slave registers. */
-#define RTC_I2C_READ           0xa3
-#define RTC_I2C_WRITE          0xa2
-
-/* Two simple wrapper macros, saves a few keystrokes. */
-#define rtc_read(x) i2c_readreg(RTC_I2C_READ, x)
-#define rtc_write(x,y) i2c_writereg(RTC_I2C_WRITE, x, y)
-
-static DEFINE_MUTEX(pcf8563_mutex);
-static DEFINE_MUTEX(rtc_lock); /* Protect state etc */
-
-static const unsigned char days_in_month[] =
-       { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
-
-static long pcf8563_unlocked_ioctl(struct file *, unsigned int, unsigned long);
-
-/* Cache VL bit value read at driver init since writing the RTC_SECOND
- * register clears the VL status.
- */
-static int voltage_low;
-
-static const struct file_operations pcf8563_fops = {
-       .owner = THIS_MODULE,
-       .unlocked_ioctl = pcf8563_unlocked_ioctl,
-       .llseek         = noop_llseek,
-};
-
-unsigned char
-pcf8563_readreg(int reg)
-{
-       unsigned char res = rtc_read(reg);
-
-       /* The PCF8563 does not return 0 for unimplemented bits. */
-       switch (reg) {
-       case RTC_SECONDS:
-       case RTC_MINUTES:
-               res &= 0x7F;
-               break;
-       case RTC_HOURS:
-       case RTC_DAY_OF_MONTH:
-               res &= 0x3F;
-               break;
-       case RTC_WEEKDAY:
-               res &= 0x07;
-               break;
-       case RTC_MONTH:
-               res &= 0x1F;
-               break;
-       case RTC_CONTROL1:
-               res &= 0xA8;
-               break;
-       case RTC_CONTROL2:
-               res &= 0x1F;
-               break;
-       case RTC_CLOCKOUT_FREQ:
-       case RTC_TIMER_CONTROL:
-               res &= 0x83;
-               break;
-       }
-       return res;
-}
-
-void
-pcf8563_writereg(int reg, unsigned char val)
-{
-       rtc_write(reg, val);
-}
-
-void
-get_rtc_time(struct rtc_time *tm)
-{
-       tm->tm_sec  = rtc_read(RTC_SECONDS);
-       tm->tm_min  = rtc_read(RTC_MINUTES);
-       tm->tm_hour = rtc_read(RTC_HOURS);
-       tm->tm_mday = rtc_read(RTC_DAY_OF_MONTH);
-       tm->tm_wday = rtc_read(RTC_WEEKDAY);
-       tm->tm_mon  = rtc_read(RTC_MONTH);
-       tm->tm_year = rtc_read(RTC_YEAR);
-
-       if (tm->tm_sec & 0x80) {
-               printk(KERN_ERR "%s: RTC Voltage Low - reliable date/time "
-                      "information is no longer guaranteed!\n", PCF8563_NAME);
-       }
-
-       tm->tm_year  = bcd2bin(tm->tm_year) +
-                      ((tm->tm_mon & 0x80) ? 100 : 0);
-       tm->tm_sec  &= 0x7F;
-       tm->tm_min  &= 0x7F;
-       tm->tm_hour &= 0x3F;
-       tm->tm_mday &= 0x3F;
-       tm->tm_wday &= 0x07; /* Not coded in BCD. */
-       tm->tm_mon  &= 0x1F;
-
-       tm->tm_sec = bcd2bin(tm->tm_sec);
-       tm->tm_min = bcd2bin(tm->tm_min);
-       tm->tm_hour = bcd2bin(tm->tm_hour);
-       tm->tm_mday = bcd2bin(tm->tm_mday);
-       tm->tm_mon = bcd2bin(tm->tm_mon);
-       tm->tm_mon--; /* Month is 1..12 in RTC but 0..11 in linux */
-}
-
-int __init
-pcf8563_init(void)
-{
-       static int res;
-       static int first = 1;
-
-       if (!first)
-               return res;
-       first = 0;
-
-       /* Initiate the i2c protocol. */
-       res = i2c_init();
-       if (res < 0) {
-               printk(KERN_CRIT "pcf8563_init: Failed to init i2c.\n");
-               return res;
-       }
-
-       /*
-        * First of all we need to reset the chip. This is done by
-        * clearing control1, control2 and clk freq and resetting
-        * all alarms.
-        */
-       if (rtc_write(RTC_CONTROL1, 0x00) < 0)
-               goto err;
-
-       if (rtc_write(RTC_CONTROL2, 0x00) < 0)
-               goto err;
-
-       if (rtc_write(RTC_CLOCKOUT_FREQ, 0x00) < 0)
-               goto err;
-
-       if (rtc_write(RTC_TIMER_CONTROL, 0x03) < 0)
-               goto err;
-
-       /* Reset the alarms. */
-       if (rtc_write(RTC_MINUTE_ALARM, 0x80) < 0)
-               goto err;
-
-       if (rtc_write(RTC_HOUR_ALARM, 0x80) < 0)
-               goto err;
-
-       if (rtc_write(RTC_DAY_ALARM, 0x80) < 0)
-               goto err;
-
-       if (rtc_write(RTC_WEEKDAY_ALARM, 0x80) < 0)
-               goto err;
-
-       /* Check for low voltage, and warn about it. */
-       if (rtc_read(RTC_SECONDS) & 0x80) {
-               voltage_low = 1;
-               printk(KERN_WARNING "%s: RTC Voltage Low - reliable "
-                      "date/time information is no longer guaranteed!\n",
-                      PCF8563_NAME);
-       }
-
-       return res;
-
-err:
-       printk(KERN_INFO "%s: Error initializing chip.\n", PCF8563_NAME);
-       res = -1;
-       return res;
-}
-
-void __exit
-pcf8563_exit(void)
-{
-       unregister_chrdev(PCF8563_MAJOR, DEVICE_NAME);
-}
-
-/*
- * ioctl calls for this driver. Why return -ENOTTY upon error? Because
- * POSIX says so!
- */
-static int pcf8563_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-       /* Some sanity checks. */
-       if (_IOC_TYPE(cmd) != RTC_MAGIC)
-               return -ENOTTY;
-
-       if (_IOC_NR(cmd) > RTC_MAX_IOCTL)
-               return -ENOTTY;
-
-       switch (cmd) {
-       case RTC_RD_TIME:
-       {
-               struct rtc_time tm;
-
-               mutex_lock(&rtc_lock);
-               memset(&tm, 0, sizeof tm);
-               get_rtc_time(&tm);
-
-               if (copy_to_user((struct rtc_time *) arg, &tm,
-                                sizeof tm)) {
-                       mutex_unlock(&rtc_lock);
-                       return -EFAULT;
-               }
-
-               mutex_unlock(&rtc_lock);
-
-               return 0;
-       }
-       case RTC_SET_TIME:
-       {
-               int leap;
-               int year;
-               int century;
-               struct rtc_time tm;
-
-               memset(&tm, 0, sizeof tm);
-               if (!capable(CAP_SYS_TIME))
-                       return -EPERM;
-
-               if (copy_from_user(&tm, (struct rtc_time *) arg, sizeof tm))
-                       return -EFAULT;
-
-               /* Convert from struct tm to struct rtc_time. */
-               tm.tm_year += 1900;
-               tm.tm_mon += 1;
-
-               /*
-                * Check if tm.tm_year is a leap year. A year is a leap
-                * year if it is divisible by 4 but not 100, except
-                * that years divisible by 400 _are_ leap years.
-                */
-               year = tm.tm_year;
-               leap = (tm.tm_mon == 2) &&
-                       ((year % 4 == 0 && year % 100 != 0) || year % 400 == 0);
-
-               /* Perform some sanity checks. */
-               if ((tm.tm_year < 1970) ||
-                   (tm.tm_mon > 12) ||
-                   (tm.tm_mday == 0) ||
-                   (tm.tm_mday > days_in_month[tm.tm_mon] + leap) ||
-                   (tm.tm_wday >= 7) ||
-                   (tm.tm_hour >= 24) ||
-                   (tm.tm_min >= 60) ||
-                   (tm.tm_sec >= 60))
-                       return -EINVAL;
-
-               century = (tm.tm_year >= 2000) ? 0x80 : 0;
-               tm.tm_year = tm.tm_year % 100;
-
-               tm.tm_year = bin2bcd(tm.tm_year);
-               tm.tm_mon = bin2bcd(tm.tm_mon);
-               tm.tm_mday = bin2bcd(tm.tm_mday);
-               tm.tm_hour = bin2bcd(tm.tm_hour);
-               tm.tm_min = bin2bcd(tm.tm_min);
-               tm.tm_sec = bin2bcd(tm.tm_sec);
-               tm.tm_mon |= century;
-
-               mutex_lock(&rtc_lock);
-
-               rtc_write(RTC_YEAR, tm.tm_year);
-               rtc_write(RTC_MONTH, tm.tm_mon);
-               rtc_write(RTC_WEEKDAY, tm.tm_wday); /* Not coded in BCD. */
-               rtc_write(RTC_DAY_OF_MONTH, tm.tm_mday);
-               rtc_write(RTC_HOURS, tm.tm_hour);
-               rtc_write(RTC_MINUTES, tm.tm_min);
-               rtc_write(RTC_SECONDS, tm.tm_sec);
-
-               mutex_unlock(&rtc_lock);
-
-               return 0;
-       }
-       case RTC_VL_READ:
-               if (voltage_low) {
-                       printk(KERN_ERR "%s: RTC Voltage Low - "
-                              "reliable date/time information is no "
-                              "longer guaranteed!\n", PCF8563_NAME);
-               }
-
-               if (copy_to_user((int *) arg, &voltage_low, sizeof(int)))
-                       return -EFAULT;
-               return 0;
-
-       case RTC_VL_CLR:
-       {
-               /* Clear the VL bit in the seconds register in case
-                * the time has not been set already (which would
-                * have cleared it). This does not really matter
-                * because of the cached voltage_low value but do it
-                * anyway for consistency. */
-
-               int ret = rtc_read(RTC_SECONDS);
-
-               rtc_write(RTC_SECONDS, (ret & 0x7F));
-
-               /* Clear the cached value. */
-               voltage_low = 0;
-
-               return 0;
-       }
-       default:
-               return -ENOTTY;
-       }
-
-       return 0;
-}
-
-static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-       int ret;
-
-       mutex_lock(&pcf8563_mutex);
-       ret = pcf8563_ioctl(filp, cmd, arg);
-       mutex_unlock(&pcf8563_mutex);
-
-       return ret;
-}
-
-static int __init pcf8563_register(void)
-{
-       if (pcf8563_init() < 0) {
-               printk(KERN_INFO "%s: Unable to initialize Real-Time Clock "
-                      "Driver, %s\n", PCF8563_NAME, DRIVER_VERSION);
-               return -1;
-       }
-
-       if (register_chrdev(PCF8563_MAJOR, DEVICE_NAME, &pcf8563_fops) < 0) {
-               printk(KERN_INFO "%s: Unable to get major number %d for RTC device.\n",
-                      PCF8563_NAME, PCF8563_MAJOR);
-               return -1;
-       }
-
-       printk(KERN_INFO "%s Real-Time Clock Driver, %s\n", PCF8563_NAME,
-              DRIVER_VERSION);
-
-       /* Check for low voltage, and warn about it. */
-       if (voltage_low) {
-               printk(KERN_WARNING "%s: RTC Voltage Low - reliable date/time "
-                      "information is no longer guaranteed!\n", PCF8563_NAME);
-       }
-
-       return 0;
-}
-
-module_init(pcf8563_register);
-module_exit(pcf8563_exit);
index 8a8196ee8ce88f5966694c570be8556c577c17b6..082f1890bacbea4808917912fa1602e14d4f8e82 100644 (file)
@@ -21,8 +21,6 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/delay.h>
-#include <asm/rtc.h>
-
 
 #include <arch/svinto.h>
 #include <asm/fasttimer.h>
index b579dd02e098eb666d21e4fcdc1d83ef0adbb65f..37e6d2c50b764a1d72ea95b632f267d7d450fa80 100644 (file)
@@ -264,7 +264,7 @@ static int write_register (int regno, char *val);
 
 /* Write a value to a specified register in the stack of a thread other
    than the current thread. */
-static write_stack_register (int thread_id, int regno, char *valptr);
+static int write_stack_register(int thread_id, int regno, char *valptr);
 
 /* Read a value from a specified register in the register image. Returns the
    status of the read operation. The register value is returned in valptr. */
index e16f8f297f61a0376f8b6b4497675a742ee9bf87..0bb477c13a4e08b6bd09ceee6e22e9a11ac2a6b0 100644 (file)
@@ -31,8 +31,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* a syscall in Linux/CRIS is a break 13 instruction which is 2 bytes */
 /* manipulate regs so that upon return, it will be re-executed */
 
@@ -176,7 +174,6 @@ asmlinkage int sys_sigreturn(long r10, long r11, long r12, long r13, long mof,
                                    sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->sc))
@@ -212,7 +209,6 @@ asmlinkage int sys_rt_sigreturn(long r10, long r11, long r12, long r13,
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -415,10 +411,11 @@ give_sigsegv:
  * OK, we're invoking a handler
  */
 
-static inline int handle_signal(int canrestart, unsigned long sig,
+static inline void handle_signal(int canrestart, unsigned long sig,
        siginfo_t *info, struct k_sigaction *ka,
-       sigset_t *oldset, struct pt_regs *regs)
+       struct pt_regs *regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Are we from a system call? */
@@ -456,9 +453,7 @@ static inline int handle_signal(int canrestart, unsigned long sig,
                ret = setup_frame(sig, ka, oldset, regs);
 
        if (ret == 0)
-               block_sigmask(ka, sig);
-
-       return ret;
+               signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -478,7 +473,6 @@ void do_signal(int canrestart, struct pt_regs *regs)
        siginfo_t info;
        int signr;
         struct k_sigaction ka;
-       sigset_t *oldset;
 
        /*
         * We want the common case to go fast, which
@@ -489,23 +483,10 @@ void do_signal(int canrestart, struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
-               if (handle_signal(canrestart, signr, &info, &ka,
-                               oldset, regs)) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
+               handle_signal(canrestart, signr, &info, &ka, regs);
                return;
        }
 
@@ -525,8 +506,5 @@ void do_signal(int canrestart, struct pt_regs *regs)
 
        /* if there's no signal to deliver, we just put the saved sigmask
         * back */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
index 20c85b5dc7d0950e860193def61e5ffa5e14ffee..bcffcb6a9415dc7cc991330e949465891a99c1d9 100644 (file)
 #include <asm/signal.h>
 #include <asm/io.h>
 #include <asm/delay.h>
-#include <asm/rtc.h>
 #include <asm/irq_regs.h>
 
 /* define this if you need to use print_timestamp */
 /* it will make jiffies at 96 hz instead of 100 hz though */
 #undef USE_CASCADE_TIMERS
 
-extern int set_rtc_mmss(unsigned long nowtime);
-extern int have_rtc;
-
 unsigned long get_ns_in_jiffie(void)
 {
        unsigned char timer_count, t1;
@@ -203,11 +199,6 @@ time_init(void)
         */
        loops_per_usec = 50;
 
-       if(RTC_INIT() < 0)
-               have_rtc = 0;
-       else
-               have_rtc = 1;
-
        /* Setup the etrax timers
         * Base frequency is 25000 hz, divider 250 -> 100 HZ
         * In normal mode, we use timer0, so timer1 is free. In cascade
index 36e9a9c5239bc1a6a3176f2f4c2dae2a752b48f0..725153edb764e5c815edca5d4c0fda0dd3f63803 100644 (file)
@@ -2,8 +2,5 @@
 # Makefile for Etrax-specific library files..
 #
 
-
-EXTRA_AFLAGS := -traditional
-
 lib-y  = checksum.o checksumcopy.o string.o usercopy.o memset.o csumcpfruser.o
 
index 642c6fed43d753b4375ff97ffa3f215eec849349..f8476d9e856b9d06da038eb49c90db2c60f053af 100644 (file)
@@ -1394,11 +1394,10 @@ static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char
 
        if (padlen < MD5_MIN_PAD_LENGTH) padlen += MD5_BLOCK_LENGTH;
 
-       p = kmalloc(padlen, alloc_flag);
+       p = kzalloc(padlen, alloc_flag);
        if (!p) return -ENOMEM;
 
        *p = 0x80;
-       memset(p+1, 0, padlen - 1);
 
        DEBUG(printk("create_md5_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
 
@@ -1426,11 +1425,10 @@ static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, cha
 
        if (padlen < SHA1_MIN_PAD_LENGTH) padlen += SHA1_BLOCK_LENGTH;
 
-       p = kmalloc(padlen, alloc_flag);
+       p = kzalloc(padlen, alloc_flag);
        if (!p) return -ENOMEM;
 
        *p = 0x80;
-       memset(p+1, 0, padlen - 1);
 
        DEBUG(printk("create_sha1_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
 
index f7ad9e8637df271a1f9361ebede99b7f78966da0..f085229cf870bc306b95df2dee4eadd9ebdd4b8b 100644 (file)
@@ -114,8 +114,6 @@ void user_disable_single_step(struct task_struct *child)
 void
 ptrace_disable(struct task_struct *child)
 {
-       unsigned long tmp;
-
        /* Deconfigure SPC and S-bit. */
        user_disable_single_step(child);
        put_reg(child, PT_SPC, 0);
index b338d8fc0c1241c12aa66495e474d16cd5d49f4a..b60d1b65a4267ef5cbdb14758058b5c9ca6707a0 100644 (file)
@@ -24,9 +24,6 @@
 
 extern unsigned long cris_signal_return_page;
 
-/* Flag to check if a signal is blockable. */
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * A syscall in CRIS is really a "break 13" instruction, which is 2
  * bytes. The registers is manipulated so upon return the instruction
@@ -167,7 +164,6 @@ sys_sigreturn(long r10, long r11, long r12, long r13, long mof, long srp,
                                                 sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->sc))
@@ -208,7 +204,6 @@ sys_rt_sigreturn(long r10, long r11, long r12, long r13, long mof, long srp,
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -434,11 +429,12 @@ give_sigsegv:
 }
 
 /* Invoke a signal handler to, well, handle the signal. */
-static inline int
+static inline void
 handle_signal(int canrestart, unsigned long sig,
              siginfo_t *info, struct k_sigaction *ka,
-              sigset_t *oldset, struct pt_regs * regs)
+              struct pt_regs * regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Check if this got called from a system call. */
@@ -489,9 +485,7 @@ handle_signal(int canrestart, unsigned long sig,
                ret = setup_frame(sig, ka, oldset, regs);
 
        if (ret == 0)
-               block_sigmask(ka, sig);
-
-       return ret;
+               signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -511,7 +505,6 @@ do_signal(int canrestart, struct pt_regs *regs)
        int signr;
        siginfo_t info;
         struct k_sigaction ka;
-       sigset_t *oldset;
 
        /*
         * The common case should go fast, which is why this point is
@@ -521,25 +514,11 @@ do_signal(int canrestart, struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
-               if (handle_signal(canrestart, signr, &info, &ka,
-                               oldset, regs)) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
-
+               handle_signal(canrestart, signr, &info, &ka, regs);
                return;
        }
 
@@ -560,10 +539,7 @@ do_signal(int canrestart, struct pt_regs *regs)
 
        /* if there's no signal to deliver, we just put the saved sigmask
         * back */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 asmlinkage void
index 6773fc83a670f08568a059bfa81c2c2e7a3bfa7f..8c4b45efd7b6712f8dc13da94c55bc8ee51ca369 100644 (file)
@@ -18,7 +18,6 @@
 #include <asm/signal.h>
 #include <asm/io.h>
 #include <asm/delay.h>
-#include <asm/rtc.h>
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
 
@@ -67,7 +66,6 @@ unsigned long timer_regs[NR_CPUS] =
 };
 
 extern int set_rtc_mmss(unsigned long nowtime);
-extern int have_rtc;
 
 #ifdef CONFIG_CPU_FREQ
 static int
@@ -265,11 +263,6 @@ void __init time_init(void)
         */
        loops_per_usec = 50;
 
-       if(RTC_INIT() < 0)
-               have_rtc = 0;
-       else
-               have_rtc = 1;
-
        /* Start CPU local timer. */
        cris_timer_init();
 
index 1de779f4f240e6747bc86828896cc2c06971a0c0..7caf25d58e6b6e3b76f75251e0831e1251ced43a 100644 (file)
@@ -7,7 +7,7 @@
 #define L1_CACHE_BYTES 32
 #define L1_CACHE_SHIFT 5
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 void flush_dma_list(dma_descr_data *descr);
 void flush_dma_descr(dma_descr_data *descr, int flush_buf);
index 956eea246b97bbe316088343842f3c94c5b9352b..04d02a51c5e90a2b7dde5403f23329ece74362bc 100644 (file)
@@ -6,5 +6,4 @@ header-y += arch-v32/
 header-y += ethernet.h
 header-y += etraxgpio.h
 header-y += rs485.h
-header-y += rtc.h
 header-y += sync_serial.h
index 72b3cd6eda0b4a09e69b51f9fd338874061c0c9e..ce4e517931514fb0f6d52a2ef1af35b39aa19ea0 100644 (file)
@@ -15,9 +15,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short  __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
@@ -33,4 +30,6 @@ typedef int           __kernel_ptrdiff_t;
 typedef unsigned short __kernel_old_dev_t;
 #define __kernel_old_dev_t __kernel_old_dev_t
 
+#include <asm-generic/posix_types.h>
+
 #endif /* __ARCH_CRIS_POSIX_TYPES_H */
diff --git a/arch/cris/include/asm/rtc.h b/arch/cris/include/asm/rtc.h
deleted file mode 100644 (file)
index 17d3019..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-
-#ifndef __RTC_H__
-#define __RTC_H__
-
-#ifdef CONFIG_ETRAX_DS1302
-   /* Dallas DS1302 clock/calendar register numbers. */
-#  define RTC_SECONDS      0
-#  define RTC_MINUTES      1
-#  define RTC_HOURS        2
-#  define RTC_DAY_OF_MONTH 3
-#  define RTC_MONTH        4
-#  define RTC_WEEKDAY      5
-#  define RTC_YEAR         6
-#  define RTC_CONTROL      7
-
-   /* Bits in CONTROL register. */
-#  define RTC_CONTROL_WRITEPROTECT     0x80
-#  define RTC_TRICKLECHARGER           8
-
-  /* Bits in TRICKLECHARGER register TCS TCS TCS TCS DS DS RS RS. */
-#  define RTC_TCR_PATTERN      0xA0    /* 1010xxxx */
-#  define RTC_TCR_1DIOD                0x04    /* xxxx01xx */
-#  define RTC_TCR_2DIOD                0x08    /* xxxx10xx */
-#  define RTC_TCR_DISABLED     0x00    /* xxxxxx00 Disabled */
-#  define RTC_TCR_2KOHM                0x01    /* xxxxxx01 2KOhm */
-#  define RTC_TCR_4KOHM                0x02    /* xxxxxx10 4kOhm */
-#  define RTC_TCR_8KOHM                0x03    /* xxxxxx11 8kOhm */
-
-#elif defined(CONFIG_ETRAX_PCF8563)
-   /* I2C bus slave registers. */
-#  define RTC_I2C_READ         0xa3
-#  define RTC_I2C_WRITE                0xa2
-
-   /* Phillips PCF8563 registers. */
-#  define RTC_CONTROL1         0x00            /* Control/Status register 1. */
-#  define RTC_CONTROL2         0x01            /* Control/Status register 2. */
-#  define RTC_CLOCKOUT_FREQ    0x0d            /* CLKOUT frequency. */
-#  define RTC_TIMER_CONTROL    0x0e            /* Timer control. */
-#  define RTC_TIMER_CNTDOWN    0x0f            /* Timer countdown. */
-
-   /* BCD encoded clock registers. */
-#  define RTC_SECONDS          0x02
-#  define RTC_MINUTES          0x03
-#  define RTC_HOURS            0x04
-#  define RTC_DAY_OF_MONTH     0x05
-#  define RTC_WEEKDAY          0x06    /* Not coded in BCD! */
-#  define RTC_MONTH            0x07
-#  define RTC_YEAR             0x08
-#  define RTC_MINUTE_ALARM     0x09
-#  define RTC_HOUR_ALARM       0x0a
-#  define RTC_DAY_ALARM                0x0b
-#  define RTC_WEEKDAY_ALARM 0x0c
-
-#endif
-
-#ifdef CONFIG_ETRAX_DS1302
-extern unsigned char ds1302_readreg(int reg);
-extern void ds1302_writereg(int reg, unsigned char val);
-extern int ds1302_init(void);
-#  define CMOS_READ(x) ds1302_readreg(x)
-#  define CMOS_WRITE(val,reg) ds1302_writereg(reg,val)
-#  define RTC_INIT() ds1302_init()
-#elif defined(CONFIG_ETRAX_PCF8563)
-extern unsigned char pcf8563_readreg(int reg);
-extern void pcf8563_writereg(int reg, unsigned char val);
-extern int pcf8563_init(void);
-#  define CMOS_READ(x) pcf8563_readreg(x)
-#  define CMOS_WRITE(val,reg) pcf8563_writereg(reg,val)
-#  define RTC_INIT() pcf8563_init()
-#else
-  /* No RTC configured so we shouldn't try to access any. */
-#  define CMOS_READ(x) 42
-#  define CMOS_WRITE(x,y)
-#  define RTC_INIT() (-1)
-#endif
-
-/*
- * The struct used to pass data via the following ioctl. Similar to the
- * struct tm in <time.h>, but it needs to be here so that the kernel
- * source is self contained, allowing cross-compiles, etc. etc.
- */
-struct rtc_time {
-       int tm_sec;
-       int tm_min;
-       int tm_hour;
-       int tm_mday;
-       int tm_mon;
-       int tm_year;
-       int tm_wday;
-       int tm_yday;
-       int tm_isdst;
-};
-
-/* ioctl() calls that are permitted to the /dev/rtc interface. */
-#define RTC_MAGIC 'p'
-/* Read RTC time. */
-#define RTC_RD_TIME            _IOR(RTC_MAGIC, 0x09, struct rtc_time)
-/* Set RTC time. */
-#define RTC_SET_TIME           _IOW(RTC_MAGIC, 0x0a, struct rtc_time)
-#define RTC_SET_CHARGE         _IOW(RTC_MAGIC, 0x0b, int)
-/* Voltage low detector */
-#define RTC_VL_READ            _IOR(RTC_MAGIC, 0x13, int)
-/* Clear voltage low information */
-#define RTC_VL_CLR             _IO(RTC_MAGIC, 0x14)
-#define RTC_MAX_IOCTL 0x14
-
-#endif /* __RTC_H__ */
index d114ad3da9b15f95ccc5a68b736d5e80bae75af7..58d44ee1a71f70885aef328fdf6a6af32d6c7b58 100644 (file)
@@ -40,7 +40,5 @@ void do_notify_resume(int canrestart, struct pt_regs *regs,
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index 4e73092e85c0388291e1fe99fe3c3913ad0ca358..277ffc459e4b821e8360e1042df6bff469f12057 100644 (file)
@@ -21,7 +21,6 @@
  *
  */
 
-#include <asm/rtc.h>
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/param.h>
@@ -32,7 +31,8 @@
 #include <linux/profile.h>
 #include <linux/sched.h>       /* just for sched_clock() - funny that */
 
-int have_rtc;  /* used to remember if we have an RTC or not */;
+
+#define D(x)
 
 #define TICK_SIZE tick
 
@@ -50,78 +50,16 @@ u32 arch_gettimeoffset(void)
 }
 #endif
 
-/*
- * BUG: This routine does not handle hour overflow properly; it just
- *      sets the minutes. Usually you'll only notice that after reboot!
- */
-
 int set_rtc_mmss(unsigned long nowtime)
 {
-       int retval = 0;
-       int real_seconds, real_minutes, cmos_minutes;
-
-       printk(KERN_DEBUG "set_rtc_mmss(%lu)\n", nowtime);
-
-       if(!have_rtc)
-               return 0;
-
-       cmos_minutes = CMOS_READ(RTC_MINUTES);
-       cmos_minutes = bcd2bin(cmos_minutes);
-
-       /*
-        * since we're only adjusting minutes and seconds,
-        * don't interfere with hour overflow. This avoids
-        * messing with unknown time zones but requires your
-        * RTC not to be off by more than 15 minutes
-        */
-       real_seconds = nowtime % 60;
-       real_minutes = nowtime / 60;
-       if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
-               real_minutes += 30;             /* correct for half hour time zone */
-       real_minutes %= 60;
-
-       if (abs(real_minutes - cmos_minutes) < 30) {
-               real_seconds = bin2bcd(real_seconds);
-               real_minutes = bin2bcd(real_minutes);
-               CMOS_WRITE(real_seconds,RTC_SECONDS);
-               CMOS_WRITE(real_minutes,RTC_MINUTES);
-       } else {
-               printk_once(KERN_NOTICE
-                      "set_rtc_mmss: can't update from %d to %d\n",
-                      cmos_minutes, real_minutes);
-               retval = -1;
-       }
-
-       return retval;
+       D(printk(KERN_DEBUG "set_rtc_mmss(%lu)\n", nowtime));
+       return 0;
 }
 
 /* grab the time from the RTC chip */
-
-unsigned long
-get_cmos_time(void)
+unsigned long get_cmos_time(void)
 {
-       unsigned int year, mon, day, hour, min, sec;
-       if(!have_rtc)
-               return 0;
-
-       sec = CMOS_READ(RTC_SECONDS);
-       min = CMOS_READ(RTC_MINUTES);
-       hour = CMOS_READ(RTC_HOURS);
-       day = CMOS_READ(RTC_DAY_OF_MONTH);
-       mon = CMOS_READ(RTC_MONTH);
-       year = CMOS_READ(RTC_YEAR);
-
-       sec = bcd2bin(sec);
-       min = bcd2bin(min);
-       hour = bcd2bin(hour);
-       day = bcd2bin(day);
-       mon = bcd2bin(mon);
-       year = bcd2bin(year);
-
-       if ((year += 1900) < 1970)
-               year += 100;
-
-       return mktime(year, mon, day, hour, min, sec);
+       return 0;
 }
 
 
@@ -132,7 +70,7 @@ int update_persistent_clock(struct timespec now)
 
 void read_persistent_clock(struct timespec *ts)
 {
-       ts->tv_sec = get_cmos_time();
+       ts->tv_sec = 0;
        ts->tv_nsec = 0;
 }
 
index a6990cb0f098772c8739b8e7e738789414af7dc8..a68b983dcea1bd413630c03e375f20a21baa5db8 100644 (file)
@@ -52,6 +52,7 @@ SECTIONS
 
        EXCEPTION_TABLE(4)
 
+       _sdata = .;
        RODATA
 
        . = ALIGN (4);
index b4760d86e1bba7b8674b933eb66d798e5ce4d1c9..45fd542cf173df075c556d3fb1586d740298b426 100644 (file)
@@ -58,6 +58,8 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
        struct vm_area_struct * vma;
        siginfo_t info;
        int fault;
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+                               ((writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
 
        D(printk(KERN_DEBUG
                 "Page fault for %lX on %X at %lX, prot %d write %d\n",
@@ -115,6 +117,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
        if (in_atomic() || !mm)
                goto no_context;
 
+retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
@@ -163,7 +166,11 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
         * the fault.
         */
 
-       fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
+       fault = handle_mm_fault(mm, vma, address, flags);
+
+       if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+               return;
+
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
@@ -171,10 +178,24 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
                        goto do_sigbus;
                BUG();
        }
-       if (fault & VM_FAULT_MAJOR)
-               tsk->maj_flt++;
-       else
-               tsk->min_flt++;
+
+       if (flags & FAULT_FLAG_ALLOW_RETRY) {
+               if (fault & VM_FAULT_MAJOR)
+                       tsk->maj_flt++;
+               else
+                       tsk->min_flt++;
+               if (fault & VM_FAULT_RETRY) {
+                       flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+                       /*
+                        * No need to up_read(&mm->mmap_sem) as we would
+                        * have already released it in __lock_page_or_retry
+                        * in mm/filemap.c.
+                        */
+
+                       goto retry;
+               }
+       }
 
        up_read(&mm->mmap_sem);
        return;
diff --git a/arch/frv/include/asm/kvm_para.h b/arch/frv/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index 3f34cb45fbb3fafd24edff901ca8b52d9941532d..fe512af74a5afbb57dbc1490214155cdbbeb220d 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index 54ab13a0de415c1c76d2449626ad5d3ef92c3565..0ff03a33c81e8b193da81f690ce94c0f0c9ad82b 100644 (file)
@@ -94,8 +94,8 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
 #define TIF_SINGLESTEP         4       /* restore singlestep on return to user mode */
 #define TIF_RESTORE_SIGMASK    5       /* restore signal mask in do_signal() */
-#define TIF_POLLING_NRFLAG     16      /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE             17      /* is terminating due to OOM killer */
+#define TIF_POLLING_NRFLAG     6       /* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_MEMDIE             7       /* is terminating due to OOM killer */
 
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
@@ -105,8 +105,16 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define _TIF_RESTORE_SIGMASK   (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 
-#define _TIF_WORK_MASK         0x0000FFFE      /* work to do on interrupt/exception return */
-#define _TIF_ALLWORK_MASK      0x0000FFFF      /* work to do on any return to u-space */
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK         \
+       (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_SINGLESTEP)
+
+/* work to do on any return to u-space */
+#define _TIF_ALLWORK_MASK      (_TIF_WORK_MASK | _TIF_SYSCALL_TRACE)
+
+#if _TIF_ALLWORK_MASK >= 0x2000
+#error "_TIF_ALLWORK_MASK won't fit in an ANDI now (see entry.S)"
+#endif
 
 /*
  * Thread-synchronous status.
index 5ba23f715ea5e7f0f3dfcc0cab6a860519e41ec6..7d5e000fd32e0e4d40c78c2fcec1806f17bb1286 100644 (file)
@@ -905,18 +905,19 @@ __syscall_call:
 __syscall_exit:
        LEDS            0x6300
 
-       sti             gr8,@(gr28,#REG_GR(8))  ; save return value
+       # keep current PSR in GR23
+       movsg           psr,gr23
 
-       # rebuild saved psr - execve will change it for init/main.c
        ldi             @(gr28,#REG_PSR),gr22
+
+       sti.p           gr8,@(gr28,#REG_GR(8))  ; save return value
+
+       # rebuild saved psr - execve will change it for init/main.c
        srli            gr22,#1,gr5
        andi.p          gr22,#~PSR_PS,gr22
        andi            gr5,#PSR_PS,gr5
        or              gr5,gr22,gr22
-       ori             gr22,#PSR_S,gr22
-
-       # keep current PSR in GR23
-       movsg           psr,gr23
+       ori.p           gr22,#PSR_S,gr22
 
        # make sure we don't miss an interrupt setting need_resched or sigpending between
        # sampling and the RETT
@@ -924,9 +925,7 @@ __syscall_exit:
        movgs           gr23,psr
 
        ldi             @(gr15,#TI_FLAGS),gr4
-       sethi.p         %hi(_TIF_ALLWORK_MASK),gr5
-       setlo           %lo(_TIF_ALLWORK_MASK),gr5
-       andcc           gr4,gr5,gr0,icc0
+       andicc          gr4,#_TIF_ALLWORK_MASK,gr0,icc0
        bne             icc0,#0,__syscall_exit_work
 
        # restore all registers and return
@@ -1111,9 +1110,7 @@ __entry_resume_userspace:
 __entry_return_from_user_interrupt:
        LEDS            0x6402
        ldi             @(gr15,#TI_FLAGS),gr4
-       sethi.p         %hi(_TIF_WORK_MASK),gr5
-       setlo           %lo(_TIF_WORK_MASK),gr5
-       andcc           gr4,gr5,gr0,icc0
+       andicc          gr4,#_TIF_WORK_MASK,gr0,icc0
        beq             icc0,#1,__entry_return_direct
 
 __entry_work_pending:
@@ -1133,9 +1130,7 @@ __entry_work_resched:
 
        LEDS            0x6401
        ldi             @(gr15,#TI_FLAGS),gr4
-       sethi.p         %hi(_TIF_WORK_MASK),gr5
-       setlo           %lo(_TIF_WORK_MASK),gr5
-       andcc           gr4,gr5,gr0,icc0
+       andicc          gr4,#_TIF_WORK_MASK,gr0,icc0
        beq             icc0,#1,__entry_return_direct
        andicc          gr4,#_TIF_NEED_RESCHED,gr0,icc0
        bne             icc0,#1,__entry_work_resched
@@ -1163,7 +1158,9 @@ __syscall_trace_entry:
        # perform syscall exit tracing
 __syscall_exit_work:
        LEDS            0x6340
-       andicc          gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
+       andicc          gr22,#PSR_PS,gr0,icc1   ; don't handle on return to kernel mode
+       andicc.p        gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
+       bne             icc1,#0,__entry_return_direct
        beq             icc0,#1,__entry_work_pending
 
        movsg           psr,gr23
index 8cf5dca01758f75e63041d2c81edf1d8542bbb0c..864c2f0d497bfa62800e4168cac94215bff4d128 100644 (file)
@@ -28,8 +28,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 struct fdpic_func_descriptor {
        unsigned long   text;
        unsigned long   GOT;
@@ -149,7 +147,6 @@ asmlinkage int sys_sigreturn(void)
            __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(&frame->sc, &gr8))
@@ -172,7 +169,6 @@ asmlinkage int sys_rt_sigreturn(void)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(&frame->uc.uc_mcontext, &gr8))
@@ -426,9 +422,10 @@ give_sigsegv:
 /*
  * OK, we're invoking a handler
  */
-static int handle_signal(unsigned long sig, siginfo_t *info,
-                        struct k_sigaction *ka, sigset_t *oldset)
+static void handle_signal(unsigned long sig, siginfo_t *info,
+                        struct k_sigaction *ka)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Are we from a system call? */
@@ -460,11 +457,11 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
        else
                ret = setup_frame(sig, ka, oldset);
 
-       if (ret == 0)
-               block_sigmask(ka, sig);
-
-       return ret;
+       if (ret)
+               return;
 
+       signal_delivered(sig, info, ka, __frame,
+                                test_thread_flag(TIF_SINGLESTEP));
 } /* end handle_signal() */
 
 /*****************************************************************************/
@@ -477,44 +474,14 @@ static void do_signal(void)
 {
        struct k_sigaction ka;
        siginfo_t info;
-       sigset_t *oldset;
        int signr;
 
-       /*
-        * We want the common case to go fast, which
-        * is why we may in certain cases get here from
-        * kernel mode. Just return without doing anything
-        * if so.
-        */
-       if (!user_mode(__frame))
-               return;
-
-       if (try_to_freeze())
-               goto no_signal;
-
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, __frame, NULL);
        if (signr > 0) {
-               if (handle_signal(signr, &info, &ka, oldset) == 0) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-                       tracehook_signal_handler(signr, &info, &ka, __frame,
-                                                test_thread_flag(TIF_SINGLESTEP));
-               }
-
+               handle_signal(signr, &info, &ka);
                return;
        }
 
-no_signal:
        /* Did we come from a system call? */
        if (__frame->syscallno != -1) {
                /* Restart the system call - no handlers present */
@@ -536,11 +503,7 @@ no_signal:
 
        /* if there's no signal to deliver, we just put the saved sigmask
         * back */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
-
+       restore_saved_sigmask();
 } /* end do_signal() */
 
 /*****************************************************************************/
@@ -555,15 +518,13 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
                clear_thread_flag(TIF_SINGLESTEP);
 
        /* deal with pending signal delivery */
-       if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+       if (thread_info_flags & _TIF_SIGPENDING)
                do_signal();
 
        /* deal with notification on about to resume userspace execution */
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(__frame);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 
 } /* end do_notify_resume() */
diff --git a/arch/h8300/include/asm/kvm_para.h b/arch/h8300/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index bc4c34efb1ad167ccafa90ee8796d605058572a0..91e62ba4c7b02e99cf73893ef04e6a8cf259025a 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index d4b0555d29047851d9a80d9ae020f30eecc1a370..fca10378701bb9c818b8c123fb4d3c05522cd0c6 100644 (file)
@@ -47,8 +47,6 @@
 #include <asm/traps.h>
 #include <asm/ucontext.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
  */
@@ -186,7 +184,6 @@ asmlinkage int do_sigreturn(unsigned long __unused,...)
                              sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        
        if (restore_sigcontext(regs, &frame->sc, &er0))
@@ -211,7 +208,6 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused,...)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &er0))
@@ -412,8 +408,9 @@ give_sigsegv:
  */
 static void
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-             sigset_t *oldset, struct pt_regs * regs)
+             struct pt_regs * regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
        /* are we from a system call? */
        if (regs->orig_er0 >= 0) {
@@ -441,10 +438,8 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
        else
                ret = setup_frame(sig, ka, oldset, regs);
 
-       if (!ret) {
-               block_sigmask(ka, sig);
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-       }
+       if (!ret)
+               signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -457,7 +452,6 @@ statis void do_signal(struct pt_regs *regs)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
 
        /*
         * We want the common case to go fast, which
@@ -468,23 +462,14 @@ statis void do_signal(struct pt_regs *regs)
        if ((regs->ccr & 0x10))
                return;
 
-       if (try_to_freeze())
-               goto no_signal;
-
        current->thread.esp0 = (unsigned long) regs;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
-               handle_signal(signr, &info, &ka, oldset, regs);
+               handle_signal(signr, &info, &ka, regs);
                return;
        }
- no_signal:
        /* Did we come from a system call? */
        if (regs->orig_er0 >= 0) {
                /* Restart the system call - no handlers present */
@@ -501,8 +486,7 @@ statis void do_signal(struct pt_regs *regs)
        }
 
        /* If there's no signal to deliver, we just restore the saved mask.  */
-       if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
-               set_current_blocked(&current->saved_sigmask);
+       restore_saved_sigmask();
 }
 
 asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
@@ -513,7 +497,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
diff --git a/arch/hexagon/include/asm/kvm_para.h b/arch/hexagon/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index 434866eb0f1cf36d639c5e14367458bf272522df..304b0808d07213f0853ee256d781a114f561a947 100644 (file)
@@ -31,8 +31,6 @@
 #include <asm/signal.h>
 #include <asm/vdso.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 struct rt_sigframe {
        unsigned long tramp[2];
        struct siginfo info;
@@ -149,11 +147,9 @@ sigsegv:
 /*
  * Setup invocation of signal handler
  */
-static int handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
-                        sigset_t *oldset, struct pt_regs *regs)
+static void handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
+                        struct pt_regs *regs)
 {
-       int rc;
-
        /*
         * If we're handling a signal that aborted a system call,
         * set up the error return value before adding the signal
@@ -186,15 +182,12 @@ static int handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
         * Set up the stack frame; not doing the SA_SIGINFO thing.  We
         * only set up the rt_frame flavor.
         */
-       rc = setup_rt_frame(sig, ka, info, oldset, regs);
-
        /* If there was an error on setup, no signal was delivered. */
-       if (rc)
-               return rc;
-
-       block_sigmask(ka, sig);
+       if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0)
+               return;
 
-       return 0;
+       signal_delivered(sig, info, ka, regs,
+                       test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -209,34 +202,13 @@ static void do_signal(struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (try_to_freeze())
-               goto no_signal;
-
        signo = get_signal_to_deliver(&info, &sigact, regs, NULL);
 
        if (signo > 0) {
-               sigset_t *oldset;
-
-               if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                       oldset = &current->saved_sigmask;
-               else
-                       oldset = &current->blocked;
-
-               if (handle_signal(signo, &info, &sigact, oldset, regs) == 0) {
-                       /*
-                        * Successful delivery case.  The saved sigmask is
-                        * stored in the signal frame, and will be restored
-                        * by sigreturn.  We can clear the TIF flag.
-                        */
-                       clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-                       tracehook_signal_handler(signo, &info, &sigact, regs,
-                               test_thread_flag(TIF_SINGLESTEP));
-               }
+               handle_signal(signo, &info, &sigact, regs);
                return;
        }
 
-no_signal:
        /*
         * If we came from a system call, handle the restart.
         */
@@ -259,10 +231,7 @@ no_signal:
 
 no_restart:
        /* If there's no signal to deliver, put the saved sigmask back */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
@@ -273,8 +242,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
 
@@ -303,7 +270,6 @@ asmlinkage int sys_rt_sigreturn(void)
        if (__copy_from_user(&blocked, &frame->uc.uc_sigmask, sizeof(blocked)))
                goto badframe;
 
-       sigdelsetmask(&blocked, ~_BLOCKABLE);
        set_current_blocked(&blocked);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
index e35b3a84a40bb8fd4cdc688000fde06484ef8956..6d6a5ac48d8560a367cc8380880594d1ff21c222 100644 (file)
@@ -365,6 +365,7 @@ struct thash_cb {
 };
 
 struct kvm_vcpu_stat {
+       u32 halt_wakeup;
 };
 
 struct kvm_vcpu_arch {
@@ -448,6 +449,8 @@ struct kvm_vcpu_arch {
        char log_buf[VMM_LOG_LEN];
        union context host;
        union context guest;
+
+       char mmio_data[8];
 };
 
 struct kvm_vm_stat {
index 1588aee781a28956411a8800a2ddb98abbab93bf..2019cb99335e2d5da84546cd977d2a46f92d4340 100644 (file)
@@ -26,6 +26,11 @@ static inline unsigned int kvm_arch_para_features(void)
        return 0;
 }
 
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+       return false;
+}
+
 #endif
 
 #endif
index 7323ab9467ebae726473512588d2c8f41d0ceb40..99ee1d6510cfc98a7dc66128fca841af022b133f 100644 (file)
@@ -1,9 +1,6 @@
 #ifndef _ASM_IA64_POSIX_TYPES_H
 #define _ASM_IA64_POSIX_TYPES_H
 
-typedef unsigned int   __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned long  __kernel_sigset_t;      /* at least 32 bits */
 
 #include <asm-generic/posix_types.h>
index 310d9734f02d11a2537311b6860a736ff5fa63a7..f7ee85378311a0f2d241a458640f697b4940a2a1 100644 (file)
@@ -141,7 +141,23 @@ static inline void set_restore_sigmask(void)
 {
        struct thread_info *ti = current_thread_info();
        ti->status |= TS_RESTORE_SIGMASK;
-       set_bit(TIF_SIGPENDING, &ti->flags);
+       WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+       return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       if (!(ti->status & TS_RESTORE_SIGMASK))
+               return false;
+       ti->status &= ~TS_RESTORE_SIGMASK;
+       return true;
 }
 #endif /* !__ASSEMBLY__ */
 
index f00ba025375d5696d0070bfe640b6f26f554eebd..d7f558c1e7117bfff75a056d4fee9213c6a4b7fb 100644 (file)
@@ -604,12 +604,6 @@ pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
        spin_unlock(&(x)->ctx_lock);
 }
 
-static inline unsigned long 
-pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
-{
-       return get_unmapped_area(file, addr, len, pgoff, flags);
-}
-
 /* forward declaration */
 static const struct dentry_operations pfmfs_dentry_operations;
 
@@ -2333,8 +2327,8 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
        down_write(&task->mm->mmap_sem);
 
        /* find some free area in address space, must have mmap sem held */
-       vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
-       if (vma->vm_start == 0UL) {
+       vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
+       if (IS_ERR_VALUE(vma->vm_start)) {
                DPRINT(("Cannot find unmapped area for size %ld\n", size));
                up_write(&task->mm->mmap_sem);
                goto error;
index 5e0e86ddb12f7801ec0a1f7e5579dae48168d5a4..dd6fc14497419dca929be82a44b55ce638bc4705 100644 (file)
@@ -199,8 +199,6 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
        if (test_thread_flag(TIF_NOTIFY_RESUME)) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(&scr->pt);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 
        /* copy user rbs to kernel rbs */
index 7523501d3bc087bbb39a22573776ba365d131aeb..a199be1fe619bc12d00a9e87198b6f919a9f2c74 100644 (file)
@@ -30,7 +30,6 @@
 
 #define DEBUG_SIG      0
 #define STACK_ALIGN    16              /* minimal alignment for stack pointer */
-#define _BLOCKABLE     (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
 #if _NSIG_WORDS > 1
 # define PUT_SIGSET(k,u)       __copy_to_user((u)->sig, (k)->sig, sizeof(sigset_t))
@@ -200,7 +199,6 @@ ia64_rt_sigreturn (struct sigscratch *scr)
        if (GET_SIGSET(&set, &sc->sc_mask))
                goto give_sigsegv;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(sc, scr))
@@ -415,18 +413,13 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
 }
 
 static long
-handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset,
+handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
               struct sigscratch *scr)
 {
-       if (!setup_frame(sig, ka, info, oldset, scr))
+       if (!setup_frame(sig, ka, info, sigmask_to_save(), scr))
                return 0;
 
-       block_sigmask(ka, sig);
-
-       /*
-        * Let tracing know that we've done the handler setup.
-        */
-       tracehook_signal_handler(sig, info, ka, &scr->pt,
+       signal_delivered(sig, info, ka, &scr->pt,
                                 test_thread_flag(TIF_SINGLESTEP));
 
        return 1;
@@ -440,7 +433,6 @@ void
 ia64_do_signal (struct sigscratch *scr, long in_syscall)
 {
        struct k_sigaction ka;
-       sigset_t *oldset;
        siginfo_t info;
        long restart = in_syscall;
        long errno = scr->pt.r8;
@@ -453,11 +445,6 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
        if (!user_mode(&scr->pt))
                return;
 
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        /*
         * This only loops in the rare cases of handle_signal() failing, in which case we
         * need to push through a forced SIGSEGV.
@@ -507,16 +494,8 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
                 * Whee!  Actually deliver the signal.  If the delivery failed, we need to
                 * continue to iterate in this loop so we can deliver the SIGSEGV...
                 */
-               if (handle_signal(signr, &ka, &info, oldset, scr)) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TS_RESTORE_SIGMASK flag.
-                        */
-                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+               if (handle_signal(signr, &ka, &info, scr))
                        return;
-               }
        }
 
        /* Did we come from a system call? */
@@ -538,8 +517,5 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
 
        /* if there's no signal to deliver, we just put the saved sigmask
         * back */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
index 609d50056a6c7bd9fba2d46b757960b3e8893388..d9439ef2f66187d9e864f91778b8a9022c41577c 100644 (file)
@@ -171,22 +171,9 @@ asmlinkage unsigned long
 ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags,
             unsigned long new_addr)
 {
-       extern unsigned long do_mremap (unsigned long addr,
-                                       unsigned long old_len,
-                                       unsigned long new_len,
-                                       unsigned long flags,
-                                       unsigned long new_addr);
-
-       down_write(&current->mm->mmap_sem);
-       {
-               addr = do_mremap(addr, old_len, new_len, flags, new_addr);
-       }
-       up_write(&current->mm->mmap_sem);
-
-       if (IS_ERR((void *) addr))
-               return addr;
-
-       force_successful_syscall_return();
+       addr = sys_mremap(addr, old_len, new_len, flags, new_addr);
+       if (!IS_ERR((void *) addr))
+               force_successful_syscall_return();
        return addr;
 }
 
index 463fb3bbe11ee13dbd903f65359864bd8f45b452..bd77cb507c1c7401124dbd3dc86fe663c17e6737 100644 (file)
@@ -232,12 +232,12 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
                goto mmio;
        vcpu->mmio_needed = 1;
-       vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
-       vcpu->mmio_size = kvm_run->mmio.len = p->size;
+       vcpu->mmio_fragments[0].gpa = kvm_run->mmio.phys_addr = p->addr;
+       vcpu->mmio_fragments[0].len = kvm_run->mmio.len = p->size;
        vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
 
        if (vcpu->mmio_is_write)
-               memcpy(vcpu->mmio_data, &p->data, p->size);
+               memcpy(vcpu->arch.mmio_data, &p->data, p->size);
        memcpy(kvm_run->mmio.data, &p->data, p->size);
        kvm_run->exit_reason = KVM_EXIT_MMIO;
        return 0;
@@ -719,7 +719,7 @@ static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
        struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
 
        if (!vcpu->mmio_is_write)
-               memcpy(&p->data, vcpu->mmio_data, 8);
+               memcpy(&p->data, vcpu->arch.mmio_data, 8);
        p->state = STATE_IORESP_READY;
 }
 
@@ -739,7 +739,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        }
 
        if (vcpu->mmio_needed) {
-               memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
+               memcpy(vcpu->arch.mmio_data, kvm_run->mmio.data, 8);
                kvm_set_mmio_data(vcpu);
                vcpu->mmio_read_completed = 1;
                vcpu->mmio_needed = 0;
@@ -1872,21 +1872,6 @@ void kvm_arch_hardware_unsetup(void)
 {
 }
 
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
-{
-       int me;
-       int cpu = vcpu->cpu;
-
-       if (waitqueue_active(&vcpu->wq))
-               wake_up_interruptible(&vcpu->wq);
-
-       me = get_cpu();
-       if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
-               if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
-                       smp_send_reschedule(cpu);
-       put_cpu();
-}
-
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
 {
        return __apic_accept_irq(vcpu, irq->vector);
@@ -1956,6 +1941,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
                (kvm_highest_pending_irq(vcpu) != -1);
 }
 
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+       return (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests));
+}
+
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
index 0195850e1f88698b7a6c29ffd8b807b9e38a5b53..236de26a409b3f9a3d85df67129d88a488d025d1 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index f54d96993ea187686f7264fdab3867d5ffc821ca..f3fb2c029cfcab061fbbfb5697273067e57c38e8 100644 (file)
@@ -28,8 +28,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
                unsigned long r2, unsigned long r3, unsigned long r4,
@@ -111,7 +109,6 @@ sys_rt_sigreturn(unsigned long r0, unsigned long r1,
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &result))
@@ -267,9 +264,9 @@ static int prev_insn(struct pt_regs *regs)
  * OK, we're invoking a handler
  */
 
-static int
+static void
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
-             sigset_t *oldset, struct pt_regs *regs)
+             struct pt_regs *regs)
 {
        /* Are we from a system call? */
        if (regs->syscall_nr >= 0) {
@@ -294,11 +291,10 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
        }
 
        /* Set up the stack frame */
-       if (setup_rt_frame(sig, ka, info, oldset, regs))
-               return -EFAULT;
+       if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs))
+               return;
 
-       block_sigmask(ka, sig);
-       return 0;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -311,7 +307,6 @@ static void do_signal(struct pt_regs *regs)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
 
        /*
         * We want the common case to go fast, which
@@ -322,14 +317,6 @@ static void do_signal(struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (try_to_freeze()) 
-               goto no_signal;
-
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Re-enable any watchpoints before delivering the
@@ -339,13 +326,11 @@ static void do_signal(struct pt_regs *regs)
                 */
 
                /* Whee!  Actually deliver the signal.  */
-               if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
-                       clear_thread_flag(TIF_RESTORE_SIGMASK);
+               handle_signal(signr, &ka, &info, regs);
 
                return;
        }
 
- no_signal:
        /* Did we come from a system call? */
        if (regs->syscall_nr >= 0) {
                /* Restart the system call - no handlers present */
@@ -360,10 +345,7 @@ static void do_signal(struct pt_regs *regs)
                        prev_insn(regs);
                }
        }
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 /*
@@ -383,8 +365,6 @@ void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 
        clear_thread_flag(TIF_IRET);
diff --git a/arch/m68k/include/asm/kvm_para.h b/arch/m68k/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index 6373093be72bb049f37f071468c3b6c73d6daafe..cf4dbf70fdc73f116f95a83c698511fd7b5f4a62 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index d9f3d1900eed029a5044fb05845dceeccb261844..710a528b928b8580ac4281449586ee4507966db9 100644 (file)
@@ -51,8 +51,6 @@
 #include <asm/traps.h>
 #include <asm/ucontext.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 #ifdef CONFIG_MMU
 
 /*
@@ -795,7 +793,6 @@ asmlinkage int do_sigreturn(unsigned long __unused)
                              sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->sc, frame + 1))
@@ -820,7 +817,6 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (rt_restore_ucontext(regs, sw, &frame->uc))
@@ -1123,8 +1119,9 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
  */
 static void
 handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
-             sigset_t *oldset, struct pt_regs *regs)
+             struct pt_regs *regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int err;
        /* are we from a system call? */
        if (regs->orig_d0 >= 0)
@@ -1140,14 +1137,12 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
        if (err)
                return;
 
-       block_sigmask(ka, sig);
+       signal_delivered(sig, info, ka, regs, 0);
 
        if (test_thread_flag(TIF_DELAYED_TRACE)) {
                regs->sr &= ~0x8000;
                send_sig(SIGTRAP, current, 1);
        }
-
-       clear_thread_flag(TIF_RESTORE_SIGMASK);
 }
 
 /*
@@ -1160,19 +1155,13 @@ static void do_signal(struct pt_regs *regs)
        siginfo_t info;
        struct k_sigaction ka;
        int signr;
-       sigset_t *oldset;
 
        current->thread.esp0 = (unsigned long) regs;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
-               handle_signal(signr, &ka, &info, oldset, regs);
+               handle_signal(signr, &ka, &info, regs);
                return;
        }
 
@@ -1182,10 +1171,7 @@ static void do_signal(struct pt_regs *regs)
                handle_restart(regs, NULL, 0);
 
        /* If there's no signal to deliver, we just restore the saved mask.  */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs)
@@ -1193,9 +1179,6 @@ void do_notify_resume(struct pt_regs *regs)
        if (test_thread_flag(TIF_SIGPENDING))
                do_signal(regs);
 
-       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
+       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
-       }
 }
index 83460468998d5e56aee700ea5d2c219d9489b28f..0bf44231aaf91c023e2f2ac1dfa03c0a34da3a24 100644 (file)
@@ -52,7 +52,7 @@ config GENERIC_CALIBRATE_DELAY
        def_bool y
 
 config GENERIC_GPIO
-       def_bool y
+       bool
 
 config GENERIC_CSUM
        def_bool y
diff --git a/arch/microblaze/include/asm/kvm_para.h b/arch/microblaze/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index 1a8ab6a5c03fc3ab399cd8fa75003b4220e69cd5..6c610234ffab96c8d849dfc8dac498bfaf1c9919 100644 (file)
@@ -166,7 +166,23 @@ static inline void set_restore_sigmask(void)
 {
        struct thread_info *ti = current_thread_info();
        ti->status |= TS_RESTORE_SIGMASK;
-       set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
+       WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+       return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       if (!(ti->status & TS_RESTORE_SIGMASK))
+               return false;
+       ti->status &= ~TS_RESTORE_SIGMASK;
+       return true;
 }
 #endif
 
index daff9e5e4a1fb80cbdc317954a87d68092357da0..03f7b8ce6b6bcc6968816b1ef09d50401a35ee21 100644 (file)
@@ -492,10 +492,11 @@ C_ENTRY(sys_clone):
        bnei    r6, 1f;                 /* See if child SP arg (arg 1) is 0. */
        lwi     r6, r1, PT_R1;  /* If so, use paret's stack ptr */
 1:     addik   r7, r1, 0;                      /* Arg 2: parent context */
-       add     r8, r0, r0;                     /* Arg 3: (unused) */
-       add     r9, r0, r0;                     /* Arg 4: (unused) */
+       lwi     r9, r1, PT_R8;          /* parent tid.  */
+       lwi     r10, r1, PT_R9;         /* child tid.  */
+       /* do_fork will pick up TLS from regs->r10.  */
        brid    do_fork         /* Do real work (tail-call) */
-       add     r10, r0, r0;                    /* Arg 5: (unused) */
+       add     r8, r0, r0;             /* Arg 3: (unused) */
 
 C_ENTRY(sys_execve):
        brid    microblaze_execve;      /* Do real work (tail-call).*/
index e7eaa7a8cbd34d2404fae12769b8b3b4365ff752..fc1e1322ce4c9b58d1ad7527367569748d7c95d1 100644 (file)
@@ -138,7 +138,7 @@ NOALIGN_ENTRY(ftrace_call)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 /* static normal trace */
        lwi     r6, r1, 120; /* MS: load parent addr */
-       addik   r5, r15, 0; /* MS: load current function addr */
+       addik   r5, r15, -4; /* MS: load current function addr */
        /* MS: here is dependency on previous code */
        brald   r15, r20; /* MS: jump to ftrace handler */
        nop;
index 883b92789cdf849330d3b06e29fafef9b3381b39..1944e00f07e1d4185cbf8bbdf894591192ef6abd 100644 (file)
@@ -182,8 +182,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #endif
        ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;
 
+       /*
+        *  r21 is the thread reg, r10 is 6th arg to clone
+        *  which contains TLS area
+        */
        if (clone_flags & CLONE_SETTLS)
-               ;
+               childregs->r21 = childregs->r10;
 
        return 0;
 }
index 7f4c7bef1642e0107afb3ffc7bbb02ce87b74585..76b9722557db77e99a3500154a34b8ed6556d958 100644 (file)
@@ -41,8 +41,6 @@
 #include <asm/cacheflush.h>
 #include <asm/syscalls.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 asmlinkage long
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
                struct pt_regs *regs)
@@ -106,7 +104,6 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval))
@@ -310,10 +307,11 @@ do_restart:
  * OK, we're invoking a handler
  */
 
-static int
+static void
 handle_signal(unsigned long sig, struct k_sigaction *ka,
-               siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
+               siginfo_t *info, struct pt_regs *regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Set up the stack frame */
@@ -323,11 +321,9 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
                ret = setup_rt_frame(sig, ka, NULL, oldset, regs);
 
        if (ret)
-               return ret;
-
-       block_sigmask(ka, sig);
+               return;
 
-       return 0;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -344,33 +340,18 @@ static void do_signal(struct pt_regs *regs, int in_syscall)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
 #ifdef DEBUG_SIG
        printk(KERN_INFO "do signal: %p %d\n", regs, in_syscall);
        printk(KERN_INFO "do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1,
                        regs->r12, current_thread_info()->flags);
 #endif
 
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee! Actually deliver the signal. */
                if (in_syscall)
                        handle_restart(regs, &ka, 1);
-               if (!handle_signal(signr, &ka, &info, oldset, regs)) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TS_RESTORE_SIGMASK flag.
-                        */
-                       current_thread_info()->status &=
-                           ~TS_RESTORE_SIGMASK;
-               }
+               handle_signal(signr, &ka, &info, regs);
                return;
        }
 
@@ -381,10 +362,7 @@ static void do_signal(struct pt_regs *regs, int in_syscall)
         * If there's no signal to deliver, we just put the saved sigmask
         * back.
         */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs, int in_syscall)
@@ -401,9 +379,6 @@ void do_notify_resume(struct pt_regs *regs, int in_syscall)
        if (test_thread_flag(TIF_SIGPENDING))
                do_signal(regs, in_syscall);
 
-       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
+       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
-       }
 }
index c38a265846dec68c427398834e9833ceced31753..eb365d6795fa80448fbe4fe391b8b3211c0f4156 100644 (file)
@@ -92,6 +92,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
        int code = SEGV_MAPERR;
        int is_write = error_code & ESR_S;
        int fault;
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+                                        (is_write ? FAULT_FLAG_WRITE : 0);
 
        regs->ear = address;
        regs->esr = error_code;
@@ -138,6 +140,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
                if (kernel_mode(regs) && !search_exception_tables(regs->pc))
                        goto bad_area_nosemaphore;
 
+retry:
                down_read(&mm->mmap_sem);
        }
 
@@ -210,7 +213,11 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
+       fault = handle_mm_fault(mm, vma, address, flags);
+
+       if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+               return;
+
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
@@ -218,11 +225,27 @@ good_area:
                        goto do_sigbus;
                BUG();
        }
-       if (unlikely(fault & VM_FAULT_MAJOR))
-               current->maj_flt++;
-       else
-               current->min_flt++;
+
+       if (flags & FAULT_FLAG_ALLOW_RETRY) {
+               if (unlikely(fault & VM_FAULT_MAJOR))
+                       current->maj_flt++;
+               else
+                       current->min_flt++;
+               if (fault & VM_FAULT_RETRY) {
+                       flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+                       /*
+                        * No need to up_read(&mm->mmap_sem) as we would
+                        * have already released it in __lock_page_or_retry
+                        * in mm/filemap.c.
+                        */
+
+                       goto retry;
+               }
+       }
+
        up_read(&mm->mmap_sem);
+
        /*
         * keep track of tlb+htab misses that are good addrs but
         * just need pte's created via handle_mm_fault()
index 77050671eeef5ba9629cc5e991b23459350aa555..09ab87ee6fef654eef0220ab05b1224492947efc 100644 (file)
@@ -233,8 +233,9 @@ config LANTIQ
        select ARCH_REQUIRE_GPIOLIB
        select SWAP_IO_SPACE
        select BOOT_RAW
-       select HAVE_CLK
-       select MIPS_MACHINE
+       select HAVE_MACH_CLKDEV
+       select CLKDEV_LOOKUP
+       select USE_OF
 
 config LASAT
        bool "LASAT Networks platforms"
@@ -1783,10 +1784,12 @@ endchoice
 
 config FORCE_MAX_ZONEORDER
        int "Maximum zone order"
-       range 13 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB
-       default "13" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB
-       range 12 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB
-       default "12" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB
+       range 14 64 if HUGETLB_PAGE && PAGE_SIZE_64KB
+       default "14" if HUGETLB_PAGE && PAGE_SIZE_64KB
+       range 13 64 if HUGETLB_PAGE && PAGE_SIZE_32KB
+       default "13" if HUGETLB_PAGE && PAGE_SIZE_32KB
+       range 12 64 if HUGETLB_PAGE && PAGE_SIZE_16KB
+       default "12" if HUGETLB_PAGE && PAGE_SIZE_16KB
        range 11 64
        default "11"
        help
index 76017c25a9e6fd1af1ea1ac215658548c99f2156..764e37a9dbb34c1b6ecb870457bb0067934f556e 100644 (file)
@@ -219,8 +219,8 @@ endif
 
 KBUILD_AFLAGS  += $(cflags-y)
 KBUILD_CFLAGS  += $(cflags-y)
-KBUILD_CPPFLAGS += -D"VMLINUX_LOAD_ADDRESS=$(load-y)"
-KBUILD_CPPFLAGS += -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)"
+KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
+KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
 
 LDFLAGS                        += -m $(ld-emul)
 
index a83302b96c0163187b6f87710a34da57ada23a47..bf2248474fa8da6c4516c0567803c781636cb385 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/leds.h>
@@ -212,8 +213,6 @@ static int au1200_nand_device_ready(struct mtd_info *mtd)
        return __raw_readl((void __iomem *)MEM_STSTAT) & 1;
 }
 
-static const char *db1200_part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition db1200_nand_parts[] = {
        {
                .name   = "NAND FS 0",
@@ -234,7 +233,6 @@ struct platform_nand_data db1200_nand_platdata = {
                .nr_partitions  = ARRAY_SIZE(db1200_nand_parts),
                .partitions     = db1200_nand_parts,
                .chip_delay     = 20,
-               .part_probe_types = db1200_part_probes,
        },
        .ctrl = {
                .dev_ready      = au1200_nand_device_ready,
index 0893f2af0d01d543d0a5cc1add7d5179bcef7cca..c56e0246694ecdffbc4afda86374db50d8bc5cb8 100644 (file)
@@ -145,8 +145,6 @@ static int au1300_nand_device_ready(struct mtd_info *mtd)
        return __raw_readl((void __iomem *)MEM_STSTAT) & 1;
 }
 
-static const char *db1300_part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition db1300_nand_parts[] = {
        {
                .name   = "NAND FS 0",
@@ -167,7 +165,6 @@ struct platform_nand_data db1300_nand_platdata = {
                .nr_partitions  = ARRAY_SIZE(db1300_nand_parts),
                .partitions     = db1300_nand_parts,
                .chip_delay     = 20,
-               .part_probe_types = db1300_part_probes,
        },
        .ctrl = {
                .dev_ready      = au1300_nand_device_ready,
index 6815d0783cd8c195c8afdcdc47ab40c1753248e1..9eb79062f46e449f08a799b5f7c2d91a8c7696bc 100644 (file)
@@ -149,8 +149,6 @@ static int au1550_nand_device_ready(struct mtd_info *mtd)
        return __raw_readl((void __iomem *)MEM_STSTAT) & 1;
 }
 
-static const char *db1550_part_probes[] = { "cmdlinepart", NULL };
-
 static struct mtd_partition db1550_nand_parts[] = {
        {
                .name   = "NAND FS 0",
@@ -171,7 +169,6 @@ struct platform_nand_data db1550_nand_platdata = {
                .nr_partitions  = ARRAY_SIZE(db1550_nand_parts),
                .partitions     = db1550_nand_parts,
                .chip_delay     = 20,
-               .part_probe_types = db1550_part_probes,
        },
        .ctrl = {
                .dev_ready      = au1550_nand_device_ready,
index e0fae8f4442b792dbb86bcbbf15a260edf91b2ce..f44feee2d67f9f506bdeffe0d5bc2d205a860d7f 100644 (file)
@@ -26,6 +26,18 @@ config ATH79_MACH_AP81
          Say 'Y' here if you want your kernel to support the
          Atheros AP81 reference board.
 
+config ATH79_MACH_DB120
+       bool "Atheros DB120 reference board"
+       select SOC_AR934X
+       select ATH79_DEV_GPIO_BUTTONS
+       select ATH79_DEV_LEDS_GPIO
+       select ATH79_DEV_SPI
+       select ATH79_DEV_USB
+       select ATH79_DEV_WMAC
+       help
+         Say 'Y' here if you want your kernel to support the
+         Atheros DB120 reference board.
+
 config ATH79_MACH_PB44
        bool "Atheros PB44 reference board"
        select SOC_AR71XX
@@ -52,12 +64,14 @@ endmenu
 config SOC_AR71XX
        select USB_ARCH_HAS_EHCI
        select USB_ARCH_HAS_OHCI
+       select HW_HAS_PCI
        def_bool n
 
 config SOC_AR724X
        select USB_ARCH_HAS_EHCI
        select USB_ARCH_HAS_OHCI
        select HW_HAS_PCI
+       select PCI_AR724X if PCI
        def_bool n
 
 config SOC_AR913X
@@ -68,6 +82,15 @@ config SOC_AR933X
        select USB_ARCH_HAS_EHCI
        def_bool n
 
+config SOC_AR934X
+       select USB_ARCH_HAS_EHCI
+       select HW_HAS_PCI
+       select PCI_AR724X if PCI
+       def_bool n
+
+config PCI_AR724X
+       def_bool n
+
 config ATH79_DEV_GPIO_BUTTONS
        def_bool n
 
@@ -81,7 +104,7 @@ config ATH79_DEV_USB
        def_bool n
 
 config ATH79_DEV_WMAC
-       depends on (SOC_AR913X || SOC_AR933X)
+       depends on (SOC_AR913X || SOC_AR933X || SOC_AR934X)
        def_bool n
 
 endif
index 3b911e09dbecb5386136b4872affb515a8438caa..2b54d98263f30acb2b7db532fc44922d20b55385 100644 (file)
@@ -11,6 +11,7 @@
 obj-y  := prom.o setup.o irq.o common.o clock.o gpio.o
 
 obj-$(CONFIG_EARLY_PRINTK)             += early_printk.o
+obj-$(CONFIG_PCI)                      += pci.o
 
 #
 # Devices
@@ -27,5 +28,6 @@ obj-$(CONFIG_ATH79_DEV_WMAC)          += dev-wmac.o
 #
 obj-$(CONFIG_ATH79_MACH_AP121)         += mach-ap121.o
 obj-$(CONFIG_ATH79_MACH_AP81)          += mach-ap81.o
+obj-$(CONFIG_ATH79_MACH_DB120)         += mach-db120.o
 obj-$(CONFIG_ATH79_MACH_PB44)          += mach-pb44.o
 obj-$(CONFIG_ATH79_MACH_UBNT_XM)       += mach-ubnt-xm.o
index 54d0eb4db987266657f1ac5fa58085015d820f4c..b91ad3efe29e816a8b36567f2b6f090e9336c9a8 100644 (file)
@@ -1,8 +1,11 @@
 /*
  *  Atheros AR71XX/AR724X/AR913X common routines
  *
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
  *  Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
  *
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
@@ -163,6 +166,82 @@ static void __init ar933x_clocks_init(void)
        ath79_uart_clk.rate = ath79_ref_clk.rate;
 }
 
+static void __init ar934x_clocks_init(void)
+{
+       u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
+       u32 cpu_pll, ddr_pll;
+       u32 bootstrap;
+
+       bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+       if (bootstrap & AR934X_BOOTSTRAP_REF_CLK_40)
+               ath79_ref_clk.rate = 40 * 1000 * 1000;
+       else
+               ath79_ref_clk.rate = 25 * 1000 * 1000;
+
+       pll = ath79_pll_rr(AR934X_PLL_CPU_CONFIG_REG);
+       out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+                 AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;
+       ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+                 AR934X_PLL_CPU_CONFIG_REFDIV_MASK;
+       nint = (pll >> AR934X_PLL_CPU_CONFIG_NINT_SHIFT) &
+              AR934X_PLL_CPU_CONFIG_NINT_MASK;
+       frac = (pll >> AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+              AR934X_PLL_CPU_CONFIG_NFRAC_MASK;
+
+       cpu_pll = nint * ath79_ref_clk.rate / ref_div;
+       cpu_pll += frac * ath79_ref_clk.rate / (ref_div * (2 << 6));
+       cpu_pll /= (1 << out_div);
+
+       pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG);
+       out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+                 AR934X_PLL_DDR_CONFIG_OUTDIV_MASK;
+       ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+                 AR934X_PLL_DDR_CONFIG_REFDIV_MASK;
+       nint = (pll >> AR934X_PLL_DDR_CONFIG_NINT_SHIFT) &
+              AR934X_PLL_DDR_CONFIG_NINT_MASK;
+       frac = (pll >> AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+              AR934X_PLL_DDR_CONFIG_NFRAC_MASK;
+
+       ddr_pll = nint * ath79_ref_clk.rate / ref_div;
+       ddr_pll += frac * ath79_ref_clk.rate / (ref_div * (2 << 10));
+       ddr_pll /= (1 << out_div);
+
+       clk_ctrl = ath79_pll_rr(AR934X_PLL_CPU_DDR_CLK_CTRL_REG);
+
+       postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+                 AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK;
+
+       if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_PLL_BYPASS)
+               ath79_cpu_clk.rate = ath79_ref_clk.rate;
+       else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_CPUCLK_FROM_CPUPLL)
+               ath79_cpu_clk.rate = cpu_pll / (postdiv + 1);
+       else
+               ath79_cpu_clk.rate = ddr_pll / (postdiv + 1);
+
+       postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+                 AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_MASK;
+
+       if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_PLL_BYPASS)
+               ath79_ddr_clk.rate = ath79_ref_clk.rate;
+       else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL)
+               ath79_ddr_clk.rate = ddr_pll / (postdiv + 1);
+       else
+               ath79_ddr_clk.rate = cpu_pll / (postdiv + 1);
+
+       postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+                 AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_MASK;
+
+       if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_PLL_BYPASS)
+               ath79_ahb_clk.rate = ath79_ref_clk.rate;
+       else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+               ath79_ahb_clk.rate = ddr_pll / (postdiv + 1);
+       else
+               ath79_ahb_clk.rate = cpu_pll / (postdiv + 1);
+
+       ath79_wdt_clk.rate = ath79_ref_clk.rate;
+       ath79_uart_clk.rate = ath79_ref_clk.rate;
+}
+
 void __init ath79_clocks_init(void)
 {
        if (soc_is_ar71xx())
@@ -173,6 +252,8 @@ void __init ath79_clocks_init(void)
                ar913x_clocks_init();
        else if (soc_is_ar933x())
                ar933x_clocks_init();
+       else if (soc_is_ar934x())
+               ar934x_clocks_init();
        else
                BUG();
 
index f0fda982b9650698c6b641aeeb6c38d9ea16f060..5a4adfc9d79dc3b3fe891456530ff88e20c5ff90 100644 (file)
@@ -1,9 +1,12 @@
 /*
  *  Atheros AR71XX/AR724X/AR913X common routines
  *
- *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
@@ -67,6 +70,8 @@ void ath79_device_reset_set(u32 mask)
                reg = AR913X_RESET_REG_RESET_MODULE;
        else if (soc_is_ar933x())
                reg = AR933X_RESET_REG_RESET_MODULE;
+       else if (soc_is_ar934x())
+               reg = AR934X_RESET_REG_RESET_MODULE;
        else
                BUG();
 
@@ -91,6 +96,8 @@ void ath79_device_reset_clear(u32 mask)
                reg = AR913X_RESET_REG_RESET_MODULE;
        else if (soc_is_ar933x())
                reg = AR933X_RESET_REG_RESET_MODULE;
+       else if (soc_is_ar934x())
+               reg = AR934X_RESET_REG_RESET_MODULE;
        else
                BUG();
 
index f4956f809072e37f975ee1dd2ac9d3885c425422..45efc63b08b65a981b00dde2fb8526eb1d2382d9 100644 (file)
@@ -89,7 +89,8 @@ void __init ath79_register_uart(void)
 
        if (soc_is_ar71xx() ||
            soc_is_ar724x() ||
-           soc_is_ar913x()) {
+           soc_is_ar913x() ||
+           soc_is_ar934x()) {
                ath79_uart_data[0].uartclk = clk_get_rate(clk);
                platform_device_register(&ath79_uart_device);
        } else if (soc_is_ar933x()) {
index 4b0168a11c010ad7e808bed77329825ed1f582c7..366b35fb164dddbad566cb1c23ec8a25f518396b 100644 (file)
@@ -25,12 +25,10 @@ void __init ath79_register_gpio_keys_polled(int id,
        struct gpio_keys_button *p;
        int err;
 
-       p = kmalloc(nbuttons * sizeof(*p), GFP_KERNEL);
+       p = kmemdup(buttons, nbuttons * sizeof(*p), GFP_KERNEL);
        if (!p)
                return;
 
-       memcpy(p, buttons, nbuttons * sizeof(*p));
-
        pdev = platform_device_alloc("gpio-keys-polled", id);
        if (!pdev)
                goto err_free_buttons;
index cdade68dcd17a311d76af069c24e714d4aa690b6..dcb1debcefb8f81065ee48f7a7ceaa57fd16e6bb 100644 (file)
@@ -24,12 +24,10 @@ void __init ath79_register_leds_gpio(int id,
        struct gpio_led *p;
        int err;
 
-       p = kmalloc(num_leds * sizeof(*p), GFP_KERNEL);
+       p = kmemdup(leds, num_leds * sizeof(*p), GFP_KERNEL);
        if (!p)
                return;
 
-       memcpy(p, leds, num_leds * sizeof(*p));
-
        pdev = platform_device_alloc("leds-gpio", id);
        if (!pdev)
                goto err_free_leds;
index 9c717bf98ffe629f9f9c4061bd49b9114a9a99f5..d6d893c16ad405f6e48aee6f2ef088c36877a8be 100644 (file)
@@ -1,9 +1,12 @@
 /*
  *  Atheros AR913X/AR933X SoC built-in WMAC device support
  *
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
  *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
+ *  Parts of this file are based on Atheros 2.6.15/2.6.31 BSP
+ *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
@@ -26,8 +29,7 @@ static struct resource ath79_wmac_resources[] = {
                /* .start and .end fields are filled dynamically */
                .flags  = IORESOURCE_MEM,
        }, {
-               .start  = ATH79_CPU_IRQ_IP2,
-               .end    = ATH79_CPU_IRQ_IP2,
+               /* .start and .end fields are filled dynamically */
                .flags  = IORESOURCE_IRQ,
        },
 };
@@ -53,6 +55,8 @@ static void __init ar913x_wmac_setup(void)
 
        ath79_wmac_resources[0].start = AR913X_WMAC_BASE;
        ath79_wmac_resources[0].end = AR913X_WMAC_BASE + AR913X_WMAC_SIZE - 1;
+       ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
+       ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
 }
 
 
@@ -79,6 +83,8 @@ static void __init ar933x_wmac_setup(void)
 
        ath79_wmac_resources[0].start = AR933X_WMAC_BASE;
        ath79_wmac_resources[0].end = AR933X_WMAC_BASE + AR933X_WMAC_SIZE - 1;
+       ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
+       ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
 
        t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
        if (t & AR933X_BOOTSTRAP_REF_CLK_40)
@@ -92,12 +98,32 @@ static void __init ar933x_wmac_setup(void)
        ath79_wmac_data.external_reset = ar933x_wmac_reset;
 }
 
+static void ar934x_wmac_setup(void)
+{
+       u32 t;
+
+       ath79_wmac_device.name = "ar934x_wmac";
+
+       ath79_wmac_resources[0].start = AR934X_WMAC_BASE;
+       ath79_wmac_resources[0].end = AR934X_WMAC_BASE + AR934X_WMAC_SIZE - 1;
+       ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
+       ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1);
+
+       t = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+       if (t & AR934X_BOOTSTRAP_REF_CLK_40)
+               ath79_wmac_data.is_clk_25mhz = false;
+       else
+               ath79_wmac_data.is_clk_25mhz = true;
+}
+
 void __init ath79_register_wmac(u8 *cal_data)
 {
        if (soc_is_ar913x())
                ar913x_wmac_setup();
        else if (soc_is_ar933x())
                ar933x_wmac_setup();
+       else if (soc_is_ar934x())
+               ar934x_wmac_setup();
        else
                BUG();
 
index 6a51ced7a293fe840351d1e7b0ff5f467784a334..dc938cb2ba58f13d320d67b169eed69636cc5ed8 100644 (file)
@@ -71,6 +71,9 @@ static void prom_putchar_init(void)
        case REV_ID_MAJOR_AR7241:
        case REV_ID_MAJOR_AR7242:
        case REV_ID_MAJOR_AR913X:
+       case REV_ID_MAJOR_AR9341:
+       case REV_ID_MAJOR_AR9342:
+       case REV_ID_MAJOR_AR9344:
                _prom_putchar = prom_putchar_ar71xx;
                break;
 
index a2f8ca630ed667bb49c2144129f2232c7c047fc1..29054f211832505d371930d3e13f752af56f69f3 100644 (file)
@@ -1,9 +1,12 @@
 /*
  *  Atheros AR71XX/AR724X/AR913X GPIO API support
  *
- *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
@@ -89,6 +92,42 @@ static int ath79_gpio_direction_output(struct gpio_chip *chip,
        return 0;
 }
 
+static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+       void __iomem *base = ath79_gpio_base;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+       __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
+                    base + AR71XX_GPIO_REG_OE);
+
+       spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+       return 0;
+}
+
+static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+                                       int value)
+{
+       void __iomem *base = ath79_gpio_base;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+       if (value)
+               __raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
+       else
+               __raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
+
+       __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
+                    base + AR71XX_GPIO_REG_OE);
+
+       spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+       return 0;
+}
+
 static struct gpio_chip ath79_gpio_chip = {
        .label                  = "ath79",
        .get                    = ath79_gpio_get_value,
@@ -155,11 +194,17 @@ void __init ath79_gpio_init(void)
                ath79_gpio_count = AR913X_GPIO_COUNT;
        else if (soc_is_ar933x())
                ath79_gpio_count = AR933X_GPIO_COUNT;
+       else if (soc_is_ar934x())
+               ath79_gpio_count = AR934X_GPIO_COUNT;
        else
                BUG();
 
        ath79_gpio_base = ioremap_nocache(AR71XX_GPIO_BASE, AR71XX_GPIO_SIZE);
        ath79_gpio_chip.ngpio = ath79_gpio_count;
+       if (soc_is_ar934x()) {
+               ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
+               ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
+       }
 
        err = gpiochip_add(&ath79_gpio_chip);
        if (err)
index 1b073de44680ea0fdba8c95f84cf6f214f394448..90d09fc15398c18cd6ba89f1e3f33653da31a0f4 100644 (file)
@@ -1,10 +1,11 @@
 /*
  *  Atheros AR71xx/AR724x/AR913x specific interrupt handling
  *
- *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
- *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
  *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
@@ -23,8 +24,8 @@
 #include <asm/mach-ath79/ar71xx_regs.h>
 #include "common.h"
 
-static unsigned int ath79_ip2_flush_reg;
-static unsigned int ath79_ip3_flush_reg;
+static void (*ath79_ip2_handler)(void);
+static void (*ath79_ip3_handler)(void);
 
 static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
@@ -129,7 +130,7 @@ static void __init ath79_misc_irq_init(void)
 
        if (soc_is_ar71xx() || soc_is_ar913x())
                ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
-       else if (soc_is_ar724x() || soc_is_ar933x())
+       else if (soc_is_ar724x() || soc_is_ar933x() || soc_is_ar934x())
                ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
        else
                BUG();
@@ -143,6 +144,39 @@ static void __init ath79_misc_irq_init(void)
        irq_set_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler);
 }
 
+static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+{
+       u32 status;
+
+       disable_irq_nosync(irq);
+
+       status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS);
+
+       if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) {
+               ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_PCIE);
+               generic_handle_irq(ATH79_IP2_IRQ(0));
+       } else if (status & AR934X_PCIE_WMAC_INT_WMAC_ALL) {
+               ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_WMAC);
+               generic_handle_irq(ATH79_IP2_IRQ(1));
+       } else {
+               spurious_interrupt();
+       }
+
+       enable_irq(irq);
+}
+
+static void ar934x_ip2_irq_init(void)
+{
+       int i;
+
+       for (i = ATH79_IP2_IRQ_BASE;
+            i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
+               irq_set_chip_and_handler(i, &dummy_irq_chip,
+                                        handle_level_irq);
+
+       irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar934x_ip2_irq_dispatch);
+}
+
 asmlinkage void plat_irq_dispatch(void)
 {
        unsigned long pending;
@@ -152,10 +186,8 @@ asmlinkage void plat_irq_dispatch(void)
        if (pending & STATUSF_IP7)
                do_IRQ(ATH79_CPU_IRQ_TIMER);
 
-       else if (pending & STATUSF_IP2) {
-               ath79_ddr_wb_flush(ath79_ip2_flush_reg);
-               do_IRQ(ATH79_CPU_IRQ_IP2);
-       }
+       else if (pending & STATUSF_IP2)
+               ath79_ip2_handler();
 
        else if (pending & STATUSF_IP4)
                do_IRQ(ATH79_CPU_IRQ_GE0);
@@ -163,10 +195,8 @@ asmlinkage void plat_irq_dispatch(void)
        else if (pending & STATUSF_IP5)
                do_IRQ(ATH79_CPU_IRQ_GE1);
 
-       else if (pending & STATUSF_IP3) {
-               ath79_ddr_wb_flush(ath79_ip3_flush_reg);
-               do_IRQ(ATH79_CPU_IRQ_USB);
-       }
+       else if (pending & STATUSF_IP3)
+               ath79_ip3_handler();
 
        else if (pending & STATUSF_IP6)
                do_IRQ(ATH79_CPU_IRQ_MISC);
@@ -175,24 +205,97 @@ asmlinkage void plat_irq_dispatch(void)
                spurious_interrupt();
 }
 
+/*
+ * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
+ * these devices typically allocate coherent DMA memory, however the
+ * DMA controller may still have some unsynchronized data in the FIFO.
+ * Issue a flush in the handlers to ensure that the driver sees
+ * the update.
+ */
+static void ar71xx_ip2_handler(void)
+{
+       ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_PCI);
+       do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar724x_ip2_handler(void)
+{
+       ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_PCIE);
+       do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar913x_ip2_handler(void)
+{
+       ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_WMAC);
+       do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar933x_ip2_handler(void)
+{
+       ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_WMAC);
+       do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar934x_ip2_handler(void)
+{
+       do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar71xx_ip3_handler(void)
+{
+       ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_USB);
+       do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar724x_ip3_handler(void)
+{
+       ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_USB);
+       do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar913x_ip3_handler(void)
+{
+       ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_USB);
+       do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar933x_ip3_handler(void)
+{
+       ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_USB);
+       do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar934x_ip3_handler(void)
+{
+       ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_USB);
+       do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
 void __init arch_init_irq(void)
 {
        if (soc_is_ar71xx()) {
-               ath79_ip2_flush_reg = AR71XX_DDR_REG_FLUSH_PCI;
-               ath79_ip3_flush_reg = AR71XX_DDR_REG_FLUSH_USB;
+               ath79_ip2_handler = ar71xx_ip2_handler;
+               ath79_ip3_handler = ar71xx_ip3_handler;
        } else if (soc_is_ar724x()) {
-               ath79_ip2_flush_reg = AR724X_DDR_REG_FLUSH_PCIE;
-               ath79_ip3_flush_reg = AR724X_DDR_REG_FLUSH_USB;
+               ath79_ip2_handler = ar724x_ip2_handler;
+               ath79_ip3_handler = ar724x_ip3_handler;
        } else if (soc_is_ar913x()) {
-               ath79_ip2_flush_reg = AR913X_DDR_REG_FLUSH_WMAC;
-               ath79_ip3_flush_reg = AR913X_DDR_REG_FLUSH_USB;
+               ath79_ip2_handler = ar913x_ip2_handler;
+               ath79_ip3_handler = ar913x_ip3_handler;
        } else if (soc_is_ar933x()) {
-               ath79_ip2_flush_reg = AR933X_DDR_REG_FLUSH_WMAC;
-               ath79_ip3_flush_reg = AR933X_DDR_REG_FLUSH_USB;
-       } else
+               ath79_ip2_handler = ar933x_ip2_handler;
+               ath79_ip3_handler = ar933x_ip3_handler;
+       } else if (soc_is_ar934x()) {
+               ath79_ip2_handler = ar934x_ip2_handler;
+               ath79_ip3_handler = ar934x_ip3_handler;
+       } else {
                BUG();
+       }
 
        cp0_perfcount_irq = ATH79_MISC_IRQ_PERFC;
        mips_cpu_irq_init();
        ath79_misc_irq_init();
+
+       if (soc_is_ar934x())
+               ar934x_ip2_irq_init();
 }
diff --git a/arch/mips/ath79/mach-db120.c b/arch/mips/ath79/mach-db120.c
new file mode 100644 (file)
index 0000000..1983e4d
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Atheros DB120 reference board support
+ *
+ * Copyright (c) 2011 Qualcomm Atheros
+ * Copyright (c) 2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/ath9k_platform.h>
+
+#include "machtypes.h"
+#include "dev-gpio-buttons.h"
+#include "dev-leds-gpio.h"
+#include "dev-spi.h"
+#include "dev-wmac.h"
+#include "pci.h"
+
+#define DB120_GPIO_LED_WLAN_5G         12
+#define DB120_GPIO_LED_WLAN_2G         13
+#define DB120_GPIO_LED_STATUS          14
+#define DB120_GPIO_LED_WPS             15
+
+#define DB120_GPIO_BTN_WPS             16
+
+#define DB120_KEYS_POLL_INTERVAL       20      /* msecs */
+#define DB120_KEYS_DEBOUNCE_INTERVAL   (3 * DB120_KEYS_POLL_INTERVAL)
+
+#define DB120_WMAC_CALDATA_OFFSET 0x1000
+#define DB120_PCIE_CALDATA_OFFSET 0x5000
+
+static struct gpio_led db120_leds_gpio[] __initdata = {
+       {
+               .name           = "db120:green:status",
+               .gpio           = DB120_GPIO_LED_STATUS,
+               .active_low     = 1,
+       },
+       {
+               .name           = "db120:green:wps",
+               .gpio           = DB120_GPIO_LED_WPS,
+               .active_low     = 1,
+       },
+       {
+               .name           = "db120:green:wlan-5g",
+               .gpio           = DB120_GPIO_LED_WLAN_5G,
+               .active_low     = 1,
+       },
+       {
+               .name           = "db120:green:wlan-2g",
+               .gpio           = DB120_GPIO_LED_WLAN_2G,
+               .active_low     = 1,
+       },
+};
+
+static struct gpio_keys_button db120_gpio_keys[] __initdata = {
+       {
+               .desc           = "WPS button",
+               .type           = EV_KEY,
+               .code           = KEY_WPS_BUTTON,
+               .debounce_interval = DB120_KEYS_DEBOUNCE_INTERVAL,
+               .gpio           = DB120_GPIO_BTN_WPS,
+               .active_low     = 1,
+       },
+};
+
+static struct spi_board_info db120_spi_info[] = {
+       {
+               .bus_num        = 0,
+               .chip_select    = 0,
+               .max_speed_hz   = 25000000,
+               .modalias       = "s25sl064a",
+       }
+};
+
+static struct ath79_spi_platform_data db120_spi_data = {
+       .bus_num        = 0,
+       .num_chipselect = 1,
+};
+
+#ifdef CONFIG_PCI
+static struct ath9k_platform_data db120_ath9k_data;
+
+static int db120_pci_plat_dev_init(struct pci_dev *dev)
+{
+       switch (PCI_SLOT(dev->devfn)) {
+       case 0:
+               dev->dev.platform_data = &db120_ath9k_data;
+               break;
+       }
+
+       return 0;
+}
+
+static void __init db120_pci_init(u8 *eeprom)
+{
+       memcpy(db120_ath9k_data.eeprom_data, eeprom,
+              sizeof(db120_ath9k_data.eeprom_data));
+
+       ath79_pci_set_plat_dev_init(db120_pci_plat_dev_init);
+       ath79_register_pci();
+}
+#else
+static inline void db120_pci_init(u8 *eeprom) {}
+#endif /* CONFIG_PCI */
+
+static void __init db120_setup(void)
+{
+       u8 *art = (u8 *) KSEG1ADDR(0x1fff0000);
+
+       ath79_register_leds_gpio(-1, ARRAY_SIZE(db120_leds_gpio),
+                                db120_leds_gpio);
+       ath79_register_gpio_keys_polled(-1, DB120_KEYS_POLL_INTERVAL,
+                                       ARRAY_SIZE(db120_gpio_keys),
+                                       db120_gpio_keys);
+       ath79_register_spi(&db120_spi_data, db120_spi_info,
+                          ARRAY_SIZE(db120_spi_info));
+       ath79_register_wmac(art + DB120_WMAC_CALDATA_OFFSET);
+       db120_pci_init(art + DB120_PCIE_CALDATA_OFFSET);
+}
+
+MIPS_MACHINE(ATH79_MACH_DB120, "DB120", "Atheros DB120 reference board",
+            db120_setup);
index fe9701a322916bbb946b2cc037aa57fd02d6329f..c5f0ea5e00c38dc0e76312b401cf21793a91dcc5 100644 (file)
@@ -19,6 +19,7 @@
 #include "dev-leds-gpio.h"
 #include "dev-spi.h"
 #include "dev-usb.h"
+#include "pci.h"
 
 #define PB44_GPIO_I2C_SCL      0
 #define PB44_GPIO_I2C_SDA      1
@@ -114,6 +115,7 @@ static void __init pb44_init(void)
        ath79_register_spi(&pb44_spi_data, pb44_spi_info,
                           ARRAY_SIZE(pb44_spi_info));
        ath79_register_usb();
+       ath79_register_pci();
 }
 
 MIPS_MACHINE(ATH79_MACH_PB44, "PB44", "Atheros PB44 reference board",
index 3c311a5393471cb69cfa158bb2ba0c9ab417ddec..4a3c60694c756fedaa3a3f9e18687065789cb721 100644 (file)
 
 #include <linux/init.h>
 #include <linux/pci.h>
-
-#ifdef CONFIG_PCI
 #include <linux/ath9k_platform.h>
-#include <asm/mach-ath79/pci-ath724x.h>
-#endif /* CONFIG_PCI */
+
+#include <asm/mach-ath79/irq.h>
 
 #include "machtypes.h"
 #include "dev-gpio-buttons.h"
 #include "dev-leds-gpio.h"
 #include "dev-spi.h"
+#include "pci.h"
 
 #define UBNT_XM_GPIO_LED_L1            0
 #define UBNT_XM_GPIO_LED_L2            1
@@ -33,7 +32,6 @@
 #define UBNT_XM_KEYS_POLL_INTERVAL     20
 #define UBNT_XM_KEYS_DEBOUNCE_INTERVAL (3 * UBNT_XM_KEYS_POLL_INTERVAL)
 
-#define UBNT_XM_PCI_IRQ                        48
 #define UBNT_XM_EEPROM_ADDR            (u8 *) KSEG1ADDR(0x1fff1000)
 
 static struct gpio_led ubnt_xm_leds_gpio[] __initdata = {
@@ -84,12 +82,27 @@ static struct ath79_spi_platform_data ubnt_xm_spi_data = {
 #ifdef CONFIG_PCI
 static struct ath9k_platform_data ubnt_xm_eeprom_data;
 
-static struct ath724x_pci_data ubnt_xm_pci_data[] = {
-       {
-               .irq    = UBNT_XM_PCI_IRQ,
-               .pdata  = &ubnt_xm_eeprom_data,
-       },
-};
+static int ubnt_xm_pci_plat_dev_init(struct pci_dev *dev)
+{
+       switch (PCI_SLOT(dev->devfn)) {
+       case 0:
+               dev->dev.platform_data = &ubnt_xm_eeprom_data;
+               break;
+       }
+
+       return 0;
+}
+
+static void __init ubnt_xm_pci_init(void)
+{
+       memcpy(ubnt_xm_eeprom_data.eeprom_data, UBNT_XM_EEPROM_ADDR,
+              sizeof(ubnt_xm_eeprom_data.eeprom_data));
+
+       ath79_pci_set_plat_dev_init(ubnt_xm_pci_plat_dev_init);
+       ath79_register_pci();
+}
+#else
+static inline void ubnt_xm_pci_init(void) {}
 #endif /* CONFIG_PCI */
 
 static void __init ubnt_xm_init(void)
@@ -104,13 +117,7 @@ static void __init ubnt_xm_init(void)
        ath79_register_spi(&ubnt_xm_spi_data, ubnt_xm_spi_info,
                           ARRAY_SIZE(ubnt_xm_spi_info));
 
-#ifdef CONFIG_PCI
-       memcpy(ubnt_xm_eeprom_data.eeprom_data, UBNT_XM_EEPROM_ADDR,
-              sizeof(ubnt_xm_eeprom_data.eeprom_data));
-
-       ath724x_pci_add_data(ubnt_xm_pci_data, ARRAY_SIZE(ubnt_xm_pci_data));
-#endif /* CONFIG_PCI */
-
+       ubnt_xm_pci_init();
 }
 
 MIPS_MACHINE(ATH79_MACH_UBNT_XM,
index 9a1f3826626e13703c24abc17b05a930f7528558..af92e5c30d66306fd46490bea8c9cd49c56bd1c9 100644 (file)
@@ -18,6 +18,7 @@ enum ath79_mach_type {
        ATH79_MACH_GENERIC = 0,
        ATH79_MACH_AP121,               /* Atheros AP121 reference board */
        ATH79_MACH_AP81,                /* Atheros AP81 reference board */
+       ATH79_MACH_DB120,               /* Atheros DB120 reference board */
        ATH79_MACH_PB44,                /* Atheros PB44 reference board */
        ATH79_MACH_UBNT_XM,             /* Ubiquiti Networks XM board rev 1.0 */
 };
diff --git a/arch/mips/ath79/pci.c b/arch/mips/ath79/pci.c
new file mode 100644 (file)
index 0000000..ca83abd
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ *  Atheros AR71XX/AR724X specific PCI setup code
+ *
+ *  Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/irq.h>
+#include <asm/mach-ath79/pci.h>
+#include "pci.h"
+
+static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev);
+static const struct ath79_pci_irq *ath79_pci_irq_map __initdata;
+static unsigned ath79_pci_nr_irqs __initdata;
+
+static const struct ath79_pci_irq ar71xx_pci_irq_map[] __initconst = {
+       {
+               .slot   = 17,
+               .pin    = 1,
+               .irq    = ATH79_PCI_IRQ(0),
+       }, {
+               .slot   = 18,
+               .pin    = 1,
+               .irq    = ATH79_PCI_IRQ(1),
+       }, {
+               .slot   = 19,
+               .pin    = 1,
+               .irq    = ATH79_PCI_IRQ(2),
+       }
+};
+
+static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = {
+       {
+               .slot   = 0,
+               .pin    = 1,
+               .irq    = ATH79_PCI_IRQ(0),
+       }
+};
+
+int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
+{
+       int irq = -1;
+       int i;
+
+       if (ath79_pci_nr_irqs == 0 ||
+           ath79_pci_irq_map == NULL) {
+               if (soc_is_ar71xx()) {
+                       ath79_pci_irq_map = ar71xx_pci_irq_map;
+                       ath79_pci_nr_irqs = ARRAY_SIZE(ar71xx_pci_irq_map);
+               } else if (soc_is_ar724x() ||
+                          soc_is_ar9342() ||
+                          soc_is_ar9344()) {
+                       ath79_pci_irq_map = ar724x_pci_irq_map;
+                       ath79_pci_nr_irqs = ARRAY_SIZE(ar724x_pci_irq_map);
+               } else {
+                       pr_crit("pci %s: invalid irq map\n",
+                               pci_name((struct pci_dev *) dev));
+                       return irq;
+               }
+       }
+
+       for (i = 0; i < ath79_pci_nr_irqs; i++) {
+               const struct ath79_pci_irq *entry;
+
+               entry = &ath79_pci_irq_map[i];
+               if (entry->slot == slot && entry->pin == pin) {
+                       irq = entry->irq;
+                       break;
+               }
+       }
+
+       if (irq < 0)
+               pr_crit("pci %s: no irq found for pin %u\n",
+                       pci_name((struct pci_dev *) dev), pin);
+       else
+               pr_info("pci %s: using irq %d for pin %u\n",
+                       pci_name((struct pci_dev *) dev), irq, pin);
+
+       return irq;
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+       if (ath79_pci_plat_dev_init)
+               return ath79_pci_plat_dev_init(dev);
+
+       return 0;
+}
+
+void __init ath79_pci_set_irq_map(unsigned nr_irqs,
+                                 const struct ath79_pci_irq *map)
+{
+       ath79_pci_nr_irqs = nr_irqs;
+       ath79_pci_irq_map = map;
+}
+
+void __init ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev))
+{
+       ath79_pci_plat_dev_init = func;
+}
+
+int __init ath79_register_pci(void)
+{
+       if (soc_is_ar71xx())
+               return ar71xx_pcibios_init();
+
+       if (soc_is_ar724x())
+               return ar724x_pcibios_init(ATH79_CPU_IRQ_IP2);
+
+       if (soc_is_ar9342() || soc_is_ar9344()) {
+               u32 bootstrap;
+
+               bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+               if (bootstrap & AR934X_BOOTSTRAP_PCIE_RC)
+                       return ar724x_pcibios_init(ATH79_IP2_IRQ(0));
+       }
+
+       return -ENODEV;
+}
diff --git a/arch/mips/ath79/pci.h b/arch/mips/ath79/pci.h
new file mode 100644 (file)
index 0000000..51c6625
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ *  Atheros AR71XX/AR724X PCI support
+ *
+ *  Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#ifndef _ATH79_PCI_H
+#define _ATH79_PCI_H
+
+struct ath79_pci_irq {
+       u8      slot;
+       u8      pin;
+       int     irq;
+};
+
+#ifdef CONFIG_PCI
+void ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map);
+void ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev));
+int ath79_register_pci(void);
+#else
+static inline void
+ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map) {}
+static inline void
+ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *)) {}
+static inline int ath79_register_pci(void) { return 0; }
+#endif
+
+#endif /* _ATH79_PCI_H */
index 80a7d4023d7ffbd872ce76ed62bbe8b496d7e6bc..60d212ef86290c61d51ff5135e952506834caa2d 100644 (file)
@@ -1,10 +1,11 @@
 /*
  *  Atheros AR71XX/AR724X/AR913X specific setup
  *
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
  *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
- *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
  *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
@@ -116,18 +117,6 @@ static void __init ath79_detect_sys_type(void)
                rev = id & AR724X_REV_ID_REVISION_MASK;
                break;
 
-       case REV_ID_MAJOR_AR9330:
-               ath79_soc = ATH79_SOC_AR9330;
-               chip = "9330";
-               rev = id & AR933X_REV_ID_REVISION_MASK;
-               break;
-
-       case REV_ID_MAJOR_AR9331:
-               ath79_soc = ATH79_SOC_AR9331;
-               chip = "9331";
-               rev = id & AR933X_REV_ID_REVISION_MASK;
-               break;
-
        case REV_ID_MAJOR_AR913X:
                minor = id & AR913X_REV_ID_MINOR_MASK;
                rev = id >> AR913X_REV_ID_REVISION_SHIFT;
@@ -145,6 +134,36 @@ static void __init ath79_detect_sys_type(void)
                }
                break;
 
+       case REV_ID_MAJOR_AR9330:
+               ath79_soc = ATH79_SOC_AR9330;
+               chip = "9330";
+               rev = id & AR933X_REV_ID_REVISION_MASK;
+               break;
+
+       case REV_ID_MAJOR_AR9331:
+               ath79_soc = ATH79_SOC_AR9331;
+               chip = "9331";
+               rev = id & AR933X_REV_ID_REVISION_MASK;
+               break;
+
+       case REV_ID_MAJOR_AR9341:
+               ath79_soc = ATH79_SOC_AR9341;
+               chip = "9341";
+               rev = id & AR934X_REV_ID_REVISION_MASK;
+               break;
+
+       case REV_ID_MAJOR_AR9342:
+               ath79_soc = ATH79_SOC_AR9342;
+               chip = "9342";
+               rev = id & AR934X_REV_ID_REVISION_MASK;
+               break;
+
+       case REV_ID_MAJOR_AR9344:
+               ath79_soc = ATH79_SOC_AR9344;
+               chip = "9344";
+               rev = id & AR934X_REV_ID_REVISION_MASK;
+               break;
+
        default:
                panic("ath79: unknown SoC, id:0x%08x", id);
        }
index 9f64fb41407743358f55a36377617613fb9a0383..af07c1aa202fac322ec78e24664df14e1eee7d7a 100644 (file)
@@ -1,3 +1 @@
 obj-$(CONFIG_BOARD_BCM963XX)           += board_bcm963xx.o
-
-ccflags-y := -Werror
index d3a9f012aa0a57db9549d13a5d9ee26151218164..260dc247c052ca5874c23594f0db926a7fcfb1eb 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/delay.h>
+#include <linux/export.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/serial.h>
index 97e7ce9b50ed4066d276ed98e47937bfd5236d42..4b93048044eb266457b4c9ac80a164c056e6eba9 100644 (file)
@@ -257,8 +257,6 @@ DEFINE_PER_CPU(int, cpu_state);
 
 extern void fixup_irqs(void);
 
-static DEFINE_SPINLOCK(smp_reserve_lock);
-
 static int octeon_cpu_disable(void)
 {
        unsigned int cpu = smp_processor_id();
@@ -266,8 +264,6 @@ static int octeon_cpu_disable(void)
        if (cpu == 0)
                return -EBUSY;
 
-       spin_lock(&smp_reserve_lock);
-
        set_cpu_online(cpu, false);
        cpu_clear(cpu, cpu_callin_map);
        local_irq_disable();
@@ -277,8 +273,6 @@ static int octeon_cpu_disable(void)
        flush_cache_all();
        local_flush_tlb_all();
 
-       spin_unlock(&smp_reserve_lock);
-
        return 0;
 }
 
index 5314b37aff2c493a736eb438a47f03d33cba509f..4f349ec1ea2da4bb200a1c296d16f27b54650935 100644 (file)
@@ -8,5 +8,3 @@ lib-y                           += cmdline.o env.o file.o identify.o init.o \
 lib-$(CONFIG_ARC_MEMORY)       += memory.o
 lib-$(CONFIG_ARC_CONSOLE)      += arc_con.o
 lib-$(CONFIG_ARC_PROMLIB)      += promlib.o
-
-ccflags-y                      := -Werror
diff --git a/arch/mips/include/asm/clkdev.h b/arch/mips/include/asm/clkdev.h
new file mode 100644 (file)
index 0000000..2624754
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ *  based on arch/arm/include/asm/clkdev.h
+ *
+ *  Copyright (C) 2008 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Helper for the clk API to assist looking up a struct clk.
+ */
+#ifndef __ASM_CLKDEV_H
+#define __ASM_CLKDEV_H
+
+#include <linux/slab.h>
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do { } while (0)
+
+static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
+{
+       return kzalloc(size, GFP_KERNEL);
+}
+
+#endif
diff --git a/arch/mips/include/asm/kvm_para.h b/arch/mips/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index 2f0becb4ec8f490283ce1c22e1af3ae44d05c0f7..1caa78ad06d5833306ee367cd44fbefe5dbd6fe3 100644 (file)
@@ -1,10 +1,11 @@
 /*
  *  Atheros AR71XX/AR724X/AR913X SoC register definitions
  *
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
  *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
- *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
  *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
@@ -60,6 +61,9 @@
 #define AR933X_EHCI_BASE       0x1b000000
 #define AR933X_EHCI_SIZE       0x1000
 
+#define AR934X_WMAC_BASE       (AR71XX_APB_BASE + 0x00100000)
+#define AR934X_WMAC_SIZE       0x20000
+
 /*
  * DDR_CTRL block
  */
 #define AR933X_DDR_REG_FLUSH_USB       0x84
 #define AR933X_DDR_REG_FLUSH_WMAC      0x88
 
+#define AR934X_DDR_REG_FLUSH_GE0       0x9c
+#define AR934X_DDR_REG_FLUSH_GE1       0xa0
+#define AR934X_DDR_REG_FLUSH_USB       0xa4
+#define AR934X_DDR_REG_FLUSH_PCIE      0xa8
+#define AR934X_DDR_REG_FLUSH_WMAC      0xac
+
 /*
  * PLL block
  */
 #define AR933X_PLL_CLOCK_CTRL_AHB_DIV_SHIFT    15
 #define AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK     0x7
 
+#define AR934X_PLL_CPU_CONFIG_REG              0x00
+#define AR934X_PLL_DDR_CONFIG_REG              0x04
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_REG                0x08
+
+#define AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT      0
+#define AR934X_PLL_CPU_CONFIG_NFRAC_MASK       0x3f
+#define AR934X_PLL_CPU_CONFIG_NINT_SHIFT       6
+#define AR934X_PLL_CPU_CONFIG_NINT_MASK                0x3f
+#define AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT     12
+#define AR934X_PLL_CPU_CONFIG_REFDIV_MASK      0x1f
+#define AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT     19
+#define AR934X_PLL_CPU_CONFIG_OUTDIV_MASK      0x3
+
+#define AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT      0
+#define AR934X_PLL_DDR_CONFIG_NFRAC_MASK       0x3ff
+#define AR934X_PLL_DDR_CONFIG_NINT_SHIFT       10
+#define AR934X_PLL_DDR_CONFIG_NINT_MASK                0x3f
+#define AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT     16
+#define AR934X_PLL_DDR_CONFIG_REFDIV_MASK      0x1f
+#define AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT     23
+#define AR934X_PLL_DDR_CONFIG_OUTDIV_MASK      0x7
+
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_PLL_BYPASS     BIT(2)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_PLL_BYPASS     BIT(3)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_PLL_BYPASS     BIT(4)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT 5
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK  0x1f
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_SHIFT 10
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_MASK  0x1f
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_SHIFT 15
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_MASK  0x1f
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPUCLK_FROM_CPUPLL BIT(20)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+
 /*
  * USB_CONFIG block
  */
 #define AR933X_RESET_REG_RESET_MODULE          0x1c
 #define AR933X_RESET_REG_BOOTSTRAP             0xac
 
+#define AR934X_RESET_REG_RESET_MODULE          0x1c
+#define AR934X_RESET_REG_BOOTSTRAP             0xb0
+#define AR934X_RESET_REG_PCIE_WMAC_INT_STATUS  0xac
+
 #define MISC_INT_ETHSW                 BIT(12)
 #define MISC_INT_TIMER4                        BIT(10)
 #define MISC_INT_TIMER3                        BIT(9)
 
 #define AR933X_BOOTSTRAP_REF_CLK_40    BIT(0)
 
+#define AR934X_BOOTSTRAP_SW_OPTION8    BIT(23)
+#define AR934X_BOOTSTRAP_SW_OPTION7    BIT(22)
+#define AR934X_BOOTSTRAP_SW_OPTION6    BIT(21)
+#define AR934X_BOOTSTRAP_SW_OPTION5    BIT(20)
+#define AR934X_BOOTSTRAP_SW_OPTION4    BIT(19)
+#define AR934X_BOOTSTRAP_SW_OPTION3    BIT(18)
+#define AR934X_BOOTSTRAP_SW_OPTION2    BIT(17)
+#define AR934X_BOOTSTRAP_SW_OPTION1    BIT(16)
+#define AR934X_BOOTSTRAP_USB_MODE_DEVICE BIT(7)
+#define AR934X_BOOTSTRAP_PCIE_RC       BIT(6)
+#define AR934X_BOOTSTRAP_EJTAG_MODE    BIT(5)
+#define AR934X_BOOTSTRAP_REF_CLK_40    BIT(4)
+#define AR934X_BOOTSTRAP_BOOT_FROM_SPI BIT(2)
+#define AR934X_BOOTSTRAP_SDRAM_DISABLED        BIT(1)
+#define AR934X_BOOTSTRAP_DDR1          BIT(0)
+
+#define AR934X_PCIE_WMAC_INT_WMAC_MISC         BIT(0)
+#define AR934X_PCIE_WMAC_INT_WMAC_TX           BIT(1)
+#define AR934X_PCIE_WMAC_INT_WMAC_RXLP         BIT(2)
+#define AR934X_PCIE_WMAC_INT_WMAC_RXHP         BIT(3)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC           BIT(4)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC0          BIT(5)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC1          BIT(6)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC2          BIT(7)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC3          BIT(8)
+#define AR934X_PCIE_WMAC_INT_WMAC_ALL \
+       (AR934X_PCIE_WMAC_INT_WMAC_MISC | AR934X_PCIE_WMAC_INT_WMAC_TX | \
+        AR934X_PCIE_WMAC_INT_WMAC_RXLP | AR934X_PCIE_WMAC_INT_WMAC_RXHP)
+
+#define AR934X_PCIE_WMAC_INT_PCIE_ALL \
+       (AR934X_PCIE_WMAC_INT_PCIE_RC | AR934X_PCIE_WMAC_INT_PCIE_RC0 | \
+        AR934X_PCIE_WMAC_INT_PCIE_RC1 | AR934X_PCIE_WMAC_INT_PCIE_RC2 | \
+        AR934X_PCIE_WMAC_INT_PCIE_RC3)
+
 #define REV_ID_MAJOR_MASK              0xfff0
 #define REV_ID_MAJOR_AR71XX            0x00a0
 #define REV_ID_MAJOR_AR913X            0x00b0
 #define REV_ID_MAJOR_AR7242            0x1100
 #define REV_ID_MAJOR_AR9330            0x0110
 #define REV_ID_MAJOR_AR9331            0x1110
+#define REV_ID_MAJOR_AR9341            0x0120
+#define REV_ID_MAJOR_AR9342            0x1120
+#define REV_ID_MAJOR_AR9344            0x2120
 
 #define AR71XX_REV_ID_MINOR_MASK       0x3
 #define AR71XX_REV_ID_MINOR_AR7130     0x0
 
 #define AR724X_REV_ID_REVISION_MASK    0x3
 
+#define AR934X_REV_ID_REVISION_MASK     0xf
+
 /*
  * SPI block
  */
 #define AR724X_GPIO_COUNT              18
 #define AR913X_GPIO_COUNT              22
 #define AR933X_GPIO_COUNT              30
+#define AR934X_GPIO_COUNT              23
 
 #endif /* __ASM_MACH_AR71XX_REGS_H */
index 6d0c6c9d5622fbe7890add97b200e4e6ced76d02..4f248c3d7b237100e41a1bad3489d090ff8cbb29 100644 (file)
@@ -29,6 +29,9 @@ enum ath79_soc_type {
        ATH79_SOC_AR9132,
        ATH79_SOC_AR9330,
        ATH79_SOC_AR9331,
+       ATH79_SOC_AR9341,
+       ATH79_SOC_AR9342,
+       ATH79_SOC_AR9344,
 };
 
 extern enum ath79_soc_type ath79_soc;
@@ -75,6 +78,26 @@ static inline int soc_is_ar933x(void)
                ath79_soc == ATH79_SOC_AR9331);
 }
 
+static inline int soc_is_ar9341(void)
+{
+       return (ath79_soc == ATH79_SOC_AR9341);
+}
+
+static inline int soc_is_ar9342(void)
+{
+       return (ath79_soc == ATH79_SOC_AR9342);
+}
+
+static inline int soc_is_ar9344(void)
+{
+       return (ath79_soc == ATH79_SOC_AR9344);
+}
+
+static inline int soc_is_ar934x(void)
+{
+       return soc_is_ar9341() || soc_is_ar9342() || soc_is_ar9344();
+}
+
 extern void __iomem *ath79_ddr_base;
 extern void __iomem *ath79_pll_base;
 extern void __iomem *ath79_reset_base;
index 519958fe4e3c7d0532c6b639148e9aa2b9522e74..0968f69e2018527e6f35b40b7aa7f7be333323d8 100644 (file)
 #define __ASM_MACH_ATH79_IRQ_H
 
 #define MIPS_CPU_IRQ_BASE      0
-#define NR_IRQS                        40
+#define NR_IRQS                        48
 
 #define ATH79_MISC_IRQ_BASE    8
 #define ATH79_MISC_IRQ_COUNT   32
 
+#define ATH79_PCI_IRQ_BASE     (ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT)
+#define ATH79_PCI_IRQ_COUNT    6
+#define ATH79_PCI_IRQ(_x)      (ATH79_PCI_IRQ_BASE + (_x))
+
+#define ATH79_IP2_IRQ_BASE     (ATH79_PCI_IRQ_BASE + ATH79_PCI_IRQ_COUNT)
+#define ATH79_IP2_IRQ_COUNT    2
+#define ATH79_IP2_IRQ(_x)      (ATH79_IP2_IRQ_BASE + (_x))
+
 #define ATH79_CPU_IRQ_IP2      (MIPS_CPU_IRQ_BASE + 2)
 #define ATH79_CPU_IRQ_USB      (MIPS_CPU_IRQ_BASE + 3)
 #define ATH79_CPU_IRQ_GE0      (MIPS_CPU_IRQ_BASE + 4)
diff --git a/arch/mips/include/asm/mach-ath79/pci-ath724x.h b/arch/mips/include/asm/mach-ath79/pci-ath724x.h
deleted file mode 100644 (file)
index 454885f..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- *  Atheros 724x PCI support
- *
- *  Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- */
-
-#ifndef __ASM_MACH_ATH79_PCI_ATH724X_H
-#define __ASM_MACH_ATH79_PCI_ATH724X_H
-
-struct ath724x_pci_data {
-       int irq;
-       void *pdata;
-};
-
-void ath724x_pci_add_data(struct ath724x_pci_data *data, int size);
-
-#endif /* __ASM_MACH_ATH79_PCI_ATH724X_H */
diff --git a/arch/mips/include/asm/mach-ath79/pci.h b/arch/mips/include/asm/mach-ath79/pci.h
new file mode 100644 (file)
index 0000000..7868f7f
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ *  Atheros AR71XX/AR724X PCI support
+ *
+ *  Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#ifndef __ASM_MACH_ATH79_PCI_H
+#define __ASM_MACH_ATH79_PCI_H
+
+#if defined(CONFIG_PCI) && defined(CONFIG_SOC_AR71XX)
+int ar71xx_pcibios_init(void);
+#else
+static inline int ar71xx_pcibios_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_PCI_AR724X)
+int ar724x_pcibios_init(int irq);
+#else
+static inline int ar724x_pcibios_init(int irq) { return 0; }
+#endif
+
+#endif /* __ASM_MACH_ATH79_PCI_H */
index 3d5de96d40369523940662c3400302ef9a0e48fb..1d7dd96aa460b5d150f4d15660993a1170a4f3e8 100644 (file)
@@ -2,6 +2,7 @@
 #define BCM63XX_GPIO_H
 
 #include <linux/init.h>
+#include <bcm63xx_cpu.h>
 
 int __init bcm63xx_gpio_init(void);
 
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
new file mode 100644 (file)
index 0000000..318f982
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#ifndef _FALCON_IRQ__
+#define _FALCON_IRQ__
+
+#define INT_NUM_IRQ0                   8
+#define INT_NUM_IM0_IRL0               (INT_NUM_IRQ0 + 0)
+#define INT_NUM_IM1_IRL0               (INT_NUM_IM0_IRL0 + 32)
+#define INT_NUM_IM2_IRL0               (INT_NUM_IM1_IRL0 + 32)
+#define INT_NUM_IM3_IRL0               (INT_NUM_IM2_IRL0 + 32)
+#define INT_NUM_IM4_IRL0               (INT_NUM_IM3_IRL0 + 32)
+#define INT_NUM_EXTRA_START            (INT_NUM_IM4_IRL0 + 32)
+#define INT_NUM_IM_OFFSET              (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
+
+#define MIPS_CPU_TIMER_IRQ                     7
+
+#endif /* _FALCON_IRQ__ */
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/irq.h b/arch/mips/include/asm/mach-lantiq/falcon/irq.h
new file mode 100644 (file)
index 0000000..2caccd9
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#ifndef __FALCON_IRQ_H
+#define __FALCON_IRQ_H
+
+#include <falcon_irq.h>
+
+#define NR_IRQS 328
+
+#include_next <irq.h>
+
+#endif
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
new file mode 100644 (file)
index 0000000..b385252
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _LTQ_FALCON_H__
+#define _LTQ_FALCON_H__
+
+#ifdef CONFIG_SOC_FALCON
+
+#include <linux/pinctrl/pinctrl.h>
+#include <lantiq.h>
+
+/* Chip IDs */
+#define SOC_ID_FALCON          0x01B8
+
+/* SoC Types */
+#define SOC_TYPE_FALCON                0x01
+
+/*
+ * during early_printk no ioremap possible at this early stage
+ * lets use KSEG1 instead
+ */
+#define LTQ_ASC0_BASE_ADDR     0x1E100C00
+#define LTQ_EARLY_ASC          KSEG1ADDR(LTQ_ASC0_BASE_ADDR)
+
+/* WDT */
+#define LTQ_RST_CAUSE_WDTRST   0x0002
+
+/* CHIP ID */
+#define LTQ_STATUS_BASE_ADDR   0x1E802000
+
+#define FALCON_CHIPID          ((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x0c))
+#define FALCON_CHIPTYPE                ((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x38))
+#define FALCON_CHIPCONF                ((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x40))
+
+/* SYSCTL - start/stop/restart/configure/... different parts of the Soc */
+#define SYSCTL_SYS1            0
+#define SYSCTL_SYSETH          1
+#define SYSCTL_SYSGPE          2
+
+/* BOOT_SEL - find what boot media we have */
+#define BS_FLASH               0x1
+#define BS_SPI                  0x4
+
+/* global register ranges */
+extern __iomem void *ltq_ebu_membase;
+extern __iomem void *ltq_sys1_membase;
+#define ltq_ebu_w32(x, y)      ltq_w32((x), ltq_ebu_membase + (y))
+#define ltq_ebu_r32(x)         ltq_r32(ltq_ebu_membase + (x))
+
+#define ltq_sys1_w32(x, y)     ltq_w32((x), ltq_sys1_membase + (y))
+#define ltq_sys1_r32(x)                ltq_r32(ltq_sys1_membase + (x))
+#define ltq_sys1_w32_mask(clear, set, reg)   \
+       ltq_sys1_w32((ltq_sys1_r32(reg) & ~(clear)) | (set), reg)
+
+/*
+ * to keep the irq code generic we need to define this to 0 as falcon
+ * has no EIU/EBU
+ */
+#define LTQ_EBU_PCC_ISTAT      0
+
+#endif /* CONFIG_SOC_FALCON */
+#endif /* _LTQ_XWAY_H__ */
diff --git a/arch/mips/include/asm/mach-lantiq/gpio.h b/arch/mips/include/asm/mach-lantiq/gpio.h
new file mode 100644 (file)
index 0000000..f79505b
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef __ASM_MIPS_MACH_LANTIQ_GPIO_H
+#define __ASM_MIPS_MACH_LANTIQ_GPIO_H
+
+static inline int gpio_to_irq(unsigned int gpio)
+{
+       return -1;
+}
+
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+
+#define gpio_cansleep __gpio_cansleep
+
+#include <asm-generic/gpio.h>
+
+#endif
index ce2f02929d22f284cb99b554952d382e70b18a92..5e8a6e9657567c0d5ef2d5af9d10507bf0bcaf80 100644 (file)
@@ -9,6 +9,8 @@
 #define _LANTIQ_H__
 
 #include <linux/irq.h>
+#include <linux/device.h>
+#include <linux/clk.h>
 
 /* generic reg access functions */
 #define ltq_r32(reg)           __raw_readl(reg)
 /* register access macros for EBU and CGU */
 #define ltq_ebu_w32(x, y)      ltq_w32((x), ltq_ebu_membase + (y))
 #define ltq_ebu_r32(x)         ltq_r32(ltq_ebu_membase + (x))
-#define ltq_cgu_w32(x, y)      ltq_w32((x), ltq_cgu_membase + (y))
-#define ltq_cgu_r32(x)         ltq_r32(ltq_cgu_membase + (x))
-
+#define ltq_ebu_w32_mask(x, y, z) \
+       ltq_w32_mask(x, y, ltq_ebu_membase + (z))
 extern __iomem void *ltq_ebu_membase;
-extern __iomem void *ltq_cgu_membase;
-
-extern unsigned int ltq_get_cpu_ver(void);
-extern unsigned int ltq_get_soc_type(void);
-
-/* clock speeds */
-#define CLOCK_60M      60000000
-#define CLOCK_83M      83333333
-#define CLOCK_111M     111111111
-#define CLOCK_133M     133333333
-#define CLOCK_167M     166666667
-#define CLOCK_200M     200000000
-#define CLOCK_266M     266666666
-#define CLOCK_333M     333333333
-#define CLOCK_400M     400000000
 
 /* spinlock all ebu i/o */
 extern spinlock_t ebu_lock;
@@ -49,15 +35,21 @@ extern void ltq_disable_irq(struct irq_data *data);
 extern void ltq_mask_and_ack_irq(struct irq_data *data);
 extern void ltq_enable_irq(struct irq_data *data);
 
+/* clock handling */
+extern int clk_activate(struct clk *clk);
+extern void clk_deactivate(struct clk *clk);
+extern struct clk *clk_get_cpu(void);
+extern struct clk *clk_get_fpi(void);
+extern struct clk *clk_get_io(void);
+
+/* find out what bootsource we have */
+extern unsigned char ltq_boot_select(void);
 /* find out what caused the last cpu reset */
 extern int ltq_reset_cause(void);
-#define LTQ_RST_CAUSE_WDTRST   0x20
 
 #define IOPORT_RESOURCE_START  0x10000000
 #define IOPORT_RESOURCE_END    0xffffffff
 #define IOMEM_RESOURCE_START   0x10000000
 #define IOMEM_RESOURCE_END     0xffffffff
-#define LTQ_FLASH_START                0x10000000
-#define LTQ_FLASH_MAX          0x04000000
 
 #endif
index a305f1d0259e76e282c9787fa1042799c0d40f7f..e23bf7c9a2d0782fa4827ab2297de1a033eb6fdd 100644 (file)
@@ -9,41 +9,8 @@
 #ifndef _LANTIQ_PLATFORM_H__
 #define _LANTIQ_PLATFORM_H__
 
-#include <linux/mtd/partitions.h>
 #include <linux/socket.h>
 
-/* struct used to pass info to the pci core */
-enum {
-       PCI_CLOCK_INT = 0,
-       PCI_CLOCK_EXT
-};
-
-#define PCI_EXIN0      0x0001
-#define PCI_EXIN1      0x0002
-#define PCI_EXIN2      0x0004
-#define PCI_EXIN3      0x0008
-#define PCI_EXIN4      0x0010
-#define PCI_EXIN5      0x0020
-#define PCI_EXIN_MAX   6
-
-#define PCI_GNT1       0x0040
-#define PCI_GNT2       0x0080
-#define PCI_GNT3       0x0100
-#define PCI_GNT4       0x0200
-
-#define PCI_REQ1       0x0400
-#define PCI_REQ2       0x0800
-#define PCI_REQ3       0x1000
-#define PCI_REQ4       0x2000
-#define PCI_REQ_SHIFT  10
-#define PCI_REQ_MASK   0xf
-
-struct ltq_pci_data {
-       int clock;
-       int gpio;
-       int irq[16];
-};
-
 /* struct used to pass info to network drivers */
 struct ltq_eth_data {
        struct sockaddr mac;
index b4465a888e20eecab1558e01d3cf1eb9cb997b0a..aa0b3b866f8467bbb2d1b3f6b943ccb65d97621e 100644 (file)
 #define INT_NUM_IM4_IRL0       (INT_NUM_IRQ0 + 128)
 #define INT_NUM_IM_OFFSET      (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
 
-#define LTQ_ASC_TIR(x)         (INT_NUM_IM3_IRL0 + (x * 8))
-#define LTQ_ASC_RIR(x)         (INT_NUM_IM3_IRL0 + (x * 8) + 1)
-#define LTQ_ASC_EIR(x)         (INT_NUM_IM3_IRL0 + (x * 8) + 2)
-
-#define LTQ_ASC_ASE_TIR                INT_NUM_IM2_IRL0
-#define LTQ_ASC_ASE_RIR                (INT_NUM_IM2_IRL0 + 2)
-#define LTQ_ASC_ASE_EIR                (INT_NUM_IM2_IRL0 + 3)
-
-#define LTQ_SSC_TIR            (INT_NUM_IM0_IRL0 + 15)
-#define LTQ_SSC_RIR            (INT_NUM_IM0_IRL0 + 14)
-#define LTQ_SSC_EIR            (INT_NUM_IM0_IRL0 + 16)
-
-#define LTQ_MEI_DYING_GASP_INT (INT_NUM_IM1_IRL0 + 21)
-#define LTQ_MEI_INT            (INT_NUM_IM1_IRL0 + 23)
-
-#define LTQ_TIMER6_INT         (INT_NUM_IM1_IRL0 + 23)
-#define LTQ_USB_INT            (INT_NUM_IM1_IRL0 + 22)
-#define LTQ_USB_OC_INT         (INT_NUM_IM4_IRL0 + 23)
-
-#define MIPS_CPU_TIMER_IRQ             7
-
 #define LTQ_DMA_CH0_INT                (INT_NUM_IM2_IRL0)
-#define LTQ_DMA_CH1_INT                (INT_NUM_IM2_IRL0 + 1)
-#define LTQ_DMA_CH2_INT                (INT_NUM_IM2_IRL0 + 2)
-#define LTQ_DMA_CH3_INT                (INT_NUM_IM2_IRL0 + 3)
-#define LTQ_DMA_CH4_INT                (INT_NUM_IM2_IRL0 + 4)
-#define LTQ_DMA_CH5_INT                (INT_NUM_IM2_IRL0 + 5)
-#define LTQ_DMA_CH6_INT                (INT_NUM_IM2_IRL0 + 6)
-#define LTQ_DMA_CH7_INT                (INT_NUM_IM2_IRL0 + 7)
-#define LTQ_DMA_CH8_INT                (INT_NUM_IM2_IRL0 + 8)
-#define LTQ_DMA_CH9_INT                (INT_NUM_IM2_IRL0 + 9)
-#define LTQ_DMA_CH10_INT       (INT_NUM_IM2_IRL0 + 10)
-#define LTQ_DMA_CH11_INT       (INT_NUM_IM2_IRL0 + 11)
-#define LTQ_DMA_CH12_INT       (INT_NUM_IM2_IRL0 + 25)
-#define LTQ_DMA_CH13_INT       (INT_NUM_IM2_IRL0 + 26)
-#define LTQ_DMA_CH14_INT       (INT_NUM_IM2_IRL0 + 27)
-#define LTQ_DMA_CH15_INT       (INT_NUM_IM2_IRL0 + 28)
-#define LTQ_DMA_CH16_INT       (INT_NUM_IM2_IRL0 + 29)
-#define LTQ_DMA_CH17_INT       (INT_NUM_IM2_IRL0 + 30)
-#define LTQ_DMA_CH18_INT       (INT_NUM_IM2_IRL0 + 16)
-#define LTQ_DMA_CH19_INT       (INT_NUM_IM2_IRL0 + 21)
-
-#define LTQ_PPE_MBOX_INT       (INT_NUM_IM2_IRL0 + 24)
 
-#define INT_NUM_IM4_IRL14      (INT_NUM_IM4_IRL0 + 14)
+#define MIPS_CPU_TIMER_IRQ     7
 
 #endif
index 8a3c6be669d2136ad8d8d556e748274ae58ebeb2..6a2df709c576956a4c58541d2fad5c2f9c726917 100644 (file)
 #define SOC_ID_DANUBE1         0x129
 #define SOC_ID_DANUBE2         0x12B
 #define SOC_ID_TWINPASS                0x12D
-#define SOC_ID_AMAZON_SE       0x152
+#define SOC_ID_AMAZON_SE_1     0x152 /* 50601 */
+#define SOC_ID_AMAZON_SE_2     0x153 /* 50600 */
 #define SOC_ID_ARX188          0x16C
-#define SOC_ID_ARX168          0x16D
+#define SOC_ID_ARX168_1                0x16D
+#define SOC_ID_ARX168_2                0x16E
 #define SOC_ID_ARX182          0x16F
-
-/* SoC Types */
+#define SOC_ID_GRX188          0x170
+#define SOC_ID_GRX168          0x171
+
+#define SOC_ID_VRX288          0x1C0 /* v1.1 */
+#define SOC_ID_VRX282          0x1C1 /* v1.1 */
+#define SOC_ID_VRX268          0x1C2 /* v1.1 */
+#define SOC_ID_GRX268          0x1C8 /* v1.1 */
+#define SOC_ID_GRX288          0x1C9 /* v1.1 */
+#define SOC_ID_VRX288_2                0x00B /* v1.2 */
+#define SOC_ID_VRX268_2                0x00C /* v1.2 */
+#define SOC_ID_GRX288_2                0x00D /* v1.2 */
+#define SOC_ID_GRX282_2                0x00E /* v1.2 */
+
+ /* SoC Types */
 #define SOC_TYPE_DANUBE                0x01
 #define SOC_TYPE_TWINPASS      0x02
 #define SOC_TYPE_AR9           0x03
-#define SOC_TYPE_VR9           0x04
-#define SOC_TYPE_AMAZON_SE     0x05
+#define SOC_TYPE_VR9           0x04 /* v1.1 */
+#define SOC_TYPE_VR9_2         0x05 /* v1.2 */
+#define SOC_TYPE_AMAZON_SE     0x06
+
+/* BOOT_SEL - find what boot media we have */
+#define BS_EXT_ROM             0x0
+#define BS_FLASH               0x1
+#define BS_MII0                        0x2
+#define BS_PCI                 0x3
+#define BS_UART1               0x4
+#define BS_SPI                 0x5
+#define BS_NAND                        0x6
+#define BS_RMII0               0x7
+
+/* helpers used to access the cgu */
+#define ltq_cgu_w32(x, y)      ltq_w32((x), ltq_cgu_membase + (y))
+#define ltq_cgu_r32(x)         ltq_r32(ltq_cgu_membase + (x))
+extern __iomem void *ltq_cgu_membase;
 
-/* ASC0/1 - serial port */
-#define LTQ_ASC0_BASE_ADDR     0x1E100400
+/*
+ * during early_printk no ioremap is possible
+ * lets use KSEG1 instead
+ */
 #define LTQ_ASC1_BASE_ADDR     0x1E100C00
-#define LTQ_ASC_SIZE           0x400
-
-/* RCU - reset control unit */
-#define LTQ_RCU_BASE_ADDR      0x1F203000
-#define LTQ_RCU_SIZE           0x1000
-
-/* GPTU - general purpose timer unit */
-#define LTQ_GPTU_BASE_ADDR     0x18000300
-#define LTQ_GPTU_SIZE          0x100
+#define LTQ_EARLY_ASC          KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
 
 /* EBU - external bus unit */
-#define LTQ_EBU_GPIO_START     0x14000000
-#define LTQ_EBU_GPIO_SIZE      0x1000
-
-#define LTQ_EBU_BASE_ADDR      0x1E105300
-#define LTQ_EBU_SIZE           0x100
-
 #define LTQ_EBU_BUSCON0                0x0060
 #define LTQ_EBU_PCC_CON                0x0090
 #define LTQ_EBU_PCC_IEN                0x00A4
 #define LTQ_EBU_ADDRSEL1       0x0024
 #define EBU_WRDIS              0x80000000
 
-/* CGU - clock generation unit */
-#define LTQ_CGU_BASE_ADDR      0x1F103000
-#define LTQ_CGU_SIZE           0x1000
-
-/* ICU - interrupt control unit */
-#define LTQ_ICU_BASE_ADDR      0x1F880200
-#define LTQ_ICU_SIZE           0x100
-
-/* EIU - external interrupt unit */
-#define LTQ_EIU_BASE_ADDR      0x1F101000
-#define LTQ_EIU_SIZE           0x1000
-
-/* PMU - power management unit */
-#define LTQ_PMU_BASE_ADDR      0x1F102000
-#define LTQ_PMU_SIZE           0x1000
-
-#define PMU_DMA                        0x0020
-#define PMU_USB                        0x8041
-#define PMU_LED                        0x0800
-#define PMU_GPT                        0x1000
-#define PMU_PPE                        0x2000
-#define PMU_FPI                        0x4000
-#define PMU_SWITCH             0x10000000
-
-/* ETOP - ethernet */
-#define LTQ_ETOP_BASE_ADDR     0x1E180000
-#define LTQ_ETOP_SIZE          0x40000
-
-/* DMA */
-#define LTQ_DMA_BASE_ADDR      0x1E104100
-#define LTQ_DMA_SIZE           0x800
-
-/* PCI */
-#define PCI_CR_BASE_ADDR       0x1E105400
-#define PCI_CR_SIZE            0x400
-
 /* WDT */
-#define LTQ_WDT_BASE_ADDR      0x1F8803F0
-#define LTQ_WDT_SIZE           0x10
-
-/* STP - serial to parallel conversion unit */
-#define LTQ_STP_BASE_ADDR      0x1E100BB0
-#define LTQ_STP_SIZE           0x40
-
-/* GPIO */
-#define LTQ_GPIO0_BASE_ADDR    0x1E100B10
-#define LTQ_GPIO1_BASE_ADDR    0x1E100B40
-#define LTQ_GPIO2_BASE_ADDR    0x1E100B70
-#define LTQ_GPIO_SIZE          0x30
-
-/* SSC */
-#define LTQ_SSC_BASE_ADDR      0x1e100800
-#define LTQ_SSC_SIZE           0x100
-
-/* MEI - dsl core */
-#define LTQ_MEI_BASE_ADDR      0x1E116000
-
-/* DEU - data encryption unit */
-#define LTQ_DEU_BASE_ADDR      0x1E103100
+#define LTQ_RST_CAUSE_WDTRST   0x20
 
 /* MPS - multi processor unit (voice) */
 #define LTQ_MPS_BASE_ADDR      (KSEG1 + 0x1F107000)
 #define LTQ_MPS_CHIPID         ((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344))
 
 /* request a non-gpio and set the PIO config */
-extern int  ltq_gpio_request(unsigned int pin, unsigned int alt0,
-       unsigned int alt1, unsigned int dir, const char *name);
+#define PMU_PPE                         BIT(13)
 extern void ltq_pmu_enable(unsigned int module);
 extern void ltq_pmu_disable(unsigned int module);
 
-static inline int ltq_is_ar9(void)
-{
-       return (ltq_get_soc_type() == SOC_TYPE_AR9);
-}
-
-static inline int ltq_is_vr9(void)
-{
-       return (ltq_get_soc_type() == SOC_TYPE_VR9);
-}
-
 #endif /* CONFIG_SOC_TYPE_XWAY */
 #endif /* _LTQ_XWAY_H__ */
index 46c08563e5328419540341ab176831ea4e991b46..6e23ceb0ba8caa2c7e632e15125aa1e596ca84ae 100644 (file)
@@ -93,8 +93,4 @@ extern void mips_pcibios_init(void);
 #define mips_pcibios_init() do { } while (0)
 #endif
 
-#ifdef CONFIG_KGDB
-extern void kgdb_config(void);
-#endif
-
 #endif  /* __ASM_MIPS_BOARDS_GENERIC_H */
index 7467d1d933d54a0eba29fe93465b78c26ade6108..530008048c6227fb653ccb8b66b0319c442ce8ea 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_MODULE_H
 
 #include <linux/list.h>
+#include <linux/elf.h>
 #include <asm/uaccess.h>
 
 struct mod_arch_specific {
diff --git a/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h b/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h
deleted file mode 100644 (file)
index d553f8e..0000000
+++ /dev/null
@@ -1,1365 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT.  See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_PCIEEP_DEFS_H__
-#define __CVMX_PCIEEP_DEFS_H__
-
-#define CVMX_PCIEEP_CFG000 \
-        (0x0000000000000000ull)
-#define CVMX_PCIEEP_CFG001 \
-        (0x0000000000000004ull)
-#define CVMX_PCIEEP_CFG002 \
-        (0x0000000000000008ull)
-#define CVMX_PCIEEP_CFG003 \
-        (0x000000000000000Cull)
-#define CVMX_PCIEEP_CFG004 \
-        (0x0000000000000010ull)
-#define CVMX_PCIEEP_CFG004_MASK \
-        (0x0000000080000010ull)
-#define CVMX_PCIEEP_CFG005 \
-        (0x0000000000000014ull)
-#define CVMX_PCIEEP_CFG005_MASK \
-        (0x0000000080000014ull)
-#define CVMX_PCIEEP_CFG006 \
-        (0x0000000000000018ull)
-#define CVMX_PCIEEP_CFG006_MASK \
-        (0x0000000080000018ull)
-#define CVMX_PCIEEP_CFG007 \
-        (0x000000000000001Cull)
-#define CVMX_PCIEEP_CFG007_MASK \
-        (0x000000008000001Cull)
-#define CVMX_PCIEEP_CFG008 \
-        (0x0000000000000020ull)
-#define CVMX_PCIEEP_CFG008_MASK \
-        (0x0000000080000020ull)
-#define CVMX_PCIEEP_CFG009 \
-        (0x0000000000000024ull)
-#define CVMX_PCIEEP_CFG009_MASK \
-        (0x0000000080000024ull)
-#define CVMX_PCIEEP_CFG010 \
-        (0x0000000000000028ull)
-#define CVMX_PCIEEP_CFG011 \
-        (0x000000000000002Cull)
-#define CVMX_PCIEEP_CFG012 \
-        (0x0000000000000030ull)
-#define CVMX_PCIEEP_CFG012_MASK \
-        (0x0000000080000030ull)
-#define CVMX_PCIEEP_CFG013 \
-        (0x0000000000000034ull)
-#define CVMX_PCIEEP_CFG015 \
-        (0x000000000000003Cull)
-#define CVMX_PCIEEP_CFG016 \
-        (0x0000000000000040ull)
-#define CVMX_PCIEEP_CFG017 \
-        (0x0000000000000044ull)
-#define CVMX_PCIEEP_CFG020 \
-        (0x0000000000000050ull)
-#define CVMX_PCIEEP_CFG021 \
-        (0x0000000000000054ull)
-#define CVMX_PCIEEP_CFG022 \
-        (0x0000000000000058ull)
-#define CVMX_PCIEEP_CFG023 \
-        (0x000000000000005Cull)
-#define CVMX_PCIEEP_CFG028 \
-        (0x0000000000000070ull)
-#define CVMX_PCIEEP_CFG029 \
-        (0x0000000000000074ull)
-#define CVMX_PCIEEP_CFG030 \
-        (0x0000000000000078ull)
-#define CVMX_PCIEEP_CFG031 \
-        (0x000000000000007Cull)
-#define CVMX_PCIEEP_CFG032 \
-        (0x0000000000000080ull)
-#define CVMX_PCIEEP_CFG033 \
-        (0x0000000000000084ull)
-#define CVMX_PCIEEP_CFG034 \
-        (0x0000000000000088ull)
-#define CVMX_PCIEEP_CFG037 \
-        (0x0000000000000094ull)
-#define CVMX_PCIEEP_CFG038 \
-        (0x0000000000000098ull)
-#define CVMX_PCIEEP_CFG039 \
-        (0x000000000000009Cull)
-#define CVMX_PCIEEP_CFG040 \
-        (0x00000000000000A0ull)
-#define CVMX_PCIEEP_CFG041 \
-        (0x00000000000000A4ull)
-#define CVMX_PCIEEP_CFG042 \
-        (0x00000000000000A8ull)
-#define CVMX_PCIEEP_CFG064 \
-        (0x0000000000000100ull)
-#define CVMX_PCIEEP_CFG065 \
-        (0x0000000000000104ull)
-#define CVMX_PCIEEP_CFG066 \
-        (0x0000000000000108ull)
-#define CVMX_PCIEEP_CFG067 \
-        (0x000000000000010Cull)
-#define CVMX_PCIEEP_CFG068 \
-        (0x0000000000000110ull)
-#define CVMX_PCIEEP_CFG069 \
-        (0x0000000000000114ull)
-#define CVMX_PCIEEP_CFG070 \
-        (0x0000000000000118ull)
-#define CVMX_PCIEEP_CFG071 \
-        (0x000000000000011Cull)
-#define CVMX_PCIEEP_CFG072 \
-        (0x0000000000000120ull)
-#define CVMX_PCIEEP_CFG073 \
-        (0x0000000000000124ull)
-#define CVMX_PCIEEP_CFG074 \
-        (0x0000000000000128ull)
-#define CVMX_PCIEEP_CFG448 \
-        (0x0000000000000700ull)
-#define CVMX_PCIEEP_CFG449 \
-        (0x0000000000000704ull)
-#define CVMX_PCIEEP_CFG450 \
-        (0x0000000000000708ull)
-#define CVMX_PCIEEP_CFG451 \
-        (0x000000000000070Cull)
-#define CVMX_PCIEEP_CFG452 \
-        (0x0000000000000710ull)
-#define CVMX_PCIEEP_CFG453 \
-        (0x0000000000000714ull)
-#define CVMX_PCIEEP_CFG454 \
-        (0x0000000000000718ull)
-#define CVMX_PCIEEP_CFG455 \
-        (0x000000000000071Cull)
-#define CVMX_PCIEEP_CFG456 \
-        (0x0000000000000720ull)
-#define CVMX_PCIEEP_CFG458 \
-        (0x0000000000000728ull)
-#define CVMX_PCIEEP_CFG459 \
-        (0x000000000000072Cull)
-#define CVMX_PCIEEP_CFG460 \
-        (0x0000000000000730ull)
-#define CVMX_PCIEEP_CFG461 \
-        (0x0000000000000734ull)
-#define CVMX_PCIEEP_CFG462 \
-        (0x0000000000000738ull)
-#define CVMX_PCIEEP_CFG463 \
-        (0x000000000000073Cull)
-#define CVMX_PCIEEP_CFG464 \
-        (0x0000000000000740ull)
-#define CVMX_PCIEEP_CFG465 \
-        (0x0000000000000744ull)
-#define CVMX_PCIEEP_CFG466 \
-        (0x0000000000000748ull)
-#define CVMX_PCIEEP_CFG467 \
-        (0x000000000000074Cull)
-#define CVMX_PCIEEP_CFG468 \
-        (0x0000000000000750ull)
-#define CVMX_PCIEEP_CFG490 \
-        (0x00000000000007A8ull)
-#define CVMX_PCIEEP_CFG491 \
-        (0x00000000000007ACull)
-#define CVMX_PCIEEP_CFG492 \
-        (0x00000000000007B0ull)
-#define CVMX_PCIEEP_CFG516 \
-        (0x0000000000000810ull)
-#define CVMX_PCIEEP_CFG517 \
-        (0x0000000000000814ull)
-
-union cvmx_pcieep_cfg000 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg000_s {
-               uint32_t devid:16;
-               uint32_t vendid:16;
-       } s;
-       struct cvmx_pcieep_cfg000_s cn52xx;
-       struct cvmx_pcieep_cfg000_s cn52xxp1;
-       struct cvmx_pcieep_cfg000_s cn56xx;
-       struct cvmx_pcieep_cfg000_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg001 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg001_s {
-               uint32_t dpe:1;
-               uint32_t sse:1;
-               uint32_t rma:1;
-               uint32_t rta:1;
-               uint32_t sta:1;
-               uint32_t devt:2;
-               uint32_t mdpe:1;
-               uint32_t fbb:1;
-               uint32_t reserved_22_22:1;
-               uint32_t m66:1;
-               uint32_t cl:1;
-               uint32_t i_stat:1;
-               uint32_t reserved_11_18:8;
-               uint32_t i_dis:1;
-               uint32_t fbbe:1;
-               uint32_t see:1;
-               uint32_t ids_wcc:1;
-               uint32_t per:1;
-               uint32_t vps:1;
-               uint32_t mwice:1;
-               uint32_t scse:1;
-               uint32_t me:1;
-               uint32_t msae:1;
-               uint32_t isae:1;
-       } s;
-       struct cvmx_pcieep_cfg001_s cn52xx;
-       struct cvmx_pcieep_cfg001_s cn52xxp1;
-       struct cvmx_pcieep_cfg001_s cn56xx;
-       struct cvmx_pcieep_cfg001_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg002 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg002_s {
-               uint32_t bcc:8;
-               uint32_t sc:8;
-               uint32_t pi:8;
-               uint32_t rid:8;
-       } s;
-       struct cvmx_pcieep_cfg002_s cn52xx;
-       struct cvmx_pcieep_cfg002_s cn52xxp1;
-       struct cvmx_pcieep_cfg002_s cn56xx;
-       struct cvmx_pcieep_cfg002_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg003 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg003_s {
-               uint32_t bist:8;
-               uint32_t mfd:1;
-               uint32_t chf:7;
-               uint32_t lt:8;
-               uint32_t cls:8;
-       } s;
-       struct cvmx_pcieep_cfg003_s cn52xx;
-       struct cvmx_pcieep_cfg003_s cn52xxp1;
-       struct cvmx_pcieep_cfg003_s cn56xx;
-       struct cvmx_pcieep_cfg003_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg004 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg004_s {
-               uint32_t lbab:18;
-               uint32_t reserved_4_13:10;
-               uint32_t pf:1;
-               uint32_t typ:2;
-               uint32_t mspc:1;
-       } s;
-       struct cvmx_pcieep_cfg004_s cn52xx;
-       struct cvmx_pcieep_cfg004_s cn52xxp1;
-       struct cvmx_pcieep_cfg004_s cn56xx;
-       struct cvmx_pcieep_cfg004_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg004_mask {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg004_mask_s {
-               uint32_t lmask:31;
-               uint32_t enb:1;
-       } s;
-       struct cvmx_pcieep_cfg004_mask_s cn52xx;
-       struct cvmx_pcieep_cfg004_mask_s cn52xxp1;
-       struct cvmx_pcieep_cfg004_mask_s cn56xx;
-       struct cvmx_pcieep_cfg004_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg005 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg005_s {
-               uint32_t ubab:32;
-       } s;
-       struct cvmx_pcieep_cfg005_s cn52xx;
-       struct cvmx_pcieep_cfg005_s cn52xxp1;
-       struct cvmx_pcieep_cfg005_s cn56xx;
-       struct cvmx_pcieep_cfg005_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg005_mask {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg005_mask_s {
-               uint32_t umask:32;
-       } s;
-       struct cvmx_pcieep_cfg005_mask_s cn52xx;
-       struct cvmx_pcieep_cfg005_mask_s cn52xxp1;
-       struct cvmx_pcieep_cfg005_mask_s cn56xx;
-       struct cvmx_pcieep_cfg005_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg006 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg006_s {
-               uint32_t lbab:6;
-               uint32_t reserved_4_25:22;
-               uint32_t pf:1;
-               uint32_t typ:2;
-               uint32_t mspc:1;
-       } s;
-       struct cvmx_pcieep_cfg006_s cn52xx;
-       struct cvmx_pcieep_cfg006_s cn52xxp1;
-       struct cvmx_pcieep_cfg006_s cn56xx;
-       struct cvmx_pcieep_cfg006_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg006_mask {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg006_mask_s {
-               uint32_t lmask:31;
-               uint32_t enb:1;
-       } s;
-       struct cvmx_pcieep_cfg006_mask_s cn52xx;
-       struct cvmx_pcieep_cfg006_mask_s cn52xxp1;
-       struct cvmx_pcieep_cfg006_mask_s cn56xx;
-       struct cvmx_pcieep_cfg006_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg007 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg007_s {
-               uint32_t ubab:32;
-       } s;
-       struct cvmx_pcieep_cfg007_s cn52xx;
-       struct cvmx_pcieep_cfg007_s cn52xxp1;
-       struct cvmx_pcieep_cfg007_s cn56xx;
-       struct cvmx_pcieep_cfg007_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg007_mask {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg007_mask_s {
-               uint32_t umask:32;
-       } s;
-       struct cvmx_pcieep_cfg007_mask_s cn52xx;
-       struct cvmx_pcieep_cfg007_mask_s cn52xxp1;
-       struct cvmx_pcieep_cfg007_mask_s cn56xx;
-       struct cvmx_pcieep_cfg007_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg008 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg008_s {
-               uint32_t reserved_4_31:28;
-               uint32_t pf:1;
-               uint32_t typ:2;
-               uint32_t mspc:1;
-       } s;
-       struct cvmx_pcieep_cfg008_s cn52xx;
-       struct cvmx_pcieep_cfg008_s cn52xxp1;
-       struct cvmx_pcieep_cfg008_s cn56xx;
-       struct cvmx_pcieep_cfg008_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg008_mask {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg008_mask_s {
-               uint32_t lmask:31;
-               uint32_t enb:1;
-       } s;
-       struct cvmx_pcieep_cfg008_mask_s cn52xx;
-       struct cvmx_pcieep_cfg008_mask_s cn52xxp1;
-       struct cvmx_pcieep_cfg008_mask_s cn56xx;
-       struct cvmx_pcieep_cfg008_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg009 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg009_s {
-               uint32_t ubab:25;
-               uint32_t reserved_0_6:7;
-       } s;
-       struct cvmx_pcieep_cfg009_s cn52xx;
-       struct cvmx_pcieep_cfg009_s cn52xxp1;
-       struct cvmx_pcieep_cfg009_s cn56xx;
-       struct cvmx_pcieep_cfg009_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg009_mask {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg009_mask_s {
-               uint32_t umask:32;
-       } s;
-       struct cvmx_pcieep_cfg009_mask_s cn52xx;
-       struct cvmx_pcieep_cfg009_mask_s cn52xxp1;
-       struct cvmx_pcieep_cfg009_mask_s cn56xx;
-       struct cvmx_pcieep_cfg009_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg010 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg010_s {
-               uint32_t cisp:32;
-       } s;
-       struct cvmx_pcieep_cfg010_s cn52xx;
-       struct cvmx_pcieep_cfg010_s cn52xxp1;
-       struct cvmx_pcieep_cfg010_s cn56xx;
-       struct cvmx_pcieep_cfg010_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg011 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg011_s {
-               uint32_t ssid:16;
-               uint32_t ssvid:16;
-       } s;
-       struct cvmx_pcieep_cfg011_s cn52xx;
-       struct cvmx_pcieep_cfg011_s cn52xxp1;
-       struct cvmx_pcieep_cfg011_s cn56xx;
-       struct cvmx_pcieep_cfg011_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg012 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg012_s {
-               uint32_t eraddr:16;
-               uint32_t reserved_1_15:15;
-               uint32_t er_en:1;
-       } s;
-       struct cvmx_pcieep_cfg012_s cn52xx;
-       struct cvmx_pcieep_cfg012_s cn52xxp1;
-       struct cvmx_pcieep_cfg012_s cn56xx;
-       struct cvmx_pcieep_cfg012_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg012_mask {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg012_mask_s {
-               uint32_t mask:31;
-               uint32_t enb:1;
-       } s;
-       struct cvmx_pcieep_cfg012_mask_s cn52xx;
-       struct cvmx_pcieep_cfg012_mask_s cn52xxp1;
-       struct cvmx_pcieep_cfg012_mask_s cn56xx;
-       struct cvmx_pcieep_cfg012_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg013 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg013_s {
-               uint32_t reserved_8_31:24;
-               uint32_t cp:8;
-       } s;
-       struct cvmx_pcieep_cfg013_s cn52xx;
-       struct cvmx_pcieep_cfg013_s cn52xxp1;
-       struct cvmx_pcieep_cfg013_s cn56xx;
-       struct cvmx_pcieep_cfg013_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg015 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg015_s {
-               uint32_t ml:8;
-               uint32_t mg:8;
-               uint32_t inta:8;
-               uint32_t il:8;
-       } s;
-       struct cvmx_pcieep_cfg015_s cn52xx;
-       struct cvmx_pcieep_cfg015_s cn52xxp1;
-       struct cvmx_pcieep_cfg015_s cn56xx;
-       struct cvmx_pcieep_cfg015_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg016 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg016_s {
-               uint32_t pmes:5;
-               uint32_t d2s:1;
-               uint32_t d1s:1;
-               uint32_t auxc:3;
-               uint32_t dsi:1;
-               uint32_t reserved_20_20:1;
-               uint32_t pme_clock:1;
-               uint32_t pmsv:3;
-               uint32_t ncp:8;
-               uint32_t pmcid:8;
-       } s;
-       struct cvmx_pcieep_cfg016_s cn52xx;
-       struct cvmx_pcieep_cfg016_s cn52xxp1;
-       struct cvmx_pcieep_cfg016_s cn56xx;
-       struct cvmx_pcieep_cfg016_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg017 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg017_s {
-               uint32_t pmdia:8;
-               uint32_t bpccee:1;
-               uint32_t bd3h:1;
-               uint32_t reserved_16_21:6;
-               uint32_t pmess:1;
-               uint32_t pmedsia:2;
-               uint32_t pmds:4;
-               uint32_t pmeens:1;
-               uint32_t reserved_4_7:4;
-               uint32_t nsr:1;
-               uint32_t reserved_2_2:1;
-               uint32_t ps:2;
-       } s;
-       struct cvmx_pcieep_cfg017_s cn52xx;
-       struct cvmx_pcieep_cfg017_s cn52xxp1;
-       struct cvmx_pcieep_cfg017_s cn56xx;
-       struct cvmx_pcieep_cfg017_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg020 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg020_s {
-               uint32_t reserved_24_31:8;
-               uint32_t m64:1;
-               uint32_t mme:3;
-               uint32_t mmc:3;
-               uint32_t msien:1;
-               uint32_t ncp:8;
-               uint32_t msicid:8;
-       } s;
-       struct cvmx_pcieep_cfg020_s cn52xx;
-       struct cvmx_pcieep_cfg020_s cn52xxp1;
-       struct cvmx_pcieep_cfg020_s cn56xx;
-       struct cvmx_pcieep_cfg020_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg021 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg021_s {
-               uint32_t lmsi:30;
-               uint32_t reserved_0_1:2;
-       } s;
-       struct cvmx_pcieep_cfg021_s cn52xx;
-       struct cvmx_pcieep_cfg021_s cn52xxp1;
-       struct cvmx_pcieep_cfg021_s cn56xx;
-       struct cvmx_pcieep_cfg021_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg022 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg022_s {
-               uint32_t umsi:32;
-       } s;
-       struct cvmx_pcieep_cfg022_s cn52xx;
-       struct cvmx_pcieep_cfg022_s cn52xxp1;
-       struct cvmx_pcieep_cfg022_s cn56xx;
-       struct cvmx_pcieep_cfg022_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg023 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg023_s {
-               uint32_t reserved_16_31:16;
-               uint32_t msimd:16;
-       } s;
-       struct cvmx_pcieep_cfg023_s cn52xx;
-       struct cvmx_pcieep_cfg023_s cn52xxp1;
-       struct cvmx_pcieep_cfg023_s cn56xx;
-       struct cvmx_pcieep_cfg023_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg028 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg028_s {
-               uint32_t reserved_30_31:2;
-               uint32_t imn:5;
-               uint32_t si:1;
-               uint32_t dpt:4;
-               uint32_t pciecv:4;
-               uint32_t ncp:8;
-               uint32_t pcieid:8;
-       } s;
-       struct cvmx_pcieep_cfg028_s cn52xx;
-       struct cvmx_pcieep_cfg028_s cn52xxp1;
-       struct cvmx_pcieep_cfg028_s cn56xx;
-       struct cvmx_pcieep_cfg028_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg029 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg029_s {
-               uint32_t reserved_28_31:4;
-               uint32_t cspls:2;
-               uint32_t csplv:8;
-               uint32_t reserved_16_17:2;
-               uint32_t rber:1;
-               uint32_t reserved_12_14:3;
-               uint32_t el1al:3;
-               uint32_t el0al:3;
-               uint32_t etfs:1;
-               uint32_t pfs:2;
-               uint32_t mpss:3;
-       } s;
-       struct cvmx_pcieep_cfg029_s cn52xx;
-       struct cvmx_pcieep_cfg029_s cn52xxp1;
-       struct cvmx_pcieep_cfg029_s cn56xx;
-       struct cvmx_pcieep_cfg029_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg030 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg030_s {
-               uint32_t reserved_22_31:10;
-               uint32_t tp:1;
-               uint32_t ap_d:1;
-               uint32_t ur_d:1;
-               uint32_t fe_d:1;
-               uint32_t nfe_d:1;
-               uint32_t ce_d:1;
-               uint32_t reserved_15_15:1;
-               uint32_t mrrs:3;
-               uint32_t ns_en:1;
-               uint32_t ap_en:1;
-               uint32_t pf_en:1;
-               uint32_t etf_en:1;
-               uint32_t mps:3;
-               uint32_t ro_en:1;
-               uint32_t ur_en:1;
-               uint32_t fe_en:1;
-               uint32_t nfe_en:1;
-               uint32_t ce_en:1;
-       } s;
-       struct cvmx_pcieep_cfg030_s cn52xx;
-       struct cvmx_pcieep_cfg030_s cn52xxp1;
-       struct cvmx_pcieep_cfg030_s cn56xx;
-       struct cvmx_pcieep_cfg030_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg031 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg031_s {
-               uint32_t pnum:8;
-               uint32_t reserved_22_23:2;
-               uint32_t lbnc:1;
-               uint32_t dllarc:1;
-               uint32_t sderc:1;
-               uint32_t cpm:1;
-               uint32_t l1el:3;
-               uint32_t l0el:3;
-               uint32_t aslpms:2;
-               uint32_t mlw:6;
-               uint32_t mls:4;
-       } s;
-       struct cvmx_pcieep_cfg031_s cn52xx;
-       struct cvmx_pcieep_cfg031_s cn52xxp1;
-       struct cvmx_pcieep_cfg031_s cn56xx;
-       struct cvmx_pcieep_cfg031_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg032 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg032_s {
-               uint32_t reserved_30_31:2;
-               uint32_t dlla:1;
-               uint32_t scc:1;
-               uint32_t lt:1;
-               uint32_t reserved_26_26:1;
-               uint32_t nlw:6;
-               uint32_t ls:4;
-               uint32_t reserved_10_15:6;
-               uint32_t hawd:1;
-               uint32_t ecpm:1;
-               uint32_t es:1;
-               uint32_t ccc:1;
-               uint32_t rl:1;
-               uint32_t ld:1;
-               uint32_t rcb:1;
-               uint32_t reserved_2_2:1;
-               uint32_t aslpc:2;
-       } s;
-       struct cvmx_pcieep_cfg032_s cn52xx;
-       struct cvmx_pcieep_cfg032_s cn52xxp1;
-       struct cvmx_pcieep_cfg032_s cn56xx;
-       struct cvmx_pcieep_cfg032_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg033 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg033_s {
-               uint32_t ps_num:13;
-               uint32_t nccs:1;
-               uint32_t emip:1;
-               uint32_t sp_ls:2;
-               uint32_t sp_lv:8;
-               uint32_t hp_c:1;
-               uint32_t hp_s:1;
-               uint32_t pip:1;
-               uint32_t aip:1;
-               uint32_t mrlsp:1;
-               uint32_t pcp:1;
-               uint32_t abp:1;
-       } s;
-       struct cvmx_pcieep_cfg033_s cn52xx;
-       struct cvmx_pcieep_cfg033_s cn52xxp1;
-       struct cvmx_pcieep_cfg033_s cn56xx;
-       struct cvmx_pcieep_cfg033_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg034 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg034_s {
-               uint32_t reserved_25_31:7;
-               uint32_t dlls_c:1;
-               uint32_t emis:1;
-               uint32_t pds:1;
-               uint32_t mrlss:1;
-               uint32_t ccint_d:1;
-               uint32_t pd_c:1;
-               uint32_t mrls_c:1;
-               uint32_t pf_d:1;
-               uint32_t abp_d:1;
-               uint32_t reserved_13_15:3;
-               uint32_t dlls_en:1;
-               uint32_t emic:1;
-               uint32_t pcc:1;
-               uint32_t pic:2;
-               uint32_t aic:2;
-               uint32_t hpint_en:1;
-               uint32_t ccint_en:1;
-               uint32_t pd_en:1;
-               uint32_t mrls_en:1;
-               uint32_t pf_en:1;
-               uint32_t abp_en:1;
-       } s;
-       struct cvmx_pcieep_cfg034_s cn52xx;
-       struct cvmx_pcieep_cfg034_s cn52xxp1;
-       struct cvmx_pcieep_cfg034_s cn56xx;
-       struct cvmx_pcieep_cfg034_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg037 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg037_s {
-               uint32_t reserved_5_31:27;
-               uint32_t ctds:1;
-               uint32_t ctrs:4;
-       } s;
-       struct cvmx_pcieep_cfg037_s cn52xx;
-       struct cvmx_pcieep_cfg037_s cn52xxp1;
-       struct cvmx_pcieep_cfg037_s cn56xx;
-       struct cvmx_pcieep_cfg037_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg038 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg038_s {
-               uint32_t reserved_5_31:27;
-               uint32_t ctd:1;
-               uint32_t ctv:4;
-       } s;
-       struct cvmx_pcieep_cfg038_s cn52xx;
-       struct cvmx_pcieep_cfg038_s cn52xxp1;
-       struct cvmx_pcieep_cfg038_s cn56xx;
-       struct cvmx_pcieep_cfg038_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg039 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg039_s {
-               uint32_t reserved_0_31:32;
-       } s;
-       struct cvmx_pcieep_cfg039_s cn52xx;
-       struct cvmx_pcieep_cfg039_s cn52xxp1;
-       struct cvmx_pcieep_cfg039_s cn56xx;
-       struct cvmx_pcieep_cfg039_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg040 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg040_s {
-               uint32_t reserved_0_31:32;
-       } s;
-       struct cvmx_pcieep_cfg040_s cn52xx;
-       struct cvmx_pcieep_cfg040_s cn52xxp1;
-       struct cvmx_pcieep_cfg040_s cn56xx;
-       struct cvmx_pcieep_cfg040_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg041 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg041_s {
-               uint32_t reserved_0_31:32;
-       } s;
-       struct cvmx_pcieep_cfg041_s cn52xx;
-       struct cvmx_pcieep_cfg041_s cn52xxp1;
-       struct cvmx_pcieep_cfg041_s cn56xx;
-       struct cvmx_pcieep_cfg041_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg042 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg042_s {
-               uint32_t reserved_0_31:32;
-       } s;
-       struct cvmx_pcieep_cfg042_s cn52xx;
-       struct cvmx_pcieep_cfg042_s cn52xxp1;
-       struct cvmx_pcieep_cfg042_s cn56xx;
-       struct cvmx_pcieep_cfg042_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg064 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg064_s {
-               uint32_t nco:12;
-               uint32_t cv:4;
-               uint32_t pcieec:16;
-       } s;
-       struct cvmx_pcieep_cfg064_s cn52xx;
-       struct cvmx_pcieep_cfg064_s cn52xxp1;
-       struct cvmx_pcieep_cfg064_s cn56xx;
-       struct cvmx_pcieep_cfg064_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg065 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg065_s {
-               uint32_t reserved_21_31:11;
-               uint32_t ures:1;
-               uint32_t ecrces:1;
-               uint32_t mtlps:1;
-               uint32_t ros:1;
-               uint32_t ucs:1;
-               uint32_t cas:1;
-               uint32_t cts:1;
-               uint32_t fcpes:1;
-               uint32_t ptlps:1;
-               uint32_t reserved_6_11:6;
-               uint32_t sdes:1;
-               uint32_t dlpes:1;
-               uint32_t reserved_0_3:4;
-       } s;
-       struct cvmx_pcieep_cfg065_s cn52xx;
-       struct cvmx_pcieep_cfg065_s cn52xxp1;
-       struct cvmx_pcieep_cfg065_s cn56xx;
-       struct cvmx_pcieep_cfg065_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg066 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg066_s {
-               uint32_t reserved_21_31:11;
-               uint32_t urem:1;
-               uint32_t ecrcem:1;
-               uint32_t mtlpm:1;
-               uint32_t rom:1;
-               uint32_t ucm:1;
-               uint32_t cam:1;
-               uint32_t ctm:1;
-               uint32_t fcpem:1;
-               uint32_t ptlpm:1;
-               uint32_t reserved_6_11:6;
-               uint32_t sdem:1;
-               uint32_t dlpem:1;
-               uint32_t reserved_0_3:4;
-       } s;
-       struct cvmx_pcieep_cfg066_s cn52xx;
-       struct cvmx_pcieep_cfg066_s cn52xxp1;
-       struct cvmx_pcieep_cfg066_s cn56xx;
-       struct cvmx_pcieep_cfg066_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg067 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg067_s {
-               uint32_t reserved_21_31:11;
-               uint32_t ures:1;
-               uint32_t ecrces:1;
-               uint32_t mtlps:1;
-               uint32_t ros:1;
-               uint32_t ucs:1;
-               uint32_t cas:1;
-               uint32_t cts:1;
-               uint32_t fcpes:1;
-               uint32_t ptlps:1;
-               uint32_t reserved_6_11:6;
-               uint32_t sdes:1;
-               uint32_t dlpes:1;
-               uint32_t reserved_0_3:4;
-       } s;
-       struct cvmx_pcieep_cfg067_s cn52xx;
-       struct cvmx_pcieep_cfg067_s cn52xxp1;
-       struct cvmx_pcieep_cfg067_s cn56xx;
-       struct cvmx_pcieep_cfg067_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg068 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg068_s {
-               uint32_t reserved_14_31:18;
-               uint32_t anfes:1;
-               uint32_t rtts:1;
-               uint32_t reserved_9_11:3;
-               uint32_t rnrs:1;
-               uint32_t bdllps:1;
-               uint32_t btlps:1;
-               uint32_t reserved_1_5:5;
-               uint32_t res:1;
-       } s;
-       struct cvmx_pcieep_cfg068_s cn52xx;
-       struct cvmx_pcieep_cfg068_s cn52xxp1;
-       struct cvmx_pcieep_cfg068_s cn56xx;
-       struct cvmx_pcieep_cfg068_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg069 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg069_s {
-               uint32_t reserved_14_31:18;
-               uint32_t anfem:1;
-               uint32_t rttm:1;
-               uint32_t reserved_9_11:3;
-               uint32_t rnrm:1;
-               uint32_t bdllpm:1;
-               uint32_t btlpm:1;
-               uint32_t reserved_1_5:5;
-               uint32_t rem:1;
-       } s;
-       struct cvmx_pcieep_cfg069_s cn52xx;
-       struct cvmx_pcieep_cfg069_s cn52xxp1;
-       struct cvmx_pcieep_cfg069_s cn56xx;
-       struct cvmx_pcieep_cfg069_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg070 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg070_s {
-               uint32_t reserved_9_31:23;
-               uint32_t ce:1;
-               uint32_t cc:1;
-               uint32_t ge:1;
-               uint32_t gc:1;
-               uint32_t fep:5;
-       } s;
-       struct cvmx_pcieep_cfg070_s cn52xx;
-       struct cvmx_pcieep_cfg070_s cn52xxp1;
-       struct cvmx_pcieep_cfg070_s cn56xx;
-       struct cvmx_pcieep_cfg070_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg071 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg071_s {
-               uint32_t dword1:32;
-       } s;
-       struct cvmx_pcieep_cfg071_s cn52xx;
-       struct cvmx_pcieep_cfg071_s cn52xxp1;
-       struct cvmx_pcieep_cfg071_s cn56xx;
-       struct cvmx_pcieep_cfg071_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg072 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg072_s {
-               uint32_t dword2:32;
-       } s;
-       struct cvmx_pcieep_cfg072_s cn52xx;
-       struct cvmx_pcieep_cfg072_s cn52xxp1;
-       struct cvmx_pcieep_cfg072_s cn56xx;
-       struct cvmx_pcieep_cfg072_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg073 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg073_s {
-               uint32_t dword3:32;
-       } s;
-       struct cvmx_pcieep_cfg073_s cn52xx;
-       struct cvmx_pcieep_cfg073_s cn52xxp1;
-       struct cvmx_pcieep_cfg073_s cn56xx;
-       struct cvmx_pcieep_cfg073_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg074 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg074_s {
-               uint32_t dword4:32;
-       } s;
-       struct cvmx_pcieep_cfg074_s cn52xx;
-       struct cvmx_pcieep_cfg074_s cn52xxp1;
-       struct cvmx_pcieep_cfg074_s cn56xx;
-       struct cvmx_pcieep_cfg074_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg448 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg448_s {
-               uint32_t rtl:16;
-               uint32_t rtltl:16;
-       } s;
-       struct cvmx_pcieep_cfg448_s cn52xx;
-       struct cvmx_pcieep_cfg448_s cn52xxp1;
-       struct cvmx_pcieep_cfg448_s cn56xx;
-       struct cvmx_pcieep_cfg448_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg449 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg449_s {
-               uint32_t omr:32;
-       } s;
-       struct cvmx_pcieep_cfg449_s cn52xx;
-       struct cvmx_pcieep_cfg449_s cn52xxp1;
-       struct cvmx_pcieep_cfg449_s cn56xx;
-       struct cvmx_pcieep_cfg449_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg450 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg450_s {
-               uint32_t lpec:8;
-               uint32_t reserved_22_23:2;
-               uint32_t link_state:6;
-               uint32_t force_link:1;
-               uint32_t reserved_8_14:7;
-               uint32_t link_num:8;
-       } s;
-       struct cvmx_pcieep_cfg450_s cn52xx;
-       struct cvmx_pcieep_cfg450_s cn52xxp1;
-       struct cvmx_pcieep_cfg450_s cn56xx;
-       struct cvmx_pcieep_cfg450_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg451 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg451_s {
-               uint32_t reserved_30_31:2;
-               uint32_t l1el:3;
-               uint32_t l0el:3;
-               uint32_t n_fts_cc:8;
-               uint32_t n_fts:8;
-               uint32_t ack_freq:8;
-       } s;
-       struct cvmx_pcieep_cfg451_s cn52xx;
-       struct cvmx_pcieep_cfg451_s cn52xxp1;
-       struct cvmx_pcieep_cfg451_s cn56xx;
-       struct cvmx_pcieep_cfg451_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg452 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg452_s {
-               uint32_t reserved_26_31:6;
-               uint32_t eccrc:1;
-               uint32_t reserved_22_24:3;
-               uint32_t lme:6;
-               uint32_t reserved_8_15:8;
-               uint32_t flm:1;
-               uint32_t reserved_6_6:1;
-               uint32_t dllle:1;
-               uint32_t reserved_4_4:1;
-               uint32_t ra:1;
-               uint32_t le:1;
-               uint32_t sd:1;
-               uint32_t omr:1;
-       } s;
-       struct cvmx_pcieep_cfg452_s cn52xx;
-       struct cvmx_pcieep_cfg452_s cn52xxp1;
-       struct cvmx_pcieep_cfg452_s cn56xx;
-       struct cvmx_pcieep_cfg452_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg453 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg453_s {
-               uint32_t dlld:1;
-               uint32_t reserved_26_30:5;
-               uint32_t ack_nak:1;
-               uint32_t fcd:1;
-               uint32_t ilst:24;
-       } s;
-       struct cvmx_pcieep_cfg453_s cn52xx;
-       struct cvmx_pcieep_cfg453_s cn52xxp1;
-       struct cvmx_pcieep_cfg453_s cn56xx;
-       struct cvmx_pcieep_cfg453_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg454 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg454_s {
-               uint32_t reserved_29_31:3;
-               uint32_t tmfcwt:5;
-               uint32_t tmanlt:5;
-               uint32_t tmrt:5;
-               uint32_t reserved_11_13:3;
-               uint32_t nskps:3;
-               uint32_t reserved_4_7:4;
-               uint32_t ntss:4;
-       } s;
-       struct cvmx_pcieep_cfg454_s cn52xx;
-       struct cvmx_pcieep_cfg454_s cn52xxp1;
-       struct cvmx_pcieep_cfg454_s cn56xx;
-       struct cvmx_pcieep_cfg454_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg455 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg455_s {
-               uint32_t m_cfg0_filt:1;
-               uint32_t m_io_filt:1;
-               uint32_t msg_ctrl:1;
-               uint32_t m_cpl_ecrc_filt:1;
-               uint32_t m_ecrc_filt:1;
-               uint32_t m_cpl_len_err:1;
-               uint32_t m_cpl_attr_err:1;
-               uint32_t m_cpl_tc_err:1;
-               uint32_t m_cpl_fun_err:1;
-               uint32_t m_cpl_rid_err:1;
-               uint32_t m_cpl_tag_err:1;
-               uint32_t m_lk_filt:1;
-               uint32_t m_cfg1_filt:1;
-               uint32_t m_bar_match:1;
-               uint32_t m_pois_filt:1;
-               uint32_t m_fun:1;
-               uint32_t dfcwt:1;
-               uint32_t reserved_11_14:4;
-               uint32_t skpiv:11;
-       } s;
-       struct cvmx_pcieep_cfg455_s cn52xx;
-       struct cvmx_pcieep_cfg455_s cn52xxp1;
-       struct cvmx_pcieep_cfg455_s cn56xx;
-       struct cvmx_pcieep_cfg455_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg456 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg456_s {
-               uint32_t reserved_2_31:30;
-               uint32_t m_vend1_drp:1;
-               uint32_t m_vend0_drp:1;
-       } s;
-       struct cvmx_pcieep_cfg456_s cn52xx;
-       struct cvmx_pcieep_cfg456_s cn52xxp1;
-       struct cvmx_pcieep_cfg456_s cn56xx;
-       struct cvmx_pcieep_cfg456_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg458 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg458_s {
-               uint32_t dbg_info_l32:32;
-       } s;
-       struct cvmx_pcieep_cfg458_s cn52xx;
-       struct cvmx_pcieep_cfg458_s cn52xxp1;
-       struct cvmx_pcieep_cfg458_s cn56xx;
-       struct cvmx_pcieep_cfg458_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg459 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg459_s {
-               uint32_t dbg_info_u32:32;
-       } s;
-       struct cvmx_pcieep_cfg459_s cn52xx;
-       struct cvmx_pcieep_cfg459_s cn52xxp1;
-       struct cvmx_pcieep_cfg459_s cn56xx;
-       struct cvmx_pcieep_cfg459_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg460 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg460_s {
-               uint32_t reserved_20_31:12;
-               uint32_t tphfcc:8;
-               uint32_t tpdfcc:12;
-       } s;
-       struct cvmx_pcieep_cfg460_s cn52xx;
-       struct cvmx_pcieep_cfg460_s cn52xxp1;
-       struct cvmx_pcieep_cfg460_s cn56xx;
-       struct cvmx_pcieep_cfg460_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg461 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg461_s {
-               uint32_t reserved_20_31:12;
-               uint32_t tchfcc:8;
-               uint32_t tcdfcc:12;
-       } s;
-       struct cvmx_pcieep_cfg461_s cn52xx;
-       struct cvmx_pcieep_cfg461_s cn52xxp1;
-       struct cvmx_pcieep_cfg461_s cn56xx;
-       struct cvmx_pcieep_cfg461_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg462 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg462_s {
-               uint32_t reserved_20_31:12;
-               uint32_t tchfcc:8;
-               uint32_t tcdfcc:12;
-       } s;
-       struct cvmx_pcieep_cfg462_s cn52xx;
-       struct cvmx_pcieep_cfg462_s cn52xxp1;
-       struct cvmx_pcieep_cfg462_s cn56xx;
-       struct cvmx_pcieep_cfg462_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg463 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg463_s {
-               uint32_t reserved_3_31:29;
-               uint32_t rqne:1;
-               uint32_t trbne:1;
-               uint32_t rtlpfccnr:1;
-       } s;
-       struct cvmx_pcieep_cfg463_s cn52xx;
-       struct cvmx_pcieep_cfg463_s cn52xxp1;
-       struct cvmx_pcieep_cfg463_s cn56xx;
-       struct cvmx_pcieep_cfg463_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg464 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg464_s {
-               uint32_t wrr_vc3:8;
-               uint32_t wrr_vc2:8;
-               uint32_t wrr_vc1:8;
-               uint32_t wrr_vc0:8;
-       } s;
-       struct cvmx_pcieep_cfg464_s cn52xx;
-       struct cvmx_pcieep_cfg464_s cn52xxp1;
-       struct cvmx_pcieep_cfg464_s cn56xx;
-       struct cvmx_pcieep_cfg464_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg465 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg465_s {
-               uint32_t wrr_vc7:8;
-               uint32_t wrr_vc6:8;
-               uint32_t wrr_vc5:8;
-               uint32_t wrr_vc4:8;
-       } s;
-       struct cvmx_pcieep_cfg465_s cn52xx;
-       struct cvmx_pcieep_cfg465_s cn52xxp1;
-       struct cvmx_pcieep_cfg465_s cn56xx;
-       struct cvmx_pcieep_cfg465_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg466 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg466_s {
-               uint32_t rx_queue_order:1;
-               uint32_t type_ordering:1;
-               uint32_t reserved_24_29:6;
-               uint32_t queue_mode:3;
-               uint32_t reserved_20_20:1;
-               uint32_t header_credits:8;
-               uint32_t data_credits:12;
-       } s;
-       struct cvmx_pcieep_cfg466_s cn52xx;
-       struct cvmx_pcieep_cfg466_s cn52xxp1;
-       struct cvmx_pcieep_cfg466_s cn56xx;
-       struct cvmx_pcieep_cfg466_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg467 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg467_s {
-               uint32_t reserved_24_31:8;
-               uint32_t queue_mode:3;
-               uint32_t reserved_20_20:1;
-               uint32_t header_credits:8;
-               uint32_t data_credits:12;
-       } s;
-       struct cvmx_pcieep_cfg467_s cn52xx;
-       struct cvmx_pcieep_cfg467_s cn52xxp1;
-       struct cvmx_pcieep_cfg467_s cn56xx;
-       struct cvmx_pcieep_cfg467_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg468 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg468_s {
-               uint32_t reserved_24_31:8;
-               uint32_t queue_mode:3;
-               uint32_t reserved_20_20:1;
-               uint32_t header_credits:8;
-               uint32_t data_credits:12;
-       } s;
-       struct cvmx_pcieep_cfg468_s cn52xx;
-       struct cvmx_pcieep_cfg468_s cn52xxp1;
-       struct cvmx_pcieep_cfg468_s cn56xx;
-       struct cvmx_pcieep_cfg468_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg490 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg490_s {
-               uint32_t reserved_26_31:6;
-               uint32_t header_depth:10;
-               uint32_t reserved_14_15:2;
-               uint32_t data_depth:14;
-       } s;
-       struct cvmx_pcieep_cfg490_s cn52xx;
-       struct cvmx_pcieep_cfg490_s cn52xxp1;
-       struct cvmx_pcieep_cfg490_s cn56xx;
-       struct cvmx_pcieep_cfg490_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg491 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg491_s {
-               uint32_t reserved_26_31:6;
-               uint32_t header_depth:10;
-               uint32_t reserved_14_15:2;
-               uint32_t data_depth:14;
-       } s;
-       struct cvmx_pcieep_cfg491_s cn52xx;
-       struct cvmx_pcieep_cfg491_s cn52xxp1;
-       struct cvmx_pcieep_cfg491_s cn56xx;
-       struct cvmx_pcieep_cfg491_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg492 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg492_s {
-               uint32_t reserved_26_31:6;
-               uint32_t header_depth:10;
-               uint32_t reserved_14_15:2;
-               uint32_t data_depth:14;
-       } s;
-       struct cvmx_pcieep_cfg492_s cn52xx;
-       struct cvmx_pcieep_cfg492_s cn52xxp1;
-       struct cvmx_pcieep_cfg492_s cn56xx;
-       struct cvmx_pcieep_cfg492_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg516 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg516_s {
-               uint32_t phy_stat:32;
-       } s;
-       struct cvmx_pcieep_cfg516_s cn52xx;
-       struct cvmx_pcieep_cfg516_s cn52xxp1;
-       struct cvmx_pcieep_cfg516_s cn56xx;
-       struct cvmx_pcieep_cfg516_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg517 {
-       uint32_t u32;
-       struct cvmx_pcieep_cfg517_s {
-               uint32_t phy_ctrl:32;
-       } s;
-       struct cvmx_pcieep_cfg517_s cn52xx;
-       struct cvmx_pcieep_cfg517_s cn52xxp1;
-       struct cvmx_pcieep_cfg517_s cn56xx;
-       struct cvmx_pcieep_cfg517_s cn56xxp1;
-};
-
-#endif
index fcd4060f642196b2e248fac9e3eca2d8cb3bb6d4..90bf3b3fce199cda78430b3af99770fe82de3fcd 100644 (file)
@@ -17,6 +17,7 @@
  */
 
 #include <linux/ioport.h>
+#include <linux/of.h>
 
 /*
  * Each pci channel is a top-level PCI bus seem by CPU.  A machine  with
@@ -26,6 +27,7 @@
 struct pci_controller {
        struct pci_controller *next;
        struct pci_bus *bus;
+       struct device_node *of_node;
 
        struct pci_ops *pci_ops;
        struct resource *mem_resource;
@@ -142,4 +144,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 
 extern char * (*pcibios_plat_setup)(char *str);
 
+/* this function parses memory ranges from a device node */
+extern void __devinit pci_load_of_ranges(struct pci_controller *hose,
+                                        struct device_node *node);
+
 #endif /* _ASM_PCI_H */
index e0308dcca1358f6f2db6161486299de11d61dde5..fa03ec3fbf897a4c3271d8a38025f383c82760cc 100644 (file)
  * assume GCC is being used.
  */
 
-#if (_MIPS_SZLONG == 64)
-typedef unsigned int   __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-#endif
-
 typedef long           __kernel_daddr_t;
 #define __kernel_daddr_t __kernel_daddr_t
 
index 7a6e82ef449b3bf4e8747974dd3ed81c2a231401..7206d445bab876e926cec6bd242309b893870007 100644 (file)
@@ -12,6 +12,9 @@
 #define __ASM_PROM_H
 
 #ifdef CONFIG_OF
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/types.h>
 #include <asm/bootinfo.h>
 
 extern int early_init_dt_scan_memory_arch(unsigned long node,
@@ -21,6 +24,29 @@ extern int reserve_mem_mach(unsigned long addr, unsigned long size);
 extern void free_mem_mach(unsigned long addr, unsigned long size);
 
 extern void device_tree_init(void);
+
+static inline unsigned long pci_address_to_pio(phys_addr_t address)
+{
+       /*
+        * The ioport address can be directly used by inX() / outX()
+        */
+       BUG_ON(address > IO_SPACE_LIMIT);
+
+       return (unsigned long) address;
+}
+#define pci_address_to_pio pci_address_to_pio
+
+struct boot_param_header;
+
+extern void __dt_setup_arch(struct boot_param_header *bph);
+
+#define dt_setup_arch(sym)                                             \
+({                                                                     \
+       extern struct boot_param_header __dtb_##sym##_begin;            \
+                                                                       \
+       __dt_setup_arch(&__dtb_##sym##_begin);                          \
+})
+
 #else /* CONFIG_OF */
 static inline void device_tree_init(void) { }
 #endif /* CONFIG_OF */
index 6dce6d8d09ab08d77042c9b89a37da65e2c82f8d..2560b6b6a7d8f9b47ddc6bc607f80b732110dad3 100644 (file)
@@ -14,7 +14,8 @@ extern void *set_vi_handler(int n, vi_handler_t addr);
 
 extern void *set_except_vector(int n, void *addr);
 extern unsigned long ebase;
-extern void per_cpu_trap_init(void);
+extern void per_cpu_trap_init(bool);
+extern void cpu_cache_init(void);
 
 #endif /* __KERNEL__ */
 
index 7165333ad043b4ac22fa3304cb711f4e371e1cf0..4461198361c9760fa5afed8a101af0fa89207bc0 100644 (file)
@@ -6,7 +6,11 @@
  * SECTION_SIZE_BITS           2^N: how big each section will be
  * MAX_PHYSMEM_BITS            2^N: how much memory we can have in that space
  */
-#define SECTION_SIZE_BITS       28
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PAGE_SIZE_64KB)
+# define SECTION_SIZE_BITS     29
+#else
+# define SECTION_SIZE_BITS     28
+#endif
 #define MAX_PHYSMEM_BITS        35
 
 #endif /* CONFIG_SPARSEMEM */
index 6e00f751ab6dc675b886d736e1f0c814136cfbd4..fe9a4c3ec5a1f2d9f557adf7c32348b0c892e374 100644 (file)
@@ -20,7 +20,7 @@ struct stat {
        long            st_pad1[3];             /* Reserved for network id */
        ino_t           st_ino;
        mode_t          st_mode;
-       nlink_t         st_nlink;
+       __u32           st_nlink;
        uid_t           st_uid;
        gid_t           st_gid;
        unsigned        st_rdev;
@@ -55,7 +55,7 @@ struct stat64 {
        unsigned long long      st_ino;
 
        mode_t          st_mode;
-       nlink_t         st_nlink;
+       __u32           st_nlink;
 
        uid_t           st_uid;
        gid_t           st_gid;
@@ -96,7 +96,7 @@ struct stat {
        unsigned long           st_ino;
 
        mode_t                  st_mode;
-       nlink_t                 st_nlink;
+       __u32                   st_nlink;
 
        uid_t                   st_uid;
        gid_t                   st_gid;
index 8f77f774a2a01e3b70fee2bae89a1b036c89fc6f..abdd87aaf609034742f0401e9846ea4ba2bd2ced 100644 (file)
@@ -60,7 +60,7 @@ struct termio {
 };
 
 #ifdef __KERNEL__
-#include <linux/module.h>
+#include <asm/uaccess.h>
 
 /*
  *     intr=^C         quit=^\         erase=del       kill=^U
index ff74aec3561a3f0a88829db33c15b87887fdcbc9..420ca06b2f42df601e8fcfbd5f148626f2df225d 100644 (file)
@@ -25,6 +25,7 @@ extern void (*board_nmi_handler_setup)(void);
 extern void (*board_ejtag_handler_setup)(void);
 extern void (*board_bind_eic_interrupt)(int irq, int regset);
 extern void (*board_ebase_setup)(void);
+extern void (*board_cache_error_setup)(void);
 
 extern int register_nmi_notifier(struct notifier_block *nb);
 
index 504d40aedfae670aff49644f1964e6a932b63ac6..440a21dab575705412805809c57ce31a893825b3 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/types.h>
 
 #ifdef CONFIG_EXPORT_UASM
-#include <linux/module.h>
+#include <linux/export.h>
 #define __uasminit
 #define __uasminitdata
 #define UASM_EXPORT_SYMBOL(sym) EXPORT_SYMBOL(sym)
index a9dff33212518d752b8620a969a79da3935543d1..e44abea9c209ef1c2e035373333e671a876b0093 100644 (file)
@@ -16,5 +16,3 @@ obj-$(CONFIG_JZ4740_QI_LB60)  += board-qi_lb60.o
 # PM support
 
 obj-$(CONFIG_PM) += pm.o
-
-ccflags-y := -Werror -Wall
index 5099201fb7bc9933f6b939697a5d93fc02520ea9..6ae7ce4ac63eb9b6a65ecf8ae0a49093579ae597 100644 (file)
@@ -340,7 +340,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "R2000";
                c->isa_level = MIPS_CPU_ISA_I;
                c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
-                            MIPS_CPU_NOFPUEX;
+                            MIPS_CPU_NOFPUEX;
                if (__cpu_has_fpu())
                        c->options |= MIPS_CPU_FPU;
                c->tlbsize = 64;
@@ -361,7 +361,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                }
                c->isa_level = MIPS_CPU_ISA_I;
                c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
-                            MIPS_CPU_NOFPUEX;
+                            MIPS_CPU_NOFPUEX;
                if (__cpu_has_fpu())
                        c->options |= MIPS_CPU_FPU;
                c->tlbsize = 64;
@@ -387,8 +387,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 
                c->isa_level = MIPS_CPU_ISA_III;
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-                            MIPS_CPU_WATCH | MIPS_CPU_VCE |
-                            MIPS_CPU_LLSC;
+                            MIPS_CPU_WATCH | MIPS_CPU_VCE |
+                            MIPS_CPU_LLSC;
                c->tlbsize = 48;
                break;
        case PRID_IMP_VR41XX:
@@ -434,7 +434,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "R4300";
                c->isa_level = MIPS_CPU_ISA_III;
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-                            MIPS_CPU_LLSC;
+                            MIPS_CPU_LLSC;
                c->tlbsize = 32;
                break;
        case PRID_IMP_R4600:
@@ -446,7 +446,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                c->tlbsize = 48;
                break;
        #if 0
-       case PRID_IMP_R4650:
+       case PRID_IMP_R4650:
                /*
                 * This processor doesn't have an MMU, so it's not
                 * "real easy" to run Linux on it. It is left purely
@@ -455,9 +455,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                 */
                c->cputype = CPU_R4650;
                __cpu_name[cpu] = "R4650";
-               c->isa_level = MIPS_CPU_ISA_III;
+               c->isa_level = MIPS_CPU_ISA_III;
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
-               c->tlbsize = 48;
+               c->tlbsize = 48;
                break;
        #endif
        case PRID_IMP_TX39:
@@ -488,7 +488,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "R4700";
                c->isa_level = MIPS_CPU_ISA_III;
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-                            MIPS_CPU_LLSC;
+                            MIPS_CPU_LLSC;
                c->tlbsize = 48;
                break;
        case PRID_IMP_TX49:
@@ -505,7 +505,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "R5000";
                c->isa_level = MIPS_CPU_ISA_IV;
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-                            MIPS_CPU_LLSC;
+                            MIPS_CPU_LLSC;
                c->tlbsize = 48;
                break;
        case PRID_IMP_R5432:
@@ -513,7 +513,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "R5432";
                c->isa_level = MIPS_CPU_ISA_IV;
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-                            MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+                            MIPS_CPU_WATCH | MIPS_CPU_LLSC;
                c->tlbsize = 48;
                break;
        case PRID_IMP_R5500:
@@ -521,7 +521,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "R5500";
                c->isa_level = MIPS_CPU_ISA_IV;
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-                            MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+                            MIPS_CPU_WATCH | MIPS_CPU_LLSC;
                c->tlbsize = 48;
                break;
        case PRID_IMP_NEVADA:
@@ -529,7 +529,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "Nevada";
                c->isa_level = MIPS_CPU_ISA_IV;
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-                            MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
+                            MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
                c->tlbsize = 48;
                break;
        case PRID_IMP_R6000:
@@ -537,7 +537,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "R6000";
                c->isa_level = MIPS_CPU_ISA_II;
                c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
-                            MIPS_CPU_LLSC;
+                            MIPS_CPU_LLSC;
                c->tlbsize = 32;
                break;
        case PRID_IMP_R6000A:
@@ -545,7 +545,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "R6000A";
                c->isa_level = MIPS_CPU_ISA_II;
                c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
-                            MIPS_CPU_LLSC;
+                            MIPS_CPU_LLSC;
                c->tlbsize = 32;
                break;
        case PRID_IMP_RM7000:
@@ -553,7 +553,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "RM7000";
                c->isa_level = MIPS_CPU_ISA_IV;
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-                            MIPS_CPU_LLSC;
+                            MIPS_CPU_LLSC;
                /*
                 * Undocumented RM7000:  Bit 29 in the info register of
                 * the RM7000 v2.0 indicates if the TLB has 48 or 64
@@ -569,7 +569,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "RM9000";
                c->isa_level = MIPS_CPU_ISA_IV;
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-                            MIPS_CPU_LLSC;
+                            MIPS_CPU_LLSC;
                /*
                 * Bit 29 in the info register of the RM9000
                 * indicates if the TLB has 48 or 64 entries.
@@ -584,8 +584,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "RM8000";
                c->isa_level = MIPS_CPU_ISA_IV;
                c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
-                            MIPS_CPU_FPU | MIPS_CPU_32FPR |
-                            MIPS_CPU_LLSC;
+                            MIPS_CPU_FPU | MIPS_CPU_32FPR |
+                            MIPS_CPU_LLSC;
                c->tlbsize = 384;      /* has weird TLB: 3-way x 128 */
                break;
        case PRID_IMP_R10000:
@@ -593,9 +593,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "R10000";
                c->isa_level = MIPS_CPU_ISA_IV;
                c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
-                            MIPS_CPU_FPU | MIPS_CPU_32FPR |
+                            MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
-                            MIPS_CPU_LLSC;
+                            MIPS_CPU_LLSC;
                c->tlbsize = 64;
                break;
        case PRID_IMP_R12000:
@@ -603,9 +603,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "R12000";
                c->isa_level = MIPS_CPU_ISA_IV;
                c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
-                            MIPS_CPU_FPU | MIPS_CPU_32FPR |
+                            MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
-                            MIPS_CPU_LLSC;
+                            MIPS_CPU_LLSC;
                c->tlbsize = 64;
                break;
        case PRID_IMP_R14000:
@@ -613,9 +613,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "R14000";
                c->isa_level = MIPS_CPU_ISA_IV;
                c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
-                            MIPS_CPU_FPU | MIPS_CPU_32FPR |
+                            MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
-                            MIPS_CPU_LLSC;
+                            MIPS_CPU_LLSC;
                c->tlbsize = 64;
                break;
        case PRID_IMP_LOONGSON2:
@@ -739,7 +739,7 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
        if (config3 & MIPS_CONF3_VEIC)
                c->options |= MIPS_CPU_VEIC;
        if (config3 & MIPS_CONF3_MT)
-               c->ases |= MIPS_ASE_MIPSMT;
+               c->ases |= MIPS_ASE_MIPSMT;
        if (config3 & MIPS_CONF3_ULRI)
                c->options |= MIPS_CPU_ULRI;
 
@@ -767,7 +767,7 @@ static void __cpuinit decode_configs(struct cpuinfo_mips *c)
 
        /* MIPS32 or MIPS64 compliant CPU.  */
        c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
-                    MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
+                    MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
 
        c->scache.flags = MIPS_CACHE_NOT_PRESENT;
 
index ab73fa2fb9b5707343c6081b821faf9775b90a2f..f29099b104c497e5cb4b4a3d368a801fc6c60d05 100644 (file)
@@ -1532,7 +1532,8 @@ init_hw_perf_events(void)
                irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
        } else {
 #endif
-               if (cp0_perfcount_irq >= 0)
+               if ((cp0_perfcount_irq >= 0) &&
+                               (cp0_compare_irq != cp0_perfcount_irq))
                        irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
                else
                        irq = -1;
index f8b2c592514de3293e219d92bb59939ec51e8ace..5542817c1b498869cef930f4bd09c7ce680fe130 100644 (file)
@@ -41,27 +41,27 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
        seq_printf(m, "processor\t\t: %ld\n", n);
        sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
-               cpu_data[n].options & MIPS_CPU_FPU ? "  FPU V%d.%d" : "");
+                     cpu_data[n].options & MIPS_CPU_FPU ? "  FPU V%d.%d" : "");
        seq_printf(m, fmt, __cpu_name[n],
-                                  (version >> 4) & 0x0f, version & 0x0f,
-                                  (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
+                     (version >> 4) & 0x0f, version & 0x0f,
+                     (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
        seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
-                     cpu_data[n].udelay_val / (500000/HZ),
-                     (cpu_data[n].udelay_val / (5000/HZ)) % 100);
+                     cpu_data[n].udelay_val / (500000/HZ),
+                     (cpu_data[n].udelay_val / (5000/HZ)) % 100);
        seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
        seq_printf(m, "microsecond timers\t: %s\n",
-                     cpu_has_counter ? "yes" : "no");
+                     cpu_has_counter ? "yes" : "no");
        seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
        seq_printf(m, "extra interrupt vector\t: %s\n",
-                     cpu_has_divec ? "yes" : "no");
+                     cpu_has_divec ? "yes" : "no");
        seq_printf(m, "hardware watchpoint\t: %s",
-                  cpu_has_watch ? "yes, " : "no\n");
+                     cpu_has_watch ? "yes, " : "no\n");
        if (cpu_has_watch) {
                seq_printf(m, "count: %d, address/irw mask: [",
-                          cpu_data[n].watch_reg_count);
+                     cpu_data[n].watch_reg_count);
                for (i = 0; i < cpu_data[n].watch_reg_count; i++)
                        seq_printf(m, "%s0x%04x", i ? ", " : "" ,
-                                  cpu_data[n].watch_reg_masks[i]);
+                               cpu_data[n].watch_reg_masks[i]);
                seq_printf(m, "]\n");
        }
        seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n",
@@ -73,13 +73,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                      cpu_has_mipsmt ? " mt" : ""
                );
        seq_printf(m, "shadow register sets\t: %d\n",
-                      cpu_data[n].srsets);
+                     cpu_data[n].srsets);
        seq_printf(m, "kscratch registers\t: %d\n",
-                  hweight8(cpu_data[n].kscratch_mask));
+                     hweight8(cpu_data[n].kscratch_mask));
        seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
 
        sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
-               cpu_has_vce ? "%u" : "not available");
+                     cpu_has_vce ? "%u" : "not available");
        seq_printf(m, fmt, 'D', vced_count);
        seq_printf(m, fmt, 'I', vcei_count);
        seq_printf(m, "\n");
index 558b5395795df810d01c97ed36efa96fc9141eea..f11b2bbb826d223d10eefffb810c9bcdb67eb017 100644 (file)
@@ -95,3 +95,16 @@ void __init device_tree_init(void)
        /* free the space reserved for the dt blob */
        free_mem_mach(base, size);
 }
+
+void __init __dt_setup_arch(struct boot_param_header *bph)
+{
+       if (be32_to_cpu(bph->magic) != OF_DT_HEADER) {
+               pr_err("DTB has bad magic, ignoring builtin OF DTB\n");
+
+               return;
+       }
+
+       initial_boot_params = bph;
+
+       early_init_devtree(initial_boot_params);
+}
index c504b212f8f3f968ede65e514cbcc96168c7c330..a53f8ec37aac68beef41b905b88cf1fe92e805d9 100644 (file)
@@ -605,6 +605,8 @@ void __init setup_arch(char **cmdline_p)
 
        resource_init();
        plat_smp_setup();
+
+       cpu_cache_init();
 }
 
 unsigned long kernelsp[NR_CPUS];
index 10263b405981f0eb4604f1d38bd3887e10f1c129..9c60d09e62a71521007c3cae0894cdcf09f01424 100644 (file)
@@ -19,8 +19,6 @@
 #  define DEBUGP(fmt, args...)
 #endif
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * Determine which stack to use..
  */
index 17f6ee30ad0d604da53fca7da7569bf4006d3d35..f2c09cfc60ac338dc9300f3487bae83e48a8cbd1 100644 (file)
@@ -339,7 +339,6 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
        if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
                goto badframe;
 
-       sigdelsetmask(&blocked, ~_BLOCKABLE);
        set_current_blocked(&blocked);
 
        sig = restore_sigcontext(&regs, &frame->sf_sc);
@@ -375,7 +374,6 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
        if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
@@ -514,9 +512,10 @@ struct mips_abi mips_abi = {
        .restart        = __NR_restart_syscall
 };
 
-static int handle_signal(unsigned long sig, siginfo_t *info,
-       struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
+static void handle_signal(unsigned long sig, siginfo_t *info,
+       struct k_sigaction *ka, struct pt_regs *regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
        struct mips_abi *abi = current->thread.abi;
        void *vdso = current->mm->context.vdso;
@@ -550,17 +549,14 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
                                       ka, regs, sig, oldset);
 
        if (ret)
-               return ret;
-
-       block_sigmask(ka, sig);
+               return;
 
-       return ret;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 static void do_signal(struct pt_regs *regs)
 {
        struct k_sigaction ka;
-       sigset_t *oldset;
        siginfo_t info;
        int signr;
 
@@ -572,25 +568,10 @@ static void do_signal(struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
-               if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag.
-                        */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
-
+               handle_signal(signr, &info, &ka, regs);
                return;
        }
 
@@ -614,10 +595,7 @@ static void do_signal(struct pt_regs *regs)
         * If there's no signal to deliver, we just put the saved sigmask
         * back
         */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 /*
@@ -630,14 +608,12 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
        local_irq_enable();
 
        /* deal with pending signal delivery */
-       if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+       if (thread_info_flags & _TIF_SIGPENDING)
                do_signal(regs);
 
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
 
index b4fe2eacbd5d258a55146f5d51154491119569e8..da1b56a39ac77815b4989183fd07821286468dfe 100644 (file)
@@ -465,7 +465,6 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
        if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
                goto badframe;
 
-       sigdelsetmask(&blocked, ~_BLOCKABLE);
        set_current_blocked(&blocked);
 
        sig = restore_sigcontext32(&regs, &frame->sf_sc);
@@ -503,7 +502,6 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
        if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
index 63ffac9af7c5b61fa9fa966e641889f935b7d747..3574c145511be486b0feb82177ced3c80679d180 100644 (file)
@@ -109,7 +109,6 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
        if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
index 71a95f55a6493be90e4695de4d4c948a0dfa465f..48650c8180401aeb1ce1c3f2713471d9c6e45536 100644 (file)
@@ -106,7 +106,7 @@ asmlinkage __cpuinit void start_secondary(void)
 #endif /* CONFIG_MIPS_MT_SMTC */
        cpu_probe();
        cpu_report();
-       per_cpu_trap_init();
+       per_cpu_trap_init(false);
        mips_clockevent_init();
        mp_ops->init_secondary();
 
index cfdaaa4cffc0a28fa60b662d6eccf992396ebe58..2d0c2a277f525b5b89b89500a5153c9032ed4ce3 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
@@ -91,7 +92,7 @@ void (*board_nmi_handler_setup)(void);
 void (*board_ejtag_handler_setup)(void);
 void (*board_bind_eic_interrupt)(int irq, int regset);
 void (*board_ebase_setup)(void);
-
+void __cpuinitdata(*board_cache_error_setup)(void);
 
 static void show_raw_backtrace(unsigned long reg29)
 {
@@ -1490,7 +1491,6 @@ void *set_vi_handler(int n, vi_handler_t addr)
        return set_vi_srs_handler(n, addr, 0);
 }
 
-extern void cpu_cache_init(void);
 extern void tlb_init(void);
 extern void flush_tlb_handlers(void);
 
@@ -1517,7 +1517,7 @@ static int __init ulri_disable(char *s)
 }
 __setup("noulri", ulri_disable);
 
-void __cpuinit per_cpu_trap_init(void)
+void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
 {
        unsigned int cpu = smp_processor_id();
        unsigned int status_set = ST0_CU0;
@@ -1616,7 +1616,9 @@ void __cpuinit per_cpu_trap_init(void)
 #ifdef CONFIG_MIPS_MT_SMTC
        if (bootTC) {
 #endif /* CONFIG_MIPS_MT_SMTC */
-               cpu_cache_init();
+               /* Boot CPU's cache setup in setup_arch(). */
+               if (!is_boot_cpu)
+                       cpu_cache_init();
                tlb_init();
 #ifdef CONFIG_MIPS_MT_SMTC
        } else if (!secondaryTC) {
@@ -1632,7 +1634,7 @@ void __cpuinit per_cpu_trap_init(void)
 }
 
 /* Install CPU exception handler */
-void __init set_handler(unsigned long offset, void *addr, unsigned long size)
+void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
 {
        memcpy((void *)(ebase + offset), addr, size);
        local_flush_icache_range(ebase + offset, ebase + offset + size);
@@ -1693,7 +1695,7 @@ void __init trap_init(void)
 
        if (board_ebase_setup)
                board_ebase_setup();
-       per_cpu_trap_init();
+       per_cpu_trap_init(true);
 
        /*
         * Copy the generic exception handlers to their final destination.
@@ -1797,6 +1799,9 @@ void __init trap_init(void)
 
        set_except_vector(26, handle_dsp);
 
+       if (board_cache_error_setup)
+               board_cache_error_setup();
+
        if (cpu_has_vce)
                /* Special exception: R4[04]00 uses also the divec space. */
                memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
index 3fccf2104513b7bbc1368313106ad1533a345a78..20bdf40b3efa7be98d3d1bac47ad30dec134452b 100644 (file)
@@ -16,8 +16,22 @@ config SOC_XWAY
        bool "XWAY"
        select SOC_TYPE_XWAY
        select HW_HAS_PCI
+
+config SOC_FALCON
+       bool "FALCON"
+
+endchoice
+
+choice
+       prompt "Devicetree"
+
+config DT_EASY50712
+       bool "Easy50712"
+       depends on SOC_XWAY
 endchoice
 
-source "arch/mips/lantiq/xway/Kconfig"
+config PCI_LANTIQ
+       bool "PCI Support"
+       depends on SOC_XWAY && PCI
 
 endif
index e5dae0e24b00f8319d09dd77e0461042a329b20f..d6bdc579419fb2bbc7bfc0e7390a321cb404c995 100644 (file)
@@ -4,8 +4,11 @@
 # under the terms of the GNU General Public License version 2 as published
 # by the Free Software Foundation.
 
-obj-y := irq.o setup.o clk.o prom.o devices.o
+obj-y := irq.o clk.o prom.o
+
+obj-y += dts/
 
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 
 obj-$(CONFIG_SOC_TYPE_XWAY) += xway/
+obj-$(CONFIG_SOC_FALCON) += falcon/
index f3dff05722de63f02c30f6a67df56eb5304fd69a..b3ec49838fd7526f716c393cff823217b41fe099 100644 (file)
@@ -6,3 +6,4 @@ platform-$(CONFIG_LANTIQ)       += lantiq/
 cflags-$(CONFIG_LANTIQ)                += -I$(srctree)/arch/mips/include/asm/mach-lantiq
 load-$(CONFIG_LANTIQ)          = 0xffffffff80002000
 cflags-$(CONFIG_SOC_TYPE_XWAY) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/xway
+cflags-$(CONFIG_SOC_FALCON)    += -I$(srctree)/arch/mips/include/asm/mach-lantiq/falcon
index 412814fdd3ee239ce5f7cfc6e6c7f6ead93ed9fa..d3bcc33f4699ae7dcf8036bf70f0a6be19293d70 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/clk.h>
+#include <linux/clkdev.h>
 #include <linux/err.h>
 #include <linux/list.h>
 
 #include <lantiq_soc.h>
 
 #include "clk.h"
+#include "prom.h"
 
-struct clk {
-       const char *name;
-       unsigned long rate;
-       unsigned long (*get_rate) (void);
-};
+/* lantiq socs have 3 static clocks */
+static struct clk cpu_clk_generic[3];
 
-static struct clk *cpu_clk;
-static int cpu_clk_cnt;
+void clkdev_add_static(unsigned long cpu, unsigned long fpi, unsigned long io)
+{
+       cpu_clk_generic[0].rate = cpu;
+       cpu_clk_generic[1].rate = fpi;
+       cpu_clk_generic[2].rate = io;
+}
 
-/* lantiq socs have 3 static clocks */
-static struct clk cpu_clk_generic[] = {
-       {
-               .name = "cpu",
-               .get_rate = ltq_get_cpu_hz,
-       }, {
-               .name = "fpi",
-               .get_rate = ltq_get_fpi_hz,
-       }, {
-               .name = "io",
-               .get_rate = ltq_get_io_region_clock,
-       },
-};
-
-static struct resource ltq_cgu_resource = {
-       .name   = "cgu",
-       .start  = LTQ_CGU_BASE_ADDR,
-       .end    = LTQ_CGU_BASE_ADDR + LTQ_CGU_SIZE - 1,
-       .flags  = IORESOURCE_MEM,
-};
-
-/* remapped clock register range */
-void __iomem *ltq_cgu_membase;
-
-void clk_init(void)
+struct clk *clk_get_cpu(void)
+{
+       return &cpu_clk_generic[0];
+}
+
+struct clk *clk_get_fpi(void)
+{
+       return &cpu_clk_generic[1];
+}
+EXPORT_SYMBOL_GPL(clk_get_fpi);
+
+struct clk *clk_get_io(void)
 {
-       cpu_clk = cpu_clk_generic;
-       cpu_clk_cnt = ARRAY_SIZE(cpu_clk_generic);
+       return &cpu_clk_generic[2];
 }
 
 static inline int clk_good(struct clk *clk)
@@ -82,38 +71,71 @@ unsigned long clk_get_rate(struct clk *clk)
 }
 EXPORT_SYMBOL(clk_get_rate);
 
-struct clk *clk_get(struct device *dev, const char *id)
+int clk_set_rate(struct clk *clk, unsigned long rate)
 {
-       int i;
-
-       for (i = 0; i < cpu_clk_cnt; i++)
-               if (!strcmp(id, cpu_clk[i].name))
-                       return &cpu_clk[i];
-       BUG();
-       return ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
-       /* not used */
+       if (unlikely(!clk_good(clk)))
+               return 0;
+       if (clk->rates && *clk->rates) {
+               unsigned long *r = clk->rates;
+
+               while (*r && (*r != rate))
+                       r++;
+               if (!*r) {
+                       pr_err("clk %s.%s: trying to set invalid rate %ld\n",
+                               clk->cl.dev_id, clk->cl.con_id, rate);
+                       return -1;
+               }
+       }
+       clk->rate = rate;
+       return 0;
 }
-EXPORT_SYMBOL(clk_put);
+EXPORT_SYMBOL(clk_set_rate);
 
 int clk_enable(struct clk *clk)
 {
-       /* not used */
-       return 0;
+       if (unlikely(!clk_good(clk)))
+               return -1;
+
+       if (clk->enable)
+               return clk->enable(clk);
+
+       return -1;
 }
 EXPORT_SYMBOL(clk_enable);
 
 void clk_disable(struct clk *clk)
 {
-       /* not used */
+       if (unlikely(!clk_good(clk)))
+               return;
+
+       if (clk->disable)
+               clk->disable(clk);
 }
 EXPORT_SYMBOL(clk_disable);
 
-static inline u32 ltq_get_counter_resolution(void)
+int clk_activate(struct clk *clk)
+{
+       if (unlikely(!clk_good(clk)))
+               return -1;
+
+       if (clk->activate)
+               return clk->activate(clk);
+
+       return -1;
+}
+EXPORT_SYMBOL(clk_activate);
+
+void clk_deactivate(struct clk *clk)
+{
+       if (unlikely(!clk_good(clk)))
+               return;
+
+       if (clk->deactivate)
+               clk->deactivate(clk);
+}
+EXPORT_SYMBOL(clk_deactivate);
+
+static inline u32 get_counter_resolution(void)
 {
        u32 res;
 
@@ -133,21 +155,11 @@ void __init plat_time_init(void)
 {
        struct clk *clk;
 
-       if (insert_resource(&iomem_resource, &ltq_cgu_resource) < 0)
-               panic("Failed to insert cgu memory");
+       ltq_soc_init();
 
-       if (request_mem_region(ltq_cgu_resource.start,
-                       resource_size(&ltq_cgu_resource), "cgu") < 0)
-               panic("Failed to request cgu memory");
-
-       ltq_cgu_membase = ioremap_nocache(ltq_cgu_resource.start,
-                               resource_size(&ltq_cgu_resource));
-       if (!ltq_cgu_membase) {
-               pr_err("Failed to remap cgu memory\n");
-               unreachable();
-       }
-       clk = clk_get(0, "cpu");
-       mips_hpt_frequency = clk_get_rate(clk) / ltq_get_counter_resolution();
+       clk = clk_get_cpu();
+       mips_hpt_frequency = clk_get_rate(clk) / get_counter_resolution();
        write_c0_compare(read_c0_count());
+       pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
        clk_put(clk);
 }
index 3328925f2c3f260ad3c887bb66d50f2d084d12ee..fa670602b91b509713f2a58e93647d903e26eb81 100644 (file)
@@ -9,10 +9,70 @@
 #ifndef _LTQ_CLK_H__
 #define _LTQ_CLK_H__
 
-extern void clk_init(void);
+#include <linux/clkdev.h>
 
-extern unsigned long ltq_get_cpu_hz(void);
-extern unsigned long ltq_get_fpi_hz(void);
-extern unsigned long ltq_get_io_region_clock(void);
+/* clock speeds */
+#define CLOCK_33M      33333333
+#define CLOCK_60M      60000000
+#define CLOCK_62_5M    62500000
+#define CLOCK_83M      83333333
+#define CLOCK_83_5M    83500000
+#define CLOCK_98_304M  98304000
+#define CLOCK_100M     100000000
+#define CLOCK_111M     111111111
+#define CLOCK_125M     125000000
+#define CLOCK_133M     133333333
+#define CLOCK_150M     150000000
+#define CLOCK_166M     166666666
+#define CLOCK_167M     166666667
+#define CLOCK_196_608M 196608000
+#define CLOCK_200M     200000000
+#define CLOCK_250M     250000000
+#define CLOCK_266M     266666666
+#define CLOCK_300M     300000000
+#define CLOCK_333M     333333333
+#define CLOCK_393M     393215332
+#define CLOCK_400M     400000000
+#define CLOCK_500M     500000000
+#define CLOCK_600M     600000000
+
+/* clock out speeds */
+#define CLOCK_32_768K  32768
+#define CLOCK_1_536M   1536000
+#define CLOCK_2_5M     2500000
+#define CLOCK_12M      12000000
+#define CLOCK_24M      24000000
+#define CLOCK_25M      25000000
+#define CLOCK_30M      30000000
+#define CLOCK_40M      40000000
+#define CLOCK_48M      48000000
+#define CLOCK_50M      50000000
+#define CLOCK_60M      60000000
+
+struct clk {
+       struct clk_lookup cl;
+       unsigned long rate;
+       unsigned long *rates;
+       unsigned int module;
+       unsigned int bits;
+       unsigned long (*get_rate) (void);
+       int (*enable) (struct clk *clk);
+       void (*disable) (struct clk *clk);
+       int (*activate) (struct clk *clk);
+       void (*deactivate) (struct clk *clk);
+       void (*reboot) (struct clk *clk);
+};
+
+extern void clkdev_add_static(unsigned long cpu, unsigned long fpi,
+                               unsigned long io);
+
+extern unsigned long ltq_danube_cpu_hz(void);
+extern unsigned long ltq_danube_fpi_hz(void);
+
+extern unsigned long ltq_ar9_cpu_hz(void);
+extern unsigned long ltq_ar9_fpi_hz(void);
+
+extern unsigned long ltq_vr9_cpu_hz(void);
+extern unsigned long ltq_vr9_fpi_hz(void);
 
 #endif
diff --git a/arch/mips/lantiq/devices.c b/arch/mips/lantiq/devices.c
deleted file mode 100644 (file)
index de1cb2b..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/reboot.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/etherdevice.h>
-#include <linux/time.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-
-#include <lantiq_soc.h>
-
-#include "devices.h"
-
-/* nor flash */
-static struct resource ltq_nor_resource = {
-       .name   = "nor",
-       .start  = LTQ_FLASH_START,
-       .end    = LTQ_FLASH_START + LTQ_FLASH_MAX - 1,
-       .flags  = IORESOURCE_MEM,
-};
-
-static struct platform_device ltq_nor = {
-       .name           = "ltq_nor",
-       .resource       = &ltq_nor_resource,
-       .num_resources  = 1,
-};
-
-void __init ltq_register_nor(struct physmap_flash_data *data)
-{
-       ltq_nor.dev.platform_data = data;
-       platform_device_register(&ltq_nor);
-}
-
-/* watchdog */
-static struct resource ltq_wdt_resource = {
-       .name   = "watchdog",
-       .start  = LTQ_WDT_BASE_ADDR,
-       .end    = LTQ_WDT_BASE_ADDR + LTQ_WDT_SIZE - 1,
-       .flags  = IORESOURCE_MEM,
-};
-
-void __init ltq_register_wdt(void)
-{
-       platform_device_register_simple("ltq_wdt", 0, &ltq_wdt_resource, 1);
-}
-
-/* asc ports */
-static struct resource ltq_asc0_resources[] = {
-       {
-               .name   = "asc0",
-               .start  = LTQ_ASC0_BASE_ADDR,
-               .end    = LTQ_ASC0_BASE_ADDR + LTQ_ASC_SIZE - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       IRQ_RES(tx, LTQ_ASC_TIR(0)),
-       IRQ_RES(rx, LTQ_ASC_RIR(0)),
-       IRQ_RES(err, LTQ_ASC_EIR(0)),
-};
-
-static struct resource ltq_asc1_resources[] = {
-       {
-               .name   = "asc1",
-               .start  = LTQ_ASC1_BASE_ADDR,
-               .end    = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       IRQ_RES(tx, LTQ_ASC_TIR(1)),
-       IRQ_RES(rx, LTQ_ASC_RIR(1)),
-       IRQ_RES(err, LTQ_ASC_EIR(1)),
-};
-
-void __init ltq_register_asc(int port)
-{
-       switch (port) {
-       case 0:
-               platform_device_register_simple("ltq_asc", 0,
-                       ltq_asc0_resources, ARRAY_SIZE(ltq_asc0_resources));
-               break;
-       case 1:
-               platform_device_register_simple("ltq_asc", 1,
-                       ltq_asc1_resources, ARRAY_SIZE(ltq_asc1_resources));
-               break;
-       default:
-               break;
-       }
-}
-
-#ifdef CONFIG_PCI
-/* pci */
-static struct platform_device ltq_pci = {
-       .name           = "ltq_pci",
-       .num_resources  = 0,
-};
-
-void __init ltq_register_pci(struct ltq_pci_data *data)
-{
-       ltq_pci.dev.platform_data = data;
-       platform_device_register(&ltq_pci);
-}
-#else
-void __init ltq_register_pci(struct ltq_pci_data *data)
-{
-       pr_err("kernel is compiled without PCI support\n");
-}
-#endif
diff --git a/arch/mips/lantiq/devices.h b/arch/mips/lantiq/devices.h
deleted file mode 100644 (file)
index 2947bb1..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#ifndef _LTQ_DEVICES_H__
-#define _LTQ_DEVICES_H__
-
-#include <lantiq_platform.h>
-#include <linux/mtd/physmap.h>
-
-#define IRQ_RES(resname, irq) \
-       {.name = #resname, .start = (irq), .flags = IORESOURCE_IRQ}
-
-extern void ltq_register_nor(struct physmap_flash_data *data);
-extern void ltq_register_wdt(void);
-extern void ltq_register_asc(int port);
-extern void ltq_register_pci(struct ltq_pci_data *data);
-
-#endif
diff --git a/arch/mips/lantiq/dts/Makefile b/arch/mips/lantiq/dts/Makefile
new file mode 100644 (file)
index 0000000..674fca4
--- /dev/null
@@ -0,0 +1,4 @@
+obj-$(CONFIG_DT_EASY50712) := easy50712.dtb.o
+
+$(obj)/%.dtb: $(obj)/%.dts
+       $(call if_changed,dtc)
diff --git a/arch/mips/lantiq/dts/danube.dtsi b/arch/mips/lantiq/dts/danube.dtsi
new file mode 100644 (file)
index 0000000..3a4520f
--- /dev/null
@@ -0,0 +1,105 @@
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "lantiq,xway", "lantiq,danube";
+
+       cpus {
+               cpu@0 {
+                       compatible = "mips,mips24Kc";
+               };
+       };
+
+       biu@1F800000 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "lantiq,biu", "simple-bus";
+               reg = <0x1F800000 0x800000>;
+               ranges = <0x0 0x1F800000 0x7FFFFF>;
+
+               icu0: icu@80200 {
+                       #interrupt-cells = <1>;
+                       interrupt-controller;
+                       compatible = "lantiq,icu";
+                       reg = <0x80200 0x120>;
+               };
+
+               watchdog@803F0 {
+                       compatible = "lantiq,wdt";
+                       reg = <0x803F0 0x10>;
+               };
+       };
+
+       sram@1F000000 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "lantiq,sram";
+               reg = <0x1F000000 0x800000>;
+               ranges = <0x0 0x1F000000 0x7FFFFF>;
+
+               eiu0: eiu@101000 {
+                       #interrupt-cells = <1>;
+                       interrupt-controller;
+                       interrupt-parent;
+                       compatible = "lantiq,eiu-xway";
+                       reg = <0x101000 0x1000>;
+               };
+
+               pmu0: pmu@102000 {
+                       compatible = "lantiq,pmu-xway";
+                       reg = <0x102000 0x1000>;
+               };
+
+               cgu0: cgu@103000 {
+                       compatible = "lantiq,cgu-xway";
+                       reg = <0x103000 0x1000>;
+                       #clock-cells = <1>;
+               };
+
+               rcu0: rcu@203000 {
+                       compatible = "lantiq,rcu-xway";
+                       reg = <0x203000 0x1000>;
+               };
+       };
+
+       fpi@10000000 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "lantiq,fpi", "simple-bus";
+               ranges = <0x0 0x10000000 0xEEFFFFF>;
+               reg = <0x10000000 0xEF00000>;
+
+               gptu@E100A00 {
+                       compatible = "lantiq,gptu-xway";
+                       reg = <0xE100A00 0x100>;
+               };
+
+               serial@E100C00 {
+                       compatible = "lantiq,asc";
+                       reg = <0xE100C00 0x400>;
+                       interrupt-parent = <&icu0>;
+                       interrupts = <112 113 114>;
+               };
+
+               dma0: dma@E104100 {
+                       compatible = "lantiq,dma-xway";
+                       reg = <0xE104100 0x800>;
+               };
+
+               ebu0: ebu@E105300 {
+                       compatible = "lantiq,ebu-xway";
+                       reg = <0xE105300 0x100>;
+               };
+
+               pci0: pci@E105400 {
+                       #address-cells = <3>;
+                       #size-cells = <2>;
+                       #interrupt-cells = <1>;
+                       compatible = "lantiq,pci-xway";
+                       bus-range = <0x0 0x0>;
+                       ranges = <0x2000000 0 0x8000000 0x8000000 0 0x2000000   /* pci memory */
+                                 0x1000000 0 0x00000000 0xAE00000 0 0x200000>; /* io space */
+                       reg = <0x7000000 0x8000         /* config space */
+                               0xE105400 0x400>;       /* pci bridge */
+               };
+       };
+};
diff --git a/arch/mips/lantiq/dts/easy50712.dts b/arch/mips/lantiq/dts/easy50712.dts
new file mode 100644 (file)
index 0000000..68c1731
--- /dev/null
@@ -0,0 +1,113 @@
+/dts-v1/;
+
+/include/ "danube.dtsi"
+
+/ {
+       chosen {
+               bootargs = "console=ttyLTQ0,115200 init=/etc/preinit";
+       };
+
+       memory@0 {
+               reg = <0x0 0x2000000>;
+       };
+
+       fpi@10000000 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               localbus@0 {
+                       #address-cells = <2>;
+                       #size-cells = <1>;
+                       ranges = <0 0 0x0 0x3ffffff /* addrsel0 */
+                               1 0 0x4000000 0x4000010>; /* addsel1 */
+                       compatible = "lantiq,localbus", "simple-bus";
+
+                       nor-boot@0 {
+                               compatible = "lantiq,nor";
+                               bank-width = <2>;
+                               reg = <0 0x0 0x2000000>;
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+
+                               partition@0 {
+                                       label = "uboot";
+                                       reg = <0x00000 0x10000>; /* 64 KB */
+                               };
+
+                               partition@10000 {
+                                       label = "uboot_env";
+                                       reg = <0x10000 0x10000>; /* 64 KB */
+                               };
+
+                               partition@20000 {
+                                       label = "linux";
+                                       reg = <0x20000 0x3d0000>;
+                               };
+
+                               partition@400000 {
+                                       label = "rootfs";
+                                       reg = <0x400000 0x400000>;
+                               };
+                       };
+               };
+
+               gpio: pinmux@E100B10 {
+                       compatible = "lantiq,pinctrl-xway";
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&state_default>;
+
+                       #gpio-cells = <2>;
+                       gpio-controller;
+                       reg = <0xE100B10 0xA0>;
+
+                       state_default: pinmux {
+                               stp {
+                                       lantiq,groups = "stp";
+                                       lantiq,function = "stp";
+                               };
+                               exin {
+                                       lantiq,groups = "exin1";
+                                       lantiq,function = "exin";
+                               };
+                               pci {
+                                       lantiq,groups = "gnt1";
+                                       lantiq,function = "pci";
+                               };
+                               conf_out {
+                                       lantiq,pins = "io4", "io5", "io6"; /* stp */
+                                       lantiq,open-drain;
+                                       lantiq,pull = <0>;
+                               };
+                       };
+               };
+
+               etop@E180000 {
+                       compatible = "lantiq,etop-xway";
+                       reg = <0xE180000 0x40000>;
+                       interrupt-parent = <&icu0>;
+                       interrupts = <73 78>;
+                       phy-mode = "rmii";
+                       mac-address = [ 00 11 22 33 44 55 ];
+               };
+
+               stp0: stp@E100BB0 {
+                       #gpio-cells = <2>;
+                       compatible = "lantiq,gpio-stp-xway";
+                       gpio-controller;
+                       reg = <0xE100BB0 0x40>;
+
+                       lantiq,shadow = <0xfff>;
+                       lantiq,groups = <0x3>;
+               };
+
+               pci@E105400 {
+                       lantiq,bus-clock = <33333333>;
+                       interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+                       interrupt-map = <
+                                0x7000 0 0 1 &icu0 29 1 // slot 14, irq 29
+                       >;
+                       gpios-reset = <&gpio 21 0>;
+                       req-mask = <0x1>;               /* GNT1 */
+               };
+
+       };
+};
index 972e05f8763193f903edfa64c3135d05beaedf33..9b28d0940ef4c9c444ae811f3ba55d6aa01ea45b 100644 (file)
@@ -6,17 +6,16 @@
  *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
  */
 
-#include <linux/init.h>
 #include <linux/cpu.h>
-
-#include <lantiq.h>
 #include <lantiq_soc.h>
 
-/* no ioremap possible at this early stage, lets use KSEG1 instead  */
-#define LTQ_ASC_BASE   KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
 #define ASC_BUF                1024
-#define LTQ_ASC_FSTAT  ((u32 *)(LTQ_ASC_BASE + 0x0048))
-#define LTQ_ASC_TBUF   ((u32 *)(LTQ_ASC_BASE + 0x0020))
+#define LTQ_ASC_FSTAT  ((u32 *)(LTQ_EARLY_ASC + 0x0048))
+#ifdef __BIG_ENDIAN
+#define LTQ_ASC_TBUF   ((u32 *)(LTQ_EARLY_ASC + 0x0020 + 3))
+#else
+#define LTQ_ASC_TBUF   ((u32 *)(LTQ_EARLY_ASC + 0x0020))
+#endif
 #define TXMASK         0x3F00
 #define TXOFFSET       8
 
@@ -27,7 +26,7 @@ void prom_putchar(char c)
        local_irq_save(flags);
        do { } while ((ltq_r32(LTQ_ASC_FSTAT) & TXMASK) >> TXOFFSET);
        if (c == '\n')
-               ltq_w32('\r', LTQ_ASC_TBUF);
-       ltq_w32(c, LTQ_ASC_TBUF);
+               ltq_w8('\r', LTQ_ASC_TBUF);
+       ltq_w8(c, LTQ_ASC_TBUF);
        local_irq_restore(flags);
 }
diff --git a/arch/mips/lantiq/falcon/Makefile b/arch/mips/lantiq/falcon/Makefile
new file mode 100644 (file)
index 0000000..ff220f9
--- /dev/null
@@ -0,0 +1 @@
+obj-y := prom.o reset.o sysctrl.o
diff --git a/arch/mips/lantiq/falcon/prom.c b/arch/mips/lantiq/falcon/prom.c
new file mode 100644 (file)
index 0000000..c1d278f
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <asm/io.h>
+
+#include <lantiq_soc.h>
+
+#include "../prom.h"
+
+#define SOC_FALCON     "Falcon"
+#define SOC_FALCON_D   "Falcon-D"
+#define SOC_FALCON_V   "Falcon-V"
+#define SOC_FALCON_M   "Falcon-M"
+
+#define COMP_FALCON    "lantiq,falcon"
+
+#define PART_SHIFT     12
+#define PART_MASK      0x0FFFF000
+#define REV_SHIFT      28
+#define REV_MASK       0xF0000000
+#define SREV_SHIFT     22
+#define SREV_MASK      0x03C00000
+#define TYPE_SHIFT     26
+#define TYPE_MASK      0x3C000000
+
+/* reset, nmi and ejtag exception vectors */
+#define BOOT_REG_BASE  (KSEG1 | 0x1F200000)
+#define BOOT_RVEC      (BOOT_REG_BASE | 0x00)
+#define BOOT_NVEC      (BOOT_REG_BASE | 0x04)
+#define BOOT_EVEC      (BOOT_REG_BASE | 0x08)
+
+void __init ltq_soc_nmi_setup(void)
+{
+       extern void (*nmi_handler)(void);
+
+       ltq_w32((unsigned long)&nmi_handler, (void *)BOOT_NVEC);
+}
+
+void __init ltq_soc_ejtag_setup(void)
+{
+       extern void (*ejtag_debug_handler)(void);
+
+       ltq_w32((unsigned long)&ejtag_debug_handler, (void *)BOOT_EVEC);
+}
+
+void __init ltq_soc_detect(struct ltq_soc_info *i)
+{
+       u32 type;
+       i->partnum = (ltq_r32(FALCON_CHIPID) & PART_MASK) >> PART_SHIFT;
+       i->rev = (ltq_r32(FALCON_CHIPID) & REV_MASK) >> REV_SHIFT;
+       i->srev = ((ltq_r32(FALCON_CHIPCONF) & SREV_MASK) >> SREV_SHIFT);
+       i->compatible = COMP_FALCON;
+       i->type = SOC_TYPE_FALCON;
+       sprintf(i->rev_type, "%c%d%d", (i->srev & 0x4) ? ('B') : ('A'),
+               i->rev & 0x7, (i->srev & 0x3) + 1);
+
+       switch (i->partnum) {
+       case SOC_ID_FALCON:
+               type = (ltq_r32(FALCON_CHIPTYPE) & TYPE_MASK) >> TYPE_SHIFT;
+               switch (type) {
+               case 0:
+                       i->name = SOC_FALCON_D;
+                       break;
+               case 1:
+                       i->name = SOC_FALCON_V;
+                       break;
+               case 2:
+                       i->name = SOC_FALCON_M;
+                       break;
+               default:
+                       i->name = SOC_FALCON;
+                       break;
+               }
+               break;
+
+       default:
+               unreachable();
+               break;
+       }
+}
diff --git a/arch/mips/lantiq/falcon/reset.c b/arch/mips/lantiq/falcon/reset.c
new file mode 100644 (file)
index 0000000..5682482
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
+#include <linux/export.h>
+
+#include <lantiq_soc.h>
+
+/* CPU0 Reset Source Register */
+#define SYS1_CPU0RS            0x0040
+/* reset cause mask */
+#define CPU0RS_MASK            0x0003
+/* CPU0 Boot Mode Register */
+#define SYS1_BM                        0x00a0
+/* boot mode mask */
+#define BM_MASK                        0x0005
+
+/* allow platform code to find out what surce we booted from */
+unsigned char ltq_boot_select(void)
+{
+       return ltq_sys1_r32(SYS1_BM) & BM_MASK;
+}
+
+/* allow the watchdog driver to find out what the boot reason was */
+int ltq_reset_cause(void)
+{
+       return ltq_sys1_r32(SYS1_CPU0RS) & CPU0RS_MASK;
+}
+EXPORT_SYMBOL_GPL(ltq_reset_cause);
+
+#define BOOT_REG_BASE  (KSEG1 | 0x1F200000)
+#define BOOT_PW1_REG   (BOOT_REG_BASE | 0x20)
+#define BOOT_PW2_REG   (BOOT_REG_BASE | 0x24)
+#define BOOT_PW1       0x4C545100
+#define BOOT_PW2       0x0051544C
+
+#define WDT_REG_BASE   (KSEG1 | 0x1F8803F0)
+#define WDT_PW1                0x00BE0000
+#define WDT_PW2                0x00DC0000
+
+static void machine_restart(char *command)
+{
+       local_irq_disable();
+
+       /* reboot magic */
+       ltq_w32(BOOT_PW1, (void *)BOOT_PW1_REG); /* 'LTQ\0' */
+       ltq_w32(BOOT_PW2, (void *)BOOT_PW2_REG); /* '\0QTL' */
+       ltq_w32(0, (void *)BOOT_REG_BASE); /* reset Bootreg RVEC */
+
+       /* watchdog magic */
+       ltq_w32(WDT_PW1, (void *)WDT_REG_BASE);
+       ltq_w32(WDT_PW2 |
+               (0x3 << 26) | /* PWL */
+               (0x2 << 24) | /* CLKDIV */
+               (0x1 << 31) | /* enable */
+               (1), /* reload */
+               (void *)WDT_REG_BASE);
+       unreachable();
+}
+
+static void machine_halt(void)
+{
+       local_irq_disable();
+       unreachable();
+}
+
+static void machine_power_off(void)
+{
+       local_irq_disable();
+       unreachable();
+}
+
+static int __init mips_reboot_setup(void)
+{
+       _machine_restart = machine_restart;
+       _machine_halt = machine_halt;
+       pm_power_off = machine_power_off;
+       return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
new file mode 100644 (file)
index 0000000..ba0123d
--- /dev/null
@@ -0,0 +1,260 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+#include <asm/delay.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+
+/* infrastructure control register */
+#define SYS1_INFRAC            0x00bc
+/* Configuration fuses for drivers and pll */
+#define STATUS_CONFIG          0x0040
+
+/* GPE frequency selection */
+#define GPPC_OFFSET            24
+#define GPEFREQ_MASK           0x00000C0
+#define GPEFREQ_OFFSET         10
+/* Clock status register */
+#define SYSCTL_CLKS            0x0000
+/* Clock enable register */
+#define SYSCTL_CLKEN           0x0004
+/* Clock clear register */
+#define SYSCTL_CLKCLR          0x0008
+/* Activation Status Register */
+#define SYSCTL_ACTS            0x0020
+/* Activation Register */
+#define SYSCTL_ACT             0x0024
+/* Deactivation Register */
+#define SYSCTL_DEACT           0x0028
+/* reboot Register */
+#define SYSCTL_RBT             0x002c
+/* CPU0 Clock Control Register */
+#define SYS1_CPU0CC            0x0040
+/* HRST_OUT_N Control Register */
+#define SYS1_HRSTOUTC          0x00c0
+/* clock divider bit */
+#define CPU0CC_CPUDIV          0x0001
+
+/* Activation Status Register */
+#define ACTS_ASC1_ACT  0x00000800
+#define ACTS_I2C_ACT   0x00004000
+#define ACTS_P0                0x00010000
+#define ACTS_P1                0x00010000
+#define ACTS_P2                0x00020000
+#define ACTS_P3                0x00020000
+#define ACTS_P4                0x00040000
+#define ACTS_PADCTRL0  0x00100000
+#define ACTS_PADCTRL1  0x00100000
+#define ACTS_PADCTRL2  0x00200000
+#define ACTS_PADCTRL3  0x00200000
+#define ACTS_PADCTRL4  0x00400000
+
+#define sysctl_w32(m, x, y)    ltq_w32((x), sysctl_membase[m] + (y))
+#define sysctl_r32(m, x)       ltq_r32(sysctl_membase[m] + (x))
+#define sysctl_w32_mask(m, clear, set, reg)    \
+               sysctl_w32(m, (sysctl_r32(m, reg) & ~(clear)) | (set), reg)
+
+#define status_w32(x, y)       ltq_w32((x), status_membase + (y))
+#define status_r32(x)          ltq_r32(status_membase + (x))
+
+static void __iomem *sysctl_membase[3], *status_membase;
+void __iomem *ltq_sys1_membase, *ltq_ebu_membase;
+
+void falcon_trigger_hrst(int level)
+{
+       sysctl_w32(SYSCTL_SYS1, level & 1, SYS1_HRSTOUTC);
+}
+
+static inline void sysctl_wait(struct clk *clk,
+               unsigned int test, unsigned int reg)
+{
+       int err = 1000000;
+
+       do {} while (--err && ((sysctl_r32(clk->module, reg)
+                                       & clk->bits) != test));
+       if (!err)
+               pr_err("module de/activation failed %d %08X %08X %08X\n",
+                       clk->module, clk->bits, test,
+                       sysctl_r32(clk->module, reg) & clk->bits);
+}
+
+static int sysctl_activate(struct clk *clk)
+{
+       sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
+       sysctl_w32(clk->module, clk->bits, SYSCTL_ACT);
+       sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
+       return 0;
+}
+
+static void sysctl_deactivate(struct clk *clk)
+{
+       sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
+       sysctl_w32(clk->module, clk->bits, SYSCTL_DEACT);
+       sysctl_wait(clk, 0, SYSCTL_ACTS);
+}
+
+static int sysctl_clken(struct clk *clk)
+{
+       sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
+       sysctl_wait(clk, clk->bits, SYSCTL_CLKS);
+       return 0;
+}
+
+static void sysctl_clkdis(struct clk *clk)
+{
+       sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
+       sysctl_wait(clk, 0, SYSCTL_CLKS);
+}
+
+static void sysctl_reboot(struct clk *clk)
+{
+       unsigned int act;
+       unsigned int bits;
+
+       act = sysctl_r32(clk->module, SYSCTL_ACT);
+       bits = ~act & clk->bits;
+       if (bits != 0) {
+               sysctl_w32(clk->module, bits, SYSCTL_CLKEN);
+               sysctl_w32(clk->module, bits, SYSCTL_ACT);
+               sysctl_wait(clk, bits, SYSCTL_ACTS);
+       }
+       sysctl_w32(clk->module, act & clk->bits, SYSCTL_RBT);
+       sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
+}
+
+/* enable the ONU core */
+static void falcon_gpe_enable(void)
+{
+       unsigned int freq;
+       unsigned int status;
+
+       /* if if the clock is already enabled */
+       status = sysctl_r32(SYSCTL_SYS1, SYS1_INFRAC);
+       if (status & (1 << (GPPC_OFFSET + 1)))
+               return;
+
+       if (status_r32(STATUS_CONFIG) == 0)
+               freq = 1; /* use 625MHz on unfused chip */
+       else
+               freq = (status_r32(STATUS_CONFIG) &
+                       GPEFREQ_MASK) >>
+                       GPEFREQ_OFFSET;
+
+       /* apply new frequency */
+       sysctl_w32_mask(SYSCTL_SYS1, 7 << (GPPC_OFFSET + 1),
+               freq << (GPPC_OFFSET + 2) , SYS1_INFRAC);
+       udelay(1);
+
+       /* enable new frequency */
+       sysctl_w32_mask(SYSCTL_SYS1, 0, 1 << (GPPC_OFFSET + 1), SYS1_INFRAC);
+       udelay(1);
+}
+
+static inline void clkdev_add_sys(const char *dev, unsigned int module,
+                                       unsigned int bits)
+{
+       struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+       clk->cl.dev_id = dev;
+       clk->cl.con_id = NULL;
+       clk->cl.clk = clk;
+       clk->module = module;
+       clk->activate = sysctl_activate;
+       clk->deactivate = sysctl_deactivate;
+       clk->enable = sysctl_clken;
+       clk->disable = sysctl_clkdis;
+       clk->reboot = sysctl_reboot;
+       clkdev_add(&clk->cl);
+}
+
+void __init ltq_soc_init(void)
+{
+       struct device_node *np_status =
+               of_find_compatible_node(NULL, NULL, "lantiq,status-falcon");
+       struct device_node *np_ebu =
+               of_find_compatible_node(NULL, NULL, "lantiq,ebu-falcon");
+       struct device_node *np_sys1 =
+               of_find_compatible_node(NULL, NULL, "lantiq,sys1-falcon");
+       struct device_node *np_syseth =
+               of_find_compatible_node(NULL, NULL, "lantiq,syseth-falcon");
+       struct device_node *np_sysgpe =
+               of_find_compatible_node(NULL, NULL, "lantiq,sysgpe-falcon");
+       struct resource res_status, res_ebu, res_sys[3];
+       int i;
+
+       /* check if all the core register ranges are available */
+       if (!np_status || !np_ebu || !np_sys1 || !np_syseth || !np_sysgpe)
+               panic("Failed to load core nodes from devicetree");
+
+       if (of_address_to_resource(np_status, 0, &res_status) ||
+                       of_address_to_resource(np_ebu, 0, &res_ebu) ||
+                       of_address_to_resource(np_sys1, 0, &res_sys[0]) ||
+                       of_address_to_resource(np_syseth, 0, &res_sys[1]) ||
+                       of_address_to_resource(np_sysgpe, 0, &res_sys[2]))
+               panic("Failed to get core resources");
+
+       if ((request_mem_region(res_status.start, resource_size(&res_status),
+                               res_status.name) < 0) ||
+               (request_mem_region(res_ebu.start, resource_size(&res_ebu),
+                               res_ebu.name) < 0) ||
+               (request_mem_region(res_sys[0].start,
+                               resource_size(&res_sys[0]),
+                               res_sys[0].name) < 0) ||
+               (request_mem_region(res_sys[1].start,
+                               resource_size(&res_sys[1]),
+                               res_sys[1].name) < 0) ||
+               (request_mem_region(res_sys[2].start,
+                               resource_size(&res_sys[2]),
+                               res_sys[2].name) < 0))
+               pr_err("Failed to request core reources");
+
+       status_membase = ioremap_nocache(res_status.start,
+                                       resource_size(&res_status));
+       ltq_ebu_membase = ioremap_nocache(res_ebu.start,
+                                       resource_size(&res_ebu));
+
+       if (!status_membase || !ltq_ebu_membase)
+               panic("Failed to remap core resources");
+
+       for (i = 0; i < 3; i++) {
+               sysctl_membase[i] = ioremap_nocache(res_sys[i].start,
+                                               resource_size(&res_sys[i]));
+               if (!sysctl_membase[i])
+                       panic("Failed to remap sysctrl resources");
+       }
+       ltq_sys1_membase = sysctl_membase[0];
+
+       falcon_gpe_enable();
+
+       /* get our 3 static rates for cpu, fpi and io clocks */
+       if (ltq_sys1_r32(SYS1_CPU0CC) & CPU0CC_CPUDIV)
+               clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M);
+       else
+               clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M);
+
+       /* add our clock domains */
+       clkdev_add_sys("1d810000.gpio", SYSCTL_SYSETH, ACTS_P0);
+       clkdev_add_sys("1d810100.gpio", SYSCTL_SYSETH, ACTS_P2);
+       clkdev_add_sys("1e800100.gpio", SYSCTL_SYS1, ACTS_P1);
+       clkdev_add_sys("1e800200.gpio", SYSCTL_SYS1, ACTS_P3);
+       clkdev_add_sys("1e800300.gpio", SYSCTL_SYS1, ACTS_P4);
+       clkdev_add_sys("1db01000.pad", SYSCTL_SYSETH, ACTS_PADCTRL0);
+       clkdev_add_sys("1db02000.pad", SYSCTL_SYSETH, ACTS_PADCTRL2);
+       clkdev_add_sys("1e800400.pad", SYSCTL_SYS1, ACTS_PADCTRL1);
+       clkdev_add_sys("1e800500.pad", SYSCTL_SYS1, ACTS_PADCTRL3);
+       clkdev_add_sys("1e800600.pad", SYSCTL_SYS1, ACTS_PADCTRL4);
+       clkdev_add_sys("1e100C00.serial", SYSCTL_SYS1, ACTS_ASC1_ACT);
+       clkdev_add_sys("1e200000.i2c", SYSCTL_SYS1, ACTS_I2C_ACT);
+}
index d673731c538a4d7c6328d00d8249b1290d51dd91..57c1a4e51408c9800e644e8a299c4e8c9dd340de 100644 (file)
@@ -9,6 +9,11 @@
 
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/irqdomain.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 
 #include <asm/bootinfo.h>
 #include <asm/irq_cpu.h>
@@ -16,7 +21,7 @@
 #include <lantiq_soc.h>
 #include <irq.h>
 
-/* register definitions */
+/* register definitions - internal irqs */
 #define LTQ_ICU_IM0_ISR                0x0000
 #define LTQ_ICU_IM0_IER                0x0008
 #define LTQ_ICU_IM0_IOSR       0x0010
@@ -25,6 +30,7 @@
 #define LTQ_ICU_IM1_ISR                0x0028
 #define LTQ_ICU_OFFSET         (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)
 
+/* register definitions - external irqs */
 #define LTQ_EIU_EXIN_C         0x0000
 #define LTQ_EIU_EXIN_INIC      0x0004
 #define LTQ_EIU_EXIN_INEN      0x000C
 #define LTQ_EIU_IR4            (INT_NUM_IM1_IRL0 + 1)
 #define LTQ_EIU_IR5            (INT_NUM_IM1_IRL0 + 2)
 #define LTQ_EIU_IR6            (INT_NUM_IM2_IRL0 + 30)
-
+#define XWAY_EXIN_COUNT                3
 #define MAX_EIU                        6
 
-/* irqs generated by device attached to the EBU need to be acked in
+/* the performance counter */
+#define LTQ_PERF_IRQ           (INT_NUM_IM4_IRL0 + 31)
+
+/*
+ * irqs generated by devices attached to the EBU need to be acked in
  * a special manner
  */
 #define LTQ_ICU_EBU_IRQ                22
 #define ltq_eiu_w32(x, y)      ltq_w32((x), ltq_eiu_membase + (y))
 #define ltq_eiu_r32(x)         ltq_r32(ltq_eiu_membase + (x))
 
+/* our 2 ipi interrupts for VSMP */
+#define MIPS_CPU_IPI_RESCHED_IRQ       0
+#define MIPS_CPU_IPI_CALL_IRQ          1
+
+/* we have a cascade of 8 irqs */
+#define MIPS_CPU_IRQ_CASCADE           8
+
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+int gic_present;
+#endif
+
 static unsigned short ltq_eiu_irq[MAX_EIU] = {
        LTQ_EIU_IR0,
        LTQ_EIU_IR1,
@@ -60,64 +81,51 @@ static unsigned short ltq_eiu_irq[MAX_EIU] = {
        LTQ_EIU_IR5,
 };
 
-static struct resource ltq_icu_resource = {
-       .name   = "icu",
-       .start  = LTQ_ICU_BASE_ADDR,
-       .end    = LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1,
-       .flags  = IORESOURCE_MEM,
-};
-
-static struct resource ltq_eiu_resource = {
-       .name   = "eiu",
-       .start  = LTQ_EIU_BASE_ADDR,
-       .end    = LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1,
-       .flags  = IORESOURCE_MEM,
-};
-
+static int exin_avail;
 static void __iomem *ltq_icu_membase;
 static void __iomem *ltq_eiu_membase;
 
 void ltq_disable_irq(struct irq_data *d)
 {
        u32 ier = LTQ_ICU_IM0_IER;
-       int irq_nr = d->irq - INT_NUM_IRQ0;
+       int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
 
-       ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
-       irq_nr %= INT_NUM_IM_OFFSET;
-       ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
+       ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+       offset %= INT_NUM_IM_OFFSET;
+       ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
 }
 
 void ltq_mask_and_ack_irq(struct irq_data *d)
 {
        u32 ier = LTQ_ICU_IM0_IER;
        u32 isr = LTQ_ICU_IM0_ISR;
-       int irq_nr = d->irq - INT_NUM_IRQ0;
+       int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
 
-       ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
-       isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
-       irq_nr %= INT_NUM_IM_OFFSET;
-       ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
-       ltq_icu_w32((1 << irq_nr), isr);
+       ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+       isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+       offset %= INT_NUM_IM_OFFSET;
+       ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
+       ltq_icu_w32(BIT(offset), isr);
 }
 
 static void ltq_ack_irq(struct irq_data *d)
 {
        u32 isr = LTQ_ICU_IM0_ISR;
-       int irq_nr = d->irq - INT_NUM_IRQ0;
+       int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
 
-       isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
-       irq_nr %= INT_NUM_IM_OFFSET;
-       ltq_icu_w32((1 << irq_nr), isr);
+       isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+       offset %= INT_NUM_IM_OFFSET;
+       ltq_icu_w32(BIT(offset), isr);
 }
 
 void ltq_enable_irq(struct irq_data *d)
 {
        u32 ier = LTQ_ICU_IM0_IER;
-       int irq_nr = d->irq - INT_NUM_IRQ0;
+       int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
 
-       ier += LTQ_ICU_OFFSET  * (irq_nr / INT_NUM_IM_OFFSET);
-       irq_nr %= INT_NUM_IM_OFFSET;
-       ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier);
+       ier += LTQ_ICU_OFFSET  * (offset / INT_NUM_IM_OFFSET);
+       offset %= INT_NUM_IM_OFFSET;
+       ltq_icu_w32(ltq_icu_r32(ier) | BIT(offset), ier);
 }
 
 static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
@@ -126,15 +134,15 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
 
        ltq_enable_irq(d);
        for (i = 0; i < MAX_EIU; i++) {
-               if (d->irq == ltq_eiu_irq[i]) {
+               if (d->hwirq == ltq_eiu_irq[i]) {
                        /* low level - we should really handle set_type */
                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
                                (0x6 << (i * 4)), LTQ_EIU_EXIN_C);
                        /* clear all pending */
-                       ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i),
+                       ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~BIT(i),
                                LTQ_EIU_EXIN_INIC);
                        /* enable */
-                       ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i),
+                       ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
                                LTQ_EIU_EXIN_INEN);
                        break;
                }
@@ -149,9 +157,9 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d)
 
        ltq_disable_irq(d);
        for (i = 0; i < MAX_EIU; i++) {
-               if (d->irq == ltq_eiu_irq[i]) {
+               if (d->hwirq == ltq_eiu_irq[i]) {
                        /* disable */
-                       ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i),
+                       ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
                                LTQ_EIU_EXIN_INEN);
                        break;
                }
@@ -188,14 +196,15 @@ static void ltq_hw_irqdispatch(int module)
        if (irq == 0)
                return;
 
-       /* silicon bug causes only the msb set to 1 to be valid. all
+       /*
+        * silicon bug causes only the msb set to 1 to be valid. all
         * other bits might be bogus
         */
        irq = __fls(irq);
-       do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module));
+       do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
 
        /* if this is a EBU irq, we need to ack it or get a deadlock */
-       if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0))
+       if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
                ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
                        LTQ_EBU_PCC_ISTAT);
 }
@@ -216,6 +225,47 @@ static void ltq_hw5_irqdispatch(void)
        do_IRQ(MIPS_CPU_TIMER_IRQ);
 }
 
+#ifdef CONFIG_MIPS_MT_SMP
+void __init arch_init_ipiirq(int irq, struct irqaction *action)
+{
+       setup_irq(irq, action);
+       irq_set_handler(irq, handle_percpu_irq);
+}
+
+static void ltq_sw0_irqdispatch(void)
+{
+       do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
+}
+
+static void ltq_sw1_irqdispatch(void)
+{
+       do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
+}
+static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
+{
+       scheduler_ipi();
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
+{
+       smp_call_function_interrupt();
+       return IRQ_HANDLED;
+}
+
+static struct irqaction irq_resched = {
+       .handler        = ipi_resched_interrupt,
+       .flags          = IRQF_PERCPU,
+       .name           = "IPI_resched"
+};
+
+static struct irqaction irq_call = {
+       .handler        = ipi_call_interrupt,
+       .flags          = IRQF_PERCPU,
+       .name           = "IPI_call"
+};
+#endif
+
 asmlinkage void plat_irq_dispatch(void)
 {
        unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
@@ -238,45 +288,75 @@ out:
        return;
 }
 
+static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+       struct irq_chip *chip = &ltq_irq_type;
+       int i;
+
+       for (i = 0; i < exin_avail; i++)
+               if (hw == ltq_eiu_irq[i])
+                       chip = &ltq_eiu_type;
+
+       irq_set_chip_and_handler(hw, chip, handle_level_irq);
+
+       return 0;
+}
+
+static const struct irq_domain_ops irq_domain_ops = {
+       .xlate = irq_domain_xlate_onetwocell,
+       .map = icu_map,
+};
+
 static struct irqaction cascade = {
        .handler = no_action,
        .name = "cascade",
 };
 
-void __init arch_init_irq(void)
+int __init icu_of_init(struct device_node *node, struct device_node *parent)
 {
+       struct device_node *eiu_node;
+       struct resource res;
        int i;
 
-       if (insert_resource(&iomem_resource, &ltq_icu_resource) < 0)
-               panic("Failed to insert icu memory");
+       if (of_address_to_resource(node, 0, &res))
+               panic("Failed to get icu memory range");
 
-       if (request_mem_region(ltq_icu_resource.start,
-                       resource_size(&ltq_icu_resource), "icu") < 0)
-               panic("Failed to request icu memory");
+       if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
+               pr_err("Failed to request icu memory");
 
-       ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start,
-                               resource_size(&ltq_icu_resource));
+       ltq_icu_membase = ioremap_nocache(res.start, resource_size(&res));
        if (!ltq_icu_membase)
                panic("Failed to remap icu memory");
 
-       if (insert_resource(&iomem_resource, &ltq_eiu_resource) < 0)
-               panic("Failed to insert eiu memory");
-
-       if (request_mem_region(ltq_eiu_resource.start,
-                       resource_size(&ltq_eiu_resource), "eiu") < 0)
-               panic("Failed to request eiu memory");
-
-       ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start,
-                               resource_size(&ltq_eiu_resource));
-       if (!ltq_eiu_membase)
-               panic("Failed to remap eiu memory");
+       /* the external interrupts are optional and xway only */
+       eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu");
+       if (eiu_node && of_address_to_resource(eiu_node, 0, &res)) {
+               /* find out how many external irq sources we have */
+               const __be32 *count = of_get_property(node,
+                                                       "lantiq,count", NULL);
+
+               if (count)
+                       exin_avail = *count;
+               if (exin_avail > MAX_EIU)
+                       exin_avail = MAX_EIU;
+
+               if (request_mem_region(res.start, resource_size(&res),
+                                                       res.name) < 0)
+                       pr_err("Failed to request eiu memory");
+
+               ltq_eiu_membase = ioremap_nocache(res.start,
+                                                       resource_size(&res));
+               if (!ltq_eiu_membase)
+                       panic("Failed to remap eiu memory");
+       }
 
-       /* make sure all irqs are turned off by default */
-       for (i = 0; i < 5; i++)
+       /* turn off all irqs by default */
+       for (i = 0; i < 5; i++) {
+               /* make sure all irqs are turned off by default */
                ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET));
-
-       /* clear all possibly pending interrupts */
-       ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
+               /* clear all possibly pending interrupts */
+               ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
+       }
 
        mips_cpu_irq_init();
 
@@ -293,20 +373,19 @@ void __init arch_init_irq(void)
                set_vi_handler(7, ltq_hw5_irqdispatch);
        }
 
-       for (i = INT_NUM_IRQ0;
-               i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++)
-               if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) ||
-                       (i == LTQ_EIU_IR2))
-                       irq_set_chip_and_handler(i, &ltq_eiu_type,
-                               handle_level_irq);
-               /* EIU3-5 only exist on ar9 and vr9 */
-               else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) ||
-                       (i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9()))
-                       irq_set_chip_and_handler(i, &ltq_eiu_type,
-                               handle_level_irq);
-               else
-                       irq_set_chip_and_handler(i, &ltq_irq_type,
-                               handle_level_irq);
+       irq_domain_add_linear(node, 6 * INT_NUM_IM_OFFSET,
+               &irq_domain_ops, 0);
+
+#if defined(CONFIG_MIPS_MT_SMP)
+       if (cpu_has_vint) {
+               pr_info("Setting up IPI vectored interrupts\n");
+               set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ltq_sw0_irqdispatch);
+               set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ltq_sw1_irqdispatch);
+       }
+       arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ,
+               &irq_resched);
+       arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
+#endif
 
 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
        set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
@@ -315,9 +394,23 @@ void __init arch_init_irq(void)
        set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
                IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
 #endif
+
+       /* tell oprofile which irq to use */
+       cp0_perfcount_irq = LTQ_PERF_IRQ;
+       return 0;
 }
 
 unsigned int __cpuinit get_c0_compare_int(void)
 {
        return CP0_LEGACY_COMPARE_IRQ;
 }
+
+static struct of_device_id __initdata of_irq_ids[] = {
+       { .compatible = "lantiq,icu", .data = icu_of_init },
+       {},
+};
+
+void __init arch_init_irq(void)
+{
+       of_irq_init(of_irq_ids);
+}
diff --git a/arch/mips/lantiq/machtypes.h b/arch/mips/lantiq/machtypes.h
deleted file mode 100644 (file)
index 7e01b8c..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#ifndef _LANTIQ_MACH_H__
-#define _LANTIQ_MACH_H__
-
-#include <asm/mips_machine.h>
-
-enum lantiq_mach_type {
-       LTQ_MACH_GENERIC = 0,
-       LTQ_MACH_EASY50712,     /* Danube evaluation board */
-       LTQ_MACH_EASY50601,     /* Amazon SE evaluation board */
-};
-
-#endif
index e34fcfd0d5ca5763983c2b0a248352275b40f49b..d185e8477fdf2a8eb38d73ff74ed2c94acb894e8 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/export.h>
 #include <linux/clk.h>
+#include <linux/of_platform.h>
 #include <asm/bootinfo.h>
 #include <asm/time.h>
 
 #include "prom.h"
 #include "clk.h"
 
-static struct ltq_soc_info soc_info;
-
-unsigned int ltq_get_cpu_ver(void)
-{
-       return soc_info.rev;
-}
-EXPORT_SYMBOL(ltq_get_cpu_ver);
+/* access to the ebu needs to be locked between different drivers */
+DEFINE_SPINLOCK(ebu_lock);
+EXPORT_SYMBOL_GPL(ebu_lock);
 
-unsigned int ltq_get_soc_type(void)
-{
-       return soc_info.type;
-}
-EXPORT_SYMBOL(ltq_get_soc_type);
+/*
+ * this struct is filled by the soc specific detection code and holds
+ * information about the specific soc type, revision and name
+ */
+static struct ltq_soc_info soc_info;
 
 const char *get_system_type(void)
 {
@@ -45,27 +42,62 @@ static void __init prom_init_cmdline(void)
        char **argv = (char **) KSEG1ADDR(fw_arg1);
        int i;
 
+       arcs_cmdline[0] = '\0';
+
        for (i = 0; i < argc; i++) {
-               char *p = (char *)  KSEG1ADDR(argv[i]);
+               char *p = (char *) KSEG1ADDR(argv[i]);
 
-               if (p && *p) {
+               if (CPHYSADDR(p) && *p) {
                        strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
                        strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
                }
        }
 }
 
-void __init prom_init(void)
+void __init plat_mem_setup(void)
 {
-       struct clk *clk;
+       ioport_resource.start = IOPORT_RESOURCE_START;
+       ioport_resource.end = IOPORT_RESOURCE_END;
+       iomem_resource.start = IOMEM_RESOURCE_START;
+       iomem_resource.end = IOMEM_RESOURCE_END;
+
+       set_io_port_base((unsigned long) KSEG1);
 
+       /*
+        * Load the builtin devicetree. This causes the chosen node to be
+        * parsed resulting in our memory appearing
+        */
+       __dt_setup_arch(&__dtb_start);
+}
+
+void __init prom_init(void)
+{
+       /* call the soc specific detection code and get it to fill soc_info */
        ltq_soc_detect(&soc_info);
-       clk_init();
-       clk = clk_get(0, "cpu");
-       snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev1.%d",
-               soc_info.name, soc_info.rev);
-       clk_put(clk);
+       snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev %s",
+               soc_info.name, soc_info.rev_type);
        soc_info.sys_type[LTQ_SYS_TYPE_LEN - 1] = '\0';
        pr_info("SoC: %s\n", soc_info.sys_type);
        prom_init_cmdline();
+
+#if defined(CONFIG_MIPS_MT_SMP)
+       if (register_vsmp_smp_ops())
+               panic("failed to register_vsmp_smp_ops()");
+#endif
 }
+
+int __init plat_of_setup(void)
+{
+       static struct of_device_id of_ids[3];
+
+       if (!of_have_populated_dt())
+               panic("device tree not present");
+
+       strncpy(of_ids[0].compatible, soc_info.compatible,
+               sizeof(of_ids[0].compatible));
+       strncpy(of_ids[1].compatible, "simple-bus",
+               sizeof(of_ids[1].compatible));
+       return of_platform_bus_probe(NULL, of_ids, NULL);
+}
+
+arch_initcall(plat_of_setup);
index b4229d94280f9cdd3f9f960698ac28cc7aee52a5..a3fa1a2bfaae5accf32fc341d9a92b0df17438e7 100644 (file)
 #define _LTQ_PROM_H__
 
 #define LTQ_SYS_TYPE_LEN       0x100
+#define LTQ_SYS_REV_LEN         0x10
 
 struct ltq_soc_info {
        unsigned char *name;
        unsigned int rev;
+       unsigned char rev_type[LTQ_SYS_REV_LEN];
+       unsigned int srev;
        unsigned int partnum;
        unsigned int type;
        unsigned char sys_type[LTQ_SYS_TYPE_LEN];
+       unsigned char *compatible;
 };
 
 extern void ltq_soc_detect(struct ltq_soc_info *i);
-extern void ltq_soc_setup(void);
+extern void ltq_soc_init(void);
+
+extern struct boot_param_header __dtb_start;
 
 #endif
diff --git a/arch/mips/lantiq/setup.c b/arch/mips/lantiq/setup.c
deleted file mode 100644 (file)
index 1ff6c9d..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <asm/bootinfo.h>
-
-#include <lantiq_soc.h>
-
-#include "machtypes.h"
-#include "devices.h"
-#include "prom.h"
-
-void __init plat_mem_setup(void)
-{
-       /* assume 16M as default incase uboot fails to pass proper ramsize */
-       unsigned long memsize = 16;
-       char **envp = (char **) KSEG1ADDR(fw_arg2);
-
-       ioport_resource.start = IOPORT_RESOURCE_START;
-       ioport_resource.end = IOPORT_RESOURCE_END;
-       iomem_resource.start = IOMEM_RESOURCE_START;
-       iomem_resource.end = IOMEM_RESOURCE_END;
-
-       set_io_port_base((unsigned long) KSEG1);
-
-       while (*envp) {
-               char *e = (char *)KSEG1ADDR(*envp);
-               if (!strncmp(e, "memsize=", 8)) {
-                       e += 8;
-                       if (strict_strtoul(e, 0, &memsize))
-                               pr_warn("bad memsize specified\n");
-               }
-               envp++;
-       }
-       memsize *= 1024 * 1024;
-       add_memory_region(0x00000000, memsize, BOOT_MEM_RAM);
-}
-
-static int __init
-lantiq_setup(void)
-{
-       ltq_soc_setup();
-       mips_machine_setup();
-       return 0;
-}
-
-arch_initcall(lantiq_setup);
-
-static void __init
-lantiq_generic_init(void)
-{
-       /* Nothing to do */
-}
-
-MIPS_MACHINE(LTQ_MACH_GENERIC,
-            "Generic",
-            "Generic Lantiq based board",
-            lantiq_generic_init);
diff --git a/arch/mips/lantiq/xway/Kconfig b/arch/mips/lantiq/xway/Kconfig
deleted file mode 100644 (file)
index 2b857de..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-if SOC_XWAY
-
-menu "MIPS Machine"
-
-config LANTIQ_MACH_EASY50712
-       bool "Easy50712 - Danube"
-       default y
-
-endmenu
-
-endif
-
-if SOC_AMAZON_SE
-
-menu "MIPS Machine"
-
-config LANTIQ_MACH_EASY50601
-       bool "Easy50601 - Amazon SE"
-       default y
-
-endmenu
-
-endif
index c517f2e77563cb3001146a28ec62853350798f53..dc3194f6ee421ca16c39295b9fcaec07fc5d9cf6 100644 (file)
@@ -1,7 +1 @@
-obj-y := pmu.o ebu.o reset.o gpio.o gpio_stp.o gpio_ebu.o devices.o dma.o
-
-obj-$(CONFIG_SOC_XWAY) += clk-xway.o prom-xway.o setup-xway.o
-obj-$(CONFIG_SOC_AMAZON_SE) += clk-ase.o prom-ase.o setup-ase.o
-
-obj-$(CONFIG_LANTIQ_MACH_EASY50712) += mach-easy50712.o
-obj-$(CONFIG_LANTIQ_MACH_EASY50601) += mach-easy50601.o
+obj-y := prom.o sysctrl.o clk.o reset.o gpio.o dma.o
diff --git a/arch/mips/lantiq/xway/clk-ase.c b/arch/mips/lantiq/xway/clk-ase.c
deleted file mode 100644 (file)
index 6522583..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2011 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/io.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-
-#include <asm/time.h>
-#include <asm/irq.h>
-#include <asm/div64.h>
-
-#include <lantiq_soc.h>
-
-/* cgu registers */
-#define LTQ_CGU_SYS    0x0010
-
-unsigned int ltq_get_io_region_clock(void)
-{
-       return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_io_region_clock);
-
-unsigned int ltq_get_fpi_bus_clock(int fpi)
-{
-       return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_fpi_bus_clock);
-
-unsigned int ltq_get_cpu_hz(void)
-{
-       if (ltq_cgu_r32(LTQ_CGU_SYS) & (1 << 5))
-               return CLOCK_266M;
-       else
-               return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_cpu_hz);
-
-unsigned int ltq_get_fpi_hz(void)
-{
-       return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_fpi_hz);
diff --git a/arch/mips/lantiq/xway/clk-xway.c b/arch/mips/lantiq/xway/clk-xway.c
deleted file mode 100644 (file)
index 696b1a3..0000000
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/io.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-
-#include <asm/time.h>
-#include <asm/irq.h>
-#include <asm/div64.h>
-
-#include <lantiq_soc.h>
-
-static unsigned int ltq_ram_clocks[] = {
-       CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
-#define DDR_HZ ltq_ram_clocks[ltq_cgu_r32(LTQ_CGU_SYS) & 0x3]
-
-#define BASIC_FREQUENCY_1      35328000
-#define BASIC_FREQUENCY_2      36000000
-#define BASIS_REQUENCY_USB     12000000
-
-#define GET_BITS(x, msb, lsb) \
-       (((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb))
-
-#define LTQ_CGU_PLL0_CFG       0x0004
-#define LTQ_CGU_PLL1_CFG       0x0008
-#define LTQ_CGU_PLL2_CFG       0x000C
-#define LTQ_CGU_SYS            0x0010
-#define LTQ_CGU_UPDATE         0x0014
-#define LTQ_CGU_IF_CLK         0x0018
-#define LTQ_CGU_OSC_CON                0x001C
-#define LTQ_CGU_SMD            0x0020
-#define LTQ_CGU_CT1SR          0x0028
-#define LTQ_CGU_CT2SR          0x002C
-#define LTQ_CGU_PCMCR          0x0030
-#define LTQ_CGU_PCI_CR         0x0034
-#define LTQ_CGU_PD_PC          0x0038
-#define LTQ_CGU_FMR            0x003C
-
-#define CGU_PLL0_PHASE_DIVIDER_ENABLE  \
-       (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 31))
-#define CGU_PLL0_BYPASS                        \
-       (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 30))
-#define CGU_PLL0_CFG_DSMSEL            \
-       (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 28))
-#define CGU_PLL0_CFG_FRAC_EN           \
-       (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 27))
-#define CGU_PLL1_SRC                   \
-       (ltq_cgu_r32(LTQ_CGU_PLL1_CFG) & (1 << 31))
-#define CGU_PLL2_PHASE_DIVIDER_ENABLE  \
-       (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & (1 << 20))
-#define CGU_SYS_FPI_SEL                        (1 << 6)
-#define CGU_SYS_DDR_SEL                        0x3
-#define CGU_PLL0_SRC                   (1 << 29)
-
-#define CGU_PLL0_CFG_PLLK      GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 26, 17)
-#define CGU_PLL0_CFG_PLLN      GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 12, 6)
-#define CGU_PLL0_CFG_PLLM      GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 5, 2)
-#define CGU_PLL2_SRC           GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 18, 17)
-#define CGU_PLL2_CFG_INPUT_DIV GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 16, 13)
-
-static unsigned int ltq_get_pll0_fdiv(void);
-
-static inline unsigned int get_input_clock(int pll)
-{
-       switch (pll) {
-       case 0:
-               if (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & CGU_PLL0_SRC)
-                       return BASIS_REQUENCY_USB;
-               else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
-                       return BASIC_FREQUENCY_1;
-               else
-                       return BASIC_FREQUENCY_2;
-       case 1:
-               if (CGU_PLL1_SRC)
-                       return BASIS_REQUENCY_USB;
-               else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
-                       return BASIC_FREQUENCY_1;
-               else
-                       return BASIC_FREQUENCY_2;
-       case 2:
-               switch (CGU_PLL2_SRC) {
-               case 0:
-                       return ltq_get_pll0_fdiv();
-               case 1:
-                       return CGU_PLL2_PHASE_DIVIDER_ENABLE ?
-                               BASIC_FREQUENCY_1 :
-                               BASIC_FREQUENCY_2;
-               case 2:
-                       return BASIS_REQUENCY_USB;
-               }
-       default:
-               return 0;
-       }
-}
-
-static inline unsigned int cal_dsm(int pll, unsigned int num, unsigned int den)
-{
-       u64 res, clock = get_input_clock(pll);
-
-       res = num * clock;
-       do_div(res, den);
-       return res;
-}
-
-static inline unsigned int mash_dsm(int pll, unsigned int M, unsigned int N,
-       unsigned int K)
-{
-       unsigned int num = ((N + 1) << 10) + K;
-       unsigned int den = (M + 1) << 10;
-
-       return cal_dsm(pll, num, den);
-}
-
-static inline unsigned int ssff_dsm_1(int pll, unsigned int M, unsigned int N,
-       unsigned int K)
-{
-       unsigned int num = ((N + 1) << 11) + K + 512;
-       unsigned int den = (M + 1) << 11;
-
-       return cal_dsm(pll, num, den);
-}
-
-static inline unsigned int ssff_dsm_2(int pll, unsigned int M, unsigned int N,
-       unsigned int K)
-{
-       unsigned int num = K >= 512 ?
-               ((N + 1) << 12) + K - 512 : ((N + 1) << 12) + K + 3584;
-       unsigned int den = (M + 1) << 12;
-
-       return cal_dsm(pll, num, den);
-}
-
-static inline unsigned int dsm(int pll, unsigned int M, unsigned int N,
-       unsigned int K, unsigned int dsmsel, unsigned int phase_div_en)
-{
-       if (!dsmsel)
-               return mash_dsm(pll, M, N, K);
-       else if (!phase_div_en)
-               return mash_dsm(pll, M, N, K);
-       else
-               return ssff_dsm_2(pll, M, N, K);
-}
-
-static inline unsigned int ltq_get_pll0_fosc(void)
-{
-       if (CGU_PLL0_BYPASS)
-               return get_input_clock(0);
-       else
-               return !CGU_PLL0_CFG_FRAC_EN
-                       ? dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, 0,
-                               CGU_PLL0_CFG_DSMSEL,
-                               CGU_PLL0_PHASE_DIVIDER_ENABLE)
-                       : dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN,
-                               CGU_PLL0_CFG_PLLK, CGU_PLL0_CFG_DSMSEL,
-                               CGU_PLL0_PHASE_DIVIDER_ENABLE);
-}
-
-static unsigned int ltq_get_pll0_fdiv(void)
-{
-       unsigned int div = CGU_PLL2_CFG_INPUT_DIV + 1;
-
-       return (ltq_get_pll0_fosc() + (div >> 1)) / div;
-}
-
-unsigned int ltq_get_io_region_clock(void)
-{
-       unsigned int ret = ltq_get_pll0_fosc();
-
-       switch (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & CGU_SYS_DDR_SEL) {
-       default:
-       case 0:
-               return (ret + 1) / 2;
-       case 1:
-               return (ret * 2 + 2) / 5;
-       case 2:
-               return (ret + 1) / 3;
-       case 3:
-               return (ret + 2) / 4;
-       }
-}
-EXPORT_SYMBOL(ltq_get_io_region_clock);
-
-unsigned int ltq_get_fpi_bus_clock(int fpi)
-{
-       unsigned int ret = ltq_get_io_region_clock();
-
-       if ((fpi == 2) && (ltq_cgu_r32(LTQ_CGU_SYS) & CGU_SYS_FPI_SEL))
-               ret >>= 1;
-       return ret;
-}
-EXPORT_SYMBOL(ltq_get_fpi_bus_clock);
-
-unsigned int ltq_get_cpu_hz(void)
-{
-       switch (ltq_cgu_r32(LTQ_CGU_SYS) & 0xc) {
-       case 0:
-               return CLOCK_333M;
-       case 4:
-               return DDR_HZ;
-       case 8:
-               return DDR_HZ << 1;
-       default:
-               return DDR_HZ >> 1;
-       }
-}
-EXPORT_SYMBOL(ltq_get_cpu_hz);
-
-unsigned int ltq_get_fpi_hz(void)
-{
-       unsigned int ddr_clock = DDR_HZ;
-
-       if (ltq_cgu_r32(LTQ_CGU_SYS) & 0x40)
-               return ddr_clock >> 1;
-       return ddr_clock;
-}
-EXPORT_SYMBOL(ltq_get_fpi_hz);
diff --git a/arch/mips/lantiq/xway/clk.c b/arch/mips/lantiq/xway/clk.c
new file mode 100644 (file)
index 0000000..9aa17f7
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+
+#include <asm/time.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+
+static unsigned int ram_clocks[] = {
+       CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
+#define DDR_HZ ram_clocks[ltq_cgu_r32(CGU_SYS) & 0x3]
+
+/* legacy xway clock */
+#define CGU_SYS                        0x10
+
+/* vr9 clock */
+#define CGU_SYS_VR9            0x0c
+#define CGU_IF_CLK_VR9         0x24
+
+unsigned long ltq_danube_fpi_hz(void)
+{
+       unsigned long ddr_clock = DDR_HZ;
+
+       if (ltq_cgu_r32(CGU_SYS) & 0x40)
+               return ddr_clock >> 1;
+       return ddr_clock;
+}
+
+unsigned long ltq_danube_cpu_hz(void)
+{
+       switch (ltq_cgu_r32(CGU_SYS) & 0xc) {
+       case 0:
+               return CLOCK_333M;
+       case 4:
+               return DDR_HZ;
+       case 8:
+               return DDR_HZ << 1;
+       default:
+               return DDR_HZ >> 1;
+       }
+}
+
+unsigned long ltq_ar9_sys_hz(void)
+{
+       if (((ltq_cgu_r32(CGU_SYS) >> 3) & 0x3) == 0x2)
+               return CLOCK_393M;
+       return CLOCK_333M;
+}
+
+unsigned long ltq_ar9_fpi_hz(void)
+{
+       unsigned long sys = ltq_ar9_sys_hz();
+
+       if (ltq_cgu_r32(CGU_SYS) & BIT(0))
+               return sys;
+       return sys >> 1;
+}
+
+unsigned long ltq_ar9_cpu_hz(void)
+{
+       if (ltq_cgu_r32(CGU_SYS) & BIT(2))
+               return ltq_ar9_fpi_hz();
+       else
+               return ltq_ar9_sys_hz();
+}
+
+unsigned long ltq_vr9_cpu_hz(void)
+{
+       unsigned int cpu_sel;
+       unsigned long clk;
+
+       cpu_sel = (ltq_cgu_r32(CGU_SYS_VR9) >> 4) & 0xf;
+
+       switch (cpu_sel) {
+       case 0:
+               clk = CLOCK_600M;
+               break;
+       case 1:
+               clk = CLOCK_500M;
+               break;
+       case 2:
+               clk = CLOCK_393M;
+               break;
+       case 3:
+               clk = CLOCK_333M;
+               break;
+       case 5:
+       case 6:
+               clk = CLOCK_196_608M;
+               break;
+       case 7:
+               clk = CLOCK_167M;
+               break;
+       case 4:
+       case 8:
+       case 9:
+               clk = CLOCK_125M;
+               break;
+       default:
+               clk = 0;
+               break;
+       }
+
+       return clk;
+}
+
+unsigned long ltq_vr9_fpi_hz(void)
+{
+       unsigned int ocp_sel, cpu_clk;
+       unsigned long clk;
+
+       cpu_clk = ltq_vr9_cpu_hz();
+       ocp_sel = ltq_cgu_r32(CGU_SYS_VR9) & 0x3;
+
+       switch (ocp_sel) {
+       case 0:
+               /* OCP ratio 1 */
+               clk = cpu_clk;
+               break;
+       case 2:
+               /* OCP ratio 2 */
+               clk = cpu_clk / 2;
+               break;
+       case 3:
+               /* OCP ratio 2.5 */
+               clk = (cpu_clk * 2) / 5;
+               break;
+       case 4:
+               /* OCP ratio 3 */
+               clk = cpu_clk / 3;
+               break;
+       default:
+               clk = 0;
+               break;
+       }
+
+       return clk;
+}
diff --git a/arch/mips/lantiq/xway/devices.c b/arch/mips/lantiq/xway/devices.c
deleted file mode 100644 (file)
index d614aa7..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/mtd/physmap.h>
-#include <linux/kernel.h>
-#include <linux/reboot.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/etherdevice.h>
-#include <linux/time.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-
-#include <lantiq_soc.h>
-#include <lantiq_irq.h>
-#include <lantiq_platform.h>
-
-#include "devices.h"
-
-/* gpio */
-static struct resource ltq_gpio_resource[] = {
-       {
-               .name   = "gpio0",
-               .start  = LTQ_GPIO0_BASE_ADDR,
-               .end    = LTQ_GPIO0_BASE_ADDR + LTQ_GPIO_SIZE - 1,
-               .flags  = IORESOURCE_MEM,
-       }, {
-               .name   = "gpio1",
-               .start  = LTQ_GPIO1_BASE_ADDR,
-               .end    = LTQ_GPIO1_BASE_ADDR + LTQ_GPIO_SIZE - 1,
-               .flags  = IORESOURCE_MEM,
-       }, {
-               .name   = "gpio2",
-               .start  = LTQ_GPIO2_BASE_ADDR,
-               .end    = LTQ_GPIO2_BASE_ADDR + LTQ_GPIO_SIZE - 1,
-               .flags  = IORESOURCE_MEM,
-       }
-};
-
-void __init ltq_register_gpio(void)
-{
-       platform_device_register_simple("ltq_gpio", 0,
-               &ltq_gpio_resource[0], 1);
-       platform_device_register_simple("ltq_gpio", 1,
-               &ltq_gpio_resource[1], 1);
-
-       /* AR9 and VR9 have an extra gpio block */
-       if (ltq_is_ar9() || ltq_is_vr9()) {
-               platform_device_register_simple("ltq_gpio", 2,
-                       &ltq_gpio_resource[2], 1);
-       }
-}
-
-/* serial to parallel conversion */
-static struct resource ltq_stp_resource = {
-       .name   = "stp",
-       .start  = LTQ_STP_BASE_ADDR,
-       .end    = LTQ_STP_BASE_ADDR + LTQ_STP_SIZE - 1,
-       .flags  = IORESOURCE_MEM,
-};
-
-void __init ltq_register_gpio_stp(void)
-{
-       platform_device_register_simple("ltq_stp", 0, &ltq_stp_resource, 1);
-}
-
-/* asc ports - amazon se has its own serial mapping */
-static struct resource ltq_ase_asc_resources[] = {
-       {
-               .name   = "asc0",
-               .start  = LTQ_ASC1_BASE_ADDR,
-               .end    = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       IRQ_RES(tx, LTQ_ASC_ASE_TIR),
-       IRQ_RES(rx, LTQ_ASC_ASE_RIR),
-       IRQ_RES(err, LTQ_ASC_ASE_EIR),
-};
-
-void __init ltq_register_ase_asc(void)
-{
-       platform_device_register_simple("ltq_asc", 0,
-               ltq_ase_asc_resources, ARRAY_SIZE(ltq_ase_asc_resources));
-}
-
-/* ethernet */
-static struct resource ltq_etop_resources = {
-       .name   = "etop",
-       .start  = LTQ_ETOP_BASE_ADDR,
-       .end    = LTQ_ETOP_BASE_ADDR + LTQ_ETOP_SIZE - 1,
-       .flags  = IORESOURCE_MEM,
-};
-
-static struct platform_device ltq_etop = {
-       .name           = "ltq_etop",
-       .resource       = &ltq_etop_resources,
-       .num_resources  = 1,
-};
-
-void __init
-ltq_register_etop(struct ltq_eth_data *eth)
-{
-       if (eth) {
-               ltq_etop.dev.platform_data = eth;
-               platform_device_register(&ltq_etop);
-       }
-}
diff --git a/arch/mips/lantiq/xway/devices.h b/arch/mips/lantiq/xway/devices.h
deleted file mode 100644 (file)
index e904934..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#ifndef _LTQ_DEVICES_XWAY_H__
-#define _LTQ_DEVICES_XWAY_H__
-
-#include "../devices.h"
-#include <linux/phy.h>
-
-extern void ltq_register_gpio(void);
-extern void ltq_register_gpio_stp(void);
-extern void ltq_register_ase_asc(void);
-extern void ltq_register_etop(struct ltq_eth_data *eth);
-
-#endif
index b210e936c7c3a84900eeeb0c1ba628b7479ba71a..55d2c4fa47140d555d762fadb74b982b7a2f89fd 100644 (file)
@@ -19,7 +19,8 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
-#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/clk.h>
 
 #include <lantiq_soc.h>
 #include <xway_dma.h>
 #define ltq_dma_w32_mask(x, y, z)      ltq_w32_mask(x, y, \
                                                ltq_dma_membase + (z))
 
-static struct resource ltq_dma_resource = {
-       .name   = "dma",
-       .start  = LTQ_DMA_BASE_ADDR,
-       .end    = LTQ_DMA_BASE_ADDR + LTQ_DMA_SIZE - 1,
-       .flags  = IORESOURCE_MEM,
-};
-
 static void __iomem *ltq_dma_membase;
 
 void
@@ -215,27 +209,28 @@ ltq_dma_init_port(int p)
 }
 EXPORT_SYMBOL_GPL(ltq_dma_init_port);
 
-int __init
-ltq_dma_init(void)
+static int __devinit
+ltq_dma_init(struct platform_device *pdev)
 {
+       struct clk *clk;
+       struct resource *res;
        int i;
 
-       /* insert and request the memory region */
-       if (insert_resource(&iomem_resource, &ltq_dma_resource) < 0)
-               panic("Failed to insert dma memory");
-
-       if (request_mem_region(ltq_dma_resource.start,
-                       resource_size(&ltq_dma_resource), "dma") < 0)
-               panic("Failed to request dma memory");
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               panic("Failed to get dma resource");
 
        /* remap dma register range */
-       ltq_dma_membase = ioremap_nocache(ltq_dma_resource.start,
-                               resource_size(&ltq_dma_resource));
+       ltq_dma_membase = devm_request_and_ioremap(&pdev->dev, res);
        if (!ltq_dma_membase)
-               panic("Failed to remap dma memory");
+               panic("Failed to remap dma resource");
 
        /* power up and reset the dma engine */
-       ltq_pmu_enable(PMU_DMA);
+       clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(clk))
+               panic("Failed to get dma clock");
+
+       clk_enable(clk);
        ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL);
 
        /* disable all interrupts */
@@ -248,7 +243,29 @@ ltq_dma_init(void)
                ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
                ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
        }
+       dev_info(&pdev->dev, "init done\n");
        return 0;
 }
 
-postcore_initcall(ltq_dma_init);
+static const struct of_device_id dma_match[] = {
+       { .compatible = "lantiq,dma-xway" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, dma_match);
+
+static struct platform_driver dma_driver = {
+       .probe = ltq_dma_init,
+       .driver = {
+               .name = "dma-xway",
+               .owner = THIS_MODULE,
+               .of_match_table = dma_match,
+       },
+};
+
+int __init
+dma_init(void)
+{
+       return platform_driver_register(&dma_driver);
+}
+
+postcore_initcall(dma_init);
diff --git a/arch/mips/lantiq/xway/ebu.c b/arch/mips/lantiq/xway/ebu.c
deleted file mode 100644 (file)
index 862e3e8..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  EBU - the external bus unit attaches PCI, NOR and NAND
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-
-#include <lantiq_soc.h>
-
-/* all access to the ebu must be locked */
-DEFINE_SPINLOCK(ebu_lock);
-EXPORT_SYMBOL_GPL(ebu_lock);
-
-static struct resource ltq_ebu_resource = {
-       .name   = "ebu",
-       .start  = LTQ_EBU_BASE_ADDR,
-       .end    = LTQ_EBU_BASE_ADDR + LTQ_EBU_SIZE - 1,
-       .flags  = IORESOURCE_MEM,
-};
-
-/* remapped base addr of the clock unit and external bus unit */
-void __iomem *ltq_ebu_membase;
-
-static int __init lantiq_ebu_init(void)
-{
-       /* insert and request the memory region */
-       if (insert_resource(&iomem_resource, &ltq_ebu_resource) < 0)
-               panic("Failed to insert ebu memory");
-
-       if (request_mem_region(ltq_ebu_resource.start,
-                       resource_size(&ltq_ebu_resource), "ebu") < 0)
-               panic("Failed to request ebu memory");
-
-       /* remap ebu register range */
-       ltq_ebu_membase = ioremap_nocache(ltq_ebu_resource.start,
-                               resource_size(&ltq_ebu_resource));
-       if (!ltq_ebu_membase)
-               panic("Failed to remap ebu memory");
-
-       /* make sure to unprotect the memory region where flash is located */
-       ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
-       return 0;
-}
-
-postcore_initcall(lantiq_ebu_init);
index c429a5bc080fdfca9fd9c40eb61c5cdb9b8198d6..2ab39e93d9beb711a7f8b3250a69e2ec094a287d 100644 (file)
@@ -36,18 +36,6 @@ struct ltq_gpio {
 
 static struct ltq_gpio ltq_gpio_port[MAX_PORTS];
 
-int gpio_to_irq(unsigned int gpio)
-{
-       return -EINVAL;
-}
-EXPORT_SYMBOL(gpio_to_irq);
-
-int irq_to_gpio(unsigned int gpio)
-{
-       return -EINVAL;
-}
-EXPORT_SYMBOL(irq_to_gpio);
-
 int ltq_gpio_request(unsigned int pin, unsigned int alt0,
        unsigned int alt1, unsigned int dir, const char *name)
 {
diff --git a/arch/mips/lantiq/xway/gpio_ebu.c b/arch/mips/lantiq/xway/gpio_ebu.c
deleted file mode 100644 (file)
index aae1717..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-
-#include <lantiq_soc.h>
-
-/*
- * By attaching hardware latches to the EBU it is possible to create output
- * only gpios. This driver configures a special memory address, which when
- * written to outputs 16 bit to the latches.
- */
-
-#define LTQ_EBU_BUSCON 0x1e7ff         /* 16 bit access, slowest timing */
-#define LTQ_EBU_WP     0x80000000      /* write protect bit */
-
-/* we keep a shadow value of the last value written to the ebu */
-static int ltq_ebu_gpio_shadow = 0x0;
-static void __iomem *ltq_ebu_gpio_membase;
-
-static void ltq_ebu_apply(void)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&ebu_lock, flags);
-       ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
-       *((__u16 *)ltq_ebu_gpio_membase) = ltq_ebu_gpio_shadow;
-       ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
-       spin_unlock_irqrestore(&ebu_lock, flags);
-}
-
-static void ltq_ebu_set(struct gpio_chip *chip, unsigned offset, int value)
-{
-       if (value)
-               ltq_ebu_gpio_shadow |= (1 << offset);
-       else
-               ltq_ebu_gpio_shadow &= ~(1 << offset);
-       ltq_ebu_apply();
-}
-
-static int ltq_ebu_direction_output(struct gpio_chip *chip, unsigned offset,
-       int value)
-{
-       ltq_ebu_set(chip, offset, value);
-
-       return 0;
-}
-
-static struct gpio_chip ltq_ebu_chip = {
-       .label = "ltq_ebu",
-       .direction_output = ltq_ebu_direction_output,
-       .set = ltq_ebu_set,
-       .base = 72,
-       .ngpio = 16,
-       .can_sleep = 1,
-       .owner = THIS_MODULE,
-};
-
-static int ltq_ebu_probe(struct platform_device *pdev)
-{
-       int ret = 0;
-       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-       if (!res) {
-               dev_err(&pdev->dev, "failed to get memory resource\n");
-               return -ENOENT;
-       }
-
-       res = devm_request_mem_region(&pdev->dev, res->start,
-               resource_size(res), dev_name(&pdev->dev));
-       if (!res) {
-               dev_err(&pdev->dev, "failed to request memory resource\n");
-               return -EBUSY;
-       }
-
-       ltq_ebu_gpio_membase = devm_ioremap_nocache(&pdev->dev, res->start,
-               resource_size(res));
-       if (!ltq_ebu_gpio_membase) {
-               dev_err(&pdev->dev, "Failed to ioremap mem region\n");
-               return -ENOMEM;
-       }
-
-       /* grab the default shadow value passed form the platform code */
-       ltq_ebu_gpio_shadow = (unsigned int) pdev->dev.platform_data;
-
-       /* tell the ebu controller which memory address we will be using */
-       ltq_ebu_w32(pdev->resource->start | 0x1, LTQ_EBU_ADDRSEL1);
-
-       /* write protect the region */
-       ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
-
-       ret = gpiochip_add(&ltq_ebu_chip);
-       if (!ret)
-               ltq_ebu_apply();
-       return ret;
-}
-
-static struct platform_driver ltq_ebu_driver = {
-       .probe = ltq_ebu_probe,
-       .driver = {
-               .name = "ltq_ebu",
-               .owner = THIS_MODULE,
-       },
-};
-
-static int __init ltq_ebu_init(void)
-{
-       int ret = platform_driver_register(&ltq_ebu_driver);
-
-       if (ret)
-               pr_info("ltq_ebu : Error registering platform driver!");
-       return ret;
-}
-
-postcore_initcall(ltq_ebu_init);
diff --git a/arch/mips/lantiq/xway/gpio_stp.c b/arch/mips/lantiq/xway/gpio_stp.c
deleted file mode 100644 (file)
index fd07d87..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2007 John Crispin <blogic@openwrt.org>
- *
- */
-
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <lantiq_soc.h>
-
-#define LTQ_STP_CON0           0x00
-#define LTQ_STP_CON1           0x04
-#define LTQ_STP_CPU0           0x08
-#define LTQ_STP_CPU1           0x0C
-#define LTQ_STP_AR             0x10
-
-#define LTQ_STP_CON_SWU                (1 << 31)
-#define LTQ_STP_2HZ            0
-#define LTQ_STP_4HZ            (1 << 23)
-#define LTQ_STP_8HZ            (2 << 23)
-#define LTQ_STP_10HZ           (3 << 23)
-#define LTQ_STP_SPEED_MASK     (0xf << 23)
-#define LTQ_STP_UPD_FPI                (1 << 31)
-#define LTQ_STP_UPD_MASK       (3 << 30)
-#define LTQ_STP_ADSL_SRC       (3 << 24)
-
-#define LTQ_STP_GROUP0         (1 << 0)
-
-#define LTQ_STP_RISING         0
-#define LTQ_STP_FALLING                (1 << 26)
-#define LTQ_STP_EDGE_MASK      (1 << 26)
-
-#define ltq_stp_r32(reg)       __raw_readl(ltq_stp_membase + reg)
-#define ltq_stp_w32(val, reg)  __raw_writel(val, ltq_stp_membase + reg)
-#define ltq_stp_w32_mask(clear, set, reg) \
-               ltq_w32((ltq_r32(ltq_stp_membase + reg) & ~(clear)) | (set), \
-               ltq_stp_membase + (reg))
-
-static int ltq_stp_shadow = 0xffff;
-static void __iomem *ltq_stp_membase;
-
-static void ltq_stp_set(struct gpio_chip *chip, unsigned offset, int value)
-{
-       if (value)
-               ltq_stp_shadow |= (1 << offset);
-       else
-               ltq_stp_shadow &= ~(1 << offset);
-       ltq_stp_w32(ltq_stp_shadow, LTQ_STP_CPU0);
-}
-
-static int ltq_stp_direction_output(struct gpio_chip *chip, unsigned offset,
-       int value)
-{
-       ltq_stp_set(chip, offset, value);
-
-       return 0;
-}
-
-static struct gpio_chip ltq_stp_chip = {
-       .label = "ltq_stp",
-       .direction_output = ltq_stp_direction_output,
-       .set = ltq_stp_set,
-       .base = 48,
-       .ngpio = 24,
-       .can_sleep = 1,
-       .owner = THIS_MODULE,
-};
-
-static int ltq_stp_hw_init(void)
-{
-       /* the 3 pins used to control the external stp */
-       ltq_gpio_request(4, 1, 0, 1, "stp-st");
-       ltq_gpio_request(5, 1, 0, 1, "stp-d");
-       ltq_gpio_request(6, 1, 0, 1, "stp-sh");
-
-       /* sane defaults */
-       ltq_stp_w32(0, LTQ_STP_AR);
-       ltq_stp_w32(0, LTQ_STP_CPU0);
-       ltq_stp_w32(0, LTQ_STP_CPU1);
-       ltq_stp_w32(LTQ_STP_CON_SWU, LTQ_STP_CON0);
-       ltq_stp_w32(0, LTQ_STP_CON1);
-
-       /* rising or falling edge */
-       ltq_stp_w32_mask(LTQ_STP_EDGE_MASK, LTQ_STP_FALLING, LTQ_STP_CON0);
-
-       /* per default stp 15-0 are set */
-       ltq_stp_w32_mask(0, LTQ_STP_GROUP0, LTQ_STP_CON1);
-
-       /* stp are update periodically by the FPI bus */
-       ltq_stp_w32_mask(LTQ_STP_UPD_MASK, LTQ_STP_UPD_FPI, LTQ_STP_CON1);
-
-       /* set stp update speed */
-       ltq_stp_w32_mask(LTQ_STP_SPEED_MASK, LTQ_STP_8HZ, LTQ_STP_CON1);
-
-       /* tell the hardware that pin (led) 0 and 1 are controlled
-        *  by the dsl arc
-        */
-       ltq_stp_w32_mask(0, LTQ_STP_ADSL_SRC, LTQ_STP_CON0);
-
-       ltq_pmu_enable(PMU_LED);
-       return 0;
-}
-
-static int __devinit ltq_stp_probe(struct platform_device *pdev)
-{
-       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       int ret = 0;
-
-       if (!res)
-               return -ENOENT;
-       res = devm_request_mem_region(&pdev->dev, res->start,
-               resource_size(res), dev_name(&pdev->dev));
-       if (!res) {
-               dev_err(&pdev->dev, "failed to request STP memory\n");
-               return -EBUSY;
-       }
-       ltq_stp_membase = devm_ioremap_nocache(&pdev->dev, res->start,
-               resource_size(res));
-       if (!ltq_stp_membase) {
-               dev_err(&pdev->dev, "failed to remap STP memory\n");
-               return -ENOMEM;
-       }
-       ret = gpiochip_add(&ltq_stp_chip);
-       if (!ret)
-               ret = ltq_stp_hw_init();
-
-       return ret;
-}
-
-static struct platform_driver ltq_stp_driver = {
-       .probe = ltq_stp_probe,
-       .driver = {
-               .name = "ltq_stp",
-               .owner = THIS_MODULE,
-       },
-};
-
-int __init ltq_stp_init(void)
-{
-       int ret = platform_driver_register(&ltq_stp_driver);
-
-       if (ret)
-               pr_info("ltq_stp: error registering platform driver");
-       return ret;
-}
-
-postcore_initcall(ltq_stp_init);
diff --git a/arch/mips/lantiq/xway/mach-easy50601.c b/arch/mips/lantiq/xway/mach-easy50601.c
deleted file mode 100644 (file)
index d5aaf63..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
-#include <linux/input.h>
-
-#include <lantiq.h>
-
-#include "../machtypes.h"
-#include "devices.h"
-
-static struct mtd_partition easy50601_partitions[] = {
-       {
-               .name   = "uboot",
-               .offset = 0x0,
-               .size   = 0x10000,
-       },
-       {
-               .name   = "uboot_env",
-               .offset = 0x10000,
-               .size   = 0x10000,
-       },
-       {
-               .name   = "linux",
-               .offset = 0x20000,
-               .size   = 0xE0000,
-       },
-       {
-               .name   = "rootfs",
-               .offset = 0x100000,
-               .size   = 0x300000,
-       },
-};
-
-static struct physmap_flash_data easy50601_flash_data = {
-       .nr_parts       = ARRAY_SIZE(easy50601_partitions),
-       .parts          = easy50601_partitions,
-};
-
-static void __init easy50601_init(void)
-{
-       ltq_register_nor(&easy50601_flash_data);
-}
-
-MIPS_MACHINE(LTQ_MACH_EASY50601,
-                       "EASY50601",
-                       "EASY50601 Eval Board",
-                       easy50601_init);
diff --git a/arch/mips/lantiq/xway/mach-easy50712.c b/arch/mips/lantiq/xway/mach-easy50712.c
deleted file mode 100644 (file)
index ea5027b..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
-#include <linux/input.h>
-#include <linux/phy.h>
-
-#include <lantiq_soc.h>
-#include <irq.h>
-
-#include "../machtypes.h"
-#include "devices.h"
-
-static struct mtd_partition easy50712_partitions[] = {
-       {
-               .name   = "uboot",
-               .offset = 0x0,
-               .size   = 0x10000,
-       },
-       {
-               .name   = "uboot_env",
-               .offset = 0x10000,
-               .size   = 0x10000,
-       },
-       {
-               .name   = "linux",
-               .offset = 0x20000,
-               .size   = 0xe0000,
-       },
-       {
-               .name   = "rootfs",
-               .offset = 0x100000,
-               .size   = 0x300000,
-       },
-};
-
-static struct physmap_flash_data easy50712_flash_data = {
-       .nr_parts       = ARRAY_SIZE(easy50712_partitions),
-       .parts          = easy50712_partitions,
-};
-
-static struct ltq_pci_data ltq_pci_data = {
-       .clock  = PCI_CLOCK_INT,
-       .gpio   = PCI_GNT1 | PCI_REQ1,
-       .irq    = {
-               [14] = INT_NUM_IM0_IRL0 + 22,
-       },
-};
-
-static struct ltq_eth_data ltq_eth_data = {
-       .mii_mode = PHY_INTERFACE_MODE_MII,
-};
-
-static void __init easy50712_init(void)
-{
-       ltq_register_gpio_stp();
-       ltq_register_nor(&easy50712_flash_data);
-       ltq_register_pci(&ltq_pci_data);
-       ltq_register_etop(&ltq_eth_data);
-}
-
-MIPS_MACHINE(LTQ_MACH_EASY50712,
-            "EASY50712",
-            "EASY50712 Eval Board",
-             easy50712_init);
diff --git a/arch/mips/lantiq/xway/pmu.c b/arch/mips/lantiq/xway/pmu.c
deleted file mode 100644 (file)
index fe85361..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-
-#include <lantiq_soc.h>
-
-/* PMU - the power management unit allows us to turn part of the core
- * on and off
- */
-
-/* the enable / disable registers */
-#define LTQ_PMU_PWDCR  0x1C
-#define LTQ_PMU_PWDSR  0x20
-
-#define ltq_pmu_w32(x, y)      ltq_w32((x), ltq_pmu_membase + (y))
-#define ltq_pmu_r32(x)         ltq_r32(ltq_pmu_membase + (x))
-
-static struct resource ltq_pmu_resource = {
-       .name   = "pmu",
-       .start  = LTQ_PMU_BASE_ADDR,
-       .end    = LTQ_PMU_BASE_ADDR + LTQ_PMU_SIZE - 1,
-       .flags  = IORESOURCE_MEM,
-};
-
-static void __iomem *ltq_pmu_membase;
-
-void ltq_pmu_enable(unsigned int module)
-{
-       int err = 1000000;
-
-       ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) & ~module, LTQ_PMU_PWDCR);
-       do {} while (--err && (ltq_pmu_r32(LTQ_PMU_PWDSR) & module));
-
-       if (!err)
-               panic("activating PMU module failed!");
-}
-EXPORT_SYMBOL(ltq_pmu_enable);
-
-void ltq_pmu_disable(unsigned int module)
-{
-       ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) | module, LTQ_PMU_PWDCR);
-}
-EXPORT_SYMBOL(ltq_pmu_disable);
-
-int __init ltq_pmu_init(void)
-{
-       if (insert_resource(&iomem_resource, &ltq_pmu_resource) < 0)
-               panic("Failed to insert pmu memory");
-
-       if (request_mem_region(ltq_pmu_resource.start,
-                       resource_size(&ltq_pmu_resource), "pmu") < 0)
-               panic("Failed to request pmu memory");
-
-       ltq_pmu_membase = ioremap_nocache(ltq_pmu_resource.start,
-                               resource_size(&ltq_pmu_resource));
-       if (!ltq_pmu_membase)
-               panic("Failed to remap pmu memory");
-       return 0;
-}
-
-core_initcall(ltq_pmu_init);
diff --git a/arch/mips/lantiq/xway/prom-ase.c b/arch/mips/lantiq/xway/prom-ase.c
deleted file mode 100644 (file)
index ae4959a..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/export.h>
-#include <linux/clk.h>
-#include <asm/bootinfo.h>
-#include <asm/time.h>
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-
-#define SOC_AMAZON_SE  "Amazon_SE"
-
-#define PART_SHIFT     12
-#define PART_MASK      0x0FFFFFFF
-#define REV_SHIFT      28
-#define REV_MASK       0xF0000000
-
-void __init ltq_soc_detect(struct ltq_soc_info *i)
-{
-       i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
-       i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
-       switch (i->partnum) {
-       case SOC_ID_AMAZON_SE:
-               i->name = SOC_AMAZON_SE;
-               i->type = SOC_TYPE_AMAZON_SE;
-               break;
-
-       default:
-               unreachable();
-               break;
-       }
-}
diff --git a/arch/mips/lantiq/xway/prom-xway.c b/arch/mips/lantiq/xway/prom-xway.c
deleted file mode 100644 (file)
index 2228133..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/export.h>
-#include <linux/clk.h>
-#include <asm/bootinfo.h>
-#include <asm/time.h>
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-
-#define SOC_DANUBE     "Danube"
-#define SOC_TWINPASS   "Twinpass"
-#define SOC_AR9                "AR9"
-
-#define PART_SHIFT     12
-#define PART_MASK      0x0FFFFFFF
-#define REV_SHIFT      28
-#define REV_MASK       0xF0000000
-
-void __init ltq_soc_detect(struct ltq_soc_info *i)
-{
-       i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
-       i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
-       switch (i->partnum) {
-       case SOC_ID_DANUBE1:
-       case SOC_ID_DANUBE2:
-               i->name = SOC_DANUBE;
-               i->type = SOC_TYPE_DANUBE;
-               break;
-
-       case SOC_ID_TWINPASS:
-               i->name = SOC_TWINPASS;
-               i->type = SOC_TYPE_DANUBE;
-               break;
-
-       case SOC_ID_ARX188:
-       case SOC_ID_ARX168:
-       case SOC_ID_ARX182:
-               i->name = SOC_AR9;
-               i->type = SOC_TYPE_AR9;
-               break;
-
-       default:
-               unreachable();
-               break;
-       }
-}
diff --git a/arch/mips/lantiq/xway/prom.c b/arch/mips/lantiq/xway/prom.c
new file mode 100644 (file)
index 0000000..248429a
--- /dev/null
@@ -0,0 +1,115 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/export.h>
+#include <linux/clk.h>
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+
+#include <lantiq_soc.h>
+
+#include "../prom.h"
+
+#define SOC_DANUBE     "Danube"
+#define SOC_TWINPASS   "Twinpass"
+#define SOC_AMAZON_SE  "Amazon_SE"
+#define SOC_AR9                "AR9"
+#define SOC_GR9                "GR9"
+#define SOC_VR9                "VR9"
+
+#define COMP_DANUBE    "lantiq,danube"
+#define COMP_TWINPASS  "lantiq,twinpass"
+#define COMP_AMAZON_SE "lantiq,ase"
+#define COMP_AR9       "lantiq,ar9"
+#define COMP_GR9       "lantiq,gr9"
+#define COMP_VR9       "lantiq,vr9"
+
+#define PART_SHIFT     12
+#define PART_MASK      0x0FFFFFFF
+#define REV_SHIFT      28
+#define REV_MASK       0xF0000000
+
+void __init ltq_soc_detect(struct ltq_soc_info *i)
+{
+       i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
+       i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
+       sprintf(i->rev_type, "1.%d", i->rev);
+       switch (i->partnum) {
+       case SOC_ID_DANUBE1:
+       case SOC_ID_DANUBE2:
+               i->name = SOC_DANUBE;
+               i->type = SOC_TYPE_DANUBE;
+               i->compatible = COMP_DANUBE;
+               break;
+
+       case SOC_ID_TWINPASS:
+               i->name = SOC_TWINPASS;
+               i->type = SOC_TYPE_DANUBE;
+               i->compatible = COMP_TWINPASS;
+               break;
+
+       case SOC_ID_ARX188:
+       case SOC_ID_ARX168_1:
+       case SOC_ID_ARX168_2:
+       case SOC_ID_ARX182:
+               i->name = SOC_AR9;
+               i->type = SOC_TYPE_AR9;
+               i->compatible = COMP_AR9;
+               break;
+
+       case SOC_ID_GRX188:
+       case SOC_ID_GRX168:
+               i->name = SOC_GR9;
+               i->type = SOC_TYPE_AR9;
+               i->compatible = COMP_GR9;
+               break;
+
+       case SOC_ID_AMAZON_SE_1:
+       case SOC_ID_AMAZON_SE_2:
+#ifdef CONFIG_PCI
+               panic("ase is only supported for non pci kernels");
+#endif
+               i->name = SOC_AMAZON_SE;
+               i->type = SOC_TYPE_AMAZON_SE;
+               i->compatible = COMP_AMAZON_SE;
+               break;
+
+       case SOC_ID_VRX282:
+       case SOC_ID_VRX268:
+       case SOC_ID_VRX288:
+               i->name = SOC_VR9;
+               i->type = SOC_TYPE_VR9;
+               i->compatible = COMP_VR9;
+               break;
+
+       case SOC_ID_GRX268:
+       case SOC_ID_GRX288:
+               i->name = SOC_GR9;
+               i->type = SOC_TYPE_VR9;
+               i->compatible = COMP_GR9;
+               break;
+
+       case SOC_ID_VRX268_2:
+       case SOC_ID_VRX288_2:
+               i->name = SOC_VR9;
+               i->type = SOC_TYPE_VR9_2;
+               i->compatible = COMP_VR9;
+               break;
+
+       case SOC_ID_GRX282_2:
+       case SOC_ID_GRX288_2:
+               i->name = SOC_GR9;
+               i->type = SOC_TYPE_VR9_2;
+               i->compatible = COMP_GR9;
+               break;
+
+       default:
+               unreachable();
+               break;
+       }
+}
index 8b66bd87f0c1cbf4a698c47cf78721da799aee0f..22c55f73aa9d646c535560a9938e27663970a9d9 100644 (file)
 #include <linux/ioport.h>
 #include <linux/pm.h>
 #include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
 #include <asm/reboot.h>
 
 #include <lantiq_soc.h>
 
+#include "../prom.h"
+
 #define ltq_rcu_w32(x, y)      ltq_w32((x), ltq_rcu_membase + (y))
 #define ltq_rcu_r32(x)         ltq_r32(ltq_rcu_membase + (x))
 
-/* register definitions */
-#define LTQ_RCU_RST            0x0010
-#define LTQ_RCU_RST_ALL                0x40000000
-
-#define LTQ_RCU_RST_STAT       0x0014
-#define LTQ_RCU_STAT_SHIFT     26
+/* reset request register */
+#define RCU_RST_REQ            0x0010
+/* reset status register */
+#define RCU_RST_STAT           0x0014
 
-static struct resource ltq_rcu_resource = {
-       .name   = "rcu",
-       .start  = LTQ_RCU_BASE_ADDR,
-       .end    = LTQ_RCU_BASE_ADDR + LTQ_RCU_SIZE - 1,
-       .flags  = IORESOURCE_MEM,
-};
+/* reboot bit */
+#define RCU_RD_SRST            BIT(30)
+/* reset cause */
+#define RCU_STAT_SHIFT         26
+/* boot selection */
+#define RCU_BOOT_SEL_SHIFT     26
+#define RCU_BOOT_SEL_MASK      0x7
 
 /* remapped base addr of the reset control unit */
 static void __iomem *ltq_rcu_membase;
@@ -38,48 +43,64 @@ static void __iomem *ltq_rcu_membase;
 /* This function is used by the watchdog driver */
 int ltq_reset_cause(void)
 {
-       u32 val = ltq_rcu_r32(LTQ_RCU_RST_STAT);
-       return val >> LTQ_RCU_STAT_SHIFT;
+       u32 val = ltq_rcu_r32(RCU_RST_STAT);
+       return val >> RCU_STAT_SHIFT;
 }
 EXPORT_SYMBOL_GPL(ltq_reset_cause);
 
+/* allow platform code to find out what source we booted from */
+unsigned char ltq_boot_select(void)
+{
+       u32 val = ltq_rcu_r32(RCU_RST_STAT);
+       return (val >> RCU_BOOT_SEL_SHIFT) & RCU_BOOT_SEL_MASK;
+}
+
+/* reset a io domain for u micro seconds */
+void ltq_reset_once(unsigned int module, ulong u)
+{
+       ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | module, RCU_RST_REQ);
+       udelay(u);
+       ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) & ~module, RCU_RST_REQ);
+}
+
 static void ltq_machine_restart(char *command)
 {
-       pr_notice("System restart\n");
        local_irq_disable();
-       ltq_rcu_w32(ltq_rcu_r32(LTQ_RCU_RST) | LTQ_RCU_RST_ALL, LTQ_RCU_RST);
+       ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | RCU_RD_SRST, RCU_RST_REQ);
        unreachable();
 }
 
 static void ltq_machine_halt(void)
 {
-       pr_notice("System halted.\n");
        local_irq_disable();
        unreachable();
 }
 
 static void ltq_machine_power_off(void)
 {
-       pr_notice("Please turn off the power now.\n");
        local_irq_disable();
        unreachable();
 }
 
 static int __init mips_reboot_setup(void)
 {
-       /* insert and request the memory region */
-       if (insert_resource(&iomem_resource, &ltq_rcu_resource) < 0)
-               panic("Failed to insert rcu memory");
+       struct resource res;
+       struct device_node *np =
+               of_find_compatible_node(NULL, NULL, "lantiq,rcu-xway");
+
+       /* check if all the reset register range is available */
+       if (!np)
+               panic("Failed to load reset resources from devicetree");
+
+       if (of_address_to_resource(np, 0, &res))
+               panic("Failed to get rcu memory range");
 
-       if (request_mem_region(ltq_rcu_resource.start,
-                       resource_size(&ltq_rcu_resource), "rcu") < 0)
-               panic("Failed to request rcu memory");
+       if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
+               pr_err("Failed to request rcu memory");
 
-       /* remap rcu register range */
-       ltq_rcu_membase = ioremap_nocache(ltq_rcu_resource.start,
-                               resource_size(&ltq_rcu_resource));
+       ltq_rcu_membase = ioremap_nocache(res.start, resource_size(&res));
        if (!ltq_rcu_membase)
-               panic("Failed to remap rcu memory");
+               panic("Failed to remap core memory");
 
        _machine_restart = ltq_machine_restart;
        _machine_halt = ltq_machine_halt;
diff --git a/arch/mips/lantiq/xway/setup-ase.c b/arch/mips/lantiq/xway/setup-ase.c
deleted file mode 100644 (file)
index f6f3267..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2011 John Crispin <blogic@openwrt.org>
- */
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-#include "devices.h"
-
-void __init ltq_soc_setup(void)
-{
-       ltq_register_ase_asc();
-       ltq_register_gpio();
-       ltq_register_wdt();
-}
diff --git a/arch/mips/lantiq/xway/setup-xway.c b/arch/mips/lantiq/xway/setup-xway.c
deleted file mode 100644 (file)
index c292f64..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright (C) 2011 John Crispin <blogic@openwrt.org>
- */
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-#include "devices.h"
-
-void __init ltq_soc_setup(void)
-{
-       ltq_register_asc(0);
-       ltq_register_asc(1);
-       ltq_register_gpio();
-       ltq_register_wdt();
-}
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
new file mode 100644 (file)
index 0000000..83780f7
--- /dev/null
@@ -0,0 +1,371 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2011-2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+#include "../prom.h"
+
+/* clock control register */
+#define CGU_IFCCR      0x0018
+/* system clock register */
+#define CGU_SYS                0x0010
+/* pci control register */
+#define CGU_PCICR      0x0034
+/* ephy configuration register */
+#define CGU_EPHY       0x10
+/* power control register */
+#define PMU_PWDCR      0x1C
+/* power status register */
+#define PMU_PWDSR      0x20
+/* power control register */
+#define PMU_PWDCR1     0x24
+/* power status register */
+#define PMU_PWDSR1     0x28
+/* power control register */
+#define PWDCR(x) ((x) ? (PMU_PWDCR1) : (PMU_PWDCR))
+/* power status register */
+#define PWDSR(x) ((x) ? (PMU_PWDSR1) : (PMU_PWDSR))
+
+/* clock gates that we can en/disable */
+#define PMU_USB0_P     BIT(0)
+#define PMU_PCI                BIT(4)
+#define PMU_DMA                BIT(5)
+#define PMU_USB0       BIT(6)
+#define PMU_ASC0       BIT(7)
+#define PMU_EPHY       BIT(7)  /* ase */
+#define PMU_SPI                BIT(8)
+#define PMU_DFE                BIT(9)
+#define PMU_EBU                BIT(10)
+#define PMU_STP                BIT(11)
+#define PMU_GPT                BIT(12)
+#define PMU_AHBS       BIT(13) /* vr9 */
+#define PMU_FPI                BIT(14)
+#define PMU_AHBM       BIT(15)
+#define PMU_ASC1       BIT(17)
+#define PMU_PPE_QSB    BIT(18)
+#define PMU_PPE_SLL01  BIT(19)
+#define PMU_PPE_TC     BIT(21)
+#define PMU_PPE_EMA    BIT(22)
+#define PMU_PPE_DPLUM  BIT(23)
+#define PMU_PPE_DPLUS  BIT(24)
+#define PMU_USB1_P     BIT(26)
+#define PMU_USB1       BIT(27)
+#define PMU_SWITCH     BIT(28)
+#define PMU_PPE_TOP    BIT(29)
+#define PMU_GPHY       BIT(30)
+#define PMU_PCIE_CLK   BIT(31)
+
+#define PMU1_PCIE_PHY  BIT(0)
+#define PMU1_PCIE_CTL  BIT(1)
+#define PMU1_PCIE_PDI  BIT(4)
+#define PMU1_PCIE_MSI  BIT(5)
+
+#define pmu_w32(x, y)  ltq_w32((x), pmu_membase + (y))
+#define pmu_r32(x)     ltq_r32(pmu_membase + (x))
+
+static void __iomem *pmu_membase;
+void __iomem *ltq_cgu_membase;
+void __iomem *ltq_ebu_membase;
+
+/* legacy function kept alive to ease clkdev transition */
+/*
+ * Power up a PMU module (PWDCR register 0 only) and busy-wait until the
+ * status register confirms it; panics after ~1000000 polls without
+ * success. New code should use the clkdev clocks instead.
+ */
+void ltq_pmu_enable(unsigned int module)
+{
+       int err = 1000000;
+
+       /* clearing the PWDCR bit powers the module up */
+       pmu_w32(pmu_r32(PMU_PWDCR) & ~module, PMU_PWDCR);
+       do {} while (--err && (pmu_r32(PMU_PWDSR) & module));
+
+       if (!err)
+               panic("activating PMU module failed!");
+}
+EXPORT_SYMBOL(ltq_pmu_enable);
+
+/* legacy function kept alive to ease clkdev transition */
+void ltq_pmu_disable(unsigned int module)
+{
+       pmu_w32(pmu_r32(PMU_PWDCR) | module, PMU_PWDCR);
+}
+EXPORT_SYMBOL(ltq_pmu_disable);
+
+/* enable a hw clock */
+static int cgu_enable(struct clk *clk)
+{
+       ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | clk->bits, CGU_IFCCR);
+       return 0;
+}
+
+/* disable a hw clock */
+static void cgu_disable(struct clk *clk)
+{
+       ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~clk->bits, CGU_IFCCR);
+}
+
+/* enable a clock gate */
+static int pmu_enable(struct clk *clk)
+{
+       int retry = 1000000;
+
+       pmu_w32(pmu_r32(PWDCR(clk->module)) & ~clk->bits,
+               PWDCR(clk->module));
+       do {} while (--retry && (pmu_r32(PWDSR(clk->module)) & clk->bits));
+
+       if (!retry)
+               panic("activating PMU module failed!\n");
+
+       return 0;
+}
+
+/* disable a clock gate */
+static void pmu_disable(struct clk *clk)
+{
+       pmu_w32(pmu_r32(PWDCR(clk->module)) | clk->bits,
+               PWDCR(clk->module));
+}
+
+/* the pci enable helper */
+static int pci_enable(struct clk *clk)
+{
+       unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR);
+       /* set bus clock speed */
+       if (of_machine_is_compatible("lantiq,ar9")) {
+               ifccr &= ~0x1f00000;
+               if (clk->rate == CLOCK_33M)
+                       ifccr |= 0xe00000;
+               else
+                       ifccr |= 0x700000; /* 62.5M */
+       } else {
+               ifccr &= ~0xf00000;
+               if (clk->rate == CLOCK_33M)
+                       ifccr |= 0x800000;
+               else
+                       ifccr |= 0x400000; /* 62.5M */
+       }
+       ltq_cgu_w32(ifccr, CGU_IFCCR);
+       pmu_enable(clk);
+       return 0;
+}
+
+/* enable the external clock as a source */
+static int pci_ext_enable(struct clk *clk)
+{
+       ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~(1 << 16),
+               CGU_IFCCR);
+       ltq_cgu_w32((1 << 30), CGU_PCICR);
+       return 0;
+}
+
+/* disable the external clock as a source */
+static void pci_ext_disable(struct clk *clk)
+{
+       ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | (1 << 16),
+               CGU_IFCCR);
+       ltq_cgu_w32((1 << 31) | (1 << 30), CGU_PCICR);
+}
+
+/* enable a clockout source */
+static int clkout_enable(struct clk *clk)
+{
+       int i;
+
+       /* get the correct rate */
+       for (i = 0; i < 4; i++) {
+               if (clk->rates[i] == clk->rate) {
+                       int shift = 14 - (2 * clk->module);
+                       unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR);
+
+                       ifccr &= ~(3 << shift);
+                       ifccr |= i << shift;
+                       ltq_cgu_w32(ifccr, CGU_IFCCR);
+                       return 0;
+               }
+       }
+       return -1;
+}
+
+/*
+ * Register a PMU-gated clock with clkdev.
+ * @dev:    clkdev device id the clock is looked up by
+ * @con:    clkdev connection id (may be NULL)
+ * @module: selects PWDCR/PWDSR (0) or PWDCR1/PWDSR1 (1) register pair
+ * @bits:   gate bit(s) inside that register
+ */
+static void clkdev_add_pmu(const char *dev, const char *con,
+                                       unsigned int module, unsigned int bits)
+{
+       struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+       /* early-boot allocation; skip registration instead of oopsing */
+       if (!clk)
+               return;
+
+       clk->cl.dev_id = dev;
+       clk->cl.con_id = con;
+       clk->cl.clk = clk;
+       clk->enable = pmu_enable;
+       clk->disable = pmu_disable;
+       clk->module = module;
+       clk->bits = bits;
+       clkdev_add(&clk->cl);
+}
+
+/* manage the clock generator */
+static void clkdev_add_cgu(const char *dev, const char *con,
+                                       unsigned int bits)
+{
+       struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+       clk->cl.dev_id = dev;
+       clk->cl.con_id = con;
+       clk->cl.clk = clk;
+       clk->enable = cgu_enable;
+       clk->disable = cgu_disable;
+       clk->bits = bits;
+       clkdev_add(&clk->cl);
+}
+
+/* pci needs its own enable function as the setup is a bit more complex */
+static unsigned long valid_pci_rates[] = {CLOCK_33M, CLOCK_62_5M, 0};
+
+/*
+ * Register the two PCI clocks: the main bus clock (PMU gated, rate
+ * programmable via pci_enable()) and the internal/external clock
+ * source selector.
+ */
+static void clkdev_add_pci(void)
+{
+       struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+       struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+       /* both clocks are needed; give up cleanly if either alloc fails */
+       if (!clk || !clk_ext) {
+               kfree(clk);
+               kfree(clk_ext);
+               return;
+       }
+
+       /* main pci clock */
+       clk->cl.dev_id = "17000000.pci";
+       clk->cl.con_id = NULL;
+       clk->cl.clk = clk;
+       clk->rate = CLOCK_33M;
+       clk->rates = valid_pci_rates;
+       clk->enable = pci_enable;
+       clk->disable = pmu_disable;
+       clk->module = 0;
+       clk->bits = PMU_PCI;
+       clkdev_add(&clk->cl);
+
+       /* use internal/external bus clock */
+       clk_ext->cl.dev_id = "17000000.pci";
+       clk_ext->cl.con_id = "external";
+       clk_ext->cl.clk = clk_ext;
+       clk_ext->enable = pci_ext_enable;
+       clk_ext->disable = pci_ext_disable;
+       clkdev_add(&clk_ext->cl);
+}
+
+/* xway socs can generate clocks on gpio pins */
+static unsigned long valid_clkout_rates[4][5] = {
+       {CLOCK_32_768K, CLOCK_1_536M, CLOCK_2_5M, CLOCK_12M, 0},
+       {CLOCK_40M, CLOCK_12M, CLOCK_24M, CLOCK_48M, 0},
+       {CLOCK_25M, CLOCK_40M, CLOCK_30M, CLOCK_60M, 0},
+       {CLOCK_12M, CLOCK_50M, CLOCK_32_768K, CLOCK_25M, 0},
+};
+
+/*
+ * Register the four "clkoutN" gpio clock outputs. Each output has its
+ * own table of valid rates (valid_clkout_rates[i]); the rate is
+ * programmed into CGU_IFCCR by clkout_enable().
+ */
+static void clkdev_add_clkout(void)
+{
+       int i;
+
+       for (i = 0; i < 4; i++) {
+               struct clk *clk;
+               char *name;
+
+               /* "clkout0".."clkout3" - sizeof the template fits all four */
+               name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
+               if (!name)
+                       continue;
+               sprintf(name, "clkout%d", i);
+
+               clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+               if (!clk) {
+                       kfree(name);
+                       continue;
+               }
+               clk->cl.dev_id = "1f103000.cgu";
+               clk->cl.con_id = name;
+               clk->cl.clk = clk;
+               clk->rate = 0;
+               clk->rates = valid_clkout_rates[i];
+               clk->enable = clkout_enable;
+               clk->module = i;
+               clkdev_add(&clk->cl);
+       }
+}
+
+/*
+ * Bring up all register ranges that we need for basic system control:
+ * locate the PMU, CGU and EBU nodes in the devicetree, claim and remap
+ * their register windows, unlock the flash region and register the
+ * common/per-SoC clocks with clkdev.
+ */
+void __init ltq_soc_init(void)
+{
+       struct resource res_pmu, res_cgu, res_ebu;
+       struct device_node *np_pmu =
+                       of_find_compatible_node(NULL, NULL, "lantiq,pmu-xway");
+       struct device_node *np_cgu =
+                       of_find_compatible_node(NULL, NULL, "lantiq,cgu-xway");
+       struct device_node *np_ebu =
+                       of_find_compatible_node(NULL, NULL, "lantiq,ebu-xway");
+
+       /* check if all the core register ranges are available */
+       if (!np_pmu || !np_cgu || !np_ebu)
+               panic("Failed to load core nodes from devicetree");
+
+       if (of_address_to_resource(np_pmu, 0, &res_pmu) ||
+                       of_address_to_resource(np_cgu, 0, &res_cgu) ||
+                       of_address_to_resource(np_ebu, 0, &res_ebu))
+               panic("Failed to get core resources");
+
+       /*
+        * request_mem_region() returns a struct resource pointer (NULL on
+        * failure), so check against NULL - comparing the pointer with
+        * "< 0" never detects a failed request.
+        */
+       if (!request_mem_region(res_pmu.start, resource_size(&res_pmu),
+                               res_pmu.name) ||
+               !request_mem_region(res_cgu.start, resource_size(&res_cgu),
+                               res_cgu.name) ||
+               !request_mem_region(res_ebu.start, resource_size(&res_ebu),
+                               res_ebu.name))
+               pr_err("Failed to request core resources");
+
+       pmu_membase = ioremap_nocache(res_pmu.start, resource_size(&res_pmu));
+       ltq_cgu_membase = ioremap_nocache(res_cgu.start,
+                                               resource_size(&res_cgu));
+       ltq_ebu_membase = ioremap_nocache(res_ebu.start,
+                                               resource_size(&res_ebu));
+       if (!pmu_membase || !ltq_cgu_membase || !ltq_ebu_membase)
+               panic("Failed to remap core resources");
+
+       /* make sure to unprotect the memory region where flash is located */
+       ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
+
+       /* add our generic xway clocks */
+       clkdev_add_pmu("10000000.fpi", NULL, 0, PMU_FPI);
+       clkdev_add_pmu("1e100400.serial", NULL, 0, PMU_ASC0);
+       clkdev_add_pmu("1e100a00.gptu", NULL, 0, PMU_GPT);
+       clkdev_add_pmu("1e100bb0.stp", NULL, 0, PMU_STP);
+       clkdev_add_pmu("1e104100.dma", NULL, 0, PMU_DMA);
+       clkdev_add_pmu("1e100800.spi", NULL, 0, PMU_SPI);
+       clkdev_add_pmu("1e105300.ebu", NULL, 0, PMU_EBU);
+       clkdev_add_clkout();
+
+       /* add the soc dependent clocks */
+       if (!of_machine_is_compatible("lantiq,vr9"))
+               clkdev_add_pmu("1e180000.etop", NULL, 0, PMU_PPE);
+
+       if (!of_machine_is_compatible("lantiq,ase")) {
+               clkdev_add_pmu("1e100c00.serial", NULL, 0, PMU_ASC1);
+               clkdev_add_pci();
+       }
+
+       if (of_machine_is_compatible("lantiq,ase")) {
+               /* CGU_SYS bit 5 selects the 266 MHz CPU clock on Amazon-SE */
+               if (ltq_cgu_r32(CGU_SYS) & (1 << 5))
+                       clkdev_add_static(CLOCK_266M, CLOCK_133M, CLOCK_133M);
+               else
+                       clkdev_add_static(CLOCK_133M, CLOCK_133M, CLOCK_133M);
+               /* was terminated with a stray "," (comma operator) - use ";" */
+               clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY);
+               clkdev_add_pmu("1e180000.etop", "ephy", 0, PMU_EPHY);
+       } else if (of_machine_is_compatible("lantiq,vr9")) {
+               clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(),
+                               ltq_vr9_fpi_hz());
+               clkdev_add_pmu("1d900000.pcie", "phy", 1, PMU1_PCIE_PHY);
+               clkdev_add_pmu("1d900000.pcie", "bus", 0, PMU_PCIE_CLK);
+               clkdev_add_pmu("1d900000.pcie", "msi", 1, PMU1_PCIE_MSI);
+               clkdev_add_pmu("1d900000.pcie", "pdi", 1, PMU1_PCIE_PDI);
+               clkdev_add_pmu("1d900000.pcie", "ctl", 1, PMU1_PCIE_CTL);
+               clkdev_add_pmu("1d900000.pcie", "ahb", 0, PMU_AHBM | PMU_AHBS);
+       } else if (of_machine_is_compatible("lantiq,ar9")) {
+               clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
+                               ltq_ar9_fpi_hz());
+               clkdev_add_pmu("1e180000.etop", "switch", 0, PMU_SWITCH);
+       } else {
+               clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
+                               ltq_danube_fpi_hz());
+       }
+}
index 47037ec5589b88bca92df21c81a3a890f937bd36..44e69e7a4519110b3efd1e30a755a8ec3e0692ea 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/r4kcache.h>
+#include <asm/traps.h>
 #include <asm/mmu_context.h>
 #include <asm/war.h>
 
@@ -248,6 +249,11 @@ static void __cpuinit probe_octeon(void)
        }
 }
 
+static void  __cpuinit octeon_cache_error_setup(void)
+{
+       extern char except_vec2_octeon;
+       set_handler(0x100, &except_vec2_octeon, 0x80);
+}
 
 /**
  * Setup the Octeon cache flush routines
@@ -255,12 +261,6 @@ static void __cpuinit probe_octeon(void)
  */
 void __cpuinit octeon_cache_init(void)
 {
-       extern unsigned long ebase;
-       extern char except_vec2_octeon;
-
-       memcpy((void *)(ebase + 0x100), &except_vec2_octeon, 0x80);
-       octeon_flush_cache_sigtramp(ebase + 0x100);
-
        probe_octeon();
 
        shm_align_mask = PAGE_SIZE - 1;
@@ -280,6 +280,8 @@ void __cpuinit octeon_cache_init(void)
 
        build_clear_page();
        build_copy_page();
+
+       board_cache_error_setup = octeon_cache_error_setup;
 }
 
 /**
index bda8eb26ece74098ecaa49c2693ab40dd2bcb772..5109be96d98d099ec8509dd1de6a9048f5c4b82d 100644 (file)
@@ -32,7 +32,7 @@
 #include <asm/mmu_context.h>
 #include <asm/war.h>
 #include <asm/cacheflush.h> /* for run_uncached() */
-
+#include <asm/traps.h>
 
 /*
  * Special Variant of smp_call_function for use by cache functions:
@@ -1385,10 +1385,8 @@ static int __init setcoherentio(char *str)
 __setup("coherentio", setcoherentio);
 #endif
 
-void __cpuinit r4k_cache_init(void)
+static void __cpuinit r4k_cache_error_setup(void)
 {
-       extern void build_clear_page(void);
-       extern void build_copy_page(void);
        extern char __weak except_vec2_generic;
        extern char __weak except_vec2_sb1;
        struct cpuinfo_mips *c = &current_cpu_data;
@@ -1403,6 +1401,13 @@ void __cpuinit r4k_cache_init(void)
                set_uncached_handler(0x100, &except_vec2_generic, 0x80);
                break;
        }
+}
+
+void __cpuinit r4k_cache_init(void)
+{
+       extern void build_clear_page(void);
+       extern void build_copy_page(void);
+       struct cpuinfo_mips *c = &current_cpu_data;
 
        probe_pcache();
        setup_scache();
@@ -1465,4 +1470,5 @@ void __cpuinit r4k_cache_init(void)
        local_r4k___flush_cache_all(NULL);
 #endif
        coherency_setup();
+       board_cache_error_setup = r4k_cache_error_setup;
 }
index 29f2f13eb31c433a0a6a79208dabee4706fdcf3a..1208c280f77dbb0cc2596890dbccab3f1c45c3e0 100644 (file)
@@ -1,5 +1,3 @@
-ccflags-y := -Werror
-
 obj-$(CONFIG_OPROFILE) += oprofile.o
 
 DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
index 54759f1669d3a00aca9e07fb213842b397b4b800..baba3bcaa3c28100067a39d9e1a1132a5a8d02b4 100644 (file)
@@ -298,6 +298,11 @@ static void reset_counters(void *arg)
        }
 }
 
+static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
+{
+       return mipsxx_perfcount_handler();
+}
+
 static int __init mipsxx_init(void)
 {
        int counters;
@@ -374,6 +379,10 @@ static int __init mipsxx_init(void)
        save_perf_irq = perf_irq;
        perf_irq = mipsxx_perfcount_handler;
 
+       if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
+               return request_irq(cp0_perfcount_irq, mipsxx_perfcount_int,
+                       0, "Perfcounter", save_perf_irq);
+
        return 0;
 }
 
@@ -381,6 +390,9 @@ static void mipsxx_exit(void)
 {
        int counters = op_model_mipsxx_ops.num_counters;
 
+       if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
+               free_irq(cp0_perfcount_irq, save_perf_irq);
+
        counters = counters_per_cpu_to_total(counters);
        on_each_cpu(reset_counters, (void *)(long)counters, 1);
 
index c3ac4b086eb203738d2c2bfdbd32b61171ded9ab..c703f43a9914bee08452c9db536b53b82fc9f037 100644 (file)
@@ -19,7 +19,8 @@ obj-$(CONFIG_BCM47XX)         += pci-bcm47xx.o
 obj-$(CONFIG_BCM63XX)          += pci-bcm63xx.o fixup-bcm63xx.o \
                                        ops-bcm63xx.o
 obj-$(CONFIG_MIPS_ALCHEMY)     += pci-alchemy.o
-obj-$(CONFIG_SOC_AR724X)       += pci-ath724x.o
+obj-$(CONFIG_SOC_AR71XX)       += pci-ar71xx.o
+obj-$(CONFIG_PCI_AR724X)       += pci-ar724x.o
 
 #
 # These are still pretty much in the old state, watch, go blind.
@@ -41,7 +42,8 @@ obj-$(CONFIG_SIBYTE_SB1250)   += fixup-sb1250.o pci-sb1250.o
 obj-$(CONFIG_SIBYTE_BCM112X)   += fixup-sb1250.o pci-sb1250.o
 obj-$(CONFIG_SIBYTE_BCM1x80)   += pci-bcm1480.o pci-bcm1480ht.o
 obj-$(CONFIG_SNI_RM)           += fixup-sni.o ops-sni.o
-obj-$(CONFIG_SOC_XWAY)         += pci-lantiq.o ops-lantiq.o
+obj-$(CONFIG_LANTIQ)           += fixup-lantiq.o
+obj-$(CONFIG_PCI_LANTIQ)       += pci-lantiq.o ops-lantiq.o
 obj-$(CONFIG_TANBAC_TB0219)    += fixup-tb0219.o
 obj-$(CONFIG_TANBAC_TB0226)    += fixup-tb0226.o
 obj-$(CONFIG_TANBAC_TB0287)    += fixup-tb0287.o
diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c
new file mode 100644 (file)
index 0000000..6c829df
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+
+int (*ltq_pci_plat_arch_init)(struct pci_dev *dev) = NULL;
+int (*ltq_pci_plat_dev_init)(struct pci_dev *dev) = NULL;
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+       if (ltq_pci_plat_arch_init)
+               return ltq_pci_plat_arch_init(dev);
+
+       if (ltq_pci_plat_dev_init)
+               return ltq_pci_plat_dev_init(dev);
+
+       return 0;
+}
+
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+       struct of_irq dev_irq;
+       int irq;
+
+       if (of_irq_map_pci(dev, &dev_irq)) {
+               dev_err(&dev->dev, "trying to map irq for unknown slot:%d pin:%d\n",
+                       slot, pin);
+               return 0;
+       }
+       irq = irq_create_of_mapping(dev_irq.controller, dev_irq.specifier,
+                                       dev_irq.size);
+       dev_info(&dev->dev, "SLOT:%d PIN:%d IRQ:%d\n", slot, pin, irq);
+       return irq;
+}
index d657ee0bc131c8c3c26c1095b35a312c666ddd94..afd221122d222722d355c6723547ae3657853c5f 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/pci.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/export.h>
 
 #include <loongson.h>
 
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
new file mode 100644 (file)
index 0000000..1552522
--- /dev/null
@@ -0,0 +1,375 @@
+/*
+ *  Atheros AR71xx PCI host controller driver
+ *
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/pci.h>
+
+#define AR71XX_PCI_MEM_BASE    0x10000000
+#define AR71XX_PCI_MEM_SIZE    0x08000000
+
+#define AR71XX_PCI_WIN0_OFFS           0x10000000
+#define AR71XX_PCI_WIN1_OFFS           0x11000000
+#define AR71XX_PCI_WIN2_OFFS           0x12000000
+#define AR71XX_PCI_WIN3_OFFS           0x13000000
+#define AR71XX_PCI_WIN4_OFFS           0x14000000
+#define AR71XX_PCI_WIN5_OFFS           0x15000000
+#define AR71XX_PCI_WIN6_OFFS           0x16000000
+#define AR71XX_PCI_WIN7_OFFS           0x07000000
+
+#define AR71XX_PCI_CFG_BASE            \
+       (AR71XX_PCI_MEM_BASE + AR71XX_PCI_WIN7_OFFS + 0x10000)
+#define AR71XX_PCI_CFG_SIZE            0x100
+
+#define AR71XX_PCI_REG_CRP_AD_CBE      0x00
+#define AR71XX_PCI_REG_CRP_WRDATA      0x04
+#define AR71XX_PCI_REG_CRP_RDDATA      0x08
+#define AR71XX_PCI_REG_CFG_AD          0x0c
+#define AR71XX_PCI_REG_CFG_CBE         0x10
+#define AR71XX_PCI_REG_CFG_WRDATA      0x14
+#define AR71XX_PCI_REG_CFG_RDDATA      0x18
+#define AR71XX_PCI_REG_PCI_ERR         0x1c
+#define AR71XX_PCI_REG_PCI_ERR_ADDR    0x20
+#define AR71XX_PCI_REG_AHB_ERR         0x24
+#define AR71XX_PCI_REG_AHB_ERR_ADDR    0x28
+
+#define AR71XX_PCI_CRP_CMD_WRITE       0x00010000
+#define AR71XX_PCI_CRP_CMD_READ                0x00000000
+#define AR71XX_PCI_CFG_CMD_READ                0x0000000a
+#define AR71XX_PCI_CFG_CMD_WRITE       0x0000000b
+
+#define AR71XX_PCI_INT_CORE            BIT(4)
+#define AR71XX_PCI_INT_DEV2            BIT(2)
+#define AR71XX_PCI_INT_DEV1            BIT(1)
+#define AR71XX_PCI_INT_DEV0            BIT(0)
+
+#define AR71XX_PCI_IRQ_COUNT           5
+
+static DEFINE_SPINLOCK(ar71xx_pci_lock);
+static void __iomem *ar71xx_pcicfg_base;
+
+/* Byte lane enable bits */
+static const u8 ar71xx_pci_ble_table[4][4] = {
+       {0x0, 0xf, 0xf, 0xf},
+       {0xe, 0xd, 0xb, 0x7},
+       {0xc, 0xf, 0x3, 0xf},
+       {0xf, 0xf, 0xf, 0xf},
+};
+
+static const u32 ar71xx_pci_read_mask[8] = {
+       0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0
+};
+
+static inline u32 ar71xx_pci_get_ble(int where, int size, int local)
+{
+       u32 t;
+
+       t = ar71xx_pci_ble_table[size & 3][where & 3];
+       BUG_ON(t == 0xf);
+       t <<= (local) ? 20 : 4;
+
+       return t;
+}
+
+static inline u32 ar71xx_pci_bus_addr(struct pci_bus *bus, unsigned int devfn,
+                                     int where)
+{
+       u32 ret;
+
+       if (!bus->number) {
+               /* type 0 */
+               ret = (1 << PCI_SLOT(devfn)) | (PCI_FUNC(devfn) << 8) |
+                     (where & ~3);
+       } else {
+               /* type 1 */
+               ret = (bus->number << 16) | (PCI_SLOT(devfn) << 11) |
+                     (PCI_FUNC(devfn) << 8) | (where & ~3) | 1;
+       }
+
+       return ret;
+}
+
+static int ar71xx_pci_check_error(int quiet)
+{
+       void __iomem *base = ar71xx_pcicfg_base;
+       u32 pci_err;
+       u32 ahb_err;
+
+       pci_err = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR) & 3;
+       if (pci_err) {
+               if (!quiet) {
+                       u32 addr;
+
+                       addr = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR_ADDR);
+                       pr_crit("ar71xx: %s bus error %d at addr 0x%x\n",
+                               "PCI", pci_err, addr);
+               }
+
+               /* clear PCI error status */
+               __raw_writel(pci_err, base + AR71XX_PCI_REG_PCI_ERR);
+       }
+
+       ahb_err = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR) & 1;
+       if (ahb_err) {
+               if (!quiet) {
+                       u32 addr;
+
+                       addr = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR_ADDR);
+                       pr_crit("ar71xx: %s bus error %d at addr 0x%x\n",
+                               "AHB", ahb_err, addr);
+               }
+
+               /* clear AHB error status */
+               __raw_writel(ahb_err, base + AR71XX_PCI_REG_AHB_ERR);
+       }
+
+       return !!(ahb_err | pci_err);
+}
+
+static inline void ar71xx_pci_local_write(int where, int size, u32 value)
+{
+       void __iomem *base = ar71xx_pcicfg_base;
+       u32 ad_cbe;
+
+       value = value << (8 * (where & 3));
+
+       ad_cbe = AR71XX_PCI_CRP_CMD_WRITE | (where & ~3);
+       ad_cbe |= ar71xx_pci_get_ble(where, size, 1);
+
+       __raw_writel(ad_cbe, base + AR71XX_PCI_REG_CRP_AD_CBE);
+       __raw_writel(value, base + AR71XX_PCI_REG_CRP_WRDATA);
+}
+
+static inline int ar71xx_pci_set_cfgaddr(struct pci_bus *bus,
+                                        unsigned int devfn,
+                                        int where, int size, u32 cmd)
+{
+       void __iomem *base = ar71xx_pcicfg_base;
+       u32 addr;
+
+       addr = ar71xx_pci_bus_addr(bus, devfn, where);
+
+       __raw_writel(addr, base + AR71XX_PCI_REG_CFG_AD);
+       __raw_writel(cmd | ar71xx_pci_get_ble(where, size, 0),
+                    base + AR71XX_PCI_REG_CFG_CBE);
+
+       return ar71xx_pci_check_error(1);
+}
+
+static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+                                 int where, int size, u32 *value)
+{
+       void __iomem *base = ar71xx_pcicfg_base;
+       unsigned long flags;
+       u32 data;
+       int err;
+       int ret;
+
+       ret = PCIBIOS_SUCCESSFUL;
+       data = ~0;
+
+       spin_lock_irqsave(&ar71xx_pci_lock, flags);
+
+       err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
+                                    AR71XX_PCI_CFG_CMD_READ);
+       if (err)
+               ret = PCIBIOS_DEVICE_NOT_FOUND;
+       else
+               data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA);
+
+       spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
+
+       *value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7];
+
+       return ret;
+}
+
+static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+                                  int where, int size, u32 value)
+{
+       void __iomem *base = ar71xx_pcicfg_base;
+       unsigned long flags;
+       int err;
+       int ret;
+
+       value = value << (8 * (where & 3));
+       ret = PCIBIOS_SUCCESSFUL;
+
+       spin_lock_irqsave(&ar71xx_pci_lock, flags);
+
+       err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
+                                    AR71XX_PCI_CFG_CMD_WRITE);
+       if (err)
+               ret = PCIBIOS_DEVICE_NOT_FOUND;
+       else
+               __raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA);
+
+       spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
+
+       return ret;
+}
+
+static struct pci_ops ar71xx_pci_ops = {
+       .read   = ar71xx_pci_read_config,
+       .write  = ar71xx_pci_write_config,
+};
+
+static struct resource ar71xx_pci_io_resource = {
+       .name           = "PCI IO space",
+       .start          = 0,
+       .end            = 0,
+       .flags          = IORESOURCE_IO,
+};
+
+static struct resource ar71xx_pci_mem_resource = {
+       .name           = "PCI memory space",
+       .start          = AR71XX_PCI_MEM_BASE,
+       .end            = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1,
+       .flags          = IORESOURCE_MEM
+};
+
+static struct pci_controller ar71xx_pci_controller = {
+       .pci_ops        = &ar71xx_pci_ops,
+       .mem_resource   = &ar71xx_pci_mem_resource,
+       .io_resource    = &ar71xx_pci_io_resource,
+};
+
+/*
+ * Chained handler for the CPU IP2 line: demultiplex the pending,
+ * enabled PCI interrupt sources (DEV0-DEV2 and CORE) to their virtual
+ * irqs. Only one source is dispatched per invocation (if/else chain).
+ * NOTE(review): bit 3 / ATH79_PCI_IRQ(3) is never dispatched here -
+ * presumably that source is unused on AR71xx; confirm against the
+ * datasheet.
+ */
+static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+       void __iomem *base = ath79_reset_base;
+       u32 pending;
+
+       /* only consider sources that are both pending and enabled */
+       pending = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_STATUS) &
+                 __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+
+       if (pending & AR71XX_PCI_INT_DEV0)
+               generic_handle_irq(ATH79_PCI_IRQ(0));
+
+       else if (pending & AR71XX_PCI_INT_DEV1)
+               generic_handle_irq(ATH79_PCI_IRQ(1));
+
+       else if (pending & AR71XX_PCI_INT_DEV2)
+               generic_handle_irq(ATH79_PCI_IRQ(2));
+
+       else if (pending & AR71XX_PCI_INT_CORE)
+               generic_handle_irq(ATH79_PCI_IRQ(4));
+
+       else
+               spurious_interrupt();
+}
+
+static void ar71xx_pci_irq_unmask(struct irq_data *d)
+{
+       unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
+       void __iomem *base = ath79_reset_base;
+       u32 t;
+
+       t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+       __raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+
+       /* flush write */
+       __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+}
+
+static void ar71xx_pci_irq_mask(struct irq_data *d)
+{
+       unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
+       void __iomem *base = ath79_reset_base;
+       u32 t;
+
+       t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+       __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+
+       /* flush write */
+       __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+}
+
+static struct irq_chip ar71xx_pci_irq_chip = {
+       .name           = "AR71XX PCI",
+       .irq_mask       = ar71xx_pci_irq_mask,
+       .irq_unmask     = ar71xx_pci_irq_unmask,
+       .irq_mask_ack   = ar71xx_pci_irq_mask,
+};
+
+static __init void ar71xx_pci_irq_init(void)
+{
+       void __iomem *base = ath79_reset_base;
+       int i;
+
+       __raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+       __raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_STATUS);
+
+       BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR71XX_PCI_IRQ_COUNT);
+
+       for (i = ATH79_PCI_IRQ_BASE;
+            i < ATH79_PCI_IRQ_BASE + AR71XX_PCI_IRQ_COUNT; i++)
+               irq_set_chip_and_handler(i, &ar71xx_pci_irq_chip,
+                                        handle_level_irq);
+
+       irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar71xx_pci_irq_handler);
+}
+
+static __init void ar71xx_pci_reset(void)
+{
+       void __iomem *ddr_base = ath79_ddr_base;
+
+       ath79_device_reset_set(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE);
+       mdelay(100);
+
+       ath79_device_reset_clear(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE);
+       mdelay(100);
+
+       __raw_writel(AR71XX_PCI_WIN0_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN0);
+       __raw_writel(AR71XX_PCI_WIN1_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN1);
+       __raw_writel(AR71XX_PCI_WIN2_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN2);
+       __raw_writel(AR71XX_PCI_WIN3_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN3);
+       __raw_writel(AR71XX_PCI_WIN4_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN4);
+       __raw_writel(AR71XX_PCI_WIN5_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN5);
+       __raw_writel(AR71XX_PCI_WIN6_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN6);
+       __raw_writel(AR71XX_PCI_WIN7_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN7);
+
+       mdelay(100);
+}
+
+__init int ar71xx_pcibios_init(void)
+{
+       u32 t;
+
+       ar71xx_pcicfg_base = ioremap(AR71XX_PCI_CFG_BASE, AR71XX_PCI_CFG_SIZE);
+       if (ar71xx_pcicfg_base == NULL)
+               return -ENOMEM;
+
+       ar71xx_pci_reset();
+
+       /* setup COMMAND register */
+       t = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE
+         | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
+       ar71xx_pci_local_write(PCI_COMMAND, 4, t);
+
+       /* clear bus errors */
+       ar71xx_pci_check_error(1);
+
+       ar71xx_pci_irq_init();
+
+       register_pci_controller(&ar71xx_pci_controller);
+
+       return 0;
+}
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
new file mode 100644 (file)
index 0000000..414a745
--- /dev/null
@@ -0,0 +1,292 @@
+/*
+ *  Atheros AR724X PCI host controller driver
+ *
+ *  Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ *  Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/pci.h>
+
+#define AR724X_PCI_CFG_BASE    0x14000000
+#define AR724X_PCI_CFG_SIZE    0x1000
+#define AR724X_PCI_CTRL_BASE   (AR71XX_APB_BASE + 0x000f0000)
+#define AR724X_PCI_CTRL_SIZE   0x100
+
+#define AR724X_PCI_MEM_BASE    0x10000000
+#define AR724X_PCI_MEM_SIZE    0x08000000
+
+#define AR724X_PCI_REG_INT_STATUS      0x4c
+#define AR724X_PCI_REG_INT_MASK                0x50
+
+#define AR724X_PCI_INT_DEV0            BIT(14)
+
+#define AR724X_PCI_IRQ_COUNT           1
+
+#define AR7240_BAR0_WAR_VALUE  0xffff
+
+static DEFINE_SPINLOCK(ar724x_pci_lock);
+static void __iomem *ar724x_pci_devcfg_base;
+static void __iomem *ar724x_pci_ctrl_base;
+
+static u32 ar724x_pci_bar0_value;
+static bool ar724x_pci_bar0_is_cached;
+
+static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
+                           int size, uint32_t *value)
+{
+       unsigned long flags;
+       void __iomem *base;
+       u32 data;
+
+       if (devfn)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       base = ar724x_pci_devcfg_base;
+
+       spin_lock_irqsave(&ar724x_pci_lock, flags);
+       data = __raw_readl(base + (where & ~3));
+
+       switch (size) {
+       case 1:
+               if (where & 1)
+                       data >>= 8;
+               if (where & 2)
+                       data >>= 16;
+               data &= 0xff;
+               break;
+       case 2:
+               if (where & 2)
+                       data >>= 16;
+               data &= 0xffff;
+               break;
+       case 4:
+               break;
+       default:
+               spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+               return PCIBIOS_BAD_REGISTER_NUMBER;
+       }
+
+       spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+       if (where == PCI_BASE_ADDRESS_0 && size == 4 &&
+           ar724x_pci_bar0_is_cached) {
+               /* use the cached value */
+               *value = ar724x_pci_bar0_value;
+       } else {
+               *value = data;
+       }
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
+                            int size, uint32_t value)
+{
+       unsigned long flags;
+       void __iomem *base;
+       u32 data;
+       int s;
+
+       if (devfn)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       if (soc_is_ar7240() && where == PCI_BASE_ADDRESS_0 && size == 4) {
+               if (value != 0xffffffff) {
+                       /*
+                        * WAR for a hw issue. If the BAR0 register of the
+                        * device is set to the proper base address, the
+                        * memory space of the device is not accessible.
+                        *
+                        * Cache the intended value so it can be read back,
+                        * and write a SoC specific constant value to the
+                        * BAR0 register in order to make the device memory
+                        * accessible.
+                        */
+                       ar724x_pci_bar0_is_cached = true;
+                       ar724x_pci_bar0_value = value;
+
+                       value = AR7240_BAR0_WAR_VALUE;
+               } else {
+                       ar724x_pci_bar0_is_cached = false;
+               }
+       }
+
+       base = ar724x_pci_devcfg_base;
+
+       spin_lock_irqsave(&ar724x_pci_lock, flags);
+       data = __raw_readl(base + (where & ~3));
+
+       switch (size) {
+       case 1:
+               s = ((where & 3) * 8);
+               data &= ~(0xff << s);
+               data |= ((value & 0xff) << s);
+               break;
+       case 2:
+               s = ((where & 2) * 8);
+               data &= ~(0xffff << s);
+               data |= ((value & 0xffff) << s);
+               break;
+       case 4:
+               data = value;
+               break;
+       default:
+               spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+               return PCIBIOS_BAD_REGISTER_NUMBER;
+       }
+
+       __raw_writel(data, base + (where & ~3));
+       /* flush write */
+       __raw_readl(base + (where & ~3));
+       spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops ar724x_pci_ops = {
+       .read   = ar724x_pci_read,
+       .write  = ar724x_pci_write,
+};
+
+static struct resource ar724x_io_resource = {
+       .name   = "PCI IO space",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_IO,
+};
+
+static struct resource ar724x_mem_resource = {
+       .name   = "PCI memory space",
+       .start  = AR724X_PCI_MEM_BASE,
+       .end    = AR724X_PCI_MEM_BASE + AR724X_PCI_MEM_SIZE - 1,
+       .flags  = IORESOURCE_MEM,
+};
+
+static struct pci_controller ar724x_pci_controller = {
+       .pci_ops        = &ar724x_pci_ops,
+       .io_resource    = &ar724x_io_resource,
+       .mem_resource   = &ar724x_mem_resource,
+};
+
+static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+       void __iomem *base;
+       u32 pending;
+
+       base = ar724x_pci_ctrl_base;
+
+       pending = __raw_readl(base + AR724X_PCI_REG_INT_STATUS) &
+                 __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+
+       if (pending & AR724X_PCI_INT_DEV0)
+               generic_handle_irq(ATH79_PCI_IRQ(0));
+
+       else
+               spurious_interrupt();
+}
+
+static void ar724x_pci_irq_unmask(struct irq_data *d)
+{
+       void __iomem *base;
+       u32 t;
+
+       base = ar724x_pci_ctrl_base;
+
+       switch (d->irq) {
+       case ATH79_PCI_IRQ(0):
+               t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+               __raw_writel(t | AR724X_PCI_INT_DEV0,
+                            base + AR724X_PCI_REG_INT_MASK);
+               /* flush write */
+               __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+       }
+}
+
+static void ar724x_pci_irq_mask(struct irq_data *d)
+{
+       void __iomem *base;
+       u32 t;
+
+       base = ar724x_pci_ctrl_base;
+
+       switch (d->irq) {
+       case ATH79_PCI_IRQ(0):
+               t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+               __raw_writel(t & ~AR724X_PCI_INT_DEV0,
+                            base + AR724X_PCI_REG_INT_MASK);
+
+               /* flush write */
+               __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+
+               t = __raw_readl(base + AR724X_PCI_REG_INT_STATUS);
+               __raw_writel(t | AR724X_PCI_INT_DEV0,
+                            base + AR724X_PCI_REG_INT_STATUS);
+
+               /* flush write */
+               __raw_readl(base + AR724X_PCI_REG_INT_STATUS);
+       }
+}
+
+static struct irq_chip ar724x_pci_irq_chip = {
+       .name           = "AR724X PCI ",
+       .irq_mask       = ar724x_pci_irq_mask,
+       .irq_unmask     = ar724x_pci_irq_unmask,
+       .irq_mask_ack   = ar724x_pci_irq_mask,
+};
+
+static void __init ar724x_pci_irq_init(int irq)
+{
+       void __iomem *base;
+       int i;
+
+       base = ar724x_pci_ctrl_base;
+
+       __raw_writel(0, base + AR724X_PCI_REG_INT_MASK);
+       __raw_writel(0, base + AR724X_PCI_REG_INT_STATUS);
+
+       BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR724X_PCI_IRQ_COUNT);
+
+       for (i = ATH79_PCI_IRQ_BASE;
+            i < ATH79_PCI_IRQ_BASE + AR724X_PCI_IRQ_COUNT; i++)
+               irq_set_chip_and_handler(i, &ar724x_pci_irq_chip,
+                                        handle_level_irq);
+
+       irq_set_chained_handler(irq, ar724x_pci_irq_handler);
+}
+
+int __init ar724x_pcibios_init(int irq)
+{
+       int ret;
+
+       ret = -ENOMEM;
+
+       ar724x_pci_devcfg_base = ioremap(AR724X_PCI_CFG_BASE,
+                                        AR724X_PCI_CFG_SIZE);
+       if (ar724x_pci_devcfg_base == NULL)
+               goto err;
+
+       ar724x_pci_ctrl_base = ioremap(AR724X_PCI_CTRL_BASE,
+                                      AR724X_PCI_CTRL_SIZE);
+       if (ar724x_pci_ctrl_base == NULL)
+               goto err_unmap_devcfg;
+
+       ar724x_pci_irq_init(irq);
+       register_pci_controller(&ar724x_pci_controller);
+
+       return PCIBIOS_SUCCESSFUL;
+
+err_unmap_devcfg:
+       iounmap(ar724x_pci_devcfg_base);
+err:
+       return ret;
+}
diff --git a/arch/mips/pci/pci-ath724x.c b/arch/mips/pci/pci-ath724x.c
deleted file mode 100644 (file)
index a4dd24a..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- *  Atheros 724x PCI support
- *
- *  Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- */
-
-#include <linux/pci.h>
-#include <asm/mach-ath79/pci-ath724x.h>
-
-#define reg_read(_phys)                (*(unsigned int *) KSEG1ADDR(_phys))
-#define reg_write(_phys, _val) ((*(unsigned int *) KSEG1ADDR(_phys)) = (_val))
-
-#define ATH724X_PCI_DEV_BASE   0x14000000
-#define ATH724X_PCI_MEM_BASE   0x10000000
-#define ATH724X_PCI_MEM_SIZE   0x08000000
-
-static DEFINE_SPINLOCK(ath724x_pci_lock);
-static struct ath724x_pci_data *pci_data;
-static int pci_data_size;
-
-static int ath724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
-                           int size, uint32_t *value)
-{
-       unsigned long flags, addr, tval, mask;
-
-       if (devfn)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       if (where & (size - 1))
-               return PCIBIOS_BAD_REGISTER_NUMBER;
-
-       spin_lock_irqsave(&ath724x_pci_lock, flags);
-
-       switch (size) {
-       case 1:
-               addr = where & ~3;
-               mask = 0xff000000 >> ((where % 4) * 8);
-               tval = reg_read(ATH724X_PCI_DEV_BASE + addr);
-               tval = tval & ~mask;
-               *value = (tval >> ((4 - (where % 4))*8));
-               break;
-       case 2:
-               addr = where & ~3;
-               mask = 0xffff0000 >> ((where % 4)*8);
-               tval = reg_read(ATH724X_PCI_DEV_BASE + addr);
-               tval = tval & ~mask;
-               *value = (tval >> ((4 - (where % 4))*8));
-               break;
-       case 4:
-               *value = reg_read(ATH724X_PCI_DEV_BASE + where);
-               break;
-       default:
-               spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
-               return PCIBIOS_BAD_REGISTER_NUMBER;
-       }
-
-       spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int ath724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
-                            int size, uint32_t value)
-{
-       unsigned long flags, tval, addr, mask;
-
-       if (devfn)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       if (where & (size - 1))
-               return PCIBIOS_BAD_REGISTER_NUMBER;
-
-       spin_lock_irqsave(&ath724x_pci_lock, flags);
-
-       switch (size) {
-       case 1:
-               addr = (ATH724X_PCI_DEV_BASE + where) & ~3;
-               mask = 0xff000000 >> ((where % 4)*8);
-               tval = reg_read(addr);
-               tval = tval & ~mask;
-               tval |= (value << ((4 - (where % 4))*8)) & mask;
-               reg_write(addr, tval);
-               break;
-       case 2:
-               addr = (ATH724X_PCI_DEV_BASE + where) & ~3;
-               mask = 0xffff0000 >> ((where % 4)*8);
-               tval = reg_read(addr);
-               tval = tval & ~mask;
-               tval |= (value << ((4 - (where % 4))*8)) & mask;
-               reg_write(addr, tval);
-               break;
-       case 4:
-               reg_write((ATH724X_PCI_DEV_BASE + where), value);
-               break;
-       default:
-               spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
-               return PCIBIOS_BAD_REGISTER_NUMBER;
-       }
-
-       spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops ath724x_pci_ops = {
-       .read   = ath724x_pci_read,
-       .write  = ath724x_pci_write,
-};
-
-static struct resource ath724x_io_resource = {
-       .name   = "PCI IO space",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_IO,
-};
-
-static struct resource ath724x_mem_resource = {
-       .name   = "PCI memory space",
-       .start  = ATH724X_PCI_MEM_BASE,
-       .end    = ATH724X_PCI_MEM_BASE + ATH724X_PCI_MEM_SIZE - 1,
-       .flags  = IORESOURCE_MEM,
-};
-
-static struct pci_controller ath724x_pci_controller = {
-       .pci_ops        = &ath724x_pci_ops,
-       .io_resource    = &ath724x_io_resource,
-       .mem_resource   = &ath724x_mem_resource,
-};
-
-void ath724x_pci_add_data(struct ath724x_pci_data *data, int size)
-{
-       pci_data        = data;
-       pci_data_size   = size;
-}
-
-int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
-{
-       unsigned int devfn = dev->devfn;
-       int irq = -1;
-
-       if (devfn > pci_data_size - 1)
-               return irq;
-
-       irq = pci_data[devfn].irq;
-
-       return irq;
-}
-
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
-       unsigned int devfn = dev->devfn;
-
-       if (devfn > pci_data_size - 1)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       dev->dev.platform_data = pci_data[devfn].pdata;
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int __init ath724x_pcibios_init(void)
-{
-       register_pci_controller(&ath724x_pci_controller);
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
-arch_initcall(ath724x_pcibios_init);
index 030c77e7926e5395111913da53ef884e5ac64899..ea453532a33c6dfc0eeb49659b2dc9036494713a 100644 (file)
 #include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
 
 #include <asm/pci.h>
 #include <asm/gpio.h>
 
 #include <lantiq_soc.h>
 #include <lantiq_irq.h>
-#include <lantiq_platform.h>
 
 #include "pci-lantiq.h"
 
-#define LTQ_PCI_CFG_BASE               0x17000000
-#define LTQ_PCI_CFG_SIZE               0x00008000
-#define LTQ_PCI_MEM_BASE               0x18000000
-#define LTQ_PCI_MEM_SIZE               0x02000000
-#define LTQ_PCI_IO_BASE                        0x1AE00000
-#define LTQ_PCI_IO_SIZE                        0x00200000
-
 #define PCI_CR_FCI_ADDR_MAP0           0x00C0
 #define PCI_CR_FCI_ADDR_MAP1           0x00C4
 #define PCI_CR_FCI_ADDR_MAP2           0x00C8
 #define ltq_pci_cfg_w32(x, y)  ltq_w32((x), ltq_pci_mapped_cfg + (y))
 #define ltq_pci_cfg_r32(x)     ltq_r32(ltq_pci_mapped_cfg + (x))
 
-struct ltq_pci_gpio_map {
-       int pin;
-       int alt0;
-       int alt1;
-       int dir;
-       char *name;
-};
-
-/* the pci core can make use of the following gpios */
-static struct ltq_pci_gpio_map ltq_pci_gpio_map[] = {
-       { 0, 1, 0, 0, "pci-exin0" },
-       { 1, 1, 0, 0, "pci-exin1" },
-       { 2, 1, 0, 0, "pci-exin2" },
-       { 39, 1, 0, 0, "pci-exin3" },
-       { 10, 1, 0, 0, "pci-exin4" },
-       { 9, 1, 0, 0, "pci-exin5" },
-       { 30, 1, 0, 1, "pci-gnt1" },
-       { 23, 1, 0, 1, "pci-gnt2" },
-       { 19, 1, 0, 1, "pci-gnt3" },
-       { 38, 1, 0, 1, "pci-gnt4" },
-       { 29, 1, 0, 0, "pci-req1" },
-       { 31, 1, 0, 0, "pci-req2" },
-       { 3, 1, 0, 0, "pci-req3" },
-       { 37, 1, 0, 0, "pci-req4" },
-};
-
 __iomem void *ltq_pci_mapped_cfg;
 static __iomem void *ltq_pci_membase;
 
-int (*ltqpci_plat_dev_init)(struct pci_dev *dev) = NULL;
-
-/* Since the PCI REQ pins can be reused for other functionality, make it
-   possible to exclude those from interpretation by the PCI controller */
-static int ltq_pci_req_mask = 0xf;
-
-static int *ltq_pci_irq_map;
-
-struct pci_ops ltq_pci_ops = {
+static int reset_gpio;
+static struct clk *clk_pci, *clk_external;
+static struct resource pci_io_resource;
+static struct resource pci_mem_resource;
+static struct pci_ops pci_ops = {
        .read   = ltq_pci_read_config_dword,
        .write  = ltq_pci_write_config_dword
 };
 
-static struct resource pci_io_resource = {
-       .name   = "pci io space",
-       .start  = LTQ_PCI_IO_BASE,
-       .end    = LTQ_PCI_IO_BASE + LTQ_PCI_IO_SIZE - 1,
-       .flags  = IORESOURCE_IO
-};
-
-static struct resource pci_mem_resource = {
-       .name   = "pci memory space",
-       .start  = LTQ_PCI_MEM_BASE,
-       .end    = LTQ_PCI_MEM_BASE + LTQ_PCI_MEM_SIZE - 1,
-       .flags  = IORESOURCE_MEM
-};
-
-static struct pci_controller ltq_pci_controller = {
-       .pci_ops        = &ltq_pci_ops,
+static struct pci_controller pci_controller = {
+       .pci_ops        = &pci_ops,
        .mem_resource   = &pci_mem_resource,
        .mem_offset     = 0x00000000UL,
        .io_resource    = &pci_io_resource,
        .io_offset      = 0x00000000UL,
 };
 
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
-       if (ltqpci_plat_dev_init)
-               return ltqpci_plat_dev_init(dev);
-
-       return 0;
-}
-
-static u32 ltq_calc_bar11mask(void)
+static inline u32 ltq_calc_bar11mask(void)
 {
        u32 mem, bar11mask;
 
@@ -151,48 +95,42 @@ static u32 ltq_calc_bar11mask(void)
        return bar11mask;
 }
 
-static void ltq_pci_setup_gpio(int gpio)
-{
-       int i;
-       for (i = 0; i < ARRAY_SIZE(ltq_pci_gpio_map); i++) {
-               if (gpio & (1 << i)) {
-                       ltq_gpio_request(ltq_pci_gpio_map[i].pin,
-                               ltq_pci_gpio_map[i].alt0,
-                               ltq_pci_gpio_map[i].alt1,
-                               ltq_pci_gpio_map[i].dir,
-                               ltq_pci_gpio_map[i].name);
-               }
-       }
-       ltq_gpio_request(21, 0, 0, 1, "pci-reset");
-       ltq_pci_req_mask = (gpio >> PCI_REQ_SHIFT) & PCI_REQ_MASK;
-}
-
-static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
+static int __devinit ltq_pci_startup(struct platform_device *pdev)
 {
+       struct device_node *node = pdev->dev.of_node;
+       const __be32 *req_mask, *bus_clk;
        u32 temp_buffer;
 
-       /* set clock to 33Mhz */
-       if (ltq_is_ar9()) {
-               ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0x1f00000, LTQ_CGU_IFCCR);
-               ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0xe00000, LTQ_CGU_IFCCR);
-       } else {
-               ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR);
-               ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR);
+       /* get our clocks */
+       clk_pci = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(clk_pci)) {
+               dev_err(&pdev->dev, "failed to get pci clock\n");
+               return PTR_ERR(clk_pci);
        }
 
-       /* external or internal clock ? */
-       if (conf->clock) {
-               ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~(1 << 16),
-                       LTQ_CGU_IFCCR);
-               ltq_cgu_w32((1 << 30), LTQ_CGU_PCICR);
-       } else {
-               ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | (1 << 16),
-                       LTQ_CGU_IFCCR);
-               ltq_cgu_w32((1 << 31) | (1 << 30), LTQ_CGU_PCICR);
+       clk_external = clk_get(&pdev->dev, "external");
+       if (IS_ERR(clk_external)) {
+               clk_put(clk_pci);
+               dev_err(&pdev->dev, "failed to get external pci clock\n");
+               return PTR_ERR(clk_external);
        }
 
-       /* setup pci clock and gpis used by pci */
-       ltq_pci_setup_gpio(conf->gpio);
+       /* read the bus speed that we want */
+       bus_clk = of_get_property(node, "lantiq,bus-clock", NULL);
+       if (bus_clk)
+               clk_set_rate(clk_pci, *bus_clk);
+
+       /* and enable the clocks */
+       clk_enable(clk_pci);
+       if (of_find_property(node, "lantiq,external-clock", NULL))
+               clk_enable(clk_external);
+       else
+               clk_disable(clk_external);
+
+       /* setup reset gpio used by pci */
+       reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
+       if (reset_gpio > 0)
+               devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
 
        /* enable auto-switching between PCI and EBU */
        ltq_pci_w32(0xa, PCI_CR_CLK_CTRL);
@@ -205,7 +143,12 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
 
        /* enable external 2 PCI masters */
        temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB);
-       temp_buffer &= (~(ltq_pci_req_mask << 16));
+       /* setup the request mask */
+       req_mask = of_get_property(node, "req-mask", NULL);
+       if (req_mask)
+               temp_buffer &= ~((*req_mask & 0xf) << 16);
+       else
+               temp_buffer &= ~0xf0000;
        /* enable internal arbiter */
        temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT);
+       /* enable internal PCI master request */
@@ -249,47 +192,55 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
        ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
 
        /* toggle reset pin */
-       __gpio_set_value(21, 0);
-       wmb();
-       mdelay(1);
-       __gpio_set_value(21, 1);
-       return 0;
-}
-
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
-       if (ltq_pci_irq_map[slot])
-               return ltq_pci_irq_map[slot];
-       printk(KERN_ERR "lq_pci: trying to map irq for unknown slot %d\n",
-               slot);
-
+       if (reset_gpio > 0) {
+               __gpio_set_value(reset_gpio, 0);
+               wmb();
+               mdelay(1);
+               __gpio_set_value(reset_gpio, 1);
+       }
        return 0;
 }
 
 static int __devinit ltq_pci_probe(struct platform_device *pdev)
 {
-       struct ltq_pci_data *ltq_pci_data =
-               (struct ltq_pci_data *) pdev->dev.platform_data;
+       struct resource *res_cfg, *res_bridge;
 
        pci_clear_flags(PCI_PROBE_ONLY);
-       ltq_pci_irq_map = ltq_pci_data->irq;
-       ltq_pci_membase = ioremap_nocache(PCI_CR_BASE_ADDR, PCI_CR_SIZE);
-       ltq_pci_mapped_cfg =
-               ioremap_nocache(LTQ_PCI_CFG_BASE, LTQ_PCI_CFG_BASE);
-       ltq_pci_controller.io_map_base =
-               (unsigned long)ioremap(LTQ_PCI_IO_BASE, LTQ_PCI_IO_SIZE - 1);
-       ltq_pci_startup(ltq_pci_data);
-       register_pci_controller(&ltq_pci_controller);
 
+       res_cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       res_bridge = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!res_cfg || !res_bridge) {
+               dev_err(&pdev->dev, "missing memory resources\n");
+               return -EINVAL;
+       }
+
+       ltq_pci_membase = devm_request_and_ioremap(&pdev->dev, res_bridge);
+       ltq_pci_mapped_cfg = devm_request_and_ioremap(&pdev->dev, res_cfg);
+
+       if (!ltq_pci_membase || !ltq_pci_mapped_cfg) {
+               dev_err(&pdev->dev, "failed to remap resources\n");
+               return -ENOMEM;
+       }
+
+       ltq_pci_startup(pdev);
+
+       pci_load_of_ranges(&pci_controller, pdev->dev.of_node);
+       register_pci_controller(&pci_controller);
        return 0;
 }
 
-static struct platform_driver
-ltq_pci_driver = {
+static const struct of_device_id ltq_pci_match[] = {
+       { .compatible = "lantiq,pci-xway" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, ltq_pci_match);
+
+static struct platform_driver ltq_pci_driver = {
        .probe = ltq_pci_probe,
        .driver = {
-               .name = "ltq_pci",
+               .name = "pci-xway",
                .owner = THIS_MODULE,
+               .of_match_table = ltq_pci_match,
        },
 };
 
@@ -297,7 +248,7 @@ int __init pcibios_init(void)
 {
        int ret = platform_driver_register(&ltq_pci_driver);
        if (ret)
-               printk(KERN_INFO "ltq_pci: Error registering platfom driver!");
+               pr_info("pci-xway: Error registering platform driver!");
        return ret;
 }
 
index 0514866fa9255f13f8cc2578c840b22ad7626482..271e8c4a54c7f2304101020d8d8bb0ab50043437 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/pci.h>
+#include <linux/of_address.h>
 
 #include <asm/cpu-info.h>
 
@@ -114,9 +115,63 @@ static void __devinit pcibios_scanbus(struct pci_controller *hose)
                        pci_bus_assign_resources(bus);
                        pci_enable_bridges(bus);
                }
+               bus->dev.of_node = hose->of_node;
        }
 }
 
+#ifdef CONFIG_OF
+void __devinit pci_load_of_ranges(struct pci_controller *hose,
+                               struct device_node *node)
+{
+       const __be32 *ranges;
+       int rlen;
+       int pna = of_n_addr_cells(node);
+       int np = pna + 5;
+
+       pr_info("PCI host bridge %s ranges:\n", node->full_name);
+       ranges = of_get_property(node, "ranges", &rlen);
+       if (ranges == NULL)
+               return;
+       hose->of_node = node;
+
+       while ((rlen -= np * 4) >= 0) {
+               u32 pci_space;
+               struct resource *res = NULL;
+               u64 addr, size;
+
+               pci_space = be32_to_cpup(&ranges[0]);
+               addr = of_translate_address(node, ranges + 3);
+               size = of_read_number(ranges + pna + 3, 2);
+               ranges += np;
+               switch ((pci_space >> 24) & 0x3) {
+               case 1:         /* PCI IO space */
+                       pr_info("  IO 0x%016llx..0x%016llx\n",
+                                       addr, addr + size - 1);
+                       hose->io_map_base =
+                               (unsigned long)ioremap(addr, size);
+                       res = hose->io_resource;
+                       res->flags = IORESOURCE_IO;
+                       break;
+               case 2:         /* PCI Memory space */
+               case 3:         /* PCI 64 bits Memory space */
+                       pr_info(" MEM 0x%016llx..0x%016llx\n",
+                                       addr, addr + size - 1);
+                       res = hose->mem_resource;
+                       res->flags = IORESOURCE_MEM;
+                       break;
+               }
+               if (res != NULL) {
+                       res->start = addr;
+                       res->name = node->full_name;
+                       res->end = res->start + size - 1;
+                       res->parent = NULL;
+                       res->sibling = NULL;
+                       res->child = NULL;
+               }
+       }
+}
+#endif
+
 static DEFINE_MUTEX(pci_scan_mutex);
 
 void __devinit register_pci_controller(struct pci_controller *hose)
index 02f5fb94ea2808a5bf580679421598e0659961d2..5af95ec3319d70e6a8e5d7f28d1ad516c64f08bf 100644 (file)
@@ -5,5 +5,3 @@
 obj-y    += irq.o prom.o py-console.o setup.o
 
 obj-$(CONFIG_SMP)              += smp.o
-
-ccflags-y := -Werror
index 3498ac9c35af026a4f8205ccef17223b9024cdc9..b6472fc88a991cb3a53aa3d8cae9e2077ace4f29 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/bcd.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/bootmem.h>
index 87167dcc79fa24bf694a9e2d7bc69890a4eab2ef..05a1d922cd60871ef226cc792a448c0d2f295793 100644 (file)
@@ -244,11 +244,6 @@ static struct platform_device pnx833x_sata_device = {
        .resource      = pnx833x_sata_resources,
 };
 
-static const char *part_probes[] = {
-       "cmdlinepart",
-       NULL
-};
-
 static void
 pnx833x_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
 {
@@ -268,7 +263,6 @@ static struct platform_nand_data pnx833x_flash_nand_data = {
        .chip = {
                .nr_chips               = 1,
                .chip_delay             = 25,
-               .part_probe_types       = part_probes,
        },
        .ctrl = {
                .cmd_ctrl               = pnx833x_flash_nand_cmd_ctrl
index 348d2e850ef5128d42a34fe761fe357fdb5fc5ff..39ca9f8d63ae43c6ffe2bdd0e3e61abefaeb9000 100644 (file)
@@ -27,5 +27,3 @@ obj-y += init.o ioremap.o memory.o powertv_setup.o reset.o time.o \
        asic/ pci/
 
 obj-$(CONFIG_USB) += powertv-usb.o
-
-ccflags-y := -Wall
index d810a33182a4a6b76a250e86532930b2f4d23f98..35dcc53eb25f3960eed05f3a936c6dd7d9cc1c1d 100644 (file)
@@ -19,5 +19,3 @@
 obj-y += asic-calliope.o asic-cronus.o asic-gaia.o asic-zeus.o \
        asic_devices.o asic_int.o irq_asic.o prealloc-calliope.o \
        prealloc-cronus.o prealloc-cronuslite.o prealloc-gaia.o prealloc-zeus.o
-
-ccflags-y := -Wall -Werror
index 5783201cd2c81196659939f6ba495069118e6281..2610a6af5b2c6cf980ad327b3f9d9ee67a2d6953 100644 (file)
@@ -17,5 +17,3 @@
 #
 
 obj-$(CONFIG_PCI)      += fixup-powertv.o
-
-ccflags-y := -Wall -Werror
index a969eb8266340326c17509cfbba6522498f0d6cd..716e9a12f0e77a0c85f492d3cf760ce41059690a 100644 (file)
@@ -15,6 +15,7 @@
  *  GNU General Public License for more details.
  */
 #include <linux/kernel.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/ctype.h>
 #include <linux/string.h>
@@ -292,7 +293,6 @@ static void __init rb532_nand_setup(void)
        rb532_nand_data.chip.nr_partitions = ARRAY_SIZE(rb532_partition_info);
        rb532_nand_data.chip.partitions = rb532_partition_info;
        rb532_nand_data.chip.chip_delay = NAND_CHIP_DELAY;
-       rb532_nand_data.chip.options = NAND_NO_AUTOINCR;
 }
 
 
index d16b462154c33834f996bc0e89726ae97a83b089..413f17f8e89289035ce836148c64cc56c31443a5 100644 (file)
@@ -10,6 +10,7 @@
  */
 #include <linux/eisa.h>
 #include <linux/init.h>
+#include <linux/export.h>
 #include <linux/console.h>
 #include <linux/fb.h>
 #include <linux/screen_info.h>
diff --git a/arch/mn10300/include/asm/kvm_para.h b/arch/mn10300/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index ab506181ec3108ad98c2db77d73f01e2e2b9b134..d31eeea480cfdda8a4231351abbf61c76da08a5f 100644 (file)
@@ -20,9 +20,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index 890cf91767cc8486b4fd004cf8ca6e44c9b94a28..6ab0bee2a54fd38efeccbaa683b4437f26406a34 100644 (file)
@@ -31,8 +31,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * atomically swap in the new signal mask, and wait for a signal.
  */
@@ -163,7 +161,6 @@ asmlinkage long sys_sigreturn(void)
                             sizeof(frame->extramask)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(current_frame(), &frame->sc, &d0))
@@ -191,7 +188,6 @@ asmlinkage long sys_rt_sigreturn(void)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0))
@@ -430,8 +426,9 @@ static inline void stepback(struct pt_regs *regs)
  */
 static int handle_signal(int sig,
                         siginfo_t *info, struct k_sigaction *ka,
-                        sigset_t *oldset, struct pt_regs *regs)
+                        struct pt_regs *regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Are we from a system call? */
@@ -461,11 +458,11 @@ static int handle_signal(int sig,
                ret = setup_rt_frame(sig, ka, info, oldset, regs);
        else
                ret = setup_frame(sig, ka, oldset, regs);
+       if (ret)
+               return;
 
-       if (ret == 0)
-               block_sigmask(ka, sig);
-
-       return ret;
+       signal_delivered(sig, info, ka, regs,
+                                test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -475,7 +472,6 @@ static void do_signal(struct pt_regs *regs)
 {
        struct k_sigaction ka;
        siginfo_t info;
-       sigset_t *oldset;
        int signr;
 
        /* we want the common case to go fast, which is why we may in certain
@@ -483,23 +479,9 @@ static void do_signal(struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
-               if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-                       tracehook_signal_handler(signr, &info, &ka, regs,
-                                                test_thread_flag(TIF_SINGLESTEP));
+               if (handle_signal(signr, &info, &ka, regs) == 0) {
                }
 
                return;
@@ -525,10 +507,7 @@ static void do_signal(struct pt_regs *regs)
 
        /* if there's no signal to deliver, we just put the saved sigmask
         * back */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 /*
@@ -548,13 +527,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
        }
 
        /* deal with pending signal delivery */
-       if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+       if (thread_info_flags & _TIF_SIGPENDING)
                do_signal(regs);
 
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(current_frame());
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index 4932247d078ac3cfde1da41bfa8e759a9b6b8a8e..49765b53f6374709943a1c1a5d17b46cad1846fb 100644 (file)
@@ -19,6 +19,8 @@ config OPENRISC
        select GENERIC_CPU_DEVICES
        select GENERIC_ATOMIC64
        select GENERIC_CLOCKEVENTS
+       select GENERIC_STRNCPY_FROM_USER
+       select GENERIC_STRNLEN_USER
 
 config MMU
        def_bool y
index c936483bc8e2a3af3eae98c0e2c505a1d5009d43..3f35c38d7b6498505c1563eef1a8e39adc41ba1f 100644 (file)
@@ -66,3 +66,4 @@ generic-y += topology.h
 generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
+generic-y += word-at-a-time.h
diff --git a/arch/openrisc/include/asm/kvm_para.h b/arch/openrisc/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index f5abaa0ffc38c407799b17e97595204277ff4fa4..ab2e7a198a4cfedfc0bb99249ec597d26333db5b 100644 (file)
@@ -313,42 +313,12 @@ clear_user(void *addr, unsigned long size)
        return size;
 }
 
-extern int __strncpy_from_user(char *dst, const char *src, long count);
+#define user_addr_max() \
+       (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
 
-static inline long strncpy_from_user(char *dst, const char *src, long count)
-{
-       if (access_ok(VERIFY_READ, src, 1))
-               return __strncpy_from_user(dst, src, count);
-       return -EFAULT;
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 for error
- */
-
-extern int __strnlen_user(const char *str, long len, unsigned long top);
-
-/*
- * Returns the length of the string at str (including the null byte),
- * or 0 if we hit a page we can't access,
- * or something > len if we didn't find a null byte.
- *
- * The `top' parameter to __strnlen_user is to make sure that
- * we can never overflow from the user area into kernel space.
- */
-static inline long strnlen_user(const char __user *str, long len)
-{
-       unsigned long top = (unsigned long)get_fs();
-       unsigned long res = 0;
-
-       if (__addr_ok(str))
-               res = __strnlen_user(str, len, top);
-
-       return res;
-}
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
-#define strlen_user(str) strnlen_user(str, TASK_SIZE-1)
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
 
 #endif /* __ASM_OPENRISC_UACCESS_H */
index e970743251ae3c922fb4ff4f9722f4dc596f1f6f..30110297f4f9509d6437c8f5c3e80220add6e2a9 100644 (file)
@@ -33,8 +33,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 asmlinkage long
 _sys_sigaltstack(const stack_t *uss, stack_t *uoss, struct pt_regs *regs)
 {
@@ -101,7 +99,6 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -251,20 +248,19 @@ give_sigsegv:
        return -EFAULT;
 }
 
-static inline int
+static inline void
 handle_signal(unsigned long sig,
              siginfo_t *info, struct k_sigaction *ka,
-             sigset_t *oldset, struct pt_regs *regs)
+             struct pt_regs *regs)
 {
        int ret;
 
-       ret = setup_rt_frame(sig, ka, info, oldset, regs);
+       ret = setup_rt_frame(sig, ka, info, sigmask_to_save(), regs);
        if (ret)
-               return ret;
-
-       block_sigmask(ka, sig);
+               return;
 
-       return 0;
+       signal_delivered(sig, info, ka, regs,
+                                test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -339,30 +335,10 @@ void do_signal(struct pt_regs *regs)
        if (signr <= 0) {
                /* no signal to deliver so we just put the saved sigmask
                 * back */
-               if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-                       clear_thread_flag(TIF_RESTORE_SIGMASK);
-                       sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-               }
-
+               restore_saved_sigmask();
        } else {                /* signr > 0 */
-               sigset_t *oldset;
-
-               if (current_thread_info()->flags & _TIF_RESTORE_SIGMASK)
-                       oldset = &current->saved_sigmask;
-               else
-                       oldset = &current->blocked;
-
                /* Whee!  Actually deliver the signal.  */
-               if (!handle_signal(signr, &info, &ka, oldset, regs)) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag */
-                       clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
-
-               tracehook_signal_handler(signr, &info, &ka, regs,
-                                        test_thread_flag(TIF_SINGLESTEP));
+               handle_signal(signr, &info, &ka, regs);
        }
 
        return;
@@ -376,7 +352,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs)
        if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index 465f04bc7deb1cc7a3413a6e0001a41f16f28641..c09fee7dec1455082273c9e46b6b2665603f856a 100644 (file)
@@ -103,102 +103,3 @@ __clear_user:
        .section __ex_table, "a"
                .long 9b, 99b           // write fault
        .previous
-
-/*
- * long strncpy_from_user(char *dst, const char *src, long count)
- *
- *
- */
-       .global __strncpy_from_user
-__strncpy_from_user:
-       l.addi  r1,r1,-16
-       l.sw    0(r1),r6
-       l.sw    4(r1),r5
-       l.sw    8(r1),r4
-       l.sw    12(r1),r3
-
-       l.addi  r11,r5,0
-2:     l.sfeq  r5,r0
-       l.bf    1f
-       l.addi  r5,r5,-1
-8:     l.lbz   r6,0(r4)
-       l.sfeq  r6,r0
-       l.bf    1f
-9:     l.sb    0(r3),r6
-       l.addi  r3,r3,1
-       l.j     2b
-       l.addi  r4,r4,1
-1:
-       l.lwz   r6,0(r1)
-       l.addi  r5,r5,1
-       l.sub   r11,r11,r5              // r11 holds the return value
-
-       l.lwz   r6,0(r1)
-       l.lwz   r5,4(r1)
-       l.lwz   r4,8(r1)
-       l.lwz   r3,12(r1)
-       l.jr    r9
-       l.addi  r1,r1,16
-
-       .section .fixup, "ax"
-99:
-               l.movhi r11,hi(-EFAULT)
-               l.ori   r11,r11,lo(-EFAULT)
-
-               l.lwz   r6,0(r1)
-               l.lwz   r5,4(r1)
-               l.lwz   r4,8(r1)
-               l.lwz   r3,12(r1)
-               l.jr    r9
-               l.addi  r1,r1,16
-       .previous
-
-       .section __ex_table, "a"
-               .long 8b, 99b           // read fault
-       .previous
-
-/*
- * extern int __strnlen_user(const char *str, long len, unsigned long top);
- *
- *
- * RTRN: - length of a string including NUL termination character
- *       - on page fault 0
- */
-
-       .global __strnlen_user
-__strnlen_user:
-       l.addi  r1,r1,-8
-       l.sw    0(r1),r6
-       l.sw    4(r1),r3
-
-       l.addi  r11,r0,0
-2:     l.sfeq  r11,r4
-       l.bf    1f
-       l.addi  r11,r11,1
-8:     l.lbz   r6,0(r3)
-       l.sfeq  r6,r0
-       l.bf    1f
-       l.sfgeu r3,r5                  // are we over the top ?
-       l.bf    99f
-       l.j     2b
-       l.addi  r3,r3,1
-
-1:
-       l.lwz   r6,0(r1)
-       l.lwz   r3,4(r1)
-       l.jr    r9
-       l.addi  r1,r1,8
-
-       .section .fixup, "ax"
-99:
-               l.addi  r11,r0,0
-
-               l.lwz   r6,0(r1)
-               l.lwz   r3,4(r1)
-               l.jr    r9
-               l.addi  r1,r1,8
-       .previous
-
-       .section __ex_table, "a"
-               .long 8b, 99b           // read fault
-       .previous
index ddb8b24b823d1c77881f80cc019570c537ea6c62..3ff21b536f28f6c1e84b06b665e6d7589579f6c9 100644 (file)
@@ -18,6 +18,7 @@ config PARISC
        select IRQ_PER_CPU
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select GENERIC_SMP_IDLE_THREAD
+       select GENERIC_STRNCPY_FROM_USER
 
        help
          The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/include/asm/kvm_para.h b/arch/parisc/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index 5212b0357daf15aaf454b751b0eb146fc47b34ac..b9344256f76b365db1c6a9a99b08b337f706970c 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned short         __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short         __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short         __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index e8f8037d872bc91c794f50ba5fd7a0f8eb996232..a5dc9066c6d8d50cb35f2e4cf88509fa4621b84b 100644 (file)
@@ -25,7 +25,6 @@ typedef unsigned long address_t;
 #define cpu_number_map(cpu)    (cpu)
 #define cpu_logical_map(cpu)   (cpu)
 
-extern void smp_send_reschedule(int cpu);
 extern void smp_send_all_nop(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
@@ -50,6 +49,5 @@ static inline void __cpu_die (unsigned int cpu) {
   while(1)
     ;
 }
-extern int __cpu_up (unsigned int cpu);
 
 #endif /*  __ASM_SMP_H */
index 9d5fbbc5c31f14df4791b1005e83ade0cd0505f4..d76fbda5d62c0437f5fb52c389e144597f054b12 100644 (file)
@@ -7,7 +7,7 @@ struct stat {
        unsigned int    st_dev;         /* dev_t is 32 bits on parisc */
        ino_t           st_ino;         /* 32 bits */
        mode_t          st_mode;        /* 16 bits */
-       nlink_t         st_nlink;       /* 16 bits */
+       unsigned short  st_nlink;       /* 16 bits */
        unsigned short  st_reserved1;   /* old st_uid */
        unsigned short  st_reserved2;   /* old st_gid */
        unsigned int    st_rdev;
@@ -42,7 +42,7 @@ struct hpux_stat64 {
        unsigned int    st_dev;         /* dev_t is 32 bits on parisc */
        ino_t           st_ino;         /* 32 bits */
        mode_t          st_mode;        /* 16 bits */
-       nlink_t         st_nlink;       /* 16 bits */
+       unsigned short  st_nlink;       /* 16 bits */
        unsigned short  st_reserved1;   /* old st_uid */
        unsigned short  st_reserved2;   /* old st_gid */
        unsigned int    st_rdev;
index 83ae7dd4d99ea721adbdc16a751435db902da354..22b4726dee494403c80bdf2f88c583ba32043b94 100644 (file)
@@ -74,7 +74,7 @@ struct thread_info {
 #define _TIF_BLOCKSTEP         (1 << TIF_BLOCKSTEP)
 
 #define _TIF_USER_WORK_MASK     (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
-                                 _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
+                                 _TIF_NEED_RESCHED)
 
 #endif /* __KERNEL__ */
 
index 9ac066086f030fc4080ea24be0d557afd7c8aae8..4ba2c93770f1f47c83226dc97b6e25c89d9b0bd6 100644 (file)
@@ -218,15 +218,14 @@ struct exception_data {
 extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
 extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
 extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
-extern long lstrncpy_from_user(char *, const char __user *, long);
+extern long strncpy_from_user(char *, const char __user *, long);
 extern unsigned lclear_user(void __user *,unsigned long);
 extern long lstrnlen_user(const char __user *,long);
-
 /*
  * Complex access routines -- macros
  */
+#define user_addr_max() (~0UL)
 
-#define strncpy_from_user lstrncpy_from_user
 #define strnlen_user lstrnlen_user
 #define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
 #define clear_user lclear_user
index 5350342170218f635e4e231a4d10b1147210a1bf..18670a078849b96d92d4dd4ff406a79c2caa28a4 100644 (file)
         * entry (identifying the physical page) and %r23 up with
         * the from tlb entry (or nothing if only a to entry---for
         * clear_user_page_asm) */
-       .macro          do_alias        spc,tmp,tmp1,va,pte,prot,fault
+       .macro          do_alias        spc,tmp,tmp1,va,pte,prot,fault,patype
        cmpib,COND(<>),n 0,\spc,\fault
        ldil            L%(TMPALIAS_MAP_START),\tmp
 #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
         */
        cmpiclr,=       0x01,\tmp,%r0
        ldi             (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
-#ifdef CONFIG_64BIT
+.ifc \patype,20
        depd,z          \prot,8,7,\prot
-#else
+.else
+.ifc \patype,11
        depw,z          \prot,8,7,\prot
-#endif
+.else
+       .error "undefined PA type to do_alias"
+.endif
+.endif
        /*
         * OK, it is in the temp alias region, check whether "from" or "to".
         * Check "subtle" note in pacache.S re: r23/r26.
@@ -920,7 +924,7 @@ intr_check_sig:
        /* As above */
        mfctl   %cr30,%r1
        LDREG   TI_FLAGS(%r1),%r19
-       ldi     (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20
+       ldi     (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
        and,COND(<>)    %r19, %r20, %r0
        b,n     intr_restore    /* skip past if we've nothing to do */
 
@@ -1189,7 +1193,7 @@ dtlb_miss_20w:
        nop
 
 dtlb_check_alias_20w:
-       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,20
 
        idtlbt          pte,prot
 
@@ -1213,7 +1217,7 @@ nadtlb_miss_20w:
        nop
 
 nadtlb_check_alias_20w:
-       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate
+       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,20
 
        idtlbt          pte,prot
 
@@ -1245,7 +1249,7 @@ dtlb_miss_11:
        nop
 
 dtlb_check_alias_11:
-       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,11
 
        idtlba          pte,(va)
        idtlbp          prot,(va)
@@ -1277,7 +1281,7 @@ nadtlb_miss_11:
        nop
 
 nadtlb_check_alias_11:
-       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate
+       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,11
 
        idtlba          pte,(va)
        idtlbp          prot,(va)
@@ -1304,7 +1308,7 @@ dtlb_miss_20:
        nop
 
 dtlb_check_alias_20:
-       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,20
        
        idtlbt          pte,prot
 
@@ -1330,7 +1334,7 @@ nadtlb_miss_20:
        nop
 
 nadtlb_check_alias_20:
-       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate
+       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,20
 
        idtlbt          pte,prot
 
@@ -1457,7 +1461,7 @@ naitlb_miss_20w:
        nop
 
 naitlb_check_alias_20w:
-       do_alias        spc,t0,t1,va,pte,prot,naitlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,naitlb_fault,20
 
        iitlbt          pte,prot
 
@@ -1511,7 +1515,7 @@ naitlb_miss_11:
        nop
 
 naitlb_check_alias_11:
-       do_alias        spc,t0,t1,va,pte,prot,itlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,itlb_fault,11
 
        iitlba          pte,(%sr0, va)
        iitlbp          prot,(%sr0, va)
@@ -1557,7 +1561,7 @@ naitlb_miss_20:
        nop
 
 naitlb_check_alias_20:
-       do_alias        spc,t0,t1,va,pte,prot,naitlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,naitlb_fault,20
 
        iitlbt          pte,prot
 
@@ -2028,7 +2032,7 @@ syscall_check_resched:
        .import do_signal,code
 syscall_check_sig:
        LDREG   TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
-       ldi     (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
+       ldi     (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
        and,COND(<>)    %r19, %r26, %r0
        b,n     syscall_restore /* skip past if we've nothing to do */
 
index a7bb757a5497137d894518d1dc9d252cb845bb49..ceec85de62904a1892c0fb30eca7eb034e627e53 100644 (file)
@@ -44,7 +44,6 @@ EXPORT_SYMBOL(__cmpxchg_u64);
 #endif
 
 #include <asm/uaccess.h>
-EXPORT_SYMBOL(lstrncpy_from_user);
 EXPORT_SYMBOL(lclear_user);
 EXPORT_SYMBOL(lstrnlen_user);
 
index 4b9cb0d546d132fb39c5ba57d3986f3b109dff48..594459bde14ed3c927b7dddce88e1d9cf9863e74 100644 (file)
@@ -48,9 +48,6 @@
 #define DBG(LEVEL, ...)
 #endif
        
-
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* gcc will complain if a pointer is cast to an integer of different
  * size.  If you really need to do this (and we do for an ELF32 user
  * application in an ELF64 kernel) then you have to do a cast to an
@@ -131,7 +128,6 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
                        goto give_sigsegv;
        }
                
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        /* Good thing we saved the old gr[30], eh? */
@@ -443,8 +439,9 @@ give_sigsegv:
 
 static long
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-               sigset_t *oldset, struct pt_regs *regs, int in_syscall)
+               struct pt_regs *regs, int in_syscall)
 {
+       sigset_t *oldset = sigmask_to_save();
        DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n",
               sig, ka, info, oldset, regs);
        
@@ -452,12 +449,13 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
        if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall))
                return 0;
 
-       block_sigmask(ka, sig);
-
-       tracehook_signal_handler(sig, info, ka, regs, 
+       signal_delivered(sig, info, ka, regs, 
                test_thread_flag(TIF_SINGLESTEP) ||
                test_thread_flag(TIF_BLOCKSTEP));
 
+       DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
+               regs->gr[28]);
+
        return 1;
 }
 
@@ -568,28 +566,17 @@ do_signal(struct pt_regs *regs, long in_syscall)
        siginfo_t info;
        struct k_sigaction ka;
        int signr;
-       sigset_t *oldset;
 
-       DBG(1,"\ndo_signal: oldset=0x%p, regs=0x%p, sr7 %#lx, in_syscall=%d\n",
-              oldset, regs, regs->sr[7], in_syscall);
+       DBG(1,"\ndo_signal: regs=0x%p, sr7 %#lx, in_syscall=%d\n",
+              regs, regs->sr[7], in_syscall);
 
        /* Everyone else checks to see if they are in kernel mode at
           this point and exits if that's the case.  I'm not sure why
           we would be called in that case, but for some reason we
           are. */
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
-       DBG(1,"do_signal: oldset %08lx / %08lx\n", 
-               oldset->sig[0], oldset->sig[1]);
-
-
        /* May need to force signal if handle_signal failed to deliver */
        while (1) {
-         
                signr = get_signal_to_deliver(&info, &ka, regs, NULL);
                DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]); 
        
@@ -603,14 +590,8 @@ do_signal(struct pt_regs *regs, long in_syscall)
                /* Whee!  Actually deliver the signal.  If the
                   delivery failed, we need to continue to iterate in
                   this loop so we can deliver the SIGSEGV... */
-               if (handle_signal(signr, &info, &ka, oldset,
-                                 regs, in_syscall)) {
-                       DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
-                               regs->gr[28]);
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               if (handle_signal(signr, &info, &ka, regs, in_syscall))
                        return;
-               }
        }
        /* end of while(1) looping forever if we can't force a signal */
 
@@ -621,24 +602,16 @@ do_signal(struct pt_regs *regs, long in_syscall)
        DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n", 
                regs->gr[28]);
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
-
-       return;
+       restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs, long in_syscall)
 {
-       if (test_thread_flag(TIF_SIGPENDING) ||
-           test_thread_flag(TIF_RESTORE_SIGMASK))
+       if (test_thread_flag(TIF_SIGPENDING))
                do_signal(regs, in_syscall);
 
        if (test_thread_flag(TIF_NOTIFY_RESUME)) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index e14132430762414166a729ae57e647ec5b9f1316..fd49aeda9eb8b0c4fef656e3dc00228c59251b99 100644 (file)
@@ -47,8 +47,6 @@
 #define DBG(LEVEL, ...)
 #endif
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 inline void
 sigset_32to64(sigset_t *s64, compat_sigset_t *s32)
 {
index fa6f2b8163e03cc1bdc953bbb1e2b8282b826c7a..64a999882e4fb8d0d584da223c7b1f43842e8d2c 100644 (file)
@@ -50,8 +50,10 @@ SECTIONS
        . = KERNEL_BINARY_TEXT_START;
 
        _text = .;              /* Text and read-only data */
-       .text ALIGN(16) : {
+       .head ALIGN(16) : {
                HEAD_TEXT
+       } = 0
+       .text ALIGN(16) : {
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
@@ -65,7 +67,7 @@ SECTIONS
                *(.fixup)
                *(.lock.text)           /* out-of-line lock text */
                *(.gnu.warning)
-       } = 0
+       }
        /* End of text section */
        _etext = .;
 
index 1bd23ccec17b9a53fb838fdfdf5eb9ab62ccf739..6f2d9355efe25af6ab90d4205a216c1c649c39a9 100644 (file)
        bv          %r0(%r1)
        .endm
 
-       /*
-        * long lstrncpy_from_user(char *dst, const char *src, long n)
-        *
-        * Returns -EFAULT if exception before terminator,
-        *         N if the entire buffer filled,
-        *         otherwise strlen (i.e. excludes zero byte)
-        */
-
-ENTRY(lstrncpy_from_user)
-       .proc
-       .callinfo NO_CALLS
-       .entry
-       comib,=     0,%r24,$lsfu_done
-       copy        %r24,%r23
-       get_sr
-1:      ldbs,ma     1(%sr1,%r25),%r1
-$lsfu_loop:
-       stbs,ma     %r1,1(%r26)
-       comib,=,n   0,%r1,$lsfu_done
-       addib,<>,n  -1,%r24,$lsfu_loop
-2:      ldbs,ma     1(%sr1,%r25),%r1
-$lsfu_done:
-       sub         %r23,%r24,%r28
-$lsfu_exit:
-       bv          %r0(%r2)
-       nop
-       .exit
-ENDPROC(lstrncpy_from_user)
-
-       .section .fixup,"ax"
-3:      fixup_branch $lsfu_exit
-       ldi         -EFAULT,%r28
-       .previous
-
-       .section __ex_table,"aw"
-       ASM_ULONG_INSN 1b,3b
-       ASM_ULONG_INSN 2b,3b
-       .previous
-
-       .procend
-
        /*
         * unsigned long lclear_user(void *to, unsigned long n)
         *
index 00b9874e2240d79a669ab40af6602fb403d7104d..050cb371a69e6f6892e700c312fc02facd9a5e97 100644 (file)
@@ -135,6 +135,8 @@ config PPC
        select GENERIC_CMOS_UPDATE
        select GENERIC_TIME_VSYSCALL
        select GENERIC_CLOCKEVENTS
+       select GENERIC_STRNCPY_FROM_USER
+       select GENERIC_STRNLEN_USER
 
 config EARLY_PRINTK
        bool
index 7e283c891b7f8871eca5dc5a64a79d9bb4e56590..fe0d60935e9be10109d2ed0ee7c65b9e493d9777 100644 (file)
                sdhc@2e000 {
                        status = "disabled";
                        sdhci,1-bit-only;
+                       bus-width = <1>;
                };
 
                par_io@e0100 {
index b9219e99bd2ae6cbc4c5882dff3dfd62759cbfe2..50d82c8a037f4cec2903504541b39a6eb2352331 100644 (file)
@@ -168,6 +168,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_LWSYNC                 ASM_CONST(0x0000000008000000)
 #define CPU_FTR_NOEXECUTE              ASM_CONST(0x0000000010000000)
 #define CPU_FTR_INDEXED_DCR            ASM_CONST(0x0000000020000000)
+#define CPU_FTR_EMB_HV                 ASM_CONST(0x0000000040000000)
 
 /*
  * Add the 64-bit processor unique features in the top half of the word;
@@ -376,7 +377,8 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_47X   (CPU_FTRS_440x6)
 #define CPU_FTRS_E200  (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
            CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
-           CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
+           CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE | \
+           CPU_FTR_DEBUG_LVL_EXC)
 #define CPU_FTRS_E500  (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
            CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
            CPU_FTR_NOEXECUTE)
@@ -385,15 +387,15 @@ extern const char *powerpc_base_platform;
            CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
 #define CPU_FTRS_E500MC        (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
            CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
-           CPU_FTR_DBELL)
+           CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
 #define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
            CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
            CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
-           CPU_FTR_DEBUG_LVL_EXC)
+           CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
 #define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
            CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
            CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
-           CPU_FTR_DEBUG_LVL_EXC)
+           CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
 #define CPU_FTRS_GENERIC_32    (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
@@ -486,8 +488,10 @@ enum {
            CPU_FTRS_E200 |
 #endif
 #ifdef CONFIG_E500
-           CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
-           CPU_FTRS_E5500 | CPU_FTRS_E6500 |
+           CPU_FTRS_E500 | CPU_FTRS_E500_2 |
+#endif
+#ifdef CONFIG_PPC_E500MC
+           CPU_FTRS_E500MC | CPU_FTRS_E5500 | CPU_FTRS_E6500 |
 #endif
            0,
 };
@@ -531,9 +535,12 @@ enum {
            CPU_FTRS_E200 &
 #endif
 #ifdef CONFIG_E500
-           CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
-           CPU_FTRS_E5500 & CPU_FTRS_E6500 &
+           CPU_FTRS_E500 & CPU_FTRS_E500_2 &
+#endif
+#ifdef CONFIG_PPC_E500MC
+           CPU_FTRS_E500MC & CPU_FTRS_E5500 & CPU_FTRS_E6500 &
 #endif
+           ~CPU_FTR_EMB_HV &   /* can be removed at runtime */
            CPU_FTRS_POSSIBLE,
 };
 #endif /* __powerpc64__ */
index efa74ac44a359b5b78d8dd7b564905fc5af02bb2..154c067761b15bbf50af6bf099bed0c997ff542e 100644 (file)
@@ -19,6 +19,9 @@
 
 #define PPC_DBELL_MSG_BRDCAST  (0x04000000)
 #define PPC_DBELL_TYPE(x)      (((x) & 0xf) << (63-36))
+#define PPC_DBELL_TYPE_MASK    PPC_DBELL_TYPE(0xf)
+#define PPC_DBELL_LPID(x)      ((x) << (63 - 49))
+#define PPC_DBELL_PIR_MASK     0x3fff
 enum ppc_dbell {
        PPC_DBELL = 0,          /* doorbell */
        PPC_DBELL_CRIT = 1,     /* critical doorbell */
index 612252388190ccfafeab132ba93e84d8f60b632e..423cf9eaf4a4fddf35811e62de33dc16363812e5 100644 (file)
 #define H_PP1                  (1UL<<(63-62))
 #define H_PP2                  (1UL<<(63-63))
 
+/* Flags for H_REGISTER_VPA subfunction field */
+#define H_VPA_FUNC_SHIFT       (63-18) /* Bit posn of subfunction code */
+#define H_VPA_FUNC_MASK                7UL
+#define H_VPA_REG_VPA          1UL     /* Register Virtual Processor Area */
+#define H_VPA_REG_DTL          2UL     /* Register Dispatch Trace Log */
+#define H_VPA_REG_SLB          3UL     /* Register SLB shadow buffer */
+#define H_VPA_DEREG_VPA                5UL     /* Deregister Virtual Processor Area */
+#define H_VPA_DEREG_DTL                6UL     /* Deregister Dispatch Trace Log */
+#define H_VPA_DEREG_SLB                7UL     /* Deregister SLB shadow buffer */
+
 /* VASI States */
 #define H_VASI_INVALID          0
 #define H_VASI_ENABLED          1
index 51010bfc792e9ea365f6d4f3b4fa34f089dd2e28..c9aac24b02e267ab9d171af7d1087863e0ef0dc8 100644 (file)
@@ -33,6 +33,7 @@
 extern void __replay_interrupt(unsigned int vector);
 
 extern void timer_interrupt(struct pt_regs *);
+extern void performance_monitor_exception(struct pt_regs *regs);
 
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
index b921c3f48928868a74bce5142af1a346c9a79c7f..1bea4d8ea6f432d3e3425752aafa156a8fe7710b 100644 (file)
@@ -277,6 +277,7 @@ struct kvm_sync_regs {
 #define KVM_CPU_E500V2         2
 #define KVM_CPU_3S_32          3
 #define KVM_CPU_3S_64          4
+#define KVM_CPU_E500MC         5
 
 /* for KVM_CAP_SPAPR_TCE */
 struct kvm_create_spapr_tce {
index 7b1f0e0fc6533eb20c49b5e6536f66e6baf09790..76fdcfef088972d1b564d8413c275e1d01261ced 100644 (file)
 #ifndef __POWERPC_KVM_ASM_H__
 #define __POWERPC_KVM_ASM_H__
 
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_64BIT
+#define PPC_STD(sreg, offset, areg)  std sreg, (offset)(areg)
+#define PPC_LD(treg, offset, areg)   ld treg, (offset)(areg)
+#else
+#define PPC_STD(sreg, offset, areg)  stw sreg, (offset+4)(areg)
+#define PPC_LD(treg, offset, areg)   lwz treg, (offset+4)(areg)
+#endif
+#endif
+
 /* IVPR must be 64KiB-aligned. */
 #define VCPU_SIZE_ORDER 4
 #define VCPU_SIZE_LOG   (VCPU_SIZE_ORDER + 12)
 #define BOOKE_INTERRUPT_SPE_FP_DATA 33
 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34
 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
+#define BOOKE_INTERRUPT_DOORBELL 36
+#define BOOKE_INTERRUPT_DOORBELL_CRITICAL 37
+
+/* booke_hv */
+#define BOOKE_INTERRUPT_GUEST_DBELL 38
+#define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39
+#define BOOKE_INTERRUPT_HV_SYSCALL 40
+#define BOOKE_INTERRUPT_HV_PRIV 41
 
 /* book3s */
 
index fd07f43d66224c42b367b6d911dda9ddb8a33bd4..f0e0c6a66d973fdb833138e2433b54be860dc4f3 100644 (file)
@@ -453,4 +453,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 
 #define INS_DCBZ                       0x7c0007ec
 
+/* LPIDs we support with this build -- runtime limit may be lower */
+#define KVMPPC_NR_LPIDS                        (LPID_RSVD + 1)
+
 #endif /* __ASM_KVM_BOOK3S_H__ */
index 1f2f5b6156bd01e6aa3a3752d4899f2ab0175f53..88609b23b775460c96a4d9d805feaf49fecf94a3 100644 (file)
@@ -79,6 +79,9 @@ struct kvmppc_host_state {
        u8 napping;
 
 #ifdef CONFIG_KVM_BOOK3S_64_HV
+       u8 hwthread_req;
+       u8 hwthread_state;
+
        struct kvm_vcpu *kvm_vcpu;
        struct kvmppc_vcore *kvm_vcore;
        unsigned long xics_phys;
@@ -122,4 +125,9 @@ struct kvmppc_book3s_shadow_vcpu {
 
 #endif /*__ASSEMBLY__ */
 
+/* Values for kvm_state */
+#define KVM_HWTHREAD_IN_KERNEL 0
+#define KVM_HWTHREAD_IN_NAP    1
+#define KVM_HWTHREAD_IN_KVM    2
+
 #endif /* __ASM_KVM_BOOK3S_ASM_H__ */
index a90e0918877738180a3f08574900b983cabfdf03..b7cd3356a532d7c76d53dd9e1c418c40adb35275 100644 (file)
@@ -23,6 +23,9 @@
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 
+/* LPIDs we support with this build -- runtime limit may be lower */
+#define KVMPPC_NR_LPIDS                        64
+
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
        vcpu->arch.gpr[num] = val;
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
new file mode 100644 (file)
index 0000000..30a600f
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_KVM_BOOKE_HV_ASM_H
+#define ASM_KVM_BOOKE_HV_ASM_H
+
+#ifdef __ASSEMBLY__
+
+/*
+ * All exceptions from guest state must go through KVM
+ * (except for those which are delivered directly to the guest) --
+ * there are no exceptions for which we fall through directly to
+ * the normal host handler.
+ *
+ * Expected inputs (normal exceptions):
+ *   SCRATCH0 = saved r10
+ *   r10 = thread struct
+ *   r11 = appropriate SRR1 variant (currently used as scratch)
+ *   r13 = saved CR
+ *   *(r10 + THREAD_NORMSAVE(0)) = saved r11
+ *   *(r10 + THREAD_NORMSAVE(2)) = saved r13
+ *
+ * Expected inputs (crit/mcheck/debug exceptions):
+ *   appropriate SCRATCH = saved r8
+ *   r8 = exception level stack frame
+ *   r9 = *(r8 + _CCR) = saved CR
+ *   r11 = appropriate SRR1 variant (currently used as scratch)
+ *   *(r8 + GPR9) = saved r9
+ *   *(r8 + GPR10) = saved r10 (r10 not yet clobbered)
+ *   *(r8 + GPR11) = saved r11
+ */
+.macro DO_KVM intno srr1
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+       mtocrf  0x80, r11       /* check MSR[GS] without clobbering reg */
+       bf      3, kvmppc_resume_\intno\()_\srr1
+       b       kvmppc_handler_\intno\()_\srr1
+kvmppc_resume_\intno\()_\srr1:
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
+.endm
+
+#endif /*__ASSEMBLY__ */
+#endif /* ASM_KVM_BOOKE_HV_ASM_H */
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
deleted file mode 100644 (file)
index 8cd50a5..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Author: Yu Liu, <yu.liu@freescale.com>
- *
- * Description:
- * This file is derived from arch/powerpc/include/asm/kvm_44x.h,
- * by Hollis Blanchard <hollisb@us.ibm.com>.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_KVM_E500_H__
-#define __ASM_KVM_E500_H__
-
-#include <linux/kvm_host.h>
-
-#define BOOKE_INTERRUPT_SIZE 36
-
-#define E500_PID_NUM   3
-#define E500_TLB_NUM   2
-
-#define E500_TLB_VALID 1
-#define E500_TLB_DIRTY 2
-
-struct tlbe_ref {
-       pfn_t pfn;
-       unsigned int flags; /* E500_TLB_* */
-};
-
-struct tlbe_priv {
-       struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
-};
-
-struct vcpu_id_table;
-
-struct kvmppc_e500_tlb_params {
-       int entries, ways, sets;
-};
-
-struct kvmppc_vcpu_e500 {
-       /* Unmodified copy of the guest's TLB -- shared with host userspace. */
-       struct kvm_book3e_206_tlb_entry *gtlb_arch;
-
-       /* Starting entry number in gtlb_arch[] */
-       int gtlb_offset[E500_TLB_NUM];
-
-       /* KVM internal information associated with each guest TLB entry */
-       struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
-
-       struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];
-
-       unsigned int gtlb_nv[E500_TLB_NUM];
-
-       /*
-        * information associated with each host TLB entry --
-        * TLB1 only for now.  If/when guest TLB1 entries can be
-        * mapped with host TLB0, this will be used for that too.
-        *
-        * We don't want to use this for guest TLB0 because then we'd
-        * have the overhead of doing the translation again even if
-        * the entry is still in the guest TLB (e.g. we swapped out
-        * and back, and our host TLB entries got evicted).
-        */
-       struct tlbe_ref *tlb_refs[E500_TLB_NUM];
-       unsigned int host_tlb1_nv;
-
-       u32 host_pid[E500_PID_NUM];
-       u32 pid[E500_PID_NUM];
-       u32 svr;
-
-       /* vcpu id table */
-       struct vcpu_id_table *idt;
-
-       u32 l1csr0;
-       u32 l1csr1;
-       u32 hid0;
-       u32 hid1;
-       u32 tlb0cfg;
-       u32 tlb1cfg;
-       u64 mcar;
-
-       struct page **shared_tlb_pages;
-       int num_shared_tlb_pages;
-
-       struct kvm_vcpu vcpu;
-};
-
-static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
-{
-       return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
-}
-
-#endif /* __ASM_KVM_E500_H__ */
index 52eb9c1f4fe01cd463523fb55b782069929bf911..d848cdc4971523936550b435b7e3d8e7f08bba60 100644 (file)
@@ -82,7 +82,7 @@ struct kvm_vcpu;
 
 struct lppaca;
 struct slb_shadow;
-struct dtl;
+struct dtl_entry;
 
 struct kvm_vm_stat {
        u32 remote_tlb_flush;
@@ -106,6 +106,8 @@ struct kvm_vcpu_stat {
        u32 dec_exits;
        u32 ext_intr_exits;
        u32 halt_wakeup;
+       u32 dbell_exits;
+       u32 gdbell_exits;
 #ifdef CONFIG_PPC_BOOK3S
        u32 pf_storage;
        u32 pf_instruc;
@@ -140,6 +142,7 @@ enum kvm_exit_types {
        EMULATED_TLBSX_EXITS,
        EMULATED_TLBWE_EXITS,
        EMULATED_RFI_EXITS,
+       EMULATED_RFCI_EXITS,
        DEC_EXITS,
        EXT_INTR_EXITS,
        HALT_WAKEUP,
@@ -147,6 +150,8 @@ enum kvm_exit_types {
        FP_UNAVAIL,
        DEBUG_EXITS,
        TIMEINGUEST,
+       DBELL_EXITS,
+       GDBELL_EXITS,
        __NUMBER_OF_KVM_EXIT_TYPES
 };
 
@@ -217,10 +222,10 @@ struct kvm_arch_memory_slot {
 };
 
 struct kvm_arch {
+       unsigned int lpid;
 #ifdef CONFIG_KVM_BOOK3S_64_HV
        unsigned long hpt_virt;
        struct revmap_entry *revmap;
-       unsigned int lpid;
        unsigned int host_lpid;
        unsigned long host_lpcr;
        unsigned long sdr1;
@@ -232,7 +237,6 @@ struct kvm_arch {
        unsigned long vrma_slb_v;
        int rma_setup_done;
        int using_mmu_notifiers;
-       struct list_head spapr_tce_tables;
        spinlock_t slot_phys_lock;
        unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
        int slot_npages[KVM_MEM_SLOTS_NUM];
@@ -240,6 +244,9 @@ struct kvm_arch {
        struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
        struct kvmppc_linear_info *hpt_li;
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
+#ifdef CONFIG_PPC_BOOK3S_64
+       struct list_head spapr_tce_tables;
+#endif
 };
 
 /*
@@ -263,6 +270,9 @@ struct kvmppc_vcore {
        struct list_head runnable_threads;
        spinlock_t lock;
        wait_queue_head_t wq;
+       u64 stolen_tb;
+       u64 preempt_tb;
+       struct kvm_vcpu *runner;
 };
 
 #define VCORE_ENTRY_COUNT(vc)  ((vc)->entry_exit_count & 0xff)
@@ -274,6 +284,19 @@ struct kvmppc_vcore {
 #define VCORE_EXITING  2
 #define VCORE_SLEEPING 3
 
+/*
+ * Struct used to manage memory for a virtual processor area
+ * registered by a PAPR guest.  There are three types of area
+ * that a guest can register.
+ */
+struct kvmppc_vpa {
+       void *pinned_addr;      /* Address in kernel linear mapping */
+       void *pinned_end;       /* End of region */
+       unsigned long next_gpa; /* Guest phys addr for update */
+       unsigned long len;      /* Number of bytes required */
+       u8 update_pending;      /* 1 => update pinned_addr from next_gpa */
+};
+
 struct kvmppc_pte {
        ulong eaddr;
        u64 vpage;
@@ -345,6 +368,17 @@ struct kvm_vcpu_arch {
        u64 vsr[64];
 #endif
 
+#ifdef CONFIG_KVM_BOOKE_HV
+       u32 host_mas4;
+       u32 host_mas6;
+       u32 shadow_epcr;
+       u32 epcr;
+       u32 shadow_msrp;
+       u32 eplc;
+       u32 epsc;
+       u32 oldpir;
+#endif
+
 #ifdef CONFIG_PPC_BOOK3S
        /* For Gekko paired singles */
        u32 qpr[32];
@@ -370,6 +404,7 @@ struct kvm_vcpu_arch {
 #endif
        u32 vrsave; /* also USPRG0 */
        u32 mmucr;
+       /* shadow_msr is unused for BookE HV */
        ulong shadow_msr;
        ulong csrr0;
        ulong csrr1;
@@ -426,8 +461,12 @@ struct kvm_vcpu_arch {
        ulong fault_esr;
        ulong queued_dear;
        ulong queued_esr;
+       u32 tlbcfg[4];
+       u32 mmucfg;
+       u32 epr;
 #endif
        gpa_t paddr_accessed;
+       gva_t vaddr_accessed;
 
        u8 io_gpr; /* GPR used as IO source/target */
        u8 mmio_is_bigendian;
@@ -453,11 +492,6 @@ struct kvm_vcpu_arch {
        u8 prodded;
        u32 last_inst;
 
-       struct lppaca *vpa;
-       struct slb_shadow *slb_shadow;
-       struct dtl *dtl;
-       struct dtl *dtl_end;
-
        wait_queue_head_t *wqp;
        struct kvmppc_vcore *vcore;
        int ret;
@@ -482,6 +516,14 @@ struct kvm_vcpu_arch {
        struct task_struct *run_task;
        struct kvm_run *kvm_run;
        pgd_t *pgdir;
+
+       spinlock_t vpa_update_lock;
+       struct kvmppc_vpa vpa;
+       struct kvmppc_vpa dtl;
+       struct dtl_entry *dtl_ptr;
+       unsigned long dtl_index;
+       u64 stolen_logged;
+       struct kvmppc_vpa slb_shadow;
 #endif
 };
 
@@ -498,4 +540,6 @@ struct kvm_vcpu_arch {
 #define KVM_MMIO_REG_QPR       0x0040
 #define KVM_MMIO_REG_FQPR      0x0060
 
+#define __KVM_HAVE_ARCH_WQP
+
 #endif /* __POWERPC_KVM_HOST_H__ */
index 7b754e74300397e2d92f7cab4adb10ddadf1a726..c18916bff689af6719c015458b930bf51ec257ce 100644 (file)
@@ -206,6 +206,11 @@ static inline unsigned int kvm_arch_para_features(void)
        return r;
 }
 
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+       return false;
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* __POWERPC_KVM_PARA_H__ */
index 9d6dee0f7d48ecc442b3d1e8062583c033434e69..f68c22fa2fcebd0751447f96d92507d253b27114 100644 (file)
@@ -95,7 +95,7 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
 extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 
-extern void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
@@ -107,8 +107,10 @@ extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
 
 extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
+extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
+                                    ulong val);
+extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
+                                    ulong *val);
 
 extern int kvmppc_booke_init(void);
 extern void kvmppc_booke_exit(void);
@@ -126,6 +128,8 @@ extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
 extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
 extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                struct kvm_create_spapr_tce *args);
+extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+                            unsigned long ioba, unsigned long tce);
 extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
                                struct kvm_allocate_rma *rma);
 extern struct kvmppc_linear_info *kvm_alloc_rma(void);
@@ -138,6 +142,11 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem);
 extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem);
+extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
+                                     struct kvm_ppc_smmu_info *info);
+
+extern int kvmppc_bookehv_init(void);
+extern void kvmppc_bookehv_exit(void);
 
 /*
  * Cuts out inst bits with ordering according to spec.
@@ -204,4 +213,9 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
 int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                             struct kvm_dirty_tlb *cfg);
 
+long kvmppc_alloc_lpid(void);
+void kvmppc_claim_lpid(long lpid);
+void kvmppc_free_lpid(long lpid);
+void kvmppc_init_lpid(unsigned long nr_lpids);
+
 #endif /* __POWERPC_KVM_PPC_H__ */
index cdb5421877e2e2259ed3265df92d655aa62dd115..eeabcdbc30f7ab00bd7856983e020e70db8a7045 100644 (file)
 #define MAS4_TSIZED_MASK       0x00000f80      /* Default TSIZE */
 #define MAS4_TSIZED_SHIFT      7
 
+#define MAS5_SGS               0x80000000
+
 #define MAS6_SPID0             0x3FFF0000
 #define MAS6_SPID1             0x00007FFE
 #define MAS6_ISIZE(x)          MAS1_TSIZE(x)
 
 #define MAS7_RPN               0xFFFFFFFF
 
+#define MAS8_TGS               0x80000000 /* Guest space */
+#define MAS8_VF                        0x40000000 /* Virtualization Fault */
+#define MAS8_TLPID             0x000000ff
+
 /* Bit definitions for MMUCFG */
 #define MMUCFG_MAVN    0x00000003      /* MMU Architecture Version Number */
 #define MMUCFG_MAVN_V1 0x00000000      /* v1.0 */
index f1393252bbdad837c97b794c8534328a9c912ee3..2958c5b97b2dd4100ac5907129b4736d94458cf7 100644 (file)
@@ -16,9 +16,6 @@ typedef int           __kernel_ssize_t;
 typedef long           __kernel_ptrdiff_t;
 #define __kernel_size_t __kernel_size_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef short          __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 #endif
index 55e85631c42e3be5ab150ce48e2d3dbf767e9c6a..413a5eaef56c94340d16451a0f65e56453c5b38d 100644 (file)
@@ -240,6 +240,9 @@ struct thread_struct {
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        void*           kvm_shadow_vcpu; /* KVM internal data */
 #endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
+#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
+       struct kvm_vcpu *kvm_vcpu;
+#endif
 #ifdef CONFIG_PPC64
        unsigned long   dscr;
        int             dscr_inherit;
index 9d7f0fb690285bd0bb183e375080518e695792cb..f0cb7f461b9d5470c2cd61e7428d44e9f10f4238 100644 (file)
 #define   LPCR_LPES_SH 2
 #define   LPCR_RMI     0x00000002      /* real mode is cache inhibit */
 #define   LPCR_HDICE   0x00000001      /* Hyp Decr enable (HV,PR,EE) */
+#ifndef SPRN_LPID
 #define SPRN_LPID      0x13F   /* Logical Partition Identifier */
+#endif
 #define   LPID_RSVD    0x3ff           /* Reserved LPID for partn switching */
 #define        SPRN_HMER       0x150   /* Hardware m? error recovery */
 #define        SPRN_HMEER      0x151   /* Hardware m? enable error recovery */
index 8a97aa7289d36b155a1e01df4211a261a7a110e8..2d916c4982c5136a71b10f917606757aa743792f 100644 (file)
 #define SPRN_SPRG7W    0x117   /* Special Purpose Register General 7 Write */
 #define SPRN_EPCR      0x133   /* Embedded Processor Control Register */
 #define SPRN_DBCR2     0x136   /* Debug Control Register 2 */
+#define SPRN_MSRP      0x137   /* MSR Protect Register */
 #define SPRN_IAC3      0x13A   /* Instruction Address Compare 3 */
 #define SPRN_IAC4      0x13B   /* Instruction Address Compare 4 */
 #define SPRN_DVC1      0x13E   /* Data Value Compare Register 1 */
 #define SPRN_DVC2      0x13F   /* Data Value Compare Register 2 */
+#define SPRN_LPID      0x152   /* Logical Partition ID */
 #define SPRN_MAS8      0x155   /* MMU Assist Register 8 */
 #define SPRN_TLB0PS    0x158   /* TLB 0 Page Size Register */
 #define SPRN_TLB1PS    0x159   /* TLB 1 Page Size Register */
 #define SPRN_MAS5_MAS6 0x15c   /* MMU Assist Register 5 || 6 */
 #define SPRN_MAS8_MAS1 0x15d   /* MMU Assist Register 8 || 1 */
 #define SPRN_EPTCFG    0x15e   /* Embedded Page Table Config */
+#define SPRN_GSPRG0    0x170   /* Guest SPRG0 */
+#define SPRN_GSPRG1    0x171   /* Guest SPRG1 */
+#define SPRN_GSPRG2    0x172   /* Guest SPRG2 */
+#define SPRN_GSPRG3    0x173   /* Guest SPRG3 */
 #define SPRN_MAS7_MAS3 0x174   /* MMU Assist Register 7 || 3 */
 #define SPRN_MAS0_MAS1 0x175   /* MMU Assist Register 0 || 1 */
+#define SPRN_GSRR0     0x17A   /* Guest SRR0 */
+#define SPRN_GSRR1     0x17B   /* Guest SRR1 */
+#define SPRN_GEPR      0x17C   /* Guest EPR */
+#define SPRN_GDEAR     0x17D   /* Guest DEAR */
+#define SPRN_GPIR      0x17E   /* Guest PIR */
+#define SPRN_GESR      0x17F   /* Guest Exception Syndrome Register */
 #define SPRN_IVOR0     0x190   /* Interrupt Vector Offset Register 0 */
 #define SPRN_IVOR1     0x191   /* Interrupt Vector Offset Register 1 */
 #define SPRN_IVOR2     0x192   /* Interrupt Vector Offset Register 2 */
 #define SPRN_IVOR39    0x1B1   /* Interrupt Vector Offset Register 39 */
 #define SPRN_IVOR40    0x1B2   /* Interrupt Vector Offset Register 40 */
 #define SPRN_IVOR41    0x1B3   /* Interrupt Vector Offset Register 41 */
+#define SPRN_GIVOR2    0x1B8   /* Guest IVOR2 */
+#define SPRN_GIVOR3    0x1B9   /* Guest IVOR3 */
+#define SPRN_GIVOR4    0x1BA   /* Guest IVOR4 */
+#define SPRN_GIVOR8    0x1BB   /* Guest IVOR8 */
+#define SPRN_GIVOR13   0x1BC   /* Guest IVOR13 */
+#define SPRN_GIVOR14   0x1BD   /* Guest IVOR14 */
+#define SPRN_GIVPR     0x1BF   /* Guest IVPR */
 #define SPRN_SPEFSCR   0x200   /* SPE & Embedded FP Status & Control */
 #define SPRN_BBEAR     0x201   /* Branch Buffer Entry Address Register */
 #define SPRN_BBTAR     0x202   /* Branch Buffer Target Address Register */
 #define MCSR_LDG       0x00002000UL /* Guarded Load */
 #define MCSR_TLBSYNC   0x00000002UL /* Multiple tlbsyncs detected */
 #define MCSR_BSL2_ERR  0x00000001UL /* Backside L2 cache error */
+
+#define MSRP_UCLEP     0x04000000 /* Protect MSR[UCLE] */
+#define MSRP_DEP       0x00000200 /* Protect MSR[DE] */
+#define MSRP_PMMP      0x00000004 /* Protect MSR[PMM] */
 #endif
 
 #ifdef CONFIG_E200
 #define SPRN_EPCR_DMIUH                0x00400000      /* Disable MAS Interrupt updates
                                                 * for hypervisor */
 
+/* Bit definitions for EPLC/EPSC */
+#define EPC_EPR                0x80000000 /* 1 = user, 0 = kernel */
+#define EPC_EPR_SHIFT  31
+#define EPC_EAS                0x40000000 /* Address Space */
+#define EPC_EAS_SHIFT  30
+#define EPC_EGS                0x20000000 /* 1 = guest, 0 = hypervisor */
+#define EPC_EGS_SHIFT  29
+#define EPC_ELPID      0x00ff0000
+#define EPC_ELPID_SHIFT        16
+#define EPC_EPID       0x00003fff
+#define EPC_EPID_SHIFT 0
 
 /*
  * The IBM-403 is an even more odd special case, as it is much
index e4edc510b530cfed6420e96012a69dc424a34e79..84880b80cc1ce7811924ec18e68bbd75431371f6 100644 (file)
@@ -30,11 +30,11 @@ struct stat {
        unsigned long   st_dev;
        ino_t           st_ino;
 #ifdef __powerpc64__
-       nlink_t         st_nlink;
+       unsigned long   st_nlink;
        mode_t          st_mode;
 #else
        mode_t          st_mode;
-       nlink_t         st_nlink;
+       unsigned short  st_nlink;
 #endif
        uid_t           st_uid;
        gid_t           st_gid;
index 1a6320290d2616d971c2c09ca6da2281e5b766fb..200d763a0a6708b16674eaa92df29a3fcf57fce9 100644 (file)
@@ -17,6 +17,7 @@ extern struct task_struct *_switch(struct thread_struct *prev,
                                   struct thread_struct *next);
 
 extern void giveup_fpu(struct task_struct *);
+extern void load_up_fpu(void);
 extern void disable_kernel_fp(void);
 extern void enable_kernel_fp(void);
 extern void flush_fp_to_thread(struct task_struct *);
index a556ccc16b58d4560b004c1ee797f8e92c7c6506..68831e9cf82f01ddf8f12164962d8db438b93af3 100644 (file)
@@ -140,7 +140,23 @@ static inline void set_restore_sigmask(void)
 {
        struct thread_info *ti = current_thread_info();
        ti->local_flags |= _TLF_RESTORE_SIGMASK;
-       set_bit(TIF_SIGPENDING, &ti->flags);
+       WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+       current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+       return current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       if (!(ti->local_flags & _TLF_RESTORE_SIGMASK))
+               return false;
+       ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
+       return true;
 }
 
 static inline bool test_thread_local_flags(unsigned int flags)
index 2136f58a54e80a32fd014dd66b84196b9f0bc1df..3b4b4a8da922fc4801d51bae86fc8e2b8e810108 100644 (file)
@@ -23,6 +23,7 @@
 extern unsigned long tb_ticks_per_jiffy;
 extern unsigned long tb_ticks_per_usec;
 extern unsigned long tb_ticks_per_sec;
+extern struct clock_event_device decrementer_clockevent;
 
 struct rtc_time;
 extern void to_tm(int tim, struct rtc_time * tm);
index bd0fb8495154d1684f51aa9bce6654ca6a763a99..17bb40cad5bfbaade0efaf0211baafeb49351e04 100644 (file)
@@ -40,6 +40,8 @@
 
 #define segment_eq(a, b)       ((a).seg == (b).seg)
 
+#define user_addr_max()        (get_fs().seg)
+
 #ifdef __powerpc64__
 /*
  * This check is sufficient because there is a large enough
@@ -453,42 +455,9 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
        return size;
 }
 
-extern int __strncpy_from_user(char *dst, const char __user *src, long count);
-
-static inline long strncpy_from_user(char *dst, const char __user *src,
-               long count)
-{
-       might_sleep();
-       if (likely(access_ok(VERIFY_READ, src, 1)))
-               return __strncpy_from_user(dst, src, count);
-       return -EFAULT;
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 for error
- */
-extern int __strnlen_user(const char __user *str, long len, unsigned long top);
-
-/*
- * Returns the length of the string at str (including the null byte),
- * or 0 if we hit a page we can't access,
- * or something > len if we didn't find a null byte.
- *
- * The `top' parameter to __strnlen_user is to make sure that
- * we can never overflow from the user area into kernel space.
- */
-static inline int strnlen_user(const char __user *str, long len)
-{
-       unsigned long top = current->thread.fs.seg;
-
-       if ((unsigned long)str > top)
-               return 0;
-       return __strnlen_user(str, len, top);
-}
-
-#define strlen_user(str)       strnlen_user((str), 0x7ffffffe)
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
 
 #endif  /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
new file mode 100644 (file)
index 0000000..d0b6d4a
--- /dev/null
@@ -0,0 +1,41 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * Word-at-a-time interfaces for PowerPC.
+ */
+
+#include <linux/kernel.h>
+#include <asm/asm-compat.h>
+
+struct word_at_a_time {
+       const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+       unsigned long mask = (val & c->low_bits) + c->low_bits;
+       return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+       long leading_zero_bits;
+
+       asm (PPC_CNTLZL "%0,%1" : "=r" (leading_zero_bits) : "r" (mask));
+       return leading_zero_bits >> 3;
+}
+
+static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+       unsigned long rhs = val | c->low_bits;
+       *data = rhs;
+       return (val + c->high_bits) & ~rhs;
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
index 4554dc2fe857262af77c26fa500bd669514460ee..52c7ad78242ebd95c16d950424552d6a4741018e 100644 (file)
@@ -116,6 +116,9 @@ int main(void)
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
 #endif
+#ifdef CONFIG_KVM_BOOKE_HV
+       DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu));
+#endif
 
        DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
        DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
@@ -383,6 +386,7 @@ int main(void)
 #ifdef CONFIG_KVM
        DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
        DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
+       DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
        DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
        DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
        DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
@@ -425,9 +429,11 @@ int main(void)
        DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
        DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
 
+       DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+       DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
+
        /* book3s */
 #ifdef CONFIG_KVM_BOOK3S_64_HV
-       DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
        DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
        DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
        DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
@@ -440,9 +446,9 @@ int main(void)
        DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
        DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
        DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
+       DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
 #endif
 #ifdef CONFIG_PPC_BOOK3S
-       DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
        DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
        DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
        DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
@@ -457,7 +463,6 @@ int main(void)
        DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
        DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded));
        DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
-       DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa));
        DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
        DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
        DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
@@ -533,6 +538,8 @@ int main(void)
        HSTATE_FIELD(HSTATE_NAPPING, napping);
 
 #ifdef CONFIG_KVM_BOOK3S_64_HV
+       HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
+       HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
        HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
        HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
        HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
@@ -593,6 +600,12 @@ int main(void)
        DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
 #endif
 
+#ifdef CONFIG_KVM_BOOKE_HV
+       DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4));
+       DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6));
+       DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc));
+#endif
+
 #ifdef CONFIG_KVM_EXIT_TIMING
        DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
                                                arch.timing_exit.tv32.tbu));
index 8053db02b85e276c91b1c52b82d0b80fda0b52c1..69fdd2322a6676c27e5e67fad4c5eefff3f9e1b5 100644 (file)
@@ -73,6 +73,7 @@ _GLOBAL(__setup_cpu_e500v2)
        mtlr    r4
        blr
 _GLOBAL(__setup_cpu_e500mc)
+       mr      r5, r4
        mflr    r4
        bl      __e500_icache_setup
        bl      __e500_dcache_setup
index f7bed44ee165594abd3c37bf3387dec2c29d9dff..1c06d297154532473527de1ee7a26081714f4fa0 100644 (file)
@@ -63,11 +63,13 @@ BEGIN_FTR_SECTION
        GET_PACA(r13)
 
 #ifdef CONFIG_KVM_BOOK3S_64_HV
-       lbz     r0,PACAPROCSTART(r13)
-       cmpwi   r0,0x80
-       bne     1f
-       li      r0,1
-       stb     r0,PACAPROCSTART(r13)
+       li      r0,KVM_HWTHREAD_IN_KERNEL
+       stb     r0,HSTATE_HWTHREAD_STATE(r13)
+       /* Order setting hwthread_state vs. testing hwthread_req */
+       sync
+       lbz     r0,HSTATE_HWTHREAD_REQ(r13)
+       cmpwi   r0,0
+       beq     1f
        b       kvm_start_guest
 1:
 #endif
index 22d608e8bb7d258cdda03a64aba858addd7c81fd..7a2e5e421abfd2f6da2331844471c9eedc3b7847 100644 (file)
@@ -248,10 +248,11 @@ _ENTRY(_start);
 
 interrupt_base:
        /* Critical Input Interrupt */
-       CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
+       CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)
 
        /* Machine Check Interrupt */
-       CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+       CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
+                          machine_check_exception)
        MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)
 
        /* Data Storage Interrupt */
@@ -261,7 +262,8 @@ interrupt_base:
        INSTRUCTION_STORAGE_EXCEPTION
 
        /* External Input Interrupt */
-       EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
+       EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, \
+                 do_IRQ, EXC_XFER_LITE)
 
        /* Alignment Interrupt */
        ALIGNMENT_EXCEPTION
@@ -273,29 +275,32 @@ interrupt_base:
 #ifdef CONFIG_PPC_FPU
        FP_UNAVAILABLE_EXCEPTION
 #else
-       EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
+                 FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
 #endif
        /* System Call Interrupt */
        START_EXCEPTION(SystemCall)
-       NORMAL_EXCEPTION_PROLOG
+       NORMAL_EXCEPTION_PROLOG(BOOKE_INTERRUPT_SYSCALL)
        EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 
        /* Auxiliary Processor Unavailable Interrupt */
-       EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
+                 AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
 
        /* Decrementer Interrupt */
        DECREMENTER_EXCEPTION
 
        /* Fixed Internal Timer Interrupt */
        /* TODO: Add FIT support */
-       EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, \
+                 unknown_exception, EXC_XFER_EE)
 
        /* Watchdog Timer Interrupt */
        /* TODO: Add watchdog support */
 #ifdef CONFIG_BOOKE_WDT
-       CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
+       CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException)
 #else
-       CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
+       CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception)
 #endif
 
        /* Data TLB Error Interrupt */
index 0e4175388f478af5456d7095a860a2d621e3ad3c..5f051eeb93a278cb33c823c722582fb4652566f5 100644 (file)
@@ -2,6 +2,9 @@
 #define __HEAD_BOOKE_H__
 
 #include <asm/ptrace.h>        /* for STACK_FRAME_REGS_MARKER */
+#include <asm/kvm_asm.h>
+#include <asm/kvm_booke_hv_asm.h>
+
 /*
  * Macros used for common Book-e exception handling
  */
  */
 #define THREAD_NORMSAVE(offset)        (THREAD_NORMSAVES + (offset * 4))
 
-#define NORMAL_EXCEPTION_PROLOG                                                     \
+#define NORMAL_EXCEPTION_PROLOG(intno)                                              \
        mtspr   SPRN_SPRG_WSCRATCH0, r10;       /* save one register */      \
        mfspr   r10, SPRN_SPRG_THREAD;                                       \
        stw     r11, THREAD_NORMSAVE(0)(r10);                                \
        stw     r13, THREAD_NORMSAVE(2)(r10);                                \
        mfcr    r13;                    /* save CR in r13 for now          */\
-       mfspr   r11,SPRN_SRR1;          /* check whether user or kernel    */\
-       andi.   r11,r11,MSR_PR;                                              \
+       mfspr   r11, SPRN_SRR1;                                              \
+       DO_KVM  BOOKE_INTERRUPT_##intno SPRN_SRR1;                           \
+       andi.   r11, r11, MSR_PR;       /* check whether user or kernel    */\
        mr      r11, r1;                                                     \
        beq     1f;                                                          \
        /* if from user, start at top of this thread's kernel stack */       \
  * registers as the normal prolog above. Instead we use a portion of the
  * critical/machine check exception stack at low physical addresses.
  */
-#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, exc_level_srr0, exc_level_srr1) \
+#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, intno, exc_level_srr0, exc_level_srr1) \
        mtspr   SPRN_SPRG_WSCRATCH_##exc_level,r8;                           \
        BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \
        stw     r9,GPR9(r8);            /* save various registers          */\
        stw     r10,GPR10(r8);                                               \
        stw     r11,GPR11(r8);                                               \
        stw     r9,_CCR(r8);            /* save CR on stack                */\
-       mfspr   r10,exc_level_srr1;     /* check whether user or kernel    */\
-       andi.   r10,r10,MSR_PR;                                              \
+       mfspr   r11,exc_level_srr1;     /* check whether user or kernel    */\
+       DO_KVM  BOOKE_INTERRUPT_##intno exc_level_srr1;                      \
+       andi.   r11,r11,MSR_PR;                                              \
        mfspr   r11,SPRN_SPRG_THREAD;   /* if from user, start at top of   */\
        lwz     r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
        addi    r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame    */\
        SAVE_4GPRS(3, r11);                                                  \
        SAVE_2GPRS(7, r11)
 
-#define CRITICAL_EXCEPTION_PROLOG \
-               EXC_LEVEL_EXCEPTION_PROLOG(CRIT, SPRN_CSRR0, SPRN_CSRR1)
+#define CRITICAL_EXCEPTION_PROLOG(intno) \
+               EXC_LEVEL_EXCEPTION_PROLOG(CRIT, intno, SPRN_CSRR0, SPRN_CSRR1)
 #define DEBUG_EXCEPTION_PROLOG \
-               EXC_LEVEL_EXCEPTION_PROLOG(DBG, SPRN_DSRR0, SPRN_DSRR1)
+               EXC_LEVEL_EXCEPTION_PROLOG(DBG, DEBUG, SPRN_DSRR0, SPRN_DSRR1)
 #define MCHECK_EXCEPTION_PROLOG \
-               EXC_LEVEL_EXCEPTION_PROLOG(MC, SPRN_MCSRR0, SPRN_MCSRR1)
+               EXC_LEVEL_EXCEPTION_PROLOG(MC, MACHINE_CHECK, \
+                       SPRN_MCSRR0, SPRN_MCSRR1)
+
+/*
+ * Guest Doorbell -- this is a bit odd in that uses GSRR0/1 despite
+ * being delivered to the host.  This exception can only happen
+ * inside a KVM guest -- so we just handle up to the DO_KVM rather
+ * than try to fit this into one of the existing prolog macros.
+ */
+#define GUEST_DOORBELL_EXCEPTION \
+       START_EXCEPTION(GuestDoorbell);                                      \
+       mtspr   SPRN_SPRG_WSCRATCH0, r10;       /* save one register */      \
+       mfspr   r10, SPRN_SPRG_THREAD;                                       \
+       stw     r11, THREAD_NORMSAVE(0)(r10);                                \
+       mfspr   r11, SPRN_SRR1;                                              \
+       stw     r13, THREAD_NORMSAVE(2)(r10);                                \
+       mfcr    r13;                    /* save CR in r13 for now          */\
+       DO_KVM  BOOKE_INTERRUPT_GUEST_DBELL SPRN_GSRR1;                      \
+       trap
 
 /*
  * Exception vectors.
@@ -181,16 +204,16 @@ label:
        .long   func;                                           \
        .long   ret_from_except_full
 
-#define EXCEPTION(n, label, hdlr, xfer)                                \
+#define EXCEPTION(n, intno, label, hdlr, xfer)                 \
        START_EXCEPTION(label);                                 \
-       NORMAL_EXCEPTION_PROLOG;                                \
+       NORMAL_EXCEPTION_PROLOG(intno);                         \
        addi    r3,r1,STACK_FRAME_OVERHEAD;                     \
        xfer(n, hdlr)
 
-#define CRITICAL_EXCEPTION(n, label, hdlr)                     \
-       START_EXCEPTION(label);                                 \
-       CRITICAL_EXCEPTION_PROLOG;                              \
-       addi    r3,r1,STACK_FRAME_OVERHEAD;                     \
+#define CRITICAL_EXCEPTION(n, intno, label, hdlr)                      \
+       START_EXCEPTION(label);                                         \
+       CRITICAL_EXCEPTION_PROLOG(intno);                               \
+       addi    r3,r1,STACK_FRAME_OVERHEAD;                             \
        EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
                          NOCOPY, crit_transfer_to_handler, \
                          ret_from_crit_exc)
@@ -302,7 +325,7 @@ label:
 
 #define DEBUG_CRIT_EXCEPTION                                                 \
        START_EXCEPTION(DebugCrit);                                           \
-       CRITICAL_EXCEPTION_PROLOG;                                            \
+       CRITICAL_EXCEPTION_PROLOG(DEBUG);                                     \
                                                                              \
        /*                                                                    \
         * If there is a single step or branch-taken exception in an          \
@@ -355,7 +378,7 @@ label:
 
 #define DATA_STORAGE_EXCEPTION                                               \
        START_EXCEPTION(DataStorage)                                          \
-       NORMAL_EXCEPTION_PROLOG;                                              \
+       NORMAL_EXCEPTION_PROLOG(DATA_STORAGE);                \
        mfspr   r5,SPRN_ESR;            /* Grab the ESR and save it */        \
        stw     r5,_ESR(r11);                                                 \
        mfspr   r4,SPRN_DEAR;           /* Grab the DEAR */                   \
@@ -363,7 +386,7 @@ label:
 
 #define INSTRUCTION_STORAGE_EXCEPTION                                        \
        START_EXCEPTION(InstructionStorage)                                   \
-       NORMAL_EXCEPTION_PROLOG;                                              \
+       NORMAL_EXCEPTION_PROLOG(INST_STORAGE);                \
        mfspr   r5,SPRN_ESR;            /* Grab the ESR and save it */        \
        stw     r5,_ESR(r11);                                                 \
        mr      r4,r12;                 /* Pass SRR0 as arg2 */               \
@@ -372,7 +395,7 @@ label:
 
 #define ALIGNMENT_EXCEPTION                                                  \
        START_EXCEPTION(Alignment)                                            \
-       NORMAL_EXCEPTION_PROLOG;                                              \
+       NORMAL_EXCEPTION_PROLOG(ALIGNMENT);                   \
        mfspr   r4,SPRN_DEAR;           /* Grab the DEAR and save it */       \
        stw     r4,_DEAR(r11);                                                \
        addi    r3,r1,STACK_FRAME_OVERHEAD;                                   \
@@ -380,7 +403,7 @@ label:
 
 #define PROGRAM_EXCEPTION                                                    \
        START_EXCEPTION(Program)                                              \
-       NORMAL_EXCEPTION_PROLOG;                                              \
+       NORMAL_EXCEPTION_PROLOG(PROGRAM);                     \
        mfspr   r4,SPRN_ESR;            /* Grab the ESR and save it */        \
        stw     r4,_ESR(r11);                                                 \
        addi    r3,r1,STACK_FRAME_OVERHEAD;                                   \
@@ -388,7 +411,7 @@ label:
 
 #define DECREMENTER_EXCEPTION                                                \
        START_EXCEPTION(Decrementer)                                          \
-       NORMAL_EXCEPTION_PROLOG;                                              \
+       NORMAL_EXCEPTION_PROLOG(DECREMENTER);                 \
        lis     r0,TSR_DIS@h;           /* Setup the DEC interrupt mask */    \
        mtspr   SPRN_TSR,r0;            /* Clear the DEC interrupt */         \
        addi    r3,r1,STACK_FRAME_OVERHEAD;                                   \
@@ -396,7 +419,7 @@ label:
 
 #define FP_UNAVAILABLE_EXCEPTION                                             \
        START_EXCEPTION(FloatingPointUnavailable)                             \
-       NORMAL_EXCEPTION_PROLOG;                                              \
+       NORMAL_EXCEPTION_PROLOG(FP_UNAVAIL);                  \
        beq     1f;                                                           \
        bl      load_up_fpu;            /* if from user, just load it up */   \
        b       fast_exception_return;                                        \
index de80e0f9a2bded4d802bdaf9c4f71a2e29add915..1f4434a3860885bc9fa33c821359731be7b7398d 100644 (file)
@@ -301,19 +301,20 @@ _ENTRY(__early_start)
 
 interrupt_base:
        /* Critical Input Interrupt */
-       CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
+       CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)
 
        /* Machine Check Interrupt */
 #ifdef CONFIG_E200
        /* no RFMCI, MCSRRs on E200 */
-       CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+       CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
+                          machine_check_exception)
 #else
        MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
 #endif
 
        /* Data Storage Interrupt */
        START_EXCEPTION(DataStorage)
-       NORMAL_EXCEPTION_PROLOG
+       NORMAL_EXCEPTION_PROLOG(DATA_STORAGE)
        mfspr   r5,SPRN_ESR             /* Grab the ESR, save it, pass arg3 */
        stw     r5,_ESR(r11)
        mfspr   r4,SPRN_DEAR            /* Grab the DEAR, save it, pass arg2 */
@@ -328,7 +329,7 @@ interrupt_base:
        INSTRUCTION_STORAGE_EXCEPTION
 
        /* External Input Interrupt */
-       EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
+       EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ, EXC_XFER_LITE)
 
        /* Alignment Interrupt */
        ALIGNMENT_EXCEPTION
@@ -342,32 +343,36 @@ interrupt_base:
 #else
 #ifdef CONFIG_E200
        /* E200 treats 'normal' floating point instructions as FP Unavail exception */
-       EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
+       EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
+                 program_check_exception, EXC_XFER_EE)
 #else
-       EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
+                 unknown_exception, EXC_XFER_EE)
 #endif
 #endif
 
        /* System Call Interrupt */
        START_EXCEPTION(SystemCall)
-       NORMAL_EXCEPTION_PROLOG
+       NORMAL_EXCEPTION_PROLOG(SYSCALL)
        EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 
        /* Auxiliary Processor Unavailable Interrupt */
-       EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \
+                 unknown_exception, EXC_XFER_EE)
 
        /* Decrementer Interrupt */
        DECREMENTER_EXCEPTION
 
        /* Fixed Internal Timer Interrupt */
        /* TODO: Add FIT support */
-       EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x3100, FIT, FixedIntervalTimer, \
+                 unknown_exception, EXC_XFER_EE)
 
        /* Watchdog Timer Interrupt */
 #ifdef CONFIG_BOOKE_WDT
-       CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
+       CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, WatchdogException)
 #else
-       CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
+       CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, unknown_exception)
 #endif
 
        /* Data TLB Error Interrupt */
@@ -375,10 +380,16 @@ interrupt_base:
        mtspr   SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
        mfspr   r10, SPRN_SPRG_THREAD
        stw     r11, THREAD_NORMSAVE(0)(r10)
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+       mfspr   r11, SPRN_SRR1
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
        stw     r12, THREAD_NORMSAVE(1)(r10)
        stw     r13, THREAD_NORMSAVE(2)(r10)
        mfcr    r13
        stw     r13, THREAD_NORMSAVE(3)(r10)
+       DO_KVM  BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
        mfspr   r10, SPRN_DEAR          /* Get faulting address */
 
        /* If we are faulting a kernel address, we have to use the
@@ -463,10 +474,16 @@ interrupt_base:
        mtspr   SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
        mfspr   r10, SPRN_SPRG_THREAD
        stw     r11, THREAD_NORMSAVE(0)(r10)
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+       mfspr   r11, SPRN_SRR1
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
        stw     r12, THREAD_NORMSAVE(1)(r10)
        stw     r13, THREAD_NORMSAVE(2)(r10)
        mfcr    r13
        stw     r13, THREAD_NORMSAVE(3)(r10)
+       DO_KVM  BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
        mfspr   r10, SPRN_SRR0          /* Get faulting address */
 
        /* If we are faulting a kernel address, we have to use the
@@ -538,36 +555,54 @@ interrupt_base:
 #ifdef CONFIG_SPE
        /* SPE Unavailable */
        START_EXCEPTION(SPEUnavailable)
-       NORMAL_EXCEPTION_PROLOG
+       NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
        bne     load_up_spe
        addi    r3,r1,STACK_FRAME_OVERHEAD
        EXC_XFER_EE_LITE(0x2010, KernelSPE)
 #else
-       EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
+                 unknown_exception, EXC_XFER_EE)
 #endif /* CONFIG_SPE */
 
        /* SPE Floating Point Data */
 #ifdef CONFIG_SPE
-       EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
+       EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData, \
+                 SPEFloatingPointException, EXC_XFER_EE);
 
        /* SPE Floating Point Round */
-       EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE)
+       EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
+                 SPEFloatingPointRoundException, EXC_XFER_EE)
 #else
-       EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
-       EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, \
+                 unknown_exception, EXC_XFER_EE)
+       EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
+                 unknown_exception, EXC_XFER_EE)
 #endif /* CONFIG_SPE */
 
        /* Performance Monitor */
-       EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
+       EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
+                 performance_monitor_exception, EXC_XFER_STD)
 
-       EXCEPTION(0x2070, Doorbell, doorbell_exception, EXC_XFER_STD)
+       EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception, EXC_XFER_STD)
 
-       CRITICAL_EXCEPTION(0x2080, CriticalDoorbell, unknown_exception)
+       CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \
+                          CriticalDoorbell, unknown_exception)
 
        /* Debug Interrupt */
        DEBUG_DEBUG_EXCEPTION
        DEBUG_CRIT_EXCEPTION
 
+       GUEST_DOORBELL_EXCEPTION
+
+       CRITICAL_EXCEPTION(0, GUEST_DBELL_CRIT, CriticalGuestDoorbell, \
+                          unknown_exception)
+
+       /* Hypercall */
+       EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception, EXC_XFER_EE)
+
+       /* Embedded Hypervisor Privilege */
+       EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_EE)
+
 /*
  * Local functions
  */
@@ -871,8 +906,31 @@ _GLOBAL(__setup_e500mc_ivors)
        mtspr   SPRN_IVOR36,r3
        li      r3,CriticalDoorbell@l
        mtspr   SPRN_IVOR37,r3
+
+       /*
+        * We only want to touch IVOR38-41 if we're running on hardware
+        * that supports category E.HV.  The architectural way to determine
+        * this is MMUCFG[LPIDSIZE].
+        */
+       mfspr   r3, SPRN_MMUCFG
+       andis.  r3, r3, MMUCFG_LPIDSIZE@h
+       beq     no_hv
+       li      r3,GuestDoorbell@l
+       mtspr   SPRN_IVOR38,r3
+       li      r3,CriticalGuestDoorbell@l
+       mtspr   SPRN_IVOR39,r3
+       li      r3,Hypercall@l
+       mtspr   SPRN_IVOR40,r3
+       li      r3,Ehvpriv@l
+       mtspr   SPRN_IVOR41,r3
+skip_hv_ivors:
        sync
        blr
+no_hv:
+       lwz     r3, CPU_SPEC_FEATURES(r5)
+       rlwinm  r3, r3, 0, ~CPU_FTR_EMB_HV
+       stw     r3, CPU_SPEC_FEATURES(r5)
+       b       skip_hv_ivors
 
 #ifdef CONFIG_SPE
 /*
index 0cdc9a3928391264e66f7f772b7a276c3e25c69c..7140d838339e1a9e2ef691ff4079ea022f4d964c 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/ppc-opcode.h>
 #include <asm/hw_irq.h>
+#include <asm/kvm_book3s_asm.h>
 
 #undef DEBUG
 
@@ -81,6 +82,12 @@ _GLOBAL(power7_idle)
        std     r9,_MSR(r1)
        std     r1,PACAR1(r13)
 
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+       /* Tell KVM we're napping */
+       li      r4,KVM_HWTHREAD_IN_NAP
+       stb     r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
+
        /* Magic NAP mode enter sequence */
        std     r0,0(r1)
        ptesync
index 786a2700ec2d541d3544d23635b72b40dc75e64b..3e4031581c65482bb9348b17c9509e630b710570 100644 (file)
@@ -85,8 +85,6 @@ EXPORT_SYMBOL(csum_tcpudp_magic);
 
 EXPORT_SYMBOL(__copy_tofrom_user);
 EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__strnlen_user);
 EXPORT_SYMBOL(copy_page);
 
 #if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
@@ -190,3 +188,7 @@ EXPORT_SYMBOL(__arch_hweight16);
 EXPORT_SYMBOL(__arch_hweight32);
 EXPORT_SYMBOL(__arch_hweight64);
 #endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+EXPORT_SYMBOL_GPL(mmu_psize_defs);
+#endif
index 651c5963662b68ed098c04d1dbdc512f06f2fac1..5c023c9cf16ee70a7a3b281af2ad9807028c3b13 100644 (file)
@@ -51,16 +51,6 @@ void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
         return (void __user *)newsp;
 }
 
-
-/*
- * Restore the user process's signal mask
- */
-void restore_sigmask(sigset_t *set)
-{
-       sigdelsetmask(set, ~_BLOCKABLE);
-       set_current_blocked(set);
-}
-
 static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
                                  int has_handler)
 {
@@ -114,30 +104,21 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
 
 static int do_signal(struct pt_regs *regs)
 {
-       sigset_t *oldset;
+       sigset_t *oldset = sigmask_to_save();
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
        int ret;
        int is32 = is_32bit_task();
 
-       if (current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK)
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
        /* Is there any syscall restart business here ? */
        check_syscall_restart(regs, &ka, signr > 0);
 
        if (signr <= 0) {
-               struct thread_info *ti = current_thread_info();
                /* No signal to deliver -- put the saved sigmask back */
-               if (ti->local_flags & _TLF_RESTORE_SIGMASK) {
-                       ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
-                       sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-               }
+               restore_saved_sigmask();
                regs->trap = 0;
                return 0;               /* no signals delivered */
        }
@@ -167,18 +148,7 @@ static int do_signal(struct pt_regs *regs)
 
        regs->trap = 0;
        if (ret) {
-               block_sigmask(&ka, signr);
-
-               /*
-                * A signal was successfully delivered; the saved sigmask is in
-                * its frame, and we can clear the TLF_RESTORE_SIGMASK flag.
-                */
-               current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
-
-               /*
-                * Let tracing know that we've done the handler setup.
-                */
-               tracehook_signal_handler(signr, &info, &ka, regs,
+               signal_delivered(signr, &info, &ka, regs,
                                         test_thread_flag(TIF_SINGLESTEP));
        }
 
@@ -193,8 +163,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
 
index 8dde973aaaf513ffd4c39a41f0f3bf2bead0d7db..e00acb4139346074ddfbda1c6553010614a2d78c 100644 (file)
 #ifndef _POWERPC_ARCH_SIGNAL_H
 #define _POWERPC_ARCH_SIGNAL_H
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
 
 extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
                                  size_t frame_size, int is_32);
-extern void restore_sigmask(sigset_t *set);
 
 extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
                           siginfo_t *info, sigset_t *oldset,
index 61f6aff25edc3f94010bda48710d6a1f27e65161..8b4c049aee20e8604fa83a9a017366213d0ab6d9 100644 (file)
@@ -919,7 +919,7 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
        if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
                return -EFAULT;
 #endif
-       restore_sigmask(&set);
+       set_current_blocked(&set);
        if (restore_user_regs(regs, mcp, sig))
                return -EFAULT;
 
@@ -1273,7 +1273,7 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
        set.sig[0] = sigctx.oldmask;
        set.sig[1] = sigctx._unused[3];
 #endif
-       restore_sigmask(&set);
+       set_current_blocked(&set);
 
        sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
        addr = sr;
index 2692efdb154e210a5aa787e021ed960cb3bde591..d183f8719a505ce18e4cc8cb06ee4df3e8fa8674 100644 (file)
@@ -335,7 +335,7 @@ int sys_swapcontext(struct ucontext __user *old_ctx,
 
        if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
                do_exit(SIGSEGV);
-       restore_sigmask(&set);
+       set_current_blocked(&set);
        if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext))
                do_exit(SIGSEGV);
 
@@ -364,7 +364,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
 
        if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
                goto badframe;
-       restore_sigmask(&set);
+       set_current_blocked(&set);
        if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
                goto badframe;
 
index 2c42cd72d0f5b1a33eb501da54a8e5edbb08e63e..99a995c2a3f2496da4e2e48e807b7354488ff2c7 100644 (file)
@@ -100,7 +100,7 @@ static int decrementer_set_next_event(unsigned long evt,
 static void decrementer_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *dev);
 
-static struct clock_event_device decrementer_clockevent = {
+struct clock_event_device decrementer_clockevent = {
        .name           = "decrementer",
        .rating         = 200,
        .irq            = 0,
@@ -108,6 +108,7 @@ static struct clock_event_device decrementer_clockevent = {
        .set_mode       = decrementer_set_mode,
        .features       = CLOCK_EVT_FEAT_ONESHOT,
 };
+EXPORT_SYMBOL(decrementer_clockevent);
 
 DEFINE_PER_CPU(u64, decrementers_next_tb);
 static DEFINE_PER_CPU(struct clock_event_device, decrementers);
index 7b612a76c70129488d24be5ed60bc56c523878f4..50e7dbc7356cd0ed574f3ac9fa61ea509fb26439 100644 (file)
 #include <asm/kvm_ppc.h>
 
 #include "44x_tlb.h"
+#include "booke.h"
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+       kvmppc_booke_vcpu_load(vcpu, cpu);
        kvmppc_44x_tlb_load(vcpu);
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
        kvmppc_44x_tlb_put(vcpu);
+       kvmppc_booke_vcpu_put(vcpu);
 }
 
 int kvmppc_core_check_processor_compat(void)
@@ -160,6 +163,15 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
        kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
 }
 
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+       return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+}
+
 static int __init kvmppc_44x_init(void)
 {
        int r;
index 549bb2c9a47a389eb997bc632faf98ef3c2751e3..c8c61578fdfc6e8420497854971d8da7b6d74598 100644 (file)
@@ -37,22 +37,19 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
 {
        int emulated = EMULATE_DONE;
-       int dcrn;
-       int ra;
-       int rb;
-       int rc;
-       int rs;
-       int rt;
-       int ws;
+       int dcrn = get_dcrn(inst);
+       int ra = get_ra(inst);
+       int rb = get_rb(inst);
+       int rc = get_rc(inst);
+       int rs = get_rs(inst);
+       int rt = get_rt(inst);
+       int ws = get_ws(inst);
 
        switch (get_op(inst)) {
        case 31:
                switch (get_xop(inst)) {
 
                case XOP_MFDCR:
-                       dcrn = get_dcrn(inst);
-                       rt = get_rt(inst);
-
                        /* The guest may access CPR0 registers to determine the timebase
                         * frequency, and it must know the real host frequency because it
                         * can directly access the timebase registers.
@@ -88,9 +85,6 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        break;
 
                case XOP_MTDCR:
-                       dcrn = get_dcrn(inst);
-                       rs = get_rs(inst);
-
                        /* emulate some access in kernel */
                        switch (dcrn) {
                        case DCRN_CPR0_CONFIG_ADDR:
@@ -108,17 +102,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        break;
 
                case XOP_TLBWE:
-                       ra = get_ra(inst);
-                       rs = get_rs(inst);
-                       ws = get_ws(inst);
                        emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
                        break;
 
                case XOP_TLBSX:
-                       rt = get_rt(inst);
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
-                       rc = get_rc(inst);
                        emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
                        break;
 
@@ -141,41 +128,41 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
        return emulated;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
        int emulated = EMULATE_DONE;
 
        switch (sprn) {
        case SPRN_PID:
-               kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
+               kvmppc_set_pid(vcpu, spr_val); break;
        case SPRN_MMUCR:
-               vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
+               vcpu->arch.mmucr = spr_val; break;
        case SPRN_CCR0:
-               vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
+               vcpu->arch.ccr0 = spr_val; break;
        case SPRN_CCR1:
-               vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
+               vcpu->arch.ccr1 = spr_val; break;
        default:
-               emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
+               emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
        }
 
        return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
        int emulated = EMULATE_DONE;
 
        switch (sprn) {
        case SPRN_PID:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break;
+               *spr_val = vcpu->arch.pid; break;
        case SPRN_MMUCR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break;
+               *spr_val = vcpu->arch.mmucr; break;
        case SPRN_CCR0:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break;
+               *spr_val = vcpu->arch.ccr0; break;
        case SPRN_CCR1:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break;
+               *spr_val = vcpu->arch.ccr1; break;
        default:
-               emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
+               emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
        }
 
        return emulated;
index 8f64709ae3319d3a26ac4701b97b9b5a182f2081..f4dacb9c57fac1a275291aedf1357c3070b6f750 100644 (file)
@@ -90,6 +90,9 @@ config KVM_BOOK3S_64_PR
        depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
        select KVM_BOOK3S_PR
 
+config KVM_BOOKE_HV
+       bool
+
 config KVM_440
        bool "KVM support for PowerPC 440 processors"
        depends on EXPERIMENTAL && 44x
@@ -106,7 +109,7 @@ config KVM_440
 
 config KVM_EXIT_TIMING
        bool "Detailed exit timing"
-       depends on KVM_440 || KVM_E500
+       depends on KVM_440 || KVM_E500V2 || KVM_E500MC
        ---help---
          Calculate elapsed time for every exit/enter cycle. A per-vcpu
          report is available in debugfs kvm/vm#_vcpu#_timing.
@@ -115,14 +118,29 @@ config KVM_EXIT_TIMING
 
          If unsure, say N.
 
-config KVM_E500
-       bool "KVM support for PowerPC E500 processors"
-       depends on EXPERIMENTAL && E500
+config KVM_E500V2
+       bool "KVM support for PowerPC E500v2 processors"
+       depends on EXPERIMENTAL && E500 && !PPC_E500MC
        select KVM
        select KVM_MMIO
        ---help---
          Support running unmodified E500 guest kernels in virtual machines on
-         E500 host processors.
+         E500v2 host processors.
+
+         This module provides access to the hardware capabilities through
+         a character device node named /dev/kvm.
+
+         If unsure, say N.
+
+config KVM_E500MC
+       bool "KVM support for PowerPC E500MC/E5500 processors"
+       depends on EXPERIMENTAL && PPC_E500MC
+       select KVM
+       select KVM_MMIO
+       select KVM_BOOKE_HV
+       ---help---
+         Support running unmodified E500MC/E5500 (32-bit) guest kernels in
+         virtual machines on E500MC/E5500 host processors.
 
          This module provides access to the hardware capabilities through
          a character device node named /dev/kvm.
index 3688aeecc4b24e1762a7289dc8d61a54a44c1ac7..c2a08636e6d4294a12e5148fd4a8dd8fe07dbe7d 100644 (file)
@@ -36,7 +36,17 @@ kvm-e500-objs := \
        e500.o \
        e500_tlb.o \
        e500_emulate.o
-kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs)
+kvm-objs-$(CONFIG_KVM_E500V2) := $(kvm-e500-objs)
+
+kvm-e500mc-objs := \
+       $(common-objs-y) \
+       booke.o \
+       booke_emulate.o \
+       bookehv_interrupts.o \
+       e500mc.o \
+       e500_tlb.o \
+       e500_emulate.o
+kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
 
 kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
        ../../../virt/kvm/coalesced_mmio.o \
@@ -44,6 +54,7 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
        book3s_paired_singles.o \
        book3s_pr.o \
        book3s_pr_papr.o \
+       book3s_64_vio_hv.o \
        book3s_emulate.o \
        book3s_interrupts.o \
        book3s_mmu_hpte.o \
@@ -68,6 +79,7 @@ kvm-book3s_64-module-objs := \
        powerpc.o \
        emulate.o \
        book3s.o \
+       book3s_64_vio.o \
        $(kvm-book3s_64-objs-y)
 
 kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
@@ -88,7 +100,8 @@ kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
 kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
 
 obj-$(CONFIG_KVM_440) += kvm.o
-obj-$(CONFIG_KVM_E500) += kvm.o
+obj-$(CONFIG_KVM_E500V2) += kvm.o
+obj-$(CONFIG_KVM_E500MC) += kvm.o
 obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
 obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
 
index 7d54f4ed6d96e3b5fa41f57044f066567131a195..3f2a8360c857f1aae94a13631b2eb099a96795a0 100644 (file)
@@ -258,7 +258,7 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
        return true;
 }
 
-void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
+int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 {
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned long old_pending = vcpu->arch.pending_exceptions;
@@ -283,12 +283,17 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 
        /* Tell the guest about our interrupt status */
        kvmppc_update_int_pending(vcpu, *pending, old_pending);
+
+       return 0;
 }
 
 pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
        ulong mp_pa = vcpu->arch.magic_page_pa;
 
+       if (!(vcpu->arch.shared->msr & MSR_SF))
+               mp_pa = (uint32_t)mp_pa;
+
        /* Magic page override */
        if (unlikely(mp_pa) &&
            unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
index c3beaeef3f60b41013c723197b2b898cf47f3d1b..80a57751758444a427797ecaff2a6f2e07a1ba03 100644 (file)
 
 /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
 #define MAX_LPID_970   63
-#define NR_LPIDS       (LPID_RSVD + 1)
-unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];
 
 long kvmppc_alloc_hpt(struct kvm *kvm)
 {
        unsigned long hpt;
-       unsigned long lpid;
+       long lpid;
        struct revmap_entry *rev;
        struct kvmppc_linear_info *li;
 
@@ -72,14 +70,9 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
        }
        kvm->arch.revmap = rev;
 
-       /* Allocate the guest's logical partition ID */
-       do {
-               lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
-               if (lpid >= NR_LPIDS) {
-                       pr_err("kvm_alloc_hpt: No LPIDs free\n");
-                       goto out_freeboth;
-               }
-       } while (test_and_set_bit(lpid, lpid_inuse));
+       lpid = kvmppc_alloc_lpid();
+       if (lpid < 0)
+               goto out_freeboth;
 
        kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
        kvm->arch.lpid = lpid;
@@ -96,7 +89,7 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
 
 void kvmppc_free_hpt(struct kvm *kvm)
 {
-       clear_bit(kvm->arch.lpid, lpid_inuse);
+       kvmppc_free_lpid(kvm->arch.lpid);
        vfree(kvm->arch.revmap);
        if (kvm->arch.hpt_li)
                kvm_release_hpt(kvm->arch.hpt_li);
@@ -171,8 +164,7 @@ int kvmppc_mmu_hv_init(void)
        if (!cpu_has_feature(CPU_FTR_HVMODE))
                return -EINVAL;
 
-       memset(lpid_inuse, 0, sizeof(lpid_inuse));
-
+       /* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
        if (cpu_has_feature(CPU_FTR_ARCH_206)) {
                host_lpid = mfspr(SPRN_LPID);   /* POWER7 */
                rsvd_lpid = LPID_RSVD;
@@ -181,9 +173,11 @@ int kvmppc_mmu_hv_init(void)
                rsvd_lpid = MAX_LPID_970;
        }
 
-       set_bit(host_lpid, lpid_inuse);
+       kvmppc_init_lpid(rsvd_lpid + 1);
+
+       kvmppc_claim_lpid(host_lpid);
        /* rsvd_lpid is reserved for use in partition switching */
-       set_bit(rsvd_lpid, lpid_inuse);
+       kvmppc_claim_lpid(rsvd_lpid);
 
        return 0;
 }
@@ -452,7 +446,7 @@ static int instruction_is_store(unsigned int instr)
 }
 
 static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                                 unsigned long gpa, int is_store)
+                                 unsigned long gpa, gva_t ea, int is_store)
 {
        int ret;
        u32 last_inst;
@@ -499,6 +493,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
         */
 
        vcpu->arch.paddr_accessed = gpa;
+       vcpu->arch.vaddr_accessed = ea;
        return kvmppc_emulate_mmio(run, vcpu);
 }
 
@@ -552,7 +547,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        /* No memslot means it's an emulated MMIO region */
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
                unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
-               return kvmppc_hv_emulate_mmio(run, vcpu, gpa,
+               return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
                                              dsisr & DSISR_ISSTORE);
        }
 
index f2e6e48ea463c9cdb404bb384ce672244f0e795f..56b983e7b7380c88a4199d68dc97fb2a6a0ec985 100644 (file)
@@ -90,8 +90,6 @@ slb_exit_skip_ ## num:
        or      r10, r10, r12
        slbie   r10
 
-       isync
-
        /* Fill SLB with our shadow */
 
        lbz     r12, SVCPU_SLB_MAX(r3)
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
new file mode 100644 (file)
index 0000000..72ffc89
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/hugetlb.h>
+#include <linux/list.h>
+#include <linux/anon_inodes.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/hvcall.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+#include <asm/kvm_host.h>
+#include <asm/udbg.h>
+
+#define TCES_PER_PAGE  (PAGE_SIZE / sizeof(u64))
+
+static long kvmppc_stt_npages(unsigned long window_size)
+{
+       return ALIGN((window_size >> SPAPR_TCE_SHIFT)
+                    * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
+}
+
+static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
+{
+       struct kvm *kvm = stt->kvm;
+       int i;
+
+       mutex_lock(&kvm->lock);
+       list_del(&stt->list);
+       for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
+               __free_page(stt->pages[i]);
+       kfree(stt);
+       mutex_unlock(&kvm->lock);
+
+       kvm_put_kvm(kvm);
+}
+
+static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
+       struct page *page;
+
+       if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
+               return VM_FAULT_SIGBUS;
+
+       page = stt->pages[vmf->pgoff];
+       get_page(page);
+       vmf->page = page;
+       return 0;
+}
+
+static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
+       .fault = kvm_spapr_tce_fault,
+};
+
+static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       vma->vm_ops = &kvm_spapr_tce_vm_ops;
+       return 0;
+}
+
+static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
+{
+       struct kvmppc_spapr_tce_table *stt = filp->private_data;
+
+       release_spapr_tce_table(stt);
+       return 0;
+}
+
+static struct file_operations kvm_spapr_tce_fops = {
+       .mmap           = kvm_spapr_tce_mmap,
+       .release        = kvm_spapr_tce_release,
+};
+
+long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+                                  struct kvm_create_spapr_tce *args)
+{
+       struct kvmppc_spapr_tce_table *stt = NULL;
+       long npages;
+       int ret = -ENOMEM;
+       int i;
+
+       /* Check this LIOBN hasn't been previously allocated */
+       list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+               if (stt->liobn == args->liobn)
+                       return -EBUSY;
+       }
+
+       npages = kvmppc_stt_npages(args->window_size);
+
+       stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
+                     GFP_KERNEL);
+       if (!stt)
+               goto fail;
+
+       stt->liobn = args->liobn;
+       stt->window_size = args->window_size;
+       stt->kvm = kvm;
+
+       for (i = 0; i < npages; i++) {
+               stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               if (!stt->pages[i])
+                       goto fail;
+       }
+
+       kvm_get_kvm(kvm);
+
+       mutex_lock(&kvm->lock);
+       list_add(&stt->list, &kvm->arch.spapr_tce_tables);
+
+       mutex_unlock(&kvm->lock);
+
+       return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+                               stt, O_RDWR);
+
+fail:
+       if (stt) {
+               for (i = 0; i < npages; i++)
+                       if (stt->pages[i])
+                               __free_page(stt->pages[i]);
+
+               kfree(stt);
+       }
+       return ret;
+}
index ea0f8c537c28d56429af2fe63c9f5cffbd49a928..30c2f3b134c66c496c371de6e4a8c3bd6011dfbe 100644 (file)
@@ -38,6 +38,9 @@
 
 #define TCES_PER_PAGE  (PAGE_SIZE / sizeof(u64))
 
+/* WARNING: This will be called in real-mode on HV KVM and virtual
+ *          mode on PR KVM
+ */
 long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba, unsigned long tce)
 {
index 135663a3e4fc945d4eaf9ef7706fa388feccfe07..b9a989dc76ccff5cd739f22910c4201c0dafb4e2 100644 (file)
@@ -87,6 +87,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
 {
        int emulated = EMULATE_DONE;
+       int rt = get_rt(inst);
+       int rs = get_rs(inst);
+       int ra = get_ra(inst);
+       int rb = get_rb(inst);
 
        switch (get_op(inst)) {
        case 19:
@@ -106,21 +110,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
        case 31:
                switch (get_xop(inst)) {
                case OP_31_XOP_MFMSR:
-                       kvmppc_set_gpr(vcpu, get_rt(inst),
-                                      vcpu->arch.shared->msr);
+                       kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
                        break;
                case OP_31_XOP_MTMSRD:
                {
-                       ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
+                       ulong rs_val = kvmppc_get_gpr(vcpu, rs);
                        if (inst & 0x10000) {
-                               vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
-                               vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
+                               ulong new_msr = vcpu->arch.shared->msr;
+                               new_msr &= ~(MSR_RI | MSR_EE);
+                               new_msr |= rs_val & (MSR_RI | MSR_EE);
+                               vcpu->arch.shared->msr = new_msr;
                        } else
-                               kvmppc_set_msr(vcpu, rs);
+                               kvmppc_set_msr(vcpu, rs_val);
                        break;
                }
                case OP_31_XOP_MTMSR:
-                       kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
+                       kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
                        break;
                case OP_31_XOP_MFSR:
                {
@@ -130,7 +135,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        if (vcpu->arch.mmu.mfsrin) {
                                u32 sr;
                                sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-                               kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+                               kvmppc_set_gpr(vcpu, rt, sr);
                        }
                        break;
                }
@@ -138,29 +143,29 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                {
                        int srnum;
 
-                       srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
+                       srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
                        if (vcpu->arch.mmu.mfsrin) {
                                u32 sr;
                                sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-                               kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+                               kvmppc_set_gpr(vcpu, rt, sr);
                        }
                        break;
                }
                case OP_31_XOP_MTSR:
                        vcpu->arch.mmu.mtsrin(vcpu,
                                (inst >> 16) & 0xf,
-                               kvmppc_get_gpr(vcpu, get_rs(inst)));
+                               kvmppc_get_gpr(vcpu, rs));
                        break;
                case OP_31_XOP_MTSRIN:
                        vcpu->arch.mmu.mtsrin(vcpu,
-                               (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
-                               kvmppc_get_gpr(vcpu, get_rs(inst)));
+                               (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
+                               kvmppc_get_gpr(vcpu, rs));
                        break;
                case OP_31_XOP_TLBIE:
                case OP_31_XOP_TLBIEL:
                {
                        bool large = (inst & 0x00200000) ? true : false;
-                       ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
+                       ulong addr = kvmppc_get_gpr(vcpu, rb);
                        vcpu->arch.mmu.tlbie(vcpu, addr, large);
                        break;
                }
@@ -171,15 +176,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                return EMULATE_FAIL;
 
                        vcpu->arch.mmu.slbmte(vcpu,
-                                       kvmppc_get_gpr(vcpu, get_rs(inst)),
-                                       kvmppc_get_gpr(vcpu, get_rb(inst)));
+                                       kvmppc_get_gpr(vcpu, rs),
+                                       kvmppc_get_gpr(vcpu, rb));
                        break;
                case OP_31_XOP_SLBIE:
                        if (!vcpu->arch.mmu.slbie)
                                return EMULATE_FAIL;
 
                        vcpu->arch.mmu.slbie(vcpu,
-                                       kvmppc_get_gpr(vcpu, get_rb(inst)));
+                                       kvmppc_get_gpr(vcpu, rb));
                        break;
                case OP_31_XOP_SLBIA:
                        if (!vcpu->arch.mmu.slbia)
@@ -191,22 +196,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        if (!vcpu->arch.mmu.slbmfee) {
                                emulated = EMULATE_FAIL;
                        } else {
-                               ulong t, rb;
+                               ulong t, rb_val;
 
-                               rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-                               t = vcpu->arch.mmu.slbmfee(vcpu, rb);
-                               kvmppc_set_gpr(vcpu, get_rt(inst), t);
+                               rb_val = kvmppc_get_gpr(vcpu, rb);
+                               t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
+                               kvmppc_set_gpr(vcpu, rt, t);
                        }
                        break;
                case OP_31_XOP_SLBMFEV:
                        if (!vcpu->arch.mmu.slbmfev) {
                                emulated = EMULATE_FAIL;
                        } else {
-                               ulong t, rb;
+                               ulong t, rb_val;
 
-                               rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-                               t = vcpu->arch.mmu.slbmfev(vcpu, rb);
-                               kvmppc_set_gpr(vcpu, get_rt(inst), t);
+                               rb_val = kvmppc_get_gpr(vcpu, rb);
+                               t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
+                               kvmppc_set_gpr(vcpu, rt, t);
                        }
                        break;
                case OP_31_XOP_DCBA:
@@ -214,17 +219,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        break;
                case OP_31_XOP_DCBZ:
                {
-                       ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-                       ulong ra = 0;
+                       ulong rb_val = kvmppc_get_gpr(vcpu, rb);
+                       ulong ra_val = 0;
                        ulong addr, vaddr;
                        u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
                        u32 dsisr;
                        int r;
 
-                       if (get_ra(inst))
-                               ra = kvmppc_get_gpr(vcpu, get_ra(inst));
+                       if (ra)
+                               ra_val = kvmppc_get_gpr(vcpu, ra);
 
-                       addr = (ra + rb) & ~31ULL;
+                       addr = (ra_val + rb_val) & ~31ULL;
                        if (!(vcpu->arch.shared->msr & MSR_SF))
                                addr &= 0xffffffff;
                        vaddr = addr;
@@ -313,10 +318,9 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
        return bat;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
        int emulated = EMULATE_DONE;
-       ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
        switch (sprn) {
        case SPRN_SDR1:
@@ -428,7 +432,7 @@ unprivileged:
        return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
        int emulated = EMULATE_DONE;
 
@@ -441,46 +445,46 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
                struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
 
                if (sprn % 2)
-                       kvmppc_set_gpr(vcpu, rt, bat->raw >> 32);
+                       *spr_val = bat->raw >> 32;
                else
-                       kvmppc_set_gpr(vcpu, rt, bat->raw);
+                       *spr_val = bat->raw;
 
                break;
        }
        case SPRN_SDR1:
                if (!spr_allowed(vcpu, PRIV_HYPER))
                        goto unprivileged;
-               kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
+               *spr_val = to_book3s(vcpu)->sdr1;
                break;
        case SPRN_DSISR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr);
+               *spr_val = vcpu->arch.shared->dsisr;
                break;
        case SPRN_DAR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar);
+               *spr_val = vcpu->arch.shared->dar;
                break;
        case SPRN_HIOR:
-               kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
+               *spr_val = to_book3s(vcpu)->hior;
                break;
        case SPRN_HID0:
-               kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
+               *spr_val = to_book3s(vcpu)->hid[0];
                break;
        case SPRN_HID1:
-               kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
+               *spr_val = to_book3s(vcpu)->hid[1];
                break;
        case SPRN_HID2:
        case SPRN_HID2_GEKKO:
-               kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
+               *spr_val = to_book3s(vcpu)->hid[2];
                break;
        case SPRN_HID4:
        case SPRN_HID4_GEKKO:
-               kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
+               *spr_val = to_book3s(vcpu)->hid[4];
                break;
        case SPRN_HID5:
-               kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
+               *spr_val = to_book3s(vcpu)->hid[5];
                break;
        case SPRN_CFAR:
        case SPRN_PURR:
-               kvmppc_set_gpr(vcpu, rt, 0);
+               *spr_val = 0;
                break;
        case SPRN_GQR0:
        case SPRN_GQR1:
@@ -490,8 +494,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
        case SPRN_GQR5:
        case SPRN_GQR6:
        case SPRN_GQR7:
-               kvmppc_set_gpr(vcpu, rt,
-                              to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
+               *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
                break;
        case SPRN_THRM1:
        case SPRN_THRM2:
@@ -506,7 +509,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
        case SPRN_PMC3_GEKKO:
        case SPRN_PMC4_GEKKO:
        case SPRN_WPAR_GEKKO:
-               kvmppc_set_gpr(vcpu, rt, 0);
+               *spr_val = 0;
                break;
        default:
 unprivileged:
@@ -565,23 +568,22 @@ u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
 ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
 {
        ulong dar = 0;
-       ulong ra;
+       ulong ra = get_ra(inst);
+       ulong rb = get_rb(inst);
 
        switch (get_op(inst)) {
        case OP_LFS:
        case OP_LFD:
        case OP_STFD:
        case OP_STFS:
-               ra = get_ra(inst);
                if (ra)
                        dar = kvmppc_get_gpr(vcpu, ra);
                dar += (s32)((s16)inst);
                break;
        case 31:
-               ra = get_ra(inst);
                if (ra)
                        dar = kvmppc_get_gpr(vcpu, ra);
-               dar += kvmppc_get_gpr(vcpu, get_rb(inst));
+               dar += kvmppc_get_gpr(vcpu, rb);
                break;
        default:
                printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
index 108d1f580177b5e0b860c02b221ef888d1c63676..c6af1d6238395947725a2e53ff0fbd6d6614b2e7 100644 (file)
@@ -60,12 +60,20 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+       struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
        local_paca->kvm_hstate.kvm_vcpu = vcpu;
-       local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore;
+       local_paca->kvm_hstate.kvm_vcore = vc;
+       if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
+               vc->stolen_tb += mftb() - vc->preempt_tb;
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
+       struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+       if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
+               vc->preempt_tb = mftb();
 }
 
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
@@ -134,6 +142,22 @@ static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
        vpa->yield_count = 1;
 }
 
+/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
+struct reg_vpa {
+       u32 dummy;
+       union {
+               u16 hword;
+               u32 word;
+       } length;
+};
+
+static int vpa_is_registered(struct kvmppc_vpa *vpap)
+{
+       if (vpap->update_pending)
+               return vpap->next_gpa != 0;
+       return vpap->pinned_addr != NULL;
+}
+
 static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
                                       unsigned long flags,
                                       unsigned long vcpuid, unsigned long vpa)
@@ -142,88 +166,182 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
        unsigned long len, nb;
        void *va;
        struct kvm_vcpu *tvcpu;
-       int err = H_PARAMETER;
+       int err;
+       int subfunc;
+       struct kvmppc_vpa *vpap;
 
        tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
        if (!tvcpu)
                return H_PARAMETER;
 
-       flags >>= 63 - 18;
-       flags &= 7;
-       if (flags == 0 || flags == 4)
-               return H_PARAMETER;
-       if (flags < 4) {
-               if (vpa & 0x7f)
+       subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
+       if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
+           subfunc == H_VPA_REG_SLB) {
+               /* Registering new area - address must be cache-line aligned */
+               if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
                        return H_PARAMETER;
-               if (flags >= 2 && !tvcpu->arch.vpa)
-                       return H_RESOURCE;
-               /* registering new area; convert logical addr to real */
+
+               /* convert logical addr to kernel addr and read length */
                va = kvmppc_pin_guest_page(kvm, vpa, &nb);
                if (va == NULL)
                        return H_PARAMETER;
-               if (flags <= 1)
-                       len = *(unsigned short *)(va + 4);
+               if (subfunc == H_VPA_REG_VPA)
+                       len = ((struct reg_vpa *)va)->length.hword;
                else
-                       len = *(unsigned int *)(va + 4);
-               if (len > nb)
-                       goto out_unpin;
-               switch (flags) {
-               case 1:         /* register VPA */
-                       if (len < 640)
-                               goto out_unpin;
-                       if (tvcpu->arch.vpa)
-                               kvmppc_unpin_guest_page(kvm, vcpu->arch.vpa);
-                       tvcpu->arch.vpa = va;
-                       init_vpa(vcpu, va);
-                       break;
-               case 2:         /* register DTL */
-                       if (len < 48)
-                               goto out_unpin;
-                       len -= len % 48;
-                       if (tvcpu->arch.dtl)
-                               kvmppc_unpin_guest_page(kvm, vcpu->arch.dtl);
-                       tvcpu->arch.dtl = va;
-                       tvcpu->arch.dtl_end = va + len;
+                       len = ((struct reg_vpa *)va)->length.word;
+               kvmppc_unpin_guest_page(kvm, va);
+
+               /* Check length */
+               if (len > nb || len < sizeof(struct reg_vpa))
+                       return H_PARAMETER;
+       } else {
+               vpa = 0;
+               len = 0;
+       }
+
+       err = H_PARAMETER;
+       vpap = NULL;
+       spin_lock(&tvcpu->arch.vpa_update_lock);
+
+       switch (subfunc) {
+       case H_VPA_REG_VPA:             /* register VPA */
+               if (len < sizeof(struct lppaca))
                        break;
-               case 3:         /* register SLB shadow buffer */
-                       if (len < 16)
-                               goto out_unpin;
-                       if (tvcpu->arch.slb_shadow)
-                               kvmppc_unpin_guest_page(kvm, vcpu->arch.slb_shadow);
-                       tvcpu->arch.slb_shadow = va;
+               vpap = &tvcpu->arch.vpa;
+               err = 0;
+               break;
+
+       case H_VPA_REG_DTL:             /* register DTL */
+               if (len < sizeof(struct dtl_entry))
                        break;
-               }
-       } else {
-               switch (flags) {
-               case 5:         /* unregister VPA */
-                       if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
-                               return H_RESOURCE;
-                       if (!tvcpu->arch.vpa)
-                               break;
-                       kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa);
-                       tvcpu->arch.vpa = NULL;
+               len -= len % sizeof(struct dtl_entry);
+
+               /* Check that they have previously registered a VPA */
+               err = H_RESOURCE;
+               if (!vpa_is_registered(&tvcpu->arch.vpa))
                        break;
-               case 6:         /* unregister DTL */
-                       if (!tvcpu->arch.dtl)
-                               break;
-                       kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl);
-                       tvcpu->arch.dtl = NULL;
+
+               vpap = &tvcpu->arch.dtl;
+               err = 0;
+               break;
+
+       case H_VPA_REG_SLB:             /* register SLB shadow buffer */
+               /* Check that they have previously registered a VPA */
+               err = H_RESOURCE;
+               if (!vpa_is_registered(&tvcpu->arch.vpa))
                        break;
-               case 7:         /* unregister SLB shadow buffer */
-                       if (!tvcpu->arch.slb_shadow)
-                               break;
-                       kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow);
-                       tvcpu->arch.slb_shadow = NULL;
+
+               vpap = &tvcpu->arch.slb_shadow;
+               err = 0;
+               break;
+
+       case H_VPA_DEREG_VPA:           /* deregister VPA */
+               /* Check they don't still have a DTL or SLB buf registered */
+               err = H_RESOURCE;
+               if (vpa_is_registered(&tvcpu->arch.dtl) ||
+                   vpa_is_registered(&tvcpu->arch.slb_shadow))
                        break;
-               }
+
+               vpap = &tvcpu->arch.vpa;
+               err = 0;
+               break;
+
+       case H_VPA_DEREG_DTL:           /* deregister DTL */
+               vpap = &tvcpu->arch.dtl;
+               err = 0;
+               break;
+
+       case H_VPA_DEREG_SLB:           /* deregister SLB shadow buffer */
+               vpap = &tvcpu->arch.slb_shadow;
+               err = 0;
+               break;
+       }
+
+       if (vpap) {
+               vpap->next_gpa = vpa;
+               vpap->len = len;
+               vpap->update_pending = 1;
        }
-       return H_SUCCESS;
 
- out_unpin:
-       kvmppc_unpin_guest_page(kvm, va);
+       spin_unlock(&tvcpu->arch.vpa_update_lock);
+
        return err;
 }
 
+static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
+{
+       void *va;
+       unsigned long nb;
+
+       vpap->update_pending = 0;
+       va = NULL;
+       if (vpap->next_gpa) {
+               va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+               if (nb < vpap->len) {
+                       /*
+                        * If it's now too short, it must be that userspace
+                        * has changed the mappings underlying guest memory,
+                        * so unregister the region.
+                        */
+                       kvmppc_unpin_guest_page(kvm, va);
+                       va = NULL;
+               }
+       }
+       if (vpap->pinned_addr)
+               kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
+       vpap->pinned_addr = va;
+       if (va)
+               vpap->pinned_end = va + vpap->len;
+}
+
+static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+
+       spin_lock(&vcpu->arch.vpa_update_lock);
+       if (vcpu->arch.vpa.update_pending) {
+               kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
+               init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
+       }
+       if (vcpu->arch.dtl.update_pending) {
+               kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
+               vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
+               vcpu->arch.dtl_index = 0;
+       }
+       if (vcpu->arch.slb_shadow.update_pending)
+               kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
+       spin_unlock(&vcpu->arch.vpa_update_lock);
+}
+
+static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
+                                   struct kvmppc_vcore *vc)
+{
+       struct dtl_entry *dt;
+       struct lppaca *vpa;
+       unsigned long old_stolen;
+
+       dt = vcpu->arch.dtl_ptr;
+       vpa = vcpu->arch.vpa.pinned_addr;
+       old_stolen = vcpu->arch.stolen_logged;
+       vcpu->arch.stolen_logged = vc->stolen_tb;
+       if (!dt || !vpa)
+               return;
+       memset(dt, 0, sizeof(struct dtl_entry));
+       dt->dispatch_reason = 7;
+       dt->processor_id = vc->pcpu + vcpu->arch.ptid;
+       dt->timebase = mftb();
+       dt->enqueue_to_dispatch_time = vc->stolen_tb - old_stolen;
+       dt->srr0 = kvmppc_get_pc(vcpu);
+       dt->srr1 = vcpu->arch.shregs.msr;
+       ++dt;
+       if (dt == vcpu->arch.dtl.pinned_end)
+               dt = vcpu->arch.dtl.pinned_addr;
+       vcpu->arch.dtl_ptr = dt;
+       /* order writing *dt vs. writing vpa->dtl_idx */
+       smp_wmb();
+       vpa->dtl_idx = ++vcpu->arch.dtl_index;
+}
+
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 {
        unsigned long req = kvmppc_get_gpr(vcpu, 3);
@@ -468,6 +586,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
        /* default to host PVR, since we can't spoof it */
        vcpu->arch.pvr = mfspr(SPRN_PVR);
        kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+       spin_lock_init(&vcpu->arch.vpa_update_lock);
 
        kvmppc_mmu_book3s_hv_init(vcpu);
 
@@ -486,6 +605,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
                        INIT_LIST_HEAD(&vcore->runnable_threads);
                        spin_lock_init(&vcore->lock);
                        init_waitqueue_head(&vcore->wq);
+                       vcore->preempt_tb = mftb();
                }
                kvm->arch.vcores[core] = vcore;
        }
@@ -498,6 +618,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
        ++vcore->num_threads;
        spin_unlock(&vcore->lock);
        vcpu->arch.vcore = vcore;
+       vcpu->arch.stolen_logged = vcore->stolen_tb;
 
        vcpu->arch.cpu_type = KVM_CPU_3S_64;
        kvmppc_sanity_check(vcpu);
@@ -512,12 +633,14 @@ out:
 
 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->arch.dtl)
-               kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl);
-       if (vcpu->arch.slb_shadow)
-               kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow);
-       if (vcpu->arch.vpa)
-               kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa);
+       spin_lock(&vcpu->arch.vpa_update_lock);
+       if (vcpu->arch.dtl.pinned_addr)
+               kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr);
+       if (vcpu->arch.slb_shadow.pinned_addr)
+               kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr);
+       if (vcpu->arch.vpa.pinned_addr)
+               kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr);
+       spin_unlock(&vcpu->arch.vpa_update_lock);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
@@ -569,6 +692,45 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
        list_del(&vcpu->arch.run_list);
 }
 
+static int kvmppc_grab_hwthread(int cpu)
+{
+       struct paca_struct *tpaca;
+       long timeout = 1000;
+
+       tpaca = &paca[cpu];
+
+       /* Ensure the thread won't go into the kernel if it wakes */
+       tpaca->kvm_hstate.hwthread_req = 1;
+
+       /*
+        * If the thread is already executing in the kernel (e.g. handling
+        * a stray interrupt), wait for it to get back to nap mode.
+        * The smp_mb() is to ensure that our setting of hwthread_req
+        * is visible before we look at hwthread_state, so if this
+        * races with the code at system_reset_pSeries and the thread
+        * misses our setting of hwthread_req, we are sure to see its
+        * setting of hwthread_state, and vice versa.
+        */
+       smp_mb();
+       while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
+               if (--timeout <= 0) {
+                       pr_err("KVM: couldn't grab cpu %d\n", cpu);
+                       return -EBUSY;
+               }
+               udelay(1);
+       }
+       return 0;
+}
+
+static void kvmppc_release_hwthread(int cpu)
+{
+       struct paca_struct *tpaca;
+
+       tpaca = &paca[cpu];
+       tpaca->kvm_hstate.hwthread_req = 0;
+       tpaca->kvm_hstate.kvm_vcpu = NULL;
+}
+
 static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
 {
        int cpu;
@@ -588,8 +750,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
        smp_wmb();
 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
        if (vcpu->arch.ptid) {
-               tpaca->cpu_start = 0x80;
-               wmb();
+               kvmppc_grab_hwthread(cpu);
                xics_wake_cpu(cpu);
                ++vc->n_woken;
        }
@@ -639,7 +800,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
        struct kvm_vcpu *vcpu, *vcpu0, *vnext;
        long ret;
        u64 now;
-       int ptid;
+       int ptid, i;
 
        /* don't start if any threads have a signal pending */
        list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
@@ -681,17 +842,29 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
        vc->nap_count = 0;
        vc->entry_exit_count = 0;
        vc->vcore_state = VCORE_RUNNING;
+       vc->stolen_tb += mftb() - vc->preempt_tb;
        vc->in_guest = 0;
        vc->pcpu = smp_processor_id();
        vc->napping_threads = 0;
-       list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+       list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                kvmppc_start_thread(vcpu);
+               if (vcpu->arch.vpa.update_pending ||
+                   vcpu->arch.slb_shadow.update_pending ||
+                   vcpu->arch.dtl.update_pending)
+                       kvmppc_update_vpas(vcpu);
+               kvmppc_create_dtl_entry(vcpu, vc);
+       }
+       /* Grab any remaining hw threads so they can't go into the kernel */
+       for (i = ptid; i < threads_per_core; ++i)
+               kvmppc_grab_hwthread(vc->pcpu + i);
 
        preempt_disable();
        spin_unlock(&vc->lock);
 
        kvm_guest_enter();
        __kvmppc_vcore_entry(NULL, vcpu0);
+       for (i = 0; i < threads_per_core; ++i)
+               kvmppc_release_hwthread(vc->pcpu + i);
 
        spin_lock(&vc->lock);
        /* disable sending of IPIs on virtual external irqs */
@@ -737,6 +910,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
        spin_lock(&vc->lock);
  out:
        vc->vcore_state = VCORE_INACTIVE;
+       vc->preempt_tb = mftb();
        list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
                                 arch.run_list) {
                if (vcpu->arch.ret != RESUME_GUEST) {
@@ -835,6 +1009,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                        spin_lock(&vc->lock);
                        continue;
                }
+               vc->runner = vcpu;
                n_ceded = 0;
                list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
                        n_ceded += v->arch.ceded;
@@ -854,6 +1029,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                                wake_up(&v->arch.cpu_run);
                        }
                }
+               vc->runner = NULL;
        }
 
        if (signal_pending(current)) {
@@ -917,115 +1093,6 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
        return r;
 }
 
-static long kvmppc_stt_npages(unsigned long window_size)
-{
-       return ALIGN((window_size >> SPAPR_TCE_SHIFT)
-                    * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
-}
-
-static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
-{
-       struct kvm *kvm = stt->kvm;
-       int i;
-
-       mutex_lock(&kvm->lock);
-       list_del(&stt->list);
-       for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
-               __free_page(stt->pages[i]);
-       kfree(stt);
-       mutex_unlock(&kvm->lock);
-
-       kvm_put_kvm(kvm);
-}
-
-static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-       struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
-       struct page *page;
-
-       if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
-               return VM_FAULT_SIGBUS;
-
-       page = stt->pages[vmf->pgoff];
-       get_page(page);
-       vmf->page = page;
-       return 0;
-}
-
-static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
-       .fault = kvm_spapr_tce_fault,
-};
-
-static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
-{
-       vma->vm_ops = &kvm_spapr_tce_vm_ops;
-       return 0;
-}
-
-static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
-{
-       struct kvmppc_spapr_tce_table *stt = filp->private_data;
-
-       release_spapr_tce_table(stt);
-       return 0;
-}
-
-static struct file_operations kvm_spapr_tce_fops = {
-       .mmap           = kvm_spapr_tce_mmap,
-       .release        = kvm_spapr_tce_release,
-};
-
-long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
-                                  struct kvm_create_spapr_tce *args)
-{
-       struct kvmppc_spapr_tce_table *stt = NULL;
-       long npages;
-       int ret = -ENOMEM;
-       int i;
-
-       /* Check this LIOBN hasn't been previously allocated */
-       list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-               if (stt->liobn == args->liobn)
-                       return -EBUSY;
-       }
-
-       npages = kvmppc_stt_npages(args->window_size);
-
-       stt = kzalloc(sizeof(*stt) + npages* sizeof(struct page *),
-                     GFP_KERNEL);
-       if (!stt)
-               goto fail;
-
-       stt->liobn = args->liobn;
-       stt->window_size = args->window_size;
-       stt->kvm = kvm;
-
-       for (i = 0; i < npages; i++) {
-               stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
-               if (!stt->pages[i])
-                       goto fail;
-       }
-
-       kvm_get_kvm(kvm);
-
-       mutex_lock(&kvm->lock);
-       list_add(&stt->list, &kvm->arch.spapr_tce_tables);
-
-       mutex_unlock(&kvm->lock);
-
-       return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
-                               stt, O_RDWR);
-
-fail:
-       if (stt) {
-               for (i = 0; i < npages; i++)
-                       if (stt->pages[i])
-                               __free_page(stt->pages[i]);
-
-               kfree(stt);
-       }
-       return ret;
-}
 
 /* Work out RMLS (real mode limit selector) field value for a given RMA size.
    Assumes POWER7 or PPC970. */
@@ -1108,6 +1175,38 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
        return fd;
 }
 
+static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
+                                    int linux_psize)
+{
+       struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];
+
+       if (!def->shift)
+               return;
+       (*sps)->page_shift = def->shift;
+       (*sps)->slb_enc = def->sllp;
+       (*sps)->enc[0].page_shift = def->shift;
+       (*sps)->enc[0].pte_enc = def->penc;
+       (*sps)++;
+}
+
+int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+{
+       struct kvm_ppc_one_seg_page_size *sps;
+
+       info->flags = KVM_PPC_PAGE_SIZES_REAL;
+       if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
+               info->flags |= KVM_PPC_1T_SEGMENTS;
+       info->slb_size = mmu_slb_size;
+
+       /* We only support these sizes for now, and no muti-size segments */
+       sps = &info->sps[0];
+       kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
+       kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
+       kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
+
+       return 0;
+}
+
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
@@ -1404,12 +1503,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
        return EMULATE_FAIL;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
        return EMULATE_FAIL;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
        return EMULATE_FAIL;
 }
index d3fb4df02c419f48dfd8d3205c042cc04ff94426..84035a528c80e7ef5261c3dc1821ad8d0b82a364 100644 (file)
@@ -68,19 +68,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        rotldi  r10,r10,16
        mtmsrd  r10,1
 
-       /* Save host PMU registers and load guest PMU registers */
+       /* Save host PMU registers */
        /* R4 is live here (vcpu pointer) but not r3 or r5 */
        li      r3, 1
        sldi    r3, r3, 31              /* MMCR0_FC (freeze counters) bit */
        mfspr   r7, SPRN_MMCR0          /* save MMCR0 */
        mtspr   SPRN_MMCR0, r3          /* freeze all counters, disable interrupts */
+       mfspr   r6, SPRN_MMCRA
+BEGIN_FTR_SECTION
+       /* On P7, clear MMCRA in order to disable SDAR updates */
+       li      r5, 0
+       mtspr   SPRN_MMCRA, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        isync
        ld      r3, PACALPPACAPTR(r13)  /* is the host using the PMU? */
        lbz     r5, LPPACA_PMCINUSE(r3)
        cmpwi   r5, 0
        beq     31f                     /* skip if not */
        mfspr   r5, SPRN_MMCR1
-       mfspr   r6, SPRN_MMCRA
        std     r7, HSTATE_MMCR(r13)
        std     r5, HSTATE_MMCR + 8(r13)
        std     r6, HSTATE_MMCR + 16(r13)
index b70bf22a3ff39615c90d82c78764f897d4fa1677..a84aafce2a129e311a0943be0975db3823b88402 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/hvcall.h>
 #include <asm/asm-offsets.h>
 #include <asm/exception-64s.h>
+#include <asm/kvm_book3s_asm.h>
 
 /*****************************************************************************
  *                                                                           *
@@ -82,6 +83,7 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
 
 #define XICS_XIRR              4
 #define XICS_QIRR              0xc
+#define XICS_IPI               2       /* interrupt source # for IPIs */
 
 /*
  * We come in here when wakened from nap mode on a secondary hw thread.
@@ -94,26 +96,54 @@ kvm_start_guest:
        subi    r1,r1,STACK_FRAME_OVERHEAD
        ld      r2,PACATOC(r13)
 
-       /* were we napping due to cede? */
-       lbz     r0,HSTATE_NAPPING(r13)
-       cmpwi   r0,0
-       bne     kvm_end_cede
+       li      r0,KVM_HWTHREAD_IN_KVM
+       stb     r0,HSTATE_HWTHREAD_STATE(r13)
 
-       /* get vcpu pointer */
-       ld      r4, HSTATE_KVM_VCPU(r13)
+       /* NV GPR values from power7_idle() will no longer be valid */
+       li      r0,1
+       stb     r0,PACA_NAPSTATELOST(r13)
 
-       /* We got here with an IPI; clear it */
-       ld      r5, HSTATE_XICS_PHYS(r13)
-       li      r0, 0xff
-       li      r6, XICS_QIRR
-       li      r7, XICS_XIRR
-       lwzcix  r8, r5, r7              /* ack the interrupt */
+       /* get vcpu pointer, NULL if we have no vcpu to run */
+       ld      r4,HSTATE_KVM_VCPU(r13)
+       cmpdi   cr1,r4,0
+
+       /* Check the wake reason in SRR1 to see why we got here */
+       mfspr   r3,SPRN_SRR1
+       rlwinm  r3,r3,44-31,0x7         /* extract wake reason field */
+       cmpwi   r3,4                    /* was it an external interrupt? */
+       bne     27f
+
+       /*
+        * External interrupt - for now assume it is an IPI, since we
+        * should never get any other interrupts sent to offline threads.
+        * Only do this for secondary threads.
+        */
+       beq     cr1,25f
+       lwz     r3,VCPU_PTID(r4)
+       cmpwi   r3,0
+       beq     27f
+25:    ld      r5,HSTATE_XICS_PHYS(r13)
+       li      r0,0xff
+       li      r6,XICS_QIRR
+       li      r7,XICS_XIRR
+       lwzcix  r8,r5,r7                /* get and ack the interrupt */
        sync
-       stbcix  r0, r5, r6              /* clear it */
-       stwcix  r8, r5, r7              /* EOI it */
+       clrldi. r9,r8,40                /* get interrupt source ID. */
+       beq     27f                     /* none there? */
+       cmpwi   r9,XICS_IPI
+       bne     26f
+       stbcix  r0,r5,r6                /* clear IPI */
+26:    stwcix  r8,r5,r7                /* EOI the interrupt */
 
-       /* NV GPR values from power7_idle() will no longer be valid */
-       stb     r0, PACA_NAPSTATELOST(r13)
+27:    /* XXX should handle hypervisor maintenance interrupts etc. here */
+
+       /* if we have no vcpu to run, go back to sleep */
+       beq     cr1,kvm_no_guest
+
+       /* were we napping due to cede? */
+       lbz     r0,HSTATE_NAPPING(r13)
+       cmpwi   r0,0
+       bne     kvm_end_cede
 
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
@@ -129,24 +159,15 @@ kvmppc_hv_entry:
        mflr    r0
        std     r0, HSTATE_VMHANDLER(r13)
 
-       ld      r14, VCPU_GPR(r14)(r4)
-       ld      r15, VCPU_GPR(r15)(r4)
-       ld      r16, VCPU_GPR(r16)(r4)
-       ld      r17, VCPU_GPR(r17)(r4)
-       ld      r18, VCPU_GPR(r18)(r4)
-       ld      r19, VCPU_GPR(r19)(r4)
-       ld      r20, VCPU_GPR(r20)(r4)
-       ld      r21, VCPU_GPR(r21)(r4)
-       ld      r22, VCPU_GPR(r22)(r4)
-       ld      r23, VCPU_GPR(r23)(r4)
-       ld      r24, VCPU_GPR(r24)(r4)
-       ld      r25, VCPU_GPR(r25)(r4)
-       ld      r26, VCPU_GPR(r26)(r4)
-       ld      r27, VCPU_GPR(r27)(r4)
-       ld      r28, VCPU_GPR(r28)(r4)
-       ld      r29, VCPU_GPR(r29)(r4)
-       ld      r30, VCPU_GPR(r30)(r4)
-       ld      r31, VCPU_GPR(r31)(r4)
+       /* Set partition DABR */
+       /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
+       li      r5,3
+       ld      r6,VCPU_DABR(r4)
+       mtspr   SPRN_DABRX,r5
+       mtspr   SPRN_DABR,r6
+BEGIN_FTR_SECTION
+       isync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
        /* Load guest PMU registers */
        /* R4 is live here (vcpu pointer) */
@@ -185,6 +206,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        /* Load up FP, VMX and VSX registers */
        bl      kvmppc_load_fp
 
+       ld      r14, VCPU_GPR(r14)(r4)
+       ld      r15, VCPU_GPR(r15)(r4)
+       ld      r16, VCPU_GPR(r16)(r4)
+       ld      r17, VCPU_GPR(r17)(r4)
+       ld      r18, VCPU_GPR(r18)(r4)
+       ld      r19, VCPU_GPR(r19)(r4)
+       ld      r20, VCPU_GPR(r20)(r4)
+       ld      r21, VCPU_GPR(r21)(r4)
+       ld      r22, VCPU_GPR(r22)(r4)
+       ld      r23, VCPU_GPR(r23)(r4)
+       ld      r24, VCPU_GPR(r24)(r4)
+       ld      r25, VCPU_GPR(r25)(r4)
+       ld      r26, VCPU_GPR(r26)(r4)
+       ld      r27, VCPU_GPR(r27)(r4)
+       ld      r28, VCPU_GPR(r28)(r4)
+       ld      r29, VCPU_GPR(r29)(r4)
+       ld      r30, VCPU_GPR(r30)(r4)
+       ld      r31, VCPU_GPR(r31)(r4)
+
 BEGIN_FTR_SECTION
        /* Switch DSCR to guest value */
        ld      r5, VCPU_DSCR(r4)
@@ -226,12 +266,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        mtspr   SPRN_DAR, r5
        mtspr   SPRN_DSISR, r6
 
-       /* Set partition DABR */
-       li      r5,3
-       ld      r6,VCPU_DABR(r4)
-       mtspr   SPRN_DABRX,r5
-       mtspr   SPRN_DABR,r6
-
 BEGIN_FTR_SECTION
        /* Restore AMR and UAMOR, set AMOR to all 1s */
        ld      r5,VCPU_AMR(r4)
@@ -925,12 +959,6 @@ BEGIN_FTR_SECTION
        mtspr   SPRN_AMR,r6
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
-       /* Restore host DABR and DABRX */
-       ld      r5,HSTATE_DABR(r13)
-       li      r6,7
-       mtspr   SPRN_DABR,r5
-       mtspr   SPRN_DABRX,r6
-
        /* Switch DSCR back to host value */
 BEGIN_FTR_SECTION
        mfspr   r8, SPRN_DSCR
@@ -969,6 +997,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        std     r5, VCPU_SPRG2(r9)
        std     r6, VCPU_SPRG3(r9)
 
+       /* save FP state */
+       mr      r3, r9
+       bl      .kvmppc_save_fp
+
        /* Increment yield count if they have a VPA */
        ld      r8, VCPU_VPA(r9)        /* do they have a VPA? */
        cmpdi   r8, 0
@@ -983,6 +1015,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        sldi    r3, r3, 31              /* MMCR0_FC (freeze counters) bit */
        mfspr   r4, SPRN_MMCR0          /* save MMCR0 */
        mtspr   SPRN_MMCR0, r3          /* freeze all counters, disable ints */
+       mfspr   r6, SPRN_MMCRA
+BEGIN_FTR_SECTION
+       /* On P7, clear MMCRA in order to disable SDAR updates */
+       li      r7, 0
+       mtspr   SPRN_MMCRA, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        isync
        beq     21f                     /* if no VPA, save PMU stuff anyway */
        lbz     r7, LPPACA_PMCINUSE(r8)
@@ -991,7 +1029,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        std     r3, VCPU_MMCR(r9)       /* if not, set saved MMCR0 to FC */
        b       22f
 21:    mfspr   r5, SPRN_MMCR1
-       mfspr   r6, SPRN_MMCRA
        std     r4, VCPU_MMCR(r9)
        std     r5, VCPU_MMCR + 8(r9)
        std     r6, VCPU_MMCR + 16(r9)
@@ -1016,17 +1053,20 @@ BEGIN_FTR_SECTION
        stw     r11, VCPU_PMC + 28(r9)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 22:
-       /* save FP state */
-       mr      r3, r9
-       bl      .kvmppc_save_fp
 
        /* Secondary threads go off to take a nap on POWER7 */
 BEGIN_FTR_SECTION
-       lwz     r0,VCPU_PTID(r3)
+       lwz     r0,VCPU_PTID(r9)
        cmpwi   r0,0
        bne     secondary_nap
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
+       /* Restore host DABR and DABRX */
+       ld      r5,HSTATE_DABR(r13)
+       li      r6,7
+       mtspr   SPRN_DABR,r5
+       mtspr   SPRN_DABRX,r6
+
        /*
         * Reload DEC.  HDEC interrupts were disabled when
         * we reloaded the host's LPCR value.
@@ -1363,7 +1403,12 @@ bounce_ext_interrupt:
 
 _GLOBAL(kvmppc_h_set_dabr)
        std     r4,VCPU_DABR(r3)
-       mtspr   SPRN_DABR,r4
+       /* Work around P7 bug where DABR can get corrupted on mtspr */
+1:     mtspr   SPRN_DABR,r4
+       mfspr   r5, SPRN_DABR
+       cmpd    r4, r5
+       bne     1b
+       isync
        li      r3,0
        blr
 
@@ -1445,8 +1490,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
         * Take a nap until a decrementer or external interrupt occurs,
         * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
         */
-       li      r0,0x80
-       stb     r0,PACAPROCSTART(r13)
+       li      r0,1
+       stb     r0,HSTATE_HWTHREAD_REQ(r13)
        mfspr   r5,SPRN_LPCR
        ori     r5,r5,LPCR_PECE0 | LPCR_PECE1
        mtspr   SPRN_LPCR,r5
@@ -1463,26 +1508,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 kvm_end_cede:
        /* Woken by external or decrementer interrupt */
        ld      r1, HSTATE_HOST_R1(r13)
-       ld      r2, PACATOC(r13)
 
-       /* If we're a secondary thread and we got here by an IPI, ack it */
-       ld      r4,HSTATE_KVM_VCPU(r13)
-       lwz     r3,VCPU_PTID(r4)
-       cmpwi   r3,0
-       beq     27f
-       mfspr   r3,SPRN_SRR1
-       rlwinm  r3,r3,44-31,0x7         /* extract wake reason field */
-       cmpwi   r3,4                    /* was it an external interrupt? */
-       bne     27f
-       ld      r5, HSTATE_XICS_PHYS(r13)
-       li      r0,0xff
-       li      r6,XICS_QIRR
-       li      r7,XICS_XIRR
-       lwzcix  r8,r5,r7                /* ack the interrupt */
-       sync
-       stbcix  r0,r5,r6                /* clear it */
-       stwcix  r8,r5,r7                /* EOI it */
-27:
        /* load up FP state */
        bl      kvmppc_load_fp
 
@@ -1580,12 +1606,17 @@ secondary_nap:
        stwcx.  r3, 0, r4
        bne     51b
 
+kvm_no_guest:
+       li      r0, KVM_HWTHREAD_IN_NAP
+       stb     r0, HSTATE_HWTHREAD_STATE(r13)
+       li      r0, 0
+       std     r0, HSTATE_KVM_VCPU(r13)
+
        li      r3, LPCR_PECE0
        mfspr   r4, SPRN_LPCR
        rlwimi  r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
        mtspr   SPRN_LPCR, r4
        isync
-       li      r0, 0
        std     r0, HSTATE_SCRATCH0(r13)
        ptesync
        ld      r0, HSTATE_SCRATCH0(r13)
@@ -1599,8 +1630,8 @@ secondary_nap:
  * r3 = vcpu pointer
  */
 _GLOBAL(kvmppc_save_fp)
-       mfmsr   r9
-       ori     r8,r9,MSR_FP
+       mfmsr   r5
+       ori     r8,r5,MSR_FP
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
        oris    r8,r8,MSR_VEC@h
@@ -1649,7 +1680,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
        mfspr   r6,SPRN_VRSAVE
        stw     r6,VCPU_VRSAVE(r3)
-       mtmsrd  r9
+       mtmsrd  r5
        isync
        blr
 
index 7759053d391b87298a4888c1cbbb7463ee6e8431..a1baec340f7ee3e0492d3fcd86e14e41a8811ccf 100644 (file)
@@ -120,6 +120,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
                        kvm_vcpu_block(vcpu);
+                       clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->stat.halt_wakeup++;
 
                        /* Unset POW bit after we woke up */
@@ -144,6 +145,21 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
                }
        }
 
+       /*
+        * When switching from 32 to 64-bit, we may have a stale 32-bit
+        * magic page around, we need to flush it. Typically 32-bit magic
+        * page will be instanciated when calling into RTAS. Note: We
+        * assume that such transition only happens while in kernel mode,
+        * ie, we never transition from user 32-bit to kernel 64-bit with
+        * a 32-bit magic page around.
+        */
+       if (vcpu->arch.magic_page_pa &&
+           !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
+               /* going from RTAS to normal kernel code */
+               kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
+                                    ~0xFFFUL);
+       }
+
        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
@@ -251,6 +267,9 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
        ulong mp_pa = vcpu->arch.magic_page_pa;
 
+       if (!(vcpu->arch.shared->msr & MSR_SF))
+               mp_pa = (uint32_t)mp_pa;
+
        if (unlikely(mp_pa) &&
            unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
                return 1;
@@ -351,6 +370,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                /* MMIO */
                vcpu->stat.mmio_exits++;
                vcpu->arch.paddr_accessed = pte.raddr;
+               vcpu->arch.vaddr_accessed = pte.eaddr;
                r = kvmppc_emulate_mmio(run, vcpu);
                if ( r == RESUME_HOST_NV )
                        r = RESUME_HOST;
@@ -528,6 +548,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;
 
+       /* We get here with MSR.EE=0, so enable it to be a nice citizen */
+       __hard_irq_enable();
+
        trace_kvm_book3s_exit(exit_nr, vcpu);
        preempt_enable();
        kvm_resched(vcpu);
@@ -617,10 +640,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                break;
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_DECREMENTER:
+       case BOOK3S_INTERRUPT_HV_DECREMENTER:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
+       case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
+       case BOOK3S_INTERRUPT_EXTERNAL_HV:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
@@ -628,6 +654,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
+       case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
        {
                enum emulation_result er;
                struct kvmppc_book3s_shadow_vcpu *svcpu;
@@ -1131,6 +1158,31 @@ out:
        return r;
 }
 
+#ifdef CONFIG_PPC64
+int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+{
+       /* No flags */
+       info->flags = 0;
+
+       /* SLB is always 64 entries */
+       info->slb_size = 64;
+
+       /* Standard 4k base page size segment */
+       info->sps[0].page_shift = 12;
+       info->sps[0].slb_enc = 0;
+       info->sps[0].enc[0].page_shift = 12;
+       info->sps[0].enc[0].pte_enc = 0;
+
+       /* Standard 16M large page size segment */
+       info->sps[1].page_shift = 24;
+       info->sps[1].slb_enc = SLB_VSID_L;
+       info->sps[1].enc[0].page_shift = 24;
+       info->sps[1].enc[0].pte_enc = 0;
+
+       return 0;
+}
+#endif /* CONFIG_PPC64 */
+
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
 {
@@ -1144,11 +1196,18 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
 
 int kvmppc_core_init_vm(struct kvm *kvm)
 {
+#ifdef CONFIG_PPC64
+       INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+#endif
+
        return 0;
 }
 
 void kvmppc_core_destroy_vm(struct kvm *kvm)
 {
+#ifdef CONFIG_PPC64
+       WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
+#endif
 }
 
 static int kvmppc_book3s_init(void)
index b9589324797baf7d17abdc3acdae90036bb91b95..3ff9013d6e7914e59f2919f5ee5bf685ad7d7797 100644 (file)
@@ -15,6 +15,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/anon_inodes.h>
+
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -98,6 +100,83 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
        return EMULATE_DONE;
 }
 
+/* Request defs for kvmppc_h_pr_bulk_remove() */
+#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
+#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
+#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
+#define   H_BULK_REMOVE_END            0xc000000000000000ULL
+#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
+#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
+#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
+#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
+#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
+#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
+#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
+#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
+#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
+#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
+#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL
+#define H_BULK_REMOVE_MAX_BATCH        4
+
+static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
+{
+       int i;
+       int paramnr = 4;
+       int ret = H_SUCCESS;
+
+       for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
+               unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
+               unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
+               unsigned long pteg, rb, flags;
+               unsigned long pte[2];
+               unsigned long v = 0;
+
+               if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
+                       break; /* Exit success */
+               } else if ((tsh & H_BULK_REMOVE_TYPE) !=
+                          H_BULK_REMOVE_REQUEST) {
+                       ret = H_PARAMETER;
+                       break; /* Exit fail */
+               }
+
+               tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
+               tsh |= H_BULK_REMOVE_RESPONSE;
+
+               if ((tsh & H_BULK_REMOVE_ANDCOND) &&
+                   (tsh & H_BULK_REMOVE_AVPN)) {
+                       tsh |= H_BULK_REMOVE_PARM;
+                       kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
+                       ret = H_PARAMETER;
+                       break; /* Exit fail */
+               }
+
+               pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
+               copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+
+               /* tsl = AVPN */
+               flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;
+
+               if ((pte[0] & HPTE_V_VALID) == 0 ||
+                   ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
+                   ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
+                       tsh |= H_BULK_REMOVE_NOT_FOUND;
+               } else {
+                       /* Splat the pteg in (userland) hpt */
+                       copy_to_user((void __user *)pteg, &v, sizeof(v));
+
+                       rb = compute_tlbie_rb(pte[0], pte[1],
+                                             tsh & H_BULK_REMOVE_PTEX);
+                       vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
+                       tsh |= H_BULK_REMOVE_SUCCESS;
+                       tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
+               }
+               kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
+       }
+       kvmppc_set_gpr(vcpu, 3, ret);
+
+       return EMULATE_DONE;
+}
+
 static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
 {
        unsigned long flags = kvmppc_get_gpr(vcpu, 4);
@@ -134,6 +213,20 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
        return EMULATE_DONE;
 }
 
+static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
+{
+       unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
+       unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
+       unsigned long tce = kvmppc_get_gpr(vcpu, 6);
+       long rc;
+
+       rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
+       if (rc == H_TOO_HARD)
+               return EMULATE_FAIL;
+       kvmppc_set_gpr(vcpu, 3, rc);
+       return EMULATE_DONE;
+}
+
 int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
 {
        switch (cmd) {
@@ -144,12 +237,12 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
        case H_PROTECT:
                return kvmppc_h_pr_protect(vcpu);
        case H_BULK_REMOVE:
-               /* We just flush all PTEs, so user space can
-                  handle the HPT modifications */
-               kvmppc_mmu_pte_flush(vcpu, 0, 0);
-               break;
+               return kvmppc_h_pr_bulk_remove(vcpu);
+       case H_PUT_TCE:
+               return kvmppc_h_pr_put_tce(vcpu);
        case H_CEDE:
                kvm_vcpu_block(vcpu);
+               clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                vcpu->stat.halt_wakeup++;
                return EMULATE_DONE;
        }
index 6e6e9cef34a8977235020904936f244a8d777a02..798491a268b3f6d0da8e4b9c96fbbcca64d44dd2 100644 (file)
@@ -128,24 +128,25 @@ no_dcbz32_on:
        /* First clear RI in our current MSR value */
        li      r0, MSR_RI
        andc    r6, r6, r0
-       MTMSR_EERI(r6)
-       mtsrr0  r9
-       mtsrr1  r4
 
        PPC_LL  r0, SVCPU_R0(r3)
        PPC_LL  r1, SVCPU_R1(r3)
        PPC_LL  r2, SVCPU_R2(r3)
-       PPC_LL  r4, SVCPU_R4(r3)
        PPC_LL  r5, SVCPU_R5(r3)
-       PPC_LL  r6, SVCPU_R6(r3)
        PPC_LL  r7, SVCPU_R7(r3)
        PPC_LL  r8, SVCPU_R8(r3)
-       PPC_LL  r9, SVCPU_R9(r3)
        PPC_LL  r10, SVCPU_R10(r3)
        PPC_LL  r11, SVCPU_R11(r3)
        PPC_LL  r12, SVCPU_R12(r3)
        PPC_LL  r13, SVCPU_R13(r3)
 
+       MTMSR_EERI(r6)
+       mtsrr0  r9
+       mtsrr1  r4
+
+       PPC_LL  r4, SVCPU_R4(r3)
+       PPC_LL  r6, SVCPU_R6(r3)
+       PPC_LL  r9, SVCPU_R9(r3)
        PPC_LL  r3, (SVCPU_R3)(r3)
 
        RFI
index ee9e1ee9c858116aad1256e35fd27f61eb749994..72f13f4a06e0d0bb16e442b9840f87c9c8b1339b 100644 (file)
@@ -17,6 +17,8 @@
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ *          Scott Wood <scottwood@freescale.com>
+ *          Varun Sethi <varun.sethi@freescale.com>
  */
 
 #include <linux/errno.h>
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
-#include "timing.h"
 #include <asm/cacheflush.h>
+#include <asm/dbell.h>
+#include <asm/hw_irq.h>
+#include <asm/irq.h>
 
+#include "timing.h"
 #include "booke.h"
 
 unsigned long kvmppc_booke_handlers;
@@ -55,6 +60,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "dec",        VCPU_STAT(dec_exits) },
        { "ext_intr",   VCPU_STAT(ext_intr_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
+       { "doorbell", VCPU_STAT(dbell_exits) },
+       { "guest doorbell", VCPU_STAT(gdbell_exits) },
        { NULL }
 };
 
@@ -121,6 +128,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 {
        u32 old_msr = vcpu->arch.shared->msr;
 
+#ifdef CONFIG_KVM_BOOKE_HV
+       new_msr |= MSR_GS;
+#endif
+
        vcpu->arch.shared->msr = new_msr;
 
        kvmppc_mmu_msr_notify(vcpu, old_msr);
@@ -195,17 +206,87 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
        clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
 }
 
+static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+       mtspr(SPRN_GSRR0, srr0);
+       mtspr(SPRN_GSRR1, srr1);
+#else
+       vcpu->arch.shared->srr0 = srr0;
+       vcpu->arch.shared->srr1 = srr1;
+#endif
+}
+
+static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+       vcpu->arch.csrr0 = srr0;
+       vcpu->arch.csrr1 = srr1;
+}
+
+static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+       if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
+               vcpu->arch.dsrr0 = srr0;
+               vcpu->arch.dsrr1 = srr1;
+       } else {
+               set_guest_csrr(vcpu, srr0, srr1);
+       }
+}
+
+static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+       vcpu->arch.mcsrr0 = srr0;
+       vcpu->arch.mcsrr1 = srr1;
+}
+
+static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+       return mfspr(SPRN_GDEAR);
+#else
+       return vcpu->arch.shared->dar;
+#endif
+}
+
+static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+       mtspr(SPRN_GDEAR, dear);
+#else
+       vcpu->arch.shared->dar = dear;
+#endif
+}
+
+static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+       return mfspr(SPRN_GESR);
+#else
+       return vcpu->arch.shared->esr;
+#endif
+}
+
+static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+       mtspr(SPRN_GESR, esr);
+#else
+       vcpu->arch.shared->esr = esr;
+#endif
+}
+
 /* Deliver the interrupt of the corresponding priority, if possible. */
 static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                         unsigned int priority)
 {
        int allowed = 0;
-       ulong uninitialized_var(msr_mask);
+       ulong msr_mask = 0;
        bool update_esr = false, update_dear = false;
        ulong crit_raw = vcpu->arch.shared->critical;
        ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
        bool crit;
        bool keep_irq = false;
+       enum int_class int_class;
 
        /* Truncate crit indicators in 32 bit mode */
        if (!(vcpu->arch.shared->msr & MSR_SF)) {
@@ -241,46 +322,85 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
        case BOOKE_IRQPRIO_AP_UNAVAIL:
        case BOOKE_IRQPRIO_ALIGNMENT:
                allowed = 1;
-               msr_mask = MSR_CE|MSR_ME|MSR_DE;
+               msr_mask = MSR_CE | MSR_ME | MSR_DE;
+               int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_CRITICAL:
-       case BOOKE_IRQPRIO_WATCHDOG:
+       case BOOKE_IRQPRIO_DBELL_CRIT:
                allowed = vcpu->arch.shared->msr & MSR_CE;
+               allowed = allowed && !crit;
                msr_mask = MSR_ME;
+               int_class = INT_CLASS_CRIT;
                break;
        case BOOKE_IRQPRIO_MACHINE_CHECK:
                allowed = vcpu->arch.shared->msr & MSR_ME;
-               msr_mask = 0;
+               allowed = allowed && !crit;
+               int_class = INT_CLASS_MC;
                break;
        case BOOKE_IRQPRIO_DECREMENTER:
        case BOOKE_IRQPRIO_FIT:
                keep_irq = true;
                /* fall through */
        case BOOKE_IRQPRIO_EXTERNAL:
+       case BOOKE_IRQPRIO_DBELL:
                allowed = vcpu->arch.shared->msr & MSR_EE;
                allowed = allowed && !crit;
-               msr_mask = MSR_CE|MSR_ME|MSR_DE;
+               msr_mask = MSR_CE | MSR_ME | MSR_DE;
+               int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_DEBUG:
                allowed = vcpu->arch.shared->msr & MSR_DE;
+               allowed = allowed && !crit;
                msr_mask = MSR_ME;
+               int_class = INT_CLASS_CRIT;
                break;
        }
 
        if (allowed) {
-               vcpu->arch.shared->srr0 = vcpu->arch.pc;
-               vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
+               switch (int_class) {
+               case INT_CLASS_NONCRIT:
+                       set_guest_srr(vcpu, vcpu->arch.pc,
+                                     vcpu->arch.shared->msr);
+                       break;
+               case INT_CLASS_CRIT:
+                       set_guest_csrr(vcpu, vcpu->arch.pc,
+                                      vcpu->arch.shared->msr);
+                       break;
+               case INT_CLASS_DBG:
+                       set_guest_dsrr(vcpu, vcpu->arch.pc,
+                                      vcpu->arch.shared->msr);
+                       break;
+               case INT_CLASS_MC:
+                       set_guest_mcsrr(vcpu, vcpu->arch.pc,
+                                       vcpu->arch.shared->msr);
+                       break;
+               }
+
                vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
                if (update_esr == true)
-                       vcpu->arch.shared->esr = vcpu->arch.queued_esr;
+                       set_guest_esr(vcpu, vcpu->arch.queued_esr);
                if (update_dear == true)
-                       vcpu->arch.shared->dar = vcpu->arch.queued_dear;
+                       set_guest_dear(vcpu, vcpu->arch.queued_dear);
                kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
 
                if (!keep_irq)
                        clear_bit(priority, &vcpu->arch.pending_exceptions);
        }
 
+#ifdef CONFIG_KVM_BOOKE_HV
+       /*
+        * If an interrupt is pending but masked, raise a guest doorbell
+        * so that we are notified when the guest enables the relevant
+        * MSR bit.
+        */
+       if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
+               kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
+       if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
+               kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
+       if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
+               kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
+#endif
+
        return allowed;
 }
 
@@ -305,7 +425,7 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
        }
 
        priority = __ffs(*pending);
-       while (priority <= BOOKE_IRQPRIO_MAX) {
+       while (priority < BOOKE_IRQPRIO_MAX) {
                if (kvmppc_booke_irqprio_deliver(vcpu, priority))
                        break;
 
@@ -319,8 +439,9 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
 }
 
 /* Check pending exceptions and deliver one, if possible. */
-void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
+int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 {
+       int r = 0;
        WARN_ON_ONCE(!irqs_disabled());
 
        kvmppc_core_check_exceptions(vcpu);
@@ -328,16 +449,60 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
        if (vcpu->arch.shared->msr & MSR_WE) {
                local_irq_enable();
                kvm_vcpu_block(vcpu);
+               clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                local_irq_disable();
 
                kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
-               kvmppc_core_check_exceptions(vcpu);
+               r = 1;
        };
+
+       return r;
+}
+
+/*
+ * Common checks before entering the guest world.  Call with interrupts
+ * disabled.
+ *
+ * returns !0 if a signal is pending and check_signal is true
+ */
+static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
+{
+       int r = 0;
+
+       WARN_ON_ONCE(!irqs_disabled());
+       while (true) {
+               if (need_resched()) {
+                       local_irq_enable();
+                       cond_resched();
+                       local_irq_disable();
+                       continue;
+               }
+
+               if (signal_pending(current)) {
+                       r = 1;
+                       break;
+               }
+
+               if (kvmppc_core_prepare_to_enter(vcpu)) {
+                       /* interrupts got enabled in between, so we
+                          are back at square 1 */
+                       continue;
+               }
+
+               break;
+       }
+
+       return r;
 }
 
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
        int ret;
+#ifdef CONFIG_PPC_FPU
+       unsigned int fpscr;
+       int fpexc_mode;
+       u64 fpr[32];
+#endif
 
        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -345,17 +510,53 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        }
 
        local_irq_disable();
-
-       kvmppc_core_prepare_to_enter(vcpu);
-
-       if (signal_pending(current)) {
+       if (kvmppc_prepare_to_enter(vcpu)) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                ret = -EINTR;
                goto out;
        }
 
        kvm_guest_enter();
+
+#ifdef CONFIG_PPC_FPU
+       /* Save userspace FPU state in stack */
+       enable_kernel_fp();
+       memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
+       fpscr = current->thread.fpscr.val;
+       fpexc_mode = current->thread.fpexc_mode;
+
+       /* Restore guest FPU state to thread */
+       memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
+       current->thread.fpscr.val = vcpu->arch.fpscr;
+
+       /*
+        * Since we can't trap on MSR_FP in GS-mode, we consider the guest
+        * as always using the FPU.  Kernel usage of FP (via
+        * enable_kernel_fp()) in this thread must not occur while
+        * vcpu->fpu_active is set.
+        */
+       vcpu->fpu_active = 1;
+
+       kvmppc_load_guest_fp(vcpu);
+#endif
+
        ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+
+#ifdef CONFIG_PPC_FPU
+       kvmppc_save_guest_fp(vcpu);
+
+       vcpu->fpu_active = 0;
+
+       /* Save guest FPU state from thread */
+       memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
+       vcpu->arch.fpscr = current->thread.fpscr.val;
+
+       /* Restore userspace FPU state from stack */
+       memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
+       current->thread.fpscr.val = fpscr;
+       current->thread.fpexc_mode = fpexc_mode;
+#endif
+
        kvm_guest_exit();
 
 out:
@@ -363,6 +564,84 @@ out:
        return ret;
 }
 
+static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er;
+
+       er = kvmppc_emulate_instruction(run, vcpu);
+       switch (er) {
+       case EMULATE_DONE:
+               /* don't overwrite subtypes, just account kvm_stats */
+               kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
+               /* Future optimization: only reload non-volatiles if
+                * they were actually modified by emulation. */
+               return RESUME_GUEST_NV;
+
+       case EMULATE_DO_DCR:
+               run->exit_reason = KVM_EXIT_DCR;
+               return RESUME_HOST;
+
+       case EMULATE_FAIL:
+               printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
+                      __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+               /* For debugging, encode the failing instruction and
+                * report it to userspace. */
+               run->hw.hardware_exit_reason = ~0ULL << 32;
+               run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
+               kvmppc_core_queue_program(vcpu, ESR_PIL);
+               return RESUME_HOST;
+
+       default:
+               BUG();
+       }
+}
+
+static void kvmppc_fill_pt_regs(struct pt_regs *regs)
+{
+       ulong r1, ip, msr, lr;
+
+       asm("mr %0, 1" : "=r"(r1));
+       asm("mflr %0" : "=r"(lr));
+       asm("mfmsr %0" : "=r"(msr));
+       asm("bl 1f; 1: mflr %0" : "=r"(ip));
+
+       memset(regs, 0, sizeof(*regs));
+       regs->gpr[1] = r1;
+       regs->nip = ip;
+       regs->msr = msr;
+       regs->link = lr;
+}
+
+static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
+                                    unsigned int exit_nr)
+{
+       struct pt_regs regs;
+
+       switch (exit_nr) {
+       case BOOKE_INTERRUPT_EXTERNAL:
+               kvmppc_fill_pt_regs(&regs);
+               do_IRQ(&regs);
+               break;
+       case BOOKE_INTERRUPT_DECREMENTER:
+               kvmppc_fill_pt_regs(&regs);
+               timer_interrupt(&regs);
+               break;
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
+       case BOOKE_INTERRUPT_DOORBELL:
+               kvmppc_fill_pt_regs(&regs);
+               doorbell_exception(&regs);
+               break;
+#endif
+       case BOOKE_INTERRUPT_MACHINE_CHECK:
+               /* FIXME */
+               break;
+       case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
+               kvmppc_fill_pt_regs(&regs);
+               performance_monitor_exception(&regs);
+               break;
+       }
+}
+
 /**
  * kvmppc_handle_exit
  *
@@ -371,12 +650,14 @@ out:
 int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int exit_nr)
 {
-       enum emulation_result er;
        int r = RESUME_HOST;
 
        /* update before a new last_exit_type is rewritten */
        kvmppc_update_timing_stats(vcpu);
 
+       /* restart interrupts if they were meant for the host */
+       kvmppc_restart_interrupt(vcpu, exit_nr);
+
        local_irq_enable();
 
        run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -386,62 +667,74 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
                kvmppc_dump_vcpu(vcpu);
+               /* For debugging, send invalid exit reason to user space */
+               run->hw.hardware_exit_reason = ~1ULL << 32;
+               run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
                r = RESUME_HOST;
                break;
 
        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
-               if (need_resched())
-                       cond_resched();
                r = RESUME_GUEST;
                break;
 
        case BOOKE_INTERRUPT_DECREMENTER:
-               /* Since we switched IVPR back to the host's value, the host
-                * handled this interrupt the moment we enabled interrupts.
-                * Now we just offer it a chance to reschedule the guest. */
                kvmppc_account_exit(vcpu, DEC_EXITS);
-               if (need_resched())
-                       cond_resched();
                r = RESUME_GUEST;
                break;
 
+       case BOOKE_INTERRUPT_DOORBELL:
+               kvmppc_account_exit(vcpu, DBELL_EXITS);
+               r = RESUME_GUEST;
+               break;
+
+       case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
+               kvmppc_account_exit(vcpu, GDBELL_EXITS);
+
+               /*
+                * We are here because there is a pending guest interrupt
+                * which could not be delivered as MSR_CE or MSR_ME was not
+                * set.  Once we break from here we will retry delivery.
+                */
+               r = RESUME_GUEST;
+               break;
+
+       case BOOKE_INTERRUPT_GUEST_DBELL:
+               kvmppc_account_exit(vcpu, GDBELL_EXITS);
+
+               /*
+                * We are here because there is a pending guest interrupt
+                * which could not be delivered as MSR_EE was not set.  Once
+                * we break from here we will retry delivery.
+                */
+               r = RESUME_GUEST;
+               break;
+
+       case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
+               r = RESUME_GUEST;
+               break;
+
+       case BOOKE_INTERRUPT_HV_PRIV:
+               r = emulation_exit(run, vcpu);
+               break;
+
        case BOOKE_INTERRUPT_PROGRAM:
-               if (vcpu->arch.shared->msr & MSR_PR) {
-                       /* Program traps generated by user-level software must be handled
-                        * by the guest kernel. */
+               if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
+                       /*
+                        * Program traps generated by user-level software must
+                        * be handled by the guest kernel.
+                        *
+                        * In GS mode, hypervisor privileged instructions trap
+                        * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
+                        * actual program interrupts, handled by the guest.
+                        */
                        kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
                        r = RESUME_GUEST;
                        kvmppc_account_exit(vcpu, USR_PR_INST);
                        break;
                }
 
-               er = kvmppc_emulate_instruction(run, vcpu);
-               switch (er) {
-               case EMULATE_DONE:
-                       /* don't overwrite subtypes, just account kvm_stats */
-                       kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
-                       /* Future optimization: only reload non-volatiles if
-                        * they were actually modified by emulation. */
-                       r = RESUME_GUEST_NV;
-                       break;
-               case EMULATE_DO_DCR:
-                       run->exit_reason = KVM_EXIT_DCR;
-                       r = RESUME_HOST;
-                       break;
-               case EMULATE_FAIL:
-                       /* XXX Deliver Program interrupt to guest. */
-                       printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
-                              __func__, vcpu->arch.pc, vcpu->arch.last_inst);
-                       /* For debugging, encode the failing instruction and
-                        * report it to userspace. */
-                       run->hw.hardware_exit_reason = ~0ULL << 32;
-                       run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
-                       r = RESUME_HOST;
-                       break;
-               default:
-                       BUG();
-               }
+               r = emulation_exit(run, vcpu);
                break;
 
        case BOOKE_INTERRUPT_FP_UNAVAIL:
@@ -506,6 +799,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                r = RESUME_GUEST;
                break;
 
+#ifdef CONFIG_KVM_BOOKE_HV
+       case BOOKE_INTERRUPT_HV_SYSCALL:
+               if (!(vcpu->arch.shared->msr & MSR_PR)) {
+                       kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
+               } else {
+                       /*
+                        * hcall from guest userspace -- send privileged
+                        * instruction program check.
+                        */
+                       kvmppc_core_queue_program(vcpu, ESR_PPR);
+               }
+
+               r = RESUME_GUEST;
+               break;
+#else
        case BOOKE_INTERRUPT_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
@@ -519,6 +827,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                kvmppc_account_exit(vcpu, SYSCALL_EXITS);
                r = RESUME_GUEST;
                break;
+#endif
 
        case BOOKE_INTERRUPT_DTLB_MISS: {
                unsigned long eaddr = vcpu->arch.fault_dear;
@@ -526,7 +835,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                gpa_t gpaddr;
                gfn_t gfn;
 
-#ifdef CONFIG_KVM_E500
+#ifdef CONFIG_KVM_E500V2
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
                        kvmppc_map_magic(vcpu);
@@ -567,6 +876,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        /* Guest has mapped and accessed a page which is not
                         * actually RAM. */
                        vcpu->arch.paddr_accessed = gpaddr;
+                       vcpu->arch.vaddr_accessed = eaddr;
                        r = kvmppc_emulate_mmio(run, vcpu);
                        kvmppc_account_exit(vcpu, MMIO_EXITS);
                }
@@ -634,15 +944,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                BUG();
        }
 
-       local_irq_disable();
-
-       kvmppc_core_prepare_to_enter(vcpu);
-
+       /*
+        * To avoid clobbering exit_reason, only check for signals if we
+        * aren't already exiting to userspace for some other reason.
+        */
        if (!(r & RESUME_HOST)) {
-               /* To avoid clobbering exit_reason, only check for signals if
-                * we aren't already exiting to userspace for some other
-                * reason. */
-               if (signal_pending(current)) {
+               local_irq_disable();
+               if (kvmppc_prepare_to_enter(vcpu)) {
                        run->exit_reason = KVM_EXIT_INTR;
                        r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
@@ -659,12 +967,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
        int r;
 
        vcpu->arch.pc = 0;
-       vcpu->arch.shared->msr = 0;
-       vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
        vcpu->arch.shared->pir = vcpu->vcpu_id;
        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
+       kvmppc_set_msr(vcpu, 0);
 
+#ifndef CONFIG_KVM_BOOKE_HV
+       vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
        vcpu->arch.shadow_pid = 1;
+       vcpu->arch.shared->msr = 0;
+#endif
 
        /* Eye-catching numbers so we know if the guest takes an interrupt
         * before it's programmed its own IVPR/IVORs. */
@@ -745,8 +1056,8 @@ static void get_sregs_base(struct kvm_vcpu *vcpu,
        sregs->u.e.csrr0 = vcpu->arch.csrr0;
        sregs->u.e.csrr1 = vcpu->arch.csrr1;
        sregs->u.e.mcsr = vcpu->arch.mcsr;
-       sregs->u.e.esr = vcpu->arch.shared->esr;
-       sregs->u.e.dear = vcpu->arch.shared->dar;
+       sregs->u.e.esr = get_guest_esr(vcpu);
+       sregs->u.e.dear = get_guest_dear(vcpu);
        sregs->u.e.tsr = vcpu->arch.tsr;
        sregs->u.e.tcr = vcpu->arch.tcr;
        sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
@@ -763,8 +1074,8 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
        vcpu->arch.csrr0 = sregs->u.e.csrr0;
        vcpu->arch.csrr1 = sregs->u.e.csrr1;
        vcpu->arch.mcsr = sregs->u.e.mcsr;
-       vcpu->arch.shared->esr = sregs->u.e.esr;
-       vcpu->arch.shared->dar = sregs->u.e.dear;
+       set_guest_esr(vcpu, sregs->u.e.esr);
+       set_guest_dear(vcpu, sregs->u.e.dear);
        vcpu->arch.vrsave = sregs->u.e.vrsave;
        kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
 
@@ -932,15 +1243,6 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
 {
 }
 
-int kvmppc_core_init_vm(struct kvm *kvm)
-{
-       return 0;
-}
-
-void kvmppc_core_destroy_vm(struct kvm *kvm)
-{
-}
-
 void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
 {
        vcpu->arch.tcr = new_tcr;
@@ -968,8 +1270,19 @@ void kvmppc_decrementer_func(unsigned long data)
        kvmppc_set_tsr_bits(vcpu, TSR_DIS);
 }
 
+void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+       current->thread.kvm_vcpu = vcpu;
+}
+
+void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
+{
+       current->thread.kvm_vcpu = NULL;
+}
+
 int __init kvmppc_booke_init(void)
 {
+#ifndef CONFIG_KVM_BOOKE_HV
        unsigned long ivor[16];
        unsigned long max_ivor = 0;
        int i;
@@ -1012,7 +1325,7 @@ int __init kvmppc_booke_init(void)
        }
        flush_icache_range(kvmppc_booke_handlers,
                           kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
-
+#endif /* !BOOKE_HV */
        return 0;
 }
 
index 2fe202705a3f9c4677f7334885233f50bb5a2942..ba61974c1e20839f454ebb9e33cc8f46e4949d9e 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_ppc.h>
+#include <asm/switch_to.h>
 #include "timing.h"
 
 /* interrupt priortity ordering */
 #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19
 /* Internal pseudo-irqprio for level triggered externals */
 #define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20
-#define BOOKE_IRQPRIO_MAX 20
+#define BOOKE_IRQPRIO_DBELL 21
+#define BOOKE_IRQPRIO_DBELL_CRIT 22
+#define BOOKE_IRQPRIO_MAX 23
+
+#define BOOKE_IRQMASK_EE ((1 << BOOKE_IRQPRIO_EXTERNAL_LEVEL) | \
+                         (1 << BOOKE_IRQPRIO_PERFORMANCE_MONITOR) | \
+                         (1 << BOOKE_IRQPRIO_DBELL) | \
+                         (1 << BOOKE_IRQPRIO_DECREMENTER) | \
+                         (1 << BOOKE_IRQPRIO_FIT) | \
+                         (1 << BOOKE_IRQPRIO_EXTERNAL))
+
+#define BOOKE_IRQMASK_CE ((1 << BOOKE_IRQPRIO_DBELL_CRIT) | \
+                         (1 << BOOKE_IRQPRIO_WATCHDOG) | \
+                         (1 << BOOKE_IRQPRIO_CRITICAL))
 
 extern unsigned long kvmppc_booke_handlers;
 
@@ -61,8 +75,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
 
 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance);
-int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
-int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
+int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
 
 /* low-level asm code to transfer guest state */
 void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
@@ -71,4 +85,46 @@ void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
 /* high-level function, manages flags, host state */
 void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
 
+void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu);
+
+enum int_class {
+       INT_CLASS_NONCRIT,
+       INT_CLASS_CRIT,
+       INT_CLASS_MC,
+       INT_CLASS_DBG,
+};
+
+void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
+
+/*
+ * Load up guest vcpu FP state if it's needed.
+ * It also set the MSR_FP in thread so that host know
+ * we're holding FPU, and then host can help to save
+ * guest vcpu FP state if other threads require to use FPU.
+ * This simulates an FP unavailable fault.
+ *
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+       if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
+               load_up_fpu();
+               current->thread.regs->msr |= MSR_FP;
+       }
+#endif
+}
+
+/*
+ * Save guest vcpu FP state into thread.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+       if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
+               giveup_fpu(current);
+#endif
+}
 #endif /* __KVM_BOOKE_H__ */
index 3e652da365348433ce1ae2dc7cbe206b8e7e4e32..6c76397f2af482d622caee0e92f2d0da8cf3e77e 100644 (file)
@@ -40,8 +40,8 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance)
 {
        int emulated = EMULATE_DONE;
-       int rs;
-       int rt;
+       int rs = get_rs(inst);
+       int rt = get_rt(inst);
 
        switch (get_op(inst)) {
        case 19:
@@ -62,19 +62,16 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                switch (get_xop(inst)) {
 
                case OP_31_XOP_MFMSR:
-                       rt = get_rt(inst);
                        kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
                        kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
                        break;
 
                case OP_31_XOP_MTMSR:
-                       rs = get_rs(inst);
                        kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
                        kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
                        break;
 
                case OP_31_XOP_WRTEE:
-                       rs = get_rs(inst);
                        vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
                                        | (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
                        kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
@@ -99,22 +96,32 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
        return emulated;
 }
 
-int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+/*
+ * NOTE: some of these registers are not emulated on BOOKE_HV (GS-mode).
+ * Their backing store is in real registers, and these functions
+ * will return the wrong result if called for them in another context
+ * (such as debugging).
+ */
+int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
        int emulated = EMULATE_DONE;
-       ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
        switch (sprn) {
        case SPRN_DEAR:
-               vcpu->arch.shared->dar = spr_val; break;
+               vcpu->arch.shared->dar = spr_val;
+               break;
        case SPRN_ESR:
-               vcpu->arch.shared->esr = spr_val; break;
+               vcpu->arch.shared->esr = spr_val;
+               break;
        case SPRN_DBCR0:
-               vcpu->arch.dbcr0 = spr_val; break;
+               vcpu->arch.dbcr0 = spr_val;
+               break;
        case SPRN_DBCR1:
-               vcpu->arch.dbcr1 = spr_val; break;
+               vcpu->arch.dbcr1 = spr_val;
+               break;
        case SPRN_DBSR:
-               vcpu->arch.dbsr &= ~spr_val; break;
+               vcpu->arch.dbsr &= ~spr_val;
+               break;
        case SPRN_TSR:
                kvmppc_clr_tsr_bits(vcpu, spr_val);
                break;
@@ -122,20 +129,29 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
                kvmppc_set_tcr(vcpu, spr_val);
                break;
 
-       /* Note: SPRG4-7 are user-readable. These values are
-        * loaded into the real SPRGs when resuming the
-        * guest. */
+       /*
+        * Note: SPRG4-7 are user-readable.
+        * These values are loaded into the real SPRGs when resuming the
+        * guest (PR-mode only).
+        */
        case SPRN_SPRG4:
-               vcpu->arch.shared->sprg4 = spr_val; break;
+               vcpu->arch.shared->sprg4 = spr_val;
+               break;
        case SPRN_SPRG5:
-               vcpu->arch.shared->sprg5 = spr_val; break;
+               vcpu->arch.shared->sprg5 = spr_val;
+               break;
        case SPRN_SPRG6:
-               vcpu->arch.shared->sprg6 = spr_val; break;
+               vcpu->arch.shared->sprg6 = spr_val;
+               break;
        case SPRN_SPRG7:
-               vcpu->arch.shared->sprg7 = spr_val; break;
+               vcpu->arch.shared->sprg7 = spr_val;
+               break;
 
        case SPRN_IVPR:
                vcpu->arch.ivpr = spr_val;
+#ifdef CONFIG_KVM_BOOKE_HV
+               mtspr(SPRN_GIVPR, spr_val);
+#endif
                break;
        case SPRN_IVOR0:
                vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
@@ -145,6 +161,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
                break;
        case SPRN_IVOR2:
                vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
+#ifdef CONFIG_KVM_BOOKE_HV
+               mtspr(SPRN_GIVOR2, spr_val);
+#endif
                break;
        case SPRN_IVOR3:
                vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
@@ -163,6 +182,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
                break;
        case SPRN_IVOR8:
                vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
+#ifdef CONFIG_KVM_BOOKE_HV
+               mtspr(SPRN_GIVOR8, spr_val);
+#endif
                break;
        case SPRN_IVOR9:
                vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
@@ -193,75 +215,83 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
        return emulated;
 }
 
-int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
        int emulated = EMULATE_DONE;
 
        switch (sprn) {
        case SPRN_IVPR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break;
+               *spr_val = vcpu->arch.ivpr;
+               break;
        case SPRN_DEAR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break;
+               *spr_val = vcpu->arch.shared->dar;
+               break;
        case SPRN_ESR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break;
+               *spr_val = vcpu->arch.shared->esr;
+               break;
        case SPRN_DBCR0:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
+               *spr_val = vcpu->arch.dbcr0;
+               break;
        case SPRN_DBCR1:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
+               *spr_val = vcpu->arch.dbcr1;
+               break;
        case SPRN_DBSR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
+               *spr_val = vcpu->arch.dbsr;
+               break;
        case SPRN_TSR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break;
+               *spr_val = vcpu->arch.tsr;
+               break;
        case SPRN_TCR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break;
+               *spr_val = vcpu->arch.tcr;
+               break;
 
        case SPRN_IVOR0:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
                break;
        case SPRN_IVOR1:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
                break;
        case SPRN_IVOR2:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
                break;
        case SPRN_IVOR3:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
                break;
        case SPRN_IVOR4:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
                break;
        case SPRN_IVOR5:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
                break;
        case SPRN_IVOR6:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
                break;
        case SPRN_IVOR7:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
                break;
        case SPRN_IVOR8:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
                break;
        case SPRN_IVOR9:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
                break;
        case SPRN_IVOR10:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
                break;
        case SPRN_IVOR11:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
                break;
        case SPRN_IVOR12:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
                break;
        case SPRN_IVOR13:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
                break;
        case SPRN_IVOR14:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
                break;
        case SPRN_IVOR15:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
                break;
 
        default:
index c8c4b878795a71542876308d68e34d7ff6e26163..8feec2ff3928e2e89ce917588704b95d661bf149 100644 (file)
@@ -419,13 +419,13 @@ lightweight_exit:
         * written directly to the shared area, so we
         * need to reload them here with the guest's values.
         */
-       lwz     r3, VCPU_SHARED_SPRG4(r5)
+       PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
        mtspr   SPRN_SPRG4W, r3
-       lwz     r3, VCPU_SHARED_SPRG5(r5)
+       PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
        mtspr   SPRN_SPRG5W, r3
-       lwz     r3, VCPU_SHARED_SPRG6(r5)
+       PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
        mtspr   SPRN_SPRG6W, r3
-       lwz     r3, VCPU_SHARED_SPRG7(r5)
+       PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
        mtspr   SPRN_SPRG7W, r3
 
 #ifdef CONFIG_KVM_EXIT_TIMING
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
new file mode 100644 (file)
index 0000000..6048a00
--- /dev/null
@@ -0,0 +1,597 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * Author: Varun Sethi <varun.sethi@freescale.com>
+ * Author: Scott Wood <scotwood@freescale.com>
+ *
+ * This file is derived from arch/powerpc/kvm/booke_interrupts.S
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/mmu-44x.h>
+#include <asm/page.h>
+#include <asm/asm-compat.h>
+#include <asm/asm-offsets.h>
+#include <asm/bitsperlong.h>
+#include <asm/thread_info.h>
+
+#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
+
+#define GET_VCPU(vcpu, thread) \
+       PPC_LL  vcpu, THREAD_KVM_VCPU(thread)
+
+#define LONGBYTES              (BITS_PER_LONG / 8)
+
+#define VCPU_GPR(n)            (VCPU_GPRS + (n * LONGBYTES))
+#define VCPU_GUEST_SPRG(n)     (VCPU_GUEST_SPRGS + (n * LONGBYTES))
+
+/* The host stack layout: */
+#define HOST_R1         (0 * LONGBYTES) /* Implied by stwu. */
+#define HOST_CALLEE_LR  (1 * LONGBYTES)
+#define HOST_RUN        (2 * LONGBYTES) /* struct kvm_run */
+/*
+ * r2 is special: it holds 'current', and it made nonvolatile in the
+ * kernel with the -ffixed-r2 gcc option.
+ */
+#define HOST_R2         (3 * LONGBYTES)
+#define HOST_CR         (4 * LONGBYTES)
+#define HOST_NV_GPRS    (5 * LONGBYTES)
+#define HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * LONGBYTES))
+#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + LONGBYTES)
+#define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
+#define HOST_STACK_LR   (HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */
+
+#define NEED_EMU               0x00000001 /* emulation -- save nv regs */
+#define NEED_DEAR              0x00000002 /* save faulting DEAR */
+#define NEED_ESR               0x00000004 /* save faulting ESR */
+
+/*
+ * On entry:
+ * r4 = vcpu, r5 = srr0, r6 = srr1
+ * saved in vcpu: cr, ctr, r3-r13
+ */
+.macro kvm_handler_common intno, srr0, flags
+       /* Restore host stack pointer */
+       PPC_STL r1, VCPU_GPR(r1)(r4)
+       PPC_STL r2, VCPU_GPR(r2)(r4)
+       PPC_LL  r1, VCPU_HOST_STACK(r4)
+       PPC_LL  r2, HOST_R2(r1)
+
+       mfspr   r10, SPRN_PID
+       lwz     r8, VCPU_HOST_PID(r4)
+       PPC_LL  r11, VCPU_SHARED(r4)
+       PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */
+       li      r14, \intno
+
+       stw     r10, VCPU_GUEST_PID(r4)
+       mtspr   SPRN_PID, r8
+
+#ifdef CONFIG_KVM_EXIT_TIMING
+       /* save exit time */
+1:     mfspr   r7, SPRN_TBRU
+       mfspr   r8, SPRN_TBRL
+       mfspr   r9, SPRN_TBRU
+       cmpw    r9, r7
+       stw     r8, VCPU_TIMING_EXIT_TBL(r4)
+       bne-    1b
+       stw     r9, VCPU_TIMING_EXIT_TBU(r4)
+#endif
+
+       oris    r8, r6, MSR_CE@h
+       PPC_STD(r6, VCPU_SHARED_MSR, r11)
+       ori     r8, r8, MSR_ME | MSR_RI
+       PPC_STL r5, VCPU_PC(r4)
+
+       /*
+        * Make sure CE/ME/RI are set (if appropriate for exception type)
+        * whether or not the guest had it set.  Since mfmsr/mtmsr are
+        * somewhat expensive, skip in the common case where the guest
+        * had all these bits set (and thus they're still set if
+        * appropriate for the exception type).
+        */
+       cmpw    r6, r8
+       beq     1f
+       mfmsr   r7
+       .if     \srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0
+       oris    r7, r7, MSR_CE@h
+       .endif
+       .if     \srr0 != SPRN_MCSRR0
+       ori     r7, r7, MSR_ME | MSR_RI
+       .endif
+       mtmsr   r7
+1:
+
+       .if     \flags & NEED_EMU
+       /*
+        * This assumes you have external PID support.
+        * To support a bookehv CPU without external PID, you'll
+        * need to look up the TLB entry and create a temporary mapping.
+        *
+        * FIXME: we don't currently handle if the lwepx faults.  PR-mode
+        * booke doesn't handle it either.  Since Linux doesn't use
+        * broadcast tlbivax anymore, the only way this should happen is
+        * if the guest maps its memory execute-but-not-read, or if we
+        * somehow take a TLB miss in the middle of this entry code and
+        * evict the relevant entry.  On e500mc, all kernel lowmem is
+        * bolted into TLB1 large page mappings, and we don't use
+        * broadcast invalidates, so we should not take a TLB miss here.
+        *
+        * Later we'll need to deal with faults here.  Disallowing guest
+        * mappings that are execute-but-not-read could be an option on
+        * e500mc, but not on chips with an LRAT if it is used.
+        */
+
+       mfspr   r3, SPRN_EPLC   /* will already have correct ELPID and EGS */
+       PPC_STL r15, VCPU_GPR(r15)(r4)
+       PPC_STL r16, VCPU_GPR(r16)(r4)
+       PPC_STL r17, VCPU_GPR(r17)(r4)
+       PPC_STL r18, VCPU_GPR(r18)(r4)
+       PPC_STL r19, VCPU_GPR(r19)(r4)
+       mr      r8, r3
+       PPC_STL r20, VCPU_GPR(r20)(r4)
+       rlwimi  r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
+       PPC_STL r21, VCPU_GPR(r21)(r4)
+       rlwimi  r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
+       PPC_STL r22, VCPU_GPR(r22)(r4)
+       rlwimi  r8, r10, EPC_EPID_SHIFT, EPC_EPID
+       PPC_STL r23, VCPU_GPR(r23)(r4)
+       PPC_STL r24, VCPU_GPR(r24)(r4)
+       PPC_STL r25, VCPU_GPR(r25)(r4)
+       PPC_STL r26, VCPU_GPR(r26)(r4)
+       PPC_STL r27, VCPU_GPR(r27)(r4)
+       PPC_STL r28, VCPU_GPR(r28)(r4)
+       PPC_STL r29, VCPU_GPR(r29)(r4)
+       PPC_STL r30, VCPU_GPR(r30)(r4)
+       PPC_STL r31, VCPU_GPR(r31)(r4)
+       mtspr   SPRN_EPLC, r8
+
+       /* disable preemption, so we are sure we hit the fixup handler */
+#ifdef CONFIG_PPC64
+       clrrdi  r8,r1,THREAD_SHIFT
+#else
+       rlwinm  r8,r1,0,0,31-THREAD_SHIFT       /* current thread_info */
+#endif
+       li      r7, 1
+       stw     r7, TI_PREEMPT(r8)
+
+       isync
+
+       /*
+        * In case the read goes wrong, we catch it and write an invalid value
+        * in LAST_INST instead.
+        */
+1:     lwepx   r9, 0, r5
+2:
+.section .fixup, "ax"
+3:     li      r9, KVM_INST_FETCH_FAILED
+       b       2b
+.previous
+.section __ex_table,"a"
+       PPC_LONG_ALIGN
+       PPC_LONG 1b,3b
+.previous
+
+       mtspr   SPRN_EPLC, r3
+       li      r7, 0
+       stw     r7, TI_PREEMPT(r8)
+       stw     r9, VCPU_LAST_INST(r4)
+       .endif
+
+       .if     \flags & NEED_ESR
+       mfspr   r8, SPRN_ESR
+       PPC_STL r8, VCPU_FAULT_ESR(r4)
+       .endif
+
+       .if     \flags & NEED_DEAR
+       mfspr   r9, SPRN_DEAR
+       PPC_STL r9, VCPU_FAULT_DEAR(r4)
+       .endif
+
+       b       kvmppc_resume_host
+.endm
+
+/*
+ * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
+ */
+.macro kvm_handler intno srr0, srr1, flags
+_GLOBAL(kvmppc_handler_\intno\()_\srr1)
+       GET_VCPU(r11, r10)
+       PPC_STL r3, VCPU_GPR(r3)(r11)
+       mfspr   r3, SPRN_SPRG_RSCRATCH0
+       PPC_STL r4, VCPU_GPR(r4)(r11)
+       PPC_LL  r4, THREAD_NORMSAVE(0)(r10)
+       PPC_STL r5, VCPU_GPR(r5)(r11)
+       stw     r13, VCPU_CR(r11)
+       mfspr   r5, \srr0
+       PPC_STL r3, VCPU_GPR(r10)(r11)
+       PPC_LL  r3, THREAD_NORMSAVE(2)(r10)
+       PPC_STL r6, VCPU_GPR(r6)(r11)
+       PPC_STL r4, VCPU_GPR(r11)(r11)
+       mfspr   r6, \srr1
+       PPC_STL r7, VCPU_GPR(r7)(r11)
+       PPC_STL r8, VCPU_GPR(r8)(r11)
+       PPC_STL r9, VCPU_GPR(r9)(r11)
+       PPC_STL r3, VCPU_GPR(r13)(r11)
+       mfctr   r7
+       PPC_STL r12, VCPU_GPR(r12)(r11)
+       PPC_STL r7, VCPU_CTR(r11)
+       mr      r4, r11
+       kvm_handler_common \intno, \srr0, \flags
+.endm
+
+.macro kvm_lvl_handler intno scratch srr0, srr1, flags
+_GLOBAL(kvmppc_handler_\intno\()_\srr1)
+       mfspr   r10, SPRN_SPRG_THREAD
+       GET_VCPU(r11, r10)
+       PPC_STL r3, VCPU_GPR(r3)(r11)
+       mfspr   r3, \scratch
+       PPC_STL r4, VCPU_GPR(r4)(r11)
+       PPC_LL  r4, GPR9(r8)
+       PPC_STL r5, VCPU_GPR(r5)(r11)
+       stw     r9, VCPU_CR(r11)
+       mfspr   r5, \srr0
+       PPC_STL r3, VCPU_GPR(r8)(r11)
+       PPC_LL  r3, GPR10(r8)
+       PPC_STL r6, VCPU_GPR(r6)(r11)
+       PPC_STL r4, VCPU_GPR(r9)(r11)
+       mfspr   r6, \srr1
+       PPC_LL  r4, GPR11(r8)
+       PPC_STL r7, VCPU_GPR(r7)(r11)
+       PPC_STL r3, VCPU_GPR(r10)(r11)
+       mfctr   r7
+       PPC_STL r12, VCPU_GPR(r12)(r11)
+       PPC_STL r13, VCPU_GPR(r13)(r11)
+       PPC_STL r4, VCPU_GPR(r11)(r11)
+       PPC_STL r7, VCPU_CTR(r11)
+       mr      r4, r11
+       kvm_handler_common \intno, \srr0, \flags
+.endm
+
+kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \
+       SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \
+       SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0
+kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \
+       SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR)
+kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
+kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
+       SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
+kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, NEED_ESR
+kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \
+       SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \
+       SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
+kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \
+       SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU
+kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \
+       SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
+       SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
+       SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0
+
+
+/* Registers:
+ *  SPRG_SCRATCH0: guest r10
+ *  r4: vcpu pointer
+ *  r11: vcpu->arch.shared
+ *  r14: KVM exit number
+ */
+_GLOBAL(kvmppc_resume_host)
+       /* Save remaining volatile guest register state to vcpu. */
+       mfspr   r3, SPRN_VRSAVE
+       PPC_STL r0, VCPU_GPR(r0)(r4)
+       mflr    r5
+       mfspr   r6, SPRN_SPRG4
+       PPC_STL r5, VCPU_LR(r4)
+       mfspr   r7, SPRN_SPRG5
+       stw     r3, VCPU_VRSAVE(r4)
+       PPC_STD(r6, VCPU_SHARED_SPRG4, r11)
+       mfspr   r8, SPRN_SPRG6
+       PPC_STD(r7, VCPU_SHARED_SPRG5, r11)
+       mfspr   r9, SPRN_SPRG7
+       PPC_STD(r8, VCPU_SHARED_SPRG6, r11)
+       mfxer   r3
+       PPC_STD(r9, VCPU_SHARED_SPRG7, r11)
+
+       /* save guest MAS registers and restore host mas4 & mas6 */
+       mfspr   r5, SPRN_MAS0
+       PPC_STL r3, VCPU_XER(r4)
+       mfspr   r6, SPRN_MAS1
+       stw     r5, VCPU_SHARED_MAS0(r11)
+       mfspr   r7, SPRN_MAS2
+       stw     r6, VCPU_SHARED_MAS1(r11)
+       PPC_STD(r7, VCPU_SHARED_MAS2, r11)
+       mfspr   r5, SPRN_MAS3
+       mfspr   r6, SPRN_MAS4
+       stw     r5, VCPU_SHARED_MAS7_3+4(r11)
+       mfspr   r7, SPRN_MAS6
+       stw     r6, VCPU_SHARED_MAS4(r11)
+       mfspr   r5, SPRN_MAS7
+       lwz     r6, VCPU_HOST_MAS4(r4)
+       stw     r7, VCPU_SHARED_MAS6(r11)
+       lwz     r8, VCPU_HOST_MAS6(r4)
+       mtspr   SPRN_MAS4, r6
+       stw     r5, VCPU_SHARED_MAS7_3+0(r11)
+       mtspr   SPRN_MAS6, r8
+       /* Enable MAS register updates via exception */
+       mfspr   r3, SPRN_EPCR
+       rlwinm  r3, r3, 0, ~SPRN_EPCR_DMIUH
+       mtspr   SPRN_EPCR, r3
+       isync
+
+       /* Switch to kernel stack and jump to handler. */
+       PPC_LL  r3, HOST_RUN(r1)
+       mr      r5, r14 /* intno */
+       mr      r14, r4 /* Save vcpu pointer. */
+       bl      kvmppc_handle_exit
+
+       /* Restore vcpu pointer and the nonvolatiles we used. */
+       mr      r4, r14
+       PPC_LL  r14, VCPU_GPR(r14)(r4)
+
+       andi.   r5, r3, RESUME_FLAG_NV
+       beq     skip_nv_load
+       PPC_LL  r15, VCPU_GPR(r15)(r4)
+       PPC_LL  r16, VCPU_GPR(r16)(r4)
+       PPC_LL  r17, VCPU_GPR(r17)(r4)
+       PPC_LL  r18, VCPU_GPR(r18)(r4)
+       PPC_LL  r19, VCPU_GPR(r19)(r4)
+       PPC_LL  r20, VCPU_GPR(r20)(r4)
+       PPC_LL  r21, VCPU_GPR(r21)(r4)
+       PPC_LL  r22, VCPU_GPR(r22)(r4)
+       PPC_LL  r23, VCPU_GPR(r23)(r4)
+       PPC_LL  r24, VCPU_GPR(r24)(r4)
+       PPC_LL  r25, VCPU_GPR(r25)(r4)
+       PPC_LL  r26, VCPU_GPR(r26)(r4)
+       PPC_LL  r27, VCPU_GPR(r27)(r4)
+       PPC_LL  r28, VCPU_GPR(r28)(r4)
+       PPC_LL  r29, VCPU_GPR(r29)(r4)
+       PPC_LL  r30, VCPU_GPR(r30)(r4)
+       PPC_LL  r31, VCPU_GPR(r31)(r4)
+skip_nv_load:
+       /* Should we return to the guest? */
+       andi.   r5, r3, RESUME_FLAG_HOST
+       beq     lightweight_exit
+
+       srawi   r3, r3, 2 /* Shift -ERR back down. */
+
+heavyweight_exit:
+       /* Not returning to guest. */
+       PPC_LL  r5, HOST_STACK_LR(r1)
+       lwz     r6, HOST_CR(r1)
+
+       /*
+        * We already saved guest volatile register state; now save the
+        * non-volatiles.
+        */
+
+       PPC_STL r15, VCPU_GPR(r15)(r4)
+       PPC_STL r16, VCPU_GPR(r16)(r4)
+       PPC_STL r17, VCPU_GPR(r17)(r4)
+       PPC_STL r18, VCPU_GPR(r18)(r4)
+       PPC_STL r19, VCPU_GPR(r19)(r4)
+       PPC_STL r20, VCPU_GPR(r20)(r4)
+       PPC_STL r21, VCPU_GPR(r21)(r4)
+       PPC_STL r22, VCPU_GPR(r22)(r4)
+       PPC_STL r23, VCPU_GPR(r23)(r4)
+       PPC_STL r24, VCPU_GPR(r24)(r4)
+       PPC_STL r25, VCPU_GPR(r25)(r4)
+       PPC_STL r26, VCPU_GPR(r26)(r4)
+       PPC_STL r27, VCPU_GPR(r27)(r4)
+       PPC_STL r28, VCPU_GPR(r28)(r4)
+       PPC_STL r29, VCPU_GPR(r29)(r4)
+       PPC_STL r30, VCPU_GPR(r30)(r4)
+       PPC_STL r31, VCPU_GPR(r31)(r4)
+
+       /* Load host non-volatile register state from host stack. */
+       PPC_LL  r14, HOST_NV_GPR(r14)(r1)
+       PPC_LL  r15, HOST_NV_GPR(r15)(r1)
+       PPC_LL  r16, HOST_NV_GPR(r16)(r1)
+       PPC_LL  r17, HOST_NV_GPR(r17)(r1)
+       PPC_LL  r18, HOST_NV_GPR(r18)(r1)
+       PPC_LL  r19, HOST_NV_GPR(r19)(r1)
+       PPC_LL  r20, HOST_NV_GPR(r20)(r1)
+       PPC_LL  r21, HOST_NV_GPR(r21)(r1)
+       PPC_LL  r22, HOST_NV_GPR(r22)(r1)
+       PPC_LL  r23, HOST_NV_GPR(r23)(r1)
+       PPC_LL  r24, HOST_NV_GPR(r24)(r1)
+       PPC_LL  r25, HOST_NV_GPR(r25)(r1)
+       PPC_LL  r26, HOST_NV_GPR(r26)(r1)
+       PPC_LL  r27, HOST_NV_GPR(r27)(r1)
+       PPC_LL  r28, HOST_NV_GPR(r28)(r1)
+       PPC_LL  r29, HOST_NV_GPR(r29)(r1)
+       PPC_LL  r30, HOST_NV_GPR(r30)(r1)
+       PPC_LL  r31, HOST_NV_GPR(r31)(r1)
+
+       /* Return to kvm_vcpu_run(). */
+       mtlr    r5
+       mtcr    r6
+       addi    r1, r1, HOST_STACK_SIZE
+       /* r3 still contains the return code from kvmppc_handle_exit(). */
+       blr
+
+/* Registers:
+ *  r3: kvm_run pointer
+ *  r4: vcpu pointer
+ */
+_GLOBAL(__kvmppc_vcpu_run)
+       stwu    r1, -HOST_STACK_SIZE(r1)
+       PPC_STL r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */
+
+       /* Save host state to stack. */
+       PPC_STL r3, HOST_RUN(r1)
+       mflr    r3
+       mfcr    r5
+       PPC_STL r3, HOST_STACK_LR(r1)
+
+       stw     r5, HOST_CR(r1)
+
+       /* Save host non-volatile register state to stack. */
+       PPC_STL r14, HOST_NV_GPR(r14)(r1)
+       PPC_STL r15, HOST_NV_GPR(r15)(r1)
+       PPC_STL r16, HOST_NV_GPR(r16)(r1)
+       PPC_STL r17, HOST_NV_GPR(r17)(r1)
+       PPC_STL r18, HOST_NV_GPR(r18)(r1)
+       PPC_STL r19, HOST_NV_GPR(r19)(r1)
+       PPC_STL r20, HOST_NV_GPR(r20)(r1)
+       PPC_STL r21, HOST_NV_GPR(r21)(r1)
+       PPC_STL r22, HOST_NV_GPR(r22)(r1)
+       PPC_STL r23, HOST_NV_GPR(r23)(r1)
+       PPC_STL r24, HOST_NV_GPR(r24)(r1)
+       PPC_STL r25, HOST_NV_GPR(r25)(r1)
+       PPC_STL r26, HOST_NV_GPR(r26)(r1)
+       PPC_STL r27, HOST_NV_GPR(r27)(r1)
+       PPC_STL r28, HOST_NV_GPR(r28)(r1)
+       PPC_STL r29, HOST_NV_GPR(r29)(r1)
+       PPC_STL r30, HOST_NV_GPR(r30)(r1)
+       PPC_STL r31, HOST_NV_GPR(r31)(r1)
+
+       /* Load guest non-volatiles. */
+       PPC_LL  r14, VCPU_GPR(r14)(r4)
+       PPC_LL  r15, VCPU_GPR(r15)(r4)
+       PPC_LL  r16, VCPU_GPR(r16)(r4)
+       PPC_LL  r17, VCPU_GPR(r17)(r4)
+       PPC_LL  r18, VCPU_GPR(r18)(r4)
+       PPC_LL  r19, VCPU_GPR(r19)(r4)
+       PPC_LL  r20, VCPU_GPR(r20)(r4)
+       PPC_LL  r21, VCPU_GPR(r21)(r4)
+       PPC_LL  r22, VCPU_GPR(r22)(r4)
+       PPC_LL  r23, VCPU_GPR(r23)(r4)
+       PPC_LL  r24, VCPU_GPR(r24)(r4)
+       PPC_LL  r25, VCPU_GPR(r25)(r4)
+       PPC_LL  r26, VCPU_GPR(r26)(r4)
+       PPC_LL  r27, VCPU_GPR(r27)(r4)
+       PPC_LL  r28, VCPU_GPR(r28)(r4)
+       PPC_LL  r29, VCPU_GPR(r29)(r4)
+       PPC_LL  r30, VCPU_GPR(r30)(r4)
+       PPC_LL  r31, VCPU_GPR(r31)(r4)
+
+
+lightweight_exit:
+       PPC_STL r2, HOST_R2(r1)
+
+       mfspr   r3, SPRN_PID
+       stw     r3, VCPU_HOST_PID(r4)
+       lwz     r3, VCPU_GUEST_PID(r4)
+       mtspr   SPRN_PID, r3
+
+       PPC_LL  r11, VCPU_SHARED(r4)
+       /* Disable MAS register updates via exception */
+       mfspr   r3, SPRN_EPCR
+       oris    r3, r3, SPRN_EPCR_DMIUH@h
+       mtspr   SPRN_EPCR, r3
+       isync
+       /* Save host mas4 and mas6 and load guest MAS registers */
+       mfspr   r3, SPRN_MAS4
+       stw     r3, VCPU_HOST_MAS4(r4)
+       mfspr   r3, SPRN_MAS6
+       stw     r3, VCPU_HOST_MAS6(r4)
+       lwz     r3, VCPU_SHARED_MAS0(r11)
+       lwz     r5, VCPU_SHARED_MAS1(r11)
+       PPC_LD(r6, VCPU_SHARED_MAS2, r11)
+       lwz     r7, VCPU_SHARED_MAS7_3+4(r11)
+       lwz     r8, VCPU_SHARED_MAS4(r11)
+       mtspr   SPRN_MAS0, r3
+       mtspr   SPRN_MAS1, r5
+       mtspr   SPRN_MAS2, r6
+       mtspr   SPRN_MAS3, r7
+       mtspr   SPRN_MAS4, r8
+       lwz     r3, VCPU_SHARED_MAS6(r11)
+       lwz     r5, VCPU_SHARED_MAS7_3+0(r11)
+       mtspr   SPRN_MAS6, r3
+       mtspr   SPRN_MAS7, r5
+
+       /*
+        * Host interrupt handlers may have clobbered these guest-readable
+        * SPRGs, so we need to reload them here with the guest's values.
+        */
+       lwz     r3, VCPU_VRSAVE(r4)
+       PPC_LD(r5, VCPU_SHARED_SPRG4, r11)
+       mtspr   SPRN_VRSAVE, r3
+       PPC_LD(r6, VCPU_SHARED_SPRG5, r11)
+       mtspr   SPRN_SPRG4W, r5
+       PPC_LD(r7, VCPU_SHARED_SPRG6, r11)
+       mtspr   SPRN_SPRG5W, r6
+       PPC_LD(r8, VCPU_SHARED_SPRG7, r11)
+       mtspr   SPRN_SPRG6W, r7
+       mtspr   SPRN_SPRG7W, r8
+
+       /* Load some guest volatiles. */
+       PPC_LL  r3, VCPU_LR(r4)
+       PPC_LL  r5, VCPU_XER(r4)
+       PPC_LL  r6, VCPU_CTR(r4)
+       lwz     r7, VCPU_CR(r4)
+       PPC_LL  r8, VCPU_PC(r4)
+       PPC_LD(r9, VCPU_SHARED_MSR, r11)
+       PPC_LL  r0, VCPU_GPR(r0)(r4)
+       PPC_LL  r1, VCPU_GPR(r1)(r4)
+       PPC_LL  r2, VCPU_GPR(r2)(r4)
+       PPC_LL  r10, VCPU_GPR(r10)(r4)
+       PPC_LL  r11, VCPU_GPR(r11)(r4)
+       PPC_LL  r12, VCPU_GPR(r12)(r4)
+       PPC_LL  r13, VCPU_GPR(r13)(r4)
+       mtlr    r3
+       mtxer   r5
+       mtctr   r6
+       mtsrr0  r8
+       mtsrr1  r9
+
+#ifdef CONFIG_KVM_EXIT_TIMING
+       /* save enter time */
+1:
+       mfspr   r6, SPRN_TBRU
+       mfspr   r9, SPRN_TBRL
+       mfspr   r8, SPRN_TBRU
+       cmpw    r8, r6
+       stw     r9, VCPU_TIMING_LAST_ENTER_TBL(r4)
+       bne     1b
+       stw     r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
+#endif
+
+       /*
+        * Don't execute any instruction which can change CR after
+        * below instruction.
+        */
+       mtcr    r7
+
+       /* Finish loading guest volatiles and jump to guest. */
+       PPC_LL  r5, VCPU_GPR(r5)(r4)
+       PPC_LL  r6, VCPU_GPR(r6)(r4)
+       PPC_LL  r7, VCPU_GPR(r7)(r4)
+       PPC_LL  r8, VCPU_GPR(r8)(r4)
+       PPC_LL  r9, VCPU_GPR(r9)(r4)
+
+       PPC_LL  r3, VCPU_GPR(r3)(r4)
+       PPC_LL  r4, VCPU_GPR(r4)(r4)
+       rfi
index ddcd896fa2ffee16d037c7ecedb839e3d24741a5..b479ed77c515331315703d18a214d849f7209b2e 100644 (file)
 #include <asm/reg.h>
 #include <asm/cputable.h>
 #include <asm/tlbflush.h>
-#include <asm/kvm_e500.h>
 #include <asm/kvm_ppc.h>
 
+#include "../mm/mmu_decl.h"
 #include "booke.h"
-#include "e500_tlb.h"
+#include "e500.h"
+
+struct id {
+       unsigned long val;
+       struct id **pentry;
+};
+
+#define NUM_TIDS 256
+
+/*
+ * This table provides mappings from:
+ * (guestAS,guestTID,guestPR) --> ID of physical cpu
+ * guestAS     [0..1]
+ * guestTID    [0..255]
+ * guestPR     [0..1]
+ * ID          [1..255]
+ * Each vcpu keeps one vcpu_id_table.
+ */
+struct vcpu_id_table {
+       struct id id[2][NUM_TIDS][2];
+};
+
+/*
+ * This table provides reversed mappings of vcpu_id_table:
+ * ID --> address of vcpu_id_table item.
+ * Each physical core has one pcpu_id_table.
+ */
+struct pcpu_id_table {
+       struct id *entry[NUM_TIDS];
+};
+
+static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
+
+/* This variable keeps last used shadow ID on local core.
+ * The valid range of shadow ID is [1..255] */
+static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
+
+/*
+ * Allocate a free shadow id and setup a valid sid mapping in given entry.
+ * A mapping is only valid when vcpu_id_table and pcpu_id_table match.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static inline int local_sid_setup_one(struct id *entry)
+{
+       unsigned long sid;
+       int ret = -1;
+
+       sid = ++(__get_cpu_var(pcpu_last_used_sid));
+       if (sid < NUM_TIDS) {
+               __get_cpu_var(pcpu_sids).entry[sid] = entry;
+               entry->val = sid;
+               entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
+               ret = sid;
+       }
+
+       /*
+        * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
+        * the caller will invalidate everything and start over.
+        *
+        * sid > NUM_TIDS indicates a race, which we disable preemption to
+        * avoid.
+        */
+       WARN_ON(sid > NUM_TIDS);
+
+       return ret;
+}
+
+/*
+ * Check if the given entry contains a valid shadow id mapping.
+ * An ID mapping is considered valid only if
+ * both vcpu and pcpu know this mapping.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static inline int local_sid_lookup(struct id *entry)
+{
+       if (entry && entry->val != 0 &&
+           __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
+           entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
+               return entry->val;
+       return -1;
+}
+
+/* Invalidate all id mappings on local core -- call with preempt disabled */
+static inline void local_sid_destroy_all(void)
+{
+       __get_cpu_var(pcpu_last_used_sid) = 0;
+       memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
+}
+
+static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+       vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
+       return vcpu_e500->idt;
+}
+
+static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+       kfree(vcpu_e500->idt);
+       vcpu_e500->idt = NULL;
+}
+
+/* Map guest pid to shadow.
+ * We use PID to keep shadow of current guest non-zero PID,
+ * and use PID1 to keep shadow of guest zero PID,
+ * so that a guest tlbe with TID=0 can be accessed at any time. */
+static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+       preempt_disable();
+       vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
+                       get_cur_as(&vcpu_e500->vcpu),
+                       get_cur_pid(&vcpu_e500->vcpu),
+                       get_cur_pr(&vcpu_e500->vcpu), 1);
+       vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
+                       get_cur_as(&vcpu_e500->vcpu), 0,
+                       get_cur_pr(&vcpu_e500->vcpu), 1);
+       preempt_enable();
+}
+
+/* Invalidate all mappings on vcpu */
+static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+       memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
+
+       /* Update shadow pid when mappings are changed */
+       kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+}
+
+/* Invalidate one ID mapping on vcpu */
+static inline void kvmppc_e500_id_table_reset_one(
+                              struct kvmppc_vcpu_e500 *vcpu_e500,
+                              int as, int pid, int pr)
+{
+       struct vcpu_id_table *idt = vcpu_e500->idt;
+
+       BUG_ON(as >= 2);
+       BUG_ON(pid >= NUM_TIDS);
+       BUG_ON(pr >= 2);
+
+       idt->id[as][pid][pr].val = 0;
+       idt->id[as][pid][pr].pentry = NULL;
+
+       /* Update shadow pid when mappings are changed */
+       kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+}
+
+/*
+ * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
+ * This function first lookup if a valid mapping exists,
+ * if not, then creates a new one.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
+                                unsigned int as, unsigned int gid,
+                                unsigned int pr, int avoid_recursion)
+{
+       struct vcpu_id_table *idt = vcpu_e500->idt;
+       int sid;
+
+       BUG_ON(as >= 2);
+       BUG_ON(gid >= NUM_TIDS);
+       BUG_ON(pr >= 2);
+
+       sid = local_sid_lookup(&idt->id[as][gid][pr]);
+
+       while (sid <= 0) {
+               /* No mapping yet */
+               sid = local_sid_setup_one(&idt->id[as][gid][pr]);
+               if (sid <= 0) {
+                       _tlbil_all();
+                       local_sid_destroy_all();
+               }
+
+               /* Update shadow pid when mappings are changed */
+               if (!avoid_recursion)
+                       kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+       }
+
+       return sid;
+}
+
+unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
+                                     struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+       return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe),
+                                  get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0);
+}
+
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
+{
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+       if (vcpu->arch.pid != pid) {
+               vcpu_e500->pid[0] = vcpu->arch.pid = pid;
+               kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+       }
+}
+
+/* gtlbe must not be mapped by more than one host tlbe */
+void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
+                           struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+       struct vcpu_id_table *idt = vcpu_e500->idt;
+       unsigned int pr, tid, ts, pid;
+       u32 val, eaddr;
+       unsigned long flags;
+
+       ts = get_tlb_ts(gtlbe);
+       tid = get_tlb_tid(gtlbe);
+
+       preempt_disable();
+
+       /* One guest ID may be mapped to two shadow IDs */
+       for (pr = 0; pr < 2; pr++) {
+               /*
+                * The shadow PID can have a valid mapping on at most one
+                * host CPU.  In the common case, it will be valid on this
+                * CPU, in which case we do a local invalidation of the
+                * specific address.
+                *
+                * If the shadow PID is not valid on the current host CPU,
+                * we invalidate the entire shadow PID.
+                */
+               pid = local_sid_lookup(&idt->id[ts][tid][pr]);
+               if (pid <= 0) {
+                       kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
+                       continue;
+               }
+
+               /*
+                * The guest is invalidating a 4K entry which is in a PID
+                * that has a valid shadow mapping on this host CPU.  We
+                * search host TLB to invalidate its shadow TLB entry,
+                * similar to __tlbil_va except that we need to look in AS1.
+                */
+               val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
+               eaddr = get_tlb_eaddr(gtlbe);
+
+               local_irq_save(flags);
+
+               mtspr(SPRN_MAS6, val);
+               asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
+               val = mfspr(SPRN_MAS1);
+               if (val & MAS1_VALID) {
+                       mtspr(SPRN_MAS1, val & ~MAS1_VALID);
+                       asm volatile("tlbwe");
+               }
+
+               local_irq_restore(flags);
+       }
+
+       preempt_enable();
+}
+
+void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+       kvmppc_e500_id_table_reset_all(vcpu_e500);
+}
+
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
+{
+       /* Recalc shadow pid since MSR changes */
+       kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
+}
 
 void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
 {
@@ -36,17 +307,20 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-       kvmppc_e500_tlb_load(vcpu, cpu);
+       kvmppc_booke_vcpu_load(vcpu, cpu);
+
+       /* Shadow PID may be expired on local core */
+       kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
-       kvmppc_e500_tlb_put(vcpu);
-
 #ifdef CONFIG_SPE
        if (vcpu->arch.shadow_msr & MSR_SPE)
                kvmppc_vcpu_disable_spe(vcpu);
 #endif
+
+       kvmppc_booke_vcpu_put(vcpu);
 }
 
 int kvmppc_core_check_processor_compat(void)
@@ -61,6 +335,23 @@ int kvmppc_core_check_processor_compat(void)
        return r;
 }
 
+static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+       struct kvm_book3e_206_tlb_entry *tlbe;
+
+       /* Insert large initial mapping for guest. */
+       tlbe = get_entry(vcpu_e500, 1, 0);
+       tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
+       tlbe->mas2 = 0;
+       tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;
+
+       /* 4K map for serial output. Used by kernel wrapper. */
+       tlbe = get_entry(vcpu_e500, 1, 1);
+       tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
+       tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
+       tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
+}
+
 int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -76,32 +367,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
-int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
-                               struct kvm_translation *tr)
-{
-       int index;
-       gva_t eaddr;
-       u8 pid;
-       u8 as;
-
-       eaddr = tr->linear_address;
-       pid = (tr->linear_address >> 32) & 0xff;
-       as = (tr->linear_address >> 40) & 0x1;
-
-       index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
-       if (index < 0) {
-               tr->valid = 0;
-               return 0;
-       }
-
-       tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
-       /* XXX what does "writeable" and "usermode" even mean? */
-       tr->valid = 1;
-
-       return 0;
-}
-
 void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -115,19 +380,6 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
        sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
        sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
 
-       sregs->u.e.mas0 = vcpu->arch.shared->mas0;
-       sregs->u.e.mas1 = vcpu->arch.shared->mas1;
-       sregs->u.e.mas2 = vcpu->arch.shared->mas2;
-       sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
-       sregs->u.e.mas4 = vcpu->arch.shared->mas4;
-       sregs->u.e.mas6 = vcpu->arch.shared->mas6;
-
-       sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG);
-       sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg;
-       sregs->u.e.tlbcfg[1] = vcpu_e500->tlb1cfg;
-       sregs->u.e.tlbcfg[2] = 0;
-       sregs->u.e.tlbcfg[3] = 0;
-
        sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
        sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
        sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
@@ -135,11 +387,13 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
                vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
 
        kvmppc_get_sregs_ivor(vcpu, sregs);
+       kvmppc_get_sregs_e500_tlb(vcpu, sregs);
 }
 
 int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+       int ret;
 
        if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
                vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
@@ -147,14 +401,9 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
                vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
        }
 
-       if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
-               vcpu->arch.shared->mas0 = sregs->u.e.mas0;
-               vcpu->arch.shared->mas1 = sregs->u.e.mas1;
-               vcpu->arch.shared->mas2 = sregs->u.e.mas2;
-               vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
-               vcpu->arch.shared->mas4 = sregs->u.e.mas4;
-               vcpu->arch.shared->mas6 = sregs->u.e.mas6;
-       }
+       ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
+       if (ret < 0)
+               return ret;
 
        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
                return 0;
@@ -193,9 +442,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
        if (err)
                goto free_vcpu;
 
+       if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
+               goto uninit_vcpu;
+
        err = kvmppc_e500_tlb_init(vcpu_e500);
        if (err)
-               goto uninit_vcpu;
+               goto uninit_id;
 
        vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (!vcpu->arch.shared)
@@ -205,6 +457,8 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 
 uninit_tlb:
        kvmppc_e500_tlb_uninit(vcpu_e500);
+uninit_id:
+       kvmppc_e500_id_table_free(vcpu_e500);
 uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
 free_vcpu:
@@ -218,11 +472,21 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
        free_page((unsigned long)vcpu->arch.shared);
-       kvm_vcpu_uninit(vcpu);
        kvmppc_e500_tlb_uninit(vcpu_e500);
+       kvmppc_e500_id_table_free(vcpu_e500);
+       kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
 }
 
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+       return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+}
+
 static int __init kvmppc_e500_init(void)
 {
        int r, i;
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
new file mode 100644 (file)
index 0000000..aa8b814
--- /dev/null
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Yu Liu <yu.liu@freescale.com>
+ *         Scott Wood <scottwood@freescale.com>
+ *         Ashish Kalra <ashish.kalra@freescale.com>
+ *         Varun Sethi <varun.sethi@freescale.com>
+ *
+ * Description:
+ * This file is based on arch/powerpc/kvm/44x_tlb.h and
+ * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <hollisb@us.ibm.com>,
+ * Copyright IBM Corp. 2007-2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef KVM_E500_H
+#define KVM_E500_H
+
+#include <linux/kvm_host.h>
+#include <asm/mmu-book3e.h>
+#include <asm/tlb.h>
+
+#define E500_PID_NUM   3
+#define E500_TLB_NUM   2
+
+#define E500_TLB_VALID 1
+#define E500_TLB_DIRTY 2
+#define E500_TLB_BITMAP 4
+
+struct tlbe_ref {
+       pfn_t pfn;
+       unsigned int flags; /* E500_TLB_* */
+};
+
+struct tlbe_priv {
+       struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
+};
+
+#ifdef CONFIG_KVM_E500V2
+struct vcpu_id_table;
+#endif
+
+struct kvmppc_e500_tlb_params {
+       int entries, ways, sets;
+};
+
+struct kvmppc_vcpu_e500 {
+       struct kvm_vcpu vcpu;
+
+       /* Unmodified copy of the guest's TLB -- shared with host userspace. */
+       struct kvm_book3e_206_tlb_entry *gtlb_arch;
+
+       /* Starting entry number in gtlb_arch[] */
+       int gtlb_offset[E500_TLB_NUM];
+
+       /* KVM internal information associated with each guest TLB entry */
+       struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
+
+       struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];
+
+       unsigned int gtlb_nv[E500_TLB_NUM];
+
+       /*
+        * information associated with each host TLB entry --
+        * TLB1 only for now.  If/when guest TLB1 entries can be
+        * mapped with host TLB0, this will be used for that too.
+        *
+        * We don't want to use this for guest TLB0 because then we'd
+        * have the overhead of doing the translation again even if
+        * the entry is still in the guest TLB (e.g. we swapped out
+        * and back, and our host TLB entries got evicted).
+        */
+       struct tlbe_ref *tlb_refs[E500_TLB_NUM];
+       unsigned int host_tlb1_nv;
+
+       u32 svr;
+       u32 l1csr0;
+       u32 l1csr1;
+       u32 hid0;
+       u32 hid1;
+       u64 mcar;
+
+       struct page **shared_tlb_pages;
+       int num_shared_tlb_pages;
+
+       u64 *g2h_tlb1_map;
+       unsigned int *h2g_tlb1_rmap;
+
+       /* Minimum and maximum address mapped by TLB1 */
+       unsigned long tlb1_min_eaddr;
+       unsigned long tlb1_max_eaddr;
+
+#ifdef CONFIG_KVM_E500V2
+       u32 pid[E500_PID_NUM];
+
+       /* vcpu id table */
+       struct vcpu_id_table *idt;
+#endif
+};
+
+static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
+{
+       return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
+}
+
+
+/* This geometry is the legacy default -- can be overridden by userspace */
+#define KVM_E500_TLB0_WAY_SIZE         128
+#define KVM_E500_TLB0_WAY_NUM          2
+
+#define KVM_E500_TLB0_SIZE  (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
+#define KVM_E500_TLB1_SIZE  16
+
+#define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF))
+#define tlbsel_of(index)       ((index) >> 16)
+#define esel_of(index)         ((index) & 0xFFFF)
+
+#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
+#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
+#define MAS2_ATTRIB_MASK \
+         (MAS2_X0 | MAS2_X1)
+#define MAS3_ATTRIB_MASK \
+         (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
+          | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
+
+int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
+                               ulong value);
+int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
+int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
+int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb);
+int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb);
+int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb);
+int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
+void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);
+
+void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+
+
+#ifdef CONFIG_KVM_E500V2
+unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
+                                unsigned int as, unsigned int gid,
+                                unsigned int pr, int avoid_recursion);
+#endif
+
+/* TLB helper functions */
+static inline unsigned int
+get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+       return (tlbe->mas1 >> 7) & 0x1f;
+}
+
+static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+       return tlbe->mas2 & 0xfffff000;
+}
+
+static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+       unsigned int pgsize = get_tlb_size(tlbe);
+       return 1ULL << 10 << pgsize;
+}
+
+static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+       u64 bytes = get_tlb_bytes(tlbe);
+       return get_tlb_eaddr(tlbe) + bytes - 1;
+}
+
+static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+       return tlbe->mas7_3 & ~0xfffULL;
+}
+
+static inline unsigned int
+get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+       return (tlbe->mas1 >> 16) & 0xff;
+}
+
+static inline unsigned int
+get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+       return (tlbe->mas1 >> 12) & 0x1;
+}
+
+static inline unsigned int
+get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+       return (tlbe->mas1 >> 31) & 0x1;
+}
+
+static inline unsigned int
+get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+       return (tlbe->mas1 >> 30) & 0x1;
+}
+
+static inline unsigned int
+get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+       return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
+}
+
+static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.pid & 0xff;
+}
+
+static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
+{
+       return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
+}
+
+static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
+{
+       return !!(vcpu->arch.shared->msr & MSR_PR);
+}
+
+static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
+{
+       return (vcpu->arch.shared->mas6 >> 16) & 0xff;
+}
+
+static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.shared->mas6 & 0x1;
+}
+
+static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
+{
+       /*
+        * The manual says that tlbsel is 2 bits wide.
+        * Since we only have two TLBs, only lower bit is used.
+        */
+       return (vcpu->arch.shared->mas0 >> 28) & 0x1;
+}
+
+static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.shared->mas0 & 0xfff;
+}
+
+static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
+{
+       return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
+}
+
+static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
+                       const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+       gpa_t gpa;
+
+       if (!get_tlb_v(tlbe))
+               return 0; /* invalid entries are never mapped on the host */
+
+#ifndef CONFIG_KVM_BOOKE_HV
+       /* Does it match current guest AS? */
+       /* XXX what about IS != DS? */
+       if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
+               return 0;
+#endif
+
+       gpa = get_tlb_raddr(tlbe);
+       if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
+               /* Mapping is not for RAM. */
+               return 0;
+
+       return 1; /* valid, matches AS, and backed by a memslot */
+}
+
+static inline struct kvm_book3e_206_tlb_entry *get_entry(
+       struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
+{
+       int offset = vcpu_e500->gtlb_offset[tlbsel]; /* start of this TLB's entries in the flat gtlb_arch array */
+       return &vcpu_e500->gtlb_arch[offset + entry];
+}
+
+void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
+                          struct kvm_book3e_206_tlb_entry *gtlbe);
+void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
+
+#ifdef CONFIG_KVM_BOOKE_HV
+#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe)       get_tlb_tid(gtlbe)
+#define get_tlbmiss_tid(vcpu)           get_cur_pid(vcpu)
+#define get_tlb_sts(gtlbe)              (gtlbe->mas1 & MAS1_TS)
+#else
+unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
+                                     struct kvm_book3e_206_tlb_entry *gtlbe);
+
+static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
+{
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+       unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf; /* MAS4[TIDSELD]: selects which PID register supplies the TID */
+
+       return vcpu_e500->pid[tidseld];
+}
+
+/* Force TS=1 for all guest mappings. */
+#define get_tlb_sts(gtlbe)              (MAS1_TS)
+#endif /* !BOOKE_HV */
+
+#endif /* KVM_E500_H */
index 6d0b2bd54fb0a9ffef74f3bfb21b04fa34aa4ce7..8b99e076dc8183f0f82109c7304799a0f8d8ac81 100644 (file)
 
 #include <asm/kvm_ppc.h>
 #include <asm/disassemble.h>
-#include <asm/kvm_e500.h>
+#include <asm/dbell.h>
 
 #include "booke.h"
-#include "e500_tlb.h"
+#include "e500.h"
 
+#define XOP_MSGSND  206
+#define XOP_MSGCLR  238
 #define XOP_TLBIVAX 786
 #define XOP_TLBSX   914
 #define XOP_TLBRE   946
 #define XOP_TLBWE   978
+#define XOP_TLBILX  18
+
+#ifdef CONFIG_KVM_E500MC
+static int dbell2prio(ulong param)
+{
+       int msg = param & PPC_DBELL_TYPE_MASK; /* doorbell message type field of the register value */
+       int prio = -1; /* -1 = doorbell type we do not emulate */
+
+       switch (msg) {
+       case PPC_DBELL_TYPE(PPC_DBELL):
+               prio = BOOKE_IRQPRIO_DBELL;
+               break;
+       case PPC_DBELL_TYPE(PPC_DBELL_CRIT):
+               prio = BOOKE_IRQPRIO_DBELL_CRIT;
+               break;
+       default:
+               break;
+       }
+
+       return prio;
+}
+
+static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
+{
+       ulong param = vcpu->arch.gpr[rb]; /* rb is the register number; the doorbell message is its value */
+       int prio = dbell2prio(param);
+
+       if (prio < 0)
+               return EMULATE_FAIL; /* not a doorbell type we emulate */
+
+       clear_bit(prio, &vcpu->arch.pending_exceptions);
+       return EMULATE_DONE;
+}
+
+static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
+{
+       ulong param = vcpu->arch.gpr[rb]; /* rb is the register number; the doorbell message is its value */
+       int prio = dbell2prio(param); /* was dbell2prio(rb): prio comes from the register's value, not its number */
+       int pir = param & PPC_DBELL_PIR_MASK; /* target processor ID from the message */
+       int i;
+       struct kvm_vcpu *cvcpu;
+
+       if (prio < 0)
+               return EMULATE_FAIL; /* not a doorbell type we emulate */
+
+       kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
+               int cpir = cvcpu->arch.shared->pir;
+               if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) {
+                       set_bit(prio, &cvcpu->arch.pending_exceptions);
+                       kvm_vcpu_kick(cvcpu); /* wake the target vcpu so it takes the doorbell */
+               }
+       }
+
+       return EMULATE_DONE;
+}
+#endif
 
 int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
 {
        int emulated = EMULATE_DONE;
-       int ra;
-       int rb;
+       int ra = get_ra(inst);
+       int rb = get_rb(inst);
+       int rt = get_rt(inst);
 
        switch (get_op(inst)) {
        case 31:
                switch (get_xop(inst)) {
 
+#ifdef CONFIG_KVM_E500MC
+               case XOP_MSGSND:
+                       emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
+                       break;
+
+               case XOP_MSGCLR:
+                       emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
+                       break;
+#endif
+
                case XOP_TLBRE:
                        emulated = kvmppc_e500_emul_tlbre(vcpu);
                        break;
@@ -44,13 +113,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        break;
 
                case XOP_TLBSX:
-                       rb = get_rb(inst);
                        emulated = kvmppc_e500_emul_tlbsx(vcpu,rb);
                        break;
 
+               case XOP_TLBILX:
+                       emulated = kvmppc_e500_emul_tlbilx(vcpu, rt, ra, rb);
+                       break;
+
                case XOP_TLBIVAX:
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
                        emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb);
                        break;
 
@@ -70,52 +140,63 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
        return emulated;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int emulated = EMULATE_DONE;
-       ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
        switch (sprn) {
+#ifndef CONFIG_KVM_BOOKE_HV
        case SPRN_PID:
                kvmppc_set_pid(vcpu, spr_val);
                break;
        case SPRN_PID1:
                if (spr_val != 0)
                        return EMULATE_FAIL;
-               vcpu_e500->pid[1] = spr_val; break;
+               vcpu_e500->pid[1] = spr_val;
+               break;
        case SPRN_PID2:
                if (spr_val != 0)
                        return EMULATE_FAIL;
-               vcpu_e500->pid[2] = spr_val; break;
+               vcpu_e500->pid[2] = spr_val;
+               break;
        case SPRN_MAS0:
-               vcpu->arch.shared->mas0 = spr_val; break;
+               vcpu->arch.shared->mas0 = spr_val;
+               break;
        case SPRN_MAS1:
-               vcpu->arch.shared->mas1 = spr_val; break;
+               vcpu->arch.shared->mas1 = spr_val;
+               break;
        case SPRN_MAS2:
-               vcpu->arch.shared->mas2 = spr_val; break;
+               vcpu->arch.shared->mas2 = spr_val;
+               break;
        case SPRN_MAS3:
                vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
                vcpu->arch.shared->mas7_3 |= spr_val;
                break;
        case SPRN_MAS4:
-               vcpu->arch.shared->mas4 = spr_val; break;
+               vcpu->arch.shared->mas4 = spr_val;
+               break;
        case SPRN_MAS6:
-               vcpu->arch.shared->mas6 = spr_val; break;
+               vcpu->arch.shared->mas6 = spr_val;
+               break;
        case SPRN_MAS7:
                vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
                vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
                break;
+#endif
        case SPRN_L1CSR0:
                vcpu_e500->l1csr0 = spr_val;
                vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
                break;
        case SPRN_L1CSR1:
-               vcpu_e500->l1csr1 = spr_val; break;
+               vcpu_e500->l1csr1 = spr_val;
+               break;
        case SPRN_HID0:
-               vcpu_e500->hid0 = spr_val; break;
+               vcpu_e500->hid0 = spr_val;
+               break;
        case SPRN_HID1:
-               vcpu_e500->hid1 = spr_val; break;
+               vcpu_e500->hid1 = spr_val;
+               break;
 
        case SPRN_MMUCSR0:
                emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
@@ -135,81 +216,112 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
        case SPRN_IVOR35:
                vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
                break;
-
+#ifdef CONFIG_KVM_BOOKE_HV
+       case SPRN_IVOR36:
+               vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
+               break;
+       case SPRN_IVOR37:
+               vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
+               break;
+#endif
        default:
-               emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
+               emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
        }
 
        return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int emulated = EMULATE_DONE;
-       unsigned long val;
 
        switch (sprn) {
+#ifndef CONFIG_KVM_BOOKE_HV
        case SPRN_PID:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break;
+               *spr_val = vcpu_e500->pid[0];
+               break;
        case SPRN_PID1:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break;
+               *spr_val = vcpu_e500->pid[1];
+               break;
        case SPRN_PID2:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
+               *spr_val = vcpu_e500->pid[2];
+               break;
        case SPRN_MAS0:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break;
+               *spr_val = vcpu->arch.shared->mas0;
+               break;
        case SPRN_MAS1:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break;
+               *spr_val = vcpu->arch.shared->mas1;
+               break;
        case SPRN_MAS2:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break;
+               *spr_val = vcpu->arch.shared->mas2;
+               break;
        case SPRN_MAS3:
-               val = (u32)vcpu->arch.shared->mas7_3;
-               kvmppc_set_gpr(vcpu, rt, val);
+               *spr_val = (u32)vcpu->arch.shared->mas7_3;
                break;
        case SPRN_MAS4:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break;
+               *spr_val = vcpu->arch.shared->mas4;
+               break;
        case SPRN_MAS6:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break;
+               *spr_val = vcpu->arch.shared->mas6;
+               break;
        case SPRN_MAS7:
-               val = vcpu->arch.shared->mas7_3 >> 32;
-               kvmppc_set_gpr(vcpu, rt, val);
+               *spr_val = vcpu->arch.shared->mas7_3 >> 32;
                break;
+#endif
        case SPRN_TLB0CFG:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break;
+               *spr_val = vcpu->arch.tlbcfg[0];
+               break;
        case SPRN_TLB1CFG:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break;
+               *spr_val = vcpu->arch.tlbcfg[1];
+               break;
        case SPRN_L1CSR0:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break;
+               *spr_val = vcpu_e500->l1csr0;
+               break;
        case SPRN_L1CSR1:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break;
+               *spr_val = vcpu_e500->l1csr1;
+               break;
        case SPRN_HID0:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
+               *spr_val = vcpu_e500->hid0;
+               break;
        case SPRN_HID1:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
+               *spr_val = vcpu_e500->hid1;
+               break;
        case SPRN_SVR:
-               kvmppc_set_gpr(vcpu, rt, vcpu_e500->svr); break;
+               *spr_val = vcpu_e500->svr;
+               break;
 
        case SPRN_MMUCSR0:
-               kvmppc_set_gpr(vcpu, rt, 0); break;
+               *spr_val = 0;
+               break;
 
        case SPRN_MMUCFG:
-               kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break;
+               *spr_val = vcpu->arch.mmucfg;
+               break;
 
        /* extra exceptions */
        case SPRN_IVOR32:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
                break;
        case SPRN_IVOR33:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
                break;
        case SPRN_IVOR34:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
                break;
        case SPRN_IVOR35:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
+               break;
+#ifdef CONFIG_KVM_BOOKE_HV
+       case SPRN_IVOR36:
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
+               break;
+       case SPRN_IVOR37:
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
                break;
+#endif
        default:
-               emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
+               emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
        }
 
        return emulated;
index 6e53e4164de195583a138c7de853f89a94746c97..c510fc961302c2d1ae1284cc3d1aab1139002fbd 100644 (file)
@@ -2,6 +2,9 @@
  * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Yu Liu, yu.liu@freescale.com
+ *         Scott Wood, scottwood@freescale.com
+ *         Ashish Kalra, ashish.kalra@freescale.com
+ *         Varun Sethi, varun.sethi@freescale.com
  *
  * Description:
  * This file is based on arch/powerpc/kvm/44x_tlb.c,
 #include <linux/vmalloc.h>
 #include <linux/hugetlb.h>
 #include <asm/kvm_ppc.h>
-#include <asm/kvm_e500.h>
 
-#include "../mm/mmu_decl.h"
-#include "e500_tlb.h"
+#include "e500.h"
 #include "trace.h"
 #include "timing.h"
 
 #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
 
-struct id {
-       unsigned long val;
-       struct id **pentry;
-};
-
-#define NUM_TIDS 256
-
-/*
- * This table provide mappings from:
- * (guestAS,guestTID,guestPR) --> ID of physical cpu
- * guestAS     [0..1]
- * guestTID    [0..255]
- * guestPR     [0..1]
- * ID          [1..255]
- * Each vcpu keeps one vcpu_id_table.
- */
-struct vcpu_id_table {
-       struct id id[2][NUM_TIDS][2];
-};
-
-/*
- * This table provide reversed mappings of vcpu_id_table:
- * ID --> address of vcpu_id_table item.
- * Each physical core has one pcpu_id_table.
- */
-struct pcpu_id_table {
-       struct id *entry[NUM_TIDS];
-};
-
-static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
-
-/* This variable keeps last used shadow ID on local core.
- * The valid range of shadow ID is [1..255] */
-static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
-
 static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
 
-static struct kvm_book3e_206_tlb_entry *get_entry(
-       struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
-{
-       int offset = vcpu_e500->gtlb_offset[tlbsel];
-       return &vcpu_e500->gtlb_arch[offset + entry];
-}
-
-/*
- * Allocate a free shadow id and setup a valid sid mapping in given entry.
- * A mapping is only valid when vcpu_id_table and pcpu_id_table are match.
- *
- * The caller must have preemption disabled, and keep it that way until
- * it has finished with the returned shadow id (either written into the
- * TLB or arch.shadow_pid, or discarded).
- */
-static inline int local_sid_setup_one(struct id *entry)
-{
-       unsigned long sid;
-       int ret = -1;
-
-       sid = ++(__get_cpu_var(pcpu_last_used_sid));
-       if (sid < NUM_TIDS) {
-               __get_cpu_var(pcpu_sids).entry[sid] = entry;
-               entry->val = sid;
-               entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
-               ret = sid;
-       }
-
-       /*
-        * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
-        * the caller will invalidate everything and start over.
-        *
-        * sid > NUM_TIDS indicates a race, which we disable preemption to
-        * avoid.
-        */
-       WARN_ON(sid > NUM_TIDS);
-
-       return ret;
-}
-
-/*
- * Check if given entry contain a valid shadow id mapping.
- * An ID mapping is considered valid only if
- * both vcpu and pcpu know this mapping.
- *
- * The caller must have preemption disabled, and keep it that way until
- * it has finished with the returned shadow id (either written into the
- * TLB or arch.shadow_pid, or discarded).
- */
-static inline int local_sid_lookup(struct id *entry)
-{
-       if (entry && entry->val != 0 &&
-           __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
-           entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
-               return entry->val;
-       return -1;
-}
-
-/* Invalidate all id mappings on local core -- call with preempt disabled */
-static inline void local_sid_destroy_all(void)
-{
-       __get_cpu_var(pcpu_last_used_sid) = 0;
-       memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
-}
-
-static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-       vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
-       return vcpu_e500->idt;
-}
-
-static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-       kfree(vcpu_e500->idt);
-}
-
-/* Invalidate all mappings on vcpu */
-static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-       memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
-
-       /* Update shadow pid when mappings are changed */
-       kvmppc_e500_recalc_shadow_pid(vcpu_e500);
-}
-
-/* Invalidate one ID mapping on vcpu */
-static inline void kvmppc_e500_id_table_reset_one(
-                              struct kvmppc_vcpu_e500 *vcpu_e500,
-                              int as, int pid, int pr)
-{
-       struct vcpu_id_table *idt = vcpu_e500->idt;
-
-       BUG_ON(as >= 2);
-       BUG_ON(pid >= NUM_TIDS);
-       BUG_ON(pr >= 2);
-
-       idt->id[as][pid][pr].val = 0;
-       idt->id[as][pid][pr].pentry = NULL;
-
-       /* Update shadow pid when mappings are changed */
-       kvmppc_e500_recalc_shadow_pid(vcpu_e500);
-}
-
-/*
- * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
- * This function first lookup if a valid mapping exists,
- * if not, then creates a new one.
- *
- * The caller must have preemption disabled, and keep it that way until
- * it has finished with the returned shadow id (either written into the
- * TLB or arch.shadow_pid, or discarded).
- */
-static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
-                                       unsigned int as, unsigned int gid,
-                                       unsigned int pr, int avoid_recursion)
-{
-       struct vcpu_id_table *idt = vcpu_e500->idt;
-       int sid;
-
-       BUG_ON(as >= 2);
-       BUG_ON(gid >= NUM_TIDS);
-       BUG_ON(pr >= 2);
-
-       sid = local_sid_lookup(&idt->id[as][gid][pr]);
-
-       while (sid <= 0) {
-               /* No mapping yet */
-               sid = local_sid_setup_one(&idt->id[as][gid][pr]);
-               if (sid <= 0) {
-                       _tlbil_all();
-                       local_sid_destroy_all();
-               }
-
-               /* Update shadow pid when mappings are changed */
-               if (!avoid_recursion)
-                       kvmppc_e500_recalc_shadow_pid(vcpu_e500);
-       }
-
-       return sid;
-}
-
-/* Map guest pid to shadow.
- * We use PID to keep shadow of current guest non-zero PID,
- * and use PID1 to keep shadow of guest zero PID.
- * So that guest tlbe with TID=0 can be accessed at any time */
-void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-       preempt_disable();
-       vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
-                       get_cur_as(&vcpu_e500->vcpu),
-                       get_cur_pid(&vcpu_e500->vcpu),
-                       get_cur_pr(&vcpu_e500->vcpu), 1);
-       vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
-                       get_cur_as(&vcpu_e500->vcpu), 0,
-                       get_cur_pr(&vcpu_e500->vcpu), 1);
-       preempt_enable();
-}
-
 static inline unsigned int gtlb0_get_next_victim(
                struct kvmppc_vcpu_e500 *vcpu_e500)
 {
@@ -258,6 +66,7 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
        /* Mask off reserved bits. */
        mas3 &= MAS3_ATTRIB_MASK;
 
+#ifndef CONFIG_KVM_BOOKE_HV
        if (!usermode) {
                /* Guest is in supervisor mode,
                 * so we need to translate guest
@@ -265,8 +74,9 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
                mas3 &= ~E500_TLB_USER_PERM_MASK;
                mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
        }
-
-       return mas3 | E500_TLB_SUPER_PERM_MASK;
+       mas3 |= E500_TLB_SUPER_PERM_MASK;
+#endif
+       return mas3;
 }
 
 static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
@@ -292,7 +102,16 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
        mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
        mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
        mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
+#ifdef CONFIG_KVM_BOOKE_HV
+       mtspr(SPRN_MAS8, stlbe->mas8);
+#endif
        asm volatile("isync; tlbwe" : : : "memory");
+
+#ifdef CONFIG_KVM_BOOKE_HV
+       /* Must clear mas8 for other host tlbwe's */
+       mtspr(SPRN_MAS8, 0);
+       isync();
+#endif
        local_irq_restore(flags);
 
        trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
@@ -337,6 +156,7 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
        }
 }
 
+#ifdef CONFIG_KVM_E500V2
 void kvmppc_map_magic(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -361,75 +181,41 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
        __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
        preempt_enable();
 }
-
-void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
-{
-       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-
-       /* Shadow PID may be expired on local core */
-       kvmppc_e500_recalc_shadow_pid(vcpu_e500);
-}
-
-void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
-{
-}
+#endif
 
 static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
                                int tlbsel, int esel)
 {
        struct kvm_book3e_206_tlb_entry *gtlbe =
                get_entry(vcpu_e500, tlbsel, esel);
-       struct vcpu_id_table *idt = vcpu_e500->idt;
-       unsigned int pr, tid, ts, pid;
-       u32 val, eaddr;
-       unsigned long flags;
-
-       ts = get_tlb_ts(gtlbe);
-       tid = get_tlb_tid(gtlbe);
-
-       preempt_disable();
-
-       /* One guest ID may be mapped to two shadow IDs */
-       for (pr = 0; pr < 2; pr++) {
-               /*
-                * The shadow PID can have a valid mapping on at most one
-                * host CPU.  In the common case, it will be valid on this
-                * CPU, in which case (for TLB0) we do a local invalidation
-                * of the specific address.
-                *
-                * If the shadow PID is not valid on the current host CPU, or
-                * if we're invalidating a TLB1 entry, we invalidate the
-                * entire shadow PID.
-                */
-               if (tlbsel == 1 ||
-                   (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
-                       kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
-                       continue;
-               }
 
-               /*
-                * The guest is invalidating a TLB0 entry which is in a PID
-                * that has a valid shadow mapping on this host CPU.  We
-                * search host TLB0 to invalidate it's shadow TLB entry,
-                * similar to __tlbil_va except that we need to look in AS1.
-                */
-               val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
-               eaddr = get_tlb_eaddr(gtlbe);
+       if (tlbsel == 1 &&
+           vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) {
+               u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; /* bitmap of host TLB1 slots backing this guest entry */
+               int hw_tlb_indx;
+               unsigned long flags;
 
                local_irq_save(flags);
-
-               mtspr(SPRN_MAS6, val);
-               asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
-               val = mfspr(SPRN_MAS1);
-               if (val & MAS1_VALID) {
-                       mtspr(SPRN_MAS1, val & ~MAS1_VALID);
+               while (tmp) {
+                       hw_tlb_indx = __ilog2_u64(tmp & -tmp); /* index of lowest set bit */
+                       mtspr(SPRN_MAS0,
+                             MAS0_TLBSEL(1) |
+                             MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
+                       mtspr(SPRN_MAS1, 0); /* write an invalid entry over that host slot */
+                       asm volatile("tlbwe");
+                       vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
+                       tmp &= tmp - 1; /* clear lowest set bit */
                }
-
+               mb();
+               vcpu_e500->g2h_tlb1_map[esel] = 0;
+               vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP;
                local_irq_restore(flags);
+
+               return;
        }
 
-       preempt_enable();
+       /* Guest tlbe is backed by at most one host tlbe per shadow pid. */
+       kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
 }
 
 static int tlb0_set_base(gva_t addr, int sets, int ways)
@@ -475,6 +261,9 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
                set_base = gtlb0_set_base(vcpu_e500, eaddr);
                size = vcpu_e500->gtlb_params[0].ways;
        } else {
+               if (eaddr < vcpu_e500->tlb1_min_eaddr ||
+                               eaddr > vcpu_e500->tlb1_max_eaddr)
+                       return -1;
                set_base = 0;
        }
 
@@ -530,6 +319,16 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
        }
 }
 
+static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+       if (vcpu_e500->g2h_tlb1_map) /* guest->host TLB1 mapping bitmap */
+               memset(vcpu_e500->g2h_tlb1_map, 0, /* args were swapped: length was 0, so nothing was cleared */
+                      sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
+       if (vcpu_e500->h2g_tlb1_rmap) /* host->guest TLB1 reverse map */
+               memset(vcpu_e500->h2g_tlb1_rmap, 0,
+                      sizeof(unsigned int) * host_tlb_params[1].entries);
+}
+
 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
        int tlbsel = 0;
@@ -547,7 +346,7 @@ static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
        int stlbsel = 1;
        int i;
 
-       kvmppc_e500_id_table_reset_all(vcpu_e500);
+       kvmppc_e500_tlbil_all(vcpu_e500);
 
        for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
                struct tlbe_ref *ref =
@@ -562,19 +361,18 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
                unsigned int eaddr, int as)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-       unsigned int victim, pidsel, tsized;
+       unsigned int victim, tsized;
        int tlbsel;
 
        /* since we only have two TLBs, only lower bit is used. */
        tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
        victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
-       pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf;
        tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;
 
        vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
                | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
        vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
-               | MAS1_TID(vcpu_e500->pid[pidsel])
+               | MAS1_TID(get_tlbmiss_tid(vcpu))
                | MAS1_TSIZE(tsized);
        vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
                | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
@@ -586,23 +384,26 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
 
 /* TID must be supplied by the caller */
 static inline void kvmppc_e500_setup_stlbe(
-       struct kvmppc_vcpu_e500 *vcpu_e500,
+       struct kvm_vcpu *vcpu,
        struct kvm_book3e_206_tlb_entry *gtlbe,
        int tsize, struct tlbe_ref *ref, u64 gvaddr,
        struct kvm_book3e_206_tlb_entry *stlbe)
 {
        pfn_t pfn = ref->pfn;
+       u32 pr = vcpu->arch.shared->msr & MSR_PR; /* guest privilege state, used for permission translation */
 
        BUG_ON(!(ref->flags & E500_TLB_VALID));
 
-       /* Force TS=1 IPROT=0 for all guest mappings. */
-       stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID;
-       stlbe->mas2 = (gvaddr & MAS2_EPN)
-               | e500_shadow_mas2_attrib(gtlbe->mas2,
-                               vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
-       stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT)
-               | e500_shadow_mas3_attrib(gtlbe->mas7_3,
-                               vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
+       /* Force IPROT=0 for all guest mappings. */
+       stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
+       stlbe->mas2 = (gvaddr & MAS2_EPN) |
+                     e500_shadow_mas2_attrib(gtlbe->mas2, pr);
+       stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
+                       e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
+
+#ifdef CONFIG_KVM_BOOKE_HV
+       stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid; /* NOTE(review): presumably tags entry as guest space with this VM's LPID — confirm MAS8 field semantics */
+#endif
 }
 
 static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -736,7 +537,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        kvmppc_e500_ref_release(ref);
        kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
-       kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, ref, gvaddr, stlbe);
+       kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
+                               ref, gvaddr, stlbe);
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
@@ -760,7 +562,7 @@ static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 /* XXX for both one-one and one-to-many , for now use TLB1 */
 static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
-               struct kvm_book3e_206_tlb_entry *stlbe)
+               struct kvm_book3e_206_tlb_entry *stlbe, int esel)
 {
        struct tlbe_ref *ref;
        unsigned int victim;
@@ -773,15 +575,74 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        ref = &vcpu_e500->tlb_refs[1][victim];
        kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);
 
+       vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim;
+       vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+       if (vcpu_e500->h2g_tlb1_rmap[victim]) {
+               unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim];
+               vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim);
+       }
+       vcpu_e500->h2g_tlb1_rmap[victim] = esel;
+
        return victim;
 }
 
-void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
+static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+       int size = vcpu_e500->gtlb_params[1].entries;
+       unsigned int offset;
+       gva_t eaddr;
+       int i;
+
+       vcpu_e500->tlb1_min_eaddr = ~0UL;
+       vcpu_e500->tlb1_max_eaddr = 0;
+       offset = vcpu_e500->gtlb_offset[1];
+
+       for (i = 0; i < size; i++) {
+               struct kvm_book3e_206_tlb_entry *tlbe =
+                       &vcpu_e500->gtlb_arch[offset + i];
+
+               if (!get_tlb_v(tlbe))
+                       continue;
+
+               eaddr = get_tlb_eaddr(tlbe);
+               vcpu_e500->tlb1_min_eaddr =
+                               min(vcpu_e500->tlb1_min_eaddr, eaddr);
+
+               eaddr = get_tlb_end(tlbe);
+               vcpu_e500->tlb1_max_eaddr =
+                               max(vcpu_e500->tlb1_max_eaddr, eaddr);
+       }
+}
+
+static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
+                               struct kvm_book3e_206_tlb_entry *gtlbe)
 {
+       unsigned long start, end, size;
+
+       size = get_tlb_bytes(gtlbe);
+       start = get_tlb_eaddr(gtlbe) & ~(size - 1);
+       end = start + size - 1;
+
+       return vcpu_e500->tlb1_min_eaddr == start ||
+                       vcpu_e500->tlb1_max_eaddr == end;
+}
+
+/* This function is supposed to be called when adding a new valid tlb entry */
+static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
+                               struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+       unsigned long start, end, size;
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
-       /* Recalc shadow pid since MSR changes */
-       kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+       if (!get_tlb_v(gtlbe))
+               return;
+
+       size = get_tlb_bytes(gtlbe);
+       start = get_tlb_eaddr(gtlbe) & ~(size - 1);
+       end = start + size - 1;
+
+       vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
+       vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
 }
 
 static inline int kvmppc_e500_gtlbe_invalidate(
@@ -794,6 +655,9 @@ static inline int kvmppc_e500_gtlbe_invalidate(
        if (unlikely(get_tlb_iprot(gtlbe)))
                return -1;
 
+       if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
+               kvmppc_recalc_tlb1map_range(vcpu_e500);
+
        gtlbe->mas1 = 0;
 
        return 0;
@@ -811,7 +675,7 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
 
        /* Invalidate all vcpu id mappings */
-       kvmppc_e500_id_table_reset_all(vcpu_e500);
+       kvmppc_e500_tlbil_all(vcpu_e500);
 
        return EMULATE_DONE;
 }
@@ -844,7 +708,59 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
        }
 
        /* Invalidate all vcpu id mappings */
-       kvmppc_e500_id_table_reset_all(vcpu_e500);
+       kvmppc_e500_tlbil_all(vcpu_e500);
+
+       return EMULATE_DONE;
+}
+
+static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
+                      int pid, int rt)
+{
+       struct kvm_book3e_206_tlb_entry *tlbe;
+       int tid, esel;
+
+       /* invalidate all entries */
+       for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
+               tlbe = get_entry(vcpu_e500, tlbsel, esel);
+               tid = get_tlb_tid(tlbe);
+               if (rt == 0 || tid == pid) {
+                       inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
+                       kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
+               }
+       }
+}
+
+static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
+                      int ra, int rb)
+{
+       int tlbsel, esel;
+       gva_t ea;
+
+       ea = kvmppc_get_gpr(&vcpu_e500->vcpu, rb);
+       if (ra)
+               ea += kvmppc_get_gpr(&vcpu_e500->vcpu, ra);
+
+       for (tlbsel = 0; tlbsel < 2; tlbsel++) {
+               esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
+               if (esel >= 0) {
+                       inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
+                       kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
+                       break;
+               }
+       }
+}
+
+int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb)
+{
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+       int pid = get_cur_spid(vcpu);
+
+       if (rt == 0 || rt == 1) {
+               tlbilx_all(vcpu_e500, 0, pid, rt);
+               tlbilx_all(vcpu_e500, 1, pid, rt);
+       } else if (rt == 3) {
+               tlbilx_one(vcpu_e500, pid, ra, rb);
+       }
 
        return EMULATE_DONE;
 }
@@ -929,9 +845,7 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
        int stid;
 
        preempt_disable();
-       stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
-                                  get_tlb_tid(gtlbe),
-                                  get_cur_pr(&vcpu_e500->vcpu), 0);
+       stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);
 
        stlbe->mas1 |= MAS1_TID(stid);
        write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
@@ -941,16 +855,21 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-       struct kvm_book3e_206_tlb_entry *gtlbe;
-       int tlbsel, esel;
+       struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
+       int tlbsel, esel, stlbsel, sesel;
+       int recal = 0;
 
        tlbsel = get_tlb_tlbsel(vcpu);
        esel = get_tlb_esel(vcpu, tlbsel);
 
        gtlbe = get_entry(vcpu_e500, tlbsel, esel);
 
-       if (get_tlb_v(gtlbe))
+       if (get_tlb_v(gtlbe)) {
                inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
+               if ((tlbsel == 1) &&
+                       kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
+                       recal = 1;
+       }
 
        gtlbe->mas1 = vcpu->arch.shared->mas1;
        gtlbe->mas2 = vcpu->arch.shared->mas2;
@@ -959,10 +878,20 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
        trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
                                      gtlbe->mas2, gtlbe->mas7_3);
 
+       if (tlbsel == 1) {
+               /*
+                * If a valid tlb1 entry is overwritten then recalculate the
+                * min/max TLB1 map address range otherwise no need to look
+                * in tlb1 array.
+                */
+               if (recal)
+                       kvmppc_recalc_tlb1map_range(vcpu_e500);
+               else
+                       kvmppc_set_tlb1map_range(vcpu, gtlbe);
+       }
+
        /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
        if (tlbe_is_host_safe(vcpu, gtlbe)) {
-               struct kvm_book3e_206_tlb_entry stlbe;
-               int stlbsel, sesel;
                u64 eaddr;
                u64 raddr;
 
@@ -989,7 +918,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
                         * are mapped on the fly. */
                        stlbsel = 1;
                        sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
-                                       raddr >> PAGE_SHIFT, gtlbe, &stlbe);
+                                   raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
                        break;
 
                default:
@@ -1003,6 +932,48 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
        return EMULATE_DONE;
 }
 
+static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
+                                 gva_t eaddr, unsigned int pid, int as)
+{
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+       int esel, tlbsel;
+
+       for (tlbsel = 0; tlbsel < 2; tlbsel++) {
+               esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
+               if (esel >= 0)
+                       return index_of(tlbsel, esel);
+       }
+
+       return -1;
+}
+
+/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
+int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
+                               struct kvm_translation *tr)
+{
+       int index;
+       gva_t eaddr;
+       u8 pid;
+       u8 as;
+
+       eaddr = tr->linear_address;
+       pid = (tr->linear_address >> 32) & 0xff;
+       as = (tr->linear_address >> 40) & 0x1;
+
+       index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
+       if (index < 0) {
+               tr->valid = 0;
+               return 0;
+       }
+
+       tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
+       /* XXX what does "writeable" and "usermode" even mean? */
+       tr->valid = 1;
+
+       return 0;
+}
+
+
 int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
@@ -1066,7 +1037,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
                sesel = 0; /* unused */
                priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
-               kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
+               kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
                                        &priv->ref, eaddr, &stlbe);
                break;
 
@@ -1075,7 +1046,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 
                stlbsel = 1;
                sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
-                                            gtlbe, &stlbe);
+                                            gtlbe, &stlbe, esel);
                break;
        }
 
@@ -1087,52 +1058,13 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
        write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
 }
 
-int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
-                               gva_t eaddr, unsigned int pid, int as)
-{
-       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-       int esel, tlbsel;
-
-       for (tlbsel = 0; tlbsel < 2; tlbsel++) {
-               esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
-               if (esel >= 0)
-                       return index_of(tlbsel, esel);
-       }
-
-       return -1;
-}
-
-void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
-{
-       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-
-       if (vcpu->arch.pid != pid) {
-               vcpu_e500->pid[0] = vcpu->arch.pid = pid;
-               kvmppc_e500_recalc_shadow_pid(vcpu_e500);
-       }
-}
-
-void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-       struct kvm_book3e_206_tlb_entry *tlbe;
-
-       /* Insert large initial mapping for guest. */
-       tlbe = get_entry(vcpu_e500, 1, 0);
-       tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
-       tlbe->mas2 = 0;
-       tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;
-
-       /* 4K map for serial output. Used by kernel wrapper. */
-       tlbe = get_entry(vcpu_e500, 1, 1);
-       tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
-       tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
-       tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
-}
-
 static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
        int i;
 
+       clear_tlb1_bitmap(vcpu_e500);
+       kfree(vcpu_e500->g2h_tlb1_map);
+
        clear_tlb_refs(vcpu_e500);
        kfree(vcpu_e500->gtlb_priv[0]);
        kfree(vcpu_e500->gtlb_priv[1]);
@@ -1155,6 +1087,36 @@ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
        vcpu_e500->gtlb_arch = NULL;
 }
 
+void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       sregs->u.e.mas0 = vcpu->arch.shared->mas0;
+       sregs->u.e.mas1 = vcpu->arch.shared->mas1;
+       sregs->u.e.mas2 = vcpu->arch.shared->mas2;
+       sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
+       sregs->u.e.mas4 = vcpu->arch.shared->mas4;
+       sregs->u.e.mas6 = vcpu->arch.shared->mas6;
+
+       sregs->u.e.mmucfg = vcpu->arch.mmucfg;
+       sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
+       sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
+       sregs->u.e.tlbcfg[2] = 0;
+       sregs->u.e.tlbcfg[3] = 0;
+}
+
+int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
+               vcpu->arch.shared->mas0 = sregs->u.e.mas0;
+               vcpu->arch.shared->mas1 = sregs->u.e.mas1;
+               vcpu->arch.shared->mas2 = sregs->u.e.mas2;
+               vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
+               vcpu->arch.shared->mas4 = sregs->u.e.mas4;
+               vcpu->arch.shared->mas6 = sregs->u.e.mas6;
+       }
+
+       return 0;
+}
+
 int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
                              struct kvm_config_tlb *cfg)
 {
@@ -1163,6 +1125,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
        char *virt;
        struct page **pages;
        struct tlbe_priv *privs[2] = {};
+       u64 *g2h_bitmap = NULL;
        size_t array_len;
        u32 sets;
        int num_pages, ret, i;
@@ -1224,10 +1187,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
        if (!privs[0] || !privs[1])
                goto err_put_page;
 
+       g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
+                            GFP_KERNEL);
+       if (!g2h_bitmap)
+               goto err_put_page;
+
        free_gtlb(vcpu_e500);
 
        vcpu_e500->gtlb_priv[0] = privs[0];
        vcpu_e500->gtlb_priv[1] = privs[1];
+       vcpu_e500->g2h_tlb1_map = g2h_bitmap;
 
        vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
                (virt + (cfg->array & (PAGE_SIZE - 1)));
@@ -1238,14 +1207,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
        vcpu_e500->gtlb_offset[0] = 0;
        vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];
 
-       vcpu_e500->tlb0cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+       vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;
+
+       vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        if (params.tlb_sizes[0] <= 2048)
-               vcpu_e500->tlb0cfg |= params.tlb_sizes[0];
-       vcpu_e500->tlb0cfg |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
+               vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0];
+       vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
 
-       vcpu_e500->tlb1cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
-       vcpu_e500->tlb1cfg |= params.tlb_sizes[1];
-       vcpu_e500->tlb1cfg |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
+       vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+       vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1];
+       vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
 
        vcpu_e500->shared_tlb_pages = pages;
        vcpu_e500->num_shared_tlb_pages = num_pages;
@@ -1256,6 +1227,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
        vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
        vcpu_e500->gtlb_params[1].sets = 1;
 
+       kvmppc_recalc_tlb1map_range(vcpu_e500);
        return 0;
 
 err_put_page:
@@ -1274,13 +1246,14 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                             struct kvm_dirty_tlb *dirty)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-
+       kvmppc_recalc_tlb1map_range(vcpu_e500);
        clear_tlb_refs(vcpu_e500);
        return 0;
 }
 
 int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
+       struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
        int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
        int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;
 
@@ -1357,22 +1330,32 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
        if (!vcpu_e500->gtlb_priv[1])
                goto err;
 
-       if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
+       vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(unsigned int) *
+                                         vcpu_e500->gtlb_params[1].entries,
+                                         GFP_KERNEL);
+       if (!vcpu_e500->g2h_tlb1_map)
+               goto err;
+
+       vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
+                                          host_tlb_params[1].entries,
+                                          GFP_KERNEL);
+       if (!vcpu_e500->h2g_tlb1_rmap)
                goto err;
 
        /* Init TLB configuration register */
-       vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) &
+       vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
                             ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
-       vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[0].entries;
-       vcpu_e500->tlb0cfg |=
+       vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries;
+       vcpu->arch.tlbcfg[0] |=
                vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;
 
-       vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) &
+       vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
                             ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
-       vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[1].entries;
-       vcpu_e500->tlb0cfg |=
+       vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries;
+       vcpu->arch.tlbcfg[1] |=
                vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;
 
+       kvmppc_recalc_tlb1map_range(vcpu_e500);
        return 0;
 
 err:
@@ -1385,8 +1368,7 @@ err:
 void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
        free_gtlb(vcpu_e500);
-       kvmppc_e500_id_table_free(vcpu_e500);
-
+       kfree(vcpu_e500->h2g_tlb1_rmap);
        kfree(vcpu_e500->tlb_refs[0]);
        kfree(vcpu_e500->tlb_refs[1]);
 }
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
deleted file mode 100644 (file)
index 5c6d2d7..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Author: Yu Liu, yu.liu@freescale.com
- *
- * Description:
- * This file is based on arch/powerpc/kvm/44x_tlb.h,
- * by Hollis Blanchard <hollisb@us.ibm.com>.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- */
-
-#ifndef __KVM_E500_TLB_H__
-#define __KVM_E500_TLB_H__
-
-#include <linux/kvm_host.h>
-#include <asm/mmu-book3e.h>
-#include <asm/tlb.h>
-#include <asm/kvm_e500.h>
-
-/* This geometry is the legacy default -- can be overridden by userspace */
-#define KVM_E500_TLB0_WAY_SIZE         128
-#define KVM_E500_TLB0_WAY_NUM          2
-
-#define KVM_E500_TLB0_SIZE  (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
-#define KVM_E500_TLB1_SIZE  16
-
-#define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF))
-#define tlbsel_of(index)       ((index) >> 16)
-#define esel_of(index)         ((index) & 0xFFFF)
-
-#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
-#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
-#define MAS2_ATTRIB_MASK \
-         (MAS2_X0 | MAS2_X1)
-#define MAS3_ATTRIB_MASK \
-         (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
-          | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
-
-extern void kvmppc_dump_tlbs(struct kvm_vcpu *);
-extern int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *, ulong);
-extern int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *);
-extern int kvmppc_e500_emul_tlbre(struct kvm_vcpu *);
-extern int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *, int, int);
-extern int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *, int);
-extern int kvmppc_e500_tlb_search(struct kvm_vcpu *, gva_t, unsigned int, int);
-extern void kvmppc_e500_tlb_put(struct kvm_vcpu *);
-extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int);
-extern int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *);
-extern void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *);
-extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
-extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *);
-
-/* TLB helper functions */
-static inline unsigned int
-get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
-       return (tlbe->mas1 >> 7) & 0x1f;
-}
-
-static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
-       return tlbe->mas2 & 0xfffff000;
-}
-
-static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
-       unsigned int pgsize = get_tlb_size(tlbe);
-       return 1ULL << 10 << pgsize;
-}
-
-static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
-       u64 bytes = get_tlb_bytes(tlbe);
-       return get_tlb_eaddr(tlbe) + bytes - 1;
-}
-
-static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
-       return tlbe->mas7_3 & ~0xfffULL;
-}
-
-static inline unsigned int
-get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
-       return (tlbe->mas1 >> 16) & 0xff;
-}
-
-static inline unsigned int
-get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
-       return (tlbe->mas1 >> 12) & 0x1;
-}
-
-static inline unsigned int
-get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
-       return (tlbe->mas1 >> 31) & 0x1;
-}
-
-static inline unsigned int
-get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
-       return (tlbe->mas1 >> 30) & 0x1;
-}
-
-static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.pid & 0xff;
-}
-
-static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
-{
-       return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
-}
-
-static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
-{
-       return !!(vcpu->arch.shared->msr & MSR_PR);
-}
-
-static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
-{
-       return (vcpu->arch.shared->mas6 >> 16) & 0xff;
-}
-
-static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.shared->mas6 & 0x1;
-}
-
-static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
-{
-       /*
-        * Manual says that tlbsel has 2 bits wide.
-        * Since we only have two TLBs, only lower bit is used.
-        */
-       return (vcpu->arch.shared->mas0 >> 28) & 0x1;
-}
-
-static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.shared->mas0 & 0xfff;
-}
-
-static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
-{
-       return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
-}
-
-static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
-                       const struct kvm_book3e_206_tlb_entry *tlbe)
-{
-       gpa_t gpa;
-
-       if (!get_tlb_v(tlbe))
-               return 0;
-
-       /* Does it match current guest AS? */
-       /* XXX what about IS != DS? */
-       if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
-               return 0;
-
-       gpa = get_tlb_raddr(tlbe);
-       if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
-               /* Mapping is not for RAM. */
-               return 0;
-
-       return 1;
-}
-
-#endif /* __KVM_E500_TLB_H__ */
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
new file mode 100644 (file)
index 0000000..fe6c1de
--- /dev/null
@@ -0,0 +1,342 @@
+/*
+ * Copyright (C) 2010 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Varun Sethi, <varun.sethi@freescale.com>
+ *
+ * Description:
+ * This file is derived from arch/powerpc/kvm/e500.c,
+ * by Yu Liu <yu.liu@freescale.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <asm/reg.h>
+#include <asm/cputable.h>
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/dbell.h>
+
+#include "booke.h"
+#include "e500.h"
+
+void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
+{
+       enum ppc_dbell dbell_type;
+       unsigned long tag;
+
+       switch (type) {
+       case INT_CLASS_NONCRIT:
+               dbell_type = PPC_G_DBELL;
+               break;
+       case INT_CLASS_CRIT:
+               dbell_type = PPC_G_DBELL_CRIT;
+               break;
+       case INT_CLASS_MC:
+               dbell_type = PPC_G_DBELL_MC;
+               break;
+       default:
+               WARN_ONCE(1, "%s: unknown int type %d\n", __func__, type);
+               return;
+       }
+
+
+       tag = PPC_DBELL_LPID(vcpu->kvm->arch.lpid) | vcpu->vcpu_id;
+       mb();
+       ppc_msgsnd(dbell_type, 0, tag);
+}
+
+/* gtlbe must not be mapped by more than one host tlb entry */
+void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
+                          struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+       unsigned int tid, ts;
+       u32 val, eaddr, lpid;
+       unsigned long flags;
+
+       ts = get_tlb_ts(gtlbe);
+       tid = get_tlb_tid(gtlbe);
+       lpid = vcpu_e500->vcpu.kvm->arch.lpid;
+
+       /* We search the host TLB to invalidate its shadow TLB entry */
+       val = (tid << 16) | ts;
+       eaddr = get_tlb_eaddr(gtlbe);
+
+       local_irq_save(flags);
+
+       mtspr(SPRN_MAS6, val);
+       mtspr(SPRN_MAS5, MAS5_SGS | lpid);
+
+       asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
+       val = mfspr(SPRN_MAS1);
+       if (val & MAS1_VALID) {
+               mtspr(SPRN_MAS1, val & ~MAS1_VALID);
+               asm volatile("tlbwe");
+       }
+       mtspr(SPRN_MAS5, 0);
+       /* NOTE: tlbsx also updates mas8, so clear it for host tlbwe */
+       mtspr(SPRN_MAS8, 0);
+       isync();
+
+       local_irq_restore(flags);
+}
+
+void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.kvm->arch.lpid);
+       asm volatile("tlbilxlpid");
+       mtspr(SPRN_MAS5, 0);
+       local_irq_restore(flags);
+}
+
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
+{
+       vcpu->arch.pid = pid;
+}
+
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
+{
+}
+
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+       kvmppc_booke_vcpu_load(vcpu, cpu);
+
+       mtspr(SPRN_LPID, vcpu->kvm->arch.lpid);
+       mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
+       mtspr(SPRN_GPIR, vcpu->vcpu_id);
+       mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
+       mtspr(SPRN_EPLC, vcpu->arch.eplc);
+       mtspr(SPRN_EPSC, vcpu->arch.epsc);
+
+       mtspr(SPRN_GIVPR, vcpu->arch.ivpr);
+       mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
+       mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
+       mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0);
+       mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1);
+       mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2);
+       mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3);
+
+       mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0);
+       mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1);
+
+       mtspr(SPRN_GEPR, vcpu->arch.epr);
+       mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
+       mtspr(SPRN_GESR, vcpu->arch.shared->esr);
+
+       if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
+               kvmppc_e500_tlbil_all(vcpu_e500);
+
+       kvmppc_load_guest_fp(vcpu);
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.eplc = mfspr(SPRN_EPLC);
+       vcpu->arch.epsc = mfspr(SPRN_EPSC);
+
+       vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0);
+       vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1);
+       vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2);
+       vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3);
+
+       vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0);
+       vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1);
+
+       vcpu->arch.epr = mfspr(SPRN_GEPR);
+       vcpu->arch.shared->dar = mfspr(SPRN_GDEAR);
+       vcpu->arch.shared->esr = mfspr(SPRN_GESR);
+
+       vcpu->arch.oldpir = mfspr(SPRN_PIR);
+
+       kvmppc_booke_vcpu_put(vcpu);
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+       int r;
+
+       if (strcmp(cur_cpu_spec->cpu_name, "e500mc") == 0)
+               r = 0;
+       else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
+               r = 0;
+       else
+               r = -ENOTSUPP;
+
+       return r;
+}
+
+int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+       vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
+                                SPRN_EPCR_DUVD;
+       vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP;
+       vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
+       vcpu->arch.epsc = vcpu->arch.eplc;
+
+       vcpu->arch.pvr = mfspr(SPRN_PVR);
+       vcpu_e500->svr = mfspr(SPRN_SVR);
+
+       vcpu->arch.cpu_type = KVM_CPU_E500MC;
+
+       return 0;
+}
+
+void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+       sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_PM |
+                              KVM_SREGS_E_PC;
+       sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;
+
+       sregs->u.e.impl.fsl.features = 0;
+       sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
+       sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
+       sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
+
+       kvmppc_get_sregs_e500_tlb(vcpu, sregs);
+
+       sregs->u.e.ivor_high[3] =
+               vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
+       sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
+       sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
+
+       kvmppc_get_sregs_ivor(vcpu, sregs);
+}
+
+int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+       int ret;
+
+       if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
+               vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
+               vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
+               vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
+       }
+
+       ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
+       if (ret < 0)
+               return ret;
+
+       if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
+               return 0;
+
+       if (sregs->u.e.features & KVM_SREGS_E_PM) {
+               vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
+                       sregs->u.e.ivor_high[3];
+       }
+
+       if (sregs->u.e.features & KVM_SREGS_E_PC) {
+               vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] =
+                       sregs->u.e.ivor_high[4];
+               vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] =
+                       sregs->u.e.ivor_high[5];
+       }
+
+       return kvmppc_set_sregs_ivor(vcpu, sregs);
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+       struct kvmppc_vcpu_e500 *vcpu_e500;
+       struct kvm_vcpu *vcpu;
+       int err;
+
+       vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+       if (!vcpu_e500) {
+               err = -ENOMEM;
+               goto out;
+       }
+       vcpu = &vcpu_e500->vcpu;
+
+       /* Invalid PIR value -- this LPID doesn't have valid state on any cpu */
+       vcpu->arch.oldpir = 0xffffffff;
+
+       err = kvm_vcpu_init(vcpu, kvm, id);
+       if (err)
+               goto free_vcpu;
+
+       err = kvmppc_e500_tlb_init(vcpu_e500);
+       if (err)
+               goto uninit_vcpu;
+
+       vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+       if (!vcpu->arch.shared)
+               goto uninit_tlb;
+
+       return vcpu;
+
+uninit_tlb:
+       kvmppc_e500_tlb_uninit(vcpu_e500);
+uninit_vcpu:
+       kvm_vcpu_uninit(vcpu);
+
+free_vcpu:
+       kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
+out:
+       return ERR_PTR(err);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+       struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+       free_page((unsigned long)vcpu->arch.shared);
+       kvmppc_e500_tlb_uninit(vcpu_e500);
+       kvm_vcpu_uninit(vcpu);
+       kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+       int lpid;
+
+       lpid = kvmppc_alloc_lpid();
+       if (lpid < 0)
+               return lpid;
+
+       kvm->arch.lpid = lpid;
+       return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+       kvmppc_free_lpid(kvm->arch.lpid);
+}
+
+static int __init kvmppc_e500mc_init(void)
+{
+       int r;
+
+       r = kvmppc_booke_init();
+       if (r)
+               return r;
+
+       kvmppc_init_lpid(64);
+       kvmppc_claim_lpid(0); /* host */
+
+       return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+}
+
+static void __exit kvmppc_e500mc_exit(void)
+{
+       kvmppc_booke_exit();
+}
+
+module_init(kvmppc_e500mc_init);
+module_exit(kvmppc_e500mc_exit);
index 968f401018834aa4758ec45afeb32eee327a37c8..f90e86dea7a2cfdef3a3321a878a370cdde4b287 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/kvm_host.h>
+#include <linux/clockchips.h>
 
 #include <asm/reg.h>
 #include <asm/time.h>
@@ -35,7 +36,9 @@
 #define OP_TRAP 3
 #define OP_TRAP_64 2
 
+#define OP_31_XOP_TRAP      4
 #define OP_31_XOP_LWZX      23
+#define OP_31_XOP_TRAP_64   68
 #define OP_31_XOP_LBZX      87
 #define OP_31_XOP_STWX      151
 #define OP_31_XOP_STBX      215
@@ -102,8 +105,12 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
         */
 
        dec_time = vcpu->arch.dec;
-       dec_time *= 1000;
-       do_div(dec_time, tb_ticks_per_usec);
+       /*
+        * Guest timebase ticks at the same frequency as host decrementer.
+        * So use the host decrementer calculations for decrementer emulation.
+        */
+       dec_time = dec_time << decrementer_clockevent.shift;
+       do_div(dec_time, decrementer_clockevent.mult);
        dec_nsec = do_div(dec_time, NSEC_PER_SEC);
        hrtimer_start(&vcpu->arch.dec_timer,
                ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
@@ -141,14 +148,13 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
 int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
        u32 inst = kvmppc_get_last_inst(vcpu);
-       u32 ea;
-       int ra;
-       int rb;
-       int rs;
-       int rt;
-       int sprn;
+       int ra = get_ra(inst);
+       int rs = get_rs(inst);
+       int rt = get_rt(inst);
+       int sprn = get_sprn(inst);
        enum emulation_result emulated = EMULATE_DONE;
        int advance = 1;
+       ulong spr_val = 0;
 
        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
@@ -170,173 +176,143 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
        case 31:
                switch (get_xop(inst)) {
 
+               case OP_31_XOP_TRAP:
+#ifdef CONFIG_64BIT
+               case OP_31_XOP_TRAP_64:
+#endif
+#ifdef CONFIG_PPC_BOOK3S
+                       kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
+#else
+                       kvmppc_core_queue_program(vcpu,
+                                       vcpu->arch.shared->esr | ESR_PTR);
+#endif
+                       advance = 0;
+                       break;
                case OP_31_XOP_LWZX:
-                       rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                        break;
 
                case OP_31_XOP_LBZX:
-                       rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        break;
 
                case OP_31_XOP_LBZUX:
-                       rt = get_rt(inst);
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
-
-                       ea = kvmppc_get_gpr(vcpu, rb);
-                       if (ra)
-                               ea += kvmppc_get_gpr(vcpu, ra);
-
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
-                       kvmppc_set_gpr(vcpu, ra, ea);
+                       kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
 
                case OP_31_XOP_STWX:
-                       rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 1);
                        break;
 
                case OP_31_XOP_STBX:
-                       rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        break;
 
                case OP_31_XOP_STBUX:
-                       rs = get_rs(inst);
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
-
-                       ea = kvmppc_get_gpr(vcpu, rb);
-                       if (ra)
-                               ea += kvmppc_get_gpr(vcpu, ra);
-
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
-                       kvmppc_set_gpr(vcpu, rs, ea);
+                       kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
 
                case OP_31_XOP_LHAX:
-                       rt = get_rt(inst);
                        emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                        break;
 
                case OP_31_XOP_LHZX:
-                       rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        break;
 
                case OP_31_XOP_LHZUX:
-                       rt = get_rt(inst);
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
-
-                       ea = kvmppc_get_gpr(vcpu, rb);
-                       if (ra)
-                               ea += kvmppc_get_gpr(vcpu, ra);
-
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-                       kvmppc_set_gpr(vcpu, ra, ea);
+                       kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
 
                case OP_31_XOP_MFSPR:
-                       sprn = get_sprn(inst);
-                       rt = get_rt(inst);
-
                        switch (sprn) {
                        case SPRN_SRR0:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
+                               spr_val = vcpu->arch.shared->srr0;
                                break;
                        case SPRN_SRR1:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
+                               spr_val = vcpu->arch.shared->srr1;
                                break;
                        case SPRN_PVR:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
+                               spr_val = vcpu->arch.pvr;
+                               break;
                        case SPRN_PIR:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
+                               spr_val = vcpu->vcpu_id;
+                               break;
                        case SPRN_MSSSR0:
-                               kvmppc_set_gpr(vcpu, rt, 0); break;
+                               spr_val = 0;
+                               break;
 
                        /* Note: mftb and TBRL/TBWL are user-accessible, so
                         * the guest can always access the real TB anyways.
                         * In fact, we probably will never see these traps. */
                        case SPRN_TBWL:
-                               kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
+                               spr_val = get_tb() >> 32;
+                               break;
                        case SPRN_TBWU:
-                               kvmppc_set_gpr(vcpu, rt, get_tb()); break;
+                               spr_val = get_tb();
+                               break;
 
                        case SPRN_SPRG0:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
+                               spr_val = vcpu->arch.shared->sprg0;
                                break;
                        case SPRN_SPRG1:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
+                               spr_val = vcpu->arch.shared->sprg1;
                                break;
                        case SPRN_SPRG2:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
+                               spr_val = vcpu->arch.shared->sprg2;
                                break;
                        case SPRN_SPRG3:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
+                               spr_val = vcpu->arch.shared->sprg3;
                                break;
                        /* Note: SPRG4-7 are user-readable, so we don't get
                         * a trap. */
 
                        case SPRN_DEC:
-                       {
-                               kvmppc_set_gpr(vcpu, rt,
-                                              kvmppc_get_dec(vcpu, get_tb()));
+                               spr_val = kvmppc_get_dec(vcpu, get_tb());
                                break;
-                       }
                        default:
-                               emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
-                               if (emulated == EMULATE_FAIL) {
-                                       printk("mfspr: unknown spr %x\n", sprn);
-                                       kvmppc_set_gpr(vcpu, rt, 0);
+                               emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
+                                                                    &spr_val);
+                               if (unlikely(emulated == EMULATE_FAIL)) {
+                                       printk(KERN_INFO "mfspr: unknown spr "
+                                               "0x%x\n", sprn);
                                }
                                break;
                        }
+                       kvmppc_set_gpr(vcpu, rt, spr_val);
                        kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
                        break;
 
                case OP_31_XOP_STHX:
-                       rs = get_rs(inst);
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
-
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        break;
 
                case OP_31_XOP_STHUX:
-                       rs = get_rs(inst);
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
-
-                       ea = kvmppc_get_gpr(vcpu, rb);
-                       if (ra)
-                               ea += kvmppc_get_gpr(vcpu, ra);
-
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
-                       kvmppc_set_gpr(vcpu, ra, ea);
+                       kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
 
                case OP_31_XOP_MTSPR:
-                       sprn = get_sprn(inst);
-                       rs = get_rs(inst);
+                       spr_val = kvmppc_get_gpr(vcpu, rs);
                        switch (sprn) {
                        case SPRN_SRR0:
-                               vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
+                               vcpu->arch.shared->srr0 = spr_val;
                                break;
                        case SPRN_SRR1:
-                               vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
+                               vcpu->arch.shared->srr1 = spr_val;
                                break;
 
                        /* XXX We need to context-switch the timebase for
@@ -347,27 +323,29 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                        case SPRN_MSSSR0: break;
 
                        case SPRN_DEC:
-                               vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
+                               vcpu->arch.dec = spr_val;
                                kvmppc_emulate_dec(vcpu);
                                break;
 
                        case SPRN_SPRG0:
-                               vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
+                               vcpu->arch.shared->sprg0 = spr_val;
                                break;
                        case SPRN_SPRG1:
-                               vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
+                               vcpu->arch.shared->sprg1 = spr_val;
                                break;
                        case SPRN_SPRG2:
-                               vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
+                               vcpu->arch.shared->sprg2 = spr_val;
                                break;
                        case SPRN_SPRG3:
-                               vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
+                               vcpu->arch.shared->sprg3 = spr_val;
                                break;
 
                        default:
-                               emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
+                               emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
+                                                                    spr_val);
                                if (emulated == EMULATE_FAIL)
-                                       printk("mtspr: unknown spr %x\n", sprn);
+                                       printk(KERN_INFO "mtspr: unknown spr "
+                                               "0x%x\n", sprn);
                                break;
                        }
                        kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
@@ -382,7 +360,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                        break;
 
                case OP_31_XOP_LWBRX:
-                       rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
                        break;
 
@@ -390,25 +367,16 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                        break;
 
                case OP_31_XOP_STWBRX:
-                       rs = get_rs(inst);
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
-
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 0);
                        break;
 
                case OP_31_XOP_LHBRX:
-                       rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
                        break;
 
                case OP_31_XOP_STHBRX:
-                       rs = get_rs(inst);
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
-
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 0);
@@ -421,99 +389,78 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                break;
 
        case OP_LWZ:
-               rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                break;
 
        case OP_LWZU:
-               ra = get_ra(inst);
-               rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
-               kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+               kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
 
        case OP_LBZ:
-               rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                break;
 
        case OP_LBZU:
-               ra = get_ra(inst);
-               rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
-               kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+               kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
 
        case OP_STW:
-               rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                break;
 
        case OP_STWU:
-               ra = get_ra(inst);
-               rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
-               kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+               kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
 
        case OP_STB:
-               rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                break;
 
        case OP_STBU:
-               ra = get_ra(inst);
-               rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
-               kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+               kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
 
        case OP_LHZ:
-               rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                break;
 
        case OP_LHZU:
-               ra = get_ra(inst);
-               rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-               kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+               kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
 
        case OP_LHA:
-               rt = get_rt(inst);
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                break;
 
        case OP_LHAU:
-               ra = get_ra(inst);
-               rt = get_rt(inst);
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
-               kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+               kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
 
        case OP_STH:
-               rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                break;
 
        case OP_STHU:
-               ra = get_ra(inst);
-               rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
-               kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+               kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
 
        default:
index 00d7e345b3fefef2280fd145d6233bd9acca9719..1493c8de947b92abc84b9a155f87b1e5a9bb0673 100644 (file)
@@ -43,6 +43,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
               v->requests;
 }
 
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+       return 1;
+}
+
 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 {
        int nr = kvmppc_get_gpr(vcpu, 11);
@@ -74,7 +79,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
        }
        case HC_VENDOR_KVM | KVM_HC_FEATURES:
                r = HC_EV_SUCCESS;
-#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
+#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
                /* XXX Missing magic page on 44x */
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
 #endif
@@ -109,6 +114,11 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
                goto out;
 #endif
 
+#ifdef CONFIG_KVM_BOOKE_HV
+       if (!cpu_has_feature(CPU_FTR_EMB_HV))
+               goto out;
+#endif
+
        r = true;
 
 out:
@@ -225,7 +235,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
-#ifdef CONFIG_KVM_E500
+#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB:
 #endif
                r = 1;
@@ -234,10 +244,12 @@ int kvm_dev_ioctl_check_extension(long ext)
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
 #endif
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_SPAPR_TCE:
                r = 1;
                break;
+#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_PPC_SMT:
                r = threads_per_core;
                break;
@@ -267,6 +279,11 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
+#ifdef CONFIG_PPC_BOOK3S_64
+       case KVM_CAP_PPC_GET_SMMU_INFO:
+               r = 1;
+               break;
+#endif
        default:
                r = 0;
                break;
@@ -588,21 +605,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        return r;
 }
 
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
-{
-       int me;
-       int cpu = vcpu->cpu;
-
-       me = get_cpu();
-       if (waitqueue_active(vcpu->arch.wqp)) {
-               wake_up_interruptible(vcpu->arch.wqp);
-               vcpu->stat.halt_wakeup++;
-       } else if (cpu != me && cpu != -1) {
-               smp_send_reschedule(vcpu->cpu);
-       }
-       put_cpu();
-}
-
 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 {
        if (irq->irq == KVM_INTERRUPT_UNSET) {
@@ -611,6 +613,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
        }
 
        kvmppc_core_queue_external(vcpu, irq);
+
        kvm_vcpu_kick(vcpu);
 
        return 0;
@@ -633,7 +636,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
-#ifdef CONFIG_KVM_E500
+#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB: {
                struct kvm_config_tlb cfg;
                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
@@ -710,7 +713,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                break;
        }
 
-#ifdef CONFIG_KVM_E500
+#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
@@ -720,7 +723,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                break;
        }
 #endif
-
        default:
                r = -EINVAL;
        }
@@ -777,7 +779,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 
                break;
        }
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;
                struct kvm *kvm = filp->private_data;
@@ -788,7 +790,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }
+#endif /* CONFIG_PPC_BOOK3S_64 */
 
+#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_ALLOCATE_RMA: {
                struct kvm *kvm = filp->private_data;
                struct kvm_allocate_rma rma;
@@ -800,6 +804,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
        }
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
 
+#ifdef CONFIG_PPC_BOOK3S_64
+       case KVM_PPC_GET_SMMU_INFO: {
+               struct kvm *kvm = filp->private_data;
+               struct kvm_ppc_smmu_info info;
+
+               memset(&info, 0, sizeof(info));
+               r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
+               if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
+                       r = -EFAULT;
+               break;
+       }
+#endif /* CONFIG_PPC_BOOK3S_64 */
        default:
                r = -ENOTTY;
        }
@@ -808,6 +824,40 @@ out:
        return r;
 }
 
+static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
+static unsigned long nr_lpids;
+
+long kvmppc_alloc_lpid(void)
+{
+       long lpid;
+
+       do {
+               lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
+               if (lpid >= nr_lpids) {
+                       pr_err("%s: No LPIDs free\n", __func__);
+                       return -ENOMEM;
+               }
+       } while (test_and_set_bit(lpid, lpid_inuse));
+
+       return lpid;
+}
+
+void kvmppc_claim_lpid(long lpid)
+{
+       set_bit(lpid, lpid_inuse);
+}
+
+void kvmppc_free_lpid(long lpid)
+{
+       clear_bit(lpid, lpid_inuse);
+}
+
+void kvmppc_init_lpid(unsigned long nr_lpids_param)
+{
+       nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
+       memset(lpid_inuse, 0, sizeof(lpid_inuse));
+}
+
 int kvm_arch_init(void *opaque)
 {
        return 0;
index 8167d42a776f08d9e1b5315c7b0a5a338a6ac011..bf191e72b2d88ce3b0c66b7e39470bc033145a43 100644 (file)
@@ -93,6 +93,12 @@ static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
        case SIGNAL_EXITS:
                vcpu->stat.signal_exits++;
                break;
+       case DBELL_EXITS:
+               vcpu->stat.dbell_exits++;
+               break;
+       case GDBELL_EXITS:
+               vcpu->stat.gdbell_exits++;
+               break;
        }
 }
 
index 455881a5563f2925544cf14d986b68904c7f1b98..093d6316435cc0733ecfa45c6e7e9f8bec1b3b95 100644 (file)
@@ -160,48 +160,3 @@ _GLOBAL(__clear_user)
        PPC_LONG        1b,91b
        PPC_LONG        8b,92b
        .text
-
-_GLOBAL(__strncpy_from_user)
-       addi    r6,r3,-1
-       addi    r4,r4,-1
-       cmpwi   0,r5,0
-       beq     2f
-       mtctr   r5
-1:     lbzu    r0,1(r4)
-       cmpwi   0,r0,0
-       stbu    r0,1(r6)
-       bdnzf   2,1b            /* dec ctr, branch if ctr != 0 && !cr0.eq */
-       beq     3f
-2:     addi    r6,r6,1
-3:     subf    r3,r3,r6
-       blr
-99:    li      r3,-EFAULT
-       blr
-
-       .section __ex_table,"a"
-       PPC_LONG        1b,99b
-       .text
-
-/* r3 = str, r4 = len (> 0), r5 = top (highest addr) */
-_GLOBAL(__strnlen_user)
-       addi    r7,r3,-1
-       subf    r6,r7,r5        /* top+1 - str */
-       cmplw   0,r4,r6
-       bge     0f
-       mr      r6,r4
-0:     mtctr   r6              /* ctr = min(len, top - str) */
-1:     lbzu    r0,1(r7)        /* get next byte */
-       cmpwi   0,r0,0
-       bdnzf   2,1b            /* loop if --ctr != 0 && byte != 0 */
-       addi    r7,r7,1
-       subf    r3,r3,r7        /* number of bytes we have looked at */
-       beqlr                   /* return if we found a 0 byte */
-       cmpw    0,r3,r4         /* did we look at all len bytes? */
-       blt     99f             /* if not, must have hit top */
-       addi    r3,r4,1         /* return len + 1 to indicate no null found */
-       blr
-99:    li      r3,0            /* bad address, return 0 */
-       blr
-
-       .section __ex_table,"a"
-       PPC_LONG        1b,99b
index 5b63bd3da4a968fab738434d0a83a7db2f722dc8..e779642c25e5e3192a39a4b167d505a8cfb63254 100644 (file)
@@ -333,9 +333,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
                                            unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned int)(long)hcpu;
-#ifdef CONFIG_HOTPLUG_CPU
-       struct task_struct *p;
-#endif
+
        /* We don't touch CPU 0 map, it's allocated at aboot and kept
         * around forever
         */
@@ -358,12 +356,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
                stale_map[cpu] = NULL;
 
                /* We also clear the cpu_vm_mask bits of CPUs going away */
-               read_lock(&tasklist_lock);
-               for_each_process(p) {
-                       if (p->mm)
-                               cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
-               }
-               read_unlock(&tasklist_lock);
+               clear_tasks_mm_cpumask(cpu);
        break;
 #endif /* CONFIG_HOTPLUG_CPU */
        }
index 1d75c92ea8fbf94c7773817de7d511e8d240069c..66519d263da7fa250499abf9f4d7c288fe8e6432 100644 (file)
@@ -151,7 +151,7 @@ static void
 spufs_evict_inode(struct inode *inode)
 {
        struct spufs_inode_info *ei = SPUFS_I(inode);
-       end_writeback(inode);
+       clear_inode(inode);
        if (ei->i_ctx)
                put_spu_context(ei->i_ctx);
        if (ei->i_gang)
index b403c533432c94438260df3b524434efdb6db4ce..a39b4690c171621e78e2183c6b1b97bd25f4afaf 100644 (file)
@@ -87,6 +87,7 @@ config S390
        select ARCH_SAVE_PAGE_KEYS if HIBERNATION
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
+       select HAVE_CMPXCHG_LOCAL
        select ARCH_DISCARD_MEMBLOCK
        select ARCH_INLINE_SPIN_TRYLOCK
        select ARCH_INLINE_SPIN_TRYLOCK_BH
index 6a2cb560e968ec1cd430200b0fb4cb9e2d3e87b7..73dae8b9b77a8c9f58c3c9e1f81f206b74dac04c 100644 (file)
@@ -115,7 +115,7 @@ static struct inode *hypfs_make_inode(struct super_block *sb, umode_t mode)
 
 static void hypfs_evict_inode(struct inode *inode)
 {
-       end_writeback(inode);
+       clear_inode(inode);
        kfree(inode->i_private);
 }
 
index e5beb490959bcea55fce5e2051061db7d0885bea..a6ff5a83e227279fe6e49dd9b6420eeca3400606 100644 (file)
@@ -13,8 +13,6 @@
  *
  */
 
-#ifdef __KERNEL__
-
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
 #endif
@@ -63,7 +61,7 @@ extern const char _ni_bitmap[];
 extern const char _zb_findmap[];
 extern const char _sb_findmap[];
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 #define __BITOPS_ALIGN         3
 #define __BITOPS_WORDSIZE      32
@@ -83,7 +81,7 @@ extern const char _sb_findmap[];
                : "d" (__val), "Q" (*(unsigned long *) __addr)  \
                : "cc");
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 #define __BITOPS_ALIGN         7
 #define __BITOPS_WORDSIZE      64
@@ -103,7 +101,7 @@ extern const char _sb_findmap[];
                : "d" (__val), "Q" (*(unsigned long *) __addr)  \
                : "cc");
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
 #define __BITOPS_BARRIER() asm volatile("" : : : "memory")
@@ -412,7 +410,7 @@ static inline unsigned long __ffz_word_loop(const unsigned long *addr,
        unsigned long bytes = 0;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       ahi     %1,-1\n"
                "       sra     %1,5\n"
                "       jz      1f\n"
@@ -449,7 +447,7 @@ static inline unsigned long __ffs_word_loop(const unsigned long *addr,
        unsigned long bytes = 0;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       ahi     %1,-1\n"
                "       sra     %1,5\n"
                "       jz      1f\n"
@@ -481,7 +479,7 @@ static inline unsigned long __ffs_word_loop(const unsigned long *addr,
  */
 static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        if ((word & 0xffffffff) == 0xffffffff) {
                word >>= 32;
                nr += 32;
@@ -505,7 +503,7 @@ static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
  */
 static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        if ((word & 0xffffffff) == 0) {
                word >>= 32;
                nr += 32;
@@ -546,7 +544,7 @@ static inline unsigned long __load_ulong_le(const unsigned long *p,
        unsigned long word;
 
        p = (unsigned long *)((unsigned long) p + offset);
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        asm volatile(
                "       ic      %0,%O1(%R1)\n"
                "       icm     %0,2,%O1+1(%R1)\n"
@@ -834,7 +832,4 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
 
-
-#endif /* __KERNEL__ */
-
 #endif /* _S390_BITOPS_H */
index fc50a3342da3ba726f35db37df75110897c50478..4c8d4d5b8bd2ca545a1e35dcacaa4f45b115c083 100644 (file)
@@ -10,8 +10,6 @@
 #include <linux/spinlock.h>
 #include <asm/types.h>
 
-#ifdef __KERNEL__
-
 #define LPM_ANYPATH 0xff
 #define __MAX_CSSID 0
 
@@ -291,5 +289,3 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl);
 int chsc_sstpi(void *page, void *result, size_t size);
 
 #endif
-
-#endif
index 81d7908416cf769202a40541d6a1779560712df5..8d798e962b632c9a8aa426576077a48d1f6f0a38 100644 (file)
@@ -29,7 +29,7 @@ static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
                        "       cs      %0,0,%4\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=Q" (*(int *) addr)
-                       : "d" (x << shift), "d" (~(255 << shift)),
+                       : "d" ((x & 0xff) << shift), "d" (~(0xff << shift)),
                          "Q" (*(int *) addr) : "memory", "cc", "0");
                return old >> shift;
        case 2:
@@ -44,7 +44,7 @@ static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
                        "       cs      %0,0,%4\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=Q" (*(int *) addr)
-                       : "d" (x << shift), "d" (~(65535 << shift)),
+                       : "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)),
                          "Q" (*(int *) addr) : "memory", "cc", "0");
                return old >> shift;
        case 4:
@@ -113,9 +113,10 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
                        "       nr      %1,%5\n"
                        "       jnz     0b\n"
                        "1:"
-                       : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
-                       : "d" (old << shift), "d" (new << shift),
-                         "d" (~(255 << shift)), "Q" (*(int *) ptr)
+                       : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
+                       : "d" ((old & 0xff) << shift),
+                         "d" ((new & 0xff) << shift),
+                         "d" (~(0xff << shift))
                        : "memory", "cc");
                return prev >> shift;
        case 2:
@@ -134,9 +135,10 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
                        "       nr      %1,%5\n"
                        "       jnz     0b\n"
                        "1:"
-                       : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
-                       : "d" (old << shift), "d" (new << shift),
-                         "d" (~(65535 << shift)), "Q" (*(int *) ptr)
+                       : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
+                       : "d" ((old & 0xffff) << shift),
+                         "d" ((new & 0xffff) << shift),
+                         "d" (~(0xffff << shift))
                        : "memory", "cc");
                return prev >> shift;
        case 4:
@@ -160,9 +162,14 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
        return old;
 }
 
-#define cmpxchg(ptr, o, n)                                             \
-       ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),       \
-                                      (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n)                                              \
+({                                                                      \
+       __typeof__(*(ptr)) __ret;                                        \
+       __ret = (__typeof__(*(ptr)))                                     \
+               __cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n), \
+                         sizeof(*(ptr)));                               \
+       __ret;                                                           \
+})
 
 #ifdef CONFIG_64BIT
 #define cmpxchg64(ptr, o, n)                                           \
@@ -181,13 +188,19 @@ static inline unsigned long long __cmpxchg64(void *ptr,
                "       cds     %0,%2,%1"
                : "+&d" (rp_old), "=Q" (ptr)
                : "d" (rp_new), "Q" (ptr)
-               : "cc");
+               : "memory", "cc");
        return rp_old.pair;
 }
-#define cmpxchg64(ptr, o, n)                                           \
-       ((__typeof__(*(ptr)))__cmpxchg64((ptr),                         \
-                                        (unsigned long long)(o),       \
-                                        (unsigned long long)(n)))
+
+#define cmpxchg64(ptr, o, n)                           \
+({                                                     \
+       __typeof__(*(ptr)) __ret;                       \
+       __ret = (__typeof__(*(ptr)))                    \
+               __cmpxchg64((ptr),                      \
+                           (unsigned long long)(o),    \
+                           (unsigned long long)(n));   \
+       __ret;                                          \
+})
 #endif /* CONFIG_64BIT */
 
 #include <asm-generic/cmpxchg-local.h>
@@ -216,8 +229,13 @@ static inline unsigned long __cmpxchg_local(void *ptr,
  * them available.
  */
 #define cmpxchg_local(ptr, o, n)                                       \
-       ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
-                       (unsigned long)(n), sizeof(*(ptr))))
+({                                                                     \
+       __typeof__(*(ptr)) __ret;                                       \
+       __ret = (__typeof__(*(ptr)))                                    \
+               __cmpxchg_local((ptr), (unsigned long)(o),              \
+                               (unsigned long)(n), sizeof(*(ptr)));    \
+       __ret;                                                          \
+})
 
 #define cmpxchg64_local(ptr, o, n)     cmpxchg64((ptr), (o), (n))
 
index 24ef186a1c4f6f0b7170834986aa01d320e45163..718374de9c7f3f75f658bbaedb54695760ed25eb 100644 (file)
@@ -21,15 +21,15 @@ typedef unsigned long long __nocast cputime64_t;
 
 static inline unsigned long __div(unsigned long long n, unsigned long base)
 {
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        register_pair rp;
 
        rp.pair = n >> 1;
        asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
        return rp.subreg.odd;
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
        return n / base;
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 }
 
 #define cputime_one_jiffy              jiffies_to_cputime(1)
@@ -100,7 +100,7 @@ static inline void cputime_to_timespec(const cputime_t cputime,
                                       struct timespec *value)
 {
        unsigned long long __cputime = (__force unsigned long long) cputime;
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        register_pair rp;
 
        rp.pair = __cputime >> 1;
@@ -128,7 +128,7 @@ static inline void cputime_to_timeval(const cputime_t cputime,
                                      struct timeval *value)
 {
        unsigned long long __cputime = (__force unsigned long long) cputime;
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        register_pair rp;
 
        rp.pair = __cputime >> 1;
index ecde9417d669f20ef06d1eddab491aaab2ffb076..debfda33d1f86d88a8b3bce89e6b23962c643489 100644 (file)
@@ -7,7 +7,7 @@
 #ifndef __ASM_CTL_REG_H
 #define __ASM_CTL_REG_H
 
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 
 #define __ctl_load(array, low, high) ({                                \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
@@ -25,7 +25,7 @@
                : "i" (low), "i" (high));                       \
        })
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 #define __ctl_load(array, low, high) ({                                \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
@@ -43,7 +43,7 @@
                : "i" (low), "i" (high));                       \
        })
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define __ctl_set_bit(cr, bit) ({      \
        unsigned long __dummy;          \
index 83cf36cde2da2cc9356bc12d6d9ad9689cc9d7e1..7a68084ec2f0aa0443ff671048082562ca63c346 100644 (file)
 #ifndef _S390_CURRENT_H
 #define _S390_CURRENT_H
 
-#ifdef __KERNEL__
 #include <asm/lowcore.h>
 
 struct task_struct;
 
 #define current ((struct task_struct *const)S390_lowcore.current_task)
 
-#endif
-
 #endif /* !(_S390_CURRENT_H) */
index c4ee39f7a4d6a12895f6f3a8bf8b862b83ea9e4d..06151e6a309889a16b46e62efbc26798d8b0e42d 100644 (file)
 /*
  * These are used to set parameters in the core dumps.
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define ELF_CLASS      ELFCLASS32
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define ELF_CLASS      ELFCLASS64
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 #define ELF_DATA       ELFDATA2MSB
 #define ELF_ARCH       EM_S390
 
@@ -181,9 +181,9 @@ extern unsigned long elf_hwcap;
 extern char elf_platform[];
 #define ELF_PLATFORM (elf_platform)
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define SET_PERSONALITY(ex) set_personality(PER_LINUX)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define SET_PERSONALITY(ex)                                    \
 do {                                                           \
        if (personality(current->personality) != PER_LINUX32)   \
@@ -194,7 +194,7 @@ do {                                                                \
        else                                                    \
                clear_thread_flag(TIF_31BIT);                   \
 } while (0)
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define STACK_RND_MASK 0x7ffUL
 
index 81cf36b691f1dfd42c2ed4f5a48f6bc42a0a7a0e..96bc83ea5c90e0a05f4b959167145b5ef1484e52 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef _ASM_S390_FUTEX_H
 #define _ASM_S390_FUTEX_H
 
-#ifdef __KERNEL__
-
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <asm/errno.h>
@@ -48,5 +46,4 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
 }
 
-#endif /* __KERNEL__ */
 #endif /* _ASM_S390_FUTEX_H */
index aae276d00383cc90660f1e1771d0aee11ae36949..aef0dde340d1f54b866874a9f14061b8237ef4dc 100644 (file)
@@ -20,7 +20,7 @@
 #include <asm/cio.h>
 #include <asm/uaccess.h>
 
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 #define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
 #else
 #define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
@@ -33,7 +33,7 @@
 static inline int
 idal_is_needed(void *vaddr, unsigned int length)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        return ((__pa(vaddr) + length - 1) >> 31) != 0;
 #else
        return 0;
@@ -78,7 +78,7 @@ static inline unsigned long *idal_create_words(unsigned long *idaws,
 static inline int
 set_normalized_cda(struct ccw1 * ccw, void *vaddr)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        unsigned int nridaws;
        unsigned long *idal;
 
@@ -105,7 +105,7 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
 static inline void
 clear_normalized_cda(struct ccw1 * ccw)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        if (ccw->flags & CCW_FLAG_IDA) {
                kfree((void *)(unsigned long) ccw->cda);
                ccw->flags &= ~CCW_FLAG_IDA;
@@ -182,7 +182,7 @@ idal_buffer_free(struct idal_buffer *ib)
 static inline int
 __idal_buffer_is_needed(struct idal_buffer *ib)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        return ib->size > (4096ul << ib->page_order) ||
                idal_is_needed(ib->data[0], ib->size);
 #else
index 27216d317991af2bc7d212731855a5bb2b12e37e..f81a0975cbea0efb88db7b8a123072f125f80e10 100644 (file)
@@ -11,8 +11,6 @@
 #ifndef _S390_IO_H
 #define _S390_IO_H
 
-#ifdef __KERNEL__
-
 #include <asm/page.h>
 
 #define IO_SPACE_LIMIT 0xffffffff
@@ -46,6 +44,4 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
  */
 #define xlate_dev_kmem_ptr(p)  p
 
-#endif /* __KERNEL__ */
-
 #endif
index 5289cacd4861773928e5257b670a98525637460c..2b9d41899d21af3201c71916b27806c511f37267 100644 (file)
@@ -17,7 +17,8 @@ enum interruption_class {
        EXTINT_VRT,
        EXTINT_SCP,
        EXTINT_IUC,
-       EXTINT_CPM,
+       EXTINT_CMS,
+       EXTINT_CMC,
        IOINT_CIO,
        IOINT_QAI,
        IOINT_DAS,
index 3f30dac804ea7ee92808bb5a1f51e16482462951..f4f38826eebb3347ed5c5039db329e1d318b2ef1 100644 (file)
 #ifndef _S390_KEXEC_H
 #define _S390_KEXEC_H
 
-#ifdef __KERNEL__
-#include <asm/page.h>
-#endif
 #include <asm/processor.h>
+#include <asm/page.h>
 /*
  * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
  * I.e. Maximum page that is mapped directly into kernel memory,
index 94ec3ee07983f8e9b97c6d857b5e7702bbfa36ee..0a88622339ee363d0663d53390a2106e26e51038 100644 (file)
@@ -1,8 +1,6 @@
-#ifdef __KERNEL__
 #ifndef _ASM_KMAP_TYPES_H
 #define _ASM_KMAP_TYPES_H
 
 #include <asm-generic/kmap_types.h>
 
 #endif
-#endif /* __KERNEL__ */
index 96076676e22481b72b9cb20914a9857f0b12e325..bdcbe0f8dd7b6f6560931a35e12bfebe16394b01 100644 (file)
@@ -52,4 +52,9 @@ struct kvm_sync_regs {
        __u32 acrs[16]; /* access registers */
        __u64 crs[16];  /* control registers */
 };
+
+#define KVM_REG_S390_TODPR     (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
+#define KVM_REG_S390_EPOCHDIFF (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x2)
+#define KVM_REG_S390_CPU_TIMER  (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x3)
+#define KVM_REG_S390_CLOCK_COMP (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x4)
 #endif
index 7343872890a25a692eb7925f3a5904e640408880..dd17537b9a9d2362a749a0325d8ed6b86b348ed0 100644 (file)
@@ -148,6 +148,7 @@ struct kvm_vcpu_stat {
        u32 instruction_sigp_restart;
        u32 diagnose_10;
        u32 diagnose_44;
+       u32 diagnose_9c;
 };
 
 struct kvm_s390_io_info {
index 6964db226f83ee3d27e7bde23e5269bef95d3e61..a98832961035b6daf24d871d152faa2d228fb91e 100644 (file)
@@ -149,6 +149,11 @@ static inline unsigned int kvm_arch_para_features(void)
        return 0;
 }
 
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+       return false;
+}
+
 #endif
 
 #endif /* __S390_KVM_PARA_H */
index 5d09e405c54d504a8e3a243b54155d91eb8e18db..69bdf72e95ecfd7fd5134640a48b929952a74747 100644 (file)
@@ -49,7 +49,7 @@ static inline int init_new_context(struct task_struct *tsk,
 
 #define destroy_context(mm)             do { } while (0)
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define LCTL_OPCODE "lctl"
 #else
 #define LCTL_OPCODE "lctlg"
index 1cc1c5af705aadb431dbd2dfbced6f25b5b3fa67..f0b6b26b6e59de846b260deef64d3d0b07e0a188 100644 (file)
@@ -28,7 +28,7 @@ struct mod_arch_specific
        struct mod_arch_syminfo *syminfo;
 };
 
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 #define ElfW(x) Elf64_ ## x
 #define ELFW(x) ELF64_ ## x
 #else
index d07518af09ea828e1e20973aca47434ebd09ba02..295f2c4f1c96ab2dbd333b6321475796fa8b820b 100644 (file)
@@ -13,7 +13,6 @@
 
 #define OS_INFO_VMCOREINFO     0
 #define OS_INFO_REIPL_BLOCK    1
-#define OS_INFO_INIT_FN                2
 
 struct os_info_entry {
        u64     addr;
@@ -28,8 +27,8 @@ struct os_info {
        u16     version_minor;
        u64     crashkernel_addr;
        u64     crashkernel_size;
-       struct os_info_entry entry[3];
-       u8      reserved[4004];
+       struct os_info_entry entry[2];
+       u8      reserved[4024];
 } __packed;
 
 void os_info_init(void);
index 0fbd1899c7b039fe6924704ab73871fb13f6b53d..6537e72e0853d01473fe57ab1d83257e7487b2af 100644 (file)
@@ -15,7 +15,7 @@
  * per cpu area, use weak definitions to force the compiler to
  * generate external references.
  */
-#if defined(CONFIG_SMP) && defined(__s390x__) && defined(MODULE)
+#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
 #define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
index 78e3041919dedd11556ed359c40c1b2d875a197c..43078c1943948ca9b801dc60f684ef7025eed616 100644 (file)
@@ -48,7 +48,7 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
        clear_table(crst, entry, sizeof(unsigned long)*2048);
 }
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
@@ -64,7 +64,7 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 #define pgd_populate(mm, pgd, pud)             BUG()
 #define pud_populate(mm, pud, pmd)             BUG()
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
@@ -106,7 +106,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
        pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
 }
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
index 011358c1b18e0d1145874eaf5929df48e89ed435..b3227415abdaca94d3f3dcc301cb7ce425cd6a9e 100644 (file)
@@ -74,15 +74,15 @@ static inline int is_zero_pfn(unsigned long pfn)
  * table can map
  * PGDIR_SHIFT determines what a third-level page table entry can map
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 # define PMD_SHIFT     20
 # define PUD_SHIFT     20
 # define PGDIR_SHIFT   20
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 # define PMD_SHIFT     20
 # define PUD_SHIFT     31
 # define PGDIR_SHIFT   42
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define PMD_SIZE        (1UL << PMD_SHIFT)
 #define PMD_MASK        (~(PMD_SIZE-1))
@@ -98,13 +98,13 @@ static inline int is_zero_pfn(unsigned long pfn)
  * that leads to 1024 pte per pgd
  */
 #define PTRS_PER_PTE   256
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define PTRS_PER_PMD   1
 #define PTRS_PER_PUD   1
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define PTRS_PER_PMD   2048
 #define PTRS_PER_PUD   2048
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 #define PTRS_PER_PGD   2048
 
 #define FIRST_USER_ADDRESS  0
@@ -276,7 +276,7 @@ extern struct page *vmemmap;
  * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
  */
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 /* Bits in the segment table address-space-control-element */
 #define _ASCE_SPACE_SWITCH     0x80000000UL    /* space switch event       */
@@ -308,7 +308,7 @@ extern struct page *vmemmap;
 #define KVM_UR_BIT     0x00008000UL
 #define KVM_UC_BIT     0x00004000UL
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 /* Bits in the segment/region table address-space-control-element */
 #define _ASCE_ORIGIN           ~0xfffUL/* segment table origin             */
@@ -363,7 +363,7 @@ extern struct page *vmemmap;
 #define KVM_UR_BIT     0x0000800000000000UL
 #define KVM_UC_BIT     0x0000400000000000UL
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 /*
  * A user page table pointer has the space-switch-event bit, the
@@ -424,7 +424,7 @@ static inline int mm_has_pgste(struct mm_struct *mm)
 /*
  * pgd/pmd/pte query functions
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 static inline int pgd_present(pgd_t pgd) { return 1; }
 static inline int pgd_none(pgd_t pgd)    { return 0; }
@@ -434,7 +434,7 @@ static inline int pud_present(pud_t pud) { return 1; }
 static inline int pud_none(pud_t pud)   { return 0; }
 static inline int pud_bad(pud_t pud)    { return 0; }
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 static inline int pgd_present(pgd_t pgd)
 {
@@ -490,7 +490,7 @@ static inline int pud_bad(pud_t pud)
        return (pud_val(pud) & mask) != 0;
 }
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 static inline int pmd_present(pmd_t pmd)
 {
@@ -741,7 +741,7 @@ static inline int pte_young(pte_t pte)
 
 static inline void pgd_clear(pgd_t *pgd)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
                pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
 #endif
@@ -749,7 +749,7 @@ static inline void pgd_clear(pgd_t *pgd)
 
 static inline void pud_clear(pud_t *pud)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                pud_val(*pud) = _REGION3_ENTRY_EMPTY;
 #endif
@@ -921,7 +921,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 {
        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                /* pto must point to the start of the segment table */
                pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
 #else
@@ -1116,7 +1116,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
 #define pud_deref(pmd) ({ BUG(); 0UL; })
@@ -1125,7 +1125,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 #define pud_offset(pgd, address) ((pud_t *) pgd)
 #define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
@@ -1147,7 +1147,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
        return pmd + pmd_index(address);
 }
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
@@ -1196,7 +1196,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
  *  0000000000111111111122222222223333333333444444444455 5555 5 55566 66
  *  0123456789012345678901234567890123456789012345678901 2345 6 78901 23
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define __SWP_OFFSET_MASK (~0UL >> 12)
 #else
 #define __SWP_OFFSET_MASK (~0UL >> 11)
@@ -1217,11 +1217,11 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 # define PTE_FILE_MAX_BITS     26
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 # define PTE_FILE_MAX_BITS     59
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define pte_to_pgoff(__pte) \
        ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))
index edf8527ff08d9bdf9e5f0468e24faebfdd372c34..7be104c0f19230e157d569efb9a18002dfa4709d 100644 (file)
@@ -24,7 +24,6 @@ typedef unsigned short        __kernel_old_dev_t;
 
 typedef unsigned long   __kernel_ino_t;
 typedef unsigned short  __kernel_mode_t;
-typedef unsigned short  __kernel_nlink_t;
 typedef unsigned short  __kernel_ipc_pid_t;
 typedef unsigned short  __kernel_uid_t;
 typedef unsigned short  __kernel_gid_t;
@@ -35,7 +34,6 @@ typedef int             __kernel_ptrdiff_t;
 
 typedef unsigned int    __kernel_ino_t;
 typedef unsigned int    __kernel_mode_t;
-typedef unsigned int    __kernel_nlink_t;
 typedef int             __kernel_ipc_pid_t;
 typedef unsigned int    __kernel_uid_t;
 typedef unsigned int    __kernel_gid_t;
@@ -47,7 +45,6 @@ typedef unsigned long   __kernel_sigset_t;      /* at least 32 bits */
 
 #define __kernel_ino_t  __kernel_ino_t
 #define __kernel_mode_t __kernel_mode_t
-#define __kernel_nlink_t __kernel_nlink_t
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 #define __kernel_uid_t __kernel_uid_t
 #define __kernel_gid_t __kernel_gid_t
index 6cbf31311673a37bc83de2be5add15bbc42ece5e..20d0585cf905675422ad406d406401eba2bd6f82 100644 (file)
@@ -20,7 +20,6 @@
 #include <asm/ptrace.h>
 #include <asm/setup.h>
 
-#ifdef __KERNEL__
 /*
  * Default implementation of macro that returns current
  * instruction pointer ("program counter").
@@ -33,39 +32,33 @@ static inline void get_cpu_id(struct cpuid *ptr)
 }
 
 extern void s390_adjust_jiffies(void);
-extern int get_cpu_capability(unsigned int *);
 extern const struct seq_operations cpuinfo_op;
 extern int sysctl_ieee_emulation_warnings;
 
 /*
  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 
 #define TASK_SIZE              (1UL << 31)
 #define TASK_UNMAPPED_BASE     (1UL << 30)
 
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 
 #define TASK_SIZE_OF(tsk)      ((tsk)->mm->context.asce_limit)
 #define TASK_UNMAPPED_BASE     (test_thread_flag(TIF_31BIT) ? \
                                        (1UL << 30) : (1UL << 41))
 #define TASK_SIZE              TASK_SIZE_OF(current)
 
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
-#ifdef __KERNEL__
-
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define STACK_TOP              (1UL << 31)
 #define STACK_TOP_MAX          (1UL << 31)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define STACK_TOP              (1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
 #define STACK_TOP_MAX          (1UL << 42)
-#endif /* __s390x__ */
-
-
-#endif
+#endif /* CONFIG_64BIT */
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
 
@@ -182,7 +175,7 @@ static inline void psw_set_key(unsigned int key)
  */
 static inline void __load_psw(psw_t psw)
 {
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        asm volatile("lpsw  %0" : : "Q" (psw) : "cc");
 #else
        asm volatile("lpswe %0" : : "Q" (psw) : "cc");
@@ -200,7 +193,7 @@ static inline void __load_psw_mask (unsigned long mask)
 
        psw.mask = mask;
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        asm volatile(
                "       basr    %0,0\n"
                "0:     ahi     %0,1f-0b\n"
@@ -208,14 +201,14 @@ static inline void __load_psw_mask (unsigned long mask)
                "       lpsw    %1\n"
                "1:"
                : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
        asm volatile(
                "       larl    %0,1f\n"
                "       stg     %0,%O1+8(%R1)\n"
                "       lpswe   %1\n"
                "1:"
                : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 }
 
 /*
@@ -223,7 +216,7 @@ static inline void __load_psw_mask (unsigned long mask)
  */
 static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
 {
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        if (psw.addr & PSW_ADDR_AMODE)
                /* 31 bit mode */
                return (psw.addr - ilc) | PSW_ADDR_AMODE;
@@ -253,7 +246,7 @@ static inline void __noreturn disabled_wait(unsigned long code)
          * Store status and then load disabled wait psw,
          * the processor is dead afterwards
          */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        asm volatile(
                "       stctl   0,0,0(%2)\n"
                "       ni      0(%2),0xef\n"   /* switch off protection */
@@ -272,7 +265,7 @@ static inline void __noreturn disabled_wait(unsigned long code)
                "       lpsw    0(%1)"
                : "=m" (ctl_buf)
                : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc");
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
        asm volatile(
                "       stctg   0,0,0(%2)\n"
                "       ni      4(%2),0xef\n"   /* switch off protection */
@@ -305,7 +298,7 @@ static inline void __noreturn disabled_wait(unsigned long code)
                "       lpswe   0(%1)"
                : "=m" (ctl_buf)
                : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
        while (1);
 }
 
@@ -338,12 +331,10 @@ extern void (*s390_base_ext_handler_fn)(void);
 
 #define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
 
-#endif
-
 /*
  * Helper macro for exception table entries
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define EX_TABLE(_fault,_target)                       \
        ".section __ex_table,\"a\"\n"                   \
        "       .align 4\n"                             \
index d0eb4653cebdb0d7bf0eab014cfb142904ad89b7..1ceee10264c3832bce52f2cae57070d549afc35c 100644 (file)
 #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
 #endif
 
-#ifdef __KERNEL__
-
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define RWSEM_UNLOCKED_VALUE   0x00000000
 #define RWSEM_ACTIVE_BIAS      0x00000001
 #define RWSEM_ACTIVE_MASK      0x0000ffff
 #define RWSEM_WAITING_BIAS     (-0x00010000)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define RWSEM_UNLOCKED_VALUE   0x0000000000000000L
 #define RWSEM_ACTIVE_BIAS      0x0000000000000001L
 #define RWSEM_ACTIVE_MASK      0x00000000ffffffffL
 #define RWSEM_WAITING_BIAS     (-0x0000000100000000L)
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS        (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
@@ -65,19 +63,19 @@ static inline void __down_read(struct rw_semaphore *sem)
        signed long old, new;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     lr      %1,%0\n"
                "       ahi     %1,%4\n"
                "       cs      %0,%1,%2\n"
                "       jl      0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       aghi    %1,%4\n"
                "       csg     %0,%1,%2\n"
                "       jl      0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
                : "cc", "memory");
@@ -93,7 +91,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
        signed long old, new;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     ltr     %1,%0\n"
                "       jm      1f\n"
@@ -101,7 +99,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
                "       cs      %0,%1,%2\n"
                "       jl      0b\n"
                "1:"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     ltgr    %1,%0\n"
                "       jm      1f\n"
@@ -109,7 +107,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
                "       csg     %0,%1,%2\n"
                "       jl      0b\n"
                "1:"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
                : "cc", "memory");
@@ -125,19 +123,19 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 
        tmp = RWSEM_ACTIVE_WRITE_BIAS;
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     lr      %1,%0\n"
                "       a       %1,%4\n"
                "       cs      %0,%1,%2\n"
                "       jl      0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       ag      %1,%4\n"
                "       csg     %0,%1,%2\n"
                "       jl      0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "m" (tmp)
                : "cc", "memory");
@@ -158,19 +156,19 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
        signed long old;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%1\n"
                "0:     ltr     %0,%0\n"
                "       jnz     1f\n"
                "       cs      %0,%3,%1\n"
                "       jl      0b\n"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%1\n"
                "0:     ltgr    %0,%0\n"
                "       jnz     1f\n"
                "       csg     %0,%3,%1\n"
                "       jl      0b\n"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                "1:"
                : "=&d" (old), "=Q" (sem->count)
                : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
@@ -186,19 +184,19 @@ static inline void __up_read(struct rw_semaphore *sem)
        signed long old, new;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     lr      %1,%0\n"
                "       ahi     %1,%4\n"
                "       cs      %0,%1,%2\n"
                "       jl      0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       aghi    %1,%4\n"
                "       csg     %0,%1,%2\n"
                "       jl      0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
                : "cc", "memory");
@@ -216,19 +214,19 @@ static inline void __up_write(struct rw_semaphore *sem)
 
        tmp = -RWSEM_ACTIVE_WRITE_BIAS;
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     lr      %1,%0\n"
                "       a       %1,%4\n"
                "       cs      %0,%1,%2\n"
                "       jl      0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       ag      %1,%4\n"
                "       csg     %0,%1,%2\n"
                "       jl      0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "m" (tmp)
                : "cc", "memory");
@@ -246,19 +244,19 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 
        tmp = -RWSEM_WAITING_BIAS;
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     lr      %1,%0\n"
                "       a       %1,%4\n"
                "       cs      %0,%1,%2\n"
                "       jl      0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       ag      %1,%4\n"
                "       csg     %0,%1,%2\n"
                "       jl      0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "m" (tmp)
                : "cc", "memory");
@@ -274,19 +272,19 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
        signed long old, new;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     lr      %1,%0\n"
                "       ar      %1,%4\n"
                "       cs      %0,%1,%2\n"
                "       jl      0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       agr     %1,%4\n"
                "       csg     %0,%1,%2\n"
                "       jl      0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "d" (delta)
                : "cc", "memory");
@@ -300,24 +298,23 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
        signed long old, new;
 
        asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
                "       l       %0,%2\n"
                "0:     lr      %1,%0\n"
                "       ar      %1,%4\n"
                "       cs      %0,%1,%2\n"
                "       jl      0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       agr     %1,%4\n"
                "       csg     %0,%1,%2\n"
                "       jl      0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "d" (delta)
                : "cc", "memory");
        return new;
 }
 
-#endif /* __KERNEL__ */
 #endif /* _S390_RWSEM_H */
index fed7bee650a06868072ef323723d7e6abe2336d7..bf238c55740bc56e7f60148eafc9f89ee99d09e5 100644 (file)
@@ -48,6 +48,7 @@ int sclp_cpu_deconfigure(u8 cpu);
 void sclp_facilities_detect(void);
 unsigned long long sclp_get_rnmax(void);
 unsigned long long sclp_get_rzm(void);
+u8 sclp_get_fac85(void);
 int sclp_sdias_blk_count(void);
 int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
 int sclp_chp_configure(struct chp_id chpid);
index 7244e1f6412669f4f0be00e6ec9ab9e31005ed9f..40eb2ff88e9e59766cc7e1ba931e65a303e06a16 100644 (file)
 #include <asm/lowcore.h>
 #include <asm/types.h>
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define IPL_DEVICE        (*(unsigned long *)  (0x10404))
 #define INITRD_START      (*(unsigned long *)  (0x1040C))
 #define INITRD_SIZE       (*(unsigned long *)  (0x10414))
 #define OLDMEM_BASE      (*(unsigned long *)  (0x1041C))
 #define OLDMEM_SIZE      (*(unsigned long *)  (0x10424))
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define IPL_DEVICE        (*(unsigned long *)  (0x10400))
 #define INITRD_START      (*(unsigned long *)  (0x10408))
 #define INITRD_SIZE       (*(unsigned long *)  (0x10410))
 #define OLDMEM_BASE      (*(unsigned long *)  (0x10418))
 #define OLDMEM_SIZE      (*(unsigned long *)  (0x10420))
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 #define COMMAND_LINE      ((char *)            (0x10480))
 
 #define CHUNK_READ_WRITE 0
@@ -89,7 +89,7 @@ extern unsigned int user_mode;
 
 #define MACHINE_HAS_DIAG9C     (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define MACHINE_HAS_IEEE       (S390_lowcore.machine_flags & MACHINE_FLAG_IEEE)
 #define MACHINE_HAS_CSP                (S390_lowcore.machine_flags & MACHINE_FLAG_CSP)
 #define MACHINE_HAS_IDTE       (0)
@@ -100,7 +100,7 @@ extern unsigned int user_mode;
 #define MACHINE_HAS_PFMF       (0)
 #define MACHINE_HAS_SPP                (0)
 #define MACHINE_HAS_TOPOLOGY   (0)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define MACHINE_HAS_IEEE       (1)
 #define MACHINE_HAS_CSP                (1)
 #define MACHINE_HAS_IDTE       (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
@@ -111,7 +111,7 @@ extern unsigned int user_mode;
 #define MACHINE_HAS_PFMF       (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
 #define MACHINE_HAS_SPP                (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
 #define MACHINE_HAS_TOPOLOGY   (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define ZFCPDUMP_HSA_SIZE      (32UL<<20)
 #define ZFCPDUMP_HSA_SIZE_MAX  (64UL<<20)
@@ -153,19 +153,19 @@ extern void (*_machine_power_off)(void);
 
 #else /* __ASSEMBLY__ */
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define IPL_DEVICE        0x10404
 #define INITRD_START      0x1040C
 #define INITRD_SIZE       0x10414
 #define OLDMEM_BASE      0x1041C
 #define OLDMEM_SIZE      0x10424
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #define IPL_DEVICE        0x10400
 #define INITRD_START      0x10408
 #define INITRD_SIZE       0x10410
 #define OLDMEM_BASE      0x10418
 #define OLDMEM_SIZE      0x10420
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 #define COMMAND_LINE      0x10480
 
 #endif /* __ASSEMBLY__ */
index ca3f8814e3614050d5714843fea3a97c90db8bca..5959bfb3b693ce79def5ccfe2bb548b4dad6378b 100644 (file)
@@ -51,7 +51,7 @@
        wl = __wl;                                      \
 })
 
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 #define udiv_qrnnd(q, r, n1, n0, d)                    \
   do { unsigned long __n;                              \
        unsigned int __r, __d;                          \
index cd0241db5a4688b754d6838497cfc06768b4025e..8cc160c9e1cb108c2ce9829dc4c5684a34a3b4ae 100644 (file)
@@ -9,8 +9,6 @@
 #ifndef _S390_STRING_H_
 #define _S390_STRING_H_
 
-#ifdef __KERNEL__
-
 #ifndef _LINUX_TYPES_H
 #include <linux/types.h>
 #endif
@@ -152,6 +150,4 @@ size_t strlen(const char *s);
 size_t strnlen(const char * s, size_t n);
 #endif /* !IN_ARCH_STRING_C */
 
-#endif /* __KERNEL__ */
-
 #endif /* __S390_STRING_H_ */
index 003b04edcff6636f1e5c23a04054dbd7f4de749d..4e40b25cd0600e7d76fcdf38789f86fd32275839 100644 (file)
@@ -9,15 +9,13 @@
 #ifndef _ASM_THREAD_INFO_H
 #define _ASM_THREAD_INFO_H
 
-#ifdef __KERNEL__
-
 /*
  * Size of kernel stack for each process
  */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define THREAD_ORDER 1
 #define ASYNC_ORDER  1
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
 #ifndef __SMALL_STACK
 #define THREAD_ORDER 2
 #define ASYNC_ORDER  2
@@ -25,7 +23,7 @@
 #define THREAD_ORDER 1
 #define ASYNC_ORDER  1
 #endif
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
 #define ASYNC_SIZE  (PAGE_SIZE << ASYNC_ORDER)
@@ -123,8 +121,6 @@ static inline struct thread_info *current_thread_info(void)
 #define is_32bit_task()                (1)
 #endif
 
-#endif /* __KERNEL__ */
-
 #define PREEMPT_ACTIVE         0x4000000
 
 #endif /* _ASM_THREAD_INFO_H */
index e63069ba39e3b274d46d691f62ae74e5d14d4249..15d647901e5cafc14a355325a7ebe25d53da88da 100644 (file)
@@ -10,8 +10,6 @@
 #ifndef _ASM_S390_TIMER_H
 #define _ASM_S390_TIMER_H
 
-#ifdef __KERNEL__
-
 #include <linux/timer.h>
 
 #define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
@@ -50,6 +48,4 @@ extern void vtime_init(void);
 extern void vtime_stop_cpu(void);
 extern void vtime_start_leave(void);
 
-#endif /* __KERNEL__ */
-
 #endif /* _ASM_S390_TIMER_H */
index 775a5eea8f9eb9896e9d38e809dc74d51823d99f..06e5acbc84bd50ef4917eabb5f6fc8cd2d6f6b22 100644 (file)
@@ -106,7 +106,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                unsigned long address)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        if (tlb->mm->context.asce_limit <= (1UL << 31))
                return;
        if (!tlb->fullmm)
@@ -125,7 +125,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                unsigned long address)
 {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        if (tlb->mm->context.asce_limit <= (1UL << 42))
                return;
        if (!tlb->fullmm)
index 1d8648cf2fea81eb7a6fdbe5129fd22478b074c2..9fde315f3a7cd42184a54f2174584258076e6a60 100644 (file)
@@ -27,12 +27,12 @@ static inline void __tlb_flush_global(void)
        register unsigned long reg4 asm("4");
        long dummy;
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
        if (!MACHINE_HAS_CSP) {
                smp_ptlb_all();
                return;
        }
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
 
        dummy = 0;
        reg2 = reg3 = 0;
index 05ebbcdbbf6ba7d34791545f3f37355fcb10ef00..6c8c35f8df142b3b8e22dd21d3230a2403dcab38 100644 (file)
@@ -28,7 +28,7 @@ typedef __signed__ long saddr_t;
 
 #ifndef __ASSEMBLY__
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 typedef union {
        unsigned long long pair;
        struct {
@@ -37,7 +37,7 @@ typedef union {
        } subreg;
 } register_pair;
 
-#endif /* ! __s390x__   */
+#endif /* ! CONFIG_64BIT   */
 #endif /* __ASSEMBLY__  */
 #endif /* __KERNEL__    */
 #endif /* _S390_TYPES_H */
index 8f2cada4f7c916d9d88fb87cbb390cf99e4b793e..1f3a79bcd262722e251d575009cec172a9c77509 100644 (file)
 
 #define segment_eq(a,b) ((a).ar4 == (b).ar4)
 
-#define __access_ok(addr, size)        \
-({                             \
-       __chk_user_ptr(addr);   \
-       1;                      \
+static inline int __range_ok(unsigned long addr, unsigned long size)
+{
+       return 1;
+}
+
+#define __access_ok(addr, size)                                \
+({                                                     \
+       __chk_user_ptr(addr);                           \
+       __range_ok((unsigned long)(addr), (size));      \
 })
 
 #define access_ok(type, addr, size) __access_ok(addr, size)
@@ -377,7 +382,7 @@ clear_user(void __user *to, unsigned long n)
 }
 
 extern int memcpy_real(void *, void *, size_t);
-extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
+extern void memcpy_absolute(void *, void *, size_t);
 extern int copy_to_user_real(void __user *dest, void *src, size_t count);
 extern int copy_from_user_real(void *dest, void __user *src, size_t count);
 
index c4a11cfad3c8a55aa1178b5769f82d973d963c66..a73eb2e1e918351356005b99940629235ef6ee98 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef __S390_VDSO_H__
 #define __S390_VDSO_H__
 
-#ifdef __KERNEL__
-
 /* Default link addresses for the vDSOs */
 #define VDSO32_LBASE   0
 #define VDSO64_LBASE   0
@@ -45,7 +43,4 @@ void vdso_free_per_cpu(struct _lowcore *lowcore);
 #endif
 
 #endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
 #endif /* __S390_VDSO_H__ */
index 3aa4d00aaf50ec0af3d4581facf40c1cffeef171..c880ff72db44a0247f57c48450a230f682843ec1 100644 (file)
@@ -88,6 +88,9 @@ ENTRY(diag308_reset)
        stctg   %c0,%c15,0(%r4)
        larl    %r4,.Lfpctl             # Floating point control register
        stfpc   0(%r4)
+       larl    %r4,.Lcontinue_psw      # Save PSW flags
+       epsw    %r2,%r3
+       stm     %r2,%r3,0(%r4)
        larl    %r4,.Lrestart_psw       # Setup restart PSW at absolute 0
        lghi    %r3,0
        lg      %r4,0(%r4)              # Save PSW
@@ -103,11 +106,20 @@ ENTRY(diag308_reset)
        lctlg   %c0,%c15,0(%r4)
        larl    %r4,.Lfpctl             # Restore floating point ctl register
        lfpc    0(%r4)
+       larl    %r4,.Lcontinue_psw      # Restore PSW flags
+       lpswe   0(%r4)
+.Lcontinue:
        br      %r14
 .align 16
 .Lrestart_psw:
        .long   0x00080000,0x80000000 + .Lrestart_part2
 
+       .section .data..nosave,"aw",@progbits
+.align 8
+.Lcontinue_psw:
+       .quad   0,.Lcontinue
+       .previous
+
        .section .bss
 .align 8
 .Lctlregs:
index 377c096ca4a72c658327b543127692c1e1a91f0c..3c0c19830c37f5a0f7896d0a292762b794b30a74 100644 (file)
@@ -32,8 +32,6 @@
 #include "compat_ptrace.h"
 #include "entry.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 typedef struct 
 {
        __u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
@@ -364,7 +362,6 @@ asmlinkage long sys32_sigreturn(void)
                goto badframe;
        if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
                goto badframe;
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        if (restore_sigregs32(regs, &frame->sregs))
                goto badframe;
@@ -390,7 +387,6 @@ asmlinkage long sys32_rt_sigreturn(void)
                goto badframe;
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
                goto badframe;
@@ -572,7 +568,7 @@ give_sigsegv:
  * OK, we're invoking a handler
  */    
 
-int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+void handle_signal32(unsigned long sig, struct k_sigaction *ka,
                    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
 {
        int ret;
@@ -583,8 +579,8 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
        else
                ret = setup_frame32(sig, ka, oldset, regs);
        if (ret)
-               return ret;
-       block_sigmask(ka, sig);
-       return 0;
+               return;
+       signal_delivered(sig, info, ka, regs,
+                                test_thread_flag(TIF_SINGLE_STEP));
 }
 
index d84181f1f5e83f4dc82b6099a495bc763e317375..6684fff1755834f14837868248a31a66644f4575 100644 (file)
@@ -237,7 +237,7 @@ static noinline __init void detect_machine_type(void)
                S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
 }
 
-static __init void early_pgm_check_handler(void)
+static void early_pgm_check_handler(void)
 {
        unsigned long addr;
        const struct exception_table_entry *fixup;
index 6cdddac93a2e48ed47f58d74648c5d858c7d8557..f66a229ab0b3fdf52308cb55c0ab627ef9564a7b 100644 (file)
@@ -31,7 +31,7 @@ void do_per_trap(struct pt_regs *regs);
 void syscall_trace(struct pt_regs *regs, int entryexit);
 void kernel_stack_overflow(struct pt_regs * regs);
 void do_signal(struct pt_regs *regs);
-int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+void handle_signal32(unsigned long sig, struct k_sigaction *ka,
                    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
 void do_notify_resume(struct pt_regs *regs);
 
index e1ac3893e972883e2c17b2787fad0a8857efc8d5..796c976b5fdc1b49a6d82e5a9a7cf19906a9443c 100644 (file)
@@ -85,11 +85,6 @@ startup_kdump_relocated:
        basr    %r13,0
 0:
        mvc     0(8,%r0),.Lrestart_psw-0b(%r13) # Setup restart PSW
-       mvc     464(16,%r0),.Lpgm_psw-0b(%r13)  # Setup pgm check PSW
-       lhi     %r1,1                           # Start new kernel
-       diag    %r1,%r1,0x308                   # with diag 308
-
-.Lno_diag308:                                  # No diag 308
        sam31                                   # Switch to 31 bit addr mode
        sr      %r1,%r1                         # Erase register r1
        sr      %r2,%r2                         # Erase register r2
@@ -98,8 +93,6 @@ startup_kdump_relocated:
 .align 8
 .Lrestart_psw:
        .long   0x00080000,0x80000000 + startup
-.Lpgm_psw:
-       .quad   0x0000000180000000,0x0000000000000000 + .Lno_diag308
 #else
 .align 2
 .Lep_startup_kdump:
index 8342e65a140daf7bb3fc9f589fbcd1f8d417f322..2f6cfd460cb6ad5a04fd033ea7f515b49ba3e487 100644 (file)
@@ -1528,12 +1528,15 @@ static struct shutdown_action __refdata dump_action = {
 
 static void dump_reipl_run(struct shutdown_trigger *trigger)
 {
-       u32 csum;
-
-       csum = csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
-       copy_to_absolute_zero(&S390_lowcore.ipib_checksum, &csum, sizeof(csum));
-       copy_to_absolute_zero(&S390_lowcore.ipib, &reipl_block_actual,
-                             sizeof(reipl_block_actual));
+       struct {
+               void    *addr;
+               __u32   csum;
+       } __packed ipib;
+
+       ipib.csum = csum_partial(reipl_block_actual,
+                                reipl_block_actual->hdr.len, 0);
+       ipib.addr = reipl_block_actual;
+       memcpy_absolute(&S390_lowcore.ipib, &ipib, sizeof(ipib));
        dump_run(trigger);
 }
 
@@ -1750,6 +1753,7 @@ static struct kobj_attribute on_restart_attr =
 
 static void __do_restart(void *ignore)
 {
+       __arch_local_irq_stosm(0x04); /* enable DAT */
        smp_send_stop();
 #ifdef CONFIG_CRASH_DUMP
        crash_kexec(NULL);
index 8a22c27219dd0748f380a0aef0d63128b01ec21a..b4f4a7133fa10e3456b82f7f27fc8fbf70d942ed 100644 (file)
@@ -42,7 +42,8 @@ static const struct irq_class intrclass_names[] = {
        {.name = "VRT", .desc = "[EXT] Virtio" },
        {.name = "SCP", .desc = "[EXT] Service Call" },
        {.name = "IUC", .desc = "[EXT] IUCV" },
-       {.name = "CPM", .desc = "[EXT] CPU Measurement" },
+       {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling" },
+       {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter" },
        {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt" },
        {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
        {.name = "DAS", .desc = "[I/O] DASD" },
index bdad47d544783d89fa015ee6db34d52a2a555695..cdacf8f91b2d11b7cb3acd683f7a8db830c6fb05 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/ipl.h>
 #include <asm/diag.h>
 #include <asm/asm-offsets.h>
+#include <asm/os_info.h>
 
 typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
 
@@ -79,8 +80,8 @@ static void __do_machine_kdump(void *image)
 #ifdef CONFIG_CRASH_DUMP
        int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
 
-       __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
        setup_regs();
+       __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
        start_kdump(1);
 #endif
 }
@@ -114,8 +115,13 @@ static void crash_map_pages(int enable)
               size % KEXEC_CRASH_MEM_ALIGN);
        if (enable)
                vmem_add_mapping(crashk_res.start, size);
-       else
+       else {
                vmem_remove_mapping(crashk_res.start, size);
+               if (size)
+                       os_info_crashkernel_add(crashk_res.start, size);
+               else
+                       os_info_crashkernel_add(0, 0);
+       }
 }
 
 /*
@@ -208,6 +214,7 @@ static void __machine_kexec(void *data)
 {
        struct kimage *image = data;
 
+       __arch_local_irq_stosm(0x04); /* enable DAT */
        pfault_fini();
        tracing_off();
        debug_locks_off();
index e8d6c214d498a0aaf5037f7d64c5fd7e6306fbc9..95fa5ac6c4cedbf6d287ca25708b906cb1f35c4c 100644 (file)
@@ -60,7 +60,7 @@ void __init os_info_init(void)
        os_info.version_minor = OS_INFO_VERSION_MINOR;
        os_info.magic = OS_INFO_MAGIC;
        os_info.csum = os_info_csum(&os_info);
-       copy_to_absolute_zero(&S390_lowcore.os_info, &ptr, sizeof(ptr));
+       memcpy_absolute(&S390_lowcore.os_info, &ptr, sizeof(ptr));
 }
 
 #ifdef CONFIG_CRASH_DUMP
@@ -138,7 +138,6 @@ static void os_info_old_init(void)
                goto fail_free;
        os_info_old_alloc(OS_INFO_VMCOREINFO, 1);
        os_info_old_alloc(OS_INFO_REIPL_BLOCK, 1);
-       os_info_old_alloc(OS_INFO_INIT_FN, PAGE_SIZE);
        pr_info("crashkernel: addr=0x%lx size=%lu\n",
                (unsigned long) os_info_old->crashkernel_addr,
                (unsigned long) os_info_old->crashkernel_size);
index cb019f429e88ba22745a14bf38c714743c2383ad..9871b1971ed7602a7efef88fc62d019dde3b98c4 100644 (file)
@@ -225,7 +225,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
        if (!(alert & CPU_MF_INT_CF_MASK))
                return;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++;
+       kstat_cpu(smp_processor_id()).irqs[EXTINT_CMC]++;
        cpuhw = &__get_cpu_var(cpu_hw_events);
 
        /* Measurement alerts are shared and might happen when the PMU
index 06264ae8ccd9e05fd54f166d4d2bdaa37df18441..489d1d8d96b068f63b61886ee3c55d50f52b3913 100644 (file)
@@ -428,10 +428,12 @@ static void __init setup_lowcore(void)
        lc->restart_fn = (unsigned long) do_restart;
        lc->restart_data = 0;
        lc->restart_source = -1UL;
-       memcpy(&S390_lowcore.restart_stack, &lc->restart_stack,
-              4*sizeof(unsigned long));
-       copy_to_absolute_zero(&S390_lowcore.restart_psw,
-                             &lc->restart_psw, sizeof(psw_t));
+
+       /* Setup absolute zero lowcore */
+       memcpy_absolute(&S390_lowcore.restart_stack, &lc->restart_stack,
+                       4 * sizeof(unsigned long));
+       memcpy_absolute(&S390_lowcore.restart_psw, &lc->restart_psw,
+                       sizeof(lc->restart_psw));
 
        set_prefix((u32)(unsigned long) lc);
        lowcore_ptr[0] = lc;
@@ -598,7 +600,7 @@ static void __init setup_vmcoreinfo(void)
 #ifdef CONFIG_KEXEC
        unsigned long ptr = paddr_vmcoreinfo_note();
 
-       copy_to_absolute_zero(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
+       memcpy_absolute(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
 #endif
 }
 
index f626232e216c0ca100f14c1cbeed52c8696c578f..ac565b44aabbf76c23da00540f8f2884e06f07a6 100644 (file)
@@ -33,9 +33,6 @@
 #include <asm/switch_to.h>
 #include "entry.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-
 typedef struct 
 {
        __u8 callee_used_stack[__SIGNAL_FRAMESIZE];
@@ -169,7 +166,6 @@ SYSCALL_DEFINE0(sigreturn)
                goto badframe;
        if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
                goto badframe;
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        if (restore_sigregs(regs, &frame->sregs))
                goto badframe;
@@ -189,7 +185,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
                goto badframe;
        if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        if (restore_sigregs(regs, &frame->uc.uc_mcontext))
                goto badframe;
@@ -367,7 +362,7 @@ give_sigsegv:
        return -EFAULT;
 }
 
-static int handle_signal(unsigned long sig, struct k_sigaction *ka,
+static void handle_signal(unsigned long sig, struct k_sigaction *ka,
                         siginfo_t *info, sigset_t *oldset,
                         struct pt_regs *regs)
 {
@@ -379,9 +374,9 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
        else
                ret = setup_frame(sig, ka, oldset, regs);
        if (ret)
-               return ret;
-       block_sigmask(ka, sig);
-       return 0;
+               return;
+       signal_delivered(sig, info, ka, regs,
+                                test_thread_flag(TIF_SINGLE_STEP));
 }
 
 /*
@@ -398,12 +393,7 @@ void do_signal(struct pt_regs *regs)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
-
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
+       sigset_t *oldset = sigmask_to_save();
 
        /*
         * Get signal to deliver. When running under ptrace, at this point
@@ -441,24 +431,10 @@ void do_signal(struct pt_regs *regs)
                /* No longer in a system call */
                clear_thread_flag(TIF_SYSCALL);
 
-               if ((is_compat_task() ?
-                    handle_signal32(signr, &ka, &info, oldset, regs) :
-                    handle_signal(signr, &ka, &info, oldset, regs)) == 0) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag.
-                        */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-                       /*
-                        * Let tracing know that we've done the handler setup.
-                        */
-                       tracehook_signal_handler(signr, &info, &ka, regs,
-                                        test_thread_flag(TIF_SINGLE_STEP));
-               }
+               if (is_compat_task())
+                       handle_signal32(signr, &ka, &info, oldset, regs);
+               else
+                       handle_signal(signr, &ka, &info, oldset, regs);
                return;
        }
 
@@ -484,16 +460,11 @@ void do_signal(struct pt_regs *regs)
        /*
         * If there's no signal to deliver, we just put the saved sigmask back.
         */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs)
 {
        clear_thread_flag(TIF_NOTIFY_RESUME);
        tracehook_notify_resume(regs);
-       if (current->replacement_session_keyring)
-               key_replace_session_keyring();
 }
index 647ba9425893de446e5237d84234c612ab5fedc9..15cca26ccb6c4ff1cbde51c60259a7ec95471718 100644 (file)
@@ -297,26 +297,27 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
 static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
                          void *data, unsigned long stack)
 {
-       struct _lowcore *lc = pcpu->lowcore;
-       unsigned short this_cpu;
+       struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
+       struct {
+               unsigned long   stack;
+               void            *func;
+               void            *data;
+               unsigned long   source;
+       } restart = { stack, func, data, stap() };
 
        __load_psw_mask(psw_kernel_bits);
-       this_cpu = stap();
-       if (pcpu->address == this_cpu)
+       if (pcpu->address == restart.source)
                func(data);     /* should not return */
        /* Stop target cpu (if func returns this stops the current cpu). */
        pcpu_sigp_retry(pcpu, sigp_stop, 0);
        /* Restart func on the target cpu and stop the current cpu. */
-       lc->restart_stack = stack;
-       lc->restart_fn = (unsigned long) func;
-       lc->restart_data = (unsigned long) data;
-       lc->restart_source = (unsigned long) this_cpu;
+       memcpy_absolute(&lc->restart_stack, &restart, sizeof(restart));
        asm volatile(
                "0:     sigp    0,%0,6  # sigp restart to target cpu\n"
                "       brc     2,0b    # busy, try again\n"
                "1:     sigp    0,%1,5  # sigp stop to current cpu\n"
                "       brc     2,1b    # busy, try again\n"
-               : : "d" (pcpu->address), "d" (this_cpu) : "0", "1", "cc");
+               : : "d" (pcpu->address), "d" (restart.source) : "0", "1", "cc");
        for (;;) ;
 }
 
@@ -800,17 +801,6 @@ void __noreturn cpu_die(void)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static void smp_call_os_info_init_fn(void)
-{
-       int (*init_fn)(void);
-       unsigned long size;
-
-       init_fn = os_info_old_entry(OS_INFO_INIT_FN, &size);
-       if (!init_fn)
-               return;
-       init_fn();
-}
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        /* request the 0x1201 emergency signal external interrupt */
@@ -819,7 +809,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        /* request the 0x1202 external call external interrupt */
        if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1202");
-       smp_call_os_info_init_fn();
        smp_detect_cpus();
 }
 
@@ -943,19 +932,6 @@ static struct attribute_group cpu_common_attr_group = {
        .attrs = cpu_common_attrs,
 };
 
-static ssize_t show_capability(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       unsigned int capability;
-       int rc;
-
-       rc = get_cpu_capability(&capability);
-       if (rc)
-               return rc;
-       return sprintf(buf, "%u\n", capability);
-}
-static DEVICE_ATTR(capability, 0444, show_capability, NULL);
-
 static ssize_t show_idle_count(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
@@ -993,7 +969,6 @@ static ssize_t show_idle_time(struct device *dev,
 static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 
 static struct attribute *cpu_online_attrs[] = {
-       &dev_attr_capability.attr,
        &dev_attr_idle_count.attr,
        &dev_attr_idle_time_us.attr,
        NULL,
index 2a94b774695c069241ed413f50a0f9b8b183c656..fa0eb238dac7d8940321c239d970d1245078d219 100644 (file)
@@ -392,27 +392,6 @@ static __init int create_proc_service_level(void)
 }
 subsys_initcall(create_proc_service_level);
 
-/*
- * Bogomips calculation based on cpu capability.
- */
-int get_cpu_capability(unsigned int *capability)
-{
-       struct sysinfo_1_2_2 *info;
-       int rc;
-
-       info = (void *) get_zeroed_page(GFP_KERNEL);
-       if (!info)
-               return -ENOMEM;
-       rc = stsi(info, 1, 2, 2);
-       if (rc == -ENOSYS)
-               goto out;
-       rc = 0;
-       *capability = info->capability;
-out:
-       free_page((unsigned long) info);
-       return rc;
-}
-
 /*
  * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
  */
index a353f0ea45c2235d73455ab9013fbfeb1409223b..b23d9ac77dfc77fcf090f52fc739b17cc036a69c 100644 (file)
@@ -47,9 +47,30 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 {
        VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
        vcpu->stat.diagnose_44++;
-       vcpu_put(vcpu);
-       yield();
-       vcpu_load(vcpu);
+       kvm_vcpu_on_spin(vcpu);
+       return 0;
+}
+
+static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_vcpu *tcpu;
+       int tid;
+       int i;
+
+       tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
+       vcpu->stat.diagnose_9c++;
+       VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
+
+       if (tid == vcpu->vcpu_id)
+               return 0;
+
+       kvm_for_each_vcpu(i, tcpu, kvm)
+               if (tcpu->vcpu_id == tid) {
+                       kvm_vcpu_yield_to(tcpu);
+                       break;
+               }
+
        return 0;
 }
 
@@ -89,6 +110,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
                return diag_release_pages(vcpu);
        case 0x44:
                return __diag_time_slice_end(vcpu);
+       case 0x9c:
+               return __diag_time_slice_end_directed(vcpu);
        case 0x308:
                return __diag_ipl_functions(vcpu);
        default:
index 361456577c6f0f537721cb4eb2bcccb8115026a3..979cbe55bf5ef1dd42589935ba04aa208d2bb30a 100644 (file)
@@ -101,6 +101,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
 }
 
 static intercept_handler_t instruction_handlers[256] = {
+       [0x01] = kvm_s390_handle_01,
        [0x83] = kvm_s390_handle_diag,
        [0xae] = kvm_s390_handle_sigp,
        [0xb2] = kvm_s390_handle_b2,
index 217ce44395a4e7978194a7c9cc042bad1f84e889..664766d0c83c6a8461634c4dfd31679cf74c7b02 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/pgtable.h>
 #include <asm/nmi.h>
 #include <asm/switch_to.h>
+#include <asm/sclp.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 
@@ -74,6 +75,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
+       { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
 };
 
@@ -133,8 +135,16 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_S390_UCONTROL:
 #endif
        case KVM_CAP_SYNC_REGS:
+       case KVM_CAP_ONE_REG:
                r = 1;
                break;
+       case KVM_CAP_NR_VCPUS:
+       case KVM_CAP_MAX_VCPUS:
+               r = KVM_MAX_VCPUS;
+               break;
+       case KVM_CAP_S390_COW:
+               r = sclp_get_fac85() & 0x2;
+               break;
        default:
                r = 0;
        }
@@ -423,6 +433,71 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+       /* kvm common code refers to this, but never calls it */
+       BUG();
+       return 0;
+}
+
+static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
+                                          struct kvm_one_reg *reg)
+{
+       int r = -EINVAL;
+
+       switch (reg->id) {
+       case KVM_REG_S390_TODPR:
+               r = put_user(vcpu->arch.sie_block->todpr,
+                            (u32 __user *)reg->addr);
+               break;
+       case KVM_REG_S390_EPOCHDIFF:
+               r = put_user(vcpu->arch.sie_block->epoch,
+                            (u64 __user *)reg->addr);
+               break;
+       case KVM_REG_S390_CPU_TIMER:
+               r = put_user(vcpu->arch.sie_block->cputm,
+                            (u64 __user *)reg->addr);
+               break;
+       case KVM_REG_S390_CLOCK_COMP:
+               r = put_user(vcpu->arch.sie_block->ckc,
+                            (u64 __user *)reg->addr);
+               break;
+       default:
+               break;
+       }
+
+       return r;
+}
+
+static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
+                                          struct kvm_one_reg *reg)
+{
+       int r = -EINVAL;
+
+       switch (reg->id) {
+       case KVM_REG_S390_TODPR:
+               r = get_user(vcpu->arch.sie_block->todpr,
+                            (u32 __user *)reg->addr);
+               break;
+       case KVM_REG_S390_EPOCHDIFF:
+               r = get_user(vcpu->arch.sie_block->epoch,
+                            (u64 __user *)reg->addr);
+               break;
+       case KVM_REG_S390_CPU_TIMER:
+               r = get_user(vcpu->arch.sie_block->cputm,
+                            (u64 __user *)reg->addr);
+               break;
+       case KVM_REG_S390_CLOCK_COMP:
+               r = get_user(vcpu->arch.sie_block->ckc,
+                            (u64 __user *)reg->addr);
+               break;
+       default:
+               break;
+       }
+
+       return r;
+}
+
 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
 {
        kvm_s390_vcpu_initial_reset(vcpu);
@@ -753,6 +828,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
+       case KVM_SET_ONE_REG:
+       case KVM_GET_ONE_REG: {
+               struct kvm_one_reg reg;
+               r = -EFAULT;
+               if (copy_from_user(&reg, argp, sizeof(reg)))
+                       break;
+               if (ioctl == KVM_SET_ONE_REG)
+                       r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
+               else
+                       r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
+               break;
+       }
 #ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;
index ff28f9d1c9eb94a2f50c2775ddf35cfbd3573a54..2294377975e8cf56ad06e03d380a4e211820bce3 100644 (file)
@@ -79,6 +79,7 @@ int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
 /* implemented in priv.c */
 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
 
 /* implemented in sigp.c */
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
index e5a45dbd26ac58e98cdf48b198128e067ef66e31..68a6b2ed16bf125c56ce7456e76c68f4fd6bd236 100644 (file)
@@ -380,3 +380,34 @@ int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
        return -EOPNOTSUPP;
 }
 
+static int handle_sckpf(struct kvm_vcpu *vcpu)
+{
+       u32 value;
+
+       if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+               return kvm_s390_inject_program_int(vcpu,
+                                                  PGM_PRIVILEGED_OPERATION);
+
+       if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
+               return kvm_s390_inject_program_int(vcpu,
+                                                  PGM_SPECIFICATION);
+
+       value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
+       vcpu->arch.sie_block->todpr = value;
+
+       return 0;
+}
+
+static intercept_handler_t x01_handlers[256] = {
+       [0x07] = handle_sckpf,
+};
+
+int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
+{
+       intercept_handler_t handler;
+
+       handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
+       if (handler)
+               return handler(vcpu);
+       return -EOPNOTSUPP;
+}
index 60455f104ea36ee9d3a22fd7100031c21f94866e..58a75a8ae90ce7beae1a4553801c47dd544cd1ee 100644 (file)
@@ -14,7 +14,7 @@
 #include <asm/futex.h>
 #include "uaccess.h"
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define AHI    "ahi"
 #define ALR    "alr"
 #define CLR    "clr"
index bb1a7eed42ce4cbef8350dca7a8eb85269fb1966..57e94298539b51326ff1d9a4a10a9ef36c405326 100644 (file)
@@ -15,7 +15,7 @@
 #include <asm/futex.h>
 #include "uaccess.h"
 
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 #define AHI    "ahi"
 #define ALR    "alr"
 #define CLR    "clr"
index 795a0a9bb2eba72875aa3a300f98929daa1c6c12..921fa541dc0431050dfc6b429b4836b94972d6a0 100644 (file)
@@ -101,19 +101,27 @@ int memcpy_real(void *dest, void *src, size_t count)
 }
 
 /*
- * Copy memory to absolute zero
+ * Copy memory in absolute mode (kernel to kernel)
  */
-void copy_to_absolute_zero(void *dest, void *src, size_t count)
+void memcpy_absolute(void *dest, void *src, size_t count)
 {
-       unsigned long cr0;
+       unsigned long cr0, flags, prefix;
 
-       BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore));
-       preempt_disable();
+       flags = arch_local_irq_save();
        __ctl_store(cr0, 0, 0);
        __ctl_clear_bit(0, 28); /* disable lowcore protection */
-       memcpy_real(dest + store_prefix(), src, count);
+       prefix = store_prefix();
+       if (prefix) {
+               local_mcck_disable();
+               set_prefix(0);
+               memcpy(dest, src, count);
+               set_prefix(prefix);
+               local_mcck_enable();
+       } else {
+               memcpy(dest, src, count);
+       }
        __ctl_load(cr0, 0, 0);
-       preempt_enable();
+       arch_local_irq_restore(flags);
 }
 
 /*
@@ -187,20 +195,6 @@ static int is_swapped(unsigned long addr)
        return 0;
 }
 
-/*
- * Return swapped prefix or zero page address
- */
-static unsigned long get_swapped(unsigned long addr)
-{
-       unsigned long prefix = store_prefix();
-
-       if (addr < sizeof(struct _lowcore))
-               return addr + prefix;
-       if (addr >= prefix && addr < prefix + sizeof(struct _lowcore))
-               return addr - prefix;
-       return addr;
-}
-
 /*
  * Convert a physical pointer for /dev/mem access
  *
@@ -218,7 +212,7 @@ void *xlate_dev_mem_ptr(unsigned long addr)
                size = PAGE_SIZE - (addr & ~PAGE_MASK);
                bounce = (void *) __get_free_page(GFP_ATOMIC);
                if (bounce)
-                       memcpy_real(bounce, (void *) get_swapped(addr), size);
+                       memcpy_absolute(bounce, (void *) addr, size);
        }
        preempt_enable();
        put_online_cpus();
index 4799383e2df9551c45ad69f08f57455bb9771dc0..71ae20df674e53f051834f20319f3c48fae1358b 100644 (file)
@@ -109,7 +109,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
                pm_dir = pmd_offset(pu_dir, address);
 
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
                if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
                    (address + HPAGE_SIZE <= start + size) &&
                    (address >= HPAGE_SIZE)) {
index c6646de07bf455acb80c3c94674cdc61c535677c..a4a89fa980d6c4e0e8e44a00e8519d9ead2ac2b1 100644 (file)
@@ -235,7 +235,7 @@ static void hws_ext_handler(struct ext_code ext_code,
        if (!(param32 & CPU_MF_INT_SF_MASK))
                return;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++;
+       kstat_cpu(smp_processor_id()).irqs[EXTINT_CMS]++;
        atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
 
        if (hws_wq)
diff --git a/arch/score/include/asm/kvm_para.h b/arch/score/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index d4a49011c48a58d4311b182f6015efeb67a55e6d..e382c52ca0d90b455d9bee68f7587f4278d36bc8 100644 (file)
@@ -34,8 +34,6 @@
 #include <asm/syscalls.h>
 #include <asm/ucontext.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 struct rt_sigframe {
        u32 rs_ass[4];          /* argument save space */
        u32 rs_code[2];         /* signal trampoline */
@@ -162,7 +160,6 @@ score_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
@@ -241,11 +238,9 @@ give_sigsegv:
        return -EFAULT;
 }
 
-static int handle_signal(unsigned long sig, siginfo_t *info,
-       struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
+static void handle_signal(unsigned long sig, siginfo_t *info,
+       struct k_sigaction *ka, struct pt_regs *regs)
 {
-       int ret;
-
        if (regs->is_syscall) {
                switch (regs->regs[4]) {
                case ERESTART_RESTARTBLOCK:
@@ -269,18 +264,15 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
        /*
         * Set up the stack frame
         */
-       ret = setup_rt_frame(ka, regs, sig, oldset, info);
-
-       if (ret == 0)
-               block_sigmask(ka, sig);
+       if (setup_rt_frame(ka, regs, sig, sigmask_to_save(), info) < 0)
+               return;
 
-       return ret;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 static void do_signal(struct pt_regs *regs)
 {
        struct k_sigaction ka;
-       sigset_t *oldset;
        siginfo_t info;
        int signr;
 
@@ -292,25 +284,10 @@ static void do_signal(struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Actually deliver the signal.  */
-               if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag.
-                        */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
-
+               handle_signal(signr, &info, &ka, regs);
                return;
        }
 
@@ -337,10 +314,7 @@ static void do_signal(struct pt_regs *regs)
         * If there's no signal to deliver, we just put the saved sigmask
         * back
         */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 /*
@@ -356,7 +330,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index 34cd0c5ff2e1bd104e6de9ff407ff478d2bb1aa7..a8a1ca741c8599b975420dd1a796a573a33e40bc 100644 (file)
@@ -188,7 +188,6 @@ static struct platform_nand_data migor_nand_flash_data = {
                .partitions = migor_nand_flash_partitions,
                .nr_partitions = ARRAY_SIZE(migor_nand_flash_partitions),
                .chip_delay = 20,
-               .part_probe_types = (const char *[]) { "cmdlinepart", NULL },
        },
        .ctrl = {
                .dev_ready = migor_nand_flash_ready,
diff --git a/arch/sh/include/asm/kvm_para.h b/arch/sh/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index abda58467ece9e86ff1249029bcc7143281b2f04..ba0bdc423b072fa62f74fbc64e1bc5b683f2af7d 100644 (file)
@@ -3,8 +3,6 @@
 
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 typedef unsigned short __kernel_uid_t;
index fcda07b4a616be8196f105ce5d2faee8682c9af1..244f7e950e176b0cbdc907f70b4fdf88572b08f0 100644 (file)
@@ -3,8 +3,6 @@
 
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 typedef unsigned short __kernel_uid_t;
index 0c04ffc4f12c41d344eded872aea06aaf7232e2b..bc13b57cdc834210b95aaa70ea603298ed55f468 100644 (file)
@@ -169,7 +169,7 @@ static inline void set_restore_sigmask(void)
 {
        struct thread_info *ti = current_thread_info();
        ti->status |= TS_RESTORE_SIGMASK;
-       set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
+       WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
 }
 
 #define TI_FLAG_FAULT_CODE_SHIFT       24
@@ -189,6 +189,23 @@ static inline unsigned int get_thread_fault_code(void)
        struct thread_info *ti = current_thread_info();
        return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
 }
+
+static inline void clear_restore_sigmask(void)
+{
+       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+       return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       if (!(ti->status & TS_RESTORE_SIGMASK))
+               return false;
+       ti->status &= ~TS_RESTORE_SIGMASK;
+       return true;
+}
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
index cb4172c8af7d81c90e373a7c9a7ecb61a0bbd3a9..d6b7b6154f8764576abac916033326ba1c1e6bd2 100644 (file)
@@ -32,8 +32,6 @@
 #include <asm/syscalls.h>
 #include <asm/fpu.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 struct fdpic_func_descriptor {
        unsigned long   text;
        unsigned long   GOT;
@@ -226,7 +224,6 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
                                    sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->sc, &r0))
@@ -256,7 +253,6 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
@@ -522,10 +518,11 @@ handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs,
 /*
  * OK, we're invoking a handler
  */
-static int
+static void
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
-             sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0)
+             struct pt_regs *regs, unsigned int save_r0)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Set up the stack frame */
@@ -534,10 +531,10 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
        else
                ret = setup_frame(sig, ka, oldset, regs);
 
-       if (ret == 0)
-               block_sigmask(ka, sig);
-
-       return ret;
+       if (ret)
+               return;
+       signal_delivered(sig, info, ka, regs,
+                       test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -554,7 +551,6 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
 
        /*
         * We want the common case to go fast, which
@@ -565,30 +561,12 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
        if (!user_mode(regs))
                return;
 
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                handle_syscall_restart(save_r0, regs, &ka.sa);
 
                /* Whee!  Actually deliver the signal.  */
-               if (handle_signal(signr, &ka, &info, oldset,
-                                 regs, save_r0) == 0) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TS_RESTORE_SIGMASK flag
-                        */
-                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
-                       tracehook_signal_handler(signr, &info, &ka, regs,
-                                       test_thread_flag(TIF_SINGLESTEP));
-               }
-
+               handle_signal(signr, &ka, &info, regs, save_r0);
                return;
        }
 
@@ -610,10 +588,7 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
         * If there's no signal to deliver, we just put the saved sigmask
         * back.
         */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 }
 
 asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
@@ -626,7 +601,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index b589a354c069aec0ee66d231b7790c7c0d9b3717..6b5b3dfe886b2d3c15a4b08a4d4f21ab74e90da0 100644 (file)
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-static int
+static void
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-               sigset_t *oldset, struct pt_regs * regs);
+               struct pt_regs * regs);
 
 static inline void
 handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa)
@@ -88,7 +86,6 @@ static void do_signal(struct pt_regs *regs)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
 
        /*
         * We want the common case to go fast, which
@@ -99,28 +96,13 @@ static void do_signal(struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, 0);
        if (signr > 0) {
                handle_syscall_restart(regs, &ka.sa);
 
                /* Whee!  Actually deliver the signal.  */
-               if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-                       /*
-                        * If a signal was successfully delivered, the
-                        * saved sigmask is in its frame, and we can
-                        * clear the TS_RESTORE_SIGMASK flag.
-                        */
-                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
-                       tracehook_signal_handler(signr, &info, &ka, regs,
-                                       test_thread_flag(TIF_SINGLESTEP));
-                       return;
-               }
+               handle_signal(signr, &info, &ka, regs);
+               return;
        }
 
        /* Did we come from a system call? */
@@ -143,12 +125,7 @@ static void do_signal(struct pt_regs *regs)
        }
 
        /* No signal to deliver -- put the saved sigmask back */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
-
-       return;
+       restore_saved_sigmask();
 }
 
 /*
@@ -351,7 +328,6 @@ asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
                                    sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->sc, &ret))
@@ -384,7 +360,6 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
@@ -659,10 +634,11 @@ give_sigsegv:
 /*
  * OK, we're invoking a handler
  */
-static int
+static void
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-               sigset_t *oldset, struct pt_regs * regs)
+               struct pt_regs * regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Set up the stack frame */
@@ -671,10 +647,11 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
        else
                ret = setup_frame(sig, ka, oldset, regs);
 
-       if (ret == 0)
-               block_sigmask(ka, sig);
+       if (ret)
+               return;
 
-       return ret;
+       signal_delivered(sig, info, ka, regs,
+                       test_thread_flag(TIF_SINGLESTEP));
 }
 
 asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
@@ -685,7 +662,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
index b86e9ca79455d7cacb08a21b2e2199bc8879ee7c..2062aa88af41cc696d1f5892782245c4d73190fa 100644 (file)
@@ -123,7 +123,6 @@ void native_play_dead(void)
 int __cpu_disable(void)
 {
        unsigned int cpu = smp_processor_id();
-       struct task_struct *p;
        int ret;
 
        ret = mp_ops->cpu_disable(cpu);
@@ -153,11 +152,7 @@ int __cpu_disable(void)
        flush_cache_all();
        local_flush_tlb_all();
 
-       read_lock(&tasklist_lock);
-       for_each_process(p)
-               if (p->mm)
-                       cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
-       read_unlock(&tasklist_lock);
+       clear_tasks_mm_cpumask(cpu);
 
        return 0;
 }
index 15e9e05740da3382a43f9d8f4c45585ee461e1d1..e74ff137762661844783fe76a30986fe01308e9e 100644 (file)
@@ -35,12 +35,12 @@ config SPARC
        select GENERIC_CMOS_UPDATE
        select GENERIC_CLOCKEVENTS
        select GENERIC_STRNCPY_FROM_USER
+       select GENERIC_STRNLEN_USER
 
 config SPARC32
        def_bool !64BIT
        select GENERIC_ATOMIC64
        select CLZ_TAB
-       select ARCH_USES_GETTIMEOFFSET
 
 config SPARC64
        def_bool 64BIT
index 2c2e38821f6083090e8b276689bbf70fa47a307f..67f83e0a0d68d47e3bebff7a365897614bfa411e 100644 (file)
@@ -21,3 +21,4 @@ generic-y += div64.h
 generic-y += local64.h
 generic-y += irq_regs.h
 generic-y += local.h
+generic-y += word-at-a-time.h
index cbb93e5141de0ff27d4d0197363fff3c23397e9b..61ebe7411ceb0af141b4bc9ceef1c1bcf36521fc 100644 (file)
 #define ASI_M_UNA01         0x01   /* Same here... */
 #define ASI_M_MXCC          0x02   /* Access to TI VIKING MXCC registers */
 #define ASI_M_FLUSH_PROBE   0x03   /* Reference MMU Flush/Probe; rw, ss */
-#ifndef CONFIG_SPARC_LEON
 #define ASI_M_MMUREGS       0x04   /* MMU Registers; rw, ss */
-#else
-#define ASI_M_MMUREGS       0x19
-#endif /* CONFIG_SPARC_LEON */
 #define ASI_M_TLBDIAG       0x05   /* MMU TLB only Diagnostics */
 #define ASI_M_DIAGS         0x06   /* Reference MMU Diagnostics */
 #define ASI_M_IODIAG        0x07   /* MMU I/O TLB only Diagnostics */
index 02a172fb193aaded11b3080f2e5337ee951e1ad9..a0e28ef025587a8384f3825f04a5ec66cd99ca75 100644 (file)
 /* All traps low-level code here must end with this macro. */
 #define RESTORE_ALL b ret_trap_entry; clr %l6;
 
+/* Support for run-time patching of single instructions.
+ * This is used to handle the differences in the ASI for
+ * MMUREGS for LEON and SUN.
+ *
+ * Sample:
+ * LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %o0
+ * SUN_PI_(lda [%g0] ASI_M_MMUREGS, %o0
+ * PI == Patch Instruction
+ *
+ * For LEON we will use the first variant,
+ * and for all other we will use the SUN variant.
+ * The order is important.
+ */
+#define LEON_PI(...)                           \
+662:   __VA_ARGS__
+
+#define SUN_PI_(...)                           \
+       .section .leon_1insn_patch, "ax";       \
+       .word 662b;                             \
+       __VA_ARGS__;                            \
+       .previous
+
 #endif /* !(_SPARC_ASMMACRO_H) */
index 48a7c65731d2e0cf08ab2e974adcf922f2dddc58..8493fd3c7ba5a5ea39364948c26ecdd4feef41b8 100644 (file)
@@ -12,13 +12,18 @@ extern int dma_supported(struct device *dev, u64 mask);
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
+extern struct dma_map_ops *dma_ops;
+extern struct dma_map_ops *leon_dma_ops;
+extern struct dma_map_ops pci32_dma_ops;
+
 extern struct bus_type pci_bus_type;
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
-       if (dev->bus == &pci_bus_type)
+       if (sparc_cpu_model == sparc_leon)
+               return leon_dma_ops;
+       else if (dev->bus == &pci_bus_type)
                return &pci32_dma_ops;
 #endif
        return dma_ops;
diff --git a/arch/sparc/include/asm/kvm_para.h b/arch/sparc/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index 07659124c1404da4ac11200c5269eb7131c0929f..3375c6293893654bfb0ab46a41ee084ccd3d8cc6 100644 (file)
@@ -8,8 +8,6 @@
 #ifndef LEON_H_INCLUDE
 #define LEON_H_INCLUDE
 
-#ifdef CONFIG_SPARC_LEON
-
 /* mmu register access, ASI_LEON_MMUREGS */
 #define LEON_CNR_CTRL          0x000
 #define LEON_CNR_CTXP          0x100
 
 #ifndef __ASSEMBLY__
 
-/* do a virtual address read without cache */
-static inline unsigned long leon_readnobuffer_reg(unsigned long paddr)
-{
-       unsigned long retval;
-       __asm__ __volatile__("lda [%1] %2, %0\n\t" :
-                            "=r"(retval) : "r"(paddr), "i"(ASI_LEON_NOCACHE));
-       return retval;
-}
-
 /* do a physical address bypass write, i.e. for 0x80000000 */
 static inline void leon_store_reg(unsigned long paddr, unsigned long value)
 {
@@ -87,47 +76,16 @@ static inline unsigned long leon_load_reg(unsigned long paddr)
        return retval;
 }
 
-static inline void leon_srmmu_disabletlb(void)
-{
-       unsigned int retval;
-       __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
-                            "i"(ASI_LEON_MMUREGS));
-       retval |= LEON_CNR_CTRL_TLBDIS;
-       __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
-                            "i"(ASI_LEON_MMUREGS) : "memory");
-}
-
-static inline void leon_srmmu_enabletlb(void)
-{
-       unsigned int retval;
-       __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
-                            "i"(ASI_LEON_MMUREGS));
-       retval = retval & ~LEON_CNR_CTRL_TLBDIS;
-       __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
-                            "i"(ASI_LEON_MMUREGS) : "memory");
-}
-
 /* macro access for leon_load_reg() and leon_store_reg() */
 #define LEON3_BYPASS_LOAD_PA(x)            (leon_load_reg((unsigned long)(x)))
 #define LEON3_BYPASS_STORE_PA(x, v) (leon_store_reg((unsigned long)(x), (unsigned long)(v)))
-#define LEON3_BYPASS_ANDIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) & v)
-#define LEON3_BYPASS_ORIN_PA(x, v)  LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) | v)
 #define LEON_BYPASS_LOAD_PA(x)      leon_load_reg((unsigned long)(x))
 #define LEON_BYPASS_STORE_PA(x, v)  leon_store_reg((unsigned long)(x), (unsigned long)(v))
-#define LEON_REGLOAD_PA(x)          leon_load_reg((unsigned long)(x)+LEON_PREGS)
-#define LEON_REGSTORE_PA(x, v)      leon_store_reg((unsigned long)(x)+LEON_PREGS, (unsigned long)(v))
-#define LEON_REGSTORE_OR_PA(x, v)   LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) | (unsigned long)(v))
-#define LEON_REGSTORE_AND_PA(x, v)  LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) & (unsigned long)(v))
-
-/* macro access for leon_readnobuffer_reg() */
-#define LEON_BYPASSCACHE_LOAD_VA(x) leon_readnobuffer_reg((unsigned long)(x))
 
 extern void leon_init(void);
 extern void leon_switch_mm(void);
 extern void leon_init_IRQ(void);
 
-extern unsigned long last_valid_pfn;
-
 static inline unsigned long sparc_leon3_get_dcachecfg(void)
 {
        unsigned int retval;
@@ -230,9 +188,6 @@ static inline int sparc_leon3_cpuid(void)
 #error cannot determine LEON_PAGE_SIZE_LEON
 #endif
 
-#define PAGE_MIN_SHIFT   (12)
-#define PAGE_MIN_SIZE    (1UL << PAGE_MIN_SHIFT)
-
 #define LEON3_XCCR_SETS_MASK  0x07000000UL
 #define LEON3_XCCR_SSIZE_MASK 0x00f00000UL
 
@@ -242,7 +197,7 @@ static inline int sparc_leon3_cpuid(void)
 #ifndef __ASSEMBLY__
 struct vm_area_struct;
 
-extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
+extern unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr);
 extern void leon_flush_icache_all(void);
 extern void leon_flush_dcache_all(void);
 extern void leon_flush_cache_all(void);
@@ -258,15 +213,7 @@ struct leon3_cacheregs {
        unsigned long dccr;     /* 0x0c - Data Cache Configuration Register */
 };
 
-/* struct that hold LEON2 cache configuration register
- * & configuration register
- */
-struct leon2_cacheregs {
-       unsigned long ccr, cfg;
-};
-
-#ifdef __KERNEL__
-
+#include <linux/irq.h>
 #include <linux/interrupt.h>
 
 struct device_node;
@@ -292,24 +239,15 @@ extern void leon_smp_done(void);
 extern void leon_boot_cpus(void);
 extern int leon_boot_one_cpu(int i, struct task_struct *);
 void leon_init_smp(void);
-extern void cpu_idle(void);
-extern void init_IRQ(void);
-extern void cpu_panic(void);
-extern int __leon_processor_id(void);
 void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
 extern irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused);
 
-extern unsigned int real_irq_entry[];
 extern unsigned int smpleon_ipi[];
-extern unsigned int patchme_maybe_smp_msg[];
-extern unsigned int t_nmi[], linux_trap_ipi15_leon[];
-extern unsigned int linux_trap_ipi15_sun4m[];
+extern unsigned int linux_trap_ipi15_leon[];
 extern int leon_ipi_irq;
 
 #endif /* CONFIG_SMP */
 
-#endif /* __KERNEL__ */
-
 #endif /* __ASSEMBLY__ */
 
 /* macros used in leon_mm.c */
@@ -317,18 +255,4 @@ extern int leon_ipi_irq;
 #define _pfn_valid(pfn)         ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base)))
 #define _SRMMU_PTE_PMASK_LEON 0xffffffff
 
-#else /* defined(CONFIG_SPARC_LEON) */
-
-/* nop definitions for !LEON case */
-#define leon_init() do {} while (0)
-#define leon_switch_mm() do {} while (0)
-#define leon_init_IRQ() do {} while (0)
-#define init_leon() do {} while (0)
-#define leon_smp_done() do {} while (0)
-#define leon_boot_cpus() do {} while (0)
-#define leon_boot_one_cpu(i, t) 1
-#define leon_init_smp() do {} while (0)
-
-#endif /* !defined(CONFIG_SPARC_LEON) */
-
 #endif
index e50f326e71bd1244c091da75519f2638b09ab8f9..f3034eddf4682569c257a2ce19941b013a98c218 100644 (file)
@@ -87,8 +87,6 @@ struct amba_prom_registers {
 #define LEON3_GPTIMER_CONFIG_NRTIMERS(c) ((c)->config & 0x7)
 #define LEON3_GPTIMER_CTRL_ISPENDING(r)  (((r)&LEON3_GPTIMER_CTRL_PENDING) ? 1 : 0)
 
-#ifdef CONFIG_SPARC_LEON
-
 #ifndef __ASSEMBLY__
 
 struct leon3_irqctrl_regs_map {
@@ -264,6 +262,4 @@ extern unsigned int sparc_leon_eirq;
 
 #define amba_device(x) (((x) >> 12) & 0xfff)
 
-#endif /* !defined(CONFIG_SPARC_LEON) */
-
 #endif
index cb828703a63ae853f702d781eac4ce69395ad4a9..79da17866fa8997ab3032f444b7598076dacaf36 100644 (file)
         restore %g0, %g0, %g0;
 
 #ifndef __ASSEMBLY__
+extern unsigned long last_valid_pfn;
 
 /* This makes sense. Honest it does - Anton */
 /* XXX Yes but it's ugly as sin.  FIXME. -KMW */
@@ -148,67 +149,13 @@ extern void *srmmu_nocache_pool;
 #define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
 
 /* Accessing the MMU control register. */
-static inline unsigned int srmmu_get_mmureg(void)
-{
-        unsigned int retval;
-       __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
-                            "=r" (retval) :
-                            "i" (ASI_M_MMUREGS));
-       return retval;
-}
-
-static inline void srmmu_set_mmureg(unsigned long regval)
-{
-       __asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
-                            "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
-
-}
-
-static inline void srmmu_set_ctable_ptr(unsigned long paddr)
-{
-       paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
-       __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
-                            "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
-                            "i" (ASI_M_MMUREGS) :
-                            "memory");
-}
-
-static inline void srmmu_set_context(int context)
-{
-       __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
-                            "r" (context), "r" (SRMMU_CTX_REG),
-                            "i" (ASI_M_MMUREGS) : "memory");
-}
-
-static inline int srmmu_get_context(void)
-{
-       register int retval;
-       __asm__ __volatile__("lda [%1] %2, %0\n\t" :
-                            "=r" (retval) :
-                            "r" (SRMMU_CTX_REG),
-                            "i" (ASI_M_MMUREGS));
-       return retval;
-}
-
-static inline unsigned int srmmu_get_fstatus(void)
-{
-       unsigned int retval;
-
-       __asm__ __volatile__("lda [%1] %2, %0\n\t" :
-                            "=r" (retval) :
-                            "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
-       return retval;
-}
-
-static inline unsigned int srmmu_get_faddr(void)
-{
-       unsigned int retval;
-
-       __asm__ __volatile__("lda [%1] %2, %0\n\t" :
-                            "=r" (retval) :
-                            "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
-       return retval;
-}
+unsigned int srmmu_get_mmureg(void);
+void srmmu_set_mmureg(unsigned long regval);
+void srmmu_set_ctable_ptr(unsigned long paddr);
+void srmmu_set_context(int context);
+int srmmu_get_context(void);
+unsigned int srmmu_get_fstatus(void);
+unsigned int srmmu_get_faddr(void);
 
 /* This is guaranteed on all SRMMU's. */
 static inline void srmmu_flush_whole_tlb(void)
@@ -219,23 +166,6 @@ static inline void srmmu_flush_whole_tlb(void)
 
 }
 
-/* These flush types are not available on all chips... */
-#ifndef CONFIG_SPARC_LEON
-static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
-{
-       unsigned long retval;
-
-       vaddr &= PAGE_MASK;
-       __asm__ __volatile__("lda [%1] %2, %0\n\t" :
-                            "=r" (retval) :
-                            "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
-
-       return retval;
-}
-#else
-#define srmmu_hwprobe(addr) srmmu_swprobe(addr, 0)
-#endif
-
 static inline int
 srmmu_get_pte (unsigned long addr)
 {
index 3070f25ae90a3e235eaaf2373949226ea83acfb6..156220ed99eb7dfbfe8696ea9f04da84fc00ff5f 100644 (file)
@@ -9,8 +9,6 @@
 
 #if defined(__sparc__) && defined(__arch64__)
 /* sparc 64 bit */
-typedef unsigned int           __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
 
 typedef unsigned short                __kernel_old_uid_t;
 typedef unsigned short         __kernel_old_gid_t;
@@ -38,9 +36,6 @@ typedef unsigned short         __kernel_gid_t;
 typedef unsigned short         __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef short                  __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef long                   __kernel_daddr_t;
 #define __kernel_daddr_t __kernel_daddr_t
 
index b8c0e5f0a66bbf1e4014bec94292320835914382..cee7ed9c927d9ac433db875646add31e212dc32b 100644 (file)
 #define PSR_VERS    0x0f000000         /* cpu-version field          */
 #define PSR_IMPL    0xf0000000         /* cpu-implementation field   */
 
+#define PSR_VERS_SHIFT         24
+#define PSR_IMPL_SHIFT         28
+#define PSR_VERS_SHIFTED_MASK  0xf
+#define PSR_IMPL_SHIFTED_MASK  0xf
+
+#define PSR_IMPL_TI            0x4
+#define PSR_IMPL_LEON          0xf
+
 #ifdef __KERNEL__
 
 #ifndef __ASSEMBLY__
index 0b0553bbd8a0dd26feb97be864a29997b754643e..f300d1a9b2b6f8f3650ef3b6d712f493e4a6b8b1 100644 (file)
@@ -7,4 +7,7 @@
 /* sparc entry point */
 extern char _start[];
 
+extern char __leon_1insn_patch[];
+extern char __leon_1insn_patch_end[];
+
 #endif
index 5af664932452190e66543500d21c9dcd6ba48763..e6cd224506a9355168ebea0c5ed0be8c3b625120 100644 (file)
@@ -131,8 +131,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
 
 #define _TIF_DO_NOTIFY_RESUME_MASK     (_TIF_NOTIFY_RESUME | \
-                                        _TIF_SIGPENDING | \
-                                        _TIF_RESTORE_SIGMASK)
+                                        _TIF_SIGPENDING)
 
 #endif /* __KERNEL__ */
 
index 7f0981b094517aace2b461e2f6d7db9aa6d0fbfa..cfa8c38fb9c8511d51cfbe17e3d8792584d8b241 100644 (file)
@@ -238,7 +238,23 @@ static inline void set_restore_sigmask(void)
 {
        struct thread_info *ti = current_thread_info();
        ti->status |= TS_RESTORE_SIGMASK;
-       set_bit(TIF_SIGPENDING, &ti->flags);
+       WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+       return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       if (!(ti->status & TS_RESTORE_SIGMASK))
+               return false;
+       ti->status &= ~TS_RESTORE_SIGMASK;
+       return true;
 }
 #endif /* !__ASSEMBLY__ */
 
index 59586b57ef1a45e33a7b29505e35ec192feb808d..53a28dd59f59582b91ec9da33313713d94fe192c 100644 (file)
@@ -16,6 +16,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/processor.h>
+
 #define ARCH_HAS_SORT_EXTABLE
 #define ARCH_HAS_SEARCH_EXTABLE
 
@@ -304,24 +306,8 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n)
                return n;
 }
 
-extern long __strlen_user(const char __user *);
-extern long __strnlen_user(const char __user *, long len);
-
-static inline long strlen_user(const char __user *str)
-{
-       if (!access_ok(VERIFY_READ, str, 0))
-               return 0;
-       else
-               return __strlen_user(str);
-}
-
-static inline long strnlen_user(const char __user *str, long len)
-{
-       if (!access_ok(VERIFY_READ, str, 0))
-               return 0;
-       else
-               return __strnlen_user(str, len);
-}
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
 
 #endif  /* __ASSEMBLY__ */
 
index dcdfb89cbf3fff2a32b87fd6bb232c65e7ee3196..7c831d848b4e10b93737d619f2789fe00ba5fe13 100644 (file)
@@ -17,6 +17,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/processor.h>
+
 /*
  * Sparc64 is segmented, though more like the M68K than the I386.
  * We use the secondary ASI to address user memory, which references a
@@ -257,11 +259,9 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);
 
 #define clear_user __clear_user
 
-extern long __strlen_user(const char __user *);
-extern long __strnlen_user(const char __user *, long len);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
 
-#define strlen_user __strlen_user
-#define strnlen_user __strnlen_user
 #define __copy_to_user_inatomic ___copy_to_user
 #define __copy_from_user_inatomic ___copy_from_user
 
index 72308f9b0096e45b6da24fef022a261989d5eacf..6cf591b7e1c67e2b82c4e28bf177197ce0f602a7 100644 (file)
@@ -51,8 +51,8 @@ obj-y                   += of_device_common.o
 obj-y                   += of_device_$(BITS).o
 obj-$(CONFIG_SPARC64)   += prom_irqtrans.o
 
-obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o
-obj-$(CONFIG_SPARC_LEON)+= leon_pmc.o
+obj-$(CONFIG_SPARC32)   += leon_kernel.o
+obj-$(CONFIG_SPARC32)   += leon_pmc.o
 
 obj-$(CONFIG_SPARC64)   += reboot.o
 obj-$(CONFIG_SPARC64)   += sysfs.o
index 2d1819641769fca63bad5c086bccd7006a8444ef..a6c94a2bf9d4b1150e41b63d3e71a81138fc1f63 100644 (file)
@@ -121,7 +121,7 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
                FPU(-1, NULL)
        }
 },{
-       4,
+       PSR_IMPL_TI,
        .cpu_info = {
                CPU(0, "Texas Instruments, Inc. - SuperSparc-(II)"),
                /* SparcClassic  --  borned STP1010TAB-50*/
@@ -191,7 +191,7 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
                FPU(-1, NULL)
        }
 },{
-       0xF,            /* Aeroflex Gaisler */
+       PSR_IMPL_LEON,          /* Aeroflex Gaisler */
        .cpu_info = {
                CPU(3, "LEON"),
                CPU(-1, NULL)
@@ -440,16 +440,16 @@ static int __init cpu_type_probe(void)
        int psr_impl, psr_vers, fpu_vers;
        int psr;
 
-       psr_impl = ((get_psr() >> 28) & 0xf);
-       psr_vers = ((get_psr() >> 24) & 0xf);
+       psr_impl = ((get_psr() >> PSR_IMPL_SHIFT) & PSR_IMPL_SHIFTED_MASK);
+       psr_vers = ((get_psr() >> PSR_VERS_SHIFT) & PSR_VERS_SHIFTED_MASK);
 
        psr = get_psr();
        put_psr(psr | PSR_EF);
-#ifdef CONFIG_SPARC_LEON
-       fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
-#else
-       fpu_vers = ((get_fsr() >> 17) & 0x7);
-#endif
+
+       if (psr_impl == PSR_IMPL_LEON)
+               fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
+       else
+               fpu_vers = ((get_fsr() >> 17) & 0x7);
 
        put_psr(psr);
 
index 2dbe1806e5300ab8bf49a1c22dda5850c263f75f..dcaa1cf0de40c790b27b32fbbdfe997cfdee072f 100644 (file)
@@ -393,7 +393,6 @@ linux_trap_ipi15_sun4d:
        /* FIXME */
 1:     b,a     1b
 
-#ifdef CONFIG_SPARC_LEON
        .globl  smpleon_ipi
        .extern leon_ipi_interrupt
        /* SMP per-cpu IPI interrupts are handled specially. */
@@ -424,8 +423,6 @@ linux_trap_ipi15_leon:
        b       ret_trap_lockless_ipi
         clr    %l6
 
-#endif /* CONFIG_SPARC_LEON */
-
 #endif /* CONFIG_SMP */
 
        /* This routine handles illegal instructions and privileged
@@ -770,8 +767,11 @@ srmmu_fault:
        mov     0x400, %l5
        mov     0x300, %l4
 
-       lda     [%l5] ASI_M_MMUREGS, %l6        ! read sfar first
-       lda     [%l4] ASI_M_MMUREGS, %l5        ! read sfsr last
+LEON_PI(lda    [%l5] ASI_LEON_MMUREGS, %l6)    ! read sfar first
+SUN_PI_(lda    [%l5] ASI_M_MMUREGS, %l6)       ! read sfar first
+
+LEON_PI(lda    [%l4] ASI_LEON_MMUREGS, %l5)    ! read sfsr last
+SUN_PI_(lda    [%l4] ASI_M_MMUREGS, %l5)       ! read sfsr last
 
        andn    %l6, 0xfff, %l6
        srl     %l5, 6, %l5                     ! and encode all info into l7
index 84b5f0d2afde51b344485c96ee2fbef57e2ae684..e3e80d65e39af1d9167a7ca3156ee2f797112a6c 100644 (file)
@@ -234,7 +234,8 @@ tsetup_srmmu_stackchk:
 
        cmp     %glob_tmp, %sp
        bleu,a  1f
-        lda    [%g0] ASI_M_MMUREGS, %glob_tmp          ! read MMU control
+LEON_PI( lda   [%g0] ASI_LEON_MMUREGS, %glob_tmp)      ! read MMU control
+SUN_PI_( lda   [%g0] ASI_M_MMUREGS, %glob_tmp)         ! read MMU control
 
 trap_setup_user_stack_is_bolixed:
        /* From user/kernel into invalid window w/bad user
@@ -249,18 +250,25 @@ trap_setup_user_stack_is_bolixed:
 1:
        /* Clear the fault status and turn on the no_fault bit. */
        or      %glob_tmp, 0x2, %glob_tmp               ! or in no_fault bit
-       sta     %glob_tmp, [%g0] ASI_M_MMUREGS          ! set it
+LEON_PI(sta    %glob_tmp, [%g0] ASI_LEON_MMUREGS)              ! set it
+SUN_PI_(sta    %glob_tmp, [%g0] ASI_M_MMUREGS)         ! set it
 
        /* Dump the registers and cross fingers. */
        STORE_WINDOW(sp)
 
        /* Clear the no_fault bit and check the status. */
        andn    %glob_tmp, 0x2, %glob_tmp
-       sta     %glob_tmp, [%g0] ASI_M_MMUREGS
+LEON_PI(sta    %glob_tmp, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta    %glob_tmp, [%g0] ASI_M_MMUREGS)
+
        mov     AC_M_SFAR, %glob_tmp
-       lda     [%glob_tmp] ASI_M_MMUREGS, %g0
+LEON_PI(lda    [%glob_tmp] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda    [%glob_tmp] ASI_M_MMUREGS, %g0)
+
        mov     AC_M_SFSR, %glob_tmp
-       lda     [%glob_tmp] ASI_M_MMUREGS, %glob_tmp    ! save away status of winstore
+LEON_PI(lda    [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)! save away status of winstore
+SUN_PI_(lda    [%glob_tmp] ASI_M_MMUREGS, %glob_tmp)   ! save away status of winstore
+
        andcc   %glob_tmp, 0x2, %g0                     ! did we fault?
        bne     trap_setup_user_stack_is_bolixed        ! failure
         nop
index a0f5c20e4b9cd5f286432d8bd4dc678cbc62e0bd..afeb1d7703032a76c5b29b38ec98cd068f74cb55 100644 (file)
  * the cpu-type
  */
        .align 4
-cputyp:
-        .word   1
-
-       .align 4
        .globl cputypval
 cputypval:
        .asciz "sun4m"
@@ -46,8 +42,8 @@ cputypvar:
 
        .align 4
 
-sun4c_notsup:
-       .asciz  "Sparc-Linux sun4/sun4c support does no longer exist.\n\n"
+notsup:
+       .asciz  "Sparc-Linux sun4/sun4c or MMU-less not supported\n\n"
        .align 4
 
 sun4e_notsup:
@@ -123,7 +119,7 @@ current_pc:
                tst     %o0
                be      no_sun4u_here
                 mov    %g4, %o7                /* Previous %o7. */
-       
+
                mov     %o0, %l0                ! stash away romvec
                mov     %o0, %g7                ! put it here too
                mov     %o1, %l1                ! stash away debug_vec too
@@ -132,7 +128,7 @@ current_pc:
                set     current_pc, %g5
                cmp     %g3, %g5
                be      already_mapped
-                nop 
+                nop
 
                /* %l6 will hold the offset we have to subtract
                 * from absolute symbols in order to access areas
@@ -192,9 +188,9 @@ copy_prom_done:
                bne     not_a_sun4
                 nop
 
-halt_sun4_or_sun4c:
+halt_notsup:
                ld      [%g7 + 0x68], %o1
-               set     sun4c_notsup, %o0
+               set     notsup, %o0
                sub     %o0, %l6, %o0
                call    %o1
                 nop
@@ -202,18 +198,31 @@ halt_sun4_or_sun4c:
                 nop
 
 not_a_sun4:
+               /* It looks like this is a machine we support.
+                * Now find out what MMU we are dealing with
+                * LEON - identified by the psr.impl field
+                * Viking - identified by the psr.impl field
+                * In all other cases a sun4m srmmu.
+                * We check that the MMU is enabled in all cases.
+                */
+
+               /* Check if this is a LEON CPU */
+               rd      %psr, %g3
+               srl     %g3, PSR_IMPL_SHIFT, %g3
+               and     %g3, PSR_IMPL_SHIFTED_MASK, %g3
+               cmp     %g3, PSR_IMPL_LEON
+               be      leon_remap              /* It is a LEON - jump */
+                nop
+
+               /* Sanity-check, is MMU enabled */
                lda     [%g0] ASI_M_MMUREGS, %g1
                andcc   %g1, 1, %g0
-               be      halt_sun4_or_sun4c
+               be      halt_notsup
                 nop
 
-srmmu_remap:
-               /* First, check for a viking (TI) module. */
-               set     0x40000000, %g2
-               rd      %psr, %g3
-               and     %g2, %g3, %g3
-               subcc   %g3, 0x0, %g0
-               bz      srmmu_nviking
+               /* Check for a viking (TI) module. */
+               cmp     %g3, PSR_IMPL_TI
+               bne     srmmu_not_viking
                 nop
 
                /* Figure out what kind of viking we are on.
@@ -228,14 +237,14 @@ srmmu_remap:
                lda     [%g0] ASI_M_MMUREGS, %g3        ! peek in the control reg
                and     %g2, %g3, %g3
                subcc   %g3, 0x0, %g0
-               bnz     srmmu_nviking                   ! is in mbus mode
+               bnz     srmmu_not_viking                        ! is in mbus mode
                 nop
-               
+
                rd      %psr, %g3                       ! DO NOT TOUCH %g3
                andn    %g3, PSR_ET, %g2
                wr      %g2, 0x0, %psr
                WRITE_PAUSE
-               
+
                /* Get context table pointer, then convert to
                 * a physical address, which is 36 bits.
                 */
@@ -258,12 +267,12 @@ srmmu_remap:
                lda     [%g4] ASI_M_BYPASS, %o1         ! This is a level 1 ptr
                srl     %o1, 0x4, %o1                   ! Clear low 4 bits
                sll     %o1, 0x8, %o1                   ! Make physical
-               
+
                /* Ok, pull in the PTD. */
                lda     [%o1] ASI_M_BYPASS, %o2         ! This is the 0x0 16MB pgd
 
                /* Calculate to KERNBASE entry. */
-               add     %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3           
+               add     %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3
 
                /* Poke the entry into the calculated address. */
                sta     %o2, [%o3] ASI_M_BYPASS
@@ -293,12 +302,12 @@ srmmu_remap:
                b       go_to_highmem
                 nop
 
+srmmu_not_viking:
                /* This works on viking's in Mbus mode and all
                 * other MBUS modules.  It is virtually the same as
                 * the above madness sans turning traps off and flipping
                 * the AC bit.
                 */
-srmmu_nviking:
                set     AC_M_CTPR, %g1
                lda     [%g1] ASI_M_MMUREGS, %g1        ! get ctx table ptr
                sll     %g1, 0x4, %g1                   ! make physical addr
@@ -313,6 +322,29 @@ srmmu_nviking:
                 nop                                    ! wheee....
 
 
+leon_remap:
+               /* Sanity-check, is MMU enabled */
+               lda     [%g0] ASI_LEON_MMUREGS, %g1
+               andcc   %g1, 1, %g0
+               be      halt_notsup
+                nop
+
+               /* Same code as in the srmmu_not_viking case,
+                * with the LEON ASI for mmuregs
+                */
+               set     AC_M_CTPR, %g1
+               lda     [%g1] ASI_LEON_MMUREGS, %g1     ! get ctx table ptr
+               sll     %g1, 0x4, %g1                   ! make physical addr
+               lda     [%g1] ASI_M_BYPASS, %g1         ! ptr to level 1 pg_table
+               srl     %g1, 0x4, %g1
+               sll     %g1, 0x8, %g1                   ! make phys addr for l1 tbl
+
+               lda     [%g1] ASI_M_BYPASS, %g2         ! get level1 entry for 0x0
+               add     %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
+               sta     %g2, [%g3] ASI_M_BYPASS         ! place at KERNBASE entry
+               b       go_to_highmem
+                nop                                    ! wheee....
+
 /* Now do a non-relative jump so that PC is in high-memory */
 go_to_highmem:
                set     execute_in_high_mem, %g1
@@ -336,8 +368,9 @@ execute_in_high_mem:
                sethi   %hi(linux_dbvec), %g1
                st      %o1, [%g1 + %lo(linux_dbvec)]
 
-/* Get the machine type via the mysterious romvec node operations. */
-
+               /* Get the machine type via the romvec
+                * getprops node operation
+                */
                add     %g7, 0x1c, %l1
                ld      [%l1], %l0
                ld      [%l0], %l0
@@ -356,9 +389,42 @@ execute_in_high_mem:
                                                ! to a buf where above string
                                                ! will get stored by the prom.
 
-#ifdef CONFIG_SPARC_LEON
-               /* no cpu-type check is needed, it is a SPARC-LEON */
 
+               /* Check value of "compatible" property.
+                * "value" => "model"
+                * leon => sparc_leon
+                * sun4m => sun4m
+                * sun4s => sun4m
+                * sun4d => sun4d
+                * sun4e => "no_sun4e_here"
+                * '*'   => "no_sun4u_here"
+                * Check single letters only
+                */
+
+               set     cputypval, %o2
+               /* If cputypval[0] == 'l' (lower case letter L) this is leon */
+               ldub    [%o2], %l1
+               cmp     %l1, 'l'
+               be      leon_init
+                nop
+
+               /* Check cputypval[4] to find the sun model */
+               ldub    [%o2 + 0x4], %l1
+
+               cmp     %l1, 'm'
+               be      sun4m_init
+                cmp    %l1, 's'
+               be      sun4m_init
+                cmp    %l1, 'd'
+               be      sun4d_init
+                cmp    %l1, 'e'
+               be      no_sun4e_here           ! Could be a sun4e.
+                nop
+               b       no_sun4u_here           ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
+                nop
+
+leon_init:
+               /* LEON CPU - set boot_cpu_id */
                sethi   %hi(boot_cpu_id), %g2   ! boot-cpu index
 
 #ifdef CONFIG_SMP
@@ -376,26 +442,6 @@ execute_in_high_mem:
 
                ba continue_boot
                 nop
-#endif
-
-/* Check to cputype. We may be booted on a sun4u (64 bit box),
- * and sun4d needs special treatment.
- */
-
-               set     cputypval, %o2
-               ldub    [%o2 + 0x4], %l1
-
-               cmp     %l1, 'm'
-               be      sun4m_init
-                cmp    %l1, 's'
-               be      sun4m_init
-                cmp    %l1, 'd'
-               be      sun4d_init
-                cmp    %l1, 'e'
-               be      no_sun4e_here           ! Could be a sun4e.
-                nop
-               b       no_sun4u_here           ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
-                nop
 
 /* CPUID in bootbus can be found at PA 0xff0140000 */
 #define SUN4D_BOOTBUS_CPUID     0xf0140000
@@ -431,9 +477,9 @@ sun4m_init:
 /* This sucks, apparently this makes Vikings call prom panic, will fix later */
 2:
                rd      %psr, %o1
-               srl     %o1, 28, %o1            ! Get a type of the CPU
+               srl     %o1, PSR_IMPL_SHIFT, %o1        ! Get a type of the CPU
 
-               subcc   %o1, 4, %g0             ! TI: Viking or MicroSPARC
+               subcc   %o1, PSR_IMPL_TI, %g0           ! TI: Viking or MicroSPARC
                be      continue_boot
                 nop
 
@@ -459,10 +505,6 @@ continue_boot:
 /* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's
  * show-time!
  */
-
-               sethi   %hi(cputyp), %o0
-               st      %g4, [%o0 + %lo(cputyp)]
-
                /* Turn on Supervisor, EnableFloating, and all the PIL bits.
                 * Also puts us in register window zero with traps off.
                 */
@@ -480,7 +522,7 @@ continue_boot:
                set     __bss_start , %o0       ! First address of BSS
                set     _end , %o1              ! Last address of BSS
                add     %o0, 0x1, %o0
-1:     
+1:
                stb     %g0, [%o0]
                subcc   %o0, %o1, %g0
                bl      1b
@@ -546,7 +588,7 @@ continue_boot:
                set     dest, %g2; \
                ld      [%g5], %g4; \
                st      %g4, [%g2];
-       
+
                /* Patch for window spills... */
                PATCH_INSN(spnwin_patch1_7win, spnwin_patch1)
                PATCH_INSN(spnwin_patch2_7win, spnwin_patch2)
@@ -597,7 +639,7 @@ continue_boot:
                st      %g4, [%g5 + 0x18]
                st      %g4, [%g5 + 0x1c]
 
-2:             
+2:
                sethi   %hi(nwindows), %g4
                st      %g3, [%g4 + %lo(nwindows)]      ! store final value
                sub     %g3, 0x1, %g3
@@ -617,18 +659,12 @@ continue_boot:
                wr      %g3, PSR_ET, %psr
                WRITE_PAUSE
 
-               /* First we call prom_init() to set up PROMLIB, then
-                * off to start_kernel().
-                */
-
+               /* Call sparc32_start_kernel(struct linux_romvec *rp) */
                sethi   %hi(prom_vector_p), %g5
                ld      [%g5 + %lo(prom_vector_p)], %o0
-               call    prom_init
+               call    sparc32_start_kernel
                 nop
 
-               call    start_kernel
-                nop
-       
                /* We should not get here. */
                call    halt_me
                 nop
@@ -659,7 +695,7 @@ sun4u_5:
                .asciz "write"
                .align  4
 sun4u_6:
-               .asciz  "\n\rOn sun4u you have to use UltraLinux (64bit) kernel\n\rand not a 32bit sun4[cdem] version\n\r\n\r"
+               .asciz  "\n\rOn sun4u you have to use sparc64 kernel\n\rand not a sparc32 version\n\r\n\r"
 sun4u_6e:
                .align  4
 sun4u_7:
index a2846f5e32d8042246e1764132c4ae9ec32bb857..0f094db918c7f8b711aeaab6ba01ac46847332cd 100644 (file)
@@ -55,17 +55,13 @@ const struct sparc32_dma_ops *sparc32_dma_ops;
 /* This function must make sure that caches and memory are coherent after DMA
  * On LEON systems without cache snooping it flushes the entire D-CACHE.
  */
-#ifndef CONFIG_SPARC_LEON
 static inline void dma_make_coherent(unsigned long pa, unsigned long len)
 {
+       if (sparc_cpu_model == sparc_leon) {
+               if (!sparc_leon3_snooping_enabled())
+                       leon_flush_dcache_all();
+       }
 }
-#else
-static inline void dma_make_coherent(unsigned long pa, unsigned long len)
-{
-       if (!sparc_leon3_snooping_enabled())
-               leon_flush_dcache_all();
-}
-#endif
 
 static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
 static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
@@ -427,9 +423,6 @@ arch_initcall(sparc_register_ioport);
 #endif /* CONFIG_SBUS */
 
 
-/* LEON reuses PCI DMA ops */
-#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
-
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
  */
@@ -657,14 +650,11 @@ struct dma_map_ops pci32_dma_ops = {
 };
 EXPORT_SYMBOL(pci32_dma_ops);
 
-#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */
+/* leon re-uses pci32_dma_ops */
+struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
+EXPORT_SYMBOL(leon_dma_ops);
 
-#ifdef CONFIG_SPARC_LEON
-struct dma_map_ops *dma_ops = &pci32_dma_ops;
-#elif defined(CONFIG_SBUS)
 struct dma_map_ops *dma_ops = &sbus_dma_ops;
-#endif
-
 EXPORT_SYMBOL(dma_ops);
 
 
index ae04914f7774be143a95964d139f1f2e1003da7d..c145f6fd123b88814c9d650e781dd391ea0e73f7 100644 (file)
@@ -241,9 +241,6 @@ int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler)
        unsigned int cpu_irq;
        int err;
 
-#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
-       struct tt_entry *trap_table;
-#endif
 
        err = request_irq(irq, irq_handler, 0, "floppy", NULL);
        if (err)
@@ -264,13 +261,18 @@ int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler)
        table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
 
        INSTANTIATE(sparc_ttable)
-#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
-       trap_table = &trapbase_cpu1;
-       INSTANTIATE(trap_table)
-       trap_table = &trapbase_cpu2;
-       INSTANTIATE(trap_table)
-       trap_table = &trapbase_cpu3;
-       INSTANTIATE(trap_table)
+
+#if defined CONFIG_SMP
+       if (sparc_cpu_model != sparc_leon) {
+               struct tt_entry *trap_table;
+
+               trap_table = &trapbase_cpu1;
+               INSTANTIATE(trap_table)
+               trap_table = &trapbase_cpu2;
+               INSTANTIATE(trap_table)
+               trap_table = &trapbase_cpu3;
+               INSTANTIATE(trap_table)
+       }
 #endif
 #undef INSTANTIATE
        /*
index a86372d345870202a8bb27e9d72c18b8f7574cb7..291bb5de9ce0963184518a9a00872370821075d1 100644 (file)
@@ -26,6 +26,9 @@ static inline unsigned long kimage_addr_to_ra(const char *p)
 #endif
 
 #ifdef CONFIG_SPARC32
+/* setup_32.c */
+void sparc32_start_kernel(struct linux_romvec *rp);
+
 /* cpu.c */
 extern void cpu_probe(void);
 
index 77c1b916e4dd35c8b04422a1e7102583b1b8a45c..e34e2c40c0609eac5b41b7220f01304cd2373e20 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/smp.h>
 #include <asm/setup.h>
 
+#include "kernel.h"
 #include "prom.h"
 #include "irq.h"
 
index 519ca923f59f491468d6252423effac83cc9f9d2..4e174321097d7a4af24eb0f5cd3bb4b1d5b37e53 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/pm.h>
 
 #include <asm/leon_amba.h>
+#include <asm/cpu_type.h>
 #include <asm/leon.h>
 
 /* List of Systems that need fixup instructions around power-down instruction */
@@ -65,13 +66,15 @@ void pmc_leon_idle(void)
 /* Install LEON Power Down function */
 static int __init leon_pmc_install(void)
 {
-       /* Assign power management IDLE handler */
-       if (pmc_leon_need_fixup())
-               pm_idle = pmc_leon_idle_fixup;
-       else
-               pm_idle = pmc_leon_idle;
+       if (sparc_cpu_model == sparc_leon) {
+               /* Assign power management IDLE handler */
+               if (pmc_leon_need_fixup())
+                       pm_idle = pmc_leon_idle_fixup;
+               else
+                       pm_idle = pmc_leon_idle;
 
-       printk(KERN_INFO "leon: power management initialized\n");
+               printk(KERN_INFO "leon: power management initialized\n");
+       }
 
        return 0;
 }
index a469090faf9f12f83251b2432614b0724d9e67e0..0f3fb6d9c8efc3e8cd0f12e48d3ff26d87a6699f 100644 (file)
 
 #include "kernel.h"
 
-#ifdef CONFIG_SPARC_LEON
-
 #include "irq.h"
 
 extern ctxd_t *srmmu_ctx_table_phys;
 static int smp_processors_ready;
 extern volatile unsigned long cpu_callin_map[NR_CPUS];
 extern cpumask_t smp_commenced_mask;
-void __init leon_configure_cache_smp(void);
+void __cpuinit leon_configure_cache_smp(void);
 static void leon_ipi_init(void);
 
 /* IRQ number of LEON IPIs */
@@ -123,7 +121,7 @@ void __cpuinit leon_callin(void)
 
 extern struct linux_prom_registers smp_penguin_ctable;
 
-void __init leon_configure_cache_smp(void)
+void __cpuinit leon_configure_cache_smp(void)
 {
        unsigned long cfg = sparc_leon3_get_dcachecfg();
        int me = smp_processor_id();
@@ -507,5 +505,3 @@ void __init leon_init_smp(void)
 
        sparc32_ipi_ops = &leon_ipi_ops;
 }
-
-#endif /* CONFIG_SPARC_LEON */
index fe6787cc62fc9188cbf792cd53ba65f998eeccb3..cb36e82dcd5dd789221852c366934c93f26d7ba3 100644 (file)
@@ -65,50 +65,25 @@ extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
 struct task_struct *last_task_used_math = NULL;
 struct thread_info *current_set[NR_CPUS];
 
-#ifndef CONFIG_SMP
-
 /*
  * the idle loop on a Sparc... ;)
  */
 void cpu_idle(void)
 {
-       /* endless idle loop with no priority at all */
-       for (;;) {
-               if (pm_idle) {
-                       while (!need_resched())
-                               (*pm_idle)();
-               } else {
-                       while (!need_resched())
-                               cpu_relax();
-               }
-               schedule_preempt_disabled();
-       }
-}
-
-#else
+       set_thread_flag(TIF_POLLING_NRFLAG);
 
-/* This is being executed in task 0 'user space'. */
-void cpu_idle(void)
-{
-        set_thread_flag(TIF_POLLING_NRFLAG);
        /* endless idle loop with no priority at all */
-       while(1) {
-#ifdef CONFIG_SPARC_LEON
-               if (pm_idle) {
-                       while (!need_resched())
+       for (;;) {
+               while (!need_resched()) {
+                       if (pm_idle)
                                (*pm_idle)();
-               } else
-#endif
-               {
-                       while (!need_resched())
+                       else
                                cpu_relax();
                }
                schedule_preempt_disabled();
        }
 }
 
-#endif
-
 /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
 void machine_halt(void)
 {
index 741df916c124b10b12751da38bf7d3dac335c5dd..1303021748c8cc3c5c2ca109e2310e149dca0471 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/of_pdt.h>
 #include <asm/prom.h>
 #include <asm/oplib.h>
-#include <asm/leon.h>
 
 #include "prom.h"
 
index 7abc24e2bf1a9f5bed13d56cd0bc5e746a5e0d95..6c34de0c2abd40eefc679e35d548ff4c2a132586 100644 (file)
@@ -231,11 +231,14 @@ srmmu_rett_stackchk:
        cmp     %g1, %fp
        bleu    ret_trap_user_stack_is_bolixed
         mov    AC_M_SFSR, %g1
-       lda     [%g1] ASI_M_MMUREGS, %g0
+LEON_PI(lda    [%g1] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda    [%g1] ASI_M_MMUREGS, %g0)
 
-       lda     [%g0] ASI_M_MMUREGS, %g1
+LEON_PI(lda    [%g0] ASI_LEON_MMUREGS, %g1)
+SUN_PI_(lda    [%g0] ASI_M_MMUREGS, %g1)
        or      %g1, 0x2, %g1
-       sta     %g1, [%g0] ASI_M_MMUREGS
+LEON_PI(sta    %g1, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta    %g1, [%g0] ASI_M_MMUREGS)
 
        restore %g0, %g0, %g0
 
@@ -244,13 +247,16 @@ srmmu_rett_stackchk:
        save    %g0, %g0, %g0
 
        andn    %g1, 0x2, %g1
-       sta     %g1, [%g0] ASI_M_MMUREGS
+LEON_PI(sta    %g1, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta    %g1, [%g0] ASI_M_MMUREGS)
 
        mov     AC_M_SFAR, %g2
-       lda     [%g2] ASI_M_MMUREGS, %g2
+LEON_PI(lda    [%g2] ASI_LEON_MMUREGS, %g2)
+SUN_PI_(lda    [%g2] ASI_M_MMUREGS, %g2)
 
        mov     AC_M_SFSR, %g1
-       lda     [%g1] ASI_M_MMUREGS, %g1
+LEON_PI(lda    [%g1] ASI_LEON_MMUREGS, %g1)
+SUN_PI_(lda    [%g1] ASI_M_MMUREGS, %g1)
        andcc   %g1, 0x2, %g0
        be      ret_trap_userwins_ok
         nop
index c052313f4dc578700193abaf5fc6e9b127e66e0a..efe3e64bba38bd80814d51958bdb046b9c8f8a68 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/cpu.h>
 #include <linux/kdebug.h>
 #include <linux/export.h>
+#include <linux/start_kernel.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -45,6 +46,7 @@
 #include <asm/cpudata.h>
 #include <asm/setup.h>
 #include <asm/cacheflush.h>
+#include <asm/sections.h>
 
 #include "kernel.h"
 
@@ -237,28 +239,42 @@ static void __init per_cpu_patch(void)
        }
 }
 
+struct leon_1insn_patch_entry {
+       unsigned int addr;
+       unsigned int insn;
+};
+
 enum sparc_cpu sparc_cpu_model;
 EXPORT_SYMBOL(sparc_cpu_model);
 
-struct tt_entry *sparc_ttable;
+static __init void leon_patch(void)
+{
+       struct leon_1insn_patch_entry *start = (void *)__leon_1insn_patch;
+       struct leon_1insn_patch_entry *end = (void *)__leon_1insn_patch_end;
 
-struct pt_regs fake_swapper_regs;
+       /* Default instruction is leon - no patching */
+       if (sparc_cpu_model == sparc_leon)
+               return;
 
-void __init setup_arch(char **cmdline_p)
-{
-       int i;
-       unsigned long highest_paddr;
+       while (start < end) {
+               unsigned long addr = start->addr;
 
-       sparc_ttable = (struct tt_entry *) &trapbase;
+               *(unsigned int *)(addr) = start->insn;
+               flushi(addr);
 
-       /* Initialize PROM console and command line. */
-       *cmdline_p = prom_getbootargs();
-       strcpy(boot_command_line, *cmdline_p);
-       parse_early_param();
+               start++;
+       }
+}
 
-       boot_flags_init(*cmdline_p);
+struct tt_entry *sparc_ttable;
+struct pt_regs fake_swapper_regs;
 
-       register_console(&prom_early_console);
+/* Called from head_32.S - before we have setup anything
+ * in the kernel. Be very careful with what you do here.
+ */
+void __init sparc32_start_kernel(struct linux_romvec *rp)
+{
+       prom_init(rp);
 
        /* Set sparc_cpu_model */
        sparc_cpu_model = sun_unknown;
@@ -275,6 +291,26 @@ void __init setup_arch(char **cmdline_p)
        if (!strncmp(&cputypval[0], "leon" , 4))
                sparc_cpu_model = sparc_leon;
 
+       leon_patch();
+       start_kernel();
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+       int i;
+       unsigned long highest_paddr;
+
+       sparc_ttable = (struct tt_entry *) &trapbase;
+
+       /* Initialize PROM console and command line. */
+       *cmdline_p = prom_getbootargs();
+       strcpy(boot_command_line, *cmdline_p);
+       parse_early_param();
+
+       boot_flags_init(*cmdline_p);
+
+       register_console(&prom_early_console);
+
        printk("ARCH: ");
        switch(sparc_cpu_model) {
        case sun4m:
index bb1513e45f1a811b05a07979995726b99eca1a94..a53e0a5fd3a3d944a5eb42391e2862c92a23c044 100644 (file)
@@ -32,8 +32,6 @@
 
 #include "sigutil.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* This magic should be in g_upper[0] for all upper parts
  * to be valid.
  */
@@ -274,7 +272,6 @@ void do_sigreturn32(struct pt_regs *regs)
                case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
                case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
        }
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        return;
 
@@ -376,7 +373,6 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
                case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32);
                case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
        }
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        return;
 segv:
@@ -775,7 +771,7 @@ sigsegv:
        return -EFAULT;
 }
 
-static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
+static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
                                  siginfo_t *info,
                                  sigset_t *oldset, struct pt_regs *regs)
 {
@@ -787,12 +783,9 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
                err = setup_frame32(ka, regs, signr, oldset);
 
        if (err)
-               return err;
-
-       block_sigmask(ka, signr);
-       tracehook_signal_handler(signr, info, ka, regs, 0);
+               return;
 
-       return 0;
+       signal_delivered(signr, info, ka, regs, 0);
 }
 
 static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@@ -841,14 +834,7 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs)
        if (signr > 0) {
                if (restart_syscall)
                        syscall_restart32(orig_i0, regs, &ka.sa);
-               if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) {
-                       /* A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TS_RESTORE_SIGMASK flag.
-                        */
-                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               }
+               handle_signal32(signr, &ka, &info, oldset, regs);
                return;
        }
        if (restart_syscall &&
@@ -872,10 +858,7 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs)
        /* If there's no signal to deliver, we just put the saved sigmask
         * back
         */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               set_current_blocked(&current->saved_sigmask);
-       }
+       restore_saved_sigmask();
 }
 
 struct sigstack32 {
index 2b7e849f7c6528b1da91146f3f515ccbf21370c3..68f9c8650af47b2731c622cc8ada8768b63d90cb 100644 (file)
@@ -29,8 +29,6 @@
 
 #include "sigutil.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
                   void *fpqueue, unsigned long *fpqdepth);
 extern void fpload(unsigned long *fpregs, unsigned long *fsr);
@@ -130,7 +128,6 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
        if (err)
                goto segv_and_exit;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        return;
 
@@ -197,7 +194,6 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
                        goto segv;
        }
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        return;
 segv:
@@ -449,10 +445,11 @@ sigsegv:
        return -EFAULT;
 }
 
-static inline int
+static inline void
 handle_signal(unsigned long signr, struct k_sigaction *ka,
-             siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
+             siginfo_t *info, struct pt_regs *regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int err;
 
        if (ka->sa.sa_flags & SA_SIGINFO)
@@ -461,12 +458,9 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
                err = setup_frame(ka, regs, signr, oldset);
 
        if (err)
-               return err;
-
-       block_sigmask(ka, signr);
-       tracehook_signal_handler(signr, info, ka, regs, 0);
+               return;
 
-       return 0;
+       signal_delivered(signr, info, ka, regs, 0);
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -498,7 +492,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 {
        struct k_sigaction ka;
        int restart_syscall;
-       sigset_t *oldset;
        siginfo_t info;
        int signr;
 
@@ -523,11 +516,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
                regs->u_regs[UREG_G6] = orig_i0;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
        /* If the debugger messes with the program counter, it clears
@@ -544,15 +532,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        if (signr > 0) {
                if (restart_syscall)
                        syscall_restart(orig_i0, regs, &ka.sa);
-               if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag.
-                        */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
+               handle_signal(signr, &ka, &info, regs);
                return;
        }
        if (restart_syscall &&
@@ -576,22 +556,17 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        /* if there's no signal to deliver, we just put the saved sigmask
         * back
         */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               set_current_blocked(&current->saved_sigmask);
-       }
+       restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
                      unsigned long thread_info_flags)
 {
-       if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+       if (thread_info_flags & _TIF_SIGPENDING)
                do_signal(regs, orig_i0);
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
 
index eafaab486b2d24465a0911e46a08c35ce17d3d29..867de2f8189c32acf359fe5575cd139ede6b7048 100644 (file)
@@ -38,8 +38,6 @@
 #include "systbls.h"
 #include "sigutil.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* {set, get}context() needed for 64-bit SparcLinux userland. */
 asmlinkage void sparc64_set_context(struct pt_regs *regs)
 {
@@ -71,7 +69,6 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
                        if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
                                goto do_sigsegv;
                }
-               sigdelsetmask(&set, ~_BLOCKABLE);
                set_current_blocked(&set);
        }
        if (test_thread_flag(TIF_32BIT)) {
@@ -315,7 +312,6 @@ void do_rt_sigreturn(struct pt_regs *regs)
        /* Prevent syscall restart.  */
        pt_regs_clear_syscall(regs);
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
        return;
 segv:
@@ -466,7 +462,7 @@ sigsegv:
        return -EFAULT;
 }
 
-static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
+static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
                                siginfo_t *info,
                                sigset_t *oldset, struct pt_regs *regs)
 {
@@ -475,12 +471,9 @@ static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
        err = setup_rt_frame(ka, regs, signr, oldset,
                             (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
        if (err)
-               return err;
-
-       block_sigmask(ka, signr);
-       tracehook_signal_handler(signr, info, ka, regs, 0);
+               return;
 
-       return 0;
+       signal_delivered(signr, info, ka, regs, 0);
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -512,7 +505,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 {
        struct k_sigaction ka;
        int restart_syscall;
-       sigset_t *oldset;
+       sigset_t *oldset = sigmask_to_save();
        siginfo_t info;
        int signr;
        
@@ -538,11 +531,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
            (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
                regs->u_regs[UREG_G6] = orig_i0;
 
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
 #ifdef CONFIG_COMPAT
        if (test_thread_flag(TIF_32BIT)) {
                extern void do_signal32(sigset_t *, struct pt_regs *);
@@ -563,14 +551,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        if (signr > 0) {
                if (restart_syscall)
                        syscall_restart(orig_i0, regs, &ka.sa);
-               if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
-                       /* A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TS_RESTORE_SIGMASK flag.
-                        */
-                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               }
+               handle_signal(signr, &ka, &info, oldset, regs);
                return;
        }
        if (restart_syscall &&
@@ -594,10 +575,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        /* If there's no signal to deliver, we just put the saved sigmask
         * back
         */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               set_current_blocked(&current->saved_sigmask);
-       }
+       restore_saved_sigmask();
 }
 
 void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
@@ -607,8 +585,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
 
index 3ee51f189a55297b0babeb1f54d0b40af97de6f8..275f74fd6f6a3f16fdd4e5fae291af2a364075c0 100644 (file)
@@ -580,16 +580,9 @@ SYSCALL_DEFINE5(64_mremap, unsigned long, addr,    unsigned long, old_len,
                unsigned long, new_len, unsigned long, flags,
                unsigned long, new_addr)
 {
-       unsigned long ret = -EINVAL;
-
        if (test_thread_flag(TIF_32BIT))
-               goto out;
-
-       down_write(&current->mm->mmap_sem);
-       ret = do_mremap(addr, old_len, new_len, flags, new_addr);
-       up_write(&current->mm->mmap_sem);
-out:
-       return ret;       
+               return -EINVAL;
+       return sys_mremap(addr, old_len, new_len, flags, new_addr);
 }
 
 /* we come to here via sys_nis_syscall so it can setup the regs argument */
index 7364ddc9e5aadd64317bc48c48aada5948b16727..af27acab44868a085ad2182049d81a6da61831e2 100644 (file)
@@ -149,8 +149,6 @@ sun4d_cpu_startup:
 
        b,a     smp_do_cpu_idle
 
-#ifdef CONFIG_SPARC_LEON
-
        __CPUINIT
        .align  4
         .global leon_smp_cpu_startup, smp_penguin_ctable
@@ -161,7 +159,7 @@ leon_smp_cpu_startup:
         ld [%g1+4],%g1
         srl %g1,4,%g1
         set 0x00000100,%g5 /* SRMMU_CTXTBL_PTR */
-       sta %g1, [%g5] ASI_M_MMUREGS
+       sta %g1, [%g5] ASI_LEON_MMUREGS
 
        /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
        set     (PSR_PIL | PSR_S | PSR_PS), %g1
@@ -207,5 +205,3 @@ leon_smp_cpu_startup:
         nop
 
        b,a     smp_do_cpu_idle
-
-#endif
index c72fdf55e1c108435b6f4193b1c8377b441eef3b..3b05e6697710da1718be093e1f56dccaddc04e94 100644 (file)
@@ -2054,7 +2054,7 @@ void do_fpieee(struct pt_regs *regs)
        do_fpe_common(regs);
 }
 
-extern int do_mathemu(struct pt_regs *, struct fpustate *);
+extern int do_mathemu(struct pt_regs *, struct fpustate *, bool);
 
 void do_fpother(struct pt_regs *regs)
 {
@@ -2068,7 +2068,7 @@ void do_fpother(struct pt_regs *regs)
        switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
        case (2 << 14): /* unfinished_FPop */
        case (3 << 14): /* unimplemented_FPop */
-               ret = do_mathemu(regs, f);
+               ret = do_mathemu(regs, f, false);
                break;
        }
        if (ret)
@@ -2308,10 +2308,12 @@ void do_illegal_instruction(struct pt_regs *regs)
                        } else {
                                struct fpustate *f = FPUSTATE;
 
-                               /* XXX maybe verify XFSR bits like
-                                * XXX do_fpother() does?
+                               /* On UltraSPARC T2 and later, FPU insns which
+                                * are not implemented in HW signal an illegal
+                                * instruction trap and do not set the FP Trap
+                                * Trap in the %fsr to unimplemented_FPop.
                                 */
-                               if (do_mathemu(regs, f))
+                               if (do_mathemu(regs, f, true))
                                        return;
                        }
                }
index 0e1605697b4905b030923be4d18ef2efa47fe423..89c2c29f154b4c45114df0dce93feee4ce8330af 100644 (file)
@@ -107,6 +107,11 @@ SECTIONS
                *(.sun4v_2insn_patch)
                __sun4v_2insn_patch_end = .;
        }
+       .leon_1insn_patch : {
+               __leon_1insn_patch = .;
+               *(.leon_1insn_patch)
+               __leon_1insn_patch_end = .;
+       }
        .swapper_tsb_phys_patch : {
                __swapper_tsb_phys_patch = .;
                *(.swapper_tsb_phys_patch)
index 4c2de3cf309b65ad77b9f92e0773b5bd2e1f9e8d..28a7bc69f82b1dcf2113547cd19697244843d7dd 100644 (file)
@@ -332,24 +332,30 @@ spwin_srmmu_stackchk:
         mov    AC_M_SFSR, %glob_tmp
 
        /* Clear the fault status and turn on the no_fault bit. */
-       lda     [%glob_tmp] ASI_M_MMUREGS, %g0          ! eat SFSR
+LEON_PI(lda    [%glob_tmp] ASI_LEON_MMUREGS, %g0)      ! eat SFSR
+SUN_PI_(lda    [%glob_tmp] ASI_M_MMUREGS, %g0)         ! eat SFSR
 
-       lda     [%g0] ASI_M_MMUREGS, %glob_tmp          ! read MMU control
+LEON_PI(lda    [%g0] ASI_LEON_MMUREGS, %glob_tmp)      ! read MMU control
+SUN_PI_(lda    [%g0] ASI_M_MMUREGS, %glob_tmp)         ! read MMU control
        or      %glob_tmp, 0x2, %glob_tmp               ! or in no_fault bit
-       sta     %glob_tmp, [%g0] ASI_M_MMUREGS          ! set it
+LEON_PI(sta    %glob_tmp, [%g0] ASI_LEON_MMUREGS)      ! set it
+SUN_PI_(sta    %glob_tmp, [%g0] ASI_M_MMUREGS)         ! set it
 
        /* Dump the registers and cross fingers. */
        STORE_WINDOW(sp)
 
        /* Clear the no_fault bit and check the status. */
        andn    %glob_tmp, 0x2, %glob_tmp
-       sta     %glob_tmp, [%g0] ASI_M_MMUREGS
+LEON_PI(sta    %glob_tmp, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta    %glob_tmp, [%g0] ASI_M_MMUREGS)
 
        mov     AC_M_SFAR, %glob_tmp
-       lda     [%glob_tmp] ASI_M_MMUREGS, %g0
+LEON_PI(lda    [%glob_tmp] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda    [%glob_tmp] ASI_M_MMUREGS, %g0)
 
        mov     AC_M_SFSR, %glob_tmp
-       lda     [%glob_tmp] ASI_M_MMUREGS, %glob_tmp
+LEON_PI(lda    [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)
+SUN_PI_(lda    [%glob_tmp] ASI_M_MMUREGS, %glob_tmp)
        andcc   %glob_tmp, 0x2, %g0                     ! did we fault?
        be,a    spwin_finish_up + 0x4                   ! cool beans, success
         restore %g0, %g0, %g0
index 9fde91a249e06b470008c155b8bc56f8b7de08e9..2c21cc59683e2ca28d9c316e47c8b0cda6055795 100644 (file)
@@ -254,16 +254,19 @@ srmmu_fwin_stackchk:
        mov     AC_M_SFSR, %l4
        cmp     %l5, %sp
        bleu    fwin_user_stack_is_bolixed
-        lda    [%l4] ASI_M_MMUREGS, %g0        ! clear fault status
+LEON_PI( lda   [%l4] ASI_LEON_MMUREGS, %g0)    ! clear fault status
+SUN_PI_( lda   [%l4] ASI_M_MMUREGS, %g0)       ! clear fault status
 
        /* The technique is, turn off faults on this processor,
         * just let the load rip, then check the sfsr to see if
         * a fault did occur.  Then we turn on fault traps again
         * and branch conditionally based upon what happened.
         */
-       lda     [%g0] ASI_M_MMUREGS, %l5        ! read mmu-ctrl reg
+LEON_PI(lda    [%g0] ASI_LEON_MMUREGS, %l5)    ! read mmu-ctrl reg
+SUN_PI_(lda    [%g0] ASI_M_MMUREGS, %l5)       ! read mmu-ctrl reg
        or      %l5, 0x2, %l5                   ! turn on no-fault bit
-       sta     %l5, [%g0] ASI_M_MMUREGS        ! store it
+LEON_PI(sta    %l5, [%g0] ASI_LEON_MMUREGS)    ! store it
+SUN_PI_(sta    %l5, [%g0] ASI_M_MMUREGS)       ! store it
 
        /* Cross fingers and go for it. */
        LOAD_WINDOW(sp)
@@ -275,18 +278,22 @@ srmmu_fwin_stackchk:
 
        /* LOCATION: Window 'T' */
 
-       lda     [%g0] ASI_M_MMUREGS, %twin_tmp1 ! load mmu-ctrl again
-       andn    %twin_tmp1, 0x2, %twin_tmp1     ! clear no-fault bit
-       sta     %twin_tmp1, [%g0] ASI_M_MMUREGS ! store it
+LEON_PI(lda    [%g0] ASI_LEON_MMUREGS, %twin_tmp1)     ! load mmu-ctrl again
+SUN_PI_(lda    [%g0] ASI_M_MMUREGS, %twin_tmp1)        ! load mmu-ctrl again
+       andn    %twin_tmp1, 0x2, %twin_tmp1             ! clear no-fault bit
+LEON_PI(sta    %twin_tmp1, [%g0] ASI_LEON_MMUREGS)     ! store it
+SUN_PI_(sta    %twin_tmp1, [%g0] ASI_M_MMUREGS)        ! store it
 
        mov     AC_M_SFAR, %twin_tmp2
-       lda     [%twin_tmp2] ASI_M_MMUREGS, %g0 ! read fault address
+LEON_PI(lda    [%twin_tmp2] ASI_LEON_MMUREGS, %g0)     ! read fault address
+SUN_PI_(lda    [%twin_tmp2] ASI_M_MMUREGS, %g0)        ! read fault address
 
        mov     AC_M_SFSR, %twin_tmp2
-       lda     [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2  ! read fault status
-       andcc   %twin_tmp2, 0x2, %g0                    ! did fault occur?
+LEON_PI(lda    [%twin_tmp2] ASI_LEON_MMUREGS, %twin_tmp2) ! read fault status
+SUN_PI_(lda    [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2)    ! read fault status
+       andcc   %twin_tmp2, 0x2, %g0                       ! did fault occur?
 
-       bne     1f                                      ! yep, cleanup
+       bne     1f                                         ! yep, cleanup
         nop
 
        wr      %t_psr, 0x0, %psr
index 943d98dc4cdb55469c9d727453112d9727af1222..dff4096f3dec045472e037a62d644d1e8cbb910e 100644 (file)
@@ -10,7 +10,6 @@ lib-y                 += strlen.o
 lib-y                 += checksum_$(BITS).o
 lib-$(CONFIG_SPARC32) += blockops.o
 lib-y                 += memscan_$(BITS).o memcmp.o strncmp_$(BITS).o
-lib-y                 += strlen_user_$(BITS).o
 lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
 lib-$(CONFIG_SPARC32) += copy_user.o locks.o
 lib-$(CONFIG_SPARC64) += atomic_64.o
index 6b278abdb63decf5c231098e217bbf4a1c97aa6b..3b31218cafc6c515d0f24ee809004e15b3e4ab94 100644 (file)
@@ -15,8 +15,6 @@
 
 /* string functions */
 EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(__strlen_user);
-EXPORT_SYMBOL(__strnlen_user);
 EXPORT_SYMBOL(strncmp);
 
 /* mem* functions */
diff --git a/arch/sparc/lib/strlen_user_32.S b/arch/sparc/lib/strlen_user_32.S
deleted file mode 100644 (file)
index 8c8a371..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-/* strlen_user.S: Sparc optimized strlen_user code
- *
- * Return length of string in userspace including terminating 0
- * or 0 for error
- *
- * Copyright (C) 1991,1996 Free Software Foundation
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#define LO_MAGIC 0x01010101
-#define HI_MAGIC 0x80808080
-
-10:
-       ldub    [%o0], %o5
-       cmp     %o5, 0
-       be      1f
-        add    %o0, 1, %o0
-       andcc   %o0, 3, %g0
-       be      4f
-        or     %o4, %lo(HI_MAGIC), %o3
-11:
-       ldub    [%o0], %o5
-       cmp     %o5, 0
-       be      2f
-        add    %o0, 1, %o0
-       andcc   %o0, 3, %g0
-       be      5f
-        sethi  %hi(LO_MAGIC), %o4
-12:
-       ldub    [%o0], %o5
-       cmp     %o5, 0
-       be      3f
-        add    %o0, 1, %o0
-       b       13f
-        or     %o4, %lo(LO_MAGIC), %o2
-1:
-       retl
-        mov    1, %o0
-2:
-       retl
-        mov    2, %o0
-3:
-       retl
-        mov    3, %o0
-
-       .align 4
-       .global __strlen_user, __strnlen_user
-__strlen_user:
-       sethi   %hi(32768), %o1
-__strnlen_user:
-       mov     %o1, %g1
-       mov     %o0, %o1
-       andcc   %o0, 3, %g0
-       bne     10b
-        sethi  %hi(HI_MAGIC), %o4
-       or      %o4, %lo(HI_MAGIC), %o3
-4:
-       sethi   %hi(LO_MAGIC), %o4
-5:
-       or      %o4, %lo(LO_MAGIC), %o2
-13:
-       ld      [%o0], %o5
-2:
-       sub     %o5, %o2, %o4
-       andcc   %o4, %o3, %g0
-       bne     82f
-        add    %o0, 4, %o0
-       sub     %o0, %o1, %g2
-81:    cmp     %g2, %g1
-       blu     13b
-        mov    %o0, %o4
-       ba,a    1f
-
-       /* Check every byte. */
-82:    srl     %o5, 24, %g5
-       andcc   %g5, 0xff, %g0
-       be      1f
-        add    %o0, -3, %o4
-       srl     %o5, 16, %g5
-       andcc   %g5, 0xff, %g0
-       be      1f
-        add    %o4, 1, %o4
-       srl     %o5, 8, %g5
-       andcc   %g5, 0xff, %g0
-       be      1f
-        add    %o4, 1, %o4
-       andcc   %o5, 0xff, %g0
-       bne     81b
-        sub    %o0, %o1, %g2
-
-       add     %o4, 1, %o4
-1:
-       retl
-        sub    %o4, %o1, %o0
-
-       .section .fixup,#alloc,#execinstr
-       .align  4
-9:
-       retl
-        clr    %o0
-
-       .section __ex_table,#alloc
-       .align  4
-
-       .word   10b, 9b
-       .word   11b, 9b
-       .word   12b, 9b
-       .word   13b, 9b
diff --git a/arch/sparc/lib/strlen_user_64.S b/arch/sparc/lib/strlen_user_64.S
deleted file mode 100644 (file)
index c3df71f..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-/* strlen_user.S: Sparc64 optimized strlen_user code
- *
- * Return length of string in userspace including terminating 0
- * or 0 for error
- *
- * Copyright (C) 1991,1996 Free Software Foundation
- * Copyright (C) 1996,1999 David S. Miller (davem@redhat.com)
- * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#include <linux/linkage.h>
-#include <asm/asi.h>
-
-#define LO_MAGIC 0x01010101
-#define HI_MAGIC 0x80808080
-
-       .align 4
-ENTRY(__strlen_user)
-       sethi   %hi(32768), %o1
-ENTRY(__strnlen_user)
-       mov     %o1, %g1
-       mov     %o0, %o1
-       andcc   %o0, 3, %g0
-       be,pt   %icc, 9f
-        sethi  %hi(HI_MAGIC), %o4
-10:    lduba   [%o0] %asi, %o5
-       brz,pn  %o5, 21f
-        add    %o0, 1, %o0
-       andcc   %o0, 3, %g0
-       be,pn   %icc, 4f
-        or     %o4, %lo(HI_MAGIC), %o3
-11:    lduba   [%o0] %asi, %o5
-       brz,pn  %o5, 22f
-        add    %o0, 1, %o0
-       andcc   %o0, 3, %g0
-       be,pt   %icc, 13f
-        srl    %o3, 7, %o2
-12:    lduba   [%o0] %asi, %o5
-       brz,pn  %o5, 23f
-        add    %o0, 1, %o0
-       ba,pt   %icc, 2f
-15:     lda    [%o0] %asi, %o5
-9:     or      %o4, %lo(HI_MAGIC), %o3
-4:     srl     %o3, 7, %o2
-13:    lda     [%o0] %asi, %o5
-2:     sub     %o5, %o2, %o4
-       andcc   %o4, %o3, %g0
-       bne,pn  %icc, 82f
-        add    %o0, 4, %o0
-       sub     %o0, %o1, %g2
-81:    cmp     %g2, %g1
-       blu,pt  %icc, 13b
-        mov    %o0, %o4
-       ba,a,pt %xcc, 1f
-
-       /* Check every byte. */
-82:    srl     %o5, 24, %g7
-       andcc   %g7, 0xff, %g0
-       be,pn   %icc, 1f
-        add    %o0, -3, %o4
-       srl     %o5, 16, %g7
-       andcc   %g7, 0xff, %g0
-       be,pn   %icc, 1f
-        add    %o4, 1, %o4
-       srl     %o5, 8, %g7
-       andcc   %g7, 0xff, %g0
-       be,pn   %icc, 1f
-        add    %o4, 1, %o4
-       andcc   %o5, 0xff, %g0
-       bne,pt  %icc, 81b
-        sub    %o0, %o1, %g2
-       add     %o4, 1, %o4
-1:     retl
-        sub    %o4, %o1, %o0
-21:    retl
-        mov    1, %o0
-22:    retl
-        mov    2, %o0
-23:    retl
-        mov    3, %o0
-ENDPROC(__strlen_user)
-ENDPROC(__strnlen_user)
-
-        .section .fixup,#alloc,#execinstr
-        .align  4
-30:
-        retl
-         clr    %o0
-
-       .section __ex_table,"a"
-       .align  4
-
-       .word   10b, 30b
-       .word   11b, 30b
-       .word   12b, 30b
-       .word   15b, 30b
-       .word   13b, 30b
index 2bbe2f28ad23355edb2716828769061327c2f829..1704068da92806d5868d0374bea669f31f21b113 100644 (file)
@@ -163,7 +163,7 @@ typedef union {
        u64 q[2];
 } *argp;
 
-int do_mathemu(struct pt_regs *regs, struct fpustate *f)
+int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap)
 {
        unsigned long pc = regs->tpc;
        unsigned long tstate = regs->tstate;
@@ -218,7 +218,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
                        case FSQRTS: {
                                unsigned long x = current_thread_info()->xfsr[0];
 
-                               x = (x >> 14) & 0xf;
+                               x = (x >> 14) & 0x7;
                                TYPE(x,1,1,1,1,0,0);
                                break;
                        }
@@ -226,7 +226,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
                        case FSQRTD: {
                                unsigned long x = current_thread_info()->xfsr[0];
 
-                               x = (x >> 14) & 0xf;
+                               x = (x >> 14) & 0x7;
                                TYPE(x,2,1,2,1,0,0);
                                break;
                        }
@@ -357,9 +357,17 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
        if (type) {
                argp rs1 = NULL, rs2 = NULL, rd = NULL;
                
-               freg = (current_thread_info()->xfsr[0] >> 14) & 0xf;
-               if (freg != (type >> 9))
-                       goto err;
+               /* Starting with UltraSPARC-T2, the cpu does not set the FP Trap
+                * Type field in the %fsr to unimplemented_FPop.  Nor does it
+                * use the fp_exception_other trap.  Instead it signals an
+                * illegal instruction and leaves the FP trap type field of
+                * the %fsr unchanged.
+                */
+               if (!illegal_insn_trap) {
+                       int ftt = (current_thread_info()->xfsr[0] >> 14) & 0x7;
+                       if (ftt != (type >> 9))
+                               goto err;
+               }
                current_thread_info()->xfsr[0] &= ~0x1c000;
                freg = ((insn >> 14) & 0x1f);
                switch (type & 0x3) {
index 69ffd3112fed2053ae72f42c7a602a43d7378d13..30c3eccfdf5a209f8b09458824aa9d706f3079dd 100644 (file)
@@ -8,8 +8,9 @@ obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o gup.o
 obj-y                   += fault_$(BITS).o
 obj-y                   += init_$(BITS).o
 obj-$(CONFIG_SPARC32)   += extable.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32)   += srmmu_access.o
 obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
-obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
+obj-$(CONFIG_SPARC32)   += leon_mm.o
 
 # Only used by sparc64
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
index 4c67ae6e50231990bcd40ce3c2170a2f7c070497..5bed085a2c17984a5c33fc5ea509005e3db679cb 100644 (file)
@@ -32,7 +32,7 @@ static inline unsigned long leon_get_ctable_ptr(void)
 }
 
 
-unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
+unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
 {
 
        unsigned int ctxtbl;
index 256db6b22c54ed7cf2cc26412d5013930896703b..62e3f57733037d89d828d389bdefff93c0bc3f0b 100644 (file)
@@ -646,6 +646,23 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
        }
 }
 
+/* These flush types are not available on all chips... */
+static inline unsigned long srmmu_probe(unsigned long vaddr)
+{
+       unsigned long retval;
+
+       if (sparc_cpu_model != sparc_leon) {
+
+               vaddr &= PAGE_MASK;
+               __asm__ __volatile__("lda [%1] %2, %0\n\t" :
+                                    "=r" (retval) :
+                                    "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
+       } else {
+               retval = leon_swprobe(vaddr, 0);
+       }
+       return retval;
+}
+
 /*
  * This is much cleaner than poking around physical address space
  * looking at the prom's page table directly which is what most
@@ -665,7 +682,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
                        break; /* probably wrap around */
                if(start == 0xfef00000)
                        start = KADB_DEBUGGER_BEGVM;
-               if(!(prompte = srmmu_hwprobe(start))) {
+               if(!(prompte = srmmu_probe(start))) {
                        start += PAGE_SIZE;
                        continue;
                }
@@ -674,12 +691,12 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
                what = 0;
     
                if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
-                       if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
+                       if(srmmu_probe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
                                what = 1;
                }
     
                if(!(start & ~(SRMMU_PGDIR_MASK))) {
-                       if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
+                       if(srmmu_probe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
                           prompte)
                                what = 2;
                }
@@ -1156,7 +1173,7 @@ static void turbosparc_flush_page_to_ram(unsigned long page)
 #ifdef TURBOSPARC_WRITEBACK
        volatile unsigned long clear;
 
-       if (srmmu_hwprobe(page))
+       if (srmmu_probe(page))
                turbosparc_flush_page_cache(page);
        clear = srmmu_get_fstatus();
 #endif
diff --git a/arch/sparc/mm/srmmu_access.S b/arch/sparc/mm/srmmu_access.S
new file mode 100644 (file)
index 0000000..d0a67b2
--- /dev/null
@@ -0,0 +1,82 @@
+/* Assembler variants of srmmu access functions.
+ * Implemented in assembler to allow run-time patching.
+ * LEON uses a different ASI for MMUREGS than SUN.
+ *
+ * The leon_1insn_patch infrastructure is used
+ * for the run-time patching.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asmmacro.h>
+#include <asm/pgtsrmmu.h>
+#include <asm/asi.h>
+
+/* unsigned int srmmu_get_mmureg(void) */
+ENTRY(srmmu_get_mmureg)
+LEON_PI(lda    [%g0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda    [%g0] ASI_M_MMUREGS, %o0)
+       retl
+        nop
+ENDPROC(srmmu_get_mmureg)
+
+/* void srmmu_set_mmureg(unsigned long regval) */
+ENTRY(srmmu_set_mmureg)
+LEON_PI(sta    %o0, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta    %o0, [%g0] ASI_M_MMUREGS)
+       retl
+        nop
+ENDPROC(srmmu_set_mmureg)
+
+/* void srmmu_set_ctable_ptr(unsigned long paddr) */
+ENTRY(srmmu_set_ctable_ptr)
+       /* paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); */
+       srl     %o0, 4, %g1
+       and     %g1, SRMMU_CTX_PMASK, %g1
+
+       mov     SRMMU_CTXTBL_PTR, %g2
+LEON_PI(sta    %g1, [%g2] ASI_LEON_MMUREGS)
+SUN_PI_(sta    %g1, [%g2] ASI_M_MMUREGS)
+       retl
+        nop
+ENDPROC(srmmu_set_ctable_ptr)
+
+
+/* void srmmu_set_context(int context) */
+ENTRY(srmmu_set_context)
+       mov     SRMMU_CTX_REG, %g1
+LEON_PI(sta    %o0, [%g1] ASI_LEON_MMUREGS)
+SUN_PI_(sta    %o0, [%g1] ASI_M_MMUREGS)
+       retl
+        nop
+ENDPROC(srmmu_set_context)
+
+
+/* int srmmu_get_context(void) */
+ENTRY(srmmu_get_context)
+       mov     SRMMU_CTX_REG, %o0
+LEON_PI(lda     [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda    [%o0] ASI_M_MMUREGS, %o0)
+       retl
+        nop
+ENDPROC(srmmu_get_context)
+
+
+/* unsigned int srmmu_get_fstatus(void) */
+ENTRY(srmmu_get_fstatus)
+       mov     SRMMU_FAULT_STATUS, %o0
+LEON_PI(lda     [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda    [%o0] ASI_M_MMUREGS, %o0)
+       retl
+        nop
+ENDPROC(srmmu_get_fstatus)
+
+
+/* unsigned int srmmu_get_faddr(void) */
+ENTRY(srmmu_get_faddr)
+       mov     SRMMU_FAULT_ADDR, %o0
+LEON_PI(lda     [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda    [%o0] ASI_M_MMUREGS, %o0)
+       retl
+        nop
+ENDPROC(srmmu_get_faddr)
index 6ad6219fc47e0bf4a0751ec862dffa2a6a115fd3..fe128816c448a5f593f419c8b695461bebcd74ec 100644 (file)
@@ -48,6 +48,14 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
 config SYS_SUPPORTS_HUGETLBFS
        def_bool y
 
+# Support for additional huge page sizes besides HPAGE_SIZE.
+# The software support is currently only present in the TILE-Gx
+# hypervisor. TILEPro in any case does not support page sizes
+# larger than the default HPAGE_SIZE.
+config HUGETLB_SUPER_PAGES
+       depends on HUGETLB_PAGE && TILEGX
+       def_bool y
+
 # FIXME: tilegx can implement a more efficient rwsem.
 config RWSEM_GENERIC_SPINLOCK
        def_bool y
@@ -107,16 +115,14 @@ config HVC_TILE
        select HVC_DRIVER
        def_bool y
 
-# Please note: TILE-Gx support is not yet finalized; this is
-# the preliminary support.  TILE-Gx drivers are only provided
-# with the alpha or beta test versions for Tilera customers.
 config TILEGX
-       depends on EXPERIMENTAL
        bool "Building with TILE-Gx (64-bit) compiler and toolchain"
 
+config TILEPRO
+       def_bool !TILEGX
+
 config 64BIT
-       depends on TILEGX
-       def_bool y
+       def_bool TILEGX
 
 config ARCH_DEFCONFIG
        string
@@ -137,6 +143,31 @@ config NR_CPUS
          smaller kernel memory footprint results from using a smaller
          value on chips with fewer tiles.
 
+if TILEGX
+
+choice
+       prompt "Kernel page size"
+       default PAGE_SIZE_64KB
+       help
+         This lets you select the page size of the kernel.  For best
+         performance on memory-intensive applications, a page size of 64KB
+         is recommended.  For workloads involving many small files, many
+         connections, etc., it may be better to select 16KB, which uses
+         memory more efficiently at some cost in TLB performance.
+
+         Note that this option is TILE-Gx specific; currently
+         TILEPro page size is set by rebuilding the hypervisor.
+
+config PAGE_SIZE_16KB
+       bool "16KB"
+
+config PAGE_SIZE_64KB
+       bool "64KB"
+
+endchoice
+
+endif
+
 source "kernel/Kconfig.hz"
 
 config KEXEC
index 9520bc5a4b7f65f9904882185267d797885ab7f5..e20b0a0b64a17cf6cf6e796a57b5cecdbad0e871 100644 (file)
@@ -34,7 +34,12 @@ LIBGCC_PATH     := \
   $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
 
 # Provide the path to use for "make defconfig".
-KBUILD_DEFCONFIG := $(ARCH)_defconfig
+# We default to the newer TILE-Gx architecture if only "tile" is given.
+ifeq ($(ARCH),tile)
+        KBUILD_DEFCONFIG := tilegx_defconfig
+else
+        KBUILD_DEFCONFIG := $(ARCH)_defconfig
+endif
 
 # Used as a file extension when useful, e.g. head_$(BITS).o
 # Not needed for (e.g.) "$(CC) -m32" since the compiler automatically
index bbc1f4c924eebe0c9d0183bf5119468297e430a1..78bbce2fb19a6017ed6ea33459929f75d11fee34 100644 (file)
 #define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
 #define SPR_EX_CONTEXT_2_1__ICS_MASK  0x4
 #define SPR_FAIL 0x4e09
+#define SPR_IDN_AVAIL_EN 0x3e05
+#define SPR_IDN_CA_DATA 0x0b00
+#define SPR_IDN_DATA_AVAIL 0x0b03
+#define SPR_IDN_DEADLOCK_TIMEOUT 0x3406
+#define SPR_IDN_DEMUX_CA_COUNT 0x0a05
+#define SPR_IDN_DEMUX_COUNT_0 0x0a06
+#define SPR_IDN_DEMUX_COUNT_1 0x0a07
+#define SPR_IDN_DEMUX_CTL 0x0a08
+#define SPR_IDN_DEMUX_QUEUE_SEL 0x0a0a
+#define SPR_IDN_DEMUX_STATUS 0x0a0b
+#define SPR_IDN_DEMUX_WRITE_FIFO 0x0a0c
+#define SPR_IDN_DIRECTION_PROTECT 0x2e05
+#define SPR_IDN_PENDING 0x0a0e
+#define SPR_IDN_REFILL_EN 0x0e05
+#define SPR_IDN_SP_FIFO_DATA 0x0a0f
+#define SPR_IDN_SP_FIFO_SEL 0x0a10
+#define SPR_IDN_SP_FREEZE 0x0a11
+#define SPR_IDN_SP_FREEZE__SP_FRZ_MASK  0x1
+#define SPR_IDN_SP_FREEZE__DEMUX_FRZ_MASK  0x2
+#define SPR_IDN_SP_FREEZE__NON_DEST_EXT_MASK  0x4
+#define SPR_IDN_SP_STATE 0x0a12
+#define SPR_IDN_TAG_0 0x0a13
+#define SPR_IDN_TAG_1 0x0a14
+#define SPR_IDN_TAG_VALID 0x0a15
+#define SPR_IDN_TILE_COORD 0x0a16
 #define SPR_INTCTRL_0_STATUS 0x4a07
 #define SPR_INTCTRL_1_STATUS 0x4807
 #define SPR_INTCTRL_2_STATUS 0x4607
 #define SPR_INTERRUPT_MASK_SET_1_1 0x480e
 #define SPR_INTERRUPT_MASK_SET_2_0 0x460c
 #define SPR_INTERRUPT_MASK_SET_2_1 0x460d
+#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x6000
+#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x6001
+#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x6002
 #define SPR_MPL_DMA_CPL_SET_0 0x5800
 #define SPR_MPL_DMA_CPL_SET_1 0x5801
 #define SPR_MPL_DMA_CPL_SET_2 0x5802
 #define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
 #define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
 #define SPR_MPL_DMA_NOTIFY_SET_2 0x3802
+#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
+#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
+#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
+#define SPR_MPL_IDN_AVAIL_SET_0 0x3e00
+#define SPR_MPL_IDN_AVAIL_SET_1 0x3e01
+#define SPR_MPL_IDN_AVAIL_SET_2 0x3e02
+#define SPR_MPL_IDN_CA_SET_0 0x3a00
+#define SPR_MPL_IDN_CA_SET_1 0x3a01
+#define SPR_MPL_IDN_CA_SET_2 0x3a02
+#define SPR_MPL_IDN_COMPLETE_SET_0 0x1200
+#define SPR_MPL_IDN_COMPLETE_SET_1 0x1201
+#define SPR_MPL_IDN_COMPLETE_SET_2 0x1202
+#define SPR_MPL_IDN_FIREWALL_SET_0 0x2e00
+#define SPR_MPL_IDN_FIREWALL_SET_1 0x2e01
+#define SPR_MPL_IDN_FIREWALL_SET_2 0x2e02
+#define SPR_MPL_IDN_REFILL_SET_0 0x0e00
+#define SPR_MPL_IDN_REFILL_SET_1 0x0e01
+#define SPR_MPL_IDN_REFILL_SET_2 0x0e02
+#define SPR_MPL_IDN_TIMER_SET_0 0x3400
+#define SPR_MPL_IDN_TIMER_SET_1 0x3401
+#define SPR_MPL_IDN_TIMER_SET_2 0x3402
 #define SPR_MPL_INTCTRL_0_SET_0 0x4a00
 #define SPR_MPL_INTCTRL_0_SET_1 0x4a01
 #define SPR_MPL_INTCTRL_0_SET_2 0x4a02
 #define SPR_MPL_INTCTRL_2_SET_0 0x4600
 #define SPR_MPL_INTCTRL_2_SET_1 0x4601
 #define SPR_MPL_INTCTRL_2_SET_2 0x4602
+#define SPR_MPL_PERF_COUNT_SET_0 0x4200
+#define SPR_MPL_PERF_COUNT_SET_1 0x4201
+#define SPR_MPL_PERF_COUNT_SET_2 0x4202
 #define SPR_MPL_SN_ACCESS_SET_0 0x0800
 #define SPR_MPL_SN_ACCESS_SET_1 0x0801
 #define SPR_MPL_SN_ACCESS_SET_2 0x0802
 #define SPR_UDN_DEMUX_STATUS 0x0c0d
 #define SPR_UDN_DEMUX_WRITE_FIFO 0x0c0e
 #define SPR_UDN_DIRECTION_PROTECT 0x3005
+#define SPR_UDN_PENDING 0x0c10
 #define SPR_UDN_REFILL_EN 0x1005
 #define SPR_UDN_SP_FIFO_DATA 0x0c11
 #define SPR_UDN_SP_FIFO_SEL 0x0c12
 #define SPR_UDN_TAG_3 0x0c18
 #define SPR_UDN_TAG_VALID 0x0c19
 #define SPR_UDN_TILE_COORD 0x0c1a
+#define SPR_WATCH_CTL 0x4209
+#define SPR_WATCH_MASK 0x420a
+#define SPR_WATCH_VAL 0x420b
 
 #endif /* !defined(__ARCH_SPR_DEF_H__) */
 
index cd3e5f95d5fd1b5adca1e1a01bb965154643a13f..0da86faa33707e88d8a477eb373613a9ee2a1c18 100644 (file)
 #define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
 #define SPR_EX_CONTEXT_2_1__ICS_MASK  0x4
 #define SPR_FAIL 0x2707
+#define SPR_IDN_AVAIL_EN 0x1a05
+#define SPR_IDN_DATA_AVAIL 0x0a80
+#define SPR_IDN_DEADLOCK_TIMEOUT 0x1806
+#define SPR_IDN_DEMUX_COUNT_0 0x0a05
+#define SPR_IDN_DEMUX_COUNT_1 0x0a06
+#define SPR_IDN_DIRECTION_PROTECT 0x1405
+#define SPR_IDN_PENDING 0x0a08
 #define SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK 0x1
 #define SPR_INTCTRL_0_STATUS 0x2505
 #define SPR_INTCTRL_1_STATUS 0x2405
 #define SPR_IPI_MASK_SET_0 0x1f0a
 #define SPR_IPI_MASK_SET_1 0x1e0a
 #define SPR_IPI_MASK_SET_2 0x1d0a
+#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x2100
+#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x2101
+#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x2102
 #define SPR_MPL_AUX_TILE_TIMER_SET_0 0x1700
 #define SPR_MPL_AUX_TILE_TIMER_SET_1 0x1701
 #define SPR_MPL_AUX_TILE_TIMER_SET_2 0x1702
+#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
+#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
+#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
+#define SPR_MPL_IDN_AVAIL_SET_0 0x1a00
+#define SPR_MPL_IDN_AVAIL_SET_1 0x1a01
+#define SPR_MPL_IDN_AVAIL_SET_2 0x1a02
+#define SPR_MPL_IDN_COMPLETE_SET_0 0x0500
+#define SPR_MPL_IDN_COMPLETE_SET_1 0x0501
+#define SPR_MPL_IDN_COMPLETE_SET_2 0x0502
+#define SPR_MPL_IDN_FIREWALL_SET_0 0x1400
+#define SPR_MPL_IDN_FIREWALL_SET_1 0x1401
+#define SPR_MPL_IDN_FIREWALL_SET_2 0x1402
+#define SPR_MPL_IDN_TIMER_SET_0 0x1800
+#define SPR_MPL_IDN_TIMER_SET_1 0x1801
+#define SPR_MPL_IDN_TIMER_SET_2 0x1802
 #define SPR_MPL_INTCTRL_0_SET_0 0x2500
 #define SPR_MPL_INTCTRL_0_SET_1 0x2501
 #define SPR_MPL_INTCTRL_0_SET_2 0x2502
 #define SPR_MPL_INTCTRL_2_SET_0 0x2300
 #define SPR_MPL_INTCTRL_2_SET_1 0x2301
 #define SPR_MPL_INTCTRL_2_SET_2 0x2302
+#define SPR_MPL_IPI_0 0x1f04
+#define SPR_MPL_IPI_0_SET_0 0x1f00
+#define SPR_MPL_IPI_0_SET_1 0x1f01
+#define SPR_MPL_IPI_0_SET_2 0x1f02
+#define SPR_MPL_IPI_1 0x1e04
+#define SPR_MPL_IPI_1_SET_0 0x1e00
+#define SPR_MPL_IPI_1_SET_1 0x1e01
+#define SPR_MPL_IPI_1_SET_2 0x1e02
+#define SPR_MPL_IPI_2 0x1d04
+#define SPR_MPL_IPI_2_SET_0 0x1d00
+#define SPR_MPL_IPI_2_SET_1 0x1d01
+#define SPR_MPL_IPI_2_SET_2 0x1d02
+#define SPR_MPL_PERF_COUNT_SET_0 0x2000
+#define SPR_MPL_PERF_COUNT_SET_1 0x2001
+#define SPR_MPL_PERF_COUNT_SET_2 0x2002
 #define SPR_MPL_UDN_ACCESS_SET_0 0x0b00
 #define SPR_MPL_UDN_ACCESS_SET_1 0x0b01
 #define SPR_MPL_UDN_ACCESS_SET_2 0x0b02
 #define SPR_UDN_DEMUX_COUNT_2 0x0b07
 #define SPR_UDN_DEMUX_COUNT_3 0x0b08
 #define SPR_UDN_DIRECTION_PROTECT 0x1505
+#define SPR_UDN_PENDING 0x0b0a
+#define SPR_WATCH_MASK 0x200a
+#define SPR_WATCH_VAL 0x200b
 
 #endif /* !defined(__ARCH_SPR_DEF_H__) */
 
index 0bb42642343a2a9b0ef8cc40e2fdca86777f455b..143473e3a0bbae936da9347cafe91c53cad61304 100644 (file)
@@ -2,6 +2,7 @@ include include/asm-generic/Kbuild.asm
 
 header-y += ../arch/
 
+header-y += cachectl.h
 header-y += ucontext.h
 header-y += hardwall.h
 
@@ -21,7 +22,6 @@ generic-y += ipcbuf.h
 generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += local.h
-generic-y += module.h
 generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += param.h
index 54d1da826f93dfbf167aaa3c219f143783d91902..e7fb5cfb9597be376b6d35e3c7acabea07e0806d 100644 (file)
@@ -303,7 +303,14 @@ void __init_atomic_per_cpu(void);
 void __atomic_fault_unlock(int *lock_ptr);
 #endif
 
+/* Return a pointer to the lock for the given address. */
+int *__atomic_hashed_lock(volatile void *v);
+
 /* Private helper routines in lib/atomic_asm_32.S */
+struct __get_user {
+       unsigned long val;
+       int err;
+};
 extern struct __get_user __atomic_cmpxchg(volatile int *p,
                                          int *lock, int o, int n);
 extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
@@ -319,6 +326,9 @@ extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
 extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
                                      int *lock, u64 o, u64 n);
 
+/* Return failure from the atomic wrappers. */
+struct __get_user __atomic_bad_address(int __user *addr);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_TILE_ATOMIC_32_H */
index 16f1fa51fea13de4139c5a18c2d10088b8e395c1..bd186c4eaa505947299f0a1d98de1dbe9b9102f6 100644 (file)
@@ -77,6 +77,11 @@ static inline int ffs(int x)
        return __builtin_ffs(x);
 }
 
+static inline int fls64(__u64 w)
+{
+       return (sizeof(__u64) * 8) - __builtin_clzll(w);
+}
+
 /**
  * fls - find last set bit in word
  * @x: the word to search
@@ -90,12 +95,7 @@ static inline int ffs(int x)
  */
 static inline int fls(int x)
 {
-       return (sizeof(int) * 8) - __builtin_clz(x);
-}
-
-static inline int fls64(__u64 w)
-{
-       return (sizeof(__u64) * 8) - __builtin_clzll(w);
+       return fls64((unsigned int) x);
 }
 
 static inline unsigned int __arch_hweight32(unsigned int w)
index 9558416d578b58f939e1969c268fe58da6c19331..fb72ecf49218a4e6b9e5bd6fe93261e2565f61d2 100644 (file)
@@ -1 +1,21 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#if defined (__BIG_ENDIAN__)
+#include <linux/byteorder/big_endian.h>
+#elif defined (__LITTLE_ENDIAN__)
 #include <linux/byteorder/little_endian.h>
+#else
+#error "__BIG_ENDIAN__ or __LITTLE_ENDIAN__ must be defined."
+#endif
diff --git a/arch/tile/include/asm/cachectl.h b/arch/tile/include/asm/cachectl.h
new file mode 100644 (file)
index 0000000..af4c9f9
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _ASM_TILE_CACHECTL_H
+#define _ASM_TILE_CACHECTL_H
+
+/*
+ * Options for cacheflush system call.
+ *
+ * The ICACHE flush is performed on all cores currently running the
+ * current process's address space.  The intent is for user
+ * applications to be able to modify code, invoke the system call,
+ * then allow arbitrary other threads in the same address space to see
+ * the newly-modified code.  Passing a length of CHIP_L1I_CACHE_SIZE()
+ * or more invalidates the entire icache on all cores in the address
+ * spaces.  (Note: currently this option invalidates the entire icache
+ * regardless of the requested address and length, but we may choose
+ * to honor the arguments at some point.)
+ *
+ * Flush and invalidation of memory can normally be performed with the
+ * __insn_flush(), __insn_inv(), and __insn_finv() instructions from
+ * userspace.  The DCACHE option to the system call allows userspace
+ * to flush the entire L1+L2 data cache from the core.  In this case,
+ * the address and length arguments are not used.  The DCACHE flush is
+ * restricted to the current core, not all cores in the address space.
+ */
+#define        ICACHE  (1<<0)          /* invalidate L1 instruction cache */
+#define        DCACHE  (1<<1)          /* flush and invalidate data cache */
+#define        BCACHE  (ICACHE|DCACHE) /* flush both caches               */
+
+#endif /* _ASM_TILE_CACHECTL_H */
index 4b4b28969a65266f0840f8e25774908054d70ca2..6e74450ff0a110afc32901e273d74a77c80f8ca4 100644 (file)
@@ -44,7 +44,6 @@ typedef __kernel_uid32_t __compat_gid32_t;
 typedef __kernel_mode_t compat_mode_t;
 typedef __kernel_dev_t compat_dev_t;
 typedef __kernel_loff_t compat_loff_t;
-typedef __kernel_nlink_t compat_nlink_t;
 typedef __kernel_ipc_pid_t compat_ipc_pid_t;
 typedef __kernel_daddr_t compat_daddr_t;
 typedef __kernel_fsid_t        compat_fsid_t;
@@ -242,9 +241,6 @@ long compat_sys_fallocate(int fd, int mode,
 long compat_sys_sched_rr_get_interval(compat_pid_t pid,
                                      struct compat_timespec __user *interval);
 
-/* Tilera Linux syscalls that don't have "compat" versions. */
-#define compat_sys_flush_cache sys_flush_cache
-
 /* These are the intvec_64.S trampolines. */
 long _compat_sys_execve(const char __user *path,
                        const compat_uptr_t __user *argv,
index 623a6bb741c11ca3787d3da91d187c4a8f33e530..d16d006d660e20307783eb63695c1faf5b46af46 100644 (file)
@@ -44,7 +44,11 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 #else
 #define ELF_CLASS      ELFCLASS32
 #endif
+#ifdef __BIG_ENDIAN__
+#define ELF_DATA       ELFDATA2MSB
+#else
 #define ELF_DATA       ELFDATA2LSB
+#endif
 
 /*
  * There seems to be a bug in how compat_binfmt_elf.c works: it
@@ -59,6 +63,7 @@ enum { ELF_ARCH = CHIP_ELF_TYPE() };
  */
 #define elf_check_arch(x)  \
        ((x)->e_ident[EI_CLASS] == ELF_CLASS && \
+        (x)->e_ident[EI_DATA] == ELF_DATA && \
         (x)->e_machine == CHIP_ELF_TYPE())
 
 /* The module loader only handles a few relocation types. */
index d03ec124a598bc4b8d287ee9ed028289f07d3cc9..5909ac3d7218348c1c7d6f043cb6c56e0236b2db 100644 (file)
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <linux/errno.h>
+#include <asm/atomic.h>
 
-extern struct __get_user futex_set(u32 __user *v, int i);
-extern struct __get_user futex_add(u32 __user *v, int n);
-extern struct __get_user futex_or(u32 __user *v, int n);
-extern struct __get_user futex_andn(u32 __user *v, int n);
-extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);
+/*
+ * Support macros for futex operations.  Do not use these macros directly.
+ * They assume "ret", "val", "oparg", and "uaddr" in the lexical context.
+ * __futex_cmpxchg() additionally assumes "oldval".
+ */
+
+#ifdef __tilegx__
+
+#define __futex_asm(OP) \
+       asm("1: {" #OP " %1, %3, %4; movei %0, 0 }\n"           \
+           ".pushsection .fixup,\"ax\"\n"                      \
+           "0: { movei %0, %5; j 9f }\n"                       \
+           ".section __ex_table,\"a\"\n"                       \
+           ".quad 1b, 0b\n"                                    \
+           ".popsection\n"                                     \
+           "9:"                                                \
+           : "=r" (ret), "=r" (val), "+m" (*(uaddr))           \
+           : "r" (uaddr), "r" (oparg), "i" (-EFAULT))
+
+#define __futex_set() __futex_asm(exch4)
+#define __futex_add() __futex_asm(fetchadd4)
+#define __futex_or() __futex_asm(fetchor4)
+#define __futex_andn() ({ oparg = ~oparg; __futex_asm(fetchand4); })
+#define __futex_cmpxchg() \
+       ({ __insn_mtspr(SPR_CMPEXCH_VALUE, oldval); __futex_asm(cmpexch4); })
+
+#define __futex_xor()                                          \
+       ({                                                      \
+               u32 oldval, n = oparg;                          \
+               if ((ret = __get_user(oldval, uaddr)) == 0) {   \
+                       do {                                    \
+                               oparg = oldval ^ n;             \
+                               __futex_cmpxchg();              \
+                       } while (ret == 0 && oldval != val);    \
+               }                                               \
+       })
+
+/* No need to prefetch, since the atomic ops go to the home cache anyway. */
+#define __futex_prolog()
 
-#ifndef __tilegx__
-extern struct __get_user futex_xor(u32 __user *v, int n);
 #else
-static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
-{
-       struct __get_user asm_ret = __get_user_4(uaddr);
-       if (!asm_ret.err) {
-               int oldval, newval;
-               do {
-                       oldval = asm_ret.val;
-                       newval = oldval ^ n;
-                       asm_ret = futex_cmpxchg(uaddr, oldval, newval);
-               } while (asm_ret.err == 0 && oldval != asm_ret.val);
+
+#define __futex_call(FN)                                               \
+       {                                                               \
+               struct __get_user gu = FN((u32 __force *)uaddr, lock, oparg); \
+               val = gu.val;                                           \
+               ret = gu.err;                                           \
        }
-       return asm_ret;
-}
+
+#define __futex_set() __futex_call(__atomic_xchg)
+#define __futex_add() __futex_call(__atomic_xchg_add)
+#define __futex_or() __futex_call(__atomic_or)
+#define __futex_andn() __futex_call(__atomic_andn)
+#define __futex_xor() __futex_call(__atomic_xor)
+
+#define __futex_cmpxchg()                                              \
+       {                                                               \
+               struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
+                                                       lock, oldval, oparg); \
+               val = gu.val;                                           \
+               ret = gu.err;                                           \
+       }
+
+/*
+ * Find the lock pointer for the atomic calls to use, and issue a
+ * prefetch to the user address to bring it into cache.  Similar to
+ * __atomic_setup(), but we can't do a read into the L1 since it might
+ * fault; instead we do a prefetch into the L2.
+ */
+#define __futex_prolog()                                       \
+       int *lock;                                              \
+       __insn_prefetch(uaddr);                                 \
+       lock = __atomic_hashed_lock((int __force *)uaddr)
 #endif
 
 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
@@ -59,8 +111,12 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
        int cmp = (encoded_op >> 24) & 15;
        int oparg = (encoded_op << 8) >> 20;
        int cmparg = (encoded_op << 20) >> 20;
-       int ret;
-       struct __get_user asm_ret;
+       int uninitialized_var(val), ret;
+
+       __futex_prolog();
+
+       /* The 32-bit futex code makes this assumption, so validate it here. */
+       BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
 
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
@@ -71,46 +127,45 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
        pagefault_disable();
        switch (op) {
        case FUTEX_OP_SET:
-               asm_ret = futex_set(uaddr, oparg);
+               __futex_set();
                break;
        case FUTEX_OP_ADD:
-               asm_ret = futex_add(uaddr, oparg);
+               __futex_add();
                break;
        case FUTEX_OP_OR:
-               asm_ret = futex_or(uaddr, oparg);
+               __futex_or();
                break;
        case FUTEX_OP_ANDN:
-               asm_ret = futex_andn(uaddr, oparg);
+               __futex_andn();
                break;
        case FUTEX_OP_XOR:
-               asm_ret = futex_xor(uaddr, oparg);
+               __futex_xor();
                break;
        default:
-               asm_ret.err = -ENOSYS;
+               ret = -ENOSYS;
+               break;
        }
        pagefault_enable();
 
-       ret = asm_ret.err;
-
        if (!ret) {
                switch (cmp) {
                case FUTEX_OP_CMP_EQ:
-                       ret = (asm_ret.val == cmparg);
+                       ret = (val == cmparg);
                        break;
                case FUTEX_OP_CMP_NE:
-                       ret = (asm_ret.val != cmparg);
+                       ret = (val != cmparg);
                        break;
                case FUTEX_OP_CMP_LT:
-                       ret = (asm_ret.val < cmparg);
+                       ret = (val < cmparg);
                        break;
                case FUTEX_OP_CMP_GE:
-                       ret = (asm_ret.val >= cmparg);
+                       ret = (val >= cmparg);
                        break;
                case FUTEX_OP_CMP_LE:
-                       ret = (asm_ret.val <= cmparg);
+                       ret = (val <= cmparg);
                        break;
                case FUTEX_OP_CMP_GT:
-                       ret = (asm_ret.val > cmparg);
+                       ret = (val > cmparg);
                        break;
                default:
                        ret = -ENOSYS;
@@ -120,22 +175,20 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 }
 
 static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-                                               u32 oldval, u32 newval)
+                                               u32 oldval, u32 oparg)
 {
-       struct __get_user asm_ret;
+       int ret, val;
+
+       __futex_prolog();
 
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
-       asm_ret = futex_cmpxchg(uaddr, oldval, newval);
-       *uval = asm_ret.val;
-       return asm_ret.err;
-}
+       __futex_cmpxchg();
 
-#ifndef __tilegx__
-/* Return failure from the atomic wrappers. */
-struct __get_user __atomic_bad_address(int __user *addr);
-#endif
+       *uval = val;
+       return ret;
+}
 
 #endif /* !__ASSEMBLY__ */
 
index 2ac422848c7d2ee8b0125c60f2848799f53bf57c..47514a58d68559a86fef6a0b7de1fa63c3fbc126 100644 (file)
  *   NON INFRINGEMENT.  See the GNU General Public License for
  *   more details.
  *
- * Provide methods for the HARDWALL_FILE for accessing the UDN.
+ * Provide methods for access control of per-cpu resources like
+ * UDN, IDN, or IPI.
  */
 
 #ifndef _ASM_TILE_HARDWALL_H
 #define _ASM_TILE_HARDWALL_H
 
+#include <arch/chip.h>
 #include <linux/ioctl.h>
 
 #define HARDWALL_IOCTL_BASE 0xa2
@@ -24,8 +26,9 @@
 /*
  * The HARDWALL_CREATE() ioctl is a macro with a "size" argument.
  * The resulting ioctl value is passed to the kernel in conjunction
- * with a pointer to a little-endian bitmask of cpus, which must be
- * physically in a rectangular configuration on the chip.
+ * with a pointer to a standard kernel bitmask of cpus.
+ * For network resources (UDN or IDN) the bitmask must physically
+ * represent a rectangular configuration on the chip.
  * The "size" is the number of bytes of cpu mask data.
  */
 #define _HARDWALL_CREATE 1
 #define HARDWALL_GET_ID \
  _IO(HARDWALL_IOCTL_BASE, _HARDWALL_GET_ID)
 
-#ifndef __KERNEL__
-
-/* This is the canonical name expected by userspace. */
-#define HARDWALL_FILE "/dev/hardwall"
-
-#else
-
+#ifdef __KERNEL__
 /* /proc hooks for hardwall. */
 struct proc_dir_entry;
 #ifdef CONFIG_HARDWALL
@@ -59,7 +56,6 @@ int proc_pid_hardwall(struct task_struct *task, char *buffer);
 #else
 static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {}
 #endif
-
 #endif
 
 #endif /* _ASM_TILE_HARDWALL_H */
index d396d180516399159223cc7bcbaad150836ef577..b2042380a5aab580011e54c436fc24ccc02e9366 100644 (file)
@@ -106,4 +106,25 @@ static inline void arch_release_hugepage(struct page *page)
 {
 }
 
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+                                      struct page *page, int writable)
+{
+       size_t pagesize = huge_page_size(hstate_vma(vma));
+       if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)
+               entry = pte_mksuper(entry);
+       return entry;
+}
+#define arch_make_huge_pte arch_make_huge_pte
+
+/* Sizes to scale up page size for PTEs with HV_PTE_SUPER bit. */
+enum {
+       HUGE_SHIFT_PGDIR = 0,
+       HUGE_SHIFT_PMD = 1,
+       HUGE_SHIFT_PAGE = 2,
+       HUGE_SHIFT_ENTRIES
+};
+extern int huge_shift[HUGE_SHIFT_ENTRIES];
+#endif
+
 #endif /* _ASM_TILE_HUGETLB_H */
index 5db0ce54284d6dbe39a0e1d103934df659615417..b4e96fef2cf8edc0b4931af8edcce502c7bebd4f 100644 (file)
  */
 #if CHIP_HAS_AUX_PERF_COUNTERS()
 #define LINUX_MASKABLE_INTERRUPTS_HI \
-       (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
+       (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
 #else
 #define LINUX_MASKABLE_INTERRUPTS_HI \
-       (~(INT_MASK_HI(INT_PERF_COUNT)))
+       (~(INT_MASK_HI(INT_PERF_COUNT)))
 #endif
 
 #else
        __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
        __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
+#define interrupt_mask_save_mask() \
+       (__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
+        (((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
+#define interrupt_mask_restore_mask(mask) do { \
+       unsigned long long __m = (mask); \
+       __insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
+       __insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
+} while (0)
 #else
 #define interrupt_mask_set(n) \
        __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
        __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
 #define interrupt_mask_reset_mask(mask) \
        __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
+#define interrupt_mask_save_mask() \
+       __insn_mfspr(SPR_INTERRUPT_MASK_K)
+#define interrupt_mask_restore_mask(mask) \
+       __insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
 #endif
 
 /*
@@ -122,7 +134,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Disable all interrupts, including NMIs. */
 #define arch_local_irq_disable_all() \
-       interrupt_mask_set_mask(-1UL)
+       interrupt_mask_set_mask(-1ULL)
 
 /* Re-enable all maskable interrupts. */
 #define arch_local_irq_enable() \
@@ -179,7 +191,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 #ifdef __tilegx__
 
 #if INT_MEM_ERROR != 0
-# error Fix IRQ_DISABLED() macro
+# error Fix IRQS_DISABLED() macro
 #endif
 
 /* Return 0 or 1 to indicate whether interrupts are currently disabled. */
@@ -207,9 +219,10 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
        mtspr   SPR_INTERRUPT_MASK_SET_K, tmp
 
 /* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1)                                 \
+#define IRQ_ENABLE_LOAD(tmp0, tmp1)                            \
        GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);                  \
-       ld      tmp0, tmp0;                                     \
+       ld      tmp0, tmp0
+#define IRQ_ENABLE_APPLY(tmp0, tmp1)                           \
        mtspr   SPR_INTERRUPT_MASK_RESET_K, tmp0
 
 #else /* !__tilegx__ */
@@ -253,17 +266,22 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
        mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp
 
 /* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1)                                 \
+#define IRQ_ENABLE_LOAD(tmp0, tmp1)                            \
        GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);                  \
        {                                                       \
         lw     tmp0, tmp0;                                     \
         addi   tmp1, tmp0, 4                                   \
        };                                                      \
-       lw      tmp1, tmp1;                                     \
+       lw      tmp1, tmp1
+#define IRQ_ENABLE_APPLY(tmp0, tmp1)                           \
        mtspr   SPR_INTERRUPT_MASK_RESET_K_0, tmp0;             \
        mtspr   SPR_INTERRUPT_MASK_RESET_K_1, tmp1
 #endif
 
+#define IRQ_ENABLE(tmp0, tmp1)                                 \
+       IRQ_ENABLE_LOAD(tmp0, tmp1);                            \
+       IRQ_ENABLE_APPLY(tmp0, tmp1)
+
 /*
  * Do the CPU's IRQ-state tracing from assembly code. We call a
  * C function, but almost everywhere we do, we don't mind clobbering
index c11a6cc73bb85b5f94359fa8eebb961088d4b7e2..fc98ccfc98ac46ea46c9434951c3db22c3a33490 100644 (file)
 
 #include <asm/page.h>
 
+#ifndef __tilegx__
 /* Maximum physical address we can use pages from. */
 #define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE
 /* Maximum address we can reach in physical address mode. */
 #define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE
 /* Maximum address we can use for the control code buffer. */
 #define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+#else
+/* We need to limit the memory below PGDIR_SIZE since
+ * we only setup page table for [0, PGDIR_SIZE) before final kexec.
+ */
+/* Maximum physical address we can use pages from. */
+#define KEXEC_SOURCE_MEMORY_LIMIT PGDIR_SIZE
+/* Maximum address we can reach in physical address mode. */
+#define KEXEC_DESTINATION_MEMORY_LIMIT PGDIR_SIZE
+/* Maximum address we can use for the control code buffer. */
+#define KEXEC_CONTROL_MEMORY_LIMIT PGDIR_SIZE
+#endif
 
 #define KEXEC_CONTROL_PAGE_SIZE        PAGE_SIZE
 
diff --git a/arch/tile/include/asm/kvm_para.h b/arch/tile/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index 92f94c77b6e449af092ad105580ba9a11227cc89..e2c789096795222970d1906f708c828b1e3b5d1a 100644 (file)
@@ -21,7 +21,7 @@ struct mm_context {
         * Written under the mmap_sem semaphore; read without the
         * semaphore but atomically, but it is conservatively set.
         */
-       unsigned int priority_cached;
+       unsigned long priority_cached;
 };
 
 typedef struct mm_context mm_context_t;
index 15fb246411202d1ba1547a07132ff6bf2198f8fd..37f0b741dee796f2b37b5b3ce1771c55c1037fd2 100644 (file)
@@ -30,11 +30,15 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
        return 0;
 }
 
-/* Note that arch/tile/kernel/head.S also calls hv_install_context() */
+/*
+ * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
+ * also call hv_install_context().
+ */
 static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
 {
        /* FIXME: DIRECTIO should not always be set. FIXME. */
-       int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO);
+       int rc = hv_install_context(__pa(pgdir), prot, asid,
+                                   HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
        if (rc < 0)
                panic("hv_install_context failed: %d", rc);
 }
diff --git a/arch/tile/include/asm/module.h b/arch/tile/include/asm/module.h
new file mode 100644 (file)
index 0000000..44ed07c
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _ASM_TILE_MODULE_H
+#define _ASM_TILE_MODULE_H
+
+#include <arch/chip.h>
+
+#include <asm-generic/module.h>
+
+/* We can't use modules built with different page sizes. */
+#if defined(CONFIG_PAGE_SIZE_16KB)
+# define MODULE_PGSZ " 16KB"
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+# define MODULE_PGSZ " 64KB"
+#else
+# define MODULE_PGSZ ""
+#endif
+
+/* We don't really support no-SMP so tag if someone tries. */
+#ifdef CONFIG_SMP
+#define MODULE_NOSMP ""
+#else
+#define MODULE_NOSMP " nosmp"
+#endif
+
+#define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP
+
+#endif /* _ASM_TILE_MODULE_H */
index db93518fac033ff373671549ed7c7da23688ce80..9d9131e5c5529fbb67aee9d19d9dcab6e81f6c68 100644 (file)
 #include <arch/chip.h>
 
 /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
-#define PAGE_SHIFT     HV_LOG2_PAGE_SIZE_SMALL
-#define HPAGE_SHIFT    HV_LOG2_PAGE_SIZE_LARGE
+#if defined(CONFIG_PAGE_SIZE_16KB)
+#define PAGE_SHIFT     14
+#define CTX_PAGE_FLAG  HV_CTX_PG_SM_16K
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PAGE_SHIFT     16
+#define CTX_PAGE_FLAG  HV_CTX_PG_SM_64K
+#else
+#define PAGE_SHIFT     HV_LOG2_DEFAULT_PAGE_SIZE_SMALL
+#define CTX_PAGE_FLAG  0
+#endif
+#define HPAGE_SHIFT    HV_LOG2_DEFAULT_PAGE_SIZE_LARGE
 
 #define PAGE_SIZE      (_AC(1, UL) << PAGE_SHIFT)
 #define HPAGE_SIZE     (_AC(1, UL) << HPAGE_SHIFT)
@@ -78,8 +87,7 @@ typedef HV_PTE pgprot_t;
 /*
  * User L2 page tables are managed as one L2 page table per page,
  * because we use the page allocator for them.  This keeps the allocation
- * simple and makes it potentially useful to implement HIGHPTE at some point.
- * However, it's also inefficient, since L2 page tables are much smaller
+ * simple, but it's also inefficient, since L2 page tables are much smaller
  * than pages (currently 2KB vs 64KB).  So we should revisit this.
  */
 typedef struct page *pgtable_t;
@@ -128,7 +136,7 @@ static inline __attribute_const__ int get_order(unsigned long size)
 
 #define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
 
-#define HUGE_MAX_HSTATE                2
+#define HUGE_MAX_HSTATE                6
 
 #ifdef CONFIG_HUGETLB_PAGE
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
index e919c0bdc22d7bc020a6f4b15b8d41d60a8fc8ee..1b902508b664d16f95985203e3cf6fc3d8a5ff21 100644 (file)
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <asm/fixmap.h>
+#include <asm/page.h>
 #include <hv/hypervisor.h>
 
 /* Bits for the size of the second-level page table. */
-#define L2_KERNEL_PGTABLE_SHIFT \
-  (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE)
+#define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
+
+/* How big is a kernel L2 page table? */
+#define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT)
 
 /* We currently allocate user L2 page tables by page (unlike kernel L2s). */
-#if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL
-#define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
+#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
 #else
 #define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
 #endif
 
 /* How many pages do we need, as an "order", for a user L2 page table? */
-#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL)
-
-/* How big is a kernel L2 page table? */
-#define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT)
+#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
@@ -50,14 +50,14 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 static inline void pmd_populate_kernel(struct mm_struct *mm,
                                       pmd_t *pmd, pte_t *ptep)
 {
-       set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN,
+       set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)),
                              __pgprot(_PAGE_PRESENT)));
 }
 
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                pgtable_t page)
 {
-       set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)),
+       set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))),
                              __pgprot(_PAGE_PRESENT)));
 }
 
@@ -68,8 +68,20 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
-extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
-extern void pte_free(struct mm_struct *mm, struct page *pte);
+extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+                                  int order);
+extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+                                     unsigned long address)
+{
+       return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+       pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
+}
 
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
@@ -85,8 +97,13 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
        pte_free(mm, virt_to_page(pte));
 }
 
-extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
-                          unsigned long address);
+extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+                              unsigned long address, int order);
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
+                                 unsigned long address)
+{
+       __pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
+}
 
 #define check_pgt_cache()      do { } while (0)
 
@@ -104,19 +121,44 @@ void shatter_pmd(pmd_t *pmd);
 void shatter_huge_page(unsigned long addr);
 
 #ifdef __tilegx__
-/* We share a single page allocator for both L1 and L2 page tables. */
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-#define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER
+
 #define pud_populate(mm, pud, pmd) \
   pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
-#define pmd_alloc_one(mm, addr) \
-  ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr))))
-#define pmd_free(mm, pmdp) \
-  pte_free((mm), virt_to_page(pmdp))
-#define __pmd_free_tlb(tlb, pmdp, address) \
-  __pte_free_tlb((tlb), virt_to_page(pmdp), (address))
+
+/* Bits for the size of the L1 (intermediate) page table. */
+#define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT)
+
+/* How big is a kernel L2 page table? */
+#define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT)
+
+/* We currently allocate L1 page tables by page. */
+#if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
+#else
+#define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT
 #endif
 
+/* How many pages do we need, as an "order", for an L1 page table? */
+#define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+       struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER);
+       return (pmd_t *)page_to_virt(p);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
+{
+       pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER);
+}
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
+                                 unsigned long address)
+{
+       __pgtable_free_tlb(tlb, virt_to_page(pmdp), address,
+                          L1_USER_PGTABLE_ORDER);
+}
+
+#endif /* __tilegx__ */
+
 #endif /* _ASM_TILE_PGALLOC_H */
index 67490910774d38af80cbd3ffc84e01bffd2abece..73b1a4c9ad03f22c218f40896cbb2fdacf3142ce 100644 (file)
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
+#include <linux/pfn.h>
 #include <asm/processor.h>
 #include <asm/fixmap.h>
+#include <asm/page.h>
 
 struct mm_struct;
 struct vm_area_struct;
@@ -69,6 +71,7 @@ extern void set_page_homes(void);
 
 #define _PAGE_PRESENT           HV_PTE_PRESENT
 #define _PAGE_HUGE_PAGE         HV_PTE_PAGE
+#define _PAGE_SUPER_PAGE        HV_PTE_SUPER
 #define _PAGE_READABLE          HV_PTE_READABLE
 #define _PAGE_WRITABLE          HV_PTE_WRITABLE
 #define _PAGE_EXECUTABLE        HV_PTE_EXECUTABLE
@@ -85,6 +88,7 @@ extern void set_page_homes(void);
 #define _PAGE_ALL (\
   _PAGE_PRESENT | \
   _PAGE_HUGE_PAGE | \
+  _PAGE_SUPER_PAGE | \
   _PAGE_READABLE | \
   _PAGE_WRITABLE | \
   _PAGE_EXECUTABLE | \
@@ -162,7 +166,7 @@ extern void set_page_homes(void);
   (pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }
 
 /* Just setting the PFN to zero suffices. */
-#define pte_pgprot(x) hv_pte_set_pfn((x), 0)
+#define pte_pgprot(x) hv_pte_set_pa((x), 0)
 
 /*
  * For PTEs and PDEs, we must clear the Present bit first when
@@ -187,6 +191,7 @@ static inline void __pte_clear(pte_t *ptep)
  * Undefined behaviour if not..
  */
 #define pte_present hv_pte_get_present
+#define pte_mknotpresent hv_pte_clear_present
 #define pte_user hv_pte_get_user
 #define pte_read hv_pte_get_readable
 #define pte_dirty hv_pte_get_dirty
@@ -194,6 +199,7 @@ static inline void __pte_clear(pte_t *ptep)
 #define pte_write hv_pte_get_writable
 #define pte_exec hv_pte_get_executable
 #define pte_huge hv_pte_get_page
+#define pte_super hv_pte_get_super
 #define pte_rdprotect hv_pte_clear_readable
 #define pte_exprotect hv_pte_clear_executable
 #define pte_mkclean hv_pte_clear_dirty
@@ -206,6 +212,7 @@ static inline void __pte_clear(pte_t *ptep)
 #define pte_mkyoung hv_pte_set_accessed
 #define pte_mkwrite hv_pte_set_writable
 #define pte_mkhuge hv_pte_set_page
+#define pte_mksuper hv_pte_set_super
 
 #define pte_special(pte) 0
 #define pte_mkspecial(pte) (pte)
@@ -261,7 +268,7 @@ static inline int pte_none(pte_t pte)
 
 static inline unsigned long pte_pfn(pte_t pte)
 {
-       return hv_pte_get_pfn(pte);
+       return PFN_DOWN(hv_pte_get_pa(pte));
 }
 
 /* Set or get the remote cache cpu in a pgprot with remote caching. */
@@ -270,7 +277,7 @@ extern int get_remote_cache_cpu(pgprot_t prot);
 
 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
 {
-       return hv_pte_set_pfn(prot, pfn);
+       return hv_pte_set_pa(prot, PFN_PHYS(pfn));
 }
 
 /* Support for priority mappings. */
@@ -312,7 +319,7 @@ extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);
  */
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-       return pfn_pte(hv_pte_get_pfn(pte), newprot);
+       return pfn_pte(pte_pfn(pte), newprot);
 }
 
 /*
@@ -335,13 +342,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
-#if defined(CONFIG_HIGHPTE)
-extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
-#define pte_unmap(pte) kunmap_atomic(pte)
-#else
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
 #define pte_unmap(pte) do { } while (0)
-#endif
 
 /* Clear a non-executable kernel PTE and flush it from the TLB. */
 #define kpte_clear_flush(ptep, vaddr)          \
@@ -410,6 +412,46 @@ static inline unsigned long pmd_index(unsigned long address)
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
 }
 
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+                                           unsigned long address,
+                                           pmd_t *pmdp)
+{
+       return ptep_test_and_clear_young(vma, address, pmdp_ptep(pmdp));
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+                                     unsigned long address, pmd_t *pmdp)
+{
+       ptep_set_wrprotect(mm, address, pmdp_ptep(pmdp));
+}
+
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+                                      unsigned long address,
+                                      pmd_t *pmdp)
+{
+       return pte_pmd(ptep_get_and_clear(mm, address, pmdp_ptep(pmdp)));
+}
+
+static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+       set_pte(pmdp_ptep(pmdp), pmd_pte(pmdval));
+}
+
+#define set_pmd_at(mm, addr, pmdp, pmdval) __set_pmd(pmdp, pmdval)
+
+/* Create a pmd from a PTFN. */
+static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
+{
+       return pte_pmd(hv_pte_set_ptfn(prot, ptfn));
+}
+
+/* Return the page-table frame number (ptfn) that a pmd_t points at. */
+#define pmd_ptfn(pmd) hv_pte_get_ptfn(pmd_pte(pmd))
+
 /*
  * A given kernel pmd_t maps to a specific virtual address (either a
  * kernel huge page or a kernel pte_t table).  Since kernel pte_t
@@ -430,7 +472,48 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
  * OK for pte_lockptr(), since we just end up with potentially one
  * lock being used for several pte_t arrays.
  */
-#define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd)))
+#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+       __pte_clear(pmdp_ptep(pmdp));
+}
+
+#define pmd_mknotpresent(pmd)  pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
+#define pmd_young(pmd)         pte_young(pmd_pte(pmd))
+#define pmd_mkyoung(pmd)       pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkold(pmd)         pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd)       pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_write(pmd)         pte_write(pmd_pte(pmd))
+#define pmd_wrprotect(pmd)     pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_huge_page(pmd)     pte_huge(pmd_pte(pmd))
+#define pmd_mkhuge(pmd)                pte_pmd(pte_mkhuge(pmd_pte(pmd)))
+#define __HAVE_ARCH_PMD_WRITE
+
+#define pfn_pmd(pfn, pgprot)   pte_pmd(pfn_pte((pfn), (pgprot)))
+#define pmd_pfn(pmd)           pte_pfn(pmd_pte(pmd))
+#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+       return pfn_pmd(pmd_pfn(pmd), newprot);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#define pmd_trans_huge pmd_huge_page
+
+static inline pmd_t pmd_mksplitting(pmd_t pmd)
+{
+       return pte_pmd(hv_pte_set_client2(pmd_pte(pmd)));
+}
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+       return hv_pte_get_client2(pmd_pte(pmd));
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
  * The pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
@@ -448,17 +531,13 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
 }
 
-static inline int pmd_huge_page(pmd_t pmd)
-{
-       return pmd_val(pmd) & _PAGE_HUGE_PAGE;
-}
-
 #include <asm-generic/pgtable.h>
 
 /* Support /proc/NN/pgtable API. */
 struct seq_file;
 int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
-                          unsigned long vaddr, pte_t *ptep, void **datap);
+                          unsigned long vaddr, unsigned long pagesize,
+                          pte_t *ptep, void **datap);
 
 #endif /* !__ASSEMBLY__ */
 
index 9f98529761fd67b4191525f13fdbb2ed4b5b72ae..4ce4a7a99c244c5546f37465f6ebceafbb55173e 100644 (file)
  * The level-1 index is defined by the huge page size.  A PGD is composed
  * of PTRS_PER_PGD pgd_t's and is the top level of the page table.
  */
-#define PGDIR_SHIFT    HV_LOG2_PAGE_SIZE_LARGE
-#define PGDIR_SIZE     HV_PAGE_SIZE_LARGE
+#define PGDIR_SHIFT    HPAGE_SHIFT
+#define PGDIR_SIZE     HPAGE_SIZE
 #define PGDIR_MASK     (~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD   (1 << (32 - PGDIR_SHIFT))
-#define SIZEOF_PGD     (PTRS_PER_PGD * sizeof(pgd_t))
+#define PTRS_PER_PGD   _HV_L1_ENTRIES(HPAGE_SHIFT)
+#define PGD_INDEX(va)  _HV_L1_INDEX(va, HPAGE_SHIFT)
+#define SIZEOF_PGD     _HV_L1_SIZE(HPAGE_SHIFT)
 
 /*
  * The level-2 index is defined by the difference between the huge
@@ -33,8 +34,9 @@
  * Note that the hypervisor docs use PTE for what we call pte_t, so
  * this nomenclature is somewhat confusing.
  */
-#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
-#define SIZEOF_PTE     (PTRS_PER_PTE * sizeof(pte_t))
+#define PTRS_PER_PTE   _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
+#define PTE_INDEX(va)  _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
+#define SIZEOF_PTE     _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
 
 #ifndef __ASSEMBLY__
 
@@ -111,24 +113,14 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
        return pte;
 }
 
-static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
-       set_pte(&pmdp->pud.pgd, pmdval.pud.pgd);
-}
-
-/* Create a pmd from a PTFN. */
-static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
-{
-       return (pmd_t){ { hv_pte_set_ptfn(prot, ptfn) } };
-}
-
-/* Return the page-table frame number (ptfn) that a pmd_t points at. */
-#define pmd_ptfn(pmd) hv_pte_get_ptfn((pmd).pud.pgd)
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
-       __pte_clear(&pmdp->pud.pgd);
-}
+/*
+ * pmds are wrappers around pgds, which are the same as ptes.
+ * It's often convenient to "cast" back and forth and use the pte methods,
+ * which are the methods supplied by the hypervisor.
+ */
+#define pmd_pte(pmd) ((pmd).pud.pgd)
+#define pmdp_ptep(pmdp) (&(pmdp)->pud.pgd)
+#define pte_pmd(pte) ((pmd_t){ { (pte) } })
 
 #endif /* __ASSEMBLY__ */
 
index fd80328523b47d9e253dbf90ce8d420ac395ad54..2492fa5478e74077d7dfe60c588a01b6c8083867 100644 (file)
 #define PGDIR_SIZE     HV_L1_SPAN
 #define PGDIR_MASK     (~(PGDIR_SIZE-1))
 #define PTRS_PER_PGD   HV_L0_ENTRIES
-#define SIZEOF_PGD     (PTRS_PER_PGD * sizeof(pgd_t))
+#define PGD_INDEX(va)  HV_L0_INDEX(va)
+#define SIZEOF_PGD     HV_L0_SIZE
 
 /*
  * The level-1 index is defined by the huge page size.  A PMD is composed
  * of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
  */
-#define PMD_SHIFT      HV_LOG2_PAGE_SIZE_LARGE
-#define PMD_SIZE       HV_PAGE_SIZE_LARGE
+#define PMD_SHIFT      HPAGE_SHIFT
+#define PMD_SIZE       HPAGE_SIZE
 #define PMD_MASK       (~(PMD_SIZE-1))
-#define PTRS_PER_PMD   (1 << (PGDIR_SHIFT - PMD_SHIFT))
-#define SIZEOF_PMD     (PTRS_PER_PMD * sizeof(pmd_t))
+#define PTRS_PER_PMD   _HV_L1_ENTRIES(HPAGE_SHIFT)
+#define PMD_INDEX(va)  _HV_L1_INDEX(va, HPAGE_SHIFT)
+#define SIZEOF_PMD     _HV_L1_SIZE(HPAGE_SHIFT)
 
 /*
  * The level-2 index is defined by the difference between the huge
  * Note that the hypervisor docs use PTE for what we call pte_t, so
  * this nomenclature is somewhat confusing.
  */
-#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
-#define SIZEOF_PTE     (PTRS_PER_PTE * sizeof(pte_t))
+#define PTRS_PER_PTE   _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
+#define PTE_INDEX(va)  _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
+#define SIZEOF_PTE     _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
 
 /*
- * Align the vmalloc area to an L2 page table, and leave a guard page
- * at the beginning and end.  The vmalloc code also puts in an internal
+ * Align the vmalloc area to an L2 page table.  Omit guard pages at
+ * the beginning and end for simplicity (particularly in the per-cpu
+ * memory allocation code).  The vmalloc code puts in an internal
  * guard page between each allocation.
  */
 #define _VMALLOC_END   HUGE_VMAP_BASE
-#define VMALLOC_END    (_VMALLOC_END - PAGE_SIZE)
-#define VMALLOC_START  (_VMALLOC_START + PAGE_SIZE)
+#define VMALLOC_END    _VMALLOC_END
+#define VMALLOC_START  _VMALLOC_START
 
 #define HUGE_VMAP_END  (HUGE_VMAP_BASE + PGDIR_SIZE)
 
@@ -98,7 +102,7 @@ static inline int pud_bad(pud_t pud)
  * A pud_t points to a pmd_t array.  Since we can have multiple per
  * page, we don't have a one-to-one mapping of pud_t's to pages.
  */
-#define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud)))
+#define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud))))
 
 static inline unsigned long pud_index(unsigned long address)
 {
@@ -108,28 +112,6 @@ static inline unsigned long pud_index(unsigned long address)
 #define pmd_offset(pud, address) \
        ((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
 
-static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
-       set_pte(pmdp, pmdval);
-}
-
-/* Create a pmd from a PTFN and pgprot. */
-static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
-{
-       return hv_pte_set_ptfn(prot, ptfn);
-}
-
-/* Return the page-table frame number (ptfn) that a pmd_t points at. */
-static inline unsigned long pmd_ptfn(pmd_t pmd)
-{
-       return hv_pte_get_ptfn(pmd);
-}
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
-       __pte_clear(pmdp);
-}
-
 /* Normalize an address to having the correct high bits set. */
 #define pgd_addr_normalize pgd_addr_normalize
 static inline unsigned long pgd_addr_normalize(unsigned long addr)
@@ -170,6 +152,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
        return hv_pte(__insn_exch(&ptep->val, 0UL));
 }
 
+/*
+ * pmds are the same as pgds and ptes, so converting is a no-op.
+ */
+#define pmd_pte(pmd) (pmd)
+#define pmdp_ptep(pmdp) (pmdp)
+#define pte_pmd(pte) (pte)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_TILE_PGTABLE_64_H */
index 15cd8a4a06ce1dc62170444629edc7c17d9da2ab..8c4dd9ff91eb91745e8918506375f2d4cb10d588 100644 (file)
@@ -76,6 +76,17 @@ struct async_tlb {
 
 #ifdef CONFIG_HARDWALL
 struct hardwall_info;
+struct hardwall_task {
+       /* Which hardwall is this task tied to? (or NULL if none) */
+       struct hardwall_info *info;
+       /* Chains this task into the list at info->task_head. */
+       struct list_head list;
+};
+#ifdef __tilepro__
+#define HARDWALL_TYPES 1   /* udn */
+#else
+#define HARDWALL_TYPES 3   /* udn, idn, and ipi */
+#endif
 #endif
 
 struct thread_struct {
@@ -116,10 +127,8 @@ struct thread_struct {
        unsigned long dstream_pf;
 #endif
 #ifdef CONFIG_HARDWALL
-       /* Is this task tied to an activated hardwall? */
-       struct hardwall_info *hardwall;
-       /* Chains this task into the list at hardwall->list. */
-       struct list_head hardwall_list;
+       /* Hardwall information for various resources. */
+       struct hardwall_task hardwall[HARDWALL_TYPES];
 #endif
 #if CHIP_HAS_TILE_DMA()
        /* Async DMA TLB fault information */
index e58613e0752f65712dca236588747e0e893e426c..c67eb70ea78e94fb08410897630a96228a2e3456 100644 (file)
@@ -41,15 +41,15 @@ void restrict_dma_mpls(void);
 #ifdef CONFIG_HARDWALL
 /* User-level network management functions */
 void reset_network_state(void);
-void grant_network_mpls(void);
-void restrict_network_mpls(void);
 struct task_struct;
-int hardwall_deactivate(struct task_struct *task);
+void hardwall_switch_tasks(struct task_struct *prev, struct task_struct *next);
+void hardwall_deactivate_all(struct task_struct *task);
+int hardwall_ipi_valid(int cpu);
 
 /* Hook hardwall code into changes in affinity. */
 #define arch_set_cpus_allowed(p, new_mask) do { \
-       if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
-               hardwall_deactivate(p); \
+       if (!cpumask_equal(&p->cpus_allowed, new_mask)) \
+               hardwall_deactivate_all(p); \
 } while (0)
 #endif
 
index 3b5507c31eae592a850c8c8ff8851bb14b49682a..06f0464cfed941b3b15903749871b2f02005546a 100644 (file)
@@ -43,7 +43,8 @@ long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
                     u32 len, int advice);
 int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
                       u32 len_lo, u32 len_hi, int advice);
-long sys_flush_cache(void);
+long sys_cacheflush(unsigned long addr, unsigned long len,
+                   unsigned long flags);
 #ifndef __tilegx__  /* No mmap() in the 32-bit kernel. */
 #define sys_mmap sys_mmap
 #endif
index 656c486e64fafd424863fabe0f66d52a36e5c243..7e1fef36bde6651f2f199115919c887e494448f4 100644 (file)
@@ -166,7 +166,23 @@ static inline void set_restore_sigmask(void)
 {
        struct thread_info *ti = current_thread_info();
        ti->status |= TS_RESTORE_SIGMASK;
-       set_bit(TIF_SIGPENDING, &ti->flags);
+       WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+       return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       if (!(ti->status & TS_RESTORE_SIGMASK))
+               return false;
+       ti->status &= ~TS_RESTORE_SIGMASK;
+       return true;
 }
 #endif /* !__ASSEMBLY__ */
 
index 96199d214fb82d55db10d83ff5ef9fddf214a602..dcf91b25a1e58345c772a04dfd568db302b569cd 100644 (file)
@@ -38,16 +38,11 @@ DECLARE_PER_CPU(int, current_asid);
 /* The hypervisor tells us what ASIDs are available to us. */
 extern int min_asid, max_asid;
 
-static inline unsigned long hv_page_size(const struct vm_area_struct *vma)
-{
-       return (vma->vm_flags & VM_HUGETLB) ? HPAGE_SIZE : PAGE_SIZE;
-}
-
 /* Pass as vma pointer for non-executable mapping, if no vma available. */
-#define FLUSH_NONEXEC ((const struct vm_area_struct *)-1UL)
+#define FLUSH_NONEXEC ((struct vm_area_struct *)-1UL)
 
 /* Flush a single user page on this cpu. */
-static inline void local_flush_tlb_page(const struct vm_area_struct *vma,
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
                                        unsigned long addr,
                                        unsigned long page_size)
 {
@@ -60,7 +55,7 @@ static inline void local_flush_tlb_page(const struct vm_area_struct *vma,
 }
 
 /* Flush range of user pages on this cpu. */
-static inline void local_flush_tlb_pages(const struct vm_area_struct *vma,
+static inline void local_flush_tlb_pages(struct vm_area_struct *vma,
                                         unsigned long addr,
                                         unsigned long page_size,
                                         unsigned long len)
@@ -117,10 +112,10 @@ extern void flush_tlb_all(void);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void flush_tlb_current_task(void);
 extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(const struct vm_area_struct *, unsigned long);
-extern void flush_tlb_page_mm(const struct vm_area_struct *,
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_page_mm(struct vm_area_struct *,
                              struct mm_struct *, unsigned long);
-extern void flush_tlb_range(const struct vm_area_struct *,
+extern void flush_tlb_range(struct vm_area_struct *,
                            unsigned long start, unsigned long end);
 
 #define flush_tlb()     flush_tlb_current_task()
index ef34d2caa5b1b6205775da2dfe352274569a0e73..c3dd275f25e2f0304d4719358d42d692f046175b 100644 (file)
@@ -114,45 +114,75 @@ struct exception_table_entry {
 extern int fixup_exception(struct pt_regs *regs);
 
 /*
- * We return the __get_user_N function results in a structure,
- * thus in r0 and r1.  If "err" is zero, "val" is the result
- * of the read; otherwise, "err" is -EFAULT.
- *
- * We rarely need 8-byte values on a 32-bit architecture, but
- * we size the structure to accommodate.  In practice, for the
- * the smaller reads, we can zero the high word for free, and
- * the caller will ignore it by virtue of casting anyway.
+ * Support macros for __get_user().
+ *
+ * Implementation note: The "case 8" logic of casting to the type of
+ * the result of subtracting the value from itself is basically a way
+ * of keeping all integer types the same, but casting any pointers to
+ * ptrdiff_t, i.e. also an integer type.  This way there are no
+ * questionable casts seen by the compiler on an ILP32 platform.
+ *
+ * Note that __get_user() and __put_user() assume proper alignment.
  */
-struct __get_user {
-       unsigned long long val;
-       int err;
-};
 
-/*
- * FIXME: we should express these as inline extended assembler, since
- * they're fundamentally just a variable dereference and some
- * supporting exception_table gunk.  Note that (a la i386) we can
- * extend the copy_to_user and copy_from_user routines to call into
- * such extended assembler routines, though we will have to use a
- * different return code in that case (1, 2, or 4, rather than -EFAULT).
- */
-extern struct __get_user __get_user_1(const void __user *);
-extern struct __get_user __get_user_2(const void __user *);
-extern struct __get_user __get_user_4(const void __user *);
-extern struct __get_user __get_user_8(const void __user *);
-extern int __put_user_1(long, void __user *);
-extern int __put_user_2(long, void __user *);
-extern int __put_user_4(long, void __user *);
-extern int __put_user_8(long long, void __user *);
-
-/* Unimplemented routines to cause linker failures */
-extern struct __get_user __get_user_bad(void);
-extern int __put_user_bad(void);
+#ifdef __LP64__
+#define _ASM_PTR       ".quad"
+#else
+#define _ASM_PTR       ".long"
+#endif
+
+#define __get_user_asm(OP, x, ptr, ret)                                        \
+       asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"              \
+                    ".pushsection .fixup,\"ax\"\n"                     \
+                    "0: { movei %1, 0; movei %0, %3 }\n"               \
+                    "j 9f\n"                                           \
+                    ".section __ex_table,\"a\"\n"                      \
+                    _ASM_PTR " 1b, 0b\n"                               \
+                    ".popsection\n"                                    \
+                    "9:"                                               \
+                    : "=r" (ret), "=r" (x)                             \
+                    : "r" (ptr), "i" (-EFAULT))
+
+#ifdef __tilegx__
+#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
+#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
+#define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret)
+#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
+#else
+#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
+#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
+#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
+#ifdef __LITTLE_ENDIAN
+#define __lo32(a, b) a
+#define __hi32(a, b) b
+#else
+#define __lo32(a, b) b
+#define __hi32(a, b) a
+#endif
+#define __get_user_8(x, ptr, ret)                                      \
+       ({                                                              \
+               unsigned int __a, __b;                                  \
+               asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n"       \
+                            "2: { lw %2, %2; movei %0, 0 }\n"          \
+                            ".pushsection .fixup,\"ax\"\n"             \
+                            "0: { movei %1, 0; movei %2, 0 }\n"        \
+                            "{ movei %0, %4; j 9f }\n"                 \
+                            ".section __ex_table,\"a\"\n"              \
+                            ".word 1b, 0b\n"                           \
+                            ".word 2b, 0b\n"                           \
+                            ".popsection\n"                            \
+                            "9:"                                       \
+                            : "=r" (ret), "=r" (__a), "=&r" (__b)      \
+                            : "r" (ptr), "i" (-EFAULT));               \
+               (x) = (__typeof(x))(__typeof((x)-(x)))                  \
+                       (((u64)__hi32(__a, __b) << 32) |                \
+                        __lo32(__a, __b));                             \
+       })
+#endif
+
+extern int __get_user_bad(void)
+  __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));
 
-/*
- * Careful: we have to cast the result to the type of the pointer
- * for sign reasons.
- */
 /**
  * __get_user: - Get a simple variable from user space, with less checking.
  * @x:   Variable to store result.
@@ -174,30 +204,62 @@ extern int __put_user_bad(void);
  * function.
  */
 #define __get_user(x, ptr)                                             \
-({     struct __get_user __ret;                                        \
-       __typeof__(*(ptr)) const __user *__gu_addr = (ptr);             \
-       __chk_user_ptr(__gu_addr);                                      \
-       switch (sizeof(*(__gu_addr))) {                                 \
-       case 1:                                                         \
-               __ret = __get_user_1(__gu_addr);                        \
-               break;                                                  \
-       case 2:                                                         \
-               __ret = __get_user_2(__gu_addr);                        \
-               break;                                                  \
-       case 4:                                                         \
-               __ret = __get_user_4(__gu_addr);                        \
-               break;                                                  \
-       case 8:                                                         \
-               __ret = __get_user_8(__gu_addr);                        \
-               break;                                                  \
-       default:                                                        \
-               __ret = __get_user_bad();                               \
-               break;                                                  \
-       }                                                               \
-       (x) = (__typeof__(*__gu_addr)) (__typeof__(*__gu_addr - *__gu_addr)) \
-         __ret.val;                                                    \
-       __ret.err;                                                      \
-})
+       ({                                                              \
+               int __ret;                                              \
+               __chk_user_ptr(ptr);                                    \
+               switch (sizeof(*(ptr))) {                               \
+               case 1: __get_user_1(x, ptr, __ret); break;             \
+               case 2: __get_user_2(x, ptr, __ret); break;             \
+               case 4: __get_user_4(x, ptr, __ret); break;             \
+               case 8: __get_user_8(x, ptr, __ret); break;             \
+               default: __ret = __get_user_bad(); break;               \
+               }                                                       \
+               __ret;                                                  \
+       })
+
+/* Support macros for __put_user(). */
+
+#define __put_user_asm(OP, x, ptr, ret)                        \
+       asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"              \
+                    ".pushsection .fixup,\"ax\"\n"                     \
+                    "0: { movei %0, %3; j 9f }\n"                      \
+                    ".section __ex_table,\"a\"\n"                      \
+                    _ASM_PTR " 1b, 0b\n"                               \
+                    ".popsection\n"                                    \
+                    "9:"                                               \
+                    : "=r" (ret)                                       \
+                    : "r" (ptr), "r" (x), "i" (-EFAULT))
+
+#ifdef __tilegx__
+#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
+#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
+#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
+#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
+#else
+#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
+#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
+#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
+#define __put_user_8(x, ptr, ret)                                      \
+       ({                                                              \
+               u64 __x = (__typeof((x)-(x)))(x);                       \
+               int __lo = (int) __x, __hi = (int) (__x >> 32);         \
+               asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n"       \
+                            "2: { sw %0, %3; movei %0, 0 }\n"          \
+                            ".pushsection .fixup,\"ax\"\n"             \
+                            "0: { movei %0, %4; j 9f }\n"              \
+                            ".section __ex_table,\"a\"\n"              \
+                            ".word 1b, 0b\n"                           \
+                            ".word 2b, 0b\n"                           \
+                            ".popsection\n"                            \
+                            "9:"                                       \
+                            : "=&r" (ret)                              \
+                            : "r" (ptr), "r" (__lo32(__lo, __hi)),     \
+                            "r" (__hi32(__lo, __hi)), "i" (-EFAULT));  \
+       })
+#endif
+
+extern int __put_user_bad(void)
+  __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));
 
 /**
  * __put_user: - Write a simple value into user space, with less checking.
@@ -217,39 +279,19 @@ extern int __put_user_bad(void);
  * function.
  *
  * Returns zero on success, or -EFAULT on error.
- *
- * Implementation note: The "case 8" logic of casting to the type of
- * the result of subtracting the value from itself is basically a way
- * of keeping all integer types the same, but casting any pointers to
- * ptrdiff_t, i.e. also an integer type.  This way there are no
- * questionable casts seen by the compiler on an ILP32 platform.
  */
 #define __put_user(x, ptr)                                             \
 ({                                                                     \
-       int __pu_err = 0;                                               \
-       __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
-       typeof(*__pu_addr) __pu_val = (x);                              \
-       __chk_user_ptr(__pu_addr);                                      \
-       switch (sizeof(__pu_val)) {                                     \
-       case 1:                                                         \
-               __pu_err = __put_user_1((long)__pu_val, __pu_addr);     \
-               break;                                                  \
-       case 2:                                                         \
-               __pu_err = __put_user_2((long)__pu_val, __pu_addr);     \
-               break;                                                  \
-       case 4:                                                         \
-               __pu_err = __put_user_4((long)__pu_val, __pu_addr);     \
-               break;                                                  \
-       case 8:                                                         \
-               __pu_err =                                              \
-                 __put_user_8((__typeof__(__pu_val - __pu_val))__pu_val,\
-                       __pu_addr);                                     \
-               break;                                                  \
-       default:                                                        \
-               __pu_err = __put_user_bad();                            \
-               break;                                                  \
+       int __ret;                                                      \
+       __chk_user_ptr(ptr);                                            \
+       switch (sizeof(*(ptr))) {                                       \
+       case 1: __put_user_1(x, ptr, __ret); break;                     \
+       case 2: __put_user_2(x, ptr, __ret); break;                     \
+       case 4: __put_user_4(x, ptr, __ret); break;                     \
+       case 8: __put_user_8(x, ptr, __ret); break;                     \
+       default: __ret = __put_user_bad(); break;                       \
        }                                                               \
-       __pu_err;                                                       \
+       __ret;                                                          \
 })
 
 /*
@@ -378,7 +420,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
 /**
  * __copy_in_user() - copy data within user space, with less checking.
  * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
+ * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
  * Context: User context only.  This function may sleep.
index f70bf1c541f1e2b8f4572223c1b1ee34893269f1..a017246ca0cec75db546c5523830e36668fe206f 100644 (file)
@@ -24,8 +24,8 @@
 #include <asm-generic/unistd.h>
 
 /* Additional Tilera-specific syscalls. */
-#define __NR_flush_cache       (__NR_arch_specific_syscall + 1)
-__SYSCALL(__NR_flush_cache, sys_flush_cache)
+#define __NR_cacheflush        (__NR_arch_specific_syscall + 1)
+__SYSCALL(__NR_cacheflush, sys_cacheflush)
 
 #ifndef __tilegx__
 /* "Fast" syscalls provide atomic support for 32-bit chips. */
index f13188ac281a33061f83d63ee872eecf1dfbe2d5..2a20b266d94469d6329a7a41793ef15c07c53b2a 100644 (file)
@@ -460,7 +460,7 @@ typedef void* lepp_comp_t;
  *  linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for
  *  our page size of exactly 65536.  We add one for a "body" fragment.
  */
-#define LEPP_MAX_FRAGS (65536 / HV_PAGE_SIZE_SMALL + 2 + 1)
+#define LEPP_MAX_FRAGS (65536 / HV_DEFAULT_PAGE_SIZE_SMALL + 2 + 1)
 
 /** Total number of bytes needed for an lepp_tso_cmd_t. */
 #define LEPP_TSO_CMD_SIZE(num_frags, header_size) \
index 72ec1e972f15afb84746da32594e8d3b8bccdf6e..ccd847e2347f14b2f1c4b54e2605b30a6a99aa17 100644 (file)
@@ -17,8 +17,8 @@
  * The hypervisor's public API.
  */
 
-#ifndef _TILE_HV_H
-#define _TILE_HV_H
+#ifndef _HV_HV_H
+#define _HV_HV_H
 
 #include <arch/chip.h>
 
  */
 #define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN)
 
-/** The log2 of the size of small pages, in bytes. This value should
- * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
+/** The log2 of the initial size of small pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_SMALL.
  */
-#define HV_LOG2_PAGE_SIZE_SMALL 16
+#define HV_LOG2_DEFAULT_PAGE_SIZE_SMALL 16
 
-/** The size of small pages, in bytes. This value should be verified
+/** The initial size of small pages, in bytes. This value should be verified
  * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
+ * It may also be modified when installing a new context.
  */
-#define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL)
+#define HV_DEFAULT_PAGE_SIZE_SMALL \
+  (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_SMALL)
 
-/** The log2 of the size of large pages, in bytes. This value should be
- * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
+/** The log2 of the initial size of large pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_LARGE.
  */
-#define HV_LOG2_PAGE_SIZE_LARGE 24
+#define HV_LOG2_DEFAULT_PAGE_SIZE_LARGE 24
 
-/** The size of large pages, in bytes. This value should be verified
+/** The initial size of large pages, in bytes. This value should be verified
  * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
+ * It may also be modified when installing a new context.
  */
-#define HV_PAGE_SIZE_LARGE (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_LARGE)
+#define HV_DEFAULT_PAGE_SIZE_LARGE \
+  (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_LARGE)
+
+#if CHIP_VA_WIDTH() > 32
+
+/** The log2 of the initial size of jumbo pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_JUMBO.
+ */
+#define HV_LOG2_DEFAULT_PAGE_SIZE_JUMBO 32
+
+/** The initial size of jumbo pages, in bytes. This value should
+ * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO).
+ * It may also be modified when installing a new context.
+ */
+#define HV_DEFAULT_PAGE_SIZE_JUMBO \
+  (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_JUMBO)
+
+#endif
 
 /** The log2 of the granularity at which page tables must be aligned;
  *  in other words, the CPA for a page table must have this many zero
 #define HV_DISPATCH_GET_IPI_PTE                   56
 #endif
 
+/** hv_set_pte_super_shift */
+#define HV_DISPATCH_SET_PTE_SUPER_SHIFT           57
+
 /** One more than the largest dispatch value */
-#define _HV_DISPATCH_END                          57
+#define _HV_DISPATCH_END                          58
 
 
 #ifndef __ASSEMBLER__
@@ -401,7 +424,18 @@ typedef enum {
    *  that the temperature has hit an upper limit and is no longer being
    *  accurately tracked.
    */
-  HV_SYSCONF_BOARD_TEMP      = 6
+  HV_SYSCONF_BOARD_TEMP      = 6,
+
+  /** Legal page size bitmask for hv_install_context().
+   * For example, if 16KB and 64KB small pages are supported,
+   * it would return "HV_CTX_PG_SM_16K | HV_CTX_PG_SM_64K".
+   */
+  HV_SYSCONF_VALID_PAGE_SIZES = 7,
+
+  /** The size of jumbo pages, in bytes.
+   * If no jumbo pages are available, zero will be returned.
+   */
+  HV_SYSCONF_PAGE_SIZE_JUMBO = 8,
 
 } HV_SysconfQuery;
 
@@ -474,7 +508,19 @@ typedef enum {
   HV_CONFSTR_SWITCH_CONTROL  = 14,
 
   /** Chip revision level. */
-  HV_CONFSTR_CHIP_REV        = 15
+  HV_CONFSTR_CHIP_REV        = 15,
+
+  /** CPU module part number. */
+  HV_CONFSTR_CPUMOD_PART_NUM = 16,
+
+  /** CPU module serial number. */
+  HV_CONFSTR_CPUMOD_SERIAL_NUM = 17,
+
+  /** CPU module revision level. */
+  HV_CONFSTR_CPUMOD_REV      = 18,
+
+  /** Human-readable CPU module description. */
+  HV_CONFSTR_CPUMOD_DESC     = 19
 
 } HV_ConfstrQuery;
 
@@ -494,11 +540,16 @@ int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len);
 /** Tile coordinate */
 typedef struct
 {
+#ifndef __BIG_ENDIAN__
   /** X coordinate, relative to supervisor's top-left coordinate */
   int x;
 
   /** Y coordinate, relative to supervisor's top-left coordinate */
   int y;
+#else
+  int y;
+  int x;
+#endif
 } HV_Coord;
 
 
@@ -649,6 +700,12 @@ void hv_set_rtc(HV_RTCTime time);
  *  new page table does not need to contain any mapping for the
  *  hv_install_context address itself.
  *
+ *  At most one HV_CTX_PG_SM_* flag may be specified in "flags";
+ *  if multiple flags are specified, HV_EINVAL is returned.
+ *  Specifying none of the flags results in using the default page size.
+ *  All cores participating in a given client must request the same
+ *  page size, or the results are undefined.
+ *
  * @param page_table Root of the page table.
  * @param access PTE providing info on how to read the page table.  This
  *   value must be consistent between multiple tiles sharing a page table,
@@ -667,8 +724,36 @@ int hv_install_context(HV_PhysAddr page_table, HV_PTE access, HV_ASID asid,
 #define HV_CTX_DIRECTIO     0x1   /**< Direct I/O requests are accepted from
                                        PL0. */
 
+#define HV_CTX_PG_SM_4K     0x10  /**< Use 4K small pages, if available. */
+#define HV_CTX_PG_SM_16K    0x20  /**< Use 16K small pages, if available. */
+#define HV_CTX_PG_SM_64K    0x40  /**< Use 64K small pages, if available. */
+#define HV_CTX_PG_SM_MASK   0xf0  /**< Mask of all possible small pages. */
+
 #ifndef __ASSEMBLER__
 
+
+/** Set the number of pages ganged together by HV_PTE_SUPER at a
+ * particular level of the page table.
+ *
+ * The current TILE-Gx hardware only supports powers of four
+ * (i.e. log2_count must be a multiple of two), and the requested
+ * "super" page size must be less than the span of the next level in
+ * the page table.  The largest size that can be requested is 64GB.
+ *
+ * The shift value is initially "0" for all page table levels,
+ * indicating that the HV_PTE_SUPER bit is effectively ignored.
+ *
+ * If you change the count from one non-zero value to another, the
+ * hypervisor will flush the entire TLB and TSB to avoid confusion.
+ *
+ * @param level Page table level (0, 1, or 2)
+ * @param log2_count Base-2 log of the number of pages to gang together,
+ * i.e. how much to shift left the base page size for the super page size.
+ * @return Zero on success, or a hypervisor error code on failure.
+ */
+int hv_set_pte_super_shift(int level, int log2_count);
+
+
 /** Value returned from hv_inquire_context(). */
 typedef struct
 {
@@ -986,8 +1071,13 @@ HV_VirtAddrRange hv_inquire_virtual(int idx);
 /** A range of ASID values. */
 typedef struct
 {
+#ifndef __BIG_ENDIAN__
   HV_ASID start;        /**< First ASID in the range. */
   unsigned int size;    /**< Number of ASIDs. Zero for an invalid range. */
+#else
+  unsigned int size;    /**< Number of ASIDs. Zero for an invalid range. */
+  HV_ASID start;        /**< First ASID in the range. */
+#endif
 } HV_ASIDRange;
 
 /** Returns information about a range of ASIDs.
@@ -1238,11 +1328,14 @@ HV_Errno hv_set_command_line(HV_VirtAddr buf, int length);
  * with the existing priority pages) or "red/black" (if they don't).
  * The bitmask provides information on which parts of the cache
  * have been used for pinned pages so far on this tile; if (1 << N)
- * appears in the bitmask, that indicates that a page has been marked
- * "priority" whose PFN equals N, mod 8.
+ * appears in the bitmask, that indicates that a 4KB region of the
+ * cache starting at (N * 4KB) is in use by a "priority" page.
+ * The portion of cache used by a particular page can be computed
+ * by taking the page's PA, modulo CHIP_L2_CACHE_SIZE(), and setting
+ * all the "4KB" bits corresponding to the actual page size.
  * @param bitmask A bitmap of priority page set values
  */
-void hv_set_caching(unsigned int bitmask);
+void hv_set_caching(unsigned long bitmask);
 
 
 /** Zero out a specified number of pages.
@@ -1308,6 +1401,7 @@ typedef enum
 /** Message recipient. */
 typedef struct
 {
+#ifndef __BIG_ENDIAN__
   /** X coordinate, relative to supervisor's top-left coordinate */
   unsigned int x:11;
 
@@ -1316,6 +1410,11 @@ typedef struct
 
   /** Status of this recipient */
   HV_Recip_State state:10;
+#else //__BIG_ENDIAN__
+  HV_Recip_State state:10;
+  unsigned int y:11;
+  unsigned int x:11;
+#endif
 } HV_Recipient;
 
 /** Send a message to a set of recipients.
@@ -1851,12 +1950,12 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
 #define HV_PTE_INDEX_USER            10  /**< Page is user-accessible */
 #define HV_PTE_INDEX_ACCESSED        11  /**< Page has been accessed */
 #define HV_PTE_INDEX_DIRTY           12  /**< Page has been written */
-                                         /*   Bits 13-15 are reserved for
+                                         /*   Bits 13-14 are reserved for
                                               future use. */
+#define HV_PTE_INDEX_SUPER           15  /**< Pages ganged together for TLB */
 #define HV_PTE_INDEX_MODE            16  /**< Page mode; see HV_PTE_MODE_xxx */
 #define HV_PTE_MODE_BITS              3  /**< Number of bits in mode */
-                                         /*   Bit 19 is reserved for
-                                              future use. */
+#define HV_PTE_INDEX_CLIENT2         19  /**< Page client state 2 */
 #define HV_PTE_INDEX_LOTAR           20  /**< Page's LOTAR; must be high bits
                                               of word */
 #define HV_PTE_LOTAR_BITS            12  /**< Number of bits in a LOTAR */
@@ -1869,15 +1968,6 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
                                               of word */
 #define HV_PTE_PTFN_BITS             29  /**< Number of bits in a PTFN */
 
-/** Position of the PFN field within the PTE (subset of the PTFN). */
-#define HV_PTE_INDEX_PFN (HV_PTE_INDEX_PTFN + (HV_LOG2_PAGE_SIZE_SMALL - \
-                                               HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Length of the PFN field within the PTE (subset of the PTFN). */
-#define HV_PTE_INDEX_PFN_BITS (HV_PTE_INDEX_PTFN_BITS - \
-                               (HV_LOG2_PAGE_SIZE_SMALL - \
-                                HV_LOG2_PAGE_TABLE_ALIGN))
-
 /*
  * Legal values for the PTE's mode field
  */
@@ -1957,7 +2047,10 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
 
 /** Does this PTE map a page?
  *
- * If this bit is set in the level-1 page table, the entry should be
+ * If this bit is set in a level-0 page table, the entry should be
+ * interpreted as a level-2 page table entry mapping a jumbo page.
+ *
+ * If this bit is set in a level-1 page table, the entry should be
  * interpreted as a level-2 page table entry mapping a large page.
  *
  * This bit should not be modified by the client while PRESENT is set, as
@@ -1967,6 +2060,18 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
  */
 #define HV_PTE_PAGE                  (__HV_PTE_ONE << HV_PTE_INDEX_PAGE)
 
+/** Does this PTE implicitly reference multiple pages?
+ *
+ * If this bit is set in the page table (either in the level-2 page table,
+ * or in a higher level page table in conjunction with the PAGE bit)
+ * then the PTE specifies a range of contiguous pages, not a single page.
+ * The hv_set_pte_super_shift() allows you to specify the count for
+ * each level of the page table.
+ *
+ * Note: this bit is not supported on TILEPro systems.
+ */
+#define HV_PTE_SUPER                 (__HV_PTE_ONE << HV_PTE_INDEX_SUPER)
+
 /** Is this a global (non-ASID) mapping?
  *
  * If this bit is set, the translations established by this PTE will
@@ -2046,6 +2151,13 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
  */
 #define HV_PTE_CLIENT1               (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT1)
 
+/** Client-private bit in PTE.
+ *
+ * This bit is guaranteed not to be inspected or modified by the
+ * hypervisor.
+ */
+#define HV_PTE_CLIENT2               (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT2)
+
 /** Non-coherent (NC) bit in PTE.
  *
  * If this bit is set, the mapping that is set up will be non-coherent
@@ -2178,8 +2290,10 @@ hv_pte_clear_##name(HV_PTE pte)                                 \
  */
 _HV_BIT(present,         PRESENT)
 _HV_BIT(page,            PAGE)
+_HV_BIT(super,           SUPER)
 _HV_BIT(client0,         CLIENT0)
 _HV_BIT(client1,         CLIENT1)
+_HV_BIT(client2,         CLIENT2)
 _HV_BIT(migrating,       MIGRATING)
 _HV_BIT(nc,              NC)
 _HV_BIT(readable,        READABLE)
@@ -2222,40 +2336,11 @@ hv_pte_set_mode(HV_PTE pte, unsigned int val)
  *
  * This field contains the upper bits of the CPA (client physical
  * address) of the target page; the complete CPA is this field with
- * HV_LOG2_PAGE_SIZE_SMALL zero bits appended to it.
- *
- * For PTEs in a level-1 page table where the Page bit is set, the
- * CPA must be aligned modulo the large page size.
- */
-static __inline unsigned int
-hv_pte_get_pfn(const HV_PTE pte)
-{
-  return pte.val >> HV_PTE_INDEX_PFN;
-}
-
-
-/** Set the page frame number into a PTE.  See hv_pte_get_pfn. */
-static __inline HV_PTE
-hv_pte_set_pfn(HV_PTE pte, unsigned int val)
-{
-  /*
-   * Note that the use of "PTFN" in the next line is intentional; we
-   * don't want any garbage lower bits left in that field.
-   */
-  pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS) - 1) << HV_PTE_INDEX_PTFN);
-  pte.val |= (__hv64) val << HV_PTE_INDEX_PFN;
-  return pte;
-}
-
-/** Get the page table frame number from the PTE.
- *
- * This field contains the upper bits of the CPA (client physical
- * address) of the target page table; the complete CPA is this field with
- * with HV_PAGE_TABLE_ALIGN zero bits appended to it.
+ * HV_LOG2_PAGE_TABLE_ALIGN zero bits appended to it.
  *
- * For PTEs in a level-1 page table when the Page bit is not set, the
- * CPA must be aligned modulo the sticter of HV_PAGE_TABLE_ALIGN and
- * the level-2 page table size.
+ * For all PTEs in the lowest-level page table, and for all PTEs with
+ * the Page bit set in all page tables, the CPA must be aligned modulo
+ * the relevant page size.
  */
 static __inline unsigned long
 hv_pte_get_ptfn(const HV_PTE pte)
@@ -2263,7 +2348,6 @@ hv_pte_get_ptfn(const HV_PTE pte)
   return pte.val >> HV_PTE_INDEX_PTFN;
 }
 
-
 /** Set the page table frame number into a PTE.  See hv_pte_get_ptfn. */
 static __inline HV_PTE
 hv_pte_set_ptfn(HV_PTE pte, unsigned long val)
@@ -2273,6 +2357,20 @@ hv_pte_set_ptfn(HV_PTE pte, unsigned long val)
   return pte;
 }
 
+/** Get the client physical address from the PTE.  See hv_pte_set_ptfn. */
+static __inline HV_PhysAddr
+hv_pte_get_pa(const HV_PTE pte)
+{
+  return (__hv64) hv_pte_get_ptfn(pte) << HV_LOG2_PAGE_TABLE_ALIGN;
+}
+
+/** Set the client physical address into a PTE.  See hv_pte_get_ptfn. */
+static __inline HV_PTE
+hv_pte_set_pa(HV_PTE pte, HV_PhysAddr pa)
+{
+  return hv_pte_set_ptfn(pte, pa >> HV_LOG2_PAGE_TABLE_ALIGN);
+}
+
 
 /** Get the remote tile caching this page.
  *
@@ -2308,28 +2406,20 @@ hv_pte_set_lotar(HV_PTE pte, unsigned int val)
 
 #endif  /* !__ASSEMBLER__ */
 
-/** Converts a client physical address to a pfn. */
-#define HV_CPA_TO_PFN(p) ((p) >> HV_LOG2_PAGE_SIZE_SMALL)
-
-/** Converts a pfn to a client physical address. */
-#define HV_PFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_SIZE_SMALL)
-
 /** Converts a client physical address to a ptfn. */
 #define HV_CPA_TO_PTFN(p) ((p) >> HV_LOG2_PAGE_TABLE_ALIGN)
 
 /** Converts a ptfn to a client physical address. */
 #define HV_PTFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_TABLE_ALIGN)
 
-/** Converts a ptfn to a pfn. */
-#define HV_PTFN_TO_PFN(p) \
-  ((p) >> (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Converts a pfn to a ptfn. */
-#define HV_PFN_TO_PTFN(p) \
-  ((p) << (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
-
 #if CHIP_VA_WIDTH() > 32
 
+/*
+ * Note that we currently do not allow customizing the page size
+ * of the L0 pages, but fix them at 4GB, so we do not use the
+ * "_HV_xxx" nomenclature for the L0 macros.
+ */
+
 /** Log number of HV_PTE entries in L0 page table */
 #define HV_LOG2_L0_ENTRIES (CHIP_VA_WIDTH() - HV_LOG2_L1_SPAN)
 
@@ -2359,69 +2449,104 @@ hv_pte_set_lotar(HV_PTE pte, unsigned int val)
 #endif /* CHIP_VA_WIDTH() > 32 */
 
 /** Log number of HV_PTE entries in L1 page table */
-#define HV_LOG2_L1_ENTRIES (HV_LOG2_L1_SPAN - HV_LOG2_PAGE_SIZE_LARGE)
+#define _HV_LOG2_L1_ENTRIES(log2_page_size_large) \
+  (HV_LOG2_L1_SPAN - log2_page_size_large)
 
 /** Number of HV_PTE entries in L1 page table */
-#define HV_L1_ENTRIES (1 << HV_LOG2_L1_ENTRIES)
+#define _HV_L1_ENTRIES(log2_page_size_large) \
+  (1 << _HV_LOG2_L1_ENTRIES(log2_page_size_large))
 
 /** Log size of L1 page table in bytes */
-#define HV_LOG2_L1_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L1_ENTRIES)
+#define _HV_LOG2_L1_SIZE(log2_page_size_large) \
+  (HV_LOG2_PTE_SIZE + _HV_LOG2_L1_ENTRIES(log2_page_size_large))
 
 /** Size of L1 page table in bytes */
-#define HV_L1_SIZE (1 << HV_LOG2_L1_SIZE)
+#define _HV_L1_SIZE(log2_page_size_large) \
+  (1 << _HV_LOG2_L1_SIZE(log2_page_size_large))
 
 /** Log number of HV_PTE entries in level-2 page table */
-#define HV_LOG2_L2_ENTRIES (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)
+#define _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
+  (log2_page_size_large - log2_page_size_small)
 
 /** Number of HV_PTE entries in level-2 page table */
-#define HV_L2_ENTRIES (1 << HV_LOG2_L2_ENTRIES)
+#define _HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
+  (1 << _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
 
 /** Log size of level-2 page table in bytes */
-#define HV_LOG2_L2_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L2_ENTRIES)
+#define _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small) \
+  (HV_LOG2_PTE_SIZE + \
+   _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
 
 /** Size of level-2 page table in bytes */
-#define HV_L2_SIZE (1 << HV_LOG2_L2_SIZE)
+#define _HV_L2_SIZE(log2_page_size_large, log2_page_size_small) \
+  (1 << _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small))
 
 #ifdef __ASSEMBLER__
 
 #if CHIP_VA_WIDTH() > 32
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((va) >> log2_page_size_large) & (_HV_L1_ENTRIES(log2_page_size_large) - 1))
 
 #else /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((va) >> HV_LOG2_PAGE_SIZE_LARGE))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((va) >> log2_page_size_large))
 
 #endif /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in level-2 page table for a specific VA */
-#define HV_L2_INDEX(va) \
-  (((va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))
+#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
+  (((va) >> log2_page_size_small) & \
+   (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
 
 #else /* __ASSEMBLER __ */
 
 #if CHIP_VA_WIDTH() > 32
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((HV_VirtAddr)(va) >> log2_page_size_large) & \
+   (_HV_L1_ENTRIES(log2_page_size_large) - 1))
 
 #else /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((HV_VirtAddr)(va) >> log2_page_size_large))
 
 #endif /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in level-2 page table for a specific VA */
-#define HV_L2_INDEX(va) \
-  (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))
+#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
+  (((HV_VirtAddr)(va) >> log2_page_size_small) & \
+   (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
 
 #endif /* __ASSEMBLER __ */
 
-#endif /* _TILE_HV_H */
+/** Position of the PFN field within the PTE (subset of the PTFN). */
+#define _HV_PTE_INDEX_PFN(log2_page_size) \
+  (HV_PTE_INDEX_PTFN + (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Length of the PFN field within the PTE (subset of the PTFN). */
+#define _HV_PTE_INDEX_PFN_BITS(log2_page_size) \
+  (HV_PTE_INDEX_PTFN_BITS - (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Converts a client physical address to a pfn. */
+#define _HV_CPA_TO_PFN(p, log2_page_size) ((p) >> log2_page_size)
+
+/** Converts a pfn to a client physical address. */
+#define _HV_PFN_TO_CPA(p, log2_page_size) \
+  (((HV_PhysAddr)(p)) << log2_page_size)
+
+/** Converts a ptfn to a pfn. */
+#define _HV_PTFN_TO_PFN(p, log2_page_size) \
+  ((p) >> (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Converts a pfn to a ptfn. */
+#define _HV_PFN_TO_PTFN(p, log2_page_size) \
+  ((p) << (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+#endif /* _HV_HV_H */
index 0d826faf8f358c7a78fefb69e38af0d8927e3fad..5de99248d8df1a018aa200f48b249765690bc383 100644 (file)
@@ -9,10 +9,9 @@ obj-y := backtrace.o entry.o irq.o messaging.o \
        intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o
 
 obj-$(CONFIG_HARDWALL)         += hardwall.o
-obj-$(CONFIG_TILEGX)           += futex_64.o
 obj-$(CONFIG_COMPAT)           += compat.o compat_signal.o
 obj-$(CONFIG_SMP)              += smpboot.o smp.o tlb.o
 obj-$(CONFIG_MODULES)          += module.o
 obj-$(CONFIG_EARLY_PRINTK)     += early_printk.o
-obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel_$(BITS).o
 obj-$(CONFIG_PCI)              += pci.o
index cdef6e5ec022cdbe6ab4c1c139f56dffc7fbcd8e..474571b8408584d37743678ce02b2ca77c3c9938 100644 (file)
@@ -118,8 +118,6 @@ struct compat_rt_sigframe {
        struct compat_ucontext uc;
 };
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
                             struct compat_sigaction __user *oact,
                             size_t sigsetsize)
@@ -302,7 +300,6 @@ long compat_sys_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
index ec91568df880ef0d41f99f7ab86cc0fdb2330551..133c4b56a99ec7196c59f9dca60742953826bf75 100644 (file)
@@ -100,8 +100,9 @@ STD_ENTRY(smp_nap)
  */
 STD_ENTRY(_cpu_idle)
        movei r1, 1
+       IRQ_ENABLE_LOAD(r2, r3)
        mtspr INTERRUPT_CRITICAL_SECTION, r1
-       IRQ_ENABLE(r2, r3)             /* unmask, but still with ICS set */
+       IRQ_ENABLE_APPLY(r2, r3)       /* unmask, but still with ICS set */
        mtspr INTERRUPT_CRITICAL_SECTION, zero
        .global _cpu_idle_nap
 _cpu_idle_nap:
index 8c41891aab3413c8fa44549baad4a013a992407b..20273ee37deb78919667efa12fbaa7dff41a3cca 100644 (file)
 
 
 /*
- * This data structure tracks the rectangle data, etc., associated
- * one-to-one with a "struct file *" from opening HARDWALL_FILE.
+ * Implement a per-cpu "hardwall" resource class such as UDN or IPI.
+ * We use "hardwall" nomenclature throughout for historical reasons.
+ * The lock here controls access to the list data structure as well as
+ * to the items on the list.
+ */
+struct hardwall_type {
+       int index;
+       int is_xdn;
+       int is_idn;
+       int disabled;
+       const char *name;
+       struct list_head list;
+       spinlock_t lock;
+       struct proc_dir_entry *proc_dir;
+};
+
+enum hardwall_index {
+       HARDWALL_UDN = 0,
+#ifndef __tilepro__
+       HARDWALL_IDN = 1,
+       HARDWALL_IPI = 2,
+#endif
+       _HARDWALL_TYPES
+};
+
+static struct hardwall_type hardwall_types[] = {
+       {  /* user-space access to UDN */
+               0,
+               1,
+               0,
+               0,
+               "udn",
+               LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
+               __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
+               NULL
+       },
+#ifndef __tilepro__
+       {  /* user-space access to IDN */
+               1,
+               1,
+               1,
+               1,  /* disabled pending hypervisor support */
+               "idn",
+               LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
+               __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
+               NULL
+       },
+       {  /* access to user-space IPI */
+               2,
+               0,
+               0,
+               0,
+               "ipi",
+               LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
+               __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
+               NULL
+       },
+#endif
+};
+
+/*
+ * This data structure tracks the cpu data, etc., associated
+ * one-to-one with a "struct file *" from opening a hardwall device file.
  * Note that the file's private data points back to this structure.
  */
 struct hardwall_info {
-       struct list_head list;             /* "rectangles" list */
+       struct list_head list;             /* for hardwall_types.list */
        struct list_head task_head;        /* head of tasks in this hardwall */
-       struct cpumask cpumask;            /* cpus in the rectangle */
+       struct hardwall_type *type;        /* type of this resource */
+       struct cpumask cpumask;            /* cpus reserved */
+       int id;                            /* integer id for this hardwall */
+       int teardown_in_progress;          /* are we tearing this one down? */
+
+       /* Remaining fields only valid for user-network resources. */
        int ulhc_x;                        /* upper left hand corner x coord */
        int ulhc_y;                        /* upper left hand corner y coord */
        int width;                         /* rectangle width */
        int height;                        /* rectangle height */
-       int id;                            /* integer id for this hardwall */
-       int teardown_in_progress;          /* are we tearing this one down? */
+#if CHIP_HAS_REV1_XDN()
+       atomic_t xdn_pending_count;        /* cores in phase 1 of drain */
+#endif
 };
 
-/* Currently allocated hardwall rectangles */
-static LIST_HEAD(rectangles);
 
 /* /proc/tile/hardwall */
 static struct proc_dir_entry *hardwall_proc_dir;
 
 /* Functions to manage files in /proc/tile/hardwall. */
-static void hardwall_add_proc(struct hardwall_info *rect);
-static void hardwall_remove_proc(struct hardwall_info *rect);
-
-/*
- * Guard changes to the hardwall data structures.
- * This could be finer grained (e.g. one lock for the list of hardwall
- * rectangles, then separate embedded locks for each one's list of tasks),
- * but there are subtle correctness issues when trying to start with
- * a task's "hardwall" pointer and lock the correct rectangle's embedded
- * lock in the presence of a simultaneous deactivation, so it seems
- * easier to have a single lock, given that none of these data
- * structures are touched very frequently during normal operation.
- */
-static DEFINE_SPINLOCK(hardwall_lock);
+static void hardwall_add_proc(struct hardwall_info *);
+static void hardwall_remove_proc(struct hardwall_info *);
 
 /* Allow disabling UDN access. */
-static int udn_disabled;
 static int __init noudn(char *str)
 {
        pr_info("User-space UDN access is disabled\n");
-       udn_disabled = 1;
+       hardwall_types[HARDWALL_UDN].disabled = 1;
        return 0;
 }
 early_param("noudn", noudn);
 
+#ifndef __tilepro__
+/* Allow disabling IDN access. */
+static int __init noidn(char *str)
+{
+       pr_info("User-space IDN access is disabled\n");
+       hardwall_types[HARDWALL_IDN].disabled = 1;
+       return 0;
+}
+early_param("noidn", noidn);
+
+/* Allow disabling IPI access. */
+static int __init noipi(char *str)
+{
+       pr_info("User-space IPI access is disabled\n");
+       hardwall_types[HARDWALL_IPI].disabled = 1;
+       return 0;
+}
+early_param("noipi", noipi);
+#endif
+
 
 /*
- * Low-level primitives
+ * Low-level primitives for UDN/IDN
  */
 
+#ifdef __tilepro__
+#define mtspr_XDN(hwt, name, val) \
+       do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0)
+#define mtspr_MPL_XDN(hwt, name, val) \
+       do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0)
+#define mfspr_XDN(hwt, name) \
+       ((void)(hwt), __insn_mfspr(SPR_UDN_##name))
+#else
+#define mtspr_XDN(hwt, name, val)                                      \
+       do {                                                            \
+               if ((hwt)->is_idn)                                      \
+                       __insn_mtspr(SPR_IDN_##name, (val));            \
+               else                                                    \
+                       __insn_mtspr(SPR_UDN_##name, (val));            \
+       } while (0)
+#define mtspr_MPL_XDN(hwt, name, val)                                  \
+       do {                                                            \
+               if ((hwt)->is_idn)                                      \
+                       __insn_mtspr(SPR_MPL_IDN_##name, (val));        \
+               else                                                    \
+                       __insn_mtspr(SPR_MPL_UDN_##name, (val));        \
+       } while (0)
+#define mfspr_XDN(hwt, name) \
+  ((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name))
+#endif
+
 /* Set a CPU bit if the CPU is online. */
 #define cpu_online_set(cpu, dst) do { \
        if (cpu_online(cpu))          \
@@ -101,7 +199,7 @@ static int contains(struct hardwall_info *r, int x, int y)
 }
 
 /* Compute the rectangle parameters and validate the cpumask. */
-static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
+static int check_rectangle(struct hardwall_info *r, struct cpumask *mask)
 {
        int x, y, cpu, ulhc, lrhc;
 
@@ -114,8 +212,6 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
        r->ulhc_y = cpu_y(ulhc);
        r->width = cpu_x(lrhc) - r->ulhc_x + 1;
        r->height = cpu_y(lrhc) - r->ulhc_y + 1;
-       cpumask_copy(&r->cpumask, mask);
-       r->id = ulhc;   /* The ulhc cpu id can be the hardwall id. */
 
        /* Width and height must be positive */
        if (r->width <= 0 || r->height <= 0)
@@ -128,7 +224,7 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
                                return -EINVAL;
 
        /*
-        * Note that offline cpus can't be drained when this UDN
+        * Note that offline cpus can't be drained when this user network
         * rectangle eventually closes.  We used to detect this
         * situation and print a warning, but it annoyed users and
         * they ignored it anyway, so now we just return without a
@@ -137,16 +233,6 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
        return 0;
 }
 
-/* Do the two given rectangles overlap on any cpu? */
-static int overlaps(struct hardwall_info *a, struct hardwall_info *b)
-{
-       return a->ulhc_x + a->width > b->ulhc_x &&    /* A not to the left */
-               b->ulhc_x + b->width > a->ulhc_x &&   /* B not to the left */
-               a->ulhc_y + a->height > b->ulhc_y &&  /* A not above */
-               b->ulhc_y + b->height > a->ulhc_y;    /* B not above */
-}
-
-
 /*
  * Hardware management of hardwall setup, teardown, trapping,
  * and enabling/disabling PL0 access to the networks.
@@ -157,23 +243,35 @@ enum direction_protect {
        N_PROTECT = (1 << 0),
        E_PROTECT = (1 << 1),
        S_PROTECT = (1 << 2),
-       W_PROTECT = (1 << 3)
+       W_PROTECT = (1 << 3),
+       C_PROTECT = (1 << 4),
 };
 
-static void enable_firewall_interrupts(void)
+static inline int xdn_which_interrupt(struct hardwall_type *hwt)
+{
+#ifndef __tilepro__
+       if (hwt->is_idn)
+               return INT_IDN_FIREWALL;
+#endif
+       return INT_UDN_FIREWALL;
+}
+
+static void enable_firewall_interrupts(struct hardwall_type *hwt)
 {
-       arch_local_irq_unmask_now(INT_UDN_FIREWALL);
+       arch_local_irq_unmask_now(xdn_which_interrupt(hwt));
 }
 
-static void disable_firewall_interrupts(void)
+static void disable_firewall_interrupts(struct hardwall_type *hwt)
 {
-       arch_local_irq_mask_now(INT_UDN_FIREWALL);
+       arch_local_irq_mask_now(xdn_which_interrupt(hwt));
 }
 
 /* Set up hardwall on this cpu based on the passed hardwall_info. */
-static void hardwall_setup_ipi_func(void *info)
+static void hardwall_setup_func(void *info)
 {
        struct hardwall_info *r = info;
+       struct hardwall_type *hwt = r->type;
+
        int cpu = smp_processor_id();
        int x = cpu % smp_width;
        int y = cpu / smp_width;
@@ -187,13 +285,12 @@ static void hardwall_setup_ipi_func(void *info)
        if (y == r->ulhc_y + r->height - 1)
                bits |= S_PROTECT;
        BUG_ON(bits == 0);
-       __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits);
-       enable_firewall_interrupts();
-
+       mtspr_XDN(hwt, DIRECTION_PROTECT, bits);
+       enable_firewall_interrupts(hwt);
 }
 
 /* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
-static void hardwall_setup(struct hardwall_info *r)
+static void hardwall_protect_rectangle(struct hardwall_info *r)
 {
        int x, y, cpu, delta;
        struct cpumask rect_cpus;
@@ -217,37 +314,50 @@ static void hardwall_setup(struct hardwall_info *r)
        }
 
        /* Then tell all the cpus to set up their protection SPR */
-       on_each_cpu_mask(&rect_cpus, hardwall_setup_ipi_func, r, 1);
+       on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
 }
 
 void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
 {
        struct hardwall_info *rect;
+       struct hardwall_type *hwt;
        struct task_struct *p;
        struct siginfo info;
-       int x, y;
        int cpu = smp_processor_id();
        int found_processes;
        unsigned long flags;
-
        struct pt_regs *old_regs = set_irq_regs(regs);
+
        irq_enter();
 
+       /* Figure out which network trapped. */
+       switch (fault_num) {
+#ifndef __tilepro__
+       case INT_IDN_FIREWALL:
+               hwt = &hardwall_types[HARDWALL_IDN];
+               break;
+#endif
+       case INT_UDN_FIREWALL:
+               hwt = &hardwall_types[HARDWALL_UDN];
+               break;
+       default:
+               BUG();
+       }
+       BUG_ON(hwt->disabled);
+
        /* This tile trapped a network access; find the rectangle. */
-       x = cpu % smp_width;
-       y = cpu / smp_width;
-       spin_lock_irqsave(&hardwall_lock, flags);
-       list_for_each_entry(rect, &rectangles, list) {
-               if (contains(rect, x, y))
+       spin_lock_irqsave(&hwt->lock, flags);
+       list_for_each_entry(rect, &hwt->list, list) {
+               if (cpumask_test_cpu(cpu, &rect->cpumask))
                        break;
        }
 
        /*
         * It shouldn't be possible not to find this cpu on the
         * rectangle list, since only cpus in rectangles get hardwalled.
-        * The hardwall is only removed after the UDN is drained.
+        * The hardwall is only removed after the user network is drained.
         */
-       BUG_ON(&rect->list == &rectangles);
+       BUG_ON(&rect->list == &hwt->list);
 
        /*
         * If we already started teardown on this hardwall, don't worry;
@@ -255,30 +365,32 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
         * to quiesce.
         */
        if (rect->teardown_in_progress) {
-               pr_notice("cpu %d: detected hardwall violation %#lx"
+               pr_notice("cpu %d: detected %s hardwall violation %#lx"
                       " while teardown already in progress\n",
-                      cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
+                         cpu, hwt->name,
+                         (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
                goto done;
        }
 
        /*
         * Kill off any process that is activated in this rectangle.
         * We bypass security to deliver the signal, since it must be
-        * one of the activated processes that generated the UDN
+        * one of the activated processes that generated the user network
         * message that caused this trap, and all the activated
         * processes shared a single open file so are pretty tightly
         * bound together from a security point of view to begin with.
         */
        rect->teardown_in_progress = 1;
        wmb(); /* Ensure visibility of rectangle before notifying processes. */
-       pr_notice("cpu %d: detected hardwall violation %#lx...\n",
-              cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
+       pr_notice("cpu %d: detected %s hardwall violation %#lx...\n",
+                 cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_HARDWALL;
        found_processes = 0;
-       list_for_each_entry(p, &rect->task_head, thread.hardwall_list) {
-               BUG_ON(p->thread.hardwall != rect);
+       list_for_each_entry(p, &rect->task_head,
+                           thread.hardwall[hwt->index].list) {
+               BUG_ON(p->thread.hardwall[hwt->index].info != rect);
                if (!(p->flags & PF_EXITING)) {
                        found_processes = 1;
                        pr_notice("hardwall: killing %d\n", p->pid);
@@ -289,7 +401,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
                pr_notice("hardwall: no associated processes!\n");
 
  done:
-       spin_unlock_irqrestore(&hardwall_lock, flags);
+       spin_unlock_irqrestore(&hwt->lock, flags);
 
        /*
         * We have to disable firewall interrupts now, or else when we
@@ -298,48 +410,87 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
         * haven't yet drained the network, and that would allow packets
         * to cross out of the hardwall region.
         */
-       disable_firewall_interrupts();
+       disable_firewall_interrupts(hwt);
 
        irq_exit();
        set_irq_regs(old_regs);
 }
 
-/* Allow access from user space to the UDN. */
-void grant_network_mpls(void)
+/* Allow access from user space to the user network. */
+void grant_hardwall_mpls(struct hardwall_type *hwt)
 {
-       __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_0, 1);
-       __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_0, 1);
-       __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_0, 1);
-       __insn_mtspr(SPR_MPL_UDN_TIMER_SET_0, 1);
+#ifndef __tilepro__
+       if (!hwt->is_xdn) {
+               __insn_mtspr(SPR_MPL_IPI_0_SET_0, 1);
+               return;
+       }
+#endif
+       mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1);
+       mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1);
+       mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1);
+       mtspr_MPL_XDN(hwt, TIMER_SET_0, 1);
 #if !CHIP_HAS_REV1_XDN()
-       __insn_mtspr(SPR_MPL_UDN_REFILL_SET_0, 1);
-       __insn_mtspr(SPR_MPL_UDN_CA_SET_0, 1);
+       mtspr_MPL_XDN(hwt, REFILL_SET_0, 1);
+       mtspr_MPL_XDN(hwt, CA_SET_0, 1);
 #endif
 }
 
-/* Deny access from user space to the UDN. */
-void restrict_network_mpls(void)
+/* Deny access from user space to the user network. */
+void restrict_hardwall_mpls(struct hardwall_type *hwt)
 {
-       __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_1, 1);
-       __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_1, 1);
-       __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_1, 1);
-       __insn_mtspr(SPR_MPL_UDN_TIMER_SET_1, 1);
+#ifndef __tilepro__
+       if (!hwt->is_xdn) {
+               __insn_mtspr(SPR_MPL_IPI_0_SET_1, 1);
+               return;
+       }
+#endif
+       mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1);
+       mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1);
+       mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1);
+       mtspr_MPL_XDN(hwt, TIMER_SET_1, 1);
 #if !CHIP_HAS_REV1_XDN()
-       __insn_mtspr(SPR_MPL_UDN_REFILL_SET_1, 1);
-       __insn_mtspr(SPR_MPL_UDN_CA_SET_1, 1);
+       mtspr_MPL_XDN(hwt, REFILL_SET_1, 1);
+       mtspr_MPL_XDN(hwt, CA_SET_1, 1);
 #endif
 }
 
+/* Restrict or deny as necessary for the task we're switching to. */
+void hardwall_switch_tasks(struct task_struct *prev,
+                          struct task_struct *next)
+{
+       int i;
+       for (i = 0; i < HARDWALL_TYPES; ++i) {
+               if (prev->thread.hardwall[i].info != NULL) {
+                       if (next->thread.hardwall[i].info == NULL)
+                               restrict_hardwall_mpls(&hardwall_types[i]);
+               } else if (next->thread.hardwall[i].info != NULL) {
+                       grant_hardwall_mpls(&hardwall_types[i]);
+               }
+       }
+}
+
+/* Does this task have the right to IPI the given cpu? */
+int hardwall_ipi_valid(int cpu)
+{
+#ifdef __tilegx__
+       struct hardwall_info *info =
+               current->thread.hardwall[HARDWALL_IPI].info;
+       return info && cpumask_test_cpu(cpu, &info->cpumask);
+#else
+       return 0;
+#endif
+}
 
 /*
- * Code to create, activate, deactivate, and destroy hardwall rectangles.
+ * Code to create, activate, deactivate, and destroy hardwall resources.
  */
 
-/* Create a hardwall for the given rectangle */
-static struct hardwall_info *hardwall_create(
-       size_t size, const unsigned char __user *bits)
+/* Create a hardwall for the given resource */
+static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
+                                            size_t size,
+                                            const unsigned char __user *bits)
 {
-       struct hardwall_info *iter, *rect;
+       struct hardwall_info *iter, *info;
        struct cpumask mask;
        unsigned long flags;
        int rc;
@@ -370,55 +521,62 @@ static struct hardwall_info *hardwall_create(
                }
        }
 
-       /* Allocate a new rectangle optimistically. */
-       rect = kmalloc(sizeof(struct hardwall_info),
+       /* Allocate a new hardwall_info optimistically. */
+       info = kmalloc(sizeof(struct hardwall_info),
                        GFP_KERNEL | __GFP_ZERO);
-       if (rect == NULL)
+       if (info == NULL)
                return ERR_PTR(-ENOMEM);
-       INIT_LIST_HEAD(&rect->task_head);
+       INIT_LIST_HEAD(&info->task_head);
+       info->type = hwt;
 
        /* Compute the rectangle size and validate that it's plausible. */
-       rc = setup_rectangle(rect, &mask);
-       if (rc != 0) {
-               kfree(rect);
-               return ERR_PTR(rc);
+       cpumask_copy(&info->cpumask, &mask);
+       info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits);
+       if (hwt->is_xdn) {
+               rc = check_rectangle(info, &mask);
+               if (rc != 0) {
+                       kfree(info);
+                       return ERR_PTR(rc);
+               }
        }
 
        /* Confirm it doesn't overlap and add it to the list. */
-       spin_lock_irqsave(&hardwall_lock, flags);
-       list_for_each_entry(iter, &rectangles, list) {
-               if (overlaps(iter, rect)) {
-                       spin_unlock_irqrestore(&hardwall_lock, flags);
-                       kfree(rect);
+       spin_lock_irqsave(&hwt->lock, flags);
+       list_for_each_entry(iter, &hwt->list, list) {
+               if (cpumask_intersects(&iter->cpumask, &info->cpumask)) {
+                       spin_unlock_irqrestore(&hwt->lock, flags);
+                       kfree(info);
                        return ERR_PTR(-EBUSY);
                }
        }
-       list_add_tail(&rect->list, &rectangles);
-       spin_unlock_irqrestore(&hardwall_lock, flags);
+       list_add_tail(&info->list, &hwt->list);
+       spin_unlock_irqrestore(&hwt->lock, flags);
 
        /* Set up appropriate hardwalling on all affected cpus. */
-       hardwall_setup(rect);
+       if (hwt->is_xdn)
+               hardwall_protect_rectangle(info);
 
        /* Create a /proc/tile/hardwall entry. */
-       hardwall_add_proc(rect);
+       hardwall_add_proc(info);
 
-       return rect;
+       return info;
 }
 
 /* Activate a given hardwall on this cpu for this process. */
-static int hardwall_activate(struct hardwall_info *rect)
+static int hardwall_activate(struct hardwall_info *info)
 {
-       int cpu, x, y;
+       int cpu;
        unsigned long flags;
        struct task_struct *p = current;
        struct thread_struct *ts = &p->thread;
+       struct hardwall_type *hwt;
 
-       /* Require a rectangle. */
-       if (rect == NULL)
+       /* Require a hardwall. */
+       if (info == NULL)
                return -ENODATA;
 
-       /* Not allowed to activate a rectangle that is being torn down. */
-       if (rect->teardown_in_progress)
+       /* Not allowed to activate a hardwall that is being torn down. */
+       if (info->teardown_in_progress)
                return -EINVAL;
 
        /*
@@ -428,78 +586,87 @@ static int hardwall_activate(struct hardwall_info *rect)
        if (cpumask_weight(&p->cpus_allowed) != 1)
                return -EPERM;
 
-       /* Make sure we are bound to a cpu in this rectangle. */
+       /* Make sure we are bound to a cpu assigned to this resource. */
        cpu = smp_processor_id();
        BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
-       x = cpu_x(cpu);
-       y = cpu_y(cpu);
-       if (!contains(rect, x, y))
+       if (!cpumask_test_cpu(cpu, &info->cpumask))
                return -EINVAL;
 
        /* If we are already bound to this hardwall, it's a no-op. */
-       if (ts->hardwall) {
-               BUG_ON(ts->hardwall != rect);
+       hwt = info->type;
+       if (ts->hardwall[hwt->index].info) {
+               BUG_ON(ts->hardwall[hwt->index].info != info);
                return 0;
        }
 
-       /* Success!  This process gets to use the user networks on this cpu. */
-       ts->hardwall = rect;
-       spin_lock_irqsave(&hardwall_lock, flags);
-       list_add(&ts->hardwall_list, &rect->task_head);
-       spin_unlock_irqrestore(&hardwall_lock, flags);
-       grant_network_mpls();
-       printk(KERN_DEBUG "Pid %d (%s) activated for hardwall: cpu %d\n",
-              p->pid, p->comm, cpu);
+       /* Success!  This process gets to use the resource on this cpu. */
+       ts->hardwall[hwt->index].info = info;
+       spin_lock_irqsave(&hwt->lock, flags);
+       list_add(&ts->hardwall[hwt->index].list, &info->task_head);
+       spin_unlock_irqrestore(&hwt->lock, flags);
+       grant_hardwall_mpls(hwt);
+       printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n",
+              p->pid, p->comm, hwt->name, cpu);
        return 0;
 }
 
 /*
- * Deactivate a task's hardwall.  Must hold hardwall_lock.
+ * Deactivate a task's hardwall.  Must hold lock for hardwall_type.
  * This method may be called from free_task(), so we don't want to
  * rely on too many fields of struct task_struct still being valid.
  * We assume the cpus_allowed, pid, and comm fields are still valid.
  */
-static void _hardwall_deactivate(struct task_struct *task)
+static void _hardwall_deactivate(struct hardwall_type *hwt,
+                                struct task_struct *task)
 {
        struct thread_struct *ts = &task->thread;
 
        if (cpumask_weight(&task->cpus_allowed) != 1) {
-               pr_err("pid %d (%s) releasing networks with"
+               pr_err("pid %d (%s) releasing %s hardwall with"
                       " an affinity mask containing %d cpus!\n",
-                      task->pid, task->comm,
+                      task->pid, task->comm, hwt->name,
                       cpumask_weight(&task->cpus_allowed));
                BUG();
        }
 
-       BUG_ON(ts->hardwall == NULL);
-       ts->hardwall = NULL;
-       list_del(&ts->hardwall_list);
+       BUG_ON(ts->hardwall[hwt->index].info == NULL);
+       ts->hardwall[hwt->index].info = NULL;
+       list_del(&ts->hardwall[hwt->index].list);
        if (task == current)
-               restrict_network_mpls();
+               restrict_hardwall_mpls(hwt);
 }
 
 /* Deactivate a task's hardwall. */
-int hardwall_deactivate(struct task_struct *task)
+static int hardwall_deactivate(struct hardwall_type *hwt,
+                              struct task_struct *task)
 {
        unsigned long flags;
        int activated;
 
-       spin_lock_irqsave(&hardwall_lock, flags);
-       activated = (task->thread.hardwall != NULL);
+       spin_lock_irqsave(&hwt->lock, flags);
+       activated = (task->thread.hardwall[hwt->index].info != NULL);
        if (activated)
-               _hardwall_deactivate(task);
-       spin_unlock_irqrestore(&hardwall_lock, flags);
+               _hardwall_deactivate(hwt, task);
+       spin_unlock_irqrestore(&hwt->lock, flags);
 
        if (!activated)
                return -EINVAL;
 
-       printk(KERN_DEBUG "Pid %d (%s) deactivated for hardwall: cpu %d\n",
-              task->pid, task->comm, smp_processor_id());
+       printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
+              task->pid, task->comm, hwt->name, smp_processor_id());
        return 0;
 }
 
-/* Stop a UDN switch before draining the network. */
-static void stop_udn_switch(void *ignored)
+void hardwall_deactivate_all(struct task_struct *task)
+{
+       int i;
+       for (i = 0; i < HARDWALL_TYPES; ++i)
+               if (task->thread.hardwall[i].info)
+                       hardwall_deactivate(&hardwall_types[i], task);
+}
+
+/* Stop the switch before draining the network. */
+static void stop_xdn_switch(void *arg)
 {
 #if !CHIP_HAS_REV1_XDN()
        /* Freeze the switch and the demux. */
@@ -507,13 +674,71 @@ static void stop_udn_switch(void *ignored)
                     SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
                     SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
                     SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
+#else
+       /*
+        * Drop all packets bound for the core or off the edge.
+        * We rely on the normal hardwall protection setup code
+        * to have set the low four bits to trigger firewall interrupts,
+        * and shift those bits up to trigger "drop on send" semantics,
+        * plus adding "drop on send to core" for all switches.
+        * In practice it seems the switches latch the DIRECTION_PROTECT
+        * SPR so they won't start dropping if they're already
+        * delivering the last message to the core, but it doesn't
+        * hurt to enable it here.
+        */
+       struct hardwall_type *hwt = arg;
+       unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT);
+       mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5);
 #endif
 }
 
+static void empty_xdn_demuxes(struct hardwall_type *hwt)
+{
+#ifndef __tilepro__
+       if (hwt->is_idn) {
+               while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0))
+                       (void) __tile_idn0_receive();
+               while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1))
+                       (void) __tile_idn1_receive();
+               return;
+       }
+#endif
+       while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
+               (void) __tile_udn0_receive();
+       while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
+               (void) __tile_udn1_receive();
+       while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
+               (void) __tile_udn2_receive();
+       while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
+               (void) __tile_udn3_receive();
+}
+
 /* Drain all the state from a stopped switch. */
-static void drain_udn_switch(void *ignored)
+static void drain_xdn_switch(void *arg)
 {
-#if !CHIP_HAS_REV1_XDN()
+       struct hardwall_info *info = arg;
+       struct hardwall_type *hwt = info->type;
+
+#if CHIP_HAS_REV1_XDN()
+       /*
+        * The switches have been configured to drop any messages
+        * destined for cores (or off the edge of the rectangle).
+        * But the current message may continue to be delivered,
+        * so we wait until all the cores have finished any pending
+        * messages before we stop draining.
+        */
+       int pending = mfspr_XDN(hwt, PENDING);
+       while (pending--) {
+               empty_xdn_demuxes(hwt);
+               if (hwt->is_idn)
+                       __tile_idn_send(0);
+               else
+                       __tile_udn_send(0);
+       }
+       atomic_dec(&info->xdn_pending_count);
+       while (atomic_read(&info->xdn_pending_count))
+               empty_xdn_demuxes(hwt);
+#else
        int i;
        int from_tile_words, ca_count;
 
@@ -533,15 +758,7 @@ static void drain_udn_switch(void *ignored)
                (void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);
 
        /* Empty out demuxes. */
-       while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
-               (void) __tile_udn0_receive();
-       while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
-               (void) __tile_udn1_receive();
-       while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
-               (void) __tile_udn2_receive();
-       while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
-               (void) __tile_udn3_receive();
-       BUG_ON((__insn_mfspr(SPR_UDN_DATA_AVAIL) & 0xF) != 0);
+       empty_xdn_demuxes(hwt);
 
        /* Empty out catch all. */
        ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
@@ -563,21 +780,25 @@ static void drain_udn_switch(void *ignored)
 #endif
 }
 
-/* Reset random UDN state registers at boot up and during hardwall teardown. */
-void reset_network_state(void)
+/* Reset random XDN state registers at boot up and during hardwall teardown. */
+static void reset_xdn_network_state(struct hardwall_type *hwt)
 {
-#if !CHIP_HAS_REV1_XDN()
-       /* Reset UDN coordinates to their standard value */
-       unsigned int cpu = smp_processor_id();
-       unsigned int x = cpu % smp_width;
-       unsigned int y = cpu / smp_width;
-#endif
-
-       if (udn_disabled)
+       if (hwt->disabled)
                return;
 
+       /* Clear out other random registers so we have a clean slate. */
+       mtspr_XDN(hwt, DIRECTION_PROTECT, 0);
+       mtspr_XDN(hwt, AVAIL_EN, 0);
+       mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0);
+
 #if !CHIP_HAS_REV1_XDN()
-       __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
+       /* Reset UDN coordinates to their standard value */
+       {
+               unsigned int cpu = smp_processor_id();
+               unsigned int x = cpu % smp_width;
+               unsigned int y = cpu / smp_width;
+               __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
+       }
 
        /* Set demux tags to predefined values and enable them. */
        __insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
@@ -585,56 +806,50 @@ void reset_network_state(void)
        __insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
        __insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
        __insn_mtspr(SPR_UDN_TAG_3, (1 << 3));
-#endif
 
-       /* Clear out other random registers so we have a clean slate. */
-       __insn_mtspr(SPR_UDN_AVAIL_EN, 0);
-       __insn_mtspr(SPR_UDN_DEADLOCK_TIMEOUT, 0);
-#if !CHIP_HAS_REV1_XDN()
+       /* Set other rev0 random registers to a clean state. */
        __insn_mtspr(SPR_UDN_REFILL_EN, 0);
        __insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
        __insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);
-#endif
 
        /* Start the switch and demux. */
-#if !CHIP_HAS_REV1_XDN()
        __insn_mtspr(SPR_UDN_SP_FREEZE, 0);
 #endif
 }
 
-/* Restart a UDN switch after draining. */
-static void restart_udn_switch(void *ignored)
+void reset_network_state(void)
 {
-       reset_network_state();
-
-       /* Disable firewall interrupts. */
-       __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, 0);
-       disable_firewall_interrupts();
+       reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]);
+#ifndef __tilepro__
+       reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]);
+#endif
 }
 
-/* Build a struct cpumask containing all valid tiles in bounding rectangle. */
-static void fill_mask(struct hardwall_info *r, struct cpumask *result)
+/* Restart an XDN switch after draining. */
+static void restart_xdn_switch(void *arg)
 {
-       int x, y, cpu;
+       struct hardwall_type *hwt = arg;
 
-       cpumask_clear(result);
+#if CHIP_HAS_REV1_XDN()
+       /* One last drain step to avoid races with injection and draining. */
+       empty_xdn_demuxes(hwt);
+#endif
 
-       cpu = r->ulhc_y * smp_width + r->ulhc_x;
-       for (y = 0; y < r->height; ++y, cpu += smp_width - r->width) {
-               for (x = 0; x < r->width; ++x, ++cpu)
-                       cpu_online_set(cpu, result);
-       }
+       reset_xdn_network_state(hwt);
+
+       /* Disable firewall interrupts. */
+       disable_firewall_interrupts(hwt);
 }
 
 /* Last reference to a hardwall is gone, so clear the network. */
-static void hardwall_destroy(struct hardwall_info *rect)
+static void hardwall_destroy(struct hardwall_info *info)
 {
        struct task_struct *task;
+       struct hardwall_type *hwt;
        unsigned long flags;
-       struct cpumask mask;
 
-       /* Make sure this file actually represents a rectangle. */
-       if (rect == NULL)
+       /* Make sure this file actually represents a hardwall. */
+       if (info == NULL)
                return;
 
        /*
@@ -644,39 +859,53 @@ static void hardwall_destroy(struct hardwall_info *rect)
         * deactivate any remaining tasks before freeing the
         * hardwall_info object itself.
         */
-       spin_lock_irqsave(&hardwall_lock, flags);
-       list_for_each_entry(task, &rect->task_head, thread.hardwall_list)
-               _hardwall_deactivate(task);
-       spin_unlock_irqrestore(&hardwall_lock, flags);
-
-       /* Drain the UDN. */
-       printk(KERN_DEBUG "Clearing hardwall rectangle %dx%d %d,%d\n",
-              rect->width, rect->height, rect->ulhc_x, rect->ulhc_y);
-       fill_mask(rect, &mask);
-       on_each_cpu_mask(&mask, stop_udn_switch, NULL, 1);
-       on_each_cpu_mask(&mask, drain_udn_switch, NULL, 1);
+       hwt = info->type;
+       info->teardown_in_progress = 1;
+       spin_lock_irqsave(&hwt->lock, flags);
+       list_for_each_entry(task, &info->task_head,
+                           thread.hardwall[hwt->index].list)
+               _hardwall_deactivate(hwt, task);
+       spin_unlock_irqrestore(&hwt->lock, flags);
+
+       if (hwt->is_xdn) {
+               /* Configure the switches for draining the user network. */
+               printk(KERN_DEBUG
+                      "Clearing %s hardwall rectangle %dx%d %d,%d\n",
+                      hwt->name, info->width, info->height,
+                      info->ulhc_x, info->ulhc_y);
+               on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1);
+
+               /* Drain the network. */
+#if CHIP_HAS_REV1_XDN()
+               atomic_set(&info->xdn_pending_count,
+                          cpumask_weight(&info->cpumask));
+               on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0);
+#else
+               on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1);
+#endif
 
-       /* Restart switch and disable firewall. */
-       on_each_cpu_mask(&mask, restart_udn_switch, NULL, 1);
+               /* Restart switch and disable firewall. */
+               on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1);
+       }
 
        /* Remove the /proc/tile/hardwall entry. */
-       hardwall_remove_proc(rect);
-
-       /* Now free the rectangle from the list. */
-       spin_lock_irqsave(&hardwall_lock, flags);
-       BUG_ON(!list_empty(&rect->task_head));
-       list_del(&rect->list);
-       spin_unlock_irqrestore(&hardwall_lock, flags);
-       kfree(rect);
+       hardwall_remove_proc(info);
+
+       /* Now free the hardwall from the list. */
+       spin_lock_irqsave(&hwt->lock, flags);
+       BUG_ON(!list_empty(&info->task_head));
+       list_del(&info->list);
+       spin_unlock_irqrestore(&hwt->lock, flags);
+       kfree(info);
 }
 
 
 static int hardwall_proc_show(struct seq_file *sf, void *v)
 {
-       struct hardwall_info *rect = sf->private;
+       struct hardwall_info *info = sf->private;
        char buf[256];
 
-       int rc = cpulist_scnprintf(buf, sizeof(buf), &rect->cpumask);
+       int rc = cpulist_scnprintf(buf, sizeof(buf), &info->cpumask);
        buf[rc++] = '\n';
        seq_write(sf, buf, rc);
        return 0;
@@ -695,31 +924,45 @@ static const struct file_operations hardwall_proc_fops = {
        .release        = single_release,
 };
 
-static void hardwall_add_proc(struct hardwall_info *rect)
+static void hardwall_add_proc(struct hardwall_info *info)
 {
        char buf[64];
-       snprintf(buf, sizeof(buf), "%d", rect->id);
-       proc_create_data(buf, 0444, hardwall_proc_dir,
-                        &hardwall_proc_fops, rect);
+       snprintf(buf, sizeof(buf), "%d", info->id);
+       proc_create_data(buf, 0444, info->type->proc_dir,
+                        &hardwall_proc_fops, info);
 }
 
-static void hardwall_remove_proc(struct hardwall_info *rect)
+static void hardwall_remove_proc(struct hardwall_info *info)
 {
        char buf[64];
-       snprintf(buf, sizeof(buf), "%d", rect->id);
-       remove_proc_entry(buf, hardwall_proc_dir);
+       snprintf(buf, sizeof(buf), "%d", info->id);
+       remove_proc_entry(buf, info->type->proc_dir);
 }
 
 int proc_pid_hardwall(struct task_struct *task, char *buffer)
 {
-       struct hardwall_info *rect = task->thread.hardwall;
-       return rect ? sprintf(buffer, "%d\n", rect->id) : 0;
+       int i;
+       int n = 0;
+       for (i = 0; i < HARDWALL_TYPES; ++i) {
+               struct hardwall_info *info = task->thread.hardwall[i].info;
+               if (info)
+                       n += sprintf(&buffer[n], "%s: %d\n",
+                                    info->type->name, info->id);
+       }
+       return n;
 }
 
 void proc_tile_hardwall_init(struct proc_dir_entry *root)
 {
-       if (!udn_disabled)
-               hardwall_proc_dir = proc_mkdir("hardwall", root);
+       int i;
+       for (i = 0; i < HARDWALL_TYPES; ++i) {
+               struct hardwall_type *hwt = &hardwall_types[i];
+               if (hwt->disabled)
+                       continue;
+               if (hardwall_proc_dir == NULL)
+                       hardwall_proc_dir = proc_mkdir("hardwall", root);
+               hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir);
+       }
 }
 
 
@@ -729,34 +972,45 @@ void proc_tile_hardwall_init(struct proc_dir_entry *root)
 
 static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
 {
-       struct hardwall_info *rect = file->private_data;
+       struct hardwall_info *info = file->private_data;
+       int minor = iminor(file->f_mapping->host);
+       struct hardwall_type* hwt;
 
        if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
                return -EINVAL;
 
+       BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES);
+       BUILD_BUG_ON(HARDWALL_TYPES !=
+                    sizeof(hardwall_types)/sizeof(hardwall_types[0]));
+
+       if (minor < 0 || minor >= HARDWALL_TYPES)
+               return -EINVAL;
+       hwt = &hardwall_types[minor];
+       WARN_ON(info && hwt != info->type);
+
        switch (_IOC_NR(a)) {
        case _HARDWALL_CREATE:
-               if (udn_disabled)
+               if (hwt->disabled)
                        return -ENOSYS;
-               if (rect != NULL)
+               if (info != NULL)
                        return -EALREADY;
-               rect = hardwall_create(_IOC_SIZE(a),
-                                       (const unsigned char __user *)b);
-               if (IS_ERR(rect))
-                       return PTR_ERR(rect);
-               file->private_data = rect;
+               info = hardwall_create(hwt, _IOC_SIZE(a),
+                                      (const unsigned char __user *)b);
+               if (IS_ERR(info))
+                       return PTR_ERR(info);
+               file->private_data = info;
                return 0;
 
        case _HARDWALL_ACTIVATE:
-               return hardwall_activate(rect);
+               return hardwall_activate(info);
 
        case _HARDWALL_DEACTIVATE:
-               if (current->thread.hardwall != rect)
+               if (current->thread.hardwall[hwt->index].info != info)
                        return -EINVAL;
-               return hardwall_deactivate(current);
+               return hardwall_deactivate(hwt, current);
 
        case _HARDWALL_GET_ID:
-               return rect ? rect->id : -EINVAL;
+               return info ? info->id : -EINVAL;
 
        default:
                return -EINVAL;
@@ -775,26 +1029,28 @@ static long hardwall_compat_ioctl(struct file *file,
 /* The user process closed the file; revoke access to user networks. */
 static int hardwall_flush(struct file *file, fl_owner_t owner)
 {
-       struct hardwall_info *rect = file->private_data;
+       struct hardwall_info *info = file->private_data;
        struct task_struct *task, *tmp;
        unsigned long flags;
 
-       if (rect) {
+       if (info) {
                /*
                 * NOTE: if multiple threads are activated on this hardwall
                 * file, the other threads will continue having access to the
-                * UDN until they are context-switched out and back in again.
+                * user network until they are context-switched out and back
+                * in again.
                 *
                 * NOTE: A NULL files pointer means the task is being torn
                 * down, so in that case we also deactivate it.
                 */
-               spin_lock_irqsave(&hardwall_lock, flags);
-               list_for_each_entry_safe(task, tmp, &rect->task_head,
-                                        thread.hardwall_list) {
+               struct hardwall_type *hwt = info->type;
+               spin_lock_irqsave(&hwt->lock, flags);
+               list_for_each_entry_safe(task, tmp, &info->task_head,
+                                        thread.hardwall[hwt->index].list) {
                        if (task->files == owner || task->files == NULL)
-                               _hardwall_deactivate(task);
+                               _hardwall_deactivate(hwt, task);
                }
-               spin_unlock_irqrestore(&hardwall_lock, flags);
+               spin_unlock_irqrestore(&hwt->lock, flags);
        }
 
        return 0;
@@ -824,11 +1080,11 @@ static int __init dev_hardwall_init(void)
        int rc;
        dev_t dev;
 
-       rc = alloc_chrdev_region(&dev, 0, 1, "hardwall");
+       rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall");
        if (rc < 0)
                return rc;
        cdev_init(&hardwall_dev, &dev_hardwall_fops);
-       rc = cdev_add(&hardwall_dev, dev, 1);
+       rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES);
        if (rc < 0)
                return rc;
 
index 1a39b7c1c87eed6db622f91a9d929168de4a868e..f71bfeeaf1a9ce6019781ded8921aa9b02fa31cc 100644 (file)
@@ -69,7 +69,7 @@ ENTRY(_start)
        }
        {
          moveli lr, lo16(1f)
-         move r5, zero
+         moveli r5, CTX_PAGE_FLAG
        }
        {
          auli lr, lr, ha16(1f)
@@ -141,11 +141,11 @@ ENTRY(empty_zero_page)
 
        .macro PTE va, cpa, bits1, no_org=0
        .ifeq \no_org
-       .org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE
+       .org swapper_pg_dir + PGD_INDEX(\va) * HV_PTE_SIZE
        .endif
        .word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
              (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
-       .word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32))
+       .word (\bits1) | (HV_CPA_TO_PTFN(\cpa) << (HV_PTE_INDEX_PTFN - 32))
        .endm
 
 __PAGE_ALIGNED_DATA
@@ -166,7 +166,7 @@ ENTRY(swapper_pg_dir)
        /* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
        PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
                              (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
-       .org swapper_pg_dir + HV_L1_SIZE
+       .org swapper_pg_dir + PGDIR_SIZE
        END(swapper_pg_dir)
 
        /*
index 6bc3a932fe457988214d665c677e5b960c4db134..f9a2734f7b82c955fdb5b04bb93f724e9c7da95e 100644 (file)
@@ -114,7 +114,7 @@ ENTRY(_start)
          shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
        }
        {
-         move r3, zero
+         moveli r3, CTX_PAGE_FLAG
          j hv_install_context
        }
 1:
@@ -210,19 +210,19 @@ ENTRY(empty_zero_page)
        .macro PTE cpa, bits1
        .quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
              HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
-             (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
+             (\bits1) | (HV_CPA_TO_PTFN(\cpa) << HV_PTE_INDEX_PTFN)
        .endm
 
 __PAGE_ALIGNED_DATA
        .align PAGE_SIZE
 ENTRY(swapper_pg_dir)
-       .org swapper_pg_dir + HV_L0_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
+       .org swapper_pg_dir + PGD_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
 .Lsv_data_pmd:
        .quad 0  /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
-       .org swapper_pg_dir + HV_L0_INDEX(MEM_SV_START) * HV_PTE_SIZE
+       .org swapper_pg_dir + PGD_INDEX(MEM_SV_START) * HV_PTE_SIZE
 .Lsv_code_pmd:
        .quad 0  /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
-       .org swapper_pg_dir + HV_L0_SIZE
+       .org swapper_pg_dir + SIZEOF_PGD
        END(swapper_pg_dir)
 
        .align HV_PAGE_TABLE_ALIGN
@@ -233,11 +233,11 @@ ENTRY(temp_data_pmd)
         * permissions later.
         */
        .set addr, 0
-       .rept HV_L1_ENTRIES
+       .rept PTRS_PER_PMD
        PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
-       .set addr, addr + HV_PAGE_SIZE_LARGE
+       .set addr, addr + HPAGE_SIZE
        .endr
-       .org temp_data_pmd + HV_L1_SIZE
+       .org temp_data_pmd + SIZEOF_PMD
        END(temp_data_pmd)
 
        .align HV_PAGE_TABLE_ALIGN
@@ -248,11 +248,11 @@ ENTRY(temp_code_pmd)
         * permissions later.
         */
        .set addr, 0
-       .rept HV_L1_ENTRIES
+       .rept PTRS_PER_PMD
        PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
-       .set addr, addr + HV_PAGE_SIZE_LARGE
+       .set addr, addr + HPAGE_SIZE
        .endr
-       .org temp_code_pmd + HV_L1_SIZE
+       .org temp_code_pmd + SIZEOF_PMD
        END(temp_code_pmd)
 
        /*
index 2b7cd0a659a9d7c312c8e0a668c8b18e0d0de42c..d44c5a67a1ed37b2e74d8d767e45ab2bedc31044 100644 (file)
@@ -55,4 +55,5 @@ hv_store_mapping = TEXT_OFFSET + 0x106a0;
 hv_inquire_realpa = TEXT_OFFSET + 0x106c0;
 hv_flush_all = TEXT_OFFSET + 0x106e0;
 hv_get_ipi_pte = TEXT_OFFSET + 0x10700;
-hv_glue_internals = TEXT_OFFSET + 0x10720;
+hv_set_pte_super_shift = TEXT_OFFSET + 0x10720;
+hv_glue_internals = TEXT_OFFSET + 0x10740;
index 30ae76e50c44e458fc5b87e2d8712de3219383a5..7c06d597ffd0263875d0d67565f487df608575de 100644 (file)
@@ -220,7 +220,9 @@ intvec_\vecname:
         * This routine saves just the first four registers, plus the
         * stack context so we can do proper backtracing right away,
         * and defers to handle_interrupt to save the rest.
-        * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
+        * The backtracer needs pc, ex1, lr, sp, r52, and faultnum,
+        * and needs sp set to its final location at the bottom of
+        * the stack frame.
         */
        addli   r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
        wh64    r0   /* cache line 7 */
@@ -450,23 +452,6 @@ intvec_\vecname:
        push_reg r5, r52
        st      r52, r4
 
-       /* Load tp with our per-cpu offset. */
-#ifdef CONFIG_SMP
-       {
-        mfspr  r20, SPR_SYSTEM_SAVE_K_0
-        moveli r21, hw2_last(__per_cpu_offset)
-       }
-       {
-        shl16insli r21, r21, hw1(__per_cpu_offset)
-        bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
-       }
-       shl16insli r21, r21, hw0(__per_cpu_offset)
-       shl3add r20, r20, r21
-       ld      tp, r20
-#else
-       move    tp, zero
-#endif
-
        /*
         * If we will be returning to the kernel, we will need to
         * reset the interrupt masks to the state they had before.
@@ -489,6 +474,44 @@ intvec_\vecname:
        .endif
        st      r21, r32
 
+       /*
+        * we've captured enough state to the stack (including in
+        * particular our EX_CONTEXT state) that we can now release
+        * the interrupt critical section and replace it with our
+        * standard "interrupts disabled" mask value.  This allows
+        * synchronous interrupts (and profile interrupts) to punch
+        * through from this point onwards.
+        *
+        * It's important that no code before this point touch memory
+        * other than our own stack (to keep the invariant that this
+        * is all that gets touched under ICS), and that no code after
+        * this point reference any interrupt-specific SPR, in particular
+        * the EX_CONTEXT_K_ values.
+        */
+       .ifc \function,handle_nmi
+       IRQ_DISABLE_ALL(r20)
+       .else
+       IRQ_DISABLE(r20, r21)
+       .endif
+       mtspr   INTERRUPT_CRITICAL_SECTION, zero
+
+       /* Load tp with our per-cpu offset. */
+#ifdef CONFIG_SMP
+       {
+        mfspr  r20, SPR_SYSTEM_SAVE_K_0
+        moveli r21, hw2_last(__per_cpu_offset)
+       }
+       {
+        shl16insli r21, r21, hw1(__per_cpu_offset)
+        bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
+       }
+       shl16insli r21, r21, hw0(__per_cpu_offset)
+       shl3add r20, r20, r21
+       ld      tp, r20
+#else
+       move    tp, zero
+#endif
+
 #ifdef __COLLECT_LINKER_FEEDBACK__
        /*
         * Notify the feedback routines that we were in the
@@ -512,21 +535,6 @@ intvec_\vecname:
        FEEDBACK_ENTER(\function)
 #endif
 
-       /*
-        * we've captured enough state to the stack (including in
-        * particular our EX_CONTEXT state) that we can now release
-        * the interrupt critical section and replace it with our
-        * standard "interrupts disabled" mask value.  This allows
-        * synchronous interrupts (and profile interrupts) to punch
-        * through from this point onwards.
-        */
-       .ifc \function,handle_nmi
-       IRQ_DISABLE_ALL(r20)
-       .else
-       IRQ_DISABLE(r20, r21)
-       .endif
-       mtspr   INTERRUPT_CRITICAL_SECTION, zero
-
        /*
         * Prepare the first 256 stack bytes to be rapidly accessible
         * without having to fetch the background data.
@@ -736,9 +744,10 @@ STD_ENTRY(interrupt_return)
        beqzt   r30, .Lrestore_regs
        j       3f
 2:     TRACE_IRQS_ON
+       IRQ_ENABLE_LOAD(r20, r21)
        movei   r0, 1
        mtspr   INTERRUPT_CRITICAL_SECTION, r0
-       IRQ_ENABLE(r20, r21)
+       IRQ_ENABLE_APPLY(r20, r21)
        beqzt   r30, .Lrestore_regs
 3:
 
@@ -755,7 +764,6 @@ STD_ENTRY(interrupt_return)
         * that will save some cycles if this turns out to be a syscall.
         */
 .Lrestore_regs:
-       FEEDBACK_REENTER(interrupt_return)   /* called from elsewhere */
 
        /*
         * Rotate so we have one high bit and one low bit to test.
@@ -1249,7 +1257,7 @@ STD_ENTRY(fill_ra_stack)
        int_hand     INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
        int_hand     INT_DTLB_MISS, DTLB_MISS, do_page_fault
        int_hand     INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
-       int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
+       int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap
        int_hand     INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
        int_hand     INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
        int_hand     INT_IDN_TIMER, IDN_TIMER, bad_intr
index 6255f2eab112c9fcae9f5f4cab1d9462a9e12477..f0b54a934712920cc4ff7248b458446149585d1a 100644 (file)
@@ -31,6 +31,8 @@
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/checksum.h>
+#include <asm/tlbflush.h>
+#include <asm/homecache.h>
 #include <hv/hypervisor.h>
 
 
@@ -222,11 +224,22 @@ struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order)
        return alloc_pages_node(0, gfp_mask, order);
 }
 
+/*
+ * Address range in which pa=va mapping is set in setup_quasi_va_is_pa().
+ * For tilepro, PAGE_OFFSET is used since this is the largest possible value
+ * for tilepro, while for tilegx, we limit it to the entire middle-level page
+ * table, which we assume has been allocated and is undoubtedly large enough.
+ */
+#ifndef __tilegx__
+#define        QUASI_VA_IS_PA_ADDR_RANGE PAGE_OFFSET
+#else
+#define        QUASI_VA_IS_PA_ADDR_RANGE PGDIR_SIZE
+#endif
+
 static void setup_quasi_va_is_pa(void)
 {
-       HV_PTE *pgtable;
        HV_PTE pte;
-       int i;
+       unsigned long i;
 
        /*
         * Flush our TLB to prevent conflicts between the previous contents
@@ -234,16 +247,22 @@ static void setup_quasi_va_is_pa(void)
         */
        local_flush_tlb_all();
 
-       /* setup VA is PA, at least up to PAGE_OFFSET */
-
-       pgtable = (HV_PTE *)current->mm->pgd;
+       /*
+        * setup VA is PA, at least up to QUASI_VA_IS_PA_ADDR_RANGE.
+        * Note here we assume that level-1 page table is defined by
+        * HPAGE_SIZE.
+        */
        pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
        pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
-
-       for (i = 0; i < pgd_index(PAGE_OFFSET); i++) {
+       for (i = 0; i < (QUASI_VA_IS_PA_ADDR_RANGE >> HPAGE_SHIFT); i++) {
+               unsigned long vaddr = i << HPAGE_SHIFT;
+               pgd_t *pgd = pgd_offset(current->mm, vaddr);
+               pud_t *pud = pud_offset(pgd, vaddr);
+               pte_t *ptep = (pte_t *) pmd_offset(pud, vaddr);
                unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
+
                if (pfn_valid(pfn))
-                       __set_pte(&pgtable[i], pfn_pte(pfn, pte));
+                       __set_pte(ptep, pfn_pte(pfn, pte));
        }
 }
 
@@ -251,6 +270,7 @@ static void setup_quasi_va_is_pa(void)
 void machine_kexec(struct kimage *image)
 {
        void *reboot_code_buffer;
+       pte_t *ptep;
        void (*rnk)(unsigned long, void *, unsigned long)
                __noreturn;
 
@@ -266,8 +286,10 @@ void machine_kexec(struct kimage *image)
         */
        homecache_change_page_home(image->control_code_page, 0,
                                   smp_processor_id());
-       reboot_code_buffer = vmap(&image->control_code_page, 1, 0,
-                                 __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE));
+       reboot_code_buffer = page_address(image->control_code_page);
+       BUG_ON(reboot_code_buffer == NULL);
+       ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer);
+       __set_pte(ptep, pte_mkexec(*ptep));
        memcpy(reboot_code_buffer, relocate_new_kernel,
               relocate_new_kernel_size);
        __flush_icache_range(
index 98d476920106084d847b54fd9bf9cc19c9e21983..001cbfa10ac6364a9136b9d4440bfe69730a08be 100644 (file)
@@ -159,7 +159,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 
                switch (ELF_R_TYPE(rel[i].r_info)) {
 
-#define MUNGE(func) (*location = ((*location & ~func(-1)) | func(value)))
+#ifdef __LITTLE_ENDIAN
+# define MUNGE(func) \
+       (*location = ((*location & ~func(-1)) | func(value)))
+#else
+/*
+ * Instructions are always little-endian, so when we read them as data,
+ * we have to swap them around before and after modifying them.
+ */
+# define MUNGE(func) \
+       (*location = swab64((swab64(*location) & ~func(-1)) | func(value)))
+#endif
 
 #ifndef __tilegx__
                case R_TILE_32:
index 446a7f52cc11f3a6380b7e6cfbf2181abca046a0..dafc447b5125ad13bea8a37a62e637b8994e93cc 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/proc_fs.h>
 #include <linux/sysctl.h>
 #include <linux/hardirq.h>
+#include <linux/hugetlb.h>
 #include <linux/mman.h>
 #include <asm/unaligned.h>
 #include <asm/pgtable.h>
index f572c19c4082db3f803fb9441cf5ff9a355fe832..6be7991505019a30ce3ff6268c275a931a088644 100644 (file)
@@ -128,10 +128,10 @@ void arch_release_thread_info(struct thread_info *info)
         * Calling deactivate here just frees up the data structures.
         * If the task we're freeing held the last reference to a
         * hardwall fd, it would have been released prior to this point
-        * anyway via exit_files(), and "hardwall" would be NULL by now.
+        * anyway via exit_files(), and the hardwall_task.info pointers
+        * would be NULL by now.
         */
-       if (info->task->thread.hardwall)
-               hardwall_deactivate(info->task);
+       hardwall_deactivate_all(info->task);
 #endif
 
        if (step_state) {
@@ -245,7 +245,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 #ifdef CONFIG_HARDWALL
        /* New thread does not own any networks. */
-       p->thread.hardwall = NULL;
+       memset(&p->thread.hardwall[0], 0,
+              sizeof(struct hardwall_task) * HARDWALL_TYPES);
 #endif
 
 
@@ -515,12 +516,7 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 
 #ifdef CONFIG_HARDWALL
        /* Enable or disable access to the network registers appropriately. */
-       if (prev->thread.hardwall != NULL) {
-               if (next->thread.hardwall == NULL)
-                       restrict_network_mpls();
-       } else if (next->thread.hardwall != NULL) {
-               grant_network_mpls();
-       }
+       hardwall_switch_tasks(prev, next);
 #endif
 
        /*
@@ -569,8 +565,6 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
                return 1;
        }
        if (thread_info_flags & _TIF_SINGLESTEP) {
diff --git a/arch/tile/kernel/relocate_kernel.S b/arch/tile/kernel/relocate_kernel.S
deleted file mode 100644 (file)
index 010b418..0000000
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- *   This program is free software; you can redistribute it and/or
- *   modify it under the terms of the GNU General Public License
- *   as published by the Free Software Foundation, version 2.
- *
- *   This program is distributed in the hope that it will be useful, but
- *   WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *   NON INFRINGEMENT.  See the GNU General Public License for
- *   more details.
- *
- * copy new kernel into place and then call hv_reexec
- *
- */
-
-#include <linux/linkage.h>
-#include <arch/chip.h>
-#include <asm/page.h>
-#include <hv/hypervisor.h>
-
-#define ___hvb MEM_SV_INTRPT + HV_GLUE_START_CPA
-
-#define ___hv_dispatch(f) (___hvb + (HV_DISPATCH_ENTRY_SIZE * f))
-
-#define ___hv_console_putc ___hv_dispatch(HV_DISPATCH_CONSOLE_PUTC)
-#define ___hv_halt         ___hv_dispatch(HV_DISPATCH_HALT)
-#define ___hv_reexec       ___hv_dispatch(HV_DISPATCH_REEXEC)
-#define ___hv_flush_remote ___hv_dispatch(HV_DISPATCH_FLUSH_REMOTE)
-
-#undef RELOCATE_NEW_KERNEL_VERBOSE
-
-STD_ENTRY(relocate_new_kernel)
-
-       move    r30, r0         /* page list */
-       move    r31, r1         /* address of page we are on */
-       move    r32, r2         /* start address of new kernel */
-
-       shri    r1, r1, PAGE_SHIFT
-       addi    r1, r1, 1
-       shli    sp, r1, PAGE_SHIFT
-       addi    sp, sp, -8
-       /* we now have a stack (whether we need one or not) */
-
-       moveli  r40, lo16(___hv_console_putc)
-       auli    r40, r40, ha16(___hv_console_putc)
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
-       moveli  r0, 'r'
-       jalr    r40
-
-       moveli  r0, '_'
-       jalr    r40
-
-       moveli  r0, 'n'
-       jalr    r40
-
-       moveli  r0, '_'
-       jalr    r40
-
-       moveli  r0, 'k'
-       jalr    r40
-
-       moveli  r0, '\n'
-       jalr    r40
-#endif
-
-       /*
-        * Throughout this code r30 is pointer to the element of page
-        * list we are working on.
-        *
-        * Normally we get to the next element of the page list by
-        * incrementing r30 by four.  The exception is if the element
-        * on the page list is an IND_INDIRECTION in which case we use
-        * the element with the low bits masked off as the new value
-        * of r30.
-        *
-        * To get this started, we need the value passed to us (which
-        * will always be an IND_INDIRECTION) in memory somewhere with
-        * r30 pointing at it.  To do that, we push the value passed
-        * to us on the stack and make r30 point to it.
-        */
-
-       sw      sp, r30
-       move    r30, sp
-       addi    sp, sp, -8
-
-#if CHIP_HAS_CBOX_HOME_MAP()
-       /*
-        * On TILEPro, we need to flush all tiles' caches, since we may
-        * have been doing hash-for-home caching there.  Note that we
-        * must do this _after_ we're completely done modifying any memory
-        * other than our output buffer (which we know is locally cached).
-        * We want the caches to be fully clean when we do the reexec,
-        * because the hypervisor is going to do this flush again at that
-        * point, and we don't want that second flush to overwrite any memory.
-        */
-       {
-        move   r0, zero         /* cache_pa */
-        move   r1, zero
-       }
-       {
-        auli   r2, zero, ha16(HV_FLUSH_EVICT_L2) /* cache_control */
-        movei  r3, -1           /* cache_cpumask; -1 means all client tiles */
-       }
-       {
-        move   r4, zero         /* tlb_va */
-        move   r5, zero         /* tlb_length */
-       }
-       {
-        move   r6, zero         /* tlb_pgsize */
-        move   r7, zero         /* tlb_cpumask */
-       }
-       {
-        move   r8, zero         /* asids */
-        moveli r20, lo16(___hv_flush_remote)
-       }
-       {
-        move   r9, zero         /* asidcount */
-        auli   r20, r20, ha16(___hv_flush_remote)
-       }
-
-       jalr    r20
-#endif
-
-       /* r33 is destination pointer, default to zero */
-
-       moveli  r33, 0
-
-.Lloop:        lw      r10, r30
-
-       andi    r9, r10, 0xf    /* low 4 bits tell us what type it is */
-       xor     r10, r10, r9    /* r10 is now value with low 4 bits stripped */
-
-       seqi    r0, r9, 0x1     /* IND_DESTINATION */
-       bzt     r0, .Ltry2
-
-       move    r33, r10
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
-       moveli  r0, 'd'
-       jalr    r40
-#endif
-
-       addi    r30, r30, 4
-       j       .Lloop
-
-.Ltry2:
-       seqi    r0, r9, 0x2     /* IND_INDIRECTION */
-       bzt     r0, .Ltry4
-
-       move    r30, r10
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
-       moveli  r0, 'i'
-       jalr    r40
-#endif
-
-       j       .Lloop
-
-.Ltry4:
-       seqi    r0, r9, 0x4     /* IND_DONE */
-       bzt     r0, .Ltry8
-
-       mf
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
-       moveli  r0, 'D'
-       jalr    r40
-       moveli  r0, '\n'
-       jalr    r40
-#endif
-
-       move    r0, r32
-       moveli  r1, 0           /* arg to hv_reexec is 64 bits */
-
-       moveli  r41, lo16(___hv_reexec)
-       auli    r41, r41, ha16(___hv_reexec)
-
-       jalr    r41
-
-       /* we should not get here */
-
-       moveli  r0, '?'
-       jalr    r40
-       moveli  r0, '\n'
-       jalr    r40
-
-       j       .Lhalt
-
-.Ltry8:        seqi    r0, r9, 0x8     /* IND_SOURCE */
-       bz      r0, .Lerr       /* unknown type */
-
-       /* copy page at r10 to page at r33 */
-
-       move    r11, r33
-
-       moveli  r0, lo16(PAGE_SIZE)
-       auli    r0, r0, ha16(PAGE_SIZE)
-       add     r33, r33, r0
-
-       /* copy word at r10 to word at r11 until r11 equals r33 */
-
-       /* We know page size must be multiple of 16, so we can unroll
-        * 16 times safely without any edge case checking.
-        *
-        * Issue a flush of the destination every 16 words to avoid
-        * incoherence when starting the new kernel.  (Now this is
-        * just good paranoia because the hv_reexec call will also
-        * take care of this.)
-        */
-
-1:
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0; addi   r11, r11, 4 }
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0; addi   r11, r11, 4 }
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0; addi   r11, r11, 4 }
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0; addi   r11, r11, 4 }
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0; addi   r11, r11, 4 }
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0; addi   r11, r11, 4 }
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0; addi   r11, r11, 4 }
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0; addi   r11, r11, 4 }
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0; addi   r11, r11, 4 }
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0; addi   r11, r11, 4 }
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0; addi   r11, r11, 4 }
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0; addi   r11, r11, 4 }
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0; addi   r11, r11, 4 }
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0; addi   r11, r11, 4 }
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0; addi   r11, r11, 4 }
-       { lw    r0, r10; addi   r10, r10, 4 }
-       { sw    r11, r0 }
-       { flush r11    ; addi   r11, r11, 4 }
-
-       seq     r0, r33, r11
-       bzt     r0, 1b
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
-       moveli  r0, 's'
-       jalr    r40
-#endif
-
-       addi    r30, r30, 4
-       j       .Lloop
-
-
-.Lerr: moveli  r0, 'e'
-       jalr    r40
-       moveli  r0, 'r'
-       jalr    r40
-       moveli  r0, 'r'
-       jalr    r40
-       moveli  r0, '\n'
-       jalr    r40
-.Lhalt:
-       moveli  r41, lo16(___hv_halt)
-       auli    r41, r41, ha16(___hv_halt)
-
-       jalr    r41
-       STD_ENDPROC(relocate_new_kernel)
-
-       .section .rodata,"a"
-
-       .globl relocate_new_kernel_size
-relocate_new_kernel_size:
-       .long .Lend_relocate_new_kernel - relocate_new_kernel
diff --git a/arch/tile/kernel/relocate_kernel_32.S b/arch/tile/kernel/relocate_kernel_32.S
new file mode 100644 (file)
index 0000000..010b418
--- /dev/null
@@ -0,0 +1,280 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * copy new kernel into place and then call hv_reexec
+ *
+ */
+
+#include <linux/linkage.h>
+#include <arch/chip.h>
+#include <asm/page.h>
+#include <hv/hypervisor.h>
+
+#define ___hvb MEM_SV_INTRPT + HV_GLUE_START_CPA
+
+#define ___hv_dispatch(f) (___hvb + (HV_DISPATCH_ENTRY_SIZE * f))
+
+#define ___hv_console_putc ___hv_dispatch(HV_DISPATCH_CONSOLE_PUTC)
+#define ___hv_halt         ___hv_dispatch(HV_DISPATCH_HALT)
+#define ___hv_reexec       ___hv_dispatch(HV_DISPATCH_REEXEC)
+#define ___hv_flush_remote ___hv_dispatch(HV_DISPATCH_FLUSH_REMOTE)
+
+#undef RELOCATE_NEW_KERNEL_VERBOSE
+
+STD_ENTRY(relocate_new_kernel)
+
+       move    r30, r0         /* page list */
+       move    r31, r1         /* address of page we are on */
+       move    r32, r2         /* start address of new kernel */
+
+       shri    r1, r1, PAGE_SHIFT
+       addi    r1, r1, 1
+       shli    sp, r1, PAGE_SHIFT
+       addi    sp, sp, -8
+       /* we now have a stack (whether we need one or not) */
+
+       moveli  r40, lo16(___hv_console_putc)
+       auli    r40, r40, ha16(___hv_console_putc)
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+       moveli  r0, 'r'
+       jalr    r40
+
+       moveli  r0, '_'
+       jalr    r40
+
+       moveli  r0, 'n'
+       jalr    r40
+
+       moveli  r0, '_'
+       jalr    r40
+
+       moveli  r0, 'k'
+       jalr    r40
+
+       moveli  r0, '\n'
+       jalr    r40
+#endif
+
+       /*
+        * Throughout this code r30 is pointer to the element of page
+        * list we are working on.
+        *
+        * Normally we get to the next element of the page list by
+        * incrementing r30 by four.  The exception is if the element
+        * on the page list is an IND_INDIRECTION in which case we use
+        * the element with the low bits masked off as the new value
+        * of r30.
+        *
+        * To get this started, we need the value passed to us (which
+        * will always be an IND_INDIRECTION) in memory somewhere with
+        * r30 pointing at it.  To do that, we push the value passed
+        * to us on the stack and make r30 point to it.
+        */
+
+       sw      sp, r30
+       move    r30, sp
+       addi    sp, sp, -8
+
+#if CHIP_HAS_CBOX_HOME_MAP()
+       /*
+        * On TILEPro, we need to flush all tiles' caches, since we may
+        * have been doing hash-for-home caching there.  Note that we
+        * must do this _after_ we're completely done modifying any memory
+        * other than our output buffer (which we know is locally cached).
+        * We want the caches to be fully clean when we do the reexec,
+        * because the hypervisor is going to do this flush again at that
+        * point, and we don't want that second flush to overwrite any memory.
+        */
+       {
+        move   r0, zero         /* cache_pa */
+        move   r1, zero
+       }
+       {
+        auli   r2, zero, ha16(HV_FLUSH_EVICT_L2) /* cache_control */
+        movei  r3, -1           /* cache_cpumask; -1 means all client tiles */
+       }
+       {
+        move   r4, zero         /* tlb_va */
+        move   r5, zero         /* tlb_length */
+       }
+       {
+        move   r6, zero         /* tlb_pgsize */
+        move   r7, zero         /* tlb_cpumask */
+       }
+       {
+        move   r8, zero         /* asids */
+        moveli r20, lo16(___hv_flush_remote)
+       }
+       {
+        move   r9, zero         /* asidcount */
+        auli   r20, r20, ha16(___hv_flush_remote)
+       }
+
+       jalr    r20
+#endif
+
+       /* r33 is destination pointer, default to zero */
+
+       moveli  r33, 0
+
+.Lloop:        lw      r10, r30
+
+       andi    r9, r10, 0xf    /* low 4 bits tell us what type it is */
+       xor     r10, r10, r9    /* r10 is now value with low 4 bits stripped */
+
+       seqi    r0, r9, 0x1     /* IND_DESTINATION */
+       bzt     r0, .Ltry2
+
+       move    r33, r10
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+       moveli  r0, 'd'
+       jalr    r40
+#endif
+
+       addi    r30, r30, 4
+       j       .Lloop
+
+.Ltry2:
+       seqi    r0, r9, 0x2     /* IND_INDIRECTION */
+       bzt     r0, .Ltry4
+
+       move    r30, r10
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+       moveli  r0, 'i'
+       jalr    r40
+#endif
+
+       j       .Lloop
+
+.Ltry4:
+       seqi    r0, r9, 0x4     /* IND_DONE */
+       bzt     r0, .Ltry8
+
+       mf
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+       moveli  r0, 'D'
+       jalr    r40
+       moveli  r0, '\n'
+       jalr    r40
+#endif
+
+       move    r0, r32
+       moveli  r1, 0           /* arg to hv_reexec is 64 bits */
+
+       moveli  r41, lo16(___hv_reexec)
+       auli    r41, r41, ha16(___hv_reexec)
+
+       jalr    r41
+
+       /* we should not get here */
+
+       moveli  r0, '?'
+       jalr    r40
+       moveli  r0, '\n'
+       jalr    r40
+
+       j       .Lhalt
+
+.Ltry8:        seqi    r0, r9, 0x8     /* IND_SOURCE */
+       bz      r0, .Lerr       /* unknown type */
+
+       /* copy page at r10 to page at r33 */
+
+       move    r11, r33
+
+       moveli  r0, lo16(PAGE_SIZE)
+       auli    r0, r0, ha16(PAGE_SIZE)
+       add     r33, r33, r0
+
+       /* copy word at r10 to word at r11 until r11 equals r33 */
+
+       /* We know page size must be multiple of 16, so we can unroll
+        * 16 times safely without any edge case checking.
+        *
+        * Issue a flush of the destination every 16 words to avoid
+        * incoherence when starting the new kernel.  (Now this is
+        * just good paranoia because the hv_reexec call will also
+        * take care of this.)
+        */
+
+1:
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0; addi   r11, r11, 4 }
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0; addi   r11, r11, 4 }
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0; addi   r11, r11, 4 }
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0; addi   r11, r11, 4 }
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0; addi   r11, r11, 4 }
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0; addi   r11, r11, 4 }
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0; addi   r11, r11, 4 }
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0; addi   r11, r11, 4 }
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0; addi   r11, r11, 4 }
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0; addi   r11, r11, 4 }
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0; addi   r11, r11, 4 }
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0; addi   r11, r11, 4 }
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0; addi   r11, r11, 4 }
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0; addi   r11, r11, 4 }
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0; addi   r11, r11, 4 }
+       { lw    r0, r10; addi   r10, r10, 4 }
+       { sw    r11, r0 }
+       { flush r11    ; addi   r11, r11, 4 }
+
+       seq     r0, r33, r11
+       bzt     r0, 1b
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+       moveli  r0, 's'
+       jalr    r40
+#endif
+
+       addi    r30, r30, 4
+       j       .Lloop
+
+
+.Lerr: moveli  r0, 'e'
+       jalr    r40
+       moveli  r0, 'r'
+       jalr    r40
+       moveli  r0, 'r'
+       jalr    r40
+       moveli  r0, '\n'
+       jalr    r40
+.Lhalt:
+       moveli  r41, lo16(___hv_halt)
+       auli    r41, r41, ha16(___hv_halt)
+
+       jalr    r41
+       STD_ENDPROC(relocate_new_kernel)
+
+       .section .rodata,"a"
+
+       .globl relocate_new_kernel_size
+relocate_new_kernel_size:
+       .long .Lend_relocate_new_kernel - relocate_new_kernel
diff --git a/arch/tile/kernel/relocate_kernel_64.S b/arch/tile/kernel/relocate_kernel_64.S
new file mode 100644 (file)
index 0000000..1c09a4f
--- /dev/null
@@ -0,0 +1,260 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * copy new kernel into place and then call hv_reexec
+ *
+ */
+
+#include <linux/linkage.h>
+#include <arch/chip.h>
+#include <asm/page.h>
+#include <hv/hypervisor.h>
+
+#undef RELOCATE_NEW_KERNEL_VERBOSE
+
+STD_ENTRY(relocate_new_kernel)
+
+       move    r30, r0         /* page list */
+       move    r31, r1         /* address of page we are on */
+       move    r32, r2         /* start address of new kernel */
+
+       shrui   r1, r1, PAGE_SHIFT
+       addi    r1, r1, 1
+       shli    sp, r1, PAGE_SHIFT
+       addi    sp, sp, -8
+       /* we now have a stack (whether we need one or not) */
+
+       moveli  r40, hw2_last(hv_console_putc)
+       shl16insli r40, r40, hw1(hv_console_putc)
+       shl16insli r40, r40, hw0(hv_console_putc)
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+       moveli  r0, 'r'
+       jalr    r40
+
+       moveli  r0, '_'
+       jalr    r40
+
+       moveli  r0, 'n'
+       jalr    r40
+
+       moveli  r0, '_'
+       jalr    r40
+
+       moveli  r0, 'k'
+       jalr    r40
+
+       moveli  r0, '\n'
+       jalr    r40
+#endif
+
+       /*
+        * Throughout this code r30 is pointer to the element of page
+        * list we are working on.
+        *
+        * Normally we get to the next element of the page list by
+        * incrementing r30 by eight.  The exception is if the element
+        * on the page list is an IND_INDIRECTION in which case we use
+        * the element with the low bits masked off as the new value
+        * of r30.
+        *
+        * To get this started, we need the value passed to us (which
+        * will always be an IND_INDIRECTION) in memory somewhere with
+        * r30 pointing at it.  To do that, we push the value passed
+        * to us on the stack and make r30 point to it.
+        */
+
+       st      sp, r30
+       move    r30, sp
+       addi    sp, sp, -16
+
+#if CHIP_HAS_CBOX_HOME_MAP()
+       /*
+        * On TILE-GX, we need to flush all tiles' caches, since we may
+        * have been doing hash-for-home caching there.  Note that we
+        * must do this _after_ we're completely done modifying any memory
+        * other than our output buffer (which we know is locally cached).
+        * We want the caches to be fully clean when we do the reexec,
+        * because the hypervisor is going to do this flush again at that
+        * point, and we don't want that second flush to overwrite any memory.
+        */
+       {
+        move   r0, zero         /* cache_pa */
+        moveli r1, hw2_last(HV_FLUSH_EVICT_L2)
+       }
+       {
+        shl16insli     r1, r1, hw1(HV_FLUSH_EVICT_L2)
+        movei  r2, -1           /* cache_cpumask; -1 means all client tiles */
+       }
+       {
+        shl16insli     r1, r1, hw0(HV_FLUSH_EVICT_L2)  /* cache_control */
+        move   r3, zero         /* tlb_va */
+       }
+       {
+        move   r4, zero         /* tlb_length */
+        move   r5, zero         /* tlb_pgsize */
+       }
+       {
+        move   r6, zero         /* tlb_cpumask */
+        move   r7, zero         /* asids */
+       }
+       {
+        moveli r20, hw2_last(hv_flush_remote)
+        move   r8, zero         /* asidcount */
+       }
+       shl16insli      r20, r20, hw1(hv_flush_remote)
+       shl16insli      r20, r20, hw0(hv_flush_remote)
+
+       jalr    r20
+#endif
+
+       /* r33 is destination pointer, default to zero */
+
+       moveli  r33, 0
+
+.Lloop:        ld      r10, r30
+
+       andi    r9, r10, 0xf    /* low 4 bits tell us what type it is */
+       xor     r10, r10, r9    /* r10 is now value with low 4 bits stripped */
+
+       cmpeqi  r0, r9, 0x1     /* IND_DESTINATION */
+       beqzt   r0, .Ltry2
+
+       move    r33, r10
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+       moveli  r0, 'd'
+       jalr    r40
+#endif
+
+       addi    r30, r30, 8
+       j       .Lloop
+
+.Ltry2:
+       cmpeqi  r0, r9, 0x2     /* IND_INDIRECTION */
+       beqzt   r0, .Ltry4
+
+       move    r30, r10
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+       moveli  r0, 'i'
+       jalr    r40
+#endif
+
+       j       .Lloop
+
+.Ltry4:
+       cmpeqi  r0, r9, 0x4     /* IND_DONE */
+       beqzt   r0, .Ltry8
+
+       mf
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+       moveli  r0, 'D'
+       jalr    r40
+       moveli  r0, '\n'
+       jalr    r40
+#endif
+
+       move    r0, r32
+
+       moveli  r41, hw2_last(hv_reexec)
+       shl16insli      r41, r41, hw1(hv_reexec)
+       shl16insli      r41, r41, hw0(hv_reexec)
+
+       jalr    r41
+
+       /* we should not get here */
+
+       moveli  r0, '?'
+       jalr    r40
+       moveli  r0, '\n'
+       jalr    r40
+
+       j       .Lhalt
+
+.Ltry8:        cmpeqi  r0, r9, 0x8     /* IND_SOURCE */
+       beqz    r0, .Lerr       /* unknown type */
+
+       /* copy page at r10 to page at r33 */
+
+       move    r11, r33
+
+       moveli  r0, hw2_last(PAGE_SIZE)
+       shl16insli      r0, r0, hw1(PAGE_SIZE)
+       shl16insli      r0, r0, hw0(PAGE_SIZE)
+       add     r33, r33, r0
+
+       /* copy word at r10 to word at r11 until r11 equals r33 */
+
+       /* We know page size must be multiple of 8, so we can unroll
+        * 8 times safely without any edge case checking.
+        *
+        * Issue a flush of the destination every 8 words to avoid
+        * incoherence when starting the new kernel.  (Now this is
+        * just good paranoia because the hv_reexec call will also
+        * take care of this.)
+        */
+
+1:
+       { ld    r0, r10; addi   r10, r10, 8 }
+       { st    r11, r0; addi   r11, r11, 8 }
+       { ld    r0, r10; addi   r10, r10, 8 }
+       { st    r11, r0; addi   r11, r11, 8 }
+       { ld    r0, r10; addi   r10, r10, 8 }
+       { st    r11, r0; addi   r11, r11, 8 }
+       { ld    r0, r10; addi   r10, r10, 8 }
+       { st    r11, r0; addi   r11, r11, 8 }
+       { ld    r0, r10; addi   r10, r10, 8 }
+       { st    r11, r0; addi   r11, r11, 8 }
+       { ld    r0, r10; addi   r10, r10, 8 }
+       { st    r11, r0; addi   r11, r11, 8 }
+       { ld    r0, r10; addi   r10, r10, 8 }
+       { st    r11, r0; addi   r11, r11, 8 }
+       { ld    r0, r10; addi   r10, r10, 8 }
+       { st    r11, r0 }
+       { flush r11    ; addi   r11, r11, 8 }
+
+       cmpeq   r0, r33, r11
+       beqzt   r0, 1b
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+       moveli  r0, 's'
+       jalr    r40
+#endif
+
+       addi    r30, r30, 8
+       j       .Lloop
+
+
+.Lerr: moveli  r0, 'e'
+       jalr    r40
+       moveli  r0, 'r'
+       jalr    r40
+       moveli  r0, 'r'
+       jalr    r40
+       moveli  r0, '\n'
+       jalr    r40
+.Lhalt:
+       moveli r41, hw2_last(hv_halt)
+       shl16insli r41, r41, hw1(hv_halt)
+       shl16insli r41, r41, hw0(hv_halt)
+
+       jalr    r41
+       STD_ENDPROC(relocate_new_kernel)
+
+       .section .rodata,"a"
+
+       .globl relocate_new_kernel_size
+relocate_new_kernel_size:
+       .long .Lend_relocate_new_kernel - relocate_new_kernel
index 98d80eb49ddbd912a0957b0dce0ad4d121be665a..6098ccc59be2484a22a5f11c862569d9850c4248 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/highmem.h>
 #include <linux/smp.h>
 #include <linux/timex.h>
+#include <linux/hugetlb.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -49,9 +50,6 @@ char chip_model[64] __write_once;
 struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 
-/* We only create bootmem data on node 0. */
-static bootmem_data_t __initdata node0_bdata;
-
 /* Information on the NUMA nodes that we compute early */
 unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
 unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
@@ -534,37 +532,96 @@ static void __init setup_memory(void)
 #endif
 }
 
-static void __init setup_bootmem_allocator(void)
+/*
+ * On 32-bit machines, we only put bootmem on the low controller,
+ * since PAs > 4GB can't be used in bootmem.  In principle one could
+ * imagine, e.g., multiple 1 GB controllers all of which could support
+ * bootmem, but in practice using controllers this small isn't a
+ * particularly interesting scenario, so we just keep it simple and
+ * use only the first controller for bootmem on 32-bit machines.
+ */
+static inline int node_has_bootmem(int nid)
 {
-       unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn;
+#ifdef CONFIG_64BIT
+       return 1;
+#else
+       return nid == 0;
+#endif
+}
 
-       /* Provide a node 0 bdata. */
-       NODE_DATA(0)->bdata = &node0_bdata;
+static inline unsigned long alloc_bootmem_pfn(int nid,
+                                             unsigned long size,
+                                             unsigned long goal)
+{
+       void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
+                                        PAGE_SIZE, goal);
+       unsigned long pfn = kaddr_to_pfn(kva);
+       BUG_ON(goal && PFN_PHYS(pfn) != goal);
+       return pfn;
+}
 
-#ifdef CONFIG_PCI
-       /* Don't let boot memory alias the PCI region. */
-       last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn);
+static void __init setup_bootmem_allocator_node(int i)
+{
+       unsigned long start, end, mapsize, mapstart;
+
+       if (node_has_bootmem(i)) {
+               NODE_DATA(i)->bdata = &bootmem_node_data[i];
+       } else {
+               /* Share controller zero's bdata for now. */
+               NODE_DATA(i)->bdata = &bootmem_node_data[0];
+               return;
+       }
+
+       /* Skip up to after the bss in node 0. */
+       start = (i == 0) ? min_low_pfn : node_start_pfn[i];
+
+       /* Only lowmem, if we're a HIGHMEM build. */
+#ifdef CONFIG_HIGHMEM
+       end = node_lowmem_end_pfn[i];
 #else
-       last_alloc_pfn = max_low_pfn;
+       end = node_end_pfn[i];
 #endif
 
-       /*
-        * Initialize the boot-time allocator (with low memory only):
-        * The first argument says where to put the bitmap, and the
-        * second says where the end of allocatable memory is.
-        */
-       bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn);
+       /* No memory here. */
+       if (end == start)
+               return;
+
+       /* Figure out where the bootmem bitmap is located. */
+       mapsize = bootmem_bootmap_pages(end - start);
+       if (i == 0) {
+               /* Use some space right before the heap on node 0. */
+               mapstart = start;
+               start += mapsize;
+       } else {
+               /* Allocate bitmap on node 0 to avoid page table issues. */
+               mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
+       }
 
+       /* Initialize a node. */
+       init_bootmem_node(NODE_DATA(i), mapstart, start, end);
+
+       /* Free all the space back into the allocator. */
+       free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));
+
+#if defined(CONFIG_PCI)
        /*
-        * Let the bootmem allocator use all the space we've given it
-        * except for its own bitmap.
+        * Throw away any memory aliased by the PCI region.  FIXME: this
+        * is a temporary hack to work around bug 10502, and needs to be
+        * fixed properly.
         */
-       first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size);
-       if (first_alloc_pfn >= last_alloc_pfn)
-               early_panic("Not enough memory on controller 0 for bootmem\n");
+       if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start)
+               reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn),
+                               PFN_PHYS(pci_reserve_end_pfn -
+                                        pci_reserve_start_pfn),
+                               BOOTMEM_EXCLUSIVE);
+#endif
+}
 
-       free_bootmem(PFN_PHYS(first_alloc_pfn),
-                    PFN_PHYS(last_alloc_pfn - first_alloc_pfn));
+static void __init setup_bootmem_allocator(void)
+{
+       int i;
+       for (i = 0; i < MAX_NUMNODES; ++i)
+               setup_bootmem_allocator_node(i);
 
 #ifdef CONFIG_KEXEC
        if (crashk_res.start != crashk_res.end)
@@ -595,14 +652,6 @@ static int __init percpu_size(void)
        return size;
 }
 
-static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
-{
-       void *kva = __alloc_bootmem(size, PAGE_SIZE, goal);
-       unsigned long pfn = kaddr_to_pfn(kva);
-       BUG_ON(goal && PFN_PHYS(pfn) != goal);
-       return pfn;
-}
-
 static void __init zone_sizes_init(void)
 {
        unsigned long zones_size[MAX_NR_ZONES] = { 0 };
@@ -640,21 +689,22 @@ static void __init zone_sizes_init(void)
                 * though, there'll be no lowmem, so we just alloc_bootmem
                 * the memmap.  There will be no percpu memory either.
                 */
-               if (__pfn_to_highbits(start) == 0) {
-                       /* In low PAs, allocate via bootmem. */
+               if (i != 0 && cpu_isset(i, isolnodes)) {
+                       node_memmap_pfn[i] =
+                               alloc_bootmem_pfn(0, memmap_size, 0);
+                       BUG_ON(node_percpu[i] != 0);
+               } else if (node_has_bootmem(start)) {
                        unsigned long goal = 0;
                        node_memmap_pfn[i] =
-                               alloc_bootmem_pfn(memmap_size, goal);
+                               alloc_bootmem_pfn(i, memmap_size, 0);
                        if (kdata_huge)
                                goal = PFN_PHYS(lowmem_end) - node_percpu[i];
                        if (node_percpu[i])
                                node_percpu_pfn[i] =
-                                   alloc_bootmem_pfn(node_percpu[i], goal);
-               } else if (cpu_isset(i, isolnodes)) {
-                       node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0);
-                       BUG_ON(node_percpu[i] != 0);
+                                       alloc_bootmem_pfn(i, node_percpu[i],
+                                                         goal);
                } else {
-                       /* In high PAs, just reserve some pages. */
+                       /* In non-bootmem zones, just reserve some pages. */
                        node_memmap_pfn[i] = node_free_pfn[i];
                        node_free_pfn[i] += PFN_UP(memmap_size);
                        if (!kdata_huge) {
@@ -678,16 +728,9 @@ static void __init zone_sizes_init(void)
                zones_size[ZONE_NORMAL] = end - start;
 #endif
 
-               /*
-                * Everyone shares node 0's bootmem allocator, but
-                * we use alloc_remap(), above, to put the actual
-                * struct page array on the individual controllers,
-                * which is most of the data that we actually care about.
-                * We can't place bootmem allocators on the other
-                * controllers since the bootmem allocator can only
-                * operate on 32-bit physical addresses.
-                */
-               NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;
+               /* Take zone metadata from controller 0 if we're isolnode. */
+               if (node_isset(i, isolnodes))
+                       NODE_DATA(i)->bdata = &bootmem_node_data[0];
 
                free_area_init_node(i, zones_size, start, NULL);
                printk(KERN_DEBUG "  Normal zone: %ld per-cpu pages\n",
@@ -870,6 +913,22 @@ subsys_initcall(topology_init);
 
 #endif /* CONFIG_NUMA */
 
+/*
+ * Initialize hugepage support on this cpu.  We do this on all cores
+ * early in boot: before argument parsing for the boot cpu, and after
+ * argument parsing but before the init functions run on the secondaries.
+ * So the values we set up here in the hypervisor may be overridden on
+ * the boot cpu as arguments are parsed.
+ */
+static __cpuinit void init_super_pages(void)
+{
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+       int i;
+       for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
+               hv_set_pte_super_shift(i, huge_shift[i]);
+#endif
+}
+
 /**
  * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
  * @boot: Is this the boot cpu?
@@ -924,6 +983,8 @@ void __cpuinit setup_cpu(int boot)
        /* Reset the network state on this cpu. */
        reset_network_state();
 #endif
+
+       init_super_pages();
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -1412,13 +1473,13 @@ void __init setup_per_cpu_areas(void)
                for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {
 
                        /* Update the vmalloc mapping and page home. */
-                       pte_t *ptep =
-                               virt_to_pte(NULL, (unsigned long)ptr + i);
+                       unsigned long addr = (unsigned long)ptr + i;
+                       pte_t *ptep = virt_to_pte(NULL, addr);
                        pte_t pte = *ptep;
                        BUG_ON(pfn != pte_pfn(pte));
                        pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
                        pte = set_remote_cache_cpu(pte, cpu);
-                       set_pte(ptep, pte);
+                       set_pte_at(&init_mm, addr, ptep, pte);
 
                        /* Update the lowmem mapping for consistency. */
                        lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
@@ -1431,7 +1492,7 @@ void __init setup_per_cpu_areas(void)
                                BUG_ON(pte_huge(*ptep));
                        }
                        BUG_ON(pfn != pte_pfn(*ptep));
-                       set_pte(ptep, pte);
+                       set_pte_at(&init_mm, lowmem_va, ptep, pte);
                }
        }
 
index f79d4b88c747ae97db505a7f54143e610e37e24d..e29b0553211d611af9802dfe190f9d0d9d868f69 100644 (file)
@@ -37,8 +37,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
                stack_t __user *, uoss, struct pt_regs *, regs)
 {
@@ -96,7 +94,6 @@ SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -242,10 +239,11 @@ give_sigsegv:
  * OK, we're invoking a handler
  */
 
-static int handle_signal(unsigned long sig, siginfo_t *info,
-                        struct k_sigaction *ka, sigset_t *oldset,
+static void handle_signal(unsigned long sig, siginfo_t *info,
+                        struct k_sigaction *ka,
                         struct pt_regs *regs)
 {
+       sigset_t *oldset = sigmask_to_save();
        int ret;
 
        /* Are we from a system call? */
@@ -278,15 +276,9 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
        else
 #endif
                ret = setup_rt_frame(sig, ka, info, oldset, regs);
-       if (ret == 0) {
-               /* This code is only called from system calls or from
-                * the work_pending path in the return-to-user code, and
-                * either way we can re-enable interrupts unconditionally.
-                */
-               block_sigmask(ka, sig);
-       }
-
-       return ret;
+       if (ret)
+               return;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -299,7 +291,6 @@ void do_signal(struct pt_regs *regs)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t *oldset;
 
        /*
         * i386 will check if we're coming from kernel mode and bail out
@@ -308,24 +299,10 @@ void do_signal(struct pt_regs *regs)
         * helpful, we can reinstate the check on "!user_mode(regs)".
         */
 
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee! Actually deliver the signal.  */
-               if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TS_RESTORE_SIGMASK flag.
-                        */
-                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               }
-
+               handle_signal(signr, &info, &ka, regs);
                goto done;
        }
 
@@ -350,10 +327,7 @@ void do_signal(struct pt_regs *regs)
        }
 
        /* If there's no signal to deliver, just put the saved sigmask back. */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       restore_saved_sigmask();
 
 done:
        /* Avoid double syscall restart if there are nested signals. */
index 89529c9f060535013bfd0505cfa3a13cfd728cd6..27742e87e25596842c8e0d2030834ad0e5e89b5d 100644 (file)
@@ -172,9 +172,6 @@ static tile_bundle_bits rewrite_load_store_unaligned(
                return (tilepro_bundle_bits) 0;
        }
 
-#ifndef __LITTLE_ENDIAN
-# error We assume little-endian representation with copy_xx_user size 2 here
-#endif
        /* Handle unaligned load/store */
        if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
                unsigned short val_16;
@@ -195,8 +192,19 @@ static tile_bundle_bits rewrite_load_store_unaligned(
                        state->update = 1;
                }
        } else {
+               unsigned short val_16;
                val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
-               err = copy_to_user(addr, &val, size);
+               switch (size) {
+               case 2:
+                       val_16 = val;
+                       err = copy_to_user(addr, &val_16, sizeof(val_16));
+                       break;
+               case 4:
+                       err = copy_to_user(addr, &val, sizeof(val));
+                       break;
+               default:
+                       BUG();
+               }
        }
 
        if (err) {
index 91da0f721958da44d91ea08c526228c6c9a015e6..cbc73a8b8fe1e23fd58ce86ae759f525fd3d8ad6 100644 (file)
@@ -203,7 +203,7 @@ void __init ipi_init(void)
                if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
                        panic("Failed to initialize IPI for cpu %d\n", cpu);
 
-               offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
+               offset = PFN_PHYS(pte_pfn(pte));
                ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
        }
 #endif
index cb44ba7ccd2d1e61622a761c90259c3b257a9c1f..b08095b402d6c3d5f0a1f651b8c5578ed39c15fa 100644 (file)
 #include <asm/syscalls.h>
 #include <asm/pgtable.h>
 #include <asm/homecache.h>
+#include <asm/cachectl.h>
 #include <arch/chip.h>
 
-SYSCALL_DEFINE0(flush_cache)
+SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
+               unsigned long, flags)
 {
-       homecache_evict(cpumask_of(smp_processor_id()));
+       if (flags & DCACHE)
+               homecache_evict(cpumask_of(smp_processor_id()));
+       if (flags & ICACHE)
+               flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(current->mm),
+                            0, 0, 0, NULL, NULL, 0);
        return 0;
 }
 
index 71ae728e9d0be08eda450eb6b35076755086c528..e25b0a89c18f8e7c63cc3391477f8cadb255f468 100644 (file)
@@ -93,6 +93,10 @@ HV_CONF_ATTR(mezz_part,              HV_CONFSTR_MEZZ_PART_NUM)
 HV_CONF_ATTR(mezz_serial,      HV_CONFSTR_MEZZ_SERIAL_NUM)
 HV_CONF_ATTR(mezz_revision,    HV_CONFSTR_MEZZ_REV)
 HV_CONF_ATTR(mezz_description, HV_CONFSTR_MEZZ_DESC)
+HV_CONF_ATTR(cpumod_part,      HV_CONFSTR_CPUMOD_PART_NUM)
+HV_CONF_ATTR(cpumod_serial,    HV_CONFSTR_CPUMOD_SERIAL_NUM)
+HV_CONF_ATTR(cpumod_revision,  HV_CONFSTR_CPUMOD_REV)
+HV_CONF_ATTR(cpumod_description,HV_CONFSTR_CPUMOD_DESC)
 HV_CONF_ATTR(switch_control,   HV_CONFSTR_SWITCH_CONTROL)
 
 static struct attribute *board_attrs[] = {
@@ -104,6 +108,10 @@ static struct attribute *board_attrs[] = {
        &dev_attr_mezz_serial.attr,
        &dev_attr_mezz_revision.attr,
        &dev_attr_mezz_description.attr,
+       &dev_attr_cpumod_part.attr,
+       &dev_attr_cpumod_serial.attr,
+       &dev_attr_cpumod_revision.attr,
+       &dev_attr_cpumod_description.attr,
        &dev_attr_switch_control.attr,
        NULL
 };
index a5f241c24cac9ec3cdc6fc6d5e8099fe152921d6..3fd54d5bbd4c53d61f1b2ff0b6d08f7df7340b1a 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/cpumask.h>
 #include <linux/module.h>
+#include <linux/hugetlb.h>
 #include <asm/tlbflush.h>
 #include <asm/homecache.h>
 #include <hv/hypervisor.h>
@@ -49,25 +50,25 @@ void flush_tlb_current_task(void)
        flush_tlb_mm(current->mm);
 }
 
-void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm,
+void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
                       unsigned long va)
 {
-       unsigned long size = hv_page_size(vma);
+       unsigned long size = vma_kernel_pagesize(vma);
        int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
        flush_remote(0, cache, mm_cpumask(mm),
                     va, size, size, mm_cpumask(mm), NULL, 0);
 }
 
-void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va)
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 {
        flush_tlb_page_mm(vma, vma->vm_mm, va);
 }
 EXPORT_SYMBOL(flush_tlb_page);
 
-void flush_tlb_range(const struct vm_area_struct *vma,
+void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
 {
-       unsigned long size = hv_page_size(vma);
+       unsigned long size = vma_kernel_pagesize(vma);
        struct mm_struct *mm = vma->vm_mm;
        int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
        flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
index 73cff814ac57252065ca8086185f2fc7bb87c012..5b19a23c890801ef4f577228e083518d09302a58 100644 (file)
@@ -195,6 +195,25 @@ static int special_ill(bundle_bits bundle, int *sigp, int *codep)
        return 1;
 }
 
+static const char *const int_name[] = {
+       [INT_MEM_ERROR] = "Memory error",
+       [INT_ILL] = "Illegal instruction",
+       [INT_GPV] = "General protection violation",
+       [INT_UDN_ACCESS] = "UDN access",
+       [INT_IDN_ACCESS] = "IDN access",
+#if CHIP_HAS_SN()
+       [INT_SN_ACCESS] = "SN access",
+#endif
+       [INT_SWINT_3] = "Software interrupt 3",
+       [INT_SWINT_2] = "Software interrupt 2",
+       [INT_SWINT_0] = "Software interrupt 0",
+       [INT_UNALIGN_DATA] = "Unaligned data",
+       [INT_DOUBLE_FAULT] = "Double fault",
+#ifdef __tilegx__
+       [INT_ILL_TRANS] = "Illegal virtual address",
+#endif
+};
+
 void __kprobes do_trap(struct pt_regs *regs, int fault_num,
                       unsigned long reason)
 {
@@ -211,10 +230,17 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
         * current process and hope for the best.
         */
        if (!user_mode(regs)) {
+               const char *name;
                if (fixup_exception(regs))  /* only UNALIGN_DATA in practice */
                        return;
-               pr_alert("Kernel took bad trap %d at PC %#lx\n",
-                      fault_num, regs->pc);
+               if (fault_num >= 0 &&
+                   fault_num < sizeof(int_name)/sizeof(int_name[0]) &&
+                   int_name[fault_num] != NULL)
+                       name = int_name[fault_num];
+               else
+                       name = "Unknown interrupt";
+               pr_alert("Kernel took bad trap %d (%s) at PC %#lx\n",
+                        fault_num, name, regs->pc);
                if (fault_num == INT_GPV)
                        pr_alert("GPV_REASON is %#lx\n", reason);
                show_regs(regs);
index 771b251b409d67453a4205cbb835e80b41ec8bc2..f5cada70c3c85c8b77fab9fd66a39ad32939d1de 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/atomic.h>
-#include <asm/futex.h>
 #include <arch/chip.h>
 
 /* See <asm/atomic_32.h> */
@@ -50,7 +49,7 @@ int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
 
 #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
 
-static inline int *__atomic_hashed_lock(volatile void *v)
+int *__atomic_hashed_lock(volatile void *v)
 {
        /* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
 #if ATOMIC_LOCKS_FOUND_VIA_TABLE()
@@ -191,47 +190,6 @@ u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 
 
-static inline int *__futex_setup(int __user *v)
-{
-       /*
-        * Issue a prefetch to the counter to bring it into cache.
-        * As for __atomic_setup, but we can't do a read into the L1
-        * since it might fault; instead we do a prefetch into the L2.
-        */
-       __insn_prefetch(v);
-       return __atomic_hashed_lock((int __force *)v);
-}
-
-struct __get_user futex_set(u32 __user *v, int i)
-{
-       return __atomic_xchg((int __force *)v, __futex_setup(v), i);
-}
-
-struct __get_user futex_add(u32 __user *v, int n)
-{
-       return __atomic_xchg_add((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_or(u32 __user *v, int n)
-{
-       return __atomic_or((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_andn(u32 __user *v, int n)
-{
-       return __atomic_andn((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_xor(u32 __user *v, int n)
-{
-       return __atomic_xor((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_cmpxchg(u32 __user *v, int o, int n)
-{
-       return __atomic_cmpxchg((int __force *)v, __futex_setup(v), o, n);
-}
-
 /*
  * If any of the atomic or futex routines hit a bad address (not in
  * the page tables at kernel PL) this routine is called.  The futex
@@ -323,7 +281,4 @@ void __init __init_atomic_per_cpu(void)
        BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
 
 #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-
-       /* The futex code makes this assumption, so we validate it here. */
-       BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
 }
index 2a81d32de0da518989e5a5118ab91b77774e3e7f..dd5f0a33fdaff95d6b53efb947fa86880d92d706 100644 (file)
 
 /* arch/tile/lib/usercopy.S */
 #include <linux/uaccess.h>
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
 EXPORT_SYMBOL(strnlen_user_asm);
 EXPORT_SYMBOL(strncpy_from_user_asm);
 EXPORT_SYMBOL(clear_user_asm);
index 84fdc8d8e735c4a8aaa34b235c0c7aa5f37019ca..6f867dbf7c56df79a25ebcc091dfb9b5fa5321b7 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/module.h>
+#include "string-endian.h"
 
 void *memchr(const void *s, int c, size_t n)
 {
@@ -39,11 +40,8 @@ void *memchr(const void *s, int c, size_t n)
 
        /* Read the first word, but munge it so that bytes before the array
         * will not match goal.
-        *
-        * Note that this shift count expression works because we know
-        * shift counts are taken mod 64.
         */
-       before_mask = (1ULL << (s_int << 3)) - 1;
+       before_mask = MASK(s_int);
        v = (*p | before_mask) ^ (goal & before_mask);
 
        /* Compute the address of the last byte. */
@@ -65,7 +63,7 @@ void *memchr(const void *s, int c, size_t n)
        /* We found a match, but it might be in a byte past the end
         * of the array.
         */
-       ret = ((char *)p) + (__insn_ctz(bits) >> 3);
+       ret = ((char *)p) + (CFZ(bits) >> 3);
        return (ret <= last_byte_ptr) ? ret : NULL;
 }
 EXPORT_SYMBOL(memchr);
index 3fab9a6a2bbe34ef34dd63d03e4746ae993b25fa..c79b8e7c6828bd384e0c354dccf72553ef1c9536 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/module.h>
-#define __memcpy memcpy
 /* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */
 
 /* Must be 8 bytes in size. */
@@ -188,6 +187,7 @@ int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
 
        /* n != 0 if we get here.  Write out any trailing bytes. */
        dst1 = (char *)dst8;
+#ifndef __BIG_ENDIAN__
        if (n & 4) {
                ST4((uint32_t *)dst1, final);
                dst1 += 4;
@@ -202,11 +202,30 @@ int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
        }
        if (n)
                ST1((uint8_t *)dst1, final);
+#else
+       if (n & 4) {
+               ST4((uint32_t *)dst1, final >> 32);
+               dst1 += 4;
+        }
+        else
+        {
+               final >>= 32;
+        }
+       if (n & 2) {
+               ST2((uint16_t *)dst1, final >> 16);
+               dst1 += 2;
+        }
+        else
+        {
+               final >>= 16;
+        }
+       if (n & 1)
+               ST1((uint8_t *)dst1, final >> 8);
+#endif
 
        return RETVAL;
 }
 
-
 #ifdef USERCOPY_FUNC
 #undef ST1
 #undef ST2
index b2fe15e01075a588217504a13dc1139824a97605..3bc4b4e40d9397734035fcdecb891d6b695d7df9 100644 (file)
@@ -160,7 +160,7 @@ retry_source:
                        break;
                if (get_remote_cache_cpu(src_pte) == smp_processor_id())
                        break;
-               src_page = pfn_to_page(hv_pte_get_pfn(src_pte));
+               src_page = pfn_to_page(pte_pfn(src_pte));
                get_page(src_page);
                if (pte_val(src_pte) != pte_val(*src_ptep)) {
                        put_page(src_page);
@@ -168,7 +168,7 @@ retry_source:
                }
                if (pte_huge(src_pte)) {
                        /* Adjust the PTE to correspond to a small page */
-                       int pfn = hv_pte_get_pfn(src_pte);
+                       int pfn = pte_pfn(src_pte);
                        pfn += (((unsigned long)source & (HPAGE_SIZE-1))
                                >> PAGE_SHIFT);
                        src_pte = pfn_pte(pfn, src_pte);
@@ -188,7 +188,7 @@ retry_dest:
                        put_page(src_page);
                        break;
                }
-               dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte));
+               dst_page = pfn_to_page(pte_pfn(dst_pte));
                if (dst_page == src_page) {
                        /*
                         * Source and dest are on the same page; this
@@ -206,7 +206,7 @@ retry_dest:
                }
                if (pte_huge(dst_pte)) {
                        /* Adjust the PTE to correspond to a small page */
-                       int pfn = hv_pte_get_pfn(dst_pte);
+                       int pfn = pte_pfn(dst_pte);
                        pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
                                >> PAGE_SHIFT);
                        dst_pte = pfn_pte(pfn, dst_pte);
index 617a9273aaa8a645741d9a8db217c4b42614ac56..f39f9dc422b02e44853ff54e14a0dc3cdd277380 100644 (file)
@@ -15,8 +15,7 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/module.h>
-
-#undef strchr
+#include "string-endian.h"
 
 char *strchr(const char *s, int c)
 {
@@ -33,13 +32,9 @@ char *strchr(const char *s, int c)
         * match neither zero nor goal (we make sure the high bit of each
         * byte is 1, and the low 7 bits are all the opposite of the goal
         * byte).
-        *
-        * Note that this shift count expression works because we know shift
-        * counts are taken mod 64.
         */
-       const uint64_t before_mask = (1ULL << (s_int << 3)) - 1;
-       uint64_t v = (*p | before_mask) ^
-               (goal & __insn_v1shrsi(before_mask, 1));
+       const uint64_t before_mask = MASK(s_int);
+       uint64_t v = (*p | before_mask) ^ (goal & __insn_v1shrui(before_mask, 1));
 
        uint64_t zero_matches, goal_matches;
        while (1) {
@@ -55,8 +50,8 @@ char *strchr(const char *s, int c)
                v = *++p;
        }
 
-       z = __insn_ctz(zero_matches);
-       g = __insn_ctz(goal_matches);
+       z = CFZ(zero_matches);
+       g = CFZ(goal_matches);
 
        /* If we found c before '\0' we got a match. Note that if c == '\0'
         * then g == z, and we correctly return the address of the '\0'
diff --git a/arch/tile/lib/string-endian.h b/arch/tile/lib/string-endian.h
new file mode 100644 (file)
index 0000000..c0eed7c
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * Provide a mask based on the pointer alignment that
+ * sets up non-zero bytes before the beginning of the string.
+ * The MASK expression works because shift counts are taken mod 64.
+ * Also, specify how to count "first" and "last" bits
+ * when the bits have been read as a word.
+ */
+
+#include <asm/byteorder.h>
+
+#ifdef __LITTLE_ENDIAN
+#define MASK(x) (__insn_shl(1ULL, (x << 3)) - 1)
+#define NULMASK(x) ((2ULL << x) - 1)
+#define CFZ(x) __insn_ctz(x)
+#define REVCZ(x) __insn_clz(x)
+#else
+#define MASK(x) (__insn_shl(-2LL, ((-x << 3) - 1)))
+#define NULMASK(x) (-2LL << (63 - x))
+#define CFZ(x) __insn_clz(x)
+#define REVCZ(x) __insn_ctz(x)
+#endif
index 1c92d46202a847b826c423ab9f0d64ea8da12de0..9583fc3361fa1b847a29f16011f770f49a844c6b 100644 (file)
@@ -15,8 +15,7 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/module.h>
-
-#undef strlen
+#include "string-endian.h"
 
 size_t strlen(const char *s)
 {
@@ -24,15 +23,13 @@ size_t strlen(const char *s)
        const uintptr_t s_int = (uintptr_t) s;
        const uint64_t *p = (const uint64_t *)(s_int & -8);
 
-       /* Read the first word, but force bytes before the string to be nonzero.
-        * This expression works because we know shift counts are taken mod 64.
-        */
-       uint64_t v = *p | ((1ULL << (s_int << 3)) - 1);
+       /* Read and MASK the first word. */
+       uint64_t v = *p | MASK(s_int);
 
        uint64_t bits;
        while ((bits = __insn_v1cmpeqi(v, 0)) == 0)
                v = *++p;
 
-       return ((const char *)p) + (__insn_ctz(bits) >> 3) - s;
+       return ((const char *)p) + (CFZ(bits) >> 3) - s;
 }
 EXPORT_SYMBOL(strlen);
index 979f76d837460616cec4e036dde7c03e527e2c8b..b62d002af0096fe89793f5fb4eba6c357ef55922 100644 (file)
 
 /* Access user memory, but use MMU to avoid propagating kernel exceptions. */
 
-       .pushsection .fixup,"ax"
-
-get_user_fault:
-       { move r0, zero; move r1, zero }
-       { movei r2, -EFAULT; jrp lr }
-       ENDPROC(get_user_fault)
-
-put_user_fault:
-       { movei r0, -EFAULT; jrp lr }
-       ENDPROC(put_user_fault)
-
-       .popsection
-
-/*
- * __get_user_N functions take a pointer in r0, and return 0 in r2
- * on success, with the value in r0; or else -EFAULT in r2.
- */
-#define __get_user_N(bytes, LOAD) \
-       STD_ENTRY(__get_user_##bytes); \
-1:     { LOAD r0, r0; move r1, zero; move r2, zero }; \
-       jrp lr; \
-       STD_ENDPROC(__get_user_##bytes); \
-       .pushsection __ex_table,"a"; \
-       .word 1b, get_user_fault; \
-       .popsection
-
-__get_user_N(1, lb_u)
-__get_user_N(2, lh_u)
-__get_user_N(4, lw)
-
-/*
- * __get_user_8 takes a pointer in r0, and returns 0 in r2
- * on success, with the value in r0/r1; or else -EFAULT in r2.
- */
-       STD_ENTRY(__get_user_8);
-1:     { lw r0, r0; addi r1, r0, 4 };
-2:     { lw r1, r1; move r2, zero };
-       jrp lr;
-       STD_ENDPROC(__get_user_8);
-       .pushsection __ex_table,"a";
-       .word 1b, get_user_fault;
-       .word 2b, get_user_fault;
-       .popsection
-
-/*
- * __put_user_N functions take a value in r0 and a pointer in r1,
- * and return 0 in r0 on success or -EFAULT on failure.
- */
-#define __put_user_N(bytes, STORE) \
-       STD_ENTRY(__put_user_##bytes); \
-1:     { STORE r1, r0; move r0, zero }; \
-       jrp lr; \
-       STD_ENDPROC(__put_user_##bytes); \
-       .pushsection __ex_table,"a"; \
-       .word 1b, put_user_fault; \
-       .popsection
-
-__put_user_N(1, sb)
-__put_user_N(2, sh)
-__put_user_N(4, sw)
-
-/*
- * __put_user_8 takes a value in r0/r1 and a pointer in r2,
- * and returns 0 in r0 on success or -EFAULT on failure.
- */
-STD_ENTRY(__put_user_8)
-1:      { sw r2, r0; addi r2, r2, 4 }
-2:      { sw r2, r1; move r0, zero }
-       jrp lr
-       STD_ENDPROC(__put_user_8)
-       .pushsection __ex_table,"a"
-       .word 1b, put_user_fault
-       .word 2b, put_user_fault
-       .popsection
-
-
 /*
  * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
  * It returns the length, including the terminating NUL, or zero on exception.
index 2ff44f87b78e2bab2cfe454df285db50324170e5..adb2dbbc70cd037d5d30b0d5c448da17470db3ef 100644 (file)
 
 /* Access user memory, but use MMU to avoid propagating kernel exceptions. */
 
-       .pushsection .fixup,"ax"
-
-get_user_fault:
-       { movei r1, -EFAULT; move r0, zero }
-       jrp lr
-       ENDPROC(get_user_fault)
-
-put_user_fault:
-       { movei r0, -EFAULT; jrp lr }
-       ENDPROC(put_user_fault)
-
-       .popsection
-
-/*
- * __get_user_N functions take a pointer in r0, and return 0 in r1
- * on success, with the value in r0; or else -EFAULT in r1.
- */
-#define __get_user_N(bytes, LOAD) \
-       STD_ENTRY(__get_user_##bytes); \
-1:     { LOAD r0, r0; move r1, zero }; \
-       jrp lr; \
-       STD_ENDPROC(__get_user_##bytes); \
-       .pushsection __ex_table,"a"; \
-       .quad 1b, get_user_fault; \
-       .popsection
-
-__get_user_N(1, ld1u)
-__get_user_N(2, ld2u)
-__get_user_N(4, ld4u)
-__get_user_N(8, ld)
-
-/*
- * __put_user_N functions take a value in r0 and a pointer in r1,
- * and return 0 in r0 on success or -EFAULT on failure.
- */
-#define __put_user_N(bytes, STORE) \
-       STD_ENTRY(__put_user_##bytes); \
-1:     { STORE r1, r0; move r0, zero }; \
-       jrp lr; \
-       STD_ENDPROC(__put_user_##bytes); \
-       .pushsection __ex_table,"a"; \
-       .quad 1b, put_user_fault; \
-       .popsection
-
-__put_user_N(1, st1)
-__put_user_N(2, st2)
-__put_user_N(4, st4)
-__put_user_N(8, st)
-
 /*
  * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
  * It returns the length, including the terminating NUL, or zero on exception.
index 22e58f51ed23eedd405e9dc0b719eddb9550f502..84ce7abbf5afb80c5d82648244349bf7d746a9c0 100644 (file)
@@ -187,7 +187,7 @@ static pgd_t *get_current_pgd(void)
        HV_Context ctx = hv_inquire_context();
        unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
        struct page *pgd_page = pfn_to_page(pgd_pfn);
-       BUG_ON(PageHighMem(pgd_page));   /* oops, HIGHPTE? */
+       BUG_ON(PageHighMem(pgd_page));
        return (pgd_t *) __va(ctx.page_table);
 }
 
@@ -273,11 +273,15 @@ static int handle_page_fault(struct pt_regs *regs,
        int si_code;
        int is_kernel_mode;
        pgd_t *pgd;
+       unsigned int flags;
 
        /* on TILE, protection faults are always writes */
        if (!is_page_fault)
                write = 1;
 
+       flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+                (write ? FAULT_FLAG_WRITE : 0));
+
        is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);
 
        tsk = validate_current();
@@ -382,6 +386,8 @@ static int handle_page_fault(struct pt_regs *regs,
                        vma = NULL;  /* happy compiler */
                        goto bad_area_nosemaphore;
                }
+
+retry:
                down_read(&mm->mmap_sem);
        }
 
@@ -429,7 +435,11 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(mm, vma, address, write);
+       fault = handle_mm_fault(mm, vma, address, flags);
+
+       if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+               return 0;
+
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
@@ -437,10 +447,22 @@ good_area:
                        goto do_sigbus;
                BUG();
        }
-       if (fault & VM_FAULT_MAJOR)
-               tsk->maj_flt++;
-       else
-               tsk->min_flt++;
+       if (flags & FAULT_FLAG_ALLOW_RETRY) {
+               if (fault & VM_FAULT_MAJOR)
+                       tsk->maj_flt++;
+               else
+                       tsk->min_flt++;
+               if (fault & VM_FAULT_RETRY) {
+                       flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+                        /*
+                         * No need to up_read(&mm->mmap_sem) as we would
+                         * have already released it in __lock_page_or_retry
+                         * in mm/filemap.c.
+                         */
+                       goto retry;
+               }
+       }
 
 #if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
        /*
index 499f73770b05d09e1616e1d9a2293a20c7ae1bac..dbcbdf7b8aa81fe9ed18d4d2613f03e763116b6e 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/cache.h>
 #include <linux/smp.h>
 #include <linux/module.h>
+#include <linux/hugetlb.h>
 
 #include <asm/page.h>
 #include <asm/sections.h>
index 42cfcba4e1ef7edb949f5cebf509f4d8ab9116c1..812e2d037972cee0aebd2ccefebf47b2e9890a43 100644 (file)
 #include <linux/mman.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/setup.h>
+
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+
+/*
+ * Provide an additional huge page size (in addition to the regular default
+ * huge page size) if no "hugepagesz" arguments are specified.
+ * Note that it must be smaller than the default huge page size so
+ * that it's possible to allocate them on demand from the buddy allocator.
+ * You can change this to 64K (on a 16K build), 256K, 1M, or 4M,
+ * or not define it at all.
+ */
+#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)
+
+/* "Extra" page-size multipliers, one per level of the page table. */
+int huge_shift[HUGE_SHIFT_ENTRIES] = {
+#ifdef ADDITIONAL_HUGE_SIZE
+#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
+       [HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
+#endif
+};
+
+/*
+ * This routine is a hybrid of pte_alloc_map() and pte_alloc_kernel().
+ * It assumes that L2 PTEs are never in HIGHMEM (we don't support that).
+ * It locks the user pagetable, and bumps up the mm->nr_ptes field,
+ * but otherwise allocate the page table using the kernel versions.
+ */
+static pte_t *pte_alloc_hugetlb(struct mm_struct *mm, pmd_t *pmd,
+                               unsigned long address)
+{
+       pte_t *new;
+
+       if (pmd_none(*pmd)) {
+               new = pte_alloc_one_kernel(mm, address);
+               if (!new)
+                       return NULL;
+
+               smp_wmb(); /* See comment in __pte_alloc */
+
+               spin_lock(&mm->page_table_lock);
+               if (likely(pmd_none(*pmd))) {  /* Has another populated it ? */
+                       mm->nr_ptes++;
+                       pmd_populate_kernel(mm, pmd, new);
+                       new = NULL;
+               } else
+                       VM_BUG_ON(pmd_trans_splitting(*pmd));
+               spin_unlock(&mm->page_table_lock);
+               if (new)
+                       pte_free_kernel(mm, new);
+       }
+
+       return pte_offset_kernel(pmd, address);
+}
+#endif
 
 pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
 {
        pgd_t *pgd;
        pud_t *pud;
-       pte_t *pte = NULL;
 
-       /* We do not yet support multiple huge page sizes. */
-       BUG_ON(sz != PMD_SIZE);
+       addr &= -sz;   /* Mask off any low bits in the address. */
 
        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
-       if (pud)
-               pte = (pte_t *) pmd_alloc(mm, pud, addr);
-       BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
 
-       return pte;
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+       if (sz >= PGDIR_SIZE) {
+               BUG_ON(sz != PGDIR_SIZE &&
+                      sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
+               return (pte_t *)pud;
+       } else {
+               pmd_t *pmd = pmd_alloc(mm, pud, addr);
+               if (sz >= PMD_SIZE) {
+                       BUG_ON(sz != PMD_SIZE &&
+                              sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
+                       return (pte_t *)pmd;
+               }
+               else {
+                       if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
+                               panic("Unexpected page size %#lx\n", sz);
+                       return pte_alloc_hugetlb(mm, pmd, addr);
+               }
+       }
+#else
+       BUG_ON(sz != PMD_SIZE);
+       return (pte_t *) pmd_alloc(mm, pud, addr);
+#endif
 }
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+static pte_t *get_pte(pte_t *base, int index, int level)
 {
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd = NULL;
-
-       pgd = pgd_offset(mm, addr);
-       if (pgd_present(*pgd)) {
-               pud = pud_offset(pgd, addr);
-               if (pud_present(*pud))
-                       pmd = pmd_offset(pud, addr);
+       pte_t *ptep = base + index;
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+       if (!pte_present(*ptep) && huge_shift[level] != 0) {
+               unsigned long mask = -1UL << huge_shift[level];
+               pte_t *super_ptep = base + (index & mask);
+               pte_t pte = *super_ptep;
+               if (pte_present(pte) && pte_super(pte))
+                       ptep = super_ptep;
        }
-       return (pte_t *) pmd;
+#endif
+       return ptep;
 }
 
-#ifdef HUGETLB_TEST
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
-                             int write)
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
-       unsigned long start = address;
-       int length = 1;
-       int nr;
-       struct page *page;
-       struct vm_area_struct *vma;
-
-       vma = find_vma(mm, addr);
-       if (!vma || !is_vm_hugetlb_page(vma))
-               return ERR_PTR(-EINVAL);
-
-       pte = huge_pte_offset(mm, address);
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+       pte_t *pte;
+#endif
 
-       /* hugetlb should be locked, and hence, prefaulted */
-       WARN_ON(!pte || pte_none(*pte));
+       /* Get the top-level page table entry. */
+       pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);
+       if (!pgd_present(*pgd))
+               return NULL;
 
-       page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
+       /* We don't have four levels. */
+       pud = pud_offset(pgd, addr);
+#ifndef __PAGETABLE_PUD_FOLDED
+# error support fourth page table level
+#endif
 
-       WARN_ON(!PageHead(page));
+       /* Check for an L0 huge PTE, if we have three levels. */
+#ifndef __PAGETABLE_PMD_FOLDED
+       if (pud_huge(*pud))
+               return (pte_t *)pud;
 
-       return page;
-}
-
-int pmd_huge(pmd_t pmd)
-{
-       return 0;
-}
+       pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
+                              pmd_index(addr), 1);
+       if (!pmd_present(*pmd))
+               return NULL;
+#else
+       pmd = pmd_offset(pud, addr);
+#endif
 
-int pud_huge(pud_t pud)
-{
-       return 0;
-}
+       /* Check for an L1 huge PTE. */
+       if (pmd_huge(*pmd))
+               return (pte_t *)pmd;
+
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+       /* Check for an L2 huge PTE. */
+       pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
+       if (!pte_present(*pte))
+               return NULL;
+       if (pte_super(*pte))
+               return pte;
+#endif
 
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-                            pmd_t *pmd, int write)
-{
        return NULL;
 }
 
-#else
-
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
 {
@@ -149,8 +225,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
        return 0;
 }
 
-#endif
-
 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
@@ -322,21 +396,102 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
 }
+#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
 
-static __init int setup_hugepagesz(char *opt)
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+static __init int __setup_hugepagesz(unsigned long ps)
 {
-       unsigned long ps = memparse(opt, &opt);
-       if (ps == PMD_SIZE) {
-               hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
-       } else if (ps == PUD_SIZE) {
-               hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+       int log_ps = __builtin_ctzl(ps);
+       int level, base_shift;
+
+       if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
+               pr_warn("Not enabling %ld byte huge pages;"
+                       " must be a power of four.\n", ps);
+               return -EINVAL;
+       }
+
+       if (ps > 64*1024*1024*1024UL) {
+               pr_warn("Not enabling %ld MB huge pages;"
+                       " largest legal value is 64 GB .\n", ps >> 20);
+               return -EINVAL;
+       } else if (ps >= PUD_SIZE) {
+               static long hv_jpage_size;
+               if (hv_jpage_size == 0)
+                       hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
+               if (hv_jpage_size != PUD_SIZE) {
+                       pr_warn("Not enabling >= %ld MB huge pages:"
+                               " hypervisor reports size %ld\n",
+                               PUD_SIZE >> 20, hv_jpage_size);
+                       return -EINVAL;
+               }
+               level = 0;
+               base_shift = PUD_SHIFT;
+       } else if (ps >= PMD_SIZE) {
+               level = 1;
+               base_shift = PMD_SHIFT;
+       } else if (ps > PAGE_SIZE) {
+               level = 2;
+               base_shift = PAGE_SHIFT;
        } else {
-               pr_err("hugepagesz: Unsupported page size %lu M\n",
-                       ps >> 20);
-               return 0;
+               pr_err("hugepagesz: huge page size %ld too small\n", ps);
+               return -EINVAL;
        }
-       return 1;
+
+       if (log_ps != base_shift) {
+               int shift_val = log_ps - base_shift;
+               if (huge_shift[level] != 0) {
+                       int old_shift = base_shift + huge_shift[level];
+                       pr_warn("Not enabling %ld MB huge pages;"
+                               " already have size %ld MB.\n",
+                               ps >> 20, (1UL << old_shift) >> 20);
+                       return -EINVAL;
+               }
+               if (hv_set_pte_super_shift(level, shift_val) != 0) {
+                       pr_warn("Not enabling %ld MB huge pages;"
+                               " no hypervisor support.\n", ps >> 20);
+                       return -EINVAL;
+               }
+               printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
+               huge_shift[level] = shift_val;
+       }
+
+       hugetlb_add_hstate(log_ps - PAGE_SHIFT);
+
+       return 0;
+}
+
+static bool saw_hugepagesz;
+
+static __init int setup_hugepagesz(char *opt)
+{
+       if (!saw_hugepagesz) {
+               saw_hugepagesz = true;
+               memset(huge_shift, 0, sizeof(huge_shift));
+       }
+       return __setup_hugepagesz(memparse(opt, NULL));
 }
 __setup("hugepagesz=", setup_hugepagesz);
 
-#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
+#ifdef ADDITIONAL_HUGE_SIZE
+/*
+ * Provide an additional huge page size if no "hugepagesz" args are given.
+ * In that case, all the cores have properly set up their hv super_shift
+ * already, but we need to notify the hugetlb code to enable the
+ * new huge page size from the Linux point of view.
+ */
+static __init int add_default_hugepagesz(void)
+{
+       if (!saw_hugepagesz) {
+               BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
+                            ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
+               BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
+                            ADDITIONAL_HUGE_SIZE);
+               BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
+               hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
+       }
+       return 0;
+}
+arch_initcall(add_default_hugepagesz);
+#endif
+
+#endif /* CONFIG_HUGETLB_SUPER_PAGES */
index 6a9d20ddc34f416438a7d9717cf75919ea7fa34b..630dd2ce2afef5237829f83804cea010f18a7dd9 100644 (file)
@@ -82,7 +82,7 @@ static int num_l2_ptes[MAX_NUMNODES];
 
 static void init_prealloc_ptes(int node, int pages)
 {
-       BUG_ON(pages & (HV_L2_ENTRIES-1));
+       BUG_ON(pages & (PTRS_PER_PTE - 1));
        if (pages) {
                num_l2_ptes[node] = pages;
                l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
@@ -131,14 +131,9 @@ static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
 
 #ifdef __tilegx__
 
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-
-/* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */
 static inline pmd_t *alloc_pmd(void)
 {
-       return (pmd_t *)alloc_pte();
+       return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
 }
 
 static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
@@ -444,6 +439,7 @@ static pgd_t pgtables[PTRS_PER_PGD]
  */
 static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 {
+       unsigned long long irqmask;
        unsigned long address, pfn;
        pmd_t *pmd;
        pte_t *pte;
@@ -633,10 +629,13 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
         *  - install pgtables[] as the real page table
         *  - flush the TLB so the new page table takes effect
         */
+       irqmask = interrupt_mask_save_mask();
+       interrupt_mask_set_mask(-1ULL);
        rc = flush_and_install_context(__pa(pgtables),
                                       init_pgprot((unsigned long)pgtables),
                                       __get_cpu_var(current_asid),
                                       cpumask_bits(my_cpu_mask));
+       interrupt_mask_restore_mask(irqmask);
        BUG_ON(rc != 0);
 
        /* Copy the page table back to the normal swapper_pg_dir. */
@@ -699,6 +698,7 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
 #endif /* CONFIG_HIGHMEM */
 
 
+#ifndef CONFIG_64BIT
 static void __init init_free_pfn_range(unsigned long start, unsigned long end)
 {
        unsigned long pfn;
@@ -771,6 +771,7 @@ static void __init set_non_bootmem_pages_init(void)
                init_free_pfn_range(start, end);
        }
 }
+#endif
 
 /*
  * paging_init() sets up the page tables - note that all of lowmem is
@@ -807,7 +808,7 @@ void __init paging_init(void)
         * changing init_mm once we get up and running, and there's no
         * need for e.g. vmalloc_sync_all().
         */
-       BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END));
+       BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1));
        pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
        assign_pmd(pud, alloc_pmd());
 #endif
@@ -859,8 +860,10 @@ void __init mem_init(void)
        /* this will put all bootmem onto the freelists */
        totalram_pages += free_all_bootmem();
 
+#ifndef CONFIG_64BIT
        /* count all remaining LOWMEM and give all HIGHMEM to page allocator */
        set_non_bootmem_pages_init();
+#endif
 
        codesize =  (unsigned long)&_etext - (unsigned long)&_text;
        datasize =  (unsigned long)&_end - (unsigned long)&_sdata;
index cd45a0837fa69cf7d57ced0dbb8985f875698929..91683d97917e6f79e07ddf291fd56b8559d25c93 100644 (file)
@@ -24,6 +24,9 @@
 /*
  * This function is used as a helper when setting up the initial
  * page table (swapper_pg_dir).
+ *
+ * You must mask ALL interrupts prior to invoking this code, since
+ * you can't legally touch the stack during the cache flush.
  */
 extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access,
                                     HV_ASID asid,
@@ -39,6 +42,9 @@ extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access,
  *
  * Note that any non-NULL pointers must not point to the page that
  * is handled by the stack_pte itself.
+ *
+ * You must mask ALL interrupts prior to invoking this code, since
+ * you can't legally touch the stack during the cache flush.
  */
 extern int homecache_migrate_stack_and_flush(pte_t stack_pte, unsigned long va,
                                     size_t length, pte_t *stack_ptep,
index ac01a7cdf77f243e5d6944fc7ae1795090ce9056..5305814bf187f52aa89f6b84e5f886cf273f5814 100644 (file)
@@ -40,8 +40,7 @@
 #define FRAME_R32      16
 #define FRAME_R33      20
 #define FRAME_R34      24
-#define FRAME_R35      28
-#define FRAME_SIZE     32
+#define FRAME_SIZE     28
 
 
 
 #define r_my_cpumask   r5
 
 /* Locals (callee-save); must not be more than FRAME_xxx above. */
-#define r_save_ics     r30
-#define r_context_lo   r31
-#define r_context_hi   r32
-#define r_access_lo    r33
-#define r_access_hi    r34
-#define r_asid         r35
+#define r_context_lo   r30
+#define r_context_hi   r31
+#define r_access_lo    r32
+#define r_access_hi    r33
+#define r_asid         r34
 
 STD_ENTRY(flush_and_install_context)
        /*
@@ -104,11 +102,7 @@ STD_ENTRY(flush_and_install_context)
         sw r_tmp, r33
         addi r_tmp, sp, FRAME_R34
        }
-       {
-        sw r_tmp, r34
-        addi r_tmp, sp, FRAME_R35
-       }
-       sw r_tmp, r35
+       sw r_tmp, r34
 
        /* Move some arguments to callee-save registers. */
        {
@@ -121,13 +115,6 @@ STD_ENTRY(flush_and_install_context)
        }
        move r_asid, r_asid_in
 
-       /* Disable interrupts, since we can't use our stack. */
-       {
-        mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION
-        movei r_tmp, 1
-       }
-       mtspr INTERRUPT_CRITICAL_SECTION, r_tmp
-
        /* First, flush our L2 cache. */
        {
         move r0, zero  /* cache_pa */
@@ -163,7 +150,7 @@ STD_ENTRY(flush_and_install_context)
        }
        {
         move r4, r_asid
-        movei r5, HV_CTX_DIRECTIO
+        moveli r5, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
        }
        jal hv_install_context
        bnz r0, .Ldone
@@ -175,9 +162,6 @@ STD_ENTRY(flush_and_install_context)
        }
 
 .Ldone:
-       /* Reset interrupts back how they were before. */
-       mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics
-
        /* Restore the callee-saved registers and return. */
        addli lr, sp, FRAME_SIZE
        {
@@ -202,10 +186,6 @@ STD_ENTRY(flush_and_install_context)
        }
        {
         lw r34, r_tmp
-        addli r_tmp, sp, FRAME_R35
-       }
-       {
-        lw r35, r_tmp
         addi sp, sp, FRAME_SIZE
        }
        jrp lr
index e76fea688bebf83cc172c4ba5770bd2e299edfb7..1d15b10833d113ee077f8ea032503819ea4c257e 100644 (file)
@@ -38,8 +38,7 @@
 #define FRAME_R30      16
 #define FRAME_R31      24
 #define FRAME_R32      32
-#define FRAME_R33      40
-#define FRAME_SIZE     48
+#define FRAME_SIZE     40
 
 
 
 #define r_my_cpumask   r3
 
 /* Locals (callee-save); must not be more than FRAME_xxx above. */
-#define r_save_ics     r30
-#define r_context      r31
-#define r_access       r32
-#define r_asid         r33
+#define r_context      r30
+#define r_access       r31
+#define r_asid         r32
 
 /*
  * Caller-save locals and frame constants are the same as
@@ -93,11 +91,7 @@ STD_ENTRY(flush_and_install_context)
         st r_tmp, r31
         addi r_tmp, sp, FRAME_R32
        }
-       {
-        st r_tmp, r32
-        addi r_tmp, sp, FRAME_R33
-       }
-       st r_tmp, r33
+       st r_tmp, r32
 
        /* Move some arguments to callee-save registers. */
        {
@@ -106,13 +100,6 @@ STD_ENTRY(flush_and_install_context)
        }
        move r_asid, r_asid_in
 
-       /* Disable interrupts, since we can't use our stack. */
-       {
-        mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION
-        movei r_tmp, 1
-       }
-       mtspr INTERRUPT_CRITICAL_SECTION, r_tmp
-
        /* First, flush our L2 cache. */
        {
         move r0, zero  /* cache_pa */
@@ -147,7 +134,7 @@ STD_ENTRY(flush_and_install_context)
        }
        {
         move r2, r_asid
-        movei r3, HV_CTX_DIRECTIO
+        moveli r3, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
        }
        jal hv_install_context
        bnez r0, 1f
@@ -158,10 +145,7 @@ STD_ENTRY(flush_and_install_context)
         jal hv_flush_all
        }
 
-1:      /* Reset interrupts back how they were before. */
-       mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics
-
-       /* Restore the callee-saved registers and return. */
+1:     /* Restore the callee-saved registers and return. */
        addli lr, sp, FRAME_SIZE
        {
         ld lr, lr
@@ -177,10 +161,6 @@ STD_ENTRY(flush_and_install_context)
        }
        {
         ld r32, r_tmp
-        addli r_tmp, sp, FRAME_R33
-       }
-       {
-        ld r33, r_tmp
         addi sp, sp, FRAME_SIZE
        }
        jrp lr
index 2410aa899b3e7c9c17079d2bf78d2d4eaf1eb8b1..345edfed9fcd4cb36b35f70d083e9038803bae4a 100644 (file)
@@ -132,15 +132,6 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
        set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
 }
 
-#if defined(CONFIG_HIGHPTE)
-pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
-{
-       pte_t *pte = kmap_atomic(pmd_page(*dir)) +
-               (pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
-       return &pte[pte_index(address)];
-}
-#endif
-
 /**
  * shatter_huge_page() - ensure a given address is mapped by a small page.
  *
@@ -289,33 +280,26 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 #define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)
 
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+                              int order)
 {
        gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
        struct page *p;
-#if L2_USER_PGTABLE_ORDER > 0
        int i;
-#endif
-
-#ifdef CONFIG_HIGHPTE
-       flags |= __GFP_HIGHMEM;
-#endif
 
        p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
        if (p == NULL)
                return NULL;
 
-#if L2_USER_PGTABLE_ORDER > 0
        /*
         * Make every page have a page_count() of one, not just the first.
         * We don't use __GFP_COMP since it doesn't look like it works
         * correctly with tlb_remove_page().
         */
-       for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+       for (i = 1; i < order; ++i) {
                init_page_count(p+i);
                inc_zone_page_state(p+i, NR_PAGETABLE);
        }
-#endif
 
        pgtable_page_ctor(p);
        return p;
@@ -326,28 +310,28 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  * process).  We have to correct whatever pte_alloc_one() did before
  * returning the pages to the allocator.
  */
-void pte_free(struct mm_struct *mm, struct page *p)
+void pgtable_free(struct mm_struct *mm, struct page *p, int order)
 {
        int i;
 
        pgtable_page_dtor(p);
        __free_page(p);
 
-       for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+       for (i = 1; i < order; ++i) {
                __free_page(p+i);
                dec_zone_page_state(p+i, NR_PAGETABLE);
        }
 }
 
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
-                   unsigned long address)
+void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+                       unsigned long address, int order)
 {
        int i;
 
        pgtable_page_dtor(pte);
        tlb_remove_page(tlb, pte);
 
-       for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+       for (i = 1; i < order; ++i) {
                tlb_remove_page(tlb, pte + i);
                dec_zone_page_state(pte + i, NR_PAGETABLE);
        }
@@ -490,7 +474,7 @@ void set_pte(pte_t *ptep, pte_t pte)
 /* Can this mm load a PTE with cached_priority set? */
 static inline int mm_is_priority_cached(struct mm_struct *mm)
 {
-       return mm->context.priority_cached;
+       return mm->context.priority_cached != 0;
 }
 
 /*
@@ -500,8 +484,8 @@ static inline int mm_is_priority_cached(struct mm_struct *mm)
 void start_mm_caching(struct mm_struct *mm)
 {
        if (!mm_is_priority_cached(mm)) {
-               mm->context.priority_cached = -1U;
-               hv_set_caching(-1U);
+               mm->context.priority_cached = -1UL;
+               hv_set_caching(-1UL);
        }
 }
 
@@ -516,7 +500,7 @@ void start_mm_caching(struct mm_struct *mm)
  * Presumably we'll come back later and have more luck and clear
  * the value then; for now we'll just keep the cache marked for priority.
  */
-static unsigned int update_priority_cached(struct mm_struct *mm)
+static unsigned long update_priority_cached(struct mm_struct *mm)
 {
        if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
                struct vm_area_struct *vm;
index 55c0661e2b5dbcba77a097522e43f109922bb577..097091059aaaee32883d4a302457fcb4a630854f 100644 (file)
@@ -121,15 +121,8 @@ LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
 
 LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))
 
-CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
-define cmd_vmlinux__
-       $(CC) $(CFLAGS_vmlinux) -o $@ \
-       -Wl,-T,$(vmlinux-lds) $(vmlinux-init) \
-       -Wl,--start-group $(vmlinux-main) -Wl,--end-group \
-       -lutil \
-       $(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o \
-       FORCE ,$^) ; rm -f linux
-endef
+# Used by link-vmlinux.sh which has special support for um link
+export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
 
 # When cleaning we don't include .config, so we don't include
 # TT or skas makefiles and don't clean skas_ptregs.h.
diff --git a/arch/um/include/asm/kvm_para.h b/arch/um/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index 76078490c2581c92a2c40cc039419e918f8b6f9d..e584e40ee8320f62e82e67abe6230a40ac75b19d 100644 (file)
@@ -6,9 +6,6 @@
 #ifndef __FRAME_KERN_H_
 #define __FRAME_KERN_H_
 
-#define _S(nr) (1<<((nr)-1))
-#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
-
 extern int setup_signal_stack_sc(unsigned long stack_top, int sig, 
                                 struct k_sigaction *ka,
                                 struct pt_regs *regs, 
index 3a2235e0abc3e18dec7862208364bb8de2d6916c..ccb9a9d283f165760b20fec6bb3b821f814b0a37 100644 (file)
@@ -117,11 +117,8 @@ void interrupt_end(void)
                schedule();
        if (test_thread_flag(TIF_SIGPENDING))
                do_signal();
-       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
+       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
                tracehook_notify_resume(&current->thread.regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
-       }
 }
 
 void exit_thread(void)
index 4d93dff6b3713e321a66f49ef160d3da4380e7a2..3d15243ce69234c3ddbac961ef58b0282247a4e4 100644 (file)
@@ -4,7 +4,9 @@
  */
 
 #include "linux/sched.h"
+#include "linux/spinlock.h"
 #include "linux/slab.h"
+#include "linux/oom.h"
 #include "kern_util.h"
 #include "os.h"
 #include "skas.h"
@@ -22,13 +24,18 @@ static void kill_off_processes(void)
                struct task_struct *p;
                int pid;
 
+               read_lock(&tasklist_lock);
                for_each_process(p) {
-                       if (p->mm == NULL)
-                               continue;
+                       struct task_struct *t;
 
-                       pid = p->mm->context.id.u.pid;
+                       t = find_lock_task_mm(p);
+                       if (!t)
+                               continue;
+                       pid = t->mm->context.id.u.pid;
+                       task_unlock(t);
                        os_kill_ptraced_process(pid, 1);
                }
+               read_unlock(&tasklist_lock);
        }
 }
 
index 292e706016c521842b3654ea0e1de2619211dc60..7362d58efc29612c1ffaad64558b3a986b820515 100644 (file)
 EXPORT_SYMBOL(block_signals);
 EXPORT_SYMBOL(unblock_signals);
 
-#define _S(nr) (1<<((nr)-1))
-
-#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
-
 /*
  * OK, we're invoking a handler
  */
-static int handle_signal(struct pt_regs *regs, unsigned long signr,
-                        struct k_sigaction *ka, siginfo_t *info,
-                        sigset_t *oldset)
+static void handle_signal(struct pt_regs *regs, unsigned long signr,
+                        struct k_sigaction *ka, siginfo_t *info)
 {
+       sigset_t *oldset = sigmask_to_save();
        unsigned long sp;
        int err;
 
@@ -65,9 +61,7 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
        if (err)
                force_sigsegv(signr, current);
        else
-               block_sigmask(ka, signr);
-
-       return err;
+               signal_delivered(signr, info, ka, regs, 0);
 }
 
 static int kern_do_signal(struct pt_regs *regs)
@@ -77,24 +71,9 @@ static int kern_do_signal(struct pt_regs *regs)
        int sig, handled_sig = 0;
 
        while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) {
-               sigset_t *oldset;
-               if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                       oldset = &current->saved_sigmask;
-               else
-                       oldset = &current->blocked;
                handled_sig = 1;
                /* Whee!  Actually deliver the signal.  */
-               if (!handle_signal(regs, sig, &ka_copy, &info, oldset)) {
-                       /*
-                        * a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag
-                        */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-                       break;
-               }
+               handle_signal(regs, sig, &ka_copy, &info);
        }
 
        /* Did we come from a system call? */
@@ -130,10 +109,8 @@ static int kern_do_signal(struct pt_regs *regs)
         * if there's no signal to deliver, we just put the saved sigmask
         * back
         */
-       if (!handled_sig && test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-       }
+       if (!handled_sig)
+               restore_saved_sigmask();
        return handled_sig;
 }
 
index dafc9471595021748eda5e750a80d3483a662379..3be60765c0e25d634282ea66e0902a9dc8d41bd4 100644 (file)
@@ -30,6 +30,8 @@ int handle_page_fault(unsigned long address, unsigned long ip,
        pmd_t *pmd;
        pte_t *pte;
        int err = -EFAULT;
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+                                (is_write ? FAULT_FLAG_WRITE : 0);
 
        *code_out = SEGV_MAPERR;
 
@@ -40,6 +42,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
        if (in_atomic())
                goto out_nosemaphore;
 
+retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
@@ -65,7 +68,11 @@ good_area:
        do {
                int fault;
 
-               fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
+               fault = handle_mm_fault(mm, vma, address, flags);
+
+               if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+                       goto out_nosemaphore;
+
                if (unlikely(fault & VM_FAULT_ERROR)) {
                        if (fault & VM_FAULT_OOM) {
                                goto out_of_memory;
@@ -75,10 +82,17 @@ good_area:
                        }
                        BUG();
                }
-               if (fault & VM_FAULT_MAJOR)
-                       current->maj_flt++;
-               else
-                       current->min_flt++;
+               if (flags & FAULT_FLAG_ALLOW_RETRY) {
+                       if (fault & VM_FAULT_MAJOR)
+                               current->maj_flt++;
+                       else
+                               current->min_flt++;
+                       if (fault & VM_FAULT_RETRY) {
+                               flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+                               goto retry;
+                       }
+               }
 
                pgd = pgd_offset(mm, address);
                pud = pud_offset(pgd, address);
diff --git a/arch/unicore32/include/asm/kvm_para.h b/arch/unicore32/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index 7754df6ef7d45444dea14b6ebd93a035e54309d6..8adedb37720a0e997dea9ea88abff76113904830 100644 (file)
@@ -21,8 +21,6 @@
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /*
  * For UniCore syscalls, we encode the syscall number into the instruction.
  */
@@ -61,10 +59,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
        int err;
 
        err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
-       if (err == 0) {
-               sigdelsetmask(&set, ~_BLOCKABLE);
+       if (err == 0)
                set_current_blocked(&set);
-       }
 
        err |= __get_user(regs->UCreg_00, &sf->uc.uc_mcontext.regs.UCreg_00);
        err |= __get_user(regs->UCreg_01, &sf->uc.uc_mcontext.regs.UCreg_01);
@@ -312,13 +308,12 @@ static inline void setup_syscall_restart(struct pt_regs *regs)
 /*
  * OK, we're invoking a handler
  */
-static int handle_signal(unsigned long sig, struct k_sigaction *ka,
-             siginfo_t *info, sigset_t *oldset,
-             struct pt_regs *regs, int syscall)
+static void handle_signal(unsigned long sig, struct k_sigaction *ka,
+             siginfo_t *info, struct pt_regs *regs, int syscall)
 {
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = current;
-       sigset_t blocked;
+       sigset_t *oldset = sigmask_to_save();
        int usig = sig;
        int ret;
 
@@ -364,15 +359,10 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
 
        if (ret != 0) {
                force_sigsegv(sig, tsk);
-               return ret;
+               return;
        }
 
-       /*
-        * Block the signal if we were successful.
-        */
-       block_sigmask(ka, sig);
-
-       return 0;
+       signal_delivered(sig, info, ka, regs, 0);
 }
 
 /*
@@ -399,32 +389,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
        if (!user_mode(regs))
                return;
 
-       if (try_to_freeze())
-               goto no_signal;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
-               sigset_t *oldset;
-
-               if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                       oldset = &current->saved_sigmask;
-               else
-                       oldset = &current->blocked;
-               if (handle_signal(signr, &ka, &info, oldset, regs, syscall)
-                               == 0) {
-                       /*
-                        * A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag.
-                        */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
+               handle_signal(signr, &ka, &info, regs, syscall);
                return;
        }
 
- no_signal:
        /*
         * No signal to deliver to the process - restart the syscall.
         */
@@ -451,8 +421,7 @@ static void do_signal(struct pt_regs *regs, int syscall)
        /* If there's no signal to deliver, we just put the saved
         * sigmask back.
         */
-       if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
-               set_current_blocked(&current->saved_sigmask);
+       restore_saved_sigmask();
 }
 
 asmlinkage void do_notify_resume(struct pt_regs *regs,
@@ -464,8 +433,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
        if (thread_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
 }
 
index 0e9dec6cadd10a32272fb67823b5ff27e587cc88..e5287d8517aa18172c266d3cb98fa8af2b950bb1 100644 (file)
@@ -1,4 +1,3 @@
-
 obj-$(CONFIG_KVM) += kvm/
 
 # Xen paravirtualization support
@@ -7,6 +6,7 @@ obj-$(CONFIG_XEN) += xen/
 # lguest paravirtualization support
 obj-$(CONFIG_LGUEST_GUEST) += lguest/
 
+obj-y += realmode/
 obj-y += kernel/
 obj-y += mm/
 
index 66cc380bebf007403ccdc72afdab31b2c98f746e..c70684f859e13473908a1370a9a3bb160db5c4e3 100644 (file)
@@ -32,6 +32,7 @@ config X86
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select ARCH_WANT_FRAME_POINTERS
        select HAVE_DMA_ATTRS
+       select HAVE_DMA_CONTIGUOUS if !SWIOTLB
        select HAVE_KRETPROBES
        select HAVE_OPTPROBES
        select HAVE_FTRACE_MCOUNT_RECORD
@@ -92,6 +93,8 @@ config X86
        select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
        select GENERIC_TIME_VSYSCALL if X86_64
        select KTIME_SCALAR if X86_32
+       select GENERIC_STRNCPY_FROM_USER
+       select GENERIC_STRNLEN_USER
 
 config INSTRUCTION_DECODER
        def_bool (KPROBES || PERF_EVENTS || UPROBES)
@@ -1503,6 +1506,8 @@ config EFI_STUB
           This kernel feature allows a bzImage to be loaded directly
          by EFI firmware without the use of a bootloader.
 
+         See Documentation/x86/efi-stub.txt for more information.
+
 config SECCOMP
        def_bool y
        prompt "Enable seccomp to safely compute untrusted bytecode"
index 2c14e76bb4c71255ee9c1d9b29b171dd73a33d60..4e85f5f85837c17c041217c8240f90820e46921e 100644 (file)
 
 static efi_system_table_t *sys_table;
 
+static void efi_printk(char *str)
+{
+       char *s8;
+
+       for (s8 = str; *s8; s8++) {
+               struct efi_simple_text_output_protocol *out;
+               efi_char16_t ch[2] = { 0 };
+
+               ch[0] = *s8;
+               out = (struct efi_simple_text_output_protocol *)sys_table->con_out;
+
+               if (*s8 == '\n') {
+                       efi_char16_t nl[2] = { '\r', 0 };
+                       efi_call_phys2(out->output_string, out, nl);
+               }
+
+               efi_call_phys2(out->output_string, out, ch);
+       }
+}
+
 static efi_status_t __get_map(efi_memory_desc_t **map, unsigned long *map_size,
                              unsigned long *desc_size)
 {
@@ -531,8 +551,10 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
                                EFI_LOADER_DATA,
                                nr_initrds * sizeof(*initrds),
                                &initrds);
-       if (status != EFI_SUCCESS)
+       if (status != EFI_SUCCESS) {
+               efi_printk("Failed to alloc mem for initrds\n");
                goto fail;
+       }
 
        str = (char *)(unsigned long)hdr->cmd_line_ptr;
        for (i = 0; i < nr_initrds; i++) {
@@ -575,32 +597,42 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
 
                        status = efi_call_phys3(boottime->handle_protocol,
                                        image->device_handle, &fs_proto, &io);
-                       if (status != EFI_SUCCESS)
+                       if (status != EFI_SUCCESS) {
+                               efi_printk("Failed to handle fs_proto\n");
                                goto free_initrds;
+                       }
 
                        status = efi_call_phys2(io->open_volume, io, &fh);
-                       if (status != EFI_SUCCESS)
+                       if (status != EFI_SUCCESS) {
+                               efi_printk("Failed to open volume\n");
                                goto free_initrds;
+                       }
                }
 
                status = efi_call_phys5(fh->open, fh, &h, filename_16,
                                        EFI_FILE_MODE_READ, (u64)0);
-               if (status != EFI_SUCCESS)
+               if (status != EFI_SUCCESS) {
+                       efi_printk("Failed to open initrd file\n");
                        goto close_handles;
+               }
 
                initrd->handle = h;
 
                info_sz = 0;
                status = efi_call_phys4(h->get_info, h, &info_guid,
                                        &info_sz, NULL);
-               if (status != EFI_BUFFER_TOO_SMALL)
+               if (status != EFI_BUFFER_TOO_SMALL) {
+                       efi_printk("Failed to get initrd info size\n");
                        goto close_handles;
+               }
 
 grow:
                status = efi_call_phys3(sys_table->boottime->allocate_pool,
                                        EFI_LOADER_DATA, info_sz, &info);
-               if (status != EFI_SUCCESS)
+               if (status != EFI_SUCCESS) {
+                       efi_printk("Failed to alloc mem for initrd info\n");
                        goto close_handles;
+               }
 
                status = efi_call_phys4(h->get_info, h, &info_guid,
                                        &info_sz, info);
@@ -612,8 +644,10 @@ grow:
                file_sz = info->file_size;
                efi_call_phys1(sys_table->boottime->free_pool, info);
 
-               if (status != EFI_SUCCESS)
+               if (status != EFI_SUCCESS) {
+                       efi_printk("Failed to get initrd info\n");
                        goto close_handles;
+               }
 
                initrd->size = file_sz;
                initrd_total += file_sz;
@@ -629,11 +663,14 @@ grow:
                 */
                status = high_alloc(initrd_total, 0x1000,
                                   &initrd_addr, hdr->initrd_addr_max);
-               if (status != EFI_SUCCESS)
+               if (status != EFI_SUCCESS) {
+                       efi_printk("Failed to alloc highmem for initrds\n");
                        goto close_handles;
+               }
 
                /* We've run out of free low memory. */
                if (initrd_addr > hdr->initrd_addr_max) {
+                       efi_printk("We've run out of free low memory\n");
                        status = EFI_INVALID_PARAMETER;
                        goto free_initrd_total;
                }
@@ -652,8 +689,10 @@ grow:
                                status = efi_call_phys3(fh->read,
                                                        initrds[j].handle,
                                                        &chunksize, addr);
-                               if (status != EFI_SUCCESS)
+                               if (status != EFI_SUCCESS) {
+                                       efi_printk("Failed to read initrd\n");
                                        goto free_initrd_total;
+                               }
                                addr += chunksize;
                                size -= chunksize;
                        }
@@ -674,7 +713,7 @@ free_initrd_total:
        low_free(initrd_total, initrd_addr);
 
 close_handles:
-       for (k = j; k < nr_initrds; k++)
+       for (k = j; k < i; k++)
                efi_call_phys1(fh->close, initrds[k].handle);
 free_initrds:
        efi_call_phys1(sys_table->boottime->free_pool, initrds);
@@ -732,8 +771,10 @@ static efi_status_t make_boot_params(struct boot_params *boot_params,
                        options_size++; /* NUL termination */
 
                        status = low_alloc(options_size, 1, &cmdline);
-                       if (status != EFI_SUCCESS)
+                       if (status != EFI_SUCCESS) {
+                               efi_printk("Failed to alloc mem for cmdline\n");
                                goto fail;
+                       }
 
                        s1 = (u8 *)(unsigned long)cmdline;
                        s2 = (u16 *)options;
@@ -895,12 +936,16 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table)
 
        status = efi_call_phys3(sys_table->boottime->handle_protocol,
                                handle, &proto, (void *)&image);
-       if (status != EFI_SUCCESS)
+       if (status != EFI_SUCCESS) {
+               efi_printk("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
                goto fail;
+       }
 
        status = low_alloc(0x4000, 1, (unsigned long *)&boot_params);
-       if (status != EFI_SUCCESS)
+       if (status != EFI_SUCCESS) {
+               efi_printk("Failed to alloc lowmem for boot params\n");
                goto fail;
+       }
 
        memset(boot_params, 0x0, 0x4000);
 
@@ -933,8 +978,10 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table)
        if (status != EFI_SUCCESS) {
                status = low_alloc(hdr->init_size, hdr->kernel_alignment,
                                   &start);
-               if (status != EFI_SUCCESS)
+               if (status != EFI_SUCCESS) {
+                       efi_printk("Failed to alloc mem for kernel\n");
                        goto fail;
+               }
        }
 
        hdr->code32_start = (__u32)start;
@@ -945,19 +992,25 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table)
        status = efi_call_phys3(sys_table->boottime->allocate_pool,
                                EFI_LOADER_DATA, sizeof(*gdt),
                                (void **)&gdt);
-       if (status != EFI_SUCCESS)
+       if (status != EFI_SUCCESS) {
+               efi_printk("Failed to alloc mem for gdt structure\n");
                goto fail;
+       }
 
        gdt->size = 0x800;
        status = low_alloc(gdt->size, 8, (unsigned long *)&gdt->address);
-       if (status != EFI_SUCCESS)
+       if (status != EFI_SUCCESS) {
+               efi_printk("Failed to alloc mem for gdt\n");
                goto fail;
+       }
 
        status = efi_call_phys3(sys_table->boottime->allocate_pool,
                                EFI_LOADER_DATA, sizeof(*idt),
                                (void **)&idt);
-       if (status != EFI_SUCCESS)
+       if (status != EFI_SUCCESS) {
+               efi_printk("Failed to alloc mem for idt structure\n");
                goto fail;
+       }
 
        idt->size = 0;
        idt->address = 0;
index 39251663e65b3fb64404d3cb67299b33313818f1..3b6e15627c55f0b1c8b5b4b5c9c1a37ffebf8060 100644 (file)
@@ -58,4 +58,10 @@ struct efi_uga_draw_protocol {
        void *blt;
 };
 
+struct efi_simple_text_output_protocol {
+       void *reset;
+       void *output_string;
+       void *test_string;
+};
+
 #endif /* BOOT_COMPRESSED_EBOOT_H */
index 98bd70faccc50cb0bb07ad96066a74a990870239..daeca56211e39b13babd4504cb19e9797f6e7810 100644 (file)
@@ -273,7 +273,6 @@ asmlinkage long sys32_sigreturn(struct pt_regs *regs)
                                    sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (ia32_restore_sigcontext(regs, &frame->sc, &ax))
@@ -299,7 +298,6 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
index 610001d385dd466c48394873c972e3430b7789ba..0c44630d17893e74a2672cf9e896c0a41b762c42 100644 (file)
@@ -29,7 +29,7 @@
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/mpspec.h>
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
 
 #define COMPILER_DEPENDENT_INT64   long long
 #define COMPILER_DEPENDENT_UINT64  unsigned long long
@@ -117,11 +117,8 @@ static inline void acpi_disable_pci(void)
 /* Low-level suspend routine. */
 extern int acpi_suspend_lowlevel(void);
 
-extern const unsigned char acpi_wakeup_code[];
-#define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code)))
-
-/* early initialization routine */
-extern void acpi_reserve_wakeup_memory(void);
+/* Physical address to resume after wakeup */
+#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))
 
 /*
  * Check if the CPU can handle C2 and deeper
index b97596e2b68c7ea3f62eebb38cd1f155719c150e..a6983b2772201c6109060ea25d983499f0281531 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h>
 
+#define BIT_64(n)                      (U64_C(1) << (n))
+
 /*
  * These have to be done with inline assembly: that way the bit-setting
  * is guaranteed to be atomic. All bit operations return 0 if the bit
diff --git a/arch/x86/include/asm/dma-contiguous.h b/arch/x86/include/asm/dma-contiguous.h
new file mode 100644 (file)
index 0000000..c092416
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef ASMX86_DMA_CONTIGUOUS_H
+#define ASMX86_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
+static inline void
+dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+
+#endif
+#endif
index 61c0bd25845af0b1ceea8892df20dd717dd2d148..f7b4c7903e7e51eb317ecfcd6fa9be05a3336f65 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 #include <asm-generic/dma-coherent.h>
+#include <linux/dma-contiguous.h>
 
 #ifdef CONFIG_ISA
 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
@@ -62,6 +63,10 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag,
                                        struct dma_attrs *attrs);
 
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+                                     void *vaddr, dma_addr_t dma_addr,
+                                     struct dma_attrs *attrs);
+
 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
index 18d9005d9e4f014a8b0cb5739dce08dbc0f82d6d..b0767bc08740594380b6bbc8d734984b54522be4 100644 (file)
@@ -34,7 +34,7 @@
 
 #ifndef __ASSEMBLY__
 extern void mcount(void);
-extern int modifying_ftrace_code;
+extern atomic_t modifying_ftrace_code;
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
index c222e1a1b12addc1260929dbaaf86c5102f95eda..1ac46c22dd5003d90fd253497be9378d8fd3da8a 100644 (file)
@@ -200,7 +200,7 @@ typedef u32 __attribute__((vector_size(16))) sse128_t;
 
 /* Type, address-of, and value of an instruction's operand. */
 struct operand {
-       enum { OP_REG, OP_MEM, OP_IMM, OP_XMM, OP_NONE } type;
+       enum { OP_REG, OP_MEM, OP_IMM, OP_XMM, OP_MM, OP_NONE } type;
        unsigned int bytes;
        union {
                unsigned long orig_val;
@@ -213,12 +213,14 @@ struct operand {
                        unsigned seg;
                } mem;
                unsigned xmm;
+               unsigned mm;
        } addr;
        union {
                unsigned long val;
                u64 val64;
                char valptr[sizeof(unsigned long) + 2];
                sse128_t vec_val;
+               u64 mm_val;
        };
 };
 
index e5b97be12d2a6798aadfb4a67c4839ed01aafe31..db7c1f2709a270a03429ee1c9089209942647ff5 100644 (file)
@@ -173,6 +173,9 @@ enum {
 #define DR7_FIXED_1    0x00000400
 #define DR7_VOLATILE   0xffff23ff
 
+/* apic attention bits */
+#define KVM_APIC_CHECK_VAPIC   0
+
 /*
  * We don't want allocation failures within the mmu code, so we preallocate
  * enough memory for a single page fault in a cache.
@@ -238,8 +241,6 @@ struct kvm_mmu_page {
 #endif
 
        int write_flooding_count;
-
-       struct rcu_head rcu;
 };
 
 struct kvm_pio_request {
@@ -338,6 +339,7 @@ struct kvm_vcpu_arch {
        u64 efer;
        u64 apic_base;
        struct kvm_lapic *apic;    /* kernel irqchip context */
+       unsigned long apic_attention;
        int32_t apic_arb_prio;
        int mp_state;
        int sipi_vector;
@@ -537,8 +539,6 @@ struct kvm_arch {
        u64 hv_guest_os_id;
        u64 hv_hypercall;
 
-       atomic_t reader_counter;
-
        #ifdef CONFIG_KVM_MMU_AUDIT
        int audit_point;
        #endif
@@ -713,8 +713,9 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
-                              struct kvm_memory_slot *slot);
+void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+                                    struct kvm_memory_slot *slot,
+                                    gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
index 183922e13de1aa83ead2959144378e96934d4608..63ab1661d00eb0401eba1a379eb49409933332fa 100644 (file)
@@ -95,6 +95,14 @@ struct kvm_vcpu_pv_apf_data {
 extern void kvmclock_init(void);
 extern int kvm_register_clock(char *txt);
 
+#ifdef CONFIG_KVM_CLOCK
+bool kvm_check_and_clear_guest_paused(void);
+#else
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+       return false;
+}
+#endif /* CONFIG_KVMCLOCK */
 
 /* This instruction is vmcall.  On non-VT architectures, it will generate a
  * trap that we will then rewrite to the appropriate instruction.
@@ -173,14 +181,16 @@ static inline int kvm_para_available(void)
        if (boot_cpu_data.cpuid_level < 0)
                return 0;       /* So we don't blow up on old processors */
 
-       cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
-       memcpy(signature + 0, &ebx, 4);
-       memcpy(signature + 4, &ecx, 4);
-       memcpy(signature + 8, &edx, 4);
-       signature[12] = 0;
+       if (cpu_has_hypervisor) {
+               cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
+               memcpy(signature + 0, &ebx, 4);
+               memcpy(signature + 4, &ecx, 4);
+               memcpy(signature + 8, &edx, 4);
+               signature[12] = 0;
 
-       if (strcmp(signature, "KVMKVMKVM") == 0)
-               return 1;
+               if (strcmp(signature, "KVMKVMKVM") == 0)
+                       return 1;
+       }
 
        return 0;
 }
index effff47a3c8280fe4d0b5979c433a8400d570129..43876f16caf1ca8981288089d57b362e017bafbf 100644 (file)
@@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
        ptep->pte_low = pte.pte_low;
 }
 
+#define pmd_read_atomic pmd_read_atomic
+/*
+ * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
+ * a "*pmdp" dereference done by gcc. Problem is, in certain places
+ * where pte_offset_map_lock is called, concurrent page faults are
+ * allowed, if the mmap_sem is hold for reading. An example is mincore
+ * vs page faults vs MADV_DONTNEED. On the page fault side
+ * pmd_populate rightfully does a set_64bit, but if we're reading the
+ * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
+ * because gcc will not read the 64bit of the pmd atomically. To fix
+ * this all places running pmd_offset_map_lock() while holding the
+ * mmap_sem in read mode, shall read the pmdp pointer using this
+ * function to know if the pmd is null nor not, and in turn to know if
+ * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
+ * operations.
+ *
+ * Without THP if the mmap_sem is hold for reading, the
+ * pmd can only transition from null to not null while pmd_read_atomic runs.
+ * So there's no need of literally reading it atomically.
+ *
+ * With THP if the mmap_sem is hold for reading, the pmd can become
+ * THP or null or point to a pte (and in turn become "stable") at any
+ * time under pmd_read_atomic, so it's mandatory to read it atomically
+ * with cmpxchg8b.
+ */
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+       pmdval_t ret;
+       u32 *tmp = (u32 *)pmdp;
+
+       ret = (pmdval_t) (*tmp);
+       if (ret) {
+               /*
+                * If the low part is null, we must not read the high part
+                * or we can end up with a partial pmd.
+                */
+               smp_rmb();
+               ret |= ((pmdval_t)*(tmp + 1)) << 32;
+       }
+
+       return (pmd_t) { ret };
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+       return (pmd_t) { atomic64_read((atomic64_t *)pmdp) };
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
        set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
index 99f262e04b91b6d553fd65bd61957bd9cb5cbd15..8e525059e7d81c0a4cd46dfa2f62695daba80fee 100644 (file)
@@ -10,9 +10,6 @@
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
 
-typedef unsigned short __kernel_nlink_t;
-#define __kernel_nlink_t __kernel_nlink_t
-
 typedef unsigned short __kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
index 7745b257f035a8b1438078c670ce49dc1613051f..39bc5777211a63b9fbdd56a43fd7a0183e51697a 100644 (file)
@@ -544,13 +544,16 @@ static inline void load_sp0(struct tss_struct *tss,
  * enable), so that any CPU's that boot up
  * after us can get the correct flags.
  */
-extern unsigned long           mmu_cr4_features;
+extern unsigned long mmu_cr4_features;
+extern u32 *trampoline_cr4_features;
 
 static inline void set_in_cr4(unsigned long mask)
 {
        unsigned long cr4;
 
        mmu_cr4_features |= mask;
+       if (trampoline_cr4_features)
+               *trampoline_cr4_features = mmu_cr4_features;
        cr4 = read_cr4();
        cr4 |= mask;
        write_cr4(cr4);
@@ -561,6 +564,8 @@ static inline void clear_in_cr4(unsigned long mask)
        unsigned long cr4;
 
        mmu_cr4_features &= ~mask;
+       if (trampoline_cr4_features)
+               *trampoline_cr4_features = mmu_cr4_features;
        cr4 = read_cr4();
        cr4 &= ~mask;
        write_cr4(cr4);
index 35f2d1948adad8bebad28a5dc06b328454dc6ace..6167fd7981886228dc075f58901380be63ed87a7 100644 (file)
@@ -40,5 +40,6 @@ struct pvclock_wall_clock {
 } __attribute__((__packed__));
 
 #define PVCLOCK_TSC_STABLE_BIT (1 << 0)
+#define PVCLOCK_GUEST_STOPPED  (1 << 1)
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_PVCLOCK_ABI_H */
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
new file mode 100644 (file)
index 0000000..fce3f4a
--- /dev/null
@@ -0,0 +1,62 @@
+#ifndef _ARCH_X86_REALMODE_H
+#define _ARCH_X86_REALMODE_H
+
+#include <linux/types.h>
+#include <asm/io.h>
+
+/* This must match data at realmode.S */
+struct real_mode_header {
+       u32     text_start;
+       u32     ro_end;
+       /* SMP trampoline */
+       u32     trampoline_start;
+       u32     trampoline_status;
+       u32     trampoline_header;
+#ifdef CONFIG_X86_64
+       u32     trampoline_pgd;
+#endif
+       /* ACPI S3 wakeup */
+#ifdef CONFIG_ACPI_SLEEP
+       u32     wakeup_start;
+       u32     wakeup_header;
+#endif
+       /* APM/BIOS reboot */
+#ifdef CONFIG_X86_32
+       u32     machine_real_restart_asm;
+#endif
+};
+
+/* This must match data at trampoline_32/64.S */
+struct trampoline_header {
+#ifdef CONFIG_X86_32
+       u32 start;
+       u16 gdt_pad;
+       u16 gdt_limit;
+       u32 gdt_base;
+#else
+       u64 start;
+       u64 efer;
+       u32 cr4;
+#endif
+};
+
+extern struct real_mode_header *real_mode_header;
+extern unsigned char real_mode_blob_end[];
+
+extern unsigned long init_rsp;
+extern unsigned long initial_code;
+extern unsigned long initial_gs;
+
+extern unsigned char real_mode_blob[];
+extern unsigned char real_mode_relocs[];
+
+#ifdef CONFIG_X86_32
+extern unsigned char startup_32_smp[];
+extern unsigned char boot_gdt[];
+#else
+extern unsigned char secondary_startup_64[];
+#endif
+
+extern void __init setup_real_mode(void);
+
+#endif /* _ARCH_X86_REALMODE_H */
index ada93b3b8c66fb79f5e3a9fe91d9f15d67234edf..beff97f7df3790d04dfba1906fe657b1d024e579 100644 (file)
@@ -7,8 +7,6 @@
 
 #include <asm/processor-flags.h>
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 #define __FIX_EFLAGS   (X86_EFLAGS_AC | X86_EFLAGS_OF | \
                         X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
                         X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
diff --git a/arch/x86/include/asm/sta2x11.h b/arch/x86/include/asm/sta2x11.h
new file mode 100644 (file)
index 0000000..e9d32df
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * Header file for STMicroelectronics ConneXt (STA2X11) IOHub
+ */
+#ifndef __ASM_STA2X11_H
+#define __ASM_STA2X11_H
+
+#include <linux/pci.h>
+
+/* This needs to be called from the MFD to configure its sub-devices */
+struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev);
+
+#endif /* __ASM_STA2X11_H */
index 5c25de07cba82fca1fbd027a3038f5f6cedf66ec..89f794f007ec1e4aa5bbd029bcb32182fffe1f48 100644 (file)
@@ -248,7 +248,23 @@ static inline void set_restore_sigmask(void)
 {
        struct thread_info *ti = current_thread_info();
        ti->status |= TS_RESTORE_SIGMASK;
-       set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
+       WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+       return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       if (!(ti->status & TS_RESTORE_SIGMASK))
+               return false;
+       ti->status &= ~TS_RESTORE_SIGMASK;
+       return true;
 }
 
 static inline bool is_ia32_task(void)
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
deleted file mode 100644 (file)
index feca311..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef _ASM_X86_TRAMPOLINE_H
-#define _ASM_X86_TRAMPOLINE_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <asm/io.h>
-
-/*
- * Trampoline 80x86 program as an array.  These are in the init rodata
- * segment, but that's okay, because we only care about the relative
- * addresses of the symbols.
- */
-extern const unsigned char x86_trampoline_start [];
-extern const unsigned char x86_trampoline_end   [];
-extern unsigned char *x86_trampoline_base;
-
-extern unsigned long init_rsp;
-extern unsigned long initial_code;
-extern unsigned long initial_gs;
-
-extern void __init setup_trampolines(void);
-
-extern const unsigned char trampoline_data[];
-extern const unsigned char trampoline_status[];
-
-#define TRAMPOLINE_SYM(x)                                              \
-       ((void *)(x86_trampoline_base +                                 \
-                 ((const unsigned char *)(x) - x86_trampoline_start)))
-
-/* Address of the SMP trampoline */
-static inline unsigned long trampoline_address(void)
-{
-       return virt_to_phys(TRAMPOLINE_SYM(trampoline_data));
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_X86_TRAMPOLINE_H */
index 851fe0dc13bc18c33c79b8d54deb06e899ae63ad..04cd6882308e5d06b685bea5ef76ff1f863d67da 100644 (file)
@@ -32,6 +32,7 @@
 
 #define segment_eq(a, b)       ((a).seg == (b).seg)
 
+#define user_addr_max() (current_thread_info()->addr_limit.seg)
 #define __addr_ok(addr)                                        \
        ((unsigned long __force)(addr) <                \
         (current_thread_info()->addr_limit.seg))
@@ -565,6 +566,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
 extern __must_check long
 strncpy_from_user(char *dst, const char __user *src, long count);
 
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
index 8084bc73b18cbf8164f85dc86f7e78c4e1b07c78..576e39bca6ad1ee22465336dfb8c97d9a66391c3 100644 (file)
@@ -213,23 +213,6 @@ static inline unsigned long __must_check copy_from_user(void *to,
        return n;
 }
 
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only.  This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-#define strlen_user(str) strnlen_user(str, LONG_MAX)
-
-long strnlen_user(const char __user *str, long n);
 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
 unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
 
index fcd4b6f3ef02ffcabec9ae5ef815ea7b7fdf80a8..8e796fbbf9c66e439418dd84580e855a173b4a12 100644 (file)
@@ -208,9 +208,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
        }
 }
 
-__must_check long strnlen_user(const char __user *str, long n);
-__must_check long __strnlen_user(const char __user *str, long n);
-__must_check long strlen_user(const char __user *str);
 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
 
index ae03facfadd6b7d50a6dab41a61bfe7cb5728cab..5b238981542a2fa5b84134d65bf630b5a580fca5 100644 (file)
  * bit count instruction, that might be better than the multiply
  * and shift, for example.
  */
+struct word_at_a_time {
+       const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
 
 #ifdef CONFIG_64BIT
 
@@ -37,10 +42,31 @@ static inline long count_masked_bytes(long mask)
 
 #endif
 
-/* Return the high bit set in the first byte that is a zero */
-static inline unsigned long has_zero(unsigned long a)
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+       unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+       *bits = mask;
+       return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+       return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+       bits = (bits - 1) & ~bits;
+       return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
 {
-       return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
+       return count_masked_bytes(mask);
 }
 
 /*
index 1df35417c412d2fb7dde775b7cc54dde7468dfa4..cc146d51449e372549cfafded1b99815ffa92aec 100644 (file)
@@ -6,6 +6,7 @@ enum ipi_vector {
        XEN_CALL_FUNCTION_VECTOR,
        XEN_CALL_FUNCTION_SINGLE_VECTOR,
        XEN_SPIN_UNLOCK_VECTOR,
+       XEN_IRQ_WORK_VECTOR,
 
        XEN_NR_IPIS,
 };
index c34f96c2f7a0773dae5fcd4a1e30f20eb477a83c..93971e841dd5e7eaa032f5f4a8d5542040900a6d 100644 (file)
@@ -44,6 +44,7 @@ extern unsigned long  machine_to_phys_nr;
 
 extern unsigned long get_phys_to_machine(unsigned long pfn);
 extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 extern unsigned long set_phys_range_identity(unsigned long pfn_s,
                                             unsigned long pfn_e);
index 9bba5b79902b92c7d3d8ab45f70b64d55bc5ded3..8215e5652d9747b6a7eb5302abca77446933acf3 100644 (file)
@@ -35,7 +35,6 @@ obj-y                 += tsc.o io_delay.o rtc.o
 obj-y                  += pci-iommu_table.o
 obj-y                  += resource.o
 
-obj-y                          += trampoline.o trampoline_$(BITS).o
 obj-y                          += process.o
 obj-y                          += i387.o xsave.o
 obj-y                          += ptrace.o
@@ -48,7 +47,6 @@ obj-$(CONFIG_STACKTRACE)      += stacktrace.o
 obj-y                          += cpu/
 obj-y                          += acpi/
 obj-y                          += reboot.o
-obj-$(CONFIG_X86_32)           += reboot_32.o
 obj-$(CONFIG_X86_MSR)          += msr.o
 obj-$(CONFIG_X86_CPUID)                += cpuid.o
 obj-$(CONFIG_PCI)              += early-quirks.o
index 6f35260bb3ef30ac4c05e1f7beda99450a753739..163b225814728721ff0d424d8407493a6d92430d 100644 (file)
@@ -1,14 +1,7 @@
-subdir-                                := realmode
-
 obj-$(CONFIG_ACPI)             += boot.o
-obj-$(CONFIG_ACPI_SLEEP)       += sleep.o wakeup_rm.o wakeup_$(BITS).o
+obj-$(CONFIG_ACPI_SLEEP)       += sleep.o wakeup_$(BITS).o
 
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
 obj-y                          += cstate.o
 endif
 
-$(obj)/wakeup_rm.o:    $(obj)/realmode/wakeup.bin
-
-$(obj)/realmode/wakeup.bin: FORCE
-       $(Q)$(MAKE) $(build)=$(obj)/realmode
-
diff --git a/arch/x86/kernel/acpi/realmode/.gitignore b/arch/x86/kernel/acpi/realmode/.gitignore
deleted file mode 100644 (file)
index 58f1f48..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-wakeup.bin
-wakeup.elf
-wakeup.lds
diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
deleted file mode 100644 (file)
index 6a564ac..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-#
-# arch/x86/kernel/acpi/realmode/Makefile
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-
-always         := wakeup.bin
-targets                := wakeup.elf wakeup.lds
-
-wakeup-y       += wakeup.o wakemain.o video-mode.o copy.o bioscall.o regs.o
-
-# The link order of the video-*.o modules can matter.  In particular,
-# video-vga.o *must* be listed first, followed by video-vesa.o.
-# Hardware-specific drivers should follow in the order they should be
-# probed, and video-bios.o should typically be last.
-wakeup-y       += video-vga.o
-wakeup-y       += video-vesa.o
-wakeup-y       += video-bios.o
-
-targets                += $(wakeup-y)
-
-bootsrc                := $(src)/../../../boot
-
-# ---------------------------------------------------------------------------
-
-# How to compile the 16-bit code.  Note we always compile for -march=i386,
-# that way we can complain to the user if the CPU is insufficient.
-# Compile with _SETUP since this is similar to the boot-time setup code.
-KBUILD_CFLAGS  := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
-                  -I$(srctree)/$(bootsrc) \
-                  $(cflags-y) \
-                  -Wall -Wstrict-prototypes \
-                  -march=i386 -mregparm=3 \
-                  -include $(srctree)/$(bootsrc)/code16gcc.h \
-                  -fno-strict-aliasing -fomit-frame-pointer \
-                  $(call cc-option, -ffreestanding) \
-                  $(call cc-option, -fno-toplevel-reorder,\
-                       $(call cc-option, -fno-unit-at-a-time)) \
-                  $(call cc-option, -fno-stack-protector) \
-                  $(call cc-option, -mpreferred-stack-boundary=2)
-KBUILD_CFLAGS  += $(call cc-option, -m32)
-KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
-GCOV_PROFILE := n
-
-WAKEUP_OBJS = $(addprefix $(obj)/,$(wakeup-y))
-
-LDFLAGS_wakeup.elf     := -T
-
-CPPFLAGS_wakeup.lds += -P -C
-
-$(obj)/wakeup.elf: $(obj)/wakeup.lds $(WAKEUP_OBJS) FORCE
-       $(call if_changed,ld)
-
-OBJCOPYFLAGS_wakeup.bin        := -O binary
-
-$(obj)/wakeup.bin: $(obj)/wakeup.elf FORCE
-       $(call if_changed,objcopy)
diff --git a/arch/x86/kernel/acpi/realmode/bioscall.S b/arch/x86/kernel/acpi/realmode/bioscall.S
deleted file mode 100644 (file)
index f51eb0b..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/bioscall.S"
diff --git a/arch/x86/kernel/acpi/realmode/copy.S b/arch/x86/kernel/acpi/realmode/copy.S
deleted file mode 100644 (file)
index dc59ebe..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/copy.S"
diff --git a/arch/x86/kernel/acpi/realmode/regs.c b/arch/x86/kernel/acpi/realmode/regs.c
deleted file mode 100644 (file)
index 6206033..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/regs.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-bios.c b/arch/x86/kernel/acpi/realmode/video-bios.c
deleted file mode 100644 (file)
index 7deabc1..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-bios.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-mode.c b/arch/x86/kernel/acpi/realmode/video-mode.c
deleted file mode 100644 (file)
index 328ad20..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-mode.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-vesa.c b/arch/x86/kernel/acpi/realmode/video-vesa.c
deleted file mode 100644 (file)
index 9dbb967..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-vesa.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-vga.c b/arch/x86/kernel/acpi/realmode/video-vga.c
deleted file mode 100644 (file)
index bcc8125..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-vga.c"
diff --git a/arch/x86/kernel/acpi/realmode/wakemain.c b/arch/x86/kernel/acpi/realmode/wakemain.c
deleted file mode 100644 (file)
index 883962d..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-#include "wakeup.h"
-#include "boot.h"
-
-static void udelay(int loops)
-{
-       while (loops--)
-               io_delay();     /* Approximately 1 us */
-}
-
-static void beep(unsigned int hz)
-{
-       u8 enable;
-
-       if (!hz) {
-               enable = 0x00;          /* Turn off speaker */
-       } else {
-               u16 div = 1193181/hz;
-
-               outb(0xb6, 0x43);       /* Ctr 2, squarewave, load, binary */
-               io_delay();
-               outb(div, 0x42);        /* LSB of counter */
-               io_delay();
-               outb(div >> 8, 0x42);   /* MSB of counter */
-               io_delay();
-
-               enable = 0x03;          /* Turn on speaker */
-       }
-       inb(0x61);              /* Dummy read of System Control Port B */
-       io_delay();
-       outb(enable, 0x61);     /* Enable timer 2 output to speaker */
-       io_delay();
-}
-
-#define DOT_HZ         880
-#define DASH_HZ                587
-#define US_PER_DOT     125000
-
-/* Okay, this is totally silly, but it's kind of fun. */
-static void send_morse(const char *pattern)
-{
-       char s;
-
-       while ((s = *pattern++)) {
-               switch (s) {
-               case '.':
-                       beep(DOT_HZ);
-                       udelay(US_PER_DOT);
-                       beep(0);
-                       udelay(US_PER_DOT);
-                       break;
-               case '-':
-                       beep(DASH_HZ);
-                       udelay(US_PER_DOT * 3);
-                       beep(0);
-                       udelay(US_PER_DOT);
-                       break;
-               default:        /* Assume it's a space */
-                       udelay(US_PER_DOT * 3);
-                       break;
-               }
-       }
-}
-
-void main(void)
-{
-       /* Kill machine if structures are wrong */
-       if (wakeup_header.real_magic != 0x12345678)
-               while (1);
-
-       if (wakeup_header.realmode_flags & 4)
-               send_morse("...-");
-
-       if (wakeup_header.realmode_flags & 1)
-               asm volatile("lcallw   $0xc000,$3");
-
-       if (wakeup_header.realmode_flags & 2) {
-               /* Need to call BIOS */
-               probe_cards(0);
-               set_mode(wakeup_header.video_mode);
-       }
-}
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
deleted file mode 100644 (file)
index b4fd836..0000000
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * ACPI wakeup real mode startup stub
- */
-#include <asm/segment.h>
-#include <asm/msr-index.h>
-#include <asm/page_types.h>
-#include <asm/pgtable_types.h>
-#include <asm/processor-flags.h>
-#include "wakeup.h"
-
-       .code16
-       .section ".jump", "ax"
-       .globl  _start
-_start:
-       cli
-       jmp     wakeup_code
-
-/* This should match the structure in wakeup.h */
-               .section ".header", "a"
-               .globl  wakeup_header
-wakeup_header:
-video_mode:    .short  0       /* Video mode number */
-pmode_return:  .byte   0x66, 0xea      /* ljmpl */
-               .long   0       /* offset goes here */
-               .short  __KERNEL_CS
-pmode_cr0:     .long   0       /* Saved %cr0 */
-pmode_cr3:     .long   0       /* Saved %cr3 */
-pmode_cr4:     .long   0       /* Saved %cr4 */
-pmode_efer:    .quad   0       /* Saved EFER */
-pmode_gdt:     .quad   0
-pmode_misc_en: .quad   0       /* Saved MISC_ENABLE MSR */
-pmode_behavior:        .long   0       /* Wakeup behavior flags */
-realmode_flags:        .long   0
-real_magic:    .long   0
-trampoline_segment:    .word 0
-_pad1:         .byte   0
-wakeup_jmp:    .byte   0xea    /* ljmpw */
-wakeup_jmp_off:        .word   3f
-wakeup_jmp_seg:        .word   0
-wakeup_gdt:    .quad   0, 0, 0
-signature:     .long   WAKEUP_HEADER_SIGNATURE
-
-       .text
-       .code16
-wakeup_code:
-       cld
-
-       /* Apparently some dimwit BIOS programmers don't know how to
-          program a PM to RM transition, and we might end up here with
-          junk in the data segment descriptor registers.  The only way
-          to repair that is to go into PM and fix it ourselves... */
-       movw    $16, %cx
-       lgdtl   %cs:wakeup_gdt
-       movl    %cr0, %eax
-       orb     $X86_CR0_PE, %al
-       movl    %eax, %cr0
-       jmp     1f
-1:     ljmpw   $8, $2f
-2:
-       movw    %cx, %ds
-       movw    %cx, %es
-       movw    %cx, %ss
-       movw    %cx, %fs
-       movw    %cx, %gs
-
-       andb    $~X86_CR0_PE, %al
-       movl    %eax, %cr0
-       jmp     wakeup_jmp
-3:
-       /* Set up segments */
-       movw    %cs, %ax
-       movw    %ax, %ds
-       movw    %ax, %es
-       movw    %ax, %ss
-       lidtl   wakeup_idt
-
-       movl    $wakeup_stack_end, %esp
-
-       /* Clear the EFLAGS */
-       pushl   $0
-       popfl
-
-       /* Check header signature... */
-       movl    signature, %eax
-       cmpl    $WAKEUP_HEADER_SIGNATURE, %eax
-       jne     bogus_real_magic
-
-       /* Check we really have everything... */
-       movl    end_signature, %eax
-       cmpl    $WAKEUP_END_SIGNATURE, %eax
-       jne     bogus_real_magic
-
-       /* Call the C code */
-       calll   main
-
-       /* Restore MISC_ENABLE before entering protected mode, in case
-          BIOS decided to clear XD_DISABLE during S3. */
-       movl    pmode_behavior, %eax
-       btl     $WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %eax
-       jnc     1f
-
-       movl    pmode_misc_en, %eax
-       movl    pmode_misc_en + 4, %edx
-       movl    $MSR_IA32_MISC_ENABLE, %ecx
-       wrmsr
-1:
-
-       /* Do any other stuff... */
-
-#ifndef CONFIG_64BIT
-       /* This could also be done in C code... */
-       movl    pmode_cr3, %eax
-       movl    %eax, %cr3
-
-       movl    pmode_cr4, %ecx
-       jecxz   1f
-       movl    %ecx, %cr4
-1:
-       movl    pmode_efer, %eax
-       movl    pmode_efer + 4, %edx
-       movl    %eax, %ecx
-       orl     %edx, %ecx
-       jz      1f
-       movl    $MSR_EFER, %ecx
-       wrmsr
-1:
-
-       lgdtl   pmode_gdt
-
-       /* This really couldn't... */
-       movl    pmode_cr0, %eax
-       movl    %eax, %cr0
-       jmp     pmode_return
-#else
-       pushw   $0
-       pushw   trampoline_segment
-       pushw   $0
-       lret
-#endif
-
-bogus_real_magic:
-1:
-       hlt
-       jmp     1b
-
-       .data
-       .balign 8
-
-       /* This is the standard real-mode IDT */
-wakeup_idt:
-       .word   0xffff          /* limit */
-       .long   0               /* address */
-       .word   0
-
-       .globl  HEAP, heap_end
-HEAP:
-       .long   wakeup_heap
-heap_end:
-       .long   wakeup_stack
-
-       .bss
-wakeup_heap:
-       .space  2048
-wakeup_stack:
-       .space  2048
-wakeup_stack_end:
-
-       .section ".signature","a"
-end_signature:
-       .long   WAKEUP_END_SIGNATURE
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/kernel/acpi/realmode/wakeup.h
deleted file mode 100644 (file)
index 97a29e1..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Definitions for the wakeup data structure at the head of the
- * wakeup code.
- */
-
-#ifndef ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H
-#define ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-
-/* This must match data at wakeup.S */
-struct wakeup_header {
-       u16 video_mode;         /* Video mode number */
-       u16 _jmp1;              /* ljmpl opcode, 32-bit only */
-       u32 pmode_entry;        /* Protected mode resume point, 32-bit only */
-       u16 _jmp2;              /* CS value, 32-bit only */
-       u32 pmode_cr0;          /* Protected mode cr0 */
-       u32 pmode_cr3;          /* Protected mode cr3 */
-       u32 pmode_cr4;          /* Protected mode cr4 */
-       u32 pmode_efer_low;     /* Protected mode EFER */
-       u32 pmode_efer_high;
-       u64 pmode_gdt;
-       u32 pmode_misc_en_low;  /* Protected mode MISC_ENABLE */
-       u32 pmode_misc_en_high;
-       u32 pmode_behavior;     /* Wakeup routine behavior flags */
-       u32 realmode_flags;
-       u32 real_magic;
-       u16 trampoline_segment; /* segment with trampoline code, 64-bit only */
-       u8  _pad1;
-       u8  wakeup_jmp;
-       u16 wakeup_jmp_off;
-       u16 wakeup_jmp_seg;
-       u64 wakeup_gdt[3];
-       u32 signature;          /* To check we have correct structure */
-} __attribute__((__packed__));
-
-extern struct wakeup_header wakeup_header;
-#endif
-
-#define WAKEUP_HEADER_OFFSET   8
-#define WAKEUP_HEADER_SIGNATURE 0x51ee1111
-#define WAKEUP_END_SIGNATURE   0x65a22c82
-
-/* Wakeup behavior bits */
-#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE     0
-
-#endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
deleted file mode 100644 (file)
index d4f8010..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * wakeup.ld
- *
- * Linker script for the real-mode wakeup code
- */
-#undef i386
-#include "wakeup.h"
-
-OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
-OUTPUT_ARCH(i386)
-ENTRY(_start)
-
-SECTIONS
-{
-       . = 0;
-       .jump   : {
-               *(.jump)
-       } = 0x90909090
-
-       . = WAKEUP_HEADER_OFFSET;
-       .header : {
-               *(.header)
-       }
-
-       . = ALIGN(16);
-       .text : {
-                *(.text*)
-       } = 0x90909090
-
-       . = ALIGN(16);
-       .rodata : {
-               *(.rodata*)
-       }
-
-       .videocards : {
-               video_cards = .;
-               *(.videocards)
-               video_cards_end = .;
-       }
-
-       . = ALIGN(16);
-       .data : {
-                *(.data*)
-       }
-
-       . = ALIGN(16);
-       .bss :  {
-               __bss_start = .;
-               *(.bss)
-               __bss_end = .;
-       }
-
-       .signature : {
-               *(.signature)
-       }
-
-       _end = .;
-
-       /DISCARD/ : {
-               *(.note*)
-       }
-}
index 146a49c763a49085b50d5b3b6babcda74650463b..95bf99de9058128f320356caa6cad1ce9b2727da 100644 (file)
@@ -14,8 +14,9 @@
 #include <asm/desc.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
+#include <asm/realmode.h>
 
-#include "realmode/wakeup.h"
+#include "../../realmode/rm/wakeup.h"
 #include "sleep.h"
 
 unsigned long acpi_realmode_flags;
@@ -36,13 +37,9 @@ asmlinkage void acpi_enter_s3(void)
  */
 int acpi_suspend_lowlevel(void)
 {
-       struct wakeup_header *header;
-       /* address in low memory of the wakeup routine. */
-       char *acpi_realmode;
+       struct wakeup_header *header =
+               (struct wakeup_header *) __va(real_mode_header->wakeup_header);
 
-       acpi_realmode = TRAMPOLINE_SYM(acpi_wakeup_code);
-
-       header = (struct wakeup_header *)(acpi_realmode + WAKEUP_HEADER_OFFSET);
        if (header->signature != WAKEUP_HEADER_SIGNATURE) {
                printk(KERN_ERR "wakeup header does not match\n");
                return -EINVAL;
@@ -50,27 +47,6 @@ int acpi_suspend_lowlevel(void)
 
        header->video_mode = saved_video_mode;
 
-       header->wakeup_jmp_seg = acpi_wakeup_address >> 4;
-
-       /*
-        * Set up the wakeup GDT.  We set these up as Big Real Mode,
-        * that is, with limits set to 4 GB.  At least the Lenovo
-        * Thinkpad X61 is known to need this for the video BIOS
-        * initialization quirk to work; this is likely to also
-        * be the case for other laptops or integrated video devices.
-        */
-
-       /* GDT[0]: GDT self-pointer */
-       header->wakeup_gdt[0] =
-               (u64)(sizeof(header->wakeup_gdt) - 1) +
-               ((u64)__pa(&header->wakeup_gdt) << 16);
-       /* GDT[1]: big real mode-like code segment */
-       header->wakeup_gdt[1] =
-               GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff);
-       /* GDT[2]: big real mode-like data segment */
-       header->wakeup_gdt[2] =
-               GDT_ENTRY(0x8093, acpi_wakeup_address, 0xfffff);
-
 #ifndef CONFIG_64BIT
        store_gdt((struct desc_ptr *)&header->pmode_gdt);
 
@@ -95,7 +71,6 @@ int acpi_suspend_lowlevel(void)
        header->pmode_cr3 = (u32)__pa(&initial_page_table);
        saved_magic = 0x12345678;
 #else /* CONFIG_64BIT */
-       header->trampoline_segment = trampoline_address() >> 4;
 #ifdef CONFIG_SMP
        stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
        early_gdt_descr.address =
index d68677a2a01037a758496ee31aa575ccbb2badff..5653a5791ec92291844a55665cc50a0cb402095b 100644 (file)
@@ -2,8 +2,8 @@
  *     Variables and functions used by the code in sleep.c
  */
 
-#include <asm/trampoline.h>
 #include <linux/linkage.h>
+#include <asm/realmode.h>
 
 extern unsigned long saved_video_mode;
 extern long saved_magic;
diff --git a/arch/x86/kernel/acpi/wakeup_rm.S b/arch/x86/kernel/acpi/wakeup_rm.S
deleted file mode 100644 (file)
index 63b8ab5..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Wrapper script for the realmode binary as a transport object
- * before copying to low memory.
- */
-#include <asm/page_types.h>
-
-       .section ".x86_trampoline","a"
-       .balign PAGE_SIZE
-       .globl  acpi_wakeup_code
-acpi_wakeup_code:
-       .incbin "arch/x86/kernel/acpi/realmode/wakeup.bin"
-       .size   acpi_wakeup_code, .-acpi_wakeup_code
index 82f29e70d05833b7b3b6e0b7789a46ef9a75c4b8..6b9333b429ba1910637bf29c63bbe0712064022e 100644 (file)
@@ -1101,14 +1101,20 @@ int is_debug_stack(unsigned long addr)
                 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
 }
 
+static DEFINE_PER_CPU(u32, debug_stack_use_ctr);
+
 void debug_stack_set_zero(void)
 {
+       this_cpu_inc(debug_stack_use_ctr);
        load_idt((const struct desc_ptr *)&nmi_idt_descr);
 }
 
 void debug_stack_reset(void)
 {
-       load_idt((const struct desc_ptr *)&idt_descr);
+       if (WARN_ON(!this_cpu_read(debug_stack_use_ctr)))
+               return;
+       if (this_cpu_dec_return(debug_stack_use_ctr) == 0)
+               load_idt((const struct desc_ptr *)&idt_descr);
 }
 
 #else  /* CONFIG_X86_64 */
index 507ea58688e251c12d9f33313cf5493edab3d499..cd8b166a1735326d614d92ef6b44292149c8c5cd 100644 (file)
@@ -42,7 +42,8 @@ void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
        struct mce m;
 
        /* Only corrected MC is reported */
-       if (!corrected)
+       if (!corrected || !(mem_err->validation_bits &
+                               CPER_MEM_VALID_PHYSICAL_ADDRESS))
                return;
 
        mce_setup(&m);
index 0c82091b1652cf126440cb608ae3d8ab10d1945c..413c2ced887c66c89b1f49fb18a3bac29604ccb2 100644 (file)
@@ -126,6 +126,16 @@ static struct severity {
                SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
                USER
                ),
+       MCESEV(
+               KEEP, "HT thread notices Action required: instruction fetch error",
+               SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
+               MCGMASK(MCG_STATUS_EIPV, 0)
+               ),
+       MCESEV(
+               AR, "Action required: instruction fetch error",
+               SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
+               USER
+               ),
 #endif
        MCESEV(
                PANIC, "Action required: unknown MCACOD",
@@ -165,15 +175,19 @@ static struct severity {
 };
 
 /*
- * If the EIPV bit is set, it means the saved IP is the
- * instruction which caused the MCE.
+ * If mcgstatus indicated that ip/cs on the stack were
+ * no good, then "m->cs" will be zero and we will have
+ * to assume the worst case (IN_KERNEL) as we actually
+ * have no idea what we were executing when the machine
+ * check hit.
+ * If we do have a good "m->cs" (or a faked one in the
+ * case we were executing in VM86 mode) we can use it to
+ * distinguish an exception taken in user from from one
+ * taken in the kernel.
  */
 static int error_context(struct mce *m)
 {
-       if (m->mcgstatus & MCG_STATUS_EIPV)
-               return (m->ip && (m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
-       /* Unknown, assume kernel */
-       return IN_KERNEL;
+       return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
 }
 
 int mce_severity(struct mce *m, int tolerant, char **msg)
index 2afcbd253e1da1768a10eeb6bdb9a74d286048b7..0a687fd185e6c97b496578d1dc6acb4001c2833d 100644 (file)
@@ -437,6 +437,14 @@ static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
                if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
                        m->ip = regs->ip;
                        m->cs = regs->cs;
+
+                       /*
+                        * When in VM86 mode make the cs look like ring 3
+                        * always. This is a lie, but it's better than passing
+                        * the additional vm86 bit around everywhere.
+                        */
+                       if (v8086_mode(regs))
+                               m->cs |= 3;
                }
                /* Use accurate RIP reporting if available. */
                if (rip_msr)
@@ -641,16 +649,18 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
  * Do a quick check if any of the events requires a panic.
  * This decides if we keep the events around or clear them.
  */
-static int mce_no_way_out(struct mce *m, char **msg)
+static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp)
 {
-       int i;
+       int i, ret = 0;
 
        for (i = 0; i < banks; i++) {
                m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
+               if (m->status & MCI_STATUS_VAL)
+                       __set_bit(i, validp);
                if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
-                       return 1;
+                       ret = 1;
        }
-       return 0;
+       return ret;
 }
 
 /*
@@ -1013,6 +1023,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
         */
        int kill_it = 0;
        DECLARE_BITMAP(toclear, MAX_NR_BANKS);
+       DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
        char *msg = "Unknown";
 
        atomic_inc(&mce_entry);
@@ -1027,7 +1038,8 @@ void do_machine_check(struct pt_regs *regs, long error_code)
        final = &__get_cpu_var(mces_seen);
        *final = m;
 
-       no_way_out = mce_no_way_out(&m, &msg);
+       memset(valid_banks, 0, sizeof(valid_banks));
+       no_way_out = mce_no_way_out(&m, &msg, valid_banks);
 
        barrier();
 
@@ -1047,6 +1059,8 @@ void do_machine_check(struct pt_regs *regs, long error_code)
        order = mce_start(&no_way_out);
        for (i = 0; i < banks; i++) {
                __clear_bit(i, toclear);
+               if (!test_bit(i, valid_banks))
+                       continue;
                if (!mce_banks[i].ctl)
                        continue;
 
@@ -1237,15 +1251,15 @@ void mce_log_therm_throt_event(__u64 status)
  * poller finds an MCE, poll 2x faster.  When the poller finds no more
  * errors, poll 2x slower (up to check_interval seconds).
  */
-static int check_interval = 5 * 60; /* 5 minutes */
+static unsigned long check_interval = 5 * 60; /* 5 minutes */
 
-static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
+static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
-static void mce_start_timer(unsigned long data)
+static void mce_timer_fn(unsigned long data)
 {
-       struct timer_list *t = &per_cpu(mce_timer, data);
-       int *n;
+       struct timer_list *t = &__get_cpu_var(mce_timer);
+       unsigned long iv;
 
        WARN_ON(smp_processor_id() != data);
 
@@ -1258,13 +1272,14 @@ static void mce_start_timer(unsigned long data)
         * Alert userspace if needed.  If we logged an MCE, reduce the
         * polling interval, otherwise increase the polling interval.
         */
-       n = &__get_cpu_var(mce_next_interval);
+       iv = __this_cpu_read(mce_next_interval);
        if (mce_notify_irq())
-               *n = max(*n/2, HZ/100);
+               iv = max(iv, (unsigned long) HZ/100);
        else
-               *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
+               iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
+       __this_cpu_write(mce_next_interval, iv);
 
-       t->expires = jiffies + *n;
+       t->expires = jiffies + iv;
        add_timer_on(t, smp_processor_id());
 }
 
@@ -1458,9 +1473,9 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
                                 rdmsrl(msrs[i], val);
 
                                 /* CntP bit set? */
-                                if (val & BIT(62)) {
-                                        val &= ~BIT(62);
-                                        wrmsrl(msrs[i], val);
+                                if (val & BIT_64(62)) {
+                                       val &= ~BIT_64(62);
+                                       wrmsrl(msrs[i], val);
                                 }
                         }
 
@@ -1542,17 +1557,17 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 static void __mcheck_cpu_init_timer(void)
 {
        struct timer_list *t = &__get_cpu_var(mce_timer);
-       int *n = &__get_cpu_var(mce_next_interval);
+       unsigned long iv = __this_cpu_read(mce_next_interval);
 
-       setup_timer(t, mce_start_timer, smp_processor_id());
+       setup_timer(t, mce_timer_fn, smp_processor_id());
 
        if (mce_ignore_ce)
                return;
 
-       *n = check_interval * HZ;
-       if (!*n)
+       __this_cpu_write(mce_next_interval, iv);
+       if (!iv)
                return;
-       t->expires = round_jiffies(jiffies + *n);
+       t->expires = round_jiffies(jiffies + iv);
        add_timer_on(t, smp_processor_id());
 }
 
@@ -2262,7 +2277,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
        case CPU_DOWN_FAILED_FROZEN:
                if (!mce_ignore_ce && check_interval) {
                        t->expires = round_jiffies(jiffies +
-                                          __get_cpu_var(mce_next_interval));
+                                       per_cpu(mce_next_interval, cpu));
                        add_timer_on(t, cpu);
                }
                smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
index ac140c7be396b6f55c97521ddcd572df664e95b6..bdda2e6c673bf71afb30ed670071a9d02dbdcbfa 100644 (file)
@@ -266,7 +266,7 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
                if (align > max_align)
                        align = max_align;
 
-               sizek = 1 << align;
+               sizek = 1UL << align;
                if (debug_print) {
                        char start_factor = 'K', size_factor = 'K';
                        unsigned long start_base, size_base;
index 62d61e9976eb0a83bca3aae5c979bb63505c0bbb..41857970517f795739018c01e1fc070f0a831fff 100644 (file)
@@ -113,7 +113,9 @@ static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
        int x = e820x->nr_map;
 
        if (x >= ARRAY_SIZE(e820x->map)) {
-               printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+               printk(KERN_ERR "e820: too many entries; ignoring [mem %#010llx-%#010llx]\n",
+                      (unsigned long long) start,
+                      (unsigned long long) (start + size - 1));
                return;
        }
 
@@ -133,19 +135,19 @@ static void __init e820_print_type(u32 type)
        switch (type) {
        case E820_RAM:
        case E820_RESERVED_KERN:
-               printk(KERN_CONT "(usable)");
+               printk(KERN_CONT "usable");
                break;
        case E820_RESERVED:
-               printk(KERN_CONT "(reserved)");
+               printk(KERN_CONT "reserved");
                break;
        case E820_ACPI:
-               printk(KERN_CONT "(ACPI data)");
+               printk(KERN_CONT "ACPI data");
                break;
        case E820_NVS:
-               printk(KERN_CONT "(ACPI NVS)");
+               printk(KERN_CONT "ACPI NVS");
                break;
        case E820_UNUSABLE:
-               printk(KERN_CONT "(unusable)");
+               printk(KERN_CONT "unusable");
                break;
        default:
                printk(KERN_CONT "type %u", type);
@@ -158,10 +160,10 @@ void __init e820_print_map(char *who)
        int i;
 
        for (i = 0; i < e820.nr_map; i++) {
-               printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
+               printk(KERN_INFO "%s: [mem %#018Lx-%#018Lx] ", who,
                       (unsigned long long) e820.map[i].addr,
                       (unsigned long long)
-                      (e820.map[i].addr + e820.map[i].size));
+                      (e820.map[i].addr + e820.map[i].size - 1));
                e820_print_type(e820.map[i].type);
                printk(KERN_CONT "\n");
        }
@@ -428,9 +430,8 @@ static u64 __init __e820_update_range(struct e820map *e820x, u64 start,
                size = ULLONG_MAX - start;
 
        end = start + size;
-       printk(KERN_DEBUG "e820 update range: %016Lx - %016Lx ",
-                      (unsigned long long) start,
-                      (unsigned long long) end);
+       printk(KERN_DEBUG "e820: update [mem %#010Lx-%#010Lx] ",
+              (unsigned long long) start, (unsigned long long) (end - 1));
        e820_print_type(old_type);
        printk(KERN_CONT " ==> ");
        e820_print_type(new_type);
@@ -509,9 +510,8 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
                size = ULLONG_MAX - start;
 
        end = start + size;
-       printk(KERN_DEBUG "e820 remove range: %016Lx - %016Lx ",
-                      (unsigned long long) start,
-                      (unsigned long long) end);
+       printk(KERN_DEBUG "e820: remove [mem %#010Lx-%#010Lx] ",
+              (unsigned long long) start, (unsigned long long) (end - 1));
        if (checktype)
                e820_print_type(old_type);
        printk(KERN_CONT "\n");
@@ -567,7 +567,7 @@ void __init update_e820(void)
        if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
                return;
        e820.nr_map = nr_map;
-       printk(KERN_INFO "modified physical RAM map:\n");
+       printk(KERN_INFO "e820: modified physical RAM map:\n");
        e820_print_map("modified");
 }
 static void __init update_e820_saved(void)
@@ -637,8 +637,8 @@ __init void e820_setup_gap(void)
        if (!found) {
                gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
                printk(KERN_ERR
-       "PCI: Warning: Cannot find a gap in the 32bit address range\n"
-       "PCI: Unassigned devices with 32bit resource registers may break!\n");
+       "e820: cannot find a gap in the 32bit address range\n"
+       "e820: PCI devices with unassigned 32bit BARs may break!\n");
        }
 #endif
 
@@ -648,8 +648,8 @@ __init void e820_setup_gap(void)
        pci_mem_start = gapstart;
 
        printk(KERN_INFO
-              "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
-              pci_mem_start, gapstart, gapsize);
+              "e820: [mem %#010lx-%#010lx] available for PCI devices\n",
+              gapstart, gapstart + gapsize - 1);
 }
 
 /**
@@ -667,7 +667,7 @@ void __init parse_e820_ext(struct setup_data *sdata)
        extmap = (struct e820entry *)(sdata->data);
        __append_e820_map(extmap, entries);
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
-       printk(KERN_INFO "extended physical RAM map:\n");
+       printk(KERN_INFO "e820: extended physical RAM map:\n");
        e820_print_map("extended");
 }
 
@@ -734,7 +734,7 @@ u64 __init early_reserve_e820(u64 size, u64 align)
        addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
        if (addr) {
                e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED);
-               printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
+               printk(KERN_INFO "e820: update e820_saved for early_reserve_e820\n");
                update_e820_saved();
        }
 
@@ -784,7 +784,7 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
        if (last_pfn > max_arch_pfn)
                last_pfn = max_arch_pfn;
 
-       printk(KERN_INFO "last_pfn = %#lx max_arch_pfn = %#lx\n",
+       printk(KERN_INFO "e820: last_pfn = %#lx max_arch_pfn = %#lx\n",
                         last_pfn, max_arch_pfn);
        return last_pfn;
 }
@@ -888,7 +888,7 @@ void __init finish_e820_parsing(void)
                        early_panic("Invalid user supplied memory map");
                e820.nr_map = nr;
 
-               printk(KERN_INFO "user-defined physical RAM map:\n");
+               printk(KERN_INFO "e820: user-defined physical RAM map:\n");
                e820_print_map("user");
        }
 }
@@ -996,8 +996,9 @@ void __init e820_reserve_resources_late(void)
                        end = MAX_RESOURCE_SIZE;
                if (start >= end)
                        continue;
-               printk(KERN_DEBUG "reserve RAM buffer: %016llx - %016llx ",
-                              start, end);
+               printk(KERN_DEBUG
+                      "e820: reserve RAM buffer [mem %#010llx-%#010llx]\n",
+                      start, end);
                reserve_region_with_split(&iomem_resource, start, end,
                                          "RAM buffer");
        }
@@ -1047,7 +1048,7 @@ void __init setup_memory_map(void)
 
        who = x86_init.resources.memory_setup();
        memcpy(&e820_saved, &e820, sizeof(struct e820map));
-       printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+       printk(KERN_INFO "e820: BIOS-provided physical RAM map:\n");
        e820_print_map(who);
 }
 
index 01ccf9b71473ce18ffe0cccf5fa09fd379dcf4cf..623f288374763286ec9e58aa66ecabb4e3fc2f6b 100644 (file)
@@ -316,7 +316,6 @@ ret_from_exception:
        preempt_stop(CLBR_ANY)
 ret_from_intr:
        GET_THREAD_INFO(%ebp)
-resume_userspace_sig:
 #ifdef CONFIG_VM86
        movl PT_EFLAGS(%esp), %eax      # mix EFLAGS and CS
        movb PT_CS(%esp), %al
@@ -615,9 +614,13 @@ work_notifysig:                            # deal with pending signals and
                                        # vm86-space
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
+       movb PT_CS(%esp), %bl
+       andb $SEGMENT_RPL_MASK, %bl
+       cmpb $USER_RPL, %bl
+       jb resume_kernel
        xorl %edx, %edx
        call do_notify_resume
-       jmp resume_userspace_sig
+       jmp resume_userspace
 
        ALIGN
 work_notifysig_v86:
@@ -630,9 +633,13 @@ work_notifysig_v86:
 #endif
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
+       movb PT_CS(%esp), %bl
+       andb $SEGMENT_RPL_MASK, %bl
+       cmpb $USER_RPL, %bl
+       jb resume_kernel
        xorl %edx, %edx
        call do_notify_resume
-       jmp resume_userspace_sig
+       jmp resume_userspace
 END(work_pending)
 
        # perform syscall exit tracing
index 320852d02026171d537b58bd95868113fa458d10..7d65133b51bede19fc529fd82691de2c01926f60 100644 (file)
@@ -190,6 +190,44 @@ ENDPROC(native_usergs_sysret64)
 #endif
 .endm
 
+/*
+ * When dynamic function tracer is enabled it will add a breakpoint
+ * to all locations that it is about to modify, sync CPUs, update
+ * all the code, sync CPUs, then remove the breakpoints. In this time
+ * if lockdep is enabled, it might jump back into the debug handler
+ * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
+ *
+ * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
+ * make sure the stack pointer does not get reset back to the top
+ * of the debug stack, and instead just reuses the current stack.
+ */
+#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)
+
+.macro TRACE_IRQS_OFF_DEBUG
+       call debug_stack_set_zero
+       TRACE_IRQS_OFF
+       call debug_stack_reset
+.endm
+
+.macro TRACE_IRQS_ON_DEBUG
+       call debug_stack_set_zero
+       TRACE_IRQS_ON
+       call debug_stack_reset
+.endm
+
+.macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
+       bt   $9,EFLAGS-\offset(%rsp)    /* interrupts off? */
+       jnc  1f
+       TRACE_IRQS_ON_DEBUG
+1:
+.endm
+
+#else
+# define TRACE_IRQS_OFF_DEBUG          TRACE_IRQS_OFF
+# define TRACE_IRQS_ON_DEBUG           TRACE_IRQS_ON
+# define TRACE_IRQS_IRETQ_DEBUG                TRACE_IRQS_IRETQ
+#endif
+
 /*
  * C code is not supposed to know about undefined top of stack. Every time
  * a C function with an pt_regs argument is called from the SYSCALL based
@@ -1098,7 +1136,7 @@ ENTRY(\sym)
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
-       TRACE_IRQS_OFF
+       TRACE_IRQS_OFF_DEBUG
        movq %rsp,%rdi          /* pt_regs pointer */
        xorl %esi,%esi          /* no error code */
        subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
@@ -1393,7 +1431,7 @@ paranoidzeroentry machine_check *machine_check_vector(%rip)
 ENTRY(paranoid_exit)
        DEFAULT_FRAME
        DISABLE_INTERRUPTS(CLBR_NONE)
-       TRACE_IRQS_OFF
+       TRACE_IRQS_OFF_DEBUG
        testl %ebx,%ebx                         /* swapgs needed? */
        jnz paranoid_restore
        testl $3,CS(%rsp)
@@ -1404,7 +1442,7 @@ paranoid_swapgs:
        RESTORE_ALL 8
        jmp irq_return
 paranoid_restore:
-       TRACE_IRQS_IRETQ 0
+       TRACE_IRQS_IRETQ_DEBUG 0
        RESTORE_ALL 8
        jmp irq_return
 paranoid_userspace:
index 32ff36596ab10d65d8d5050402eb8d24a5f3fb6f..c3a7cb4bf6e6f0f429495d8f4a9478ac9161c0d6 100644 (file)
@@ -100,7 +100,7 @@ static const unsigned char *ftrace_nop_replace(void)
 }
 
 static int
-ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
                   unsigned const char *new_code)
 {
        unsigned char replaced[MCOUNT_INSN_SIZE];
@@ -141,7 +141,20 @@ int ftrace_make_nop(struct module *mod,
        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace();
 
-       return ftrace_modify_code(rec->ip, old, new);
+       /*
+        * On boot up, and when modules are loaded, the MCOUNT_ADDR
+        * is converted to a nop, and will never become MCOUNT_ADDR
+        * again. This code is either running before SMP (on boot up)
+        * or before the code will ever be executed (module load).
+        * We do not want to use the breakpoint version in this case,
+        * just modify the code directly.
+        */
+       if (addr == MCOUNT_ADDR)
+               return ftrace_modify_code_direct(rec->ip, old, new);
+
+       /* Normal cases use add_brk_on_nop */
+       WARN_ONCE(1, "invalid use of ftrace_make_nop");
+       return -EINVAL;
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
@@ -152,9 +165,47 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
        old = ftrace_nop_replace();
        new = ftrace_call_replace(ip, addr);
 
-       return ftrace_modify_code(rec->ip, old, new);
+       /* Should only be called when module is loaded */
+       return ftrace_modify_code_direct(rec->ip, old, new);
 }
 
+/*
+ * The modifying_ftrace_code is used to tell the breakpoint
+ * handler to call ftrace_int3_handler(). If it fails to
+ * call this handler for a breakpoint added by ftrace, then
+ * the kernel may crash.
+ *
+ * As atomic_writes on x86 do not need a barrier, we do not
+ * need to add smp_mb()s for this to work. It is also considered
+ * that we can not read the modifying_ftrace_code before
+ * executing the breakpoint. That would be quite remarkable if
+ * it could do that. Here's the flow that is required:
+ *
+ *   CPU-0                          CPU-1
+ *
+ * atomic_inc(mfc);
+ * write int3s
+ *                             <trap-int3> // implicit (r)mb
+ *                             if (atomic_read(mfc))
+ *                                     call ftrace_int3_handler()
+ *
+ * Then when we are finished:
+ *
+ * atomic_dec(mfc);
+ *
+ * If we hit a breakpoint that was not set by ftrace, it does not
+ * matter if ftrace_int3_handler() is called or not. It will
+ * simply be ignored. But it is crucial that a ftrace nop/caller
+ * breakpoint is handled. No other user should ever place a
+ * breakpoint on an ftrace nop/caller location. It must only
+ * be done by this code.
+ */
+atomic_t modifying_ftrace_code __read_mostly;
+
+static int
+ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+                  unsigned const char *new_code);
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
        unsigned long ip = (unsigned long)(&ftrace_call);
@@ -163,13 +214,17 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
        memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
        new = ftrace_call_replace(ip, (unsigned long)func);
+
+       /* See comment above by declaration of modifying_ftrace_code */
+       atomic_inc(&modifying_ftrace_code);
+
        ret = ftrace_modify_code(ip, old, new);
 
+       atomic_dec(&modifying_ftrace_code);
+
        return ret;
 }
 
-int modifying_ftrace_code __read_mostly;
-
 /*
  * A breakpoint was added to the code address we are about to
  * modify, and this is the handle that will just skip over it.
@@ -489,13 +544,46 @@ void ftrace_replace_code(int enable)
        }
 }
 
+static int
+ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+                  unsigned const char *new_code)
+{
+       int ret;
+
+       ret = add_break(ip, old_code);
+       if (ret)
+               goto out;
+
+       run_sync();
+
+       ret = add_update_code(ip, new_code);
+       if (ret)
+               goto fail_update;
+
+       run_sync();
+
+       ret = ftrace_write(ip, new_code, 1);
+       if (ret) {
+               ret = -EPERM;
+               goto out;
+       }
+       run_sync();
+ out:
+       return ret;
+
+ fail_update:
+       probe_kernel_write((void *)ip, &old_code[0], 1);
+       goto out;
+}
+
 void arch_ftrace_update_code(int command)
 {
-       modifying_ftrace_code++;
+       /* See comment above by declaration of modifying_ftrace_code */
+       atomic_inc(&modifying_ftrace_code);
 
        ftrace_modify_all_code(command);
 
-       modifying_ftrace_code--;
+       atomic_dec(&modifying_ftrace_code);
 }
 
 int __init ftrace_dyn_arch_init(void *data)
index 51ff18616d50be24bc61fceef68ef43ccbb4becb..c18f59d10101cefc82f6638bcca758a480b70cc3 100644 (file)
@@ -14,7 +14,6 @@
 #include <asm/sections.h>
 #include <asm/e820.h>
 #include <asm/page.h>
-#include <asm/trampoline.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
 #include <asm/bios_ebda.h>
index 3a3b779f41d320b81443615eb62a2ddadd0e5061..037df57a99ac34d5ba1a5cab5abaa4554e0689d0 100644 (file)
@@ -24,7 +24,6 @@
 #include <asm/sections.h>
 #include <asm/kdebug.h>
 #include <asm/e820.h>
-#include <asm/trampoline.h>
 #include <asm/bios_ebda.h>
 
 static void __init zap_identity_mappings(void)
index 463c9797ca6ab5f392bc65c6fbe1c1ddfc699c18..d42ab17b739729c4b27d3f3fe02fdea2633f2f5a 100644 (file)
@@ -274,10 +274,7 @@ num_subarch_entries = (. - subarch_entries) / 4
  * If cpu hotplug is not supported then this code can go in init section
  * which will be freed later
  */
-
 __CPUINIT
-
-#ifdef CONFIG_SMP
 ENTRY(startup_32_smp)
        cld
        movl $(__BOOT_DS),%eax
@@ -288,7 +285,7 @@ ENTRY(startup_32_smp)
        movl pa(stack_start),%ecx
        movl %eax,%ss
        leal -__PAGE_OFFSET(%ecx),%esp
-#endif /* CONFIG_SMP */
+
 default_entry:
 
 /*
index 7a40f2447321d5c6af500be479844ac6379a98b3..94bf9cc2c7ee5f380e28246182e00e06f64ff573 100644 (file)
@@ -139,10 +139,6 @@ ident_complete:
        /* Fixup phys_base */
        addq    %rbp, phys_base(%rip)
 
-       /* Fixup trampoline */
-       addq    %rbp, trampoline_level4_pgt + 0(%rip)
-       addq    %rbp, trampoline_level4_pgt + (511*8)(%rip)
-
        /* Due to ENTRY(), sometimes the empty space gets filled with
         * zeros. Better take a jmp than relying on empty space being
         * filled with 0x90 (nop)
index 9cc7b4392f7c8b0462ad4d031667e5197eff9373..1460a5df92f7a7f314ed0be95a81765cf2df676e 100644 (file)
@@ -870,7 +870,7 @@ int __init hpet_enable(void)
        else
                pr_warn("HPET initial state will not be saved\n");
        cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
-       hpet_writel(cfg, HPET_Tn_CFG(i));
+       hpet_writel(cfg, HPET_CFG);
        if (cfg)
                pr_warn("HPET: Unrecognized bits %#x set in global cfg\n",
                        cfg);
index f8492da65bfcb03e2775638ad5ce9502099d62c8..086eb58c6e801134296372acd7d9efb36de6d12b 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/msr.h>
 #include <asm/apic.h>
 #include <linux/percpu.h>
+#include <linux/hardirq.h>
 
 #include <asm/x86_init.h>
 #include <asm/reboot.h>
@@ -114,6 +115,25 @@ static void kvm_get_preset_lpj(void)
        preset_lpj = lpj;
 }
 
+bool kvm_check_and_clear_guest_paused(void)
+{
+       bool ret = false;
+       struct pvclock_vcpu_time_info *src;
+
+       /*
+        * per_cpu() is safe here because this function is only called from
+        * timer functions where preemption is already disabled.
+        */
+       WARN_ON(!in_atomic());
+       src = &__get_cpu_var(hv_clock);
+       if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
+               __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
+               ret = true;
+       }
+
+       return ret;
+}
+
 static struct clocksource kvm_clock = {
        .name = "kvm-clock",
        .read = kvm_clock_get_cycles,
index b02d4dd6b8a304e0bbd0d3d2d6a62e470d01bc2a..d2b56489d70fb12781e6787c98b1ea13f7d08705 100644 (file)
@@ -27,7 +27,6 @@
 #include <asm/proto.h>
 #include <asm/bios_ebda.h>
 #include <asm/e820.h>
-#include <asm/trampoline.h>
 #include <asm/setup.h>
 #include <asm/smp.h>
 
@@ -568,8 +567,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
        struct mpf_intel *mpf;
        unsigned long mem;
 
-       apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
-                       bp, length);
+       apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
+                   base, base + length - 1);
        BUILD_BUG_ON(sizeof(*mpf) != 16);
 
        while (length > 0) {
@@ -584,8 +583,10 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
 #endif
                        mpf_found = mpf;
 
-                       printk(KERN_INFO "found SMP MP-table at [%p] %llx\n",
-                              mpf, (u64)virt_to_phys(mpf));
+                       printk(KERN_INFO "found SMP MP-table at [mem %#010llx-%#010llx] mapped at [%p]\n",
+                              (unsigned long long) virt_to_phys(mpf),
+                              (unsigned long long) virt_to_phys(mpf) +
+                              sizeof(*mpf) - 1, mpf);
 
                        mem = virt_to_phys(mpf);
                        memblock_reserve(mem, sizeof(*mpf));
index 90875279ef3d56bb04fad7262fb9ea98e2c61dcf..a0b2f84457bebfb88de00f547c9b8db930abca5a 100644 (file)
@@ -444,14 +444,16 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
         */
        if (unlikely(is_debug_stack(regs->sp))) {
                debug_stack_set_zero();
-               __get_cpu_var(update_debug_stack) = 1;
+               this_cpu_write(update_debug_stack, 1);
        }
 }
 
 static inline void nmi_nesting_postprocess(void)
 {
-       if (unlikely(__get_cpu_var(update_debug_stack)))
+       if (unlikely(this_cpu_read(update_debug_stack))) {
                debug_stack_reset();
+               this_cpu_write(update_debug_stack, 0);
+       }
 }
 #endif
 
index 3003250ac51dbcdc5be17a2fa37a84d9aae4a4e1..62c9457ccd2f1c4ccad62a757aebdcd520498cc3 100644 (file)
@@ -100,14 +100,18 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 struct dma_attrs *attrs)
 {
        unsigned long dma_mask;
-       struct page *page;
+       struct page *page = NULL;
+       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        dma_addr_t addr;
 
        dma_mask = dma_alloc_coherent_mask(dev, flag);
 
        flag |= __GFP_ZERO;
 again:
-       page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
+       if (!(flag & GFP_ATOMIC))
+               page = dma_alloc_from_contiguous(dev, count, get_order(size));
+       if (!page)
+               page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
        if (!page)
                return NULL;
 
@@ -127,6 +131,16 @@ again:
        return page_address(page);
 }
 
+void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
+                              dma_addr_t dma_addr, struct dma_attrs *attrs)
+{
+       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       struct page *page = virt_to_page(vaddr);
+
+       if (!dma_release_from_contiguous(dev, page, count))
+               free_pages((unsigned long)vaddr, get_order(size));
+}
+
 /*
  * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
  * parameter documentation.
index f96050685b46eb16504f3731ea02096464750829..871be4a84c7d752c1a322ed990ff18093b192c0b 100644 (file)
@@ -74,12 +74,6 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
        return nents;
 }
 
-static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
-                               dma_addr_t dma_addr, struct dma_attrs *attrs)
-{
-       free_pages((unsigned long)vaddr, get_order(size));
-}
-
 static void nommu_sync_single_for_device(struct device *dev,
                        dma_addr_t addr, size_t size,
                        enum dma_data_direction dir)
@@ -97,7 +91,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
 
 struct dma_map_ops nommu_dma_ops = {
        .alloc                  = dma_generic_alloc_coherent,
-       .free                   = nommu_free_coherent,
+       .free                   = dma_generic_free_coherent,
        .map_sg                 = nommu_map_sg,
        .map_page               = nommu_map_page,
        .sync_single_for_device = nommu_sync_single_for_device,
index 13b1990c7c5839e96d5f5fac0951c48eb3430c9a..c4c6a5c2bf0f393ffa8588a1fa7376bcaa9513bb 100644 (file)
@@ -1211,12 +1211,6 @@ static long x32_arch_ptrace(struct task_struct *child,
                                             0, sizeof(struct user_i387_struct),
                                             datap);
 
-               /* normal 64bit interface to access TLS data.
-                  Works just like arch_prctl, except that the arguments
-                  are reversed. */
-       case PTRACE_ARCH_PRCTL:
-               return do_arch_prctl(child, data, addr);
-
        default:
                return compat_ptrace_request(child, request, addr, data);
        }
index 77215c23fba1250dc9b45085e36d99d7356358b7..79c45af81604c7e191116c23b3625c80bb81476d 100644 (file)
@@ -24,6 +24,7 @@
 #ifdef CONFIG_X86_32
 # include <linux/ctype.h>
 # include <linux/mc146818rtc.h>
+# include <asm/realmode.h>
 #else
 # include <asm/x86_init.h>
 #endif
@@ -156,15 +157,10 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
        return 0;
 }
 
-extern const unsigned char machine_real_restart_asm[];
-extern const u64 machine_real_restart_gdt[3];
-
 void machine_real_restart(unsigned int type)
 {
-       void *restart_va;
-       unsigned long restart_pa;
-       void (*restart_lowmem)(unsigned int);
-       u64 *lowmem_gdt;
+       void (*restart_lowmem)(unsigned int) = (void (*)(unsigned int))
+               real_mode_header->machine_real_restart_asm;
 
        local_irq_disable();
 
@@ -195,21 +191,6 @@ void machine_real_restart(unsigned int type)
         * too. */
        *((unsigned short *)0x472) = reboot_mode;
 
-       /* Patch the GDT in the low memory trampoline */
-       lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
-
-       restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
-       restart_pa = virt_to_phys(restart_va);
-       restart_lowmem = (void (*)(unsigned int))restart_pa;
-
-       /* GDT[0]: GDT self-pointer */
-       lowmem_gdt[0] =
-               (u64)(sizeof(machine_real_restart_gdt) - 1) +
-               ((u64)virt_to_phys(lowmem_gdt) << 16);
-       /* GDT[1]: 64K real mode code segment */
-       lowmem_gdt[1] =
-               GDT_ENTRY(0x009b, restart_pa, 0xffff);
-
        /* Jump to the identity-mapped low memory code */
        restart_lowmem(type);
 }
diff --git a/arch/x86/kernel/reboot_32.S b/arch/x86/kernel/reboot_32.S
deleted file mode 100644 (file)
index 1d5c46d..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/segment.h>
-#include <asm/page_types.h>
-
-/*
- * The following code and data reboots the machine by switching to real
- * mode and jumping to the BIOS reset entry point, as if the CPU has
- * really been reset.  The previous version asked the keyboard
- * controller to pulse the CPU reset line, which is more thorough, but
- * doesn't work with at least one type of 486 motherboard.  It is easy
- * to stop this code working; hence the copious comments.
- *
- * This code is called with the restart type (0 = BIOS, 1 = APM) in %eax.
- */
-       .section ".x86_trampoline","a"
-       .balign 16
-       .code32
-ENTRY(machine_real_restart_asm)
-r_base = .
-       /* Get our own relocated address */
-       call    1f
-1:     popl    %ebx
-       subl    $(1b - r_base), %ebx
-
-       /* Compute the equivalent real-mode segment */
-       movl    %ebx, %ecx
-       shrl    $4, %ecx
-       
-       /* Patch post-real-mode segment jump */
-       movw    (dispatch_table - r_base)(%ebx,%eax,2),%ax
-       movw    %ax, (101f - r_base)(%ebx)
-       movw    %cx, (102f - r_base)(%ebx)
-
-       /* Set up the IDT for real mode. */
-       lidtl   (machine_real_restart_idt - r_base)(%ebx)
-
-       /*
-        * Set up a GDT from which we can load segment descriptors for real
-        * mode.  The GDT is not used in real mode; it is just needed here to
-        * prepare the descriptors.
-        */
-       lgdtl   (machine_real_restart_gdt - r_base)(%ebx)
-
-       /*
-        * Load the data segment registers with 16-bit compatible values
-        */
-       movl    $16, %ecx
-       movl    %ecx, %ds
-       movl    %ecx, %es
-       movl    %ecx, %fs
-       movl    %ecx, %gs
-       movl    %ecx, %ss
-       ljmpl   $8, $1f - r_base
-
-/*
- * This is 16-bit protected mode code to disable paging and the cache,
- * switch to real mode and jump to the BIOS reset code.
- *
- * The instruction that switches to real mode by writing to CR0 must be
- * followed immediately by a far jump instruction, which set CS to a
- * valid value for real mode, and flushes the prefetch queue to avoid
- * running instructions that have already been decoded in protected
- * mode.
- *
- * Clears all the flags except ET, especially PG (paging), PE
- * (protected-mode enable) and TS (task switch for coprocessor state
- * save).  Flushes the TLB after paging has been disabled.  Sets CD and
- * NW, to disable the cache on a 486, and invalidates the cache.  This
- * is more like the state of a 486 after reset.  I don't know if
- * something else should be done for other chips.
- *
- * More could be done here to set up the registers as if a CPU reset had
- * occurred; hopefully real BIOSs don't assume much.  This is not the
- * actual BIOS entry point, anyway (that is at 0xfffffff0).
- *
- * Most of this work is probably excessive, but it is what is tested.
- */
-       .code16
-1:
-       xorl    %ecx, %ecx
-       movl    %cr0, %eax
-       andl    $0x00000011, %eax
-       orl     $0x60000000, %eax
-       movl    %eax, %cr0
-       movl    %ecx, %cr3
-       movl    %cr0, %edx
-       andl    $0x60000000, %edx       /* If no cache bits -> no wbinvd */
-       jz      2f
-       wbinvd
-2:
-       andb    $0x10, %al
-       movl    %eax, %cr0
-       .byte   0xea                    /* ljmpw */
-101:   .word   0                       /* Offset */
-102:   .word   0                       /* Segment */
-
-bios:
-       ljmpw   $0xf000, $0xfff0
-
-apm:
-       movw    $0x1000, %ax
-       movw    %ax, %ss
-       movw    $0xf000, %sp
-       movw    $0x5307, %ax
-       movw    $0x0001, %bx
-       movw    $0x0003, %cx
-       int     $0x15
-
-END(machine_real_restart_asm)
-
-       .balign 16
-       /* These must match <asm/reboot.h */
-dispatch_table:
-       .word   bios - r_base
-       .word   apm - r_base
-END(dispatch_table)
-
-       .balign 16
-machine_real_restart_idt:
-       .word   0xffff          /* Length - real mode default value */
-       .long   0               /* Base - real mode default value */
-END(machine_real_restart_idt)
-
-       .balign 16
-ENTRY(machine_real_restart_gdt)
-       .quad   0               /* Self-pointer, filled in by PM code */
-       .quad   0               /* 16-bit code segment, filled in by PM code */
-       /*
-        * 16-bit data segment with the selector value 16 = 0x10 and
-        * base value 0x100; since this is consistent with real mode
-        * semantics we don't have to reload the segments once CR0.PE = 0.
-        */
-       .quad   GDT_ENTRY(0x0093, 0x100, 0xffff)
-END(machine_real_restart_gdt)
index 366c688d619e85c94d0bb826dc2c874f5aa324f5..16be6dc14db1b67c108475466ee494312e82d665 100644 (file)
@@ -49,6 +49,7 @@
 #include <asm/pci-direct.h>
 #include <linux/init_ohci1394_dma.h>
 #include <linux/kvm_para.h>
+#include <linux/dma-contiguous.h>
 
 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -72,7 +73,7 @@
 
 #include <asm/mtrr.h>
 #include <asm/apic.h>
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
 #include <asm/e820.h>
 #include <asm/mpspec.h>
 #include <asm/setup.h>
@@ -333,8 +334,8 @@ static void __init relocate_initrd(void)
        memblock_reserve(ramdisk_here, area_size);
        initrd_start = ramdisk_here + PAGE_OFFSET;
        initrd_end   = initrd_start + ramdisk_size;
-       printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
-                        ramdisk_here, ramdisk_here + ramdisk_size);
+       printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
+                        ramdisk_here, ramdisk_here + ramdisk_size - 1);
 
        q = (char *)initrd_start;
 
@@ -365,8 +366,8 @@ static void __init relocate_initrd(void)
        /* high pages is not converted by early_res_to_bootmem */
        ramdisk_image = boot_params.hdr.ramdisk_image;
        ramdisk_size  = boot_params.hdr.ramdisk_size;
-       printk(KERN_INFO "Move RAMDISK from %016llx - %016llx to"
-               " %08llx - %08llx\n",
+       printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
+               " [mem %#010llx-%#010llx]\n",
                ramdisk_image, ramdisk_image + ramdisk_size - 1,
                ramdisk_here, ramdisk_here + ramdisk_size - 1);
 }
@@ -391,8 +392,8 @@ static void __init reserve_initrd(void)
                       ramdisk_size, end_of_lowmem>>1);
        }
 
-       printk(KERN_INFO "RAMDISK: %08llx - %08llx\n", ramdisk_image,
-                       ramdisk_end);
+       printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
+                       ramdisk_end - 1);
 
 
        if (ramdisk_end <= end_of_lowmem) {
@@ -905,10 +906,10 @@ void __init setup_arch(char **cmdline_p)
        setup_bios_corruption_check();
 #endif
 
-       printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n",
-                       max_pfn_mapped<<PAGE_SHIFT);
+       printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
+                       (max_pfn_mapped<<PAGE_SHIFT) - 1);
 
-       setup_trampolines();
+       setup_real_mode();
 
        init_gbpages();
 
@@ -925,6 +926,7 @@ void __init setup_arch(char **cmdline_p)
        }
 #endif
        memblock.current_limit = get_max_mapped();
+       dma_contiguous_reserve(0);
 
        /*
         * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
@@ -966,6 +968,8 @@ void __init setup_arch(char **cmdline_p)
        if (boot_cpu_data.cpuid_level >= 0) {
                /* A CPU has %cr4 if and only if it has CPUID */
                mmu_cr4_features = read_cr4();
+               if (trampoline_cr4_features)
+                       *trampoline_cr4_features = mmu_cr4_features;
        }
 
 #ifdef CONFIG_X86_32
index 965dfda0fd5e442fa88a13e5aeff0a9b98e73029..21af737053aad05fb726a0e1023dfad428064518 100644 (file)
@@ -555,7 +555,6 @@ unsigned long sys_sigreturn(struct pt_regs *regs)
                                    sizeof(frame->extramask))))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->sc, &ax))
@@ -581,7 +580,6 @@ long sys_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
@@ -647,42 +645,28 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                struct pt_regs *regs)
 {
        int usig = signr_convert(sig);
-       sigset_t *set = &current->blocked;
-       int ret;
-
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
-               set = &current->saved_sigmask;
+       sigset_t *set = sigmask_to_save();
 
        /* Set up the stack frame */
        if (is_ia32) {
                if (ka->sa.sa_flags & SA_SIGINFO)
-                       ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
+                       return ia32_setup_rt_frame(usig, ka, info, set, regs);
                else
-                       ret = ia32_setup_frame(usig, ka, set, regs);
+                       return ia32_setup_frame(usig, ka, set, regs);
 #ifdef CONFIG_X86_X32_ABI
        } else if (is_x32) {
-               ret = x32_setup_rt_frame(usig, ka, info,
+               return x32_setup_rt_frame(usig, ka, info,
                                         (compat_sigset_t *)set, regs);
 #endif
        } else {
-               ret = __setup_rt_frame(sig, ka, info, set, regs);
-       }
-
-       if (ret) {
-               force_sigsegv(sig, current);
-               return -EFAULT;
+               return __setup_rt_frame(sig, ka, info, set, regs);
        }
-
-       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-       return ret;
 }
 
-static int
+static void
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
                struct pt_regs *regs)
 {
-       int ret;
-
        /* Are we from a system call? */
        if (syscall_get_nr(current, regs) >= 0) {
                /* If so, check system call restarting.. */
@@ -713,10 +697,10 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
            likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
                regs->flags &= ~X86_EFLAGS_TF;
 
-       ret = setup_rt_frame(sig, ka, info, regs);
-
-       if (ret)
-               return ret;
+       if (setup_rt_frame(sig, ka, info, regs) < 0) {
+               force_sigsegv(sig, current);
+               return;
+       }
 
        /*
         * Clear the direction flag as per the ABI for function entry.
@@ -731,12 +715,8 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
         */
        regs->flags &= ~X86_EFLAGS_TF;
 
-       block_sigmask(ka, sig);
-
-       tracehook_signal_handler(sig, info, ka, regs,
-                                test_thread_flag(TIF_SINGLESTEP));
-
-       return 0;
+       signal_delivered(sig, info, ka, regs,
+                        test_thread_flag(TIF_SINGLESTEP));
 }
 
 #ifdef CONFIG_X86_32
@@ -757,16 +737,6 @@ static void do_signal(struct pt_regs *regs)
        siginfo_t info;
        int signr;
 
-       /*
-        * We want the common case to go fast, which is why we may in certain
-        * cases get here from kernel mode. Just return without doing anything
-        * if so.
-        * X86_32: vm86 regs switched out by assembly code before reaching
-        * here, so testing against kernel CS suffices.
-        */
-       if (!user_mode(regs))
-               return;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee! Actually deliver the signal.  */
@@ -796,10 +766,7 @@ static void do_signal(struct pt_regs *regs)
         * If there's no signal to deliver, we just put the saved sigmask
         * back.
         */
-       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               set_current_blocked(&current->saved_sigmask);
-       }
+       restore_saved_sigmask();
 }
 
 /*
@@ -827,8 +794,6 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
        }
        if (thread_info_flags & _TIF_USER_RETURN_NOTIFY)
                fire_user_return_notifiers();
@@ -936,7 +901,6 @@ asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs)
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
index 433529e29be479b98a41cee06d601f1a389daf2e..f56f96da77f57e011b64e3e69cbabdc76ed3d442 100644 (file)
@@ -57,7 +57,7 @@
 #include <asm/nmi.h>
 #include <asm/irq.h>
 #include <asm/idle.h>
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
 #include <asm/cpu.h>
 #include <asm/numa.h>
 #include <asm/pgtable.h>
@@ -73,6 +73,8 @@
 #include <asm/smpboot_hooks.h>
 #include <asm/i8259.h>
 
+#include <asm/realmode.h>
+
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
@@ -660,8 +662,12 @@ static void __cpuinit announce_cpu(int cpu, int apicid)
  */
 static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 {
+       volatile u32 *trampoline_status =
+               (volatile u32 *) __va(real_mode_header->trampoline_status);
+       /* start_ip had better be page-aligned! */
+       unsigned long start_ip = real_mode_header->trampoline_start;
+
        unsigned long boot_error = 0;
-       unsigned long start_ip;
        int timeout;
 
        alternatives_smp_switch(1);
@@ -684,9 +690,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
        initial_code = (unsigned long)start_secondary;
        stack_start  = idle->thread.sp;
 
-       /* start_ip had better be page-aligned! */
-       start_ip = trampoline_address();
-
        /* So we see what's up */
        announce_cpu(cpu, apicid);
 
@@ -749,8 +752,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
                        pr_debug("CPU%d: has booted.\n", cpu);
                } else {
                        boot_error = 1;
-                       if (*(volatile u32 *)TRAMPOLINE_SYM(trampoline_status)
-                           == 0xA5A5A5A5)
+                       if (*trampoline_status == 0xA5A5A5A5)
                                /* trampoline started but...? */
                                pr_err("CPU%d: Stuck ??\n", cpu);
                        else
@@ -776,7 +778,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
        }
 
        /* mark "stuck" area as not stuck */
-       *(volatile u32 *)TRAMPOLINE_SYM(trampoline_status) = 0;
+       *trampoline_status = 0;
 
        if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
                /*
index 6410744ac5cb7249544fb9424853b0e88f28679f..f84fe00fad48a4e1c2273a9f46a18b47af96204c 100644 (file)
@@ -32,7 +32,7 @@
 #include <linux/mm.h>
 #include <linux/tboot.h>
 
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
 #include <asm/processor.h>
 #include <asm/bootparam.h>
 #include <asm/pgtable.h>
@@ -44,7 +44,7 @@
 #include <asm/e820.h>
 #include <asm/io.h>
 
-#include "acpi/realmode/wakeup.h"
+#include "../realmode/rm/wakeup.h"
 
 /* Global pointer to shared data; NULL means no measured launch. */
 struct tboot *tboot __read_mostly;
@@ -201,7 +201,8 @@ static int tboot_setup_sleep(void)
                add_mac_region(e820.map[i].addr, e820.map[i].size);
        }
 
-       tboot->acpi_sinfo.kernel_s3_resume_vector = acpi_wakeup_address;
+       tboot->acpi_sinfo.kernel_s3_resume_vector =
+               real_mode_header->wakeup_start;
 
        return 0;
 }
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
deleted file mode 100644 (file)
index a73b610..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-#include <linux/io.h>
-#include <linux/memblock.h>
-
-#include <asm/trampoline.h>
-#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
-
-unsigned char *x86_trampoline_base;
-
-void __init setup_trampolines(void)
-{
-       phys_addr_t mem;
-       size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start);
-
-       /* Has to be in very low memory so we can execute real-mode AP code. */
-       mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
-       if (!mem)
-               panic("Cannot allocate trampoline\n");
-
-       x86_trampoline_base = __va(mem);
-       memblock_reserve(mem, size);
-
-       printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
-              x86_trampoline_base, (unsigned long long)mem, size);
-
-       memcpy(x86_trampoline_base, x86_trampoline_start, size);
-}
-
-/*
- * setup_trampolines() gets called very early, to guarantee the
- * availability of low memory.  This is before the proper kernel page
- * tables are set up, so we cannot set page permissions in that
- * function.  Thus, we use an arch_initcall instead.
- */
-static int __init configure_trampolines(void)
-{
-       size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start);
-
-       set_memory_x((unsigned long)x86_trampoline_base, size >> PAGE_SHIFT);
-       return 0;
-}
-arch_initcall(configure_trampolines);
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
deleted file mode 100644 (file)
index 451c0a7..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- *
- *     Trampoline.S    Derived from Setup.S by Linus Torvalds
- *
- *     4 Jan 1997 Michael Chastain: changed to gnu as.
- *
- *     This is only used for booting secondary CPUs in SMP machine
- *
- *     Entry: CS:IP point to the start of our code, we are 
- *     in real mode with no stack, but the rest of the 
- *     trampoline page to make our stack and everything else
- *     is a mystery.
- *
- *     We jump into arch/x86/kernel/head_32.S.
- *
- *     On entry to trampoline_data, the processor is in real mode
- *     with 16-bit addressing and 16-bit data.  CS has some value
- *     and IP is zero.  Thus, data addresses need to be absolute
- *     (no relocation) and are taken with regard to r_base.
- *
- *     If you work on this file, check the object module with
- *     objdump --reloc to make sure there are no relocation
- *     entries except for:
- *
- *     TYPE              VALUE
- *     R_386_32          startup_32_smp
- *     R_386_32          boot_gdt
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/segment.h>
-#include <asm/page_types.h>
-
-#ifdef CONFIG_SMP
-
-       .section ".x86_trampoline","a"
-       .balign PAGE_SIZE
-       .code16
-
-ENTRY(trampoline_data)
-r_base = .
-       wbinvd                  # Needed for NUMA-Q should be harmless for others
-       mov     %cs, %ax        # Code and data in the same place
-       mov     %ax, %ds
-
-       cli                     # We should be safe anyway
-
-       movl    $0xA5A5A5A5, trampoline_status - r_base
-                               # write marker for master knows we're running
-
-       /* GDT tables in non default location kernel can be beyond 16MB and
-        * lgdt will not be able to load the address as in real mode default
-        * operand size is 16bit. Use lgdtl instead to force operand size
-        * to 32 bit.
-        */
-
-       lidtl   boot_idt_descr - r_base # load idt with 0, 0
-       lgdtl   boot_gdt_descr - r_base # load gdt with whatever is appropriate
-
-       xor     %ax, %ax
-       inc     %ax             # protected mode (PE) bit
-       lmsw    %ax             # into protected mode
-       # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
-       ljmpl   $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
-
-       # These need to be in the same 64K segment as the above;
-       # hence we don't use the boot_gdt_descr defined in head.S
-boot_gdt_descr:
-       .word   __BOOT_DS + 7                   # gdt limit
-       .long   boot_gdt - __PAGE_OFFSET        # gdt base
-
-boot_idt_descr:
-       .word   0                               # idt limit = 0
-       .long   0                               # idt base = 0L
-
-ENTRY(trampoline_status)
-       .long   0
-
-.globl trampoline_end
-trampoline_end:
-
-#endif /* CONFIG_SMP */
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
deleted file mode 100644 (file)
index 09ff517..0000000
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- *
- *     Trampoline.S    Derived from Setup.S by Linus Torvalds
- *
- *     4 Jan 1997 Michael Chastain: changed to gnu as.
- *     15 Sept 2005 Eric Biederman: 64bit PIC support
- *
- *     Entry: CS:IP point to the start of our code, we are 
- *     in real mode with no stack, but the rest of the 
- *     trampoline page to make our stack and everything else
- *     is a mystery.
- *
- *     On entry to trampoline_data, the processor is in real mode
- *     with 16-bit addressing and 16-bit data.  CS has some value
- *     and IP is zero.  Thus, data addresses need to be absolute
- *     (no relocation) and are taken with regard to r_base.
- *
- *     With the addition of trampoline_level4_pgt this code can
- *     now enter a 64bit kernel that lives at arbitrary 64bit
- *     physical addresses.
- *
- *     If you work on this file, check the object module with objdump
- *     --full-contents --reloc to make sure there are no relocation
- *     entries.
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/pgtable_types.h>
-#include <asm/page_types.h>
-#include <asm/msr.h>
-#include <asm/segment.h>
-#include <asm/processor-flags.h>
-
-       .section ".x86_trampoline","a"
-       .balign PAGE_SIZE
-       .code16
-
-ENTRY(trampoline_data)
-r_base = .
-       cli                     # We should be safe anyway
-       wbinvd
-       mov     %cs, %ax        # Code and data in the same place
-       mov     %ax, %ds
-       mov     %ax, %es
-       mov     %ax, %ss
-
-
-       movl    $0xA5A5A5A5, trampoline_status - r_base
-                               # write marker for master knows we're running
-
-                                       # Setup stack
-       movw    $(trampoline_stack_end - r_base), %sp
-
-       call    verify_cpu              # Verify the cpu supports long mode
-       testl   %eax, %eax              # Check for return code
-       jnz     no_longmode
-
-       mov     %cs, %ax
-       movzx   %ax, %esi               # Find the 32bit trampoline location
-       shll    $4, %esi
-
-                                       # Fixup the absolute vectors
-       leal    (startup_32 - r_base)(%esi), %eax
-       movl    %eax, startup_32_vector - r_base
-       leal    (startup_64 - r_base)(%esi), %eax
-       movl    %eax, startup_64_vector - r_base
-       leal    (tgdt - r_base)(%esi), %eax
-       movl    %eax, (tgdt + 2 - r_base)
-
-       /*
-        * GDT tables in non default location kernel can be beyond 16MB and
-        * lgdt will not be able to load the address as in real mode default
-        * operand size is 16bit. Use lgdtl instead to force operand size
-        * to 32 bit.
-        */
-
-       lidtl   tidt - r_base   # load idt with 0, 0
-       lgdtl   tgdt - r_base   # load gdt with whatever is appropriate
-
-       mov     $X86_CR0_PE, %ax        # protected mode (PE) bit
-       lmsw    %ax                     # into protected mode
-
-       # flush prefetch and jump to startup_32
-       ljmpl   *(startup_32_vector - r_base)
-
-       .code32
-       .balign 4
-startup_32:
-       movl    $__KERNEL_DS, %eax      # Initialize the %ds segment register
-       movl    %eax, %ds
-
-       movl    $X86_CR4_PAE, %eax
-       movl    %eax, %cr4              # Enable PAE mode
-
-                                       # Setup trampoline 4 level pagetables
-       leal    (trampoline_level4_pgt - r_base)(%esi), %eax
-       movl    %eax, %cr3
-
-       movl    $MSR_EFER, %ecx
-       movl    $(1 << _EFER_LME), %eax # Enable Long Mode
-       xorl    %edx, %edx
-       wrmsr
-
-       # Enable paging and in turn activate Long Mode
-       # Enable protected mode
-       movl    $(X86_CR0_PG | X86_CR0_PE), %eax
-       movl    %eax, %cr0
-
-       /*
-        * At this point we're in long mode but in 32bit compatibility mode
-        * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
-        * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
-        * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
-        */
-       ljmp    *(startup_64_vector - r_base)(%esi)
-
-       .code64
-       .balign 4
-startup_64:
-       # Now jump into the kernel using virtual addresses
-       movq    $secondary_startup_64, %rax
-       jmp     *%rax
-
-       .code16
-no_longmode:
-       hlt
-       jmp no_longmode
-#include "verify_cpu.S"
-
-       .balign 4
-       # Careful these need to be in the same 64K segment as the above;
-tidt:
-       .word   0                       # idt limit = 0
-       .word   0, 0                    # idt base = 0L
-
-       # Duplicate the global descriptor table
-       # so the kernel can live anywhere
-       .balign 4
-tgdt:
-       .short  tgdt_end - tgdt         # gdt limit
-       .long   tgdt - r_base
-       .short 0
-       .quad   0x00cf9b000000ffff      # __KERNEL32_CS
-       .quad   0x00af9b000000ffff      # __KERNEL_CS
-       .quad   0x00cf93000000ffff      # __KERNEL_DS
-tgdt_end:
-
-       .balign 4
-startup_32_vector:
-       .long   startup_32 - r_base
-       .word   __KERNEL32_CS, 0
-
-       .balign 4
-startup_64_vector:
-       .long   startup_64 - r_base
-       .word   __KERNEL_CS, 0
-
-       .balign 4
-ENTRY(trampoline_status)
-       .long   0
-
-trampoline_stack:
-       .org 0x1000
-trampoline_stack_end:
-ENTRY(trampoline_level4_pgt)
-       .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-       .fill   510,8,0
-       .quad   level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
-
-ENTRY(trampoline_end)
index ff08457a025da1c6a15a5b18ce639f0eb80476eb..05b31d92f69cdf7b7e9ccc22c82835a5e6286bbc 100644 (file)
@@ -303,8 +303,12 @@ gp_in_kernel:
 dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
-       /* ftrace must be first, everything else may cause a recursive crash */
-       if (unlikely(modifying_ftrace_code) && ftrace_int3_handler(regs))
+       /*
+        * ftrace must be first, everything else may cause a recursive crash.
+        * See note by declaration of modifying_ftrace_code in ftrace.c
+        */
+       if (unlikely(atomic_read(&modifying_ftrace_code)) &&
+           ftrace_int3_handler(regs))
                return;
 #endif
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
index 0f703f10901a96d6b2d24e9f93559d62bcc2f63a..22a1530146a8740ab14efed3ca8c0fce5ce68959 100644 (file)
@@ -197,18 +197,6 @@ SECTIONS
 
        INIT_DATA_SECTION(16)
 
-       /*
-        * Code and data for a variety of lowlevel trampolines, to be
-        * copied into base memory (< 1 MiB) during initialization.
-        * Since it is copied early, the main copy can be discarded
-        * afterwards.
-        */
-        .x86_trampoline : AT(ADDR(.x86_trampoline) - LOAD_OFFSET) {
-               x86_trampoline_start = .;
-               *(.x86_trampoline)
-               x86_trampoline_end = .;
-       }
-
        .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
                __x86_cpu_dev_start = .;
                *(.x86_cpu_dev.init)
index 1a7fe868f375cb6bccb281599359b8c2dbad8073..a28f338843eaa083a046ea69dca78f6e3ff541fe 100644 (file)
@@ -36,6 +36,7 @@ config KVM
        select TASKSTATS
        select TASK_DELAY_ACCT
        select PERF_EVENTS
+       select HAVE_KVM_MSI
        ---help---
          Support hosting fully virtualized guest machines using hardware
          virtualization extensions.  You will need a fairly recent
index 9fed5bedaad6a90b61b5b7300bde5f6f5deee74f..7df1c6d839fb495f9024851987ea622442338d70 100644 (file)
@@ -247,7 +247,8 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 
        /* cpuid 7.0.ebx */
        const u32 kvm_supported_word9_x86_features =
-               F(FSGSBASE) | F(BMI1) | F(AVX2) | F(SMEP) | F(BMI2) | F(ERMS);
+               F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
+               F(BMI2) | F(ERMS) | F(RTM);
 
        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();
@@ -397,7 +398,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
        case KVM_CPUID_SIGNATURE: {
                char signature[12] = "KVMKVMKVM\0\0";
                u32 *sigptr = (u32 *)signature;
-               entry->eax = 0;
+               entry->eax = KVM_CPUID_FEATURES;
                entry->ebx = sigptr[0];
                entry->ecx = sigptr[1];
                entry->edx = sigptr[2];
index 83756223f8aa770b049f980f81fa5b640f697f72..f95d242ee9f72a8f30bf912cf81dd172745dadd9 100644 (file)
 #define Src2FS      (OpFS << Src2Shift)
 #define Src2GS      (OpGS << Src2Shift)
 #define Src2Mask    (OpMask << Src2Shift)
+#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
+#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
+#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
+#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
 
 #define X2(x...) x, x
 #define X3(x...) X2(x), x
@@ -557,6 +561,29 @@ static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
 }
 
+/*
+ * x86 defines three classes of vector instructions: explicitly
+ * aligned, explicitly unaligned, and the rest, which change behaviour
+ * depending on whether they're AVX encoded or not.
+ *
+ * Also included is CMPXCHG16B which is not a vector instruction, yet it is
+ * subject to the same check.
+ */
+static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
+{
+       if (likely(size < 16))
+               return false;
+
+       if (ctxt->d & Aligned)
+               return true;
+       else if (ctxt->d & Unaligned)
+               return false;
+       else if (ctxt->d & Avx)
+               return false;
+       else
+               return true;
+}
+
 static int __linearize(struct x86_emulate_ctxt *ctxt,
                     struct segmented_address addr,
                     unsigned size, bool write, bool fetch,
@@ -621,6 +648,8 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
        }
        if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
                la &= (u32)-1;
+       if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
+               return emulate_gp(ctxt, 0);
        *linear = la;
        return X86EMUL_CONTINUE;
 bad:
@@ -859,6 +888,40 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
        ctxt->ops->put_fpu(ctxt);
 }
 
+static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
+{
+       ctxt->ops->get_fpu(ctxt);
+       switch (reg) {
+       case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
+       case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
+       case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
+       case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
+       case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
+       case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
+       case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
+       case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
+       default: BUG();
+       }
+       ctxt->ops->put_fpu(ctxt);
+}
+
+static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
+{
+       ctxt->ops->get_fpu(ctxt);
+       switch (reg) {
+       case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
+       case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
+       case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
+       case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
+       case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
+       case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
+       case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
+       case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
+       default: BUG();
+       }
+       ctxt->ops->put_fpu(ctxt);
+}
+
 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
                                    struct operand *op)
 {
@@ -875,6 +938,13 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
                read_sse_reg(ctxt, &op->vec_val, reg);
                return;
        }
+       if (ctxt->d & Mmx) {
+               reg &= 7;
+               op->type = OP_MM;
+               op->bytes = 8;
+               op->addr.mm = reg;
+               return;
+       }
 
        op->type = OP_REG;
        if (ctxt->d & ByteOp) {
@@ -902,7 +972,6 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */
        }
 
-       ctxt->modrm = insn_fetch(u8, ctxt);
        ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
        ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
        ctxt->modrm_rm |= (ctxt->modrm & 0x07);
@@ -920,6 +989,12 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                        read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
                        return rc;
                }
+               if (ctxt->d & Mmx) {
+                       op->type = OP_MM;
+                       op->bytes = 8;
+                       op->addr.xmm = ctxt->modrm_rm & 7;
+                       return rc;
+               }
                fetch_register_operand(op);
                return rc;
        }
@@ -1387,6 +1462,9 @@ static int writeback(struct x86_emulate_ctxt *ctxt)
        case OP_XMM:
                write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
                break;
+       case OP_MM:
+               write_mmx_reg(ctxt, &ctxt->dst.mm_val, ctxt->dst.addr.mm);
+               break;
        case OP_NONE:
                /* no writeback */
                break;
@@ -2790,7 +2868,7 @@ static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
 
 static int em_mov(struct x86_emulate_ctxt *ctxt)
 {
-       ctxt->dst.val = ctxt->src.val;
+       memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes);
        return X86EMUL_CONTINUE;
 }
 
@@ -2870,12 +2948,6 @@ static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
        return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
 }
 
-static int em_movdqu(struct x86_emulate_ctxt *ctxt)
-{
-       memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
-       return X86EMUL_CONTINUE;
-}
-
 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
 {
        int rc;
@@ -3061,35 +3133,13 @@ static int em_btc(struct x86_emulate_ctxt *ctxt)
 
 static int em_bsf(struct x86_emulate_ctxt *ctxt)
 {
-       u8 zf;
-
-       __asm__ ("bsf %2, %0; setz %1"
-                : "=r"(ctxt->dst.val), "=q"(zf)
-                : "r"(ctxt->src.val));
-
-       ctxt->eflags &= ~X86_EFLAGS_ZF;
-       if (zf) {
-               ctxt->eflags |= X86_EFLAGS_ZF;
-               /* Disable writeback. */
-               ctxt->dst.type = OP_NONE;
-       }
+       emulate_2op_SrcV_nobyte(ctxt, "bsf");
        return X86EMUL_CONTINUE;
 }
 
 static int em_bsr(struct x86_emulate_ctxt *ctxt)
 {
-       u8 zf;
-
-       __asm__ ("bsr %2, %0; setz %1"
-                : "=r"(ctxt->dst.val), "=q"(zf)
-                : "r"(ctxt->src.val));
-
-       ctxt->eflags &= ~X86_EFLAGS_ZF;
-       if (zf) {
-               ctxt->eflags |= X86_EFLAGS_ZF;
-               /* Disable writeback. */
-               ctxt->dst.type = OP_NONE;
-       }
+       emulate_2op_SrcV_nobyte(ctxt, "bsr");
        return X86EMUL_CONTINUE;
 }
 
@@ -3286,8 +3336,8 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
                      .check_perm = (_p) }
 #define N    D(0)
 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
-#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
-#define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
+#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
+#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
 #define II(_f, _e, _i) \
        { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
@@ -3307,25 +3357,25 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
                I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
 
 static struct opcode group7_rm1[] = {
-       DI(SrcNone | ModRM | Priv, monitor),
-       DI(SrcNone | ModRM | Priv, mwait),
+       DI(SrcNone | Priv, monitor),
+       DI(SrcNone | Priv, mwait),
        N, N, N, N, N, N,
 };
 
 static struct opcode group7_rm3[] = {
-       DIP(SrcNone | ModRM | Prot | Priv, vmrun,   check_svme_pa),
-       II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
-       DIP(SrcNone | ModRM | Prot | Priv, vmload,  check_svme_pa),
-       DIP(SrcNone | ModRM | Prot | Priv, vmsave,  check_svme_pa),
-       DIP(SrcNone | ModRM | Prot | Priv, stgi,    check_svme),
-       DIP(SrcNone | ModRM | Prot | Priv, clgi,    check_svme),
-       DIP(SrcNone | ModRM | Prot | Priv, skinit,  check_svme),
-       DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
+       DIP(SrcNone | Prot | Priv,              vmrun,          check_svme_pa),
+       II(SrcNone  | Prot | VendorSpecific,    em_vmmcall,     vmmcall),
+       DIP(SrcNone | Prot | Priv,              vmload,         check_svme_pa),
+       DIP(SrcNone | Prot | Priv,              vmsave,         check_svme_pa),
+       DIP(SrcNone | Prot | Priv,              stgi,           check_svme),
+       DIP(SrcNone | Prot | Priv,              clgi,           check_svme),
+       DIP(SrcNone | Prot | Priv,              skinit,         check_svme),
+       DIP(SrcNone | Prot | Priv,              invlpga,        check_svme),
 };
 
 static struct opcode group7_rm7[] = {
        N,
-       DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
+       DIP(SrcNone, rdtscp, check_rdtsc),
        N, N, N, N, N, N,
 };
 
@@ -3341,81 +3391,86 @@ static struct opcode group1[] = {
 };
 
 static struct opcode group1A[] = {
-       I(DstMem | SrcNone | ModRM | Mov | Stack, em_pop), N, N, N, N, N, N, N,
+       I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
 };
 
 static struct opcode group3[] = {
-       I(DstMem | SrcImm | ModRM, em_test),
-       I(DstMem | SrcImm | ModRM, em_test),
-       I(DstMem | SrcNone | ModRM | Lock, em_not),
-       I(DstMem | SrcNone | ModRM | Lock, em_neg),
-       I(SrcMem | ModRM, em_mul_ex),
-       I(SrcMem | ModRM, em_imul_ex),
-       I(SrcMem | ModRM, em_div_ex),
-       I(SrcMem | ModRM, em_idiv_ex),
+       I(DstMem | SrcImm, em_test),
+       I(DstMem | SrcImm, em_test),
+       I(DstMem | SrcNone | Lock, em_not),
+       I(DstMem | SrcNone | Lock, em_neg),
+       I(SrcMem, em_mul_ex),
+       I(SrcMem, em_imul_ex),
+       I(SrcMem, em_div_ex),
+       I(SrcMem, em_idiv_ex),
 };
 
 static struct opcode group4[] = {
-       I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
-       I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
+       I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
+       I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
        N, N, N, N, N, N,
 };
 
 static struct opcode group5[] = {
-       I(DstMem | SrcNone | ModRM | Lock, em_grp45),
-       I(DstMem | SrcNone | ModRM | Lock, em_grp45),
-       I(SrcMem | ModRM | Stack, em_grp45),
-       I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
-       I(SrcMem | ModRM | Stack, em_grp45),
-       I(SrcMemFAddr | ModRM | ImplicitOps, em_grp45),
-       I(SrcMem | ModRM | Stack, em_grp45), N,
+       I(DstMem | SrcNone | Lock,              em_grp45),
+       I(DstMem | SrcNone | Lock,              em_grp45),
+       I(SrcMem | Stack,                       em_grp45),
+       I(SrcMemFAddr | ImplicitOps | Stack,    em_call_far),
+       I(SrcMem | Stack,                       em_grp45),
+       I(SrcMemFAddr | ImplicitOps,            em_grp45),
+       I(SrcMem | Stack,                       em_grp45), N,
 };
 
 static struct opcode group6[] = {
-       DI(ModRM | Prot,        sldt),
-       DI(ModRM | Prot,        str),
-       DI(ModRM | Prot | Priv, lldt),
-       DI(ModRM | Prot | Priv, ltr),
+       DI(Prot,        sldt),
+       DI(Prot,        str),
+       DI(Prot | Priv, lldt),
+       DI(Prot | Priv, ltr),
        N, N, N, N,
 };
 
 static struct group_dual group7 = { {
-       DI(ModRM | Mov | DstMem | Priv, sgdt),
-       DI(ModRM | Mov | DstMem | Priv, sidt),
-       II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
-       II(ModRM | SrcMem | Priv, em_lidt, lidt),
-       II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
-       II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
-       II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
+       DI(Mov | DstMem | Priv,                 sgdt),
+       DI(Mov | DstMem | Priv,                 sidt),
+       II(SrcMem | Priv,                       em_lgdt, lgdt),
+       II(SrcMem | Priv,                       em_lidt, lidt),
+       II(SrcNone | DstMem | Mov,              em_smsw, smsw), N,
+       II(SrcMem16 | Mov | Priv,               em_lmsw, lmsw),
+       II(SrcMem | ByteOp | Priv | NoAccess,   em_invlpg, invlpg),
 }, {
-       I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
+       I(SrcNone | Priv | VendorSpecific,      em_vmcall),
        EXT(0, group7_rm1),
        N, EXT(0, group7_rm3),
-       II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
-       II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
+       II(SrcNone | DstMem | Mov,              em_smsw, smsw), N,
+       II(SrcMem16 | Mov | Priv,               em_lmsw, lmsw),
+       EXT(0, group7_rm7),
 } };
 
 static struct opcode group8[] = {
        N, N, N, N,
-       I(DstMem | SrcImmByte | ModRM, em_bt),
-       I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_bts),
-       I(DstMem | SrcImmByte | ModRM | Lock, em_btr),
-       I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_btc),
+       I(DstMem | SrcImmByte,                          em_bt),
+       I(DstMem | SrcImmByte | Lock | PageTable,       em_bts),
+       I(DstMem | SrcImmByte | Lock,                   em_btr),
+       I(DstMem | SrcImmByte | Lock | PageTable,       em_btc),
 };
 
 static struct group_dual group9 = { {
-       N, I(DstMem64 | ModRM | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
+       N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
 }, {
        N, N, N, N, N, N, N, N,
 } };
 
 static struct opcode group11[] = {
-       I(DstMem | SrcImm | ModRM | Mov | PageTable, em_mov),
+       I(DstMem | SrcImm | Mov | PageTable, em_mov),
        X7(D(Undefined)),
 };
 
 static struct gprefix pfx_0f_6f_0f_7f = {
-       N, N, N, I(Sse, em_movdqu),
+       I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
+};
+
+static struct gprefix pfx_vmovntpx = {
+       I(0, em_mov), N, N, N,
 };
 
 static struct opcode opcode_table[256] = {
@@ -3464,10 +3519,10 @@ static struct opcode opcode_table[256] = {
        /* 0x70 - 0x7F */
        X16(D(SrcImmByte)),
        /* 0x80 - 0x87 */
-       G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
-       G(DstMem | SrcImm | ModRM | Group, group1),
-       G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
-       G(DstMem | SrcImmByte | ModRM | Group, group1),
+       G(ByteOp | DstMem | SrcImm, group1),
+       G(DstMem | SrcImm, group1),
+       G(ByteOp | DstMem | SrcImm | No64, group1),
+       G(DstMem | SrcImmByte, group1),
        I2bv(DstMem | SrcReg | ModRM, em_test),
        I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
        /* 0x88 - 0x8F */
@@ -3549,7 +3604,8 @@ static struct opcode twobyte_table[256] = {
        IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write),
        IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write),
        N, N, N, N,
-       N, N, N, N, N, N, N, N,
+       N, N, N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx),
+       N, N, N, N,
        /* 0x30 - 0x3F */
        II(ImplicitOps | Priv, em_wrmsr, wrmsr),
        IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
@@ -3897,17 +3953,16 @@ done_prefixes:
        }
        ctxt->d = opcode.flags;
 
+       if (ctxt->d & ModRM)
+               ctxt->modrm = insn_fetch(u8, ctxt);
+
        while (ctxt->d & GroupMask) {
                switch (ctxt->d & GroupMask) {
                case Group:
-                       ctxt->modrm = insn_fetch(u8, ctxt);
-                       --ctxt->_eip;
                        goffset = (ctxt->modrm >> 3) & 7;
                        opcode = opcode.u.group[goffset];
                        break;
                case GroupDual:
-                       ctxt->modrm = insn_fetch(u8, ctxt);
-                       --ctxt->_eip;
                        goffset = (ctxt->modrm >> 3) & 7;
                        if ((ctxt->modrm >> 6) == 3)
                                opcode = opcode.u.gdual->mod3[goffset];
@@ -3960,6 +4015,8 @@ done_prefixes:
 
        if (ctxt->d & Sse)
                ctxt->op_bytes = 16;
+       else if (ctxt->d & Mmx)
+               ctxt->op_bytes = 8;
 
        /* ModRM and SIB bytes. */
        if (ctxt->d & ModRM) {
@@ -4030,6 +4087,35 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
        return false;
 }
 
+static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
+{
+       bool fault = false;
+
+       ctxt->ops->get_fpu(ctxt);
+       asm volatile("1: fwait \n\t"
+                    "2: \n\t"
+                    ".pushsection .fixup,\"ax\" \n\t"
+                    "3: \n\t"
+                    "movb $1, %[fault] \n\t"
+                    "jmp 2b \n\t"
+                    ".popsection \n\t"
+                    _ASM_EXTABLE(1b, 3b)
+                    : [fault]"+qm"(fault));
+       ctxt->ops->put_fpu(ctxt);
+
+       if (unlikely(fault))
+               return emulate_exception(ctxt, MF_VECTOR, 0, false);
+
+       return X86EMUL_CONTINUE;
+}
+
+static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
+                                      struct operand *op)
+{
+       if (op->type == OP_MM)
+               read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
+}
+
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 {
        struct x86_emulate_ops *ops = ctxt->ops;
@@ -4054,18 +4140,31 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
                goto done;
        }
 
-       if ((ctxt->d & Sse)
-           && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
-               || !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
+       if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
+           || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
                rc = emulate_ud(ctxt);
                goto done;
        }
 
-       if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
+       if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
                rc = emulate_nm(ctxt);
                goto done;
        }
 
+       if (ctxt->d & Mmx) {
+               rc = flush_pending_x87_faults(ctxt);
+               if (rc != X86EMUL_CONTINUE)
+                       goto done;
+               /*
+                * Now that we know the fpu is exception safe, we can fetch
+                * operands from it.
+                */
+               fetch_possible_mmx_operand(ctxt, &ctxt->src);
+               fetch_possible_mmx_operand(ctxt, &ctxt->src2);
+               if (!(ctxt->d & Mov))
+                       fetch_possible_mmx_operand(ctxt, &ctxt->dst);
+       }
+
        if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
                rc = emulator_check_intercept(ctxt, ctxt->intercept,
                                              X86_ICPT_PRE_EXCEPT);
index d68f99df690c72ba81b53f8436c77c2d420d56a5..adba28f88d1a9d56c45e9716ac8dc8a69958144c 100644 (file)
@@ -34,7 +34,6 @@
 
 #include <linux/kvm_host.h>
 #include <linux/slab.h>
-#include <linux/workqueue.h>
 
 #include "irq.h"
 #include "i8254.h"
@@ -249,7 +248,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
                /* in this case, we had multiple outstanding pit interrupts
                 * that we needed to inject.  Reinject
                 */
-               queue_work(ps->pit->wq, &ps->pit->expired);
+               queue_kthread_work(&ps->pit->worker, &ps->pit->expired);
        ps->irq_ack = 1;
        spin_unlock(&ps->inject_lock);
 }
@@ -270,7 +269,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
 static void destroy_pit_timer(struct kvm_pit *pit)
 {
        hrtimer_cancel(&pit->pit_state.pit_timer.timer);
-       cancel_work_sync(&pit->expired);
+       flush_kthread_work(&pit->expired);
 }
 
 static bool kpit_is_periodic(struct kvm_timer *ktimer)
@@ -284,7 +283,7 @@ static struct kvm_timer_ops kpit_ops = {
        .is_periodic = kpit_is_periodic,
 };
 
-static void pit_do_work(struct work_struct *work)
+static void pit_do_work(struct kthread_work *work)
 {
        struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
        struct kvm *kvm = pit->kvm;
@@ -328,7 +327,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 
        if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
                atomic_inc(&ktimer->pending);
-               queue_work(pt->wq, &pt->expired);
+               queue_kthread_work(&pt->worker, &pt->expired);
        }
 
        if (ktimer->t_ops->is_periodic(ktimer)) {
@@ -353,7 +352,7 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
 
        /* TODO The new value only affected after the retriggered */
        hrtimer_cancel(&pt->timer);
-       cancel_work_sync(&ps->pit->expired);
+       flush_kthread_work(&ps->pit->expired);
        pt->period = interval;
        ps->is_periodic = is_period;
 
@@ -669,6 +668,8 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 {
        struct kvm_pit *pit;
        struct kvm_kpit_state *pit_state;
+       struct pid *pid;
+       pid_t pid_nr;
        int ret;
 
        pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
@@ -685,14 +686,20 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
        mutex_lock(&pit->pit_state.lock);
        spin_lock_init(&pit->pit_state.inject_lock);
 
-       pit->wq = create_singlethread_workqueue("kvm-pit-wq");
-       if (!pit->wq) {
+       pid = get_pid(task_tgid(current));
+       pid_nr = pid_vnr(pid);
+       put_pid(pid);
+
+       init_kthread_worker(&pit->worker);
+       pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
+                                      "kvm-pit/%d", pid_nr);
+       if (IS_ERR(pit->worker_task)) {
                mutex_unlock(&pit->pit_state.lock);
                kvm_free_irq_source_id(kvm, pit->irq_source_id);
                kfree(pit);
                return NULL;
        }
-       INIT_WORK(&pit->expired, pit_do_work);
+       init_kthread_work(&pit->expired, pit_do_work);
 
        kvm->arch.vpit = pit;
        pit->kvm = kvm;
@@ -736,7 +743,7 @@ fail:
        kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
        kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
        kvm_free_irq_source_id(kvm, pit->irq_source_id);
-       destroy_workqueue(pit->wq);
+       kthread_stop(pit->worker_task);
        kfree(pit);
        return NULL;
 }
@@ -756,10 +763,10 @@ void kvm_free_pit(struct kvm *kvm)
                mutex_lock(&kvm->arch.vpit->pit_state.lock);
                timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
                hrtimer_cancel(timer);
-               cancel_work_sync(&kvm->arch.vpit->expired);
+               flush_kthread_work(&kvm->arch.vpit->expired);
+               kthread_stop(kvm->arch.vpit->worker_task);
                kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
                mutex_unlock(&kvm->arch.vpit->pit_state.lock);
-               destroy_workqueue(kvm->arch.vpit->wq);
                kfree(kvm->arch.vpit);
        }
 }
index 51a97426e7911ff77f0d3f024c95456bf3e84cab..fdf40425ea1de2946bd40e31a1d1a6045ccdcb15 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __I8254_H
 #define __I8254_H
 
+#include <linux/kthread.h>
+
 #include "iodev.h"
 
 struct kvm_kpit_channel_state {
@@ -39,8 +41,9 @@ struct kvm_pit {
        struct kvm_kpit_state pit_state;
        int irq_source_id;
        struct kvm_irq_mask_notifier mask_notifier;
-       struct workqueue_struct *wq;
-       struct work_struct expired;
+       struct kthread_worker worker;
+       struct task_struct *worker_task;
+       struct kthread_work expired;
 };
 
 #define KVM_PIT_BASE_ADDRESS       0x40
index 858432287ab626dee9ce4568404fdf4f968a89a2..93c15743f1ee155bc5ebd96a3be3aaca2a300e93 100644 (file)
@@ -92,6 +92,11 @@ static inline int apic_test_and_clear_vector(int vec, void *bitmap)
        return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
 }
 
+static inline int apic_test_vector(int vec, void *bitmap)
+{
+       return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
+}
+
 static inline void apic_set_vector(int vec, void *bitmap)
 {
        set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
@@ -480,7 +485,6 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 static void apic_set_eoi(struct kvm_lapic *apic)
 {
        int vector = apic_find_highest_isr(apic);
-       int trigger_mode;
        /*
         * Not every write EOI will has corresponding ISR,
         * one example is when Kernel check timer on setup_IO_APIC
@@ -491,12 +495,15 @@ static void apic_set_eoi(struct kvm_lapic *apic)
        apic_clear_vector(vector, apic->regs + APIC_ISR);
        apic_update_ppr(apic);
 
-       if (apic_test_and_clear_vector(vector, apic->regs + APIC_TMR))
-               trigger_mode = IOAPIC_LEVEL_TRIG;
-       else
-               trigger_mode = IOAPIC_EDGE_TRIG;
-       if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI))
+       if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
+           kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
+               int trigger_mode;
+               if (apic_test_vector(vector, apic->regs + APIC_TMR))
+                       trigger_mode = IOAPIC_LEVEL_TRIG;
+               else
+                       trigger_mode = IOAPIC_EDGE_TRIG;
                kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
+       }
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
 }
 
@@ -1081,6 +1088,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
        apic_update_ppr(apic);
 
        vcpu->arch.apic_arb_prio = 0;
+       vcpu->arch.apic_attention = 0;
 
        apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
                   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
@@ -1280,7 +1288,7 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
        u32 data;
        void *vapic;
 
-       if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
+       if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
                return;
 
        vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
@@ -1297,7 +1305,7 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
        struct kvm_lapic *apic;
        void *vapic;
 
-       if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
+       if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
                return;
 
        apic = vcpu->arch.apic;
@@ -1317,10 +1325,11 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
 
 void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
 {
-       if (!irqchip_in_kernel(vcpu->kvm))
-               return;
-
        vcpu->arch.apic->vapic_addr = vapic_addr;
+       if (vapic_addr)
+               __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
+       else
+               __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
 }
 
 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
index 4cb164268846302ac5acc122bc434246b4175605..be3cea4407ffad63824c068332079baf16336012 100644 (file)
@@ -135,8 +135,6 @@ module_param(dbg, bool, 0644);
 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
                        | PT64_NX_MASK)
 
-#define PTE_LIST_EXT 4
-
 #define ACC_EXEC_MASK    1
 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
 #define ACC_USER_MASK    PT_USER_MASK
@@ -151,6 +149,9 @@ module_param(dbg, bool, 0644);
 
 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 
+/* make pte_list_desc fit well in cache line */
+#define PTE_LIST_EXT 3
+
 struct pte_list_desc {
        u64 *sptes[PTE_LIST_EXT];
        struct pte_list_desc *more;
@@ -550,19 +551,29 @@ static u64 mmu_spte_get_lockless(u64 *sptep)
 
 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
 {
-       rcu_read_lock();
-       atomic_inc(&vcpu->kvm->arch.reader_counter);
-
-       /* Increase the counter before walking shadow page table */
-       smp_mb__after_atomic_inc();
+       /*
+        * Prevent page table teardown by making any free-er wait during
+        * kvm_flush_remote_tlbs() IPI to all active vcpus.
+        */
+       local_irq_disable();
+       vcpu->mode = READING_SHADOW_PAGE_TABLES;
+       /*
+        * Make sure a following spte read is not reordered ahead of the write
+        * to vcpu->mode.
+        */
+       smp_mb();
 }
 
 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
 {
-       /* Decrease the counter after walking shadow page table finished */
-       smp_mb__before_atomic_dec();
-       atomic_dec(&vcpu->kvm->arch.reader_counter);
-       rcu_read_unlock();
+       /*
+        * Make sure the write to vcpu->mode is not reordered in front of
+        * reads to sptes.  If it does, kvm_commit_zap_page() can see us
+        * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
+        */
+       smp_mb();
+       vcpu->mode = OUTSIDE_GUEST_MODE;
+       local_irq_enable();
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -841,32 +852,6 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
        return count;
 }
 
-static u64 *pte_list_next(unsigned long *pte_list, u64 *spte)
-{
-       struct pte_list_desc *desc;
-       u64 *prev_spte;
-       int i;
-
-       if (!*pte_list)
-               return NULL;
-       else if (!(*pte_list & 1)) {
-               if (!spte)
-                       return (u64 *)*pte_list;
-               return NULL;
-       }
-       desc = (struct pte_list_desc *)(*pte_list & ~1ul);
-       prev_spte = NULL;
-       while (desc) {
-               for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
-                       if (prev_spte == spte)
-                               return desc->sptes[i];
-                       prev_spte = desc->sptes[i];
-               }
-               desc = desc->more;
-       }
-       return NULL;
-}
-
 static void
 pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
                           int i, struct pte_list_desc *prev_desc)
@@ -987,11 +972,6 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
        return pte_list_add(vcpu, spte, rmapp);
 }
 
-static u64 *rmap_next(unsigned long *rmapp, u64 *spte)
-{
-       return pte_list_next(rmapp, spte);
-}
-
 static void rmap_remove(struct kvm *kvm, u64 *spte)
 {
        struct kvm_mmu_page *sp;
@@ -1004,106 +984,201 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        pte_list_remove(spte, rmapp);
 }
 
+/*
+ * Used by the following functions to iterate through the sptes linked by a
+ * rmap.  All fields are private and not assumed to be used outside.
+ */
+struct rmap_iterator {
+       /* private fields */
+       struct pte_list_desc *desc;     /* holds the sptep if not NULL */
+       int pos;                        /* index of the sptep */
+};
+
+/*
+ * Iteration must be started by this function.  This should also be used after
+ * removing/dropping sptes from the rmap link because in such cases the
+ * information in the itererator may not be valid.
+ *
+ * Returns sptep if found, NULL otherwise.
+ */
+static u64 *rmap_get_first(unsigned long rmap, struct rmap_iterator *iter)
+{
+       if (!rmap)
+               return NULL;
+
+       if (!(rmap & 1)) {
+               iter->desc = NULL;
+               return (u64 *)rmap;
+       }
+
+       iter->desc = (struct pte_list_desc *)(rmap & ~1ul);
+       iter->pos = 0;
+       return iter->desc->sptes[iter->pos];
+}
+
+/*
+ * Must be used with a valid iterator: e.g. after rmap_get_first().
+ *
+ * Returns sptep if found, NULL otherwise.
+ */
+static u64 *rmap_get_next(struct rmap_iterator *iter)
+{
+       if (iter->desc) {
+               if (iter->pos < PTE_LIST_EXT - 1) {
+                       u64 *sptep;
+
+                       ++iter->pos;
+                       sptep = iter->desc->sptes[iter->pos];
+                       if (sptep)
+                               return sptep;
+               }
+
+               iter->desc = iter->desc->more;
+
+               if (iter->desc) {
+                       iter->pos = 0;
+                       /* desc->sptes[0] cannot be NULL */
+                       return iter->desc->sptes[iter->pos];
+               }
+       }
+
+       return NULL;
+}
+
 static void drop_spte(struct kvm *kvm, u64 *sptep)
 {
        if (mmu_spte_clear_track_bits(sptep))
                rmap_remove(kvm, sptep);
 }
 
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
-                              struct kvm_memory_slot *slot)
+static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
 {
-       unsigned long *rmapp;
-       u64 *spte;
-       int i, write_protected = 0;
-
-       rmapp = __gfn_to_rmap(gfn, PT_PAGE_TABLE_LEVEL, slot);
-       spte = rmap_next(rmapp, NULL);
-       while (spte) {
-               BUG_ON(!(*spte & PT_PRESENT_MASK));
-               rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-               if (is_writable_pte(*spte)) {
-                       mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK);
-                       write_protected = 1;
+       u64 *sptep;
+       struct rmap_iterator iter;
+       int write_protected = 0;
+
+       for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+               BUG_ON(!(*sptep & PT_PRESENT_MASK));
+               rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
+
+               if (!is_writable_pte(*sptep)) {
+                       sptep = rmap_get_next(&iter);
+                       continue;
                }
-               spte = rmap_next(rmapp, spte);
-       }
 
-       /* check for huge page mappings */
-       for (i = PT_DIRECTORY_LEVEL;
-            i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-               rmapp = __gfn_to_rmap(gfn, i, slot);
-               spte = rmap_next(rmapp, NULL);
-               while (spte) {
-                       BUG_ON(!(*spte & PT_PRESENT_MASK));
-                       BUG_ON(!is_large_pte(*spte));
-                       pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
-                       if (is_writable_pte(*spte)) {
-                               drop_spte(kvm, spte);
-                               --kvm->stat.lpages;
-                               spte = NULL;
-                               write_protected = 1;
-                       }
-                       spte = rmap_next(rmapp, spte);
+               if (level == PT_PAGE_TABLE_LEVEL) {
+                       mmu_spte_update(sptep, *sptep & ~PT_WRITABLE_MASK);
+                       sptep = rmap_get_next(&iter);
+               } else {
+                       BUG_ON(!is_large_pte(*sptep));
+                       drop_spte(kvm, sptep);
+                       --kvm->stat.lpages;
+                       sptep = rmap_get_first(*rmapp, &iter);
                }
+
+               write_protected = 1;
        }
 
        return write_protected;
 }
 
+/**
+ * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
+ * @kvm: kvm instance
+ * @slot: slot to protect
+ * @gfn_offset: start of the BITS_PER_LONG pages we care about
+ * @mask: indicates which pages we should protect
+ *
+ * Used when we do not need to care about huge page mappings: e.g. during dirty
+ * logging we do not have any such mappings.
+ */
+void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+                                    struct kvm_memory_slot *slot,
+                                    gfn_t gfn_offset, unsigned long mask)
+{
+       unsigned long *rmapp;
+
+       while (mask) {
+               rmapp = &slot->rmap[gfn_offset + __ffs(mask)];
+               __rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL);
+
+               /* clear the first set bit */
+               mask &= mask - 1;
+       }
+}
+
 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
        struct kvm_memory_slot *slot;
+       unsigned long *rmapp;
+       int i;
+       int write_protected = 0;
 
        slot = gfn_to_memslot(kvm, gfn);
-       return kvm_mmu_rmap_write_protect(kvm, gfn, slot);
+
+       for (i = PT_PAGE_TABLE_LEVEL;
+            i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+               rmapp = __gfn_to_rmap(gfn, i, slot);
+               write_protected |= __rmap_write_protect(kvm, rmapp, i);
+       }
+
+       return write_protected;
 }
 
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
                           unsigned long data)
 {
-       u64 *spte;
+       u64 *sptep;
+       struct rmap_iterator iter;
        int need_tlb_flush = 0;
 
-       while ((spte = rmap_next(rmapp, NULL))) {
-               BUG_ON(!(*spte & PT_PRESENT_MASK));
-               rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
-               drop_spte(kvm, spte);
+       while ((sptep = rmap_get_first(*rmapp, &iter))) {
+               BUG_ON(!(*sptep & PT_PRESENT_MASK));
+               rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", sptep, *sptep);
+
+               drop_spte(kvm, sptep);
                need_tlb_flush = 1;
        }
+
        return need_tlb_flush;
 }
 
 static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
                             unsigned long data)
 {
+       u64 *sptep;
+       struct rmap_iterator iter;
        int need_flush = 0;
-       u64 *spte, new_spte;
+       u64 new_spte;
        pte_t *ptep = (pte_t *)data;
        pfn_t new_pfn;
 
        WARN_ON(pte_huge(*ptep));
        new_pfn = pte_pfn(*ptep);
-       spte = rmap_next(rmapp, NULL);
-       while (spte) {
-               BUG_ON(!is_shadow_present_pte(*spte));
-               rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
+
+       for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+               BUG_ON(!is_shadow_present_pte(*sptep));
+               rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", sptep, *sptep);
+
                need_flush = 1;
+
                if (pte_write(*ptep)) {
-                       drop_spte(kvm, spte);
-                       spte = rmap_next(rmapp, NULL);
+                       drop_spte(kvm, sptep);
+                       sptep = rmap_get_first(*rmapp, &iter);
                } else {
-                       new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
+                       new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
                        new_spte |= (u64)new_pfn << PAGE_SHIFT;
 
                        new_spte &= ~PT_WRITABLE_MASK;
                        new_spte &= ~SPTE_HOST_WRITEABLE;
                        new_spte &= ~shadow_accessed_mask;
-                       mmu_spte_clear_track_bits(spte);
-                       mmu_spte_set(spte, new_spte);
-                       spte = rmap_next(rmapp, spte);
+
+                       mmu_spte_clear_track_bits(sptep);
+                       mmu_spte_set(sptep, new_spte);
+                       sptep = rmap_get_next(&iter);
                }
        }
+
        if (need_flush)
                kvm_flush_remote_tlbs(kvm);
 
@@ -1162,7 +1237,8 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                         unsigned long data)
 {
-       u64 *spte;
+       u64 *sptep;
+       struct rmap_iterator iter;
        int young = 0;
 
        /*
@@ -1175,25 +1251,24 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
        if (!shadow_accessed_mask)
                return kvm_unmap_rmapp(kvm, rmapp, data);
 
-       spte = rmap_next(rmapp, NULL);
-       while (spte) {
-               int _young;
-               u64 _spte = *spte;
-               BUG_ON(!(_spte & PT_PRESENT_MASK));
-               _young = _spte & PT_ACCESSED_MASK;
-               if (_young) {
+       for (sptep = rmap_get_first(*rmapp, &iter); sptep;
+            sptep = rmap_get_next(&iter)) {
+               BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+               if (*sptep & PT_ACCESSED_MASK) {
                        young = 1;
-                       clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+                       clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)sptep);
                }
-               spte = rmap_next(rmapp, spte);
        }
+
        return young;
 }
 
 static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                              unsigned long data)
 {
-       u64 *spte;
+       u64 *sptep;
+       struct rmap_iterator iter;
        int young = 0;
 
        /*
@@ -1204,16 +1279,14 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
        if (!shadow_accessed_mask)
                goto out;
 
-       spte = rmap_next(rmapp, NULL);
-       while (spte) {
-               u64 _spte = *spte;
-               BUG_ON(!(_spte & PT_PRESENT_MASK));
-               young = _spte & PT_ACCESSED_MASK;
-               if (young) {
+       for (sptep = rmap_get_first(*rmapp, &iter); sptep;
+            sptep = rmap_get_next(&iter)) {
+               BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+               if (*sptep & PT_ACCESSED_MASK) {
                        young = 1;
                        break;
                }
-               spte = rmap_next(rmapp, spte);
        }
 out:
        return young;
@@ -1865,10 +1938,11 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
 
 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-       u64 *parent_pte;
+       u64 *sptep;
+       struct rmap_iterator iter;
 
-       while ((parent_pte = pte_list_next(&sp->parent_ptes, NULL)))
-               drop_parent_pte(sp, parent_pte);
+       while ((sptep = rmap_get_first(sp->parent_ptes, &iter)))
+               drop_parent_pte(sp, sptep);
 }
 
 static int mmu_zap_unsync_children(struct kvm *kvm,
@@ -1925,30 +1999,6 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
        return ret;
 }
 
-static void kvm_mmu_isolate_pages(struct list_head *invalid_list)
-{
-       struct kvm_mmu_page *sp;
-
-       list_for_each_entry(sp, invalid_list, link)
-               kvm_mmu_isolate_page(sp);
-}
-
-static void free_pages_rcu(struct rcu_head *head)
-{
-       struct kvm_mmu_page *next, *sp;
-
-       sp = container_of(head, struct kvm_mmu_page, rcu);
-       while (sp) {
-               if (!list_empty(&sp->link))
-                       next = list_first_entry(&sp->link,
-                                     struct kvm_mmu_page, link);
-               else
-                       next = NULL;
-               kvm_mmu_free_page(sp);
-               sp = next;
-       }
-}
-
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                    struct list_head *invalid_list)
 {
@@ -1957,17 +2007,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
        if (list_empty(invalid_list))
                return;
 
-       kvm_flush_remote_tlbs(kvm);
-
-       if (atomic_read(&kvm->arch.reader_counter)) {
-               kvm_mmu_isolate_pages(invalid_list);
-               sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
-               list_del_init(invalid_list);
+       /*
+        * wmb: make sure everyone sees our modifications to the page tables
+        * rmb: make sure we see changes to vcpu->mode
+        */
+       smp_mb();
 
-               trace_kvm_mmu_delay_free_pages(sp);
-               call_rcu(&sp->rcu, free_pages_rcu);
-               return;
-       }
+       /*
+        * Wait for all vcpus to exit guest mode and/or lockless shadow
+        * page table walks.
+        */
+       kvm_flush_remote_tlbs(kvm);
 
        do {
                sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
@@ -1975,7 +2025,6 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                kvm_mmu_isolate_page(sp);
                kvm_mmu_free_page(sp);
        } while (!list_empty(invalid_list));
-
 }
 
 /*
@@ -2546,8 +2595,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
                        *gfnp = gfn;
                        kvm_release_pfn_clean(pfn);
                        pfn &= ~mask;
-                       if (!get_page_unless_zero(pfn_to_page(pfn)))
-                               BUG();
+                       kvm_get_pfn(pfn);
                        *pfnp = pfn;
                }
        }
@@ -3554,7 +3602,7 @@ static bool detect_write_flooding(struct kvm_mmu_page *sp)
         * Skip write-flooding detected for the sp whose level is 1, because
         * it can become unsync, then the guest page is not write-protected.
         */
-       if (sp->role.level == 1)
+       if (sp->role.level == PT_PAGE_TABLE_LEVEL)
                return false;
 
        return ++sp->write_flooding_count >= 3;
index 715da5a19a5b6cf8a6abec0a96fddf1d2260cc85..7d7d0b9e23eb2e3d7b256d74c58852c9f26e5772 100644 (file)
@@ -192,7 +192,8 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        struct kvm_memory_slot *slot;
        unsigned long *rmapp;
-       u64 *spte;
+       u64 *sptep;
+       struct rmap_iterator iter;
 
        if (sp->role.direct || sp->unsync || sp->role.invalid)
                return;
@@ -200,13 +201,12 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
        slot = gfn_to_memslot(kvm, sp->gfn);
        rmapp = &slot->rmap[sp->gfn - slot->base_gfn];
 
-       spte = rmap_next(rmapp, NULL);
-       while (spte) {
-               if (is_writable_pte(*spte))
+       for (sptep = rmap_get_first(*rmapp, &iter); sptep;
+            sptep = rmap_get_next(&iter)) {
+               if (is_writable_pte(*sptep))
                        audit_printk(kvm, "shadow page has writable "
                                     "mappings: gfn %llx role %x\n",
                                     sp->gfn, sp->role.word);
-               spte = rmap_next(rmapp, spte);
        }
 }
 
index df5a70311be815f95f35cf0760eb4675af3eb414..34f970937ef10469bb079a19cbd7e96ae624e4eb 100644 (file)
@@ -658,7 +658,7 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
 {
        int offset = 0;
 
-       WARN_ON(sp->role.level != 1);
+       WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
 
        if (PTTYPE == 32)
                offset = sp->role.quadrant << PT64_LEVEL_BITS;
index e334389e1c755eb471c146264e4bf1021ca5333a..f75af406b268ef4e48c8d3dddbaaf6c0ac66852b 100644 (file)
@@ -22,6 +22,7 @@
 #include "x86.h"
 
 #include <linux/module.h>
+#include <linux/mod_devicetable.h>
 #include <linux/kernel.h>
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
+static const struct x86_cpu_id svm_cpu_id[] = {
+       X86_FEATURE_MATCH(X86_FEATURE_SVM),
+       {}
+};
+MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
+
 #define IOPM_ALLOC_ORDER 2
 #define MSRPM_ALLOC_ORDER 1
 
@@ -3240,6 +3247,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
        svm_clear_vintr(svm);
        svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
        mark_dirty(svm->vmcb, VMCB_INTR);
+       ++svm->vcpu.stat.irq_window_exits;
        /*
         * If the user space waits to inject interrupts, exit as soon as
         * possible
@@ -3247,7 +3255,6 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
        if (!irqchip_in_kernel(svm->vcpu.kvm) &&
            kvm_run->request_interrupt_window &&
            !kvm_cpu_has_interrupt(&svm->vcpu)) {
-               ++svm->vcpu.stat.irq_window_exits;
                kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                return 0;
        }
index 4ff0ab9bc3c86fd26889fa4b88b23c48c84523db..32eb5886629201d8a5b926bc0123649b12f63850 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
 #include <linux/ftrace_event.h>
 #include <linux/slab.h>
 #include <linux/tboot.h>
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
+static const struct x86_cpu_id vmx_cpu_id[] = {
+       X86_FEATURE_MATCH(X86_FEATURE_VMX),
+       {}
+};
+MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+
 static bool __read_mostly enable_vpid = 1;
 module_param_named(vpid, enable_vpid, bool, 0444);
 
@@ -386,6 +393,9 @@ struct vcpu_vmx {
        struct {
                int           loaded;
                u16           fs_sel, gs_sel, ldt_sel;
+#ifdef CONFIG_X86_64
+               u16           ds_sel, es_sel;
+#endif
                int           gs_ldt_reload_needed;
                int           fs_reload_needed;
        } host_state;
@@ -1410,6 +1420,11 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
                vmx->host_state.gs_ldt_reload_needed = 1;
        }
 
+#ifdef CONFIG_X86_64
+       savesegment(ds, vmx->host_state.ds_sel);
+       savesegment(es, vmx->host_state.es_sel);
+#endif
+
 #ifdef CONFIG_X86_64
        vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
        vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
@@ -1450,6 +1465,19 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
        }
        if (vmx->host_state.fs_reload_needed)
                loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+       if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
+               loadsegment(ds, vmx->host_state.ds_sel);
+               loadsegment(es, vmx->host_state.es_sel);
+       }
+#else
+       /*
+        * The sysexit path does not restore ds/es, so we must set them to
+        * a reasonable value ourselves.
+        */
+       loadsegment(ds, __USER_DS);
+       loadsegment(es, __USER_DS);
+#endif
        reload_tss();
 #ifdef CONFIG_X86_64
        wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
@@ -3633,8 +3661,18 @@ static void vmx_set_constant_host_state(void)
        vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
 
        vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
+#ifdef CONFIG_X86_64
+       /*
+        * Load null selectors, so we can avoid reloading them in
+        * __vmx_load_host_state(), in case userspace uses the null selectors
+        * too (the expected case).
+        */
+       vmcs_write16(HOST_DS_SELECTOR, 0);
+       vmcs_write16(HOST_ES_SELECTOR, 0);
+#else
        vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
        vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
+#endif
        vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
        vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
 
@@ -6256,7 +6294,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
                }
        }
 
-       asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
        vmx->loaded_vmcs->launched = 1;
 
        vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
@@ -6343,7 +6380,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        return &vmx->vcpu;
 
 free_vmcs:
-       free_vmcs(vmx->loaded_vmcs->vmcs);
+       free_loaded_vmcs(vmx->loaded_vmcs);
 free_msrs:
        kfree(vmx->guest_msrs);
 uninit_vcpu:
index 185a2b823a2dbbceedab23b4dcfd8db4395ceda9..be6d54929fa7d661c31f65d076c0daad8086d785 100644 (file)
@@ -2147,6 +2147,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_GET_TSC_KHZ:
        case KVM_CAP_PCI_2_3:
+       case KVM_CAP_KVMCLOCK_CTRL:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
@@ -2597,6 +2598,23 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
        return r;
 }
 
+/*
+ * kvm_set_guest_paused() indicates to the guest kernel that it has been
+ * stopped by the hypervisor.  This function will be called from the host only.
+ * EINVAL is returned when the host attempts to set the flag for a guest that
+ * does not support pv clocks.
+ */
+static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
+{
+       struct pvclock_vcpu_time_info *src = &vcpu->arch.hv_clock;
+       if (!vcpu->arch.time_page)
+               return -EINVAL;
+       src->flags |= PVCLOCK_GUEST_STOPPED;
+       mark_page_dirty(vcpu->kvm, vcpu->arch.time >> PAGE_SHIFT);
+       kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+       return 0;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
 {
@@ -2873,6 +2891,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = vcpu->arch.virtual_tsc_khz;
                goto out;
        }
+       case KVM_KVMCLOCK_CTRL: {
+               r = kvm_set_guest_paused(vcpu);
+               goto out;
+       }
        default:
                r = -EINVAL;
        }
@@ -3045,57 +3067,32 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 }
 
 /**
- * write_protect_slot - write protect a slot for dirty logging
- * @kvm: the kvm instance
- * @memslot: the slot we protect
- * @dirty_bitmap: the bitmap indicating which pages are dirty
- * @nr_dirty_pages: the number of dirty pages
+ * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
+ * @kvm: kvm instance
+ * @log: slot id and address to which we copy the log
  *
- * We have two ways to find all sptes to protect:
- * 1. Use kvm_mmu_slot_remove_write_access() which walks all shadow pages and
- *    checks ones that have a spte mapping a page in the slot.
- * 2. Use kvm_mmu_rmap_write_protect() for each gfn found in the bitmap.
+ * We need to keep it in mind that VCPU threads can write to the bitmap
+ * concurrently.  So, to avoid losing data, we keep the following order for
+ * each bit:
  *
- * Generally speaking, if there are not so many dirty pages compared to the
- * number of shadow pages, we should use the latter.
+ *   1. Take a snapshot of the bit and clear it if needed.
+ *   2. Write protect the corresponding page.
+ *   3. Flush TLB's if needed.
+ *   4. Copy the snapshot to the userspace.
  *
- * Note that letting others write into a page marked dirty in the old bitmap
- * by using the remaining tlb entry is not a problem.  That page will become
- * write protected again when we flush the tlb and then be reported dirty to
- * the user space by copying the old bitmap.
- */
-static void write_protect_slot(struct kvm *kvm,
-                              struct kvm_memory_slot *memslot,
-                              unsigned long *dirty_bitmap,
-                              unsigned long nr_dirty_pages)
-{
-       spin_lock(&kvm->mmu_lock);
-
-       /* Not many dirty pages compared to # of shadow pages. */
-       if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
-               unsigned long gfn_offset;
-
-               for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
-                       unsigned long gfn = memslot->base_gfn + gfn_offset;
-
-                       kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
-               }
-               kvm_flush_remote_tlbs(kvm);
-       } else
-               kvm_mmu_slot_remove_write_access(kvm, memslot->id);
-
-       spin_unlock(&kvm->mmu_lock);
-}
-
-/*
- * Get (and clear) the dirty memory log for a memory slot.
+ * Between 2 and 3, the guest may write to the page using the remaining TLB
+ * entry.  This is not a problem because the page will be reported dirty at
+ * step 4 using the snapshot taken before and step 3 ensures that successive
+ * writes will be logged for the next call.
  */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
-                                     struct kvm_dirty_log *log)
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
        int r;
        struct kvm_memory_slot *memslot;
-       unsigned long n, nr_dirty_pages;
+       unsigned long n, i;
+       unsigned long *dirty_bitmap;
+       unsigned long *dirty_bitmap_buffer;
+       bool is_dirty = false;
 
        mutex_lock(&kvm->slots_lock);
 
@@ -3104,49 +3101,42 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                goto out;
 
        memslot = id_to_memslot(kvm->memslots, log->slot);
+
+       dirty_bitmap = memslot->dirty_bitmap;
        r = -ENOENT;
-       if (!memslot->dirty_bitmap)
+       if (!dirty_bitmap)
                goto out;
 
        n = kvm_dirty_bitmap_bytes(memslot);
-       nr_dirty_pages = memslot->nr_dirty_pages;
 
-       /* If nothing is dirty, don't bother messing with page tables. */
-       if (nr_dirty_pages) {
-               struct kvm_memslots *slots, *old_slots;
-               unsigned long *dirty_bitmap, *dirty_bitmap_head;
+       dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
+       memset(dirty_bitmap_buffer, 0, n);
 
-               dirty_bitmap = memslot->dirty_bitmap;
-               dirty_bitmap_head = memslot->dirty_bitmap_head;
-               if (dirty_bitmap == dirty_bitmap_head)
-                       dirty_bitmap_head += n / sizeof(long);
-               memset(dirty_bitmap_head, 0, n);
+       spin_lock(&kvm->mmu_lock);
 
-               r = -ENOMEM;
-               slots = kmemdup(kvm->memslots, sizeof(*kvm->memslots), GFP_KERNEL);
-               if (!slots)
-                       goto out;
+       for (i = 0; i < n / sizeof(long); i++) {
+               unsigned long mask;
+               gfn_t offset;
 
-               memslot = id_to_memslot(slots, log->slot);
-               memslot->nr_dirty_pages = 0;
-               memslot->dirty_bitmap = dirty_bitmap_head;
-               update_memslots(slots, NULL);
+               if (!dirty_bitmap[i])
+                       continue;
 
-               old_slots = kvm->memslots;
-               rcu_assign_pointer(kvm->memslots, slots);
-               synchronize_srcu_expedited(&kvm->srcu);
-               kfree(old_slots);
+               is_dirty = true;
 
-               write_protect_slot(kvm, memslot, dirty_bitmap, nr_dirty_pages);
+               mask = xchg(&dirty_bitmap[i], 0);
+               dirty_bitmap_buffer[i] = mask;
 
-               r = -EFAULT;
-               if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
-                       goto out;
-       } else {
-               r = -EFAULT;
-               if (clear_user(log->dirty_bitmap, n))
-                       goto out;
+               offset = i * BITS_PER_LONG;
+               kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
        }
+       if (is_dirty)
+               kvm_flush_remote_tlbs(kvm);
+
+       spin_unlock(&kvm->mmu_lock);
+
+       r = -EFAULT;
+       if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
+               goto out;
 
        r = 0;
 out:
@@ -3728,9 +3718,8 @@ struct read_write_emulator_ops {
 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
 {
        if (vcpu->mmio_read_completed) {
-               memcpy(val, vcpu->mmio_data, bytes);
                trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
-                              vcpu->mmio_phys_addr, *(u64 *)val);
+                              vcpu->mmio_fragments[0].gpa, *(u64 *)val);
                vcpu->mmio_read_completed = 0;
                return 1;
        }
@@ -3766,8 +3755,9 @@ static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
                           void *val, int bytes)
 {
-       memcpy(vcpu->mmio_data, val, bytes);
-       memcpy(vcpu->run->mmio.data, vcpu->mmio_data, 8);
+       struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
+
+       memcpy(vcpu->run->mmio.data, frag->data, frag->len);
        return X86EMUL_CONTINUE;
 }
 
@@ -3794,10 +3784,7 @@ static int emulator_read_write_onepage(unsigned long addr, void *val,
        gpa_t gpa;
        int handled, ret;
        bool write = ops->write;
-
-       if (ops->read_write_prepare &&
-                 ops->read_write_prepare(vcpu, val, bytes))
-               return X86EMUL_CONTINUE;
+       struct kvm_mmio_fragment *frag;
 
        ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
 
@@ -3823,15 +3810,19 @@ mmio:
        bytes -= handled;
        val += handled;
 
-       vcpu->mmio_needed = 1;
-       vcpu->run->exit_reason = KVM_EXIT_MMIO;
-       vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
-       vcpu->mmio_size = bytes;
-       vcpu->run->mmio.len = min(vcpu->mmio_size, 8);
-       vcpu->run->mmio.is_write = vcpu->mmio_is_write = write;
-       vcpu->mmio_index = 0;
+       while (bytes) {
+               unsigned now = min(bytes, 8U);
 
-       return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
+               frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
+               frag->gpa = gpa;
+               frag->data = val;
+               frag->len = now;
+
+               gpa += now;
+               val += now;
+               bytes -= now;
+       }
+       return X86EMUL_CONTINUE;
 }
 
 int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
@@ -3840,10 +3831,18 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
                        struct read_write_emulator_ops *ops)
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+       gpa_t gpa;
+       int rc;
+
+       if (ops->read_write_prepare &&
+                 ops->read_write_prepare(vcpu, val, bytes))
+               return X86EMUL_CONTINUE;
+
+       vcpu->mmio_nr_fragments = 0;
 
        /* Crossing a page boundary? */
        if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
-               int rc, now;
+               int now;
 
                now = -addr & ~PAGE_MASK;
                rc = emulator_read_write_onepage(addr, val, now, exception,
@@ -3856,8 +3855,25 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
                bytes -= now;
        }
 
-       return emulator_read_write_onepage(addr, val, bytes, exception,
-                                          vcpu, ops);
+       rc = emulator_read_write_onepage(addr, val, bytes, exception,
+                                        vcpu, ops);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+
+       if (!vcpu->mmio_nr_fragments)
+               return rc;
+
+       gpa = vcpu->mmio_fragments[0].gpa;
+
+       vcpu->mmio_needed = 1;
+       vcpu->mmio_cur_fragment = 0;
+
+       vcpu->run->mmio.len = vcpu->mmio_fragments[0].len;
+       vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
+       vcpu->run->exit_reason = KVM_EXIT_MMIO;
+       vcpu->run->mmio.phys_addr = gpa;
+
+       return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
 }
 
 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
@@ -5263,10 +5279,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        kvm_deliver_pmi(vcpu);
        }
 
-       r = kvm_mmu_reload(vcpu);
-       if (unlikely(r))
-               goto out;
-
        if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
                inject_pending_event(vcpu);
 
@@ -5282,6 +5294,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                }
        }
 
+       r = kvm_mmu_reload(vcpu);
+       if (unlikely(r)) {
+               kvm_x86_ops->cancel_injection(vcpu);
+               goto out;
+       }
+
        preempt_disable();
 
        kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -5456,33 +5474,55 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
        return r;
 }
 
+/*
+ * Implements the following, as a state machine:
+ *
+ * read:
+ *   for each fragment
+ *     write gpa, len
+ *     exit
+ *     copy data
+ *   execute insn
+ *
+ * write:
+ *   for each fragment
+ *      write gpa, len
+ *      copy data
+ *      exit
+ */
 static int complete_mmio(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *run = vcpu->run;
+       struct kvm_mmio_fragment *frag;
        int r;
 
        if (!(vcpu->arch.pio.count || vcpu->mmio_needed))
                return 1;
 
        if (vcpu->mmio_needed) {
-               vcpu->mmio_needed = 0;
+               /* Complete previous fragment */
+               frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];
                if (!vcpu->mmio_is_write)
-                       memcpy(vcpu->mmio_data + vcpu->mmio_index,
-                              run->mmio.data, 8);
-               vcpu->mmio_index += 8;
-               if (vcpu->mmio_index < vcpu->mmio_size) {
-                       run->exit_reason = KVM_EXIT_MMIO;
-                       run->mmio.phys_addr = vcpu->mmio_phys_addr + vcpu->mmio_index;
-                       memcpy(run->mmio.data, vcpu->mmio_data + vcpu->mmio_index, 8);
-                       run->mmio.len = min(vcpu->mmio_size - vcpu->mmio_index, 8);
-                       run->mmio.is_write = vcpu->mmio_is_write;
-                       vcpu->mmio_needed = 1;
-                       return 0;
+                       memcpy(frag->data, run->mmio.data, frag->len);
+               if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
+                       vcpu->mmio_needed = 0;
+                       if (vcpu->mmio_is_write)
+                               return 1;
+                       vcpu->mmio_read_completed = 1;
+                       goto done;
                }
+               /* Initiate next fragment */
+               ++frag;
+               run->exit_reason = KVM_EXIT_MMIO;
+               run->mmio.phys_addr = frag->gpa;
                if (vcpu->mmio_is_write)
-                       return 1;
-               vcpu->mmio_read_completed = 1;
+                       memcpy(run->mmio.data, frag->data, frag->len);
+               run->mmio.len = frag->len;
+               run->mmio.is_write = vcpu->mmio_is_write;
+               return 0;
+
        }
+done:
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
        r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -6399,21 +6439,9 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
                 kvm_cpu_has_interrupt(vcpu));
 }
 
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
-       int me;
-       int cpu = vcpu->cpu;
-
-       if (waitqueue_active(&vcpu->wq)) {
-               wake_up_interruptible(&vcpu->wq);
-               ++vcpu->stat.halt_wakeup;
-       }
-
-       me = get_cpu();
-       if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
-               if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
-                       smp_send_reschedule(cpu);
-       put_cpu();
+       return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
 }
 
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
index cb80c293cdd8ea1b26dbbaa050c1c5c43b1fd1ab..3d1134ddb885622af79bdb0fefb589c8f7b4d5b6 100644 (file)
@@ -64,7 +64,7 @@ static inline int is_pse(struct kvm_vcpu *vcpu)
 
 static inline int is_paging(struct kvm_vcpu *vcpu)
 {
-       return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
+       return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
 }
 
 static inline u32 bit(int bitno)
index 2e4e4b02c37a62cd593f3a59645898d946d95ffb..f61ee67ec00f0dc6d61b6165e087ba248f81df2c 100644 (file)
@@ -43,100 +43,3 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
        return len;
 }
 EXPORT_SYMBOL_GPL(copy_from_user_nmi);
-
-/*
- * Do a strncpy, return length of string without final '\0'.
- * 'count' is the user-supplied count (return 'count' if we
- * hit it), 'max' is the address space maximum (and we return
- * -EFAULT if we hit it).
- */
-static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
-{
-       long res = 0;
-
-       /*
-        * Truncate 'max' to the user-specified limit, so that
-        * we only have one limit we need to check in the loop
-        */
-       if (max > count)
-               max = count;
-
-       while (max >= sizeof(unsigned long)) {
-               unsigned long c, mask;
-
-               /* Fall back to byte-at-a-time if we get a page fault */
-               if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
-                       break;
-               mask = has_zero(c);
-               if (mask) {
-                       mask = (mask - 1) & ~mask;
-                       mask >>= 7;
-                       *(unsigned long *)(dst+res) = c & mask;
-                       return res + count_masked_bytes(mask);
-               }
-               *(unsigned long *)(dst+res) = c;
-               res += sizeof(unsigned long);
-               max -= sizeof(unsigned long);
-       }
-
-       while (max) {
-               char c;
-
-               if (unlikely(__get_user(c,src+res)))
-                       return -EFAULT;
-               dst[res] = c;
-               if (!c)
-                       return res;
-               res++;
-               max--;
-       }
-
-       /*
-        * Uhhuh. We hit 'max'. But was that the user-specified maximum
-        * too? If so, that's ok - we got as much as the user asked for.
-        */
-       if (res >= count)
-               return res;
-
-       /*
-        * Nope: we hit the address space limit, and we still had more
-        * characters the caller would have wanted. That's an EFAULT.
-        */
-       return -EFAULT;
-}
-
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst:   Destination address, in kernel space.  This buffer must be at
- *         least @count bytes long.
- * @src:   Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
-       unsigned long max_addr, src_addr;
-
-       if (unlikely(count <= 0))
-               return 0;
-
-       max_addr = current_thread_info()->addr_limit.seg;
-       src_addr = (unsigned long)src;
-       if (likely(src_addr < max_addr)) {
-               unsigned long max = max_addr - src_addr;
-               return do_strncpy_from_user(dst, src, count, max);
-       }
-       return -EFAULT;
-}
-EXPORT_SYMBOL(strncpy_from_user);
index 883b216c60b2d51059985c716da365aef5e9bfbf..1781b2f950e234963f7c6fae0f65158fb8ed531a 100644 (file)
@@ -95,47 +95,6 @@ __clear_user(void __user *to, unsigned long n)
 }
 EXPORT_SYMBOL(__clear_user);
 
-/**
- * strnlen_user: - Get the size of a string in user space.
- * @s: The string to measure.
- * @n: The maximum valid length
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- * If the string is too long, returns a value greater than @n.
- */
-long strnlen_user(const char __user *s, long n)
-{
-       unsigned long mask = -__addr_ok(s);
-       unsigned long res, tmp;
-
-       might_fault();
-
-       __asm__ __volatile__(
-               "       testl %0, %0\n"
-               "       jz 3f\n"
-               "       andl %0,%%ecx\n"
-               "0:     repne; scasb\n"
-               "       setne %%al\n"
-               "       subl %%ecx,%0\n"
-               "       addl %0,%%eax\n"
-               "1:\n"
-               ".section .fixup,\"ax\"\n"
-               "2:     xorl %%eax,%%eax\n"
-               "       jmp 1b\n"
-               "3:     movb $1,%%al\n"
-               "       jmp 1b\n"
-               ".previous\n"
-               _ASM_EXTABLE(0b,2b)
-               :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
-               :"0" (n), "1" (s), "2" (0), "3" (mask)
-               :"cc");
-       return res & mask;
-}
-EXPORT_SYMBOL(strnlen_user);
-
 #ifdef CONFIG_X86_INTEL_USERCOPY
 static unsigned long
 __copy_user_intel(void __user *to, const void *from, unsigned long size)
index 0d0326f388c0bdf9778ec2157c6d39cae3ff5c12..e5b130bc2d0efb4a026821bd2df605b8e4e9b643 100644 (file)
@@ -52,54 +52,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
 }
 EXPORT_SYMBOL(clear_user);
 
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-
-long __strnlen_user(const char __user *s, long n)
-{
-       long res = 0;
-       char c;
-
-       while (1) {
-               if (res>n)
-                       return n+1;
-               if (__get_user(c, s))
-                       return 0;
-               if (!c)
-                       return res+1;
-               res++;
-               s++;
-       }
-}
-EXPORT_SYMBOL(__strnlen_user);
-
-long strnlen_user(const char __user *s, long n)
-{
-       if (!access_ok(VERIFY_READ, s, 1))
-               return 0;
-       return __strnlen_user(s, n);
-}
-EXPORT_SYMBOL(strnlen_user);
-
-long strlen_user(const char __user *s)
-{
-       long res = 0;
-       char c;
-
-       for (;;) {
-               if (get_user(c, s))
-                       return 0;
-               if (!c)
-                       return res+1;
-               res++;
-               s++;
-       }
-}
-EXPORT_SYMBOL(strlen_user);
-
 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
 {
        if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { 
index 319b6f2fb8b9dc56300c082ab06a266673b091c9..97141c26a13ac8400cfce07c36d55cca50962b26 100644 (file)
@@ -84,8 +84,9 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
        pgt_buf_end = pgt_buf_start;
        pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
 
-       printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-               end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
+       printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
+               end - 1, pgt_buf_start << PAGE_SHIFT,
+               (pgt_buf_top << PAGE_SHIFT) - 1);
 }
 
 void __init native_pagetable_reserve(u64 start, u64 end)
@@ -132,7 +133,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
        int nr_range, i;
        int use_pse, use_gbpages;
 
-       printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
+       printk(KERN_INFO "init_memory_mapping: [mem %#010lx-%#010lx]\n",
+              start, end - 1);
 
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
        /*
@@ -251,8 +253,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
        }
 
        for (i = 0; i < nr_range; i++)
-               printk(KERN_DEBUG " %010lx - %010lx page %s\n",
-                               mr[i].start, mr[i].end,
+               printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
+                               mr[i].start, mr[i].end - 1,
                        (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
                         (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
 
@@ -350,8 +352,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
         * create a kernel page fault:
         */
 #ifdef CONFIG_DEBUG_PAGEALLOC
-       printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-               begin, end);
+       printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n",
+               begin, end - 1);
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 #else
        /*
index 19d3fa08b1191493cda6415e56f8f26d86415bf7..2d125be1bae9f2c229b835b8ea1421e685c38879 100644 (file)
@@ -141,8 +141,8 @@ static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
 
        /* whine about and ignore invalid blks */
        if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
-               pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
-                          nid, start, end);
+               pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
+                          nid, start, end - 1);
                return 0;
        }
 
@@ -210,8 +210,8 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
 
        start = roundup(start, ZONE_ALIGN);
 
-       printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n",
-              nid, start, end);
+       printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
+              nid, start, end - 1);
 
        /*
         * Allocate node data.  Try remap allocator first, node-local
@@ -232,7 +232,7 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
        }
 
        /* report and initialize */
-       printk(KERN_INFO "  NODE_DATA [%016Lx - %016Lx]%s\n",
+       printk(KERN_INFO "  NODE_DATA [mem %#010Lx-%#010Lx]%s\n",
               nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
        tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
        if (!remapped && tnid != nid)
@@ -291,14 +291,14 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
                         */
                        if (bi->end > bj->start && bi->start < bj->end) {
                                if (bi->nid != bj->nid) {
-                                       pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
-                                              bi->nid, bi->start, bi->end,
-                                              bj->nid, bj->start, bj->end);
+                                       pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
+                                              bi->nid, bi->start, bi->end - 1,
+                                              bj->nid, bj->start, bj->end - 1);
                                        return -EINVAL;
                                }
-                               pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
-                                          bi->nid, bi->start, bi->end,
-                                          bj->start, bj->end);
+                               pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
+                                          bi->nid, bi->start, bi->end - 1,
+                                          bj->start, bj->end - 1);
                        }
 
                        /*
@@ -320,9 +320,9 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
                        }
                        if (k < mi->nr_blks)
                                continue;
-                       printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%Lx,%Lx)\n",
-                              bi->nid, bi->start, bi->end, bj->start, bj->end,
-                              start, end);
+                       printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
+                              bi->nid, bi->start, bi->end - 1, bj->start,
+                              bj->end - 1, start, end - 1);
                        bi->start = start;
                        bi->end = end;
                        numa_remove_memblk_from(j--, mi);
@@ -616,8 +616,8 @@ static int __init dummy_numa_init(void)
 {
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");
-       printk(KERN_INFO "Faking a node at %016Lx-%016Lx\n",
-              0LLU, PFN_PHYS(max_pfn));
+       printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
+              0LLU, PFN_PHYS(max_pfn) - 1);
 
        node_set(0, numa_nodes_parsed);
        numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
index 871dd886817014597fadfb273a895dd0893c9595..dbbbb47260ccc28b8dad5bb14f82420187a83ff3 100644 (file)
@@ -68,8 +68,8 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
                numa_remove_memblk_from(phys_blk, pi);
        }
 
-       printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
-              eb->start, eb->end, (eb->end - eb->start) >> 20);
+       printk(KERN_INFO "Faking node %d at [mem %#018Lx-%#018Lx] (%LuMB)\n",
+              nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20);
        return 0;
 }
 
index f6ff57b7efa514e0a7e1ec47ac8472ea868c13fa..3d68ef6d2266cb66b3d07c578191b80c5348e0e2 100644 (file)
@@ -158,31 +158,47 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
        return req_type;
 }
 
+struct pagerange_state {
+       unsigned long           cur_pfn;
+       int                     ram;
+       int                     not_ram;
+};
+
+static int
+pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
+{
+       struct pagerange_state *state = arg;
+
+       state->not_ram  |= initial_pfn > state->cur_pfn;
+       state->ram      |= total_nr_pages > 0;
+       state->cur_pfn   = initial_pfn + total_nr_pages;
+
+       return state->ram && state->not_ram;
+}
+
 static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
 {
-       int ram_page = 0, not_rampage = 0;
-       unsigned long page_nr;
+       int ret = 0;
+       unsigned long start_pfn = start >> PAGE_SHIFT;
+       unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       struct pagerange_state state = {start_pfn, 0, 0};
 
-       for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
-            ++page_nr) {
-               /*
-                * For legacy reasons, physical address range in the legacy ISA
-                * region is tracked as non-RAM. This will allow users of
-                * /dev/mem to map portions of legacy ISA region, even when
-                * some of those portions are listed(or not even listed) with
-                * different e820 types(RAM/reserved/..)
-                */
-               if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
-                   page_is_ram(page_nr))
-                       ram_page = 1;
-               else
-                       not_rampage = 1;
-
-               if (ram_page == not_rampage)
-                       return -1;
+       /*
+        * For legacy reasons, physical address range in the legacy ISA
+        * region is tracked as non-RAM. This will allow users of
+        * /dev/mem to map portions of legacy ISA region, even when
+        * some of those portions are listed(or not even listed) with
+        * different e820 types(RAM/reserved/..)
+        */
+       if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
+               start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;
+
+       if (start_pfn < end_pfn) {
+               ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
+                               &state, pagerange_is_ram_callback);
        }
 
-       return ram_page;
+       return (ret > 0) ? -1 : (state.ram ? 1 : 0);
 }
 
 /*
@@ -209,9 +225,8 @@ static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
                page = pfn_to_page(pfn);
                type = get_page_memtype(page);
                if (type != -1) {
-                       printk(KERN_INFO "reserve_ram_pages_type failed "
-                               "0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
-                               start, end, type, req_type);
+                       printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
+                               start, end - 1, type, req_type);
                        if (new_type)
                                *new_type = type;
 
@@ -314,9 +329,9 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
        err = rbt_memtype_check_insert(new, new_type);
        if (err) {
-               printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
-                      "track %s, req %s\n",
-                      start, end, cattr_name(new->type), cattr_name(req_type));
+               printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
+                      start, end - 1,
+                      cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);
 
@@ -325,8 +340,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
        spin_unlock(&memtype_lock);
 
-       dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
-               start, end, cattr_name(new->type), cattr_name(req_type),
+       dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
+               start, end - 1, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");
 
        return err;
@@ -360,14 +375,14 @@ int free_memtype(u64 start, u64 end)
        spin_unlock(&memtype_lock);
 
        if (!entry) {
-               printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
-                       current->comm, current->pid, start, end);
+               printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
+                      current->comm, current->pid, start, end - 1);
                return -EINVAL;
        }
 
        kfree(entry);
 
-       dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
+       dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);
 
        return 0;
 }
@@ -491,9 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 
        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
-                       printk(KERN_INFO
-               "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
-                               current->comm, from, to);
+                       printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
+                               current->comm, from, to - 1);
                        return 0;
                }
                cursor += PAGE_SIZE;
@@ -554,12 +568,11 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
                                size;
 
        if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
-               printk(KERN_INFO
-                       "%s:%d ioremap_change_attr failed %s "
-                       "for %Lx-%Lx\n",
+               printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
+                       "for [mem %#010Lx-%#010Lx]\n",
                        current->comm, current->pid,
                        cattr_name(flags),
-                       base, (unsigned long long)(base + size));
+                       base, (unsigned long long)(base + size-1));
                return -EINVAL;
        }
        return 0;
@@ -591,12 +604,11 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 
                flags = lookup_memtype(paddr);
                if (want_flags != flags) {
-                       printk(KERN_WARNING
-                       "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
+                       printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_flags),
                                (unsigned long long)paddr,
-                               (unsigned long long)(paddr + size),
+                               (unsigned long long)(paddr + size - 1),
                                cattr_name(flags));
                        *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                              (~_PAGE_CACHE_MASK)) |
@@ -614,11 +626,11 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
                        free_memtype(paddr, paddr + size);
                        printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
-                               " for %Lx-%Lx, got %s\n",
+                               " for [mem %#010Lx-%#010Lx], got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_flags),
                                (unsigned long long)paddr,
-                               (unsigned long long)(paddr + size),
+                               (unsigned long long)(paddr + size - 1),
                                cattr_name(flags));
                        return -EINVAL;
                }
index efb5b4b93711cec20dd095e0e0a9065a9cc060fb..732af3a9618375da189f3da4651d75a7fbd4693b 100644 (file)
@@ -176,8 +176,9 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
                return;
        }
 
-       printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
-              start, end);
+       printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
+              node, pxm,
+              (unsigned long long) start, (unsigned long long) end - 1);
 }
 
 void __init acpi_numa_arch_fixup(void) {}
index 7415aa927913eee72a424377636c02cf0f46c518..56ab74989cf170446bff6697e92035b5428df7f1 100644 (file)
@@ -64,6 +64,10 @@ static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
        int shareable = 0;
        char *name;
 
+       irq = xen_irq_from_gsi(gsi);
+       if (irq > 0)
+               return irq;
+
        if (set_pirq)
                pirq = gsi;
 
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
new file mode 100644 (file)
index 0000000..94f7fbe
--- /dev/null
@@ -0,0 +1,18 @@
+#
+# arch/x86/realmode/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+subdir- := rm
+
+obj-y += init.o
+obj-y += rmpiggy.o
+
+$(obj)/rmpiggy.o: $(obj)/rm/realmode.bin
+
+$(obj)/rm/realmode.bin: FORCE
+       $(Q)$(MAKE) $(build)=$(obj)/rm $@
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
new file mode 100644 (file)
index 0000000..cbca565
--- /dev/null
@@ -0,0 +1,115 @@
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <asm/realmode.h>
+
+struct real_mode_header *real_mode_header;
+u32 *trampoline_cr4_features;
+
+void __init setup_real_mode(void)
+{
+       phys_addr_t mem;
+       u16 real_mode_seg;
+       u32 *rel;
+       u32 count;
+       u32 *ptr;
+       u16 *seg;
+       int i;
+       unsigned char *base;
+       struct trampoline_header *trampoline_header;
+       size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+#ifdef CONFIG_X86_64
+       u64 *trampoline_pgd;
+       u64 efer;
+#endif
+
+       /* Has to be in very low memory so we can execute real-mode AP code. */
+       mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
+       if (!mem)
+               panic("Cannot allocate trampoline\n");
+
+       base = __va(mem);
+       memblock_reserve(mem, size);
+       real_mode_header = (struct real_mode_header *) base;
+       printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
+              base, (unsigned long long)mem, size);
+
+       memcpy(base, real_mode_blob, size);
+
+       real_mode_seg = __pa(base) >> 4;
+       rel = (u32 *) real_mode_relocs;
+
+       /* 16-bit segment relocations. */
+       count = rel[0];
+       rel = &rel[1];
+       for (i = 0; i < count; i++) {
+               seg = (u16 *) (base + rel[i]);
+               *seg = real_mode_seg;
+       }
+
+       /* 32-bit linear relocations. */
+       count = rel[i];
+       rel =  &rel[i + 1];
+       for (i = 0; i < count; i++) {
+               ptr = (u32 *) (base + rel[i]);
+               *ptr += __pa(base);
+       }
+
+       /* Must be performed *after* relocation. */
+       trampoline_header = (struct trampoline_header *)
+               __va(real_mode_header->trampoline_header);
+
+#ifdef CONFIG_X86_32
+       trampoline_header->start = __pa(startup_32_smp);
+       trampoline_header->gdt_limit = __BOOT_DS + 7;
+       trampoline_header->gdt_base = __pa(boot_gdt);
+#else
+       /*
+        * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
+        * so we need to mask it out.
+        */
+       rdmsrl(MSR_EFER, efer);
+       trampoline_header->efer = efer & ~EFER_LMA;
+
+       trampoline_header->start = (u64) secondary_startup_64;
+       trampoline_cr4_features = &trampoline_header->cr4;
+       *trampoline_cr4_features = read_cr4();
+
+       trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+       trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
+       trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
+#endif
+}
+
+/*
+ * set_real_mode_permissions() gets called very early, to guarantee the
+ * availability of low memory.  This is before the proper kernel page
+ * tables are set up, so we cannot set page permissions in that
+ * function.  Thus, we use an arch_initcall instead.
+ */
+static int __init set_real_mode_permissions(void)
+{
+       unsigned char *base = (unsigned char *) real_mode_header;
+       size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+
+       size_t ro_size =
+               PAGE_ALIGN(real_mode_header->ro_end) -
+               __pa(base);
+
+       size_t text_size =
+               PAGE_ALIGN(real_mode_header->ro_end) -
+               real_mode_header->text_start;
+
+       unsigned long text_start =
+               (unsigned long) __va(real_mode_header->text_start);
+
+       set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
+       set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
+       set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
+
+       return 0;
+}
+
+arch_initcall(set_real_mode_permissions);
diff --git a/arch/x86/realmode/rm/.gitignore b/arch/x86/realmode/rm/.gitignore
new file mode 100644 (file)
index 0000000..b6ed3a2
--- /dev/null
@@ -0,0 +1,3 @@
+pasyms.h
+realmode.lds
+realmode.relocs
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
new file mode 100644 (file)
index 0000000..5b84a2d
--- /dev/null
@@ -0,0 +1,82 @@
+#
+# arch/x86/realmode/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+always := realmode.bin realmode.relocs
+
+wakeup-objs    := wakeup_asm.o wakemain.o video-mode.o
+wakeup-objs    += copy.o bioscall.o regs.o
+# The link order of the video-*.o modules can matter.  In particular,
+# video-vga.o *must* be listed first, followed by video-vesa.o.
+# Hardware-specific drivers should follow in the order they should be
+# probed, and video-bios.o should typically be last.
+wakeup-objs    += video-vga.o
+wakeup-objs    += video-vesa.o
+wakeup-objs    += video-bios.o
+
+realmode-y                     += header.o
+realmode-y                     += trampoline_$(BITS).o
+realmode-y                     += stack.o
+realmode-$(CONFIG_X86_32)      += reboot_32.o
+realmode-$(CONFIG_ACPI_SLEEP)  += $(wakeup-objs)
+
+targets        += $(realmode-y)
+
+REALMODE_OBJS = $(addprefix $(obj)/,$(realmode-y))
+
+sed-pasyms := -n -r -e 's/^([0-9a-fA-F]+) [ABCDGRSTVW] (.+)$$/pa_\2 = \2;/p'
+
+quiet_cmd_pasyms = PASYMS  $@
+      cmd_pasyms = $(NM) $(filter-out FORCE,$^) | \
+                  sed $(sed-pasyms) | sort | uniq > $@
+
+targets += pasyms.h
+$(obj)/pasyms.h: $(REALMODE_OBJS) FORCE
+       $(call if_changed,pasyms)
+
+targets += realmode.lds
+$(obj)/realmode.lds: $(obj)/pasyms.h
+
+LDFLAGS_realmode.elf := --emit-relocs -T
+CPPFLAGS_realmode.lds += -P -C -I$(obj)
+
+targets += realmode.elf
+$(obj)/realmode.elf: $(obj)/realmode.lds $(REALMODE_OBJS) FORCE
+       $(call if_changed,ld)
+
+OBJCOPYFLAGS_realmode.bin := -O binary
+
+targets += realmode.bin
+$(obj)/realmode.bin: $(obj)/realmode.elf $(obj)/realmode.relocs
+       $(call if_changed,objcopy)
+
+quiet_cmd_relocs = RELOCS  $@
+      cmd_relocs = arch/x86/tools/relocs --realmode $< > $@
+
+targets += realmode.relocs
+$(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
+       $(call if_changed,relocs)
+
+# ---------------------------------------------------------------------------
+
+# How to compile the 16-bit code.  Note we always compile for -march=i386,
+# that way we can complain to the user if the CPU is insufficient.
+KBUILD_CFLAGS  := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
+                  -I$(srctree)/arch/x86/boot \
+                  -DDISABLE_BRANCH_PROFILING \
+                  -Wall -Wstrict-prototypes \
+                  -march=i386 -mregparm=3 \
+                  -include $(srctree)/$(src)/../../boot/code16gcc.h \
+                  -fno-strict-aliasing -fomit-frame-pointer \
+                  $(call cc-option, -ffreestanding) \
+                  $(call cc-option, -fno-toplevel-reorder,\
+                       $(call cc-option, -fno-unit-at-a-time)) \
+                  $(call cc-option, -fno-stack-protector) \
+                  $(call cc-option, -mpreferred-stack-boundary=2)
+KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+GCOV_PROFILE := n
diff --git a/arch/x86/realmode/rm/bioscall.S b/arch/x86/realmode/rm/bioscall.S
new file mode 100644 (file)
index 0000000..16162d1
--- /dev/null
@@ -0,0 +1 @@
+#include "../../boot/bioscall.S"
diff --git a/arch/x86/realmode/rm/copy.S b/arch/x86/realmode/rm/copy.S
new file mode 100644 (file)
index 0000000..b785e6f
--- /dev/null
@@ -0,0 +1 @@
+#include "../../boot/copy.S"
diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
new file mode 100644 (file)
index 0000000..fadf483
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Real-mode blob header; this should match realmode.h and be
+ * readonly; for mutable data instead add pointers into the .data
+ * or .bss sections as appropriate.
+ */
+
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+
+#include "realmode.h"
+       
+       .section ".header", "a"
+
+       .balign 16
+GLOBAL(real_mode_header)
+       .long   pa_text_start
+       .long   pa_ro_end
+       /* SMP trampoline */
+       .long   pa_trampoline_start
+       .long   pa_trampoline_status
+       .long   pa_trampoline_header
+#ifdef CONFIG_X86_64
+       .long   pa_trampoline_pgd;
+#endif
+       /* ACPI S3 wakeup */
+#ifdef CONFIG_ACPI_SLEEP
+       .long   pa_wakeup_start
+       .long   pa_wakeup_header
+#endif
+       /* APM/BIOS reboot */
+#ifdef CONFIG_X86_32
+       .long   pa_machine_real_restart_asm
+#endif
+END(real_mode_header)
+
+       /* End signature, used to verify integrity */
+       .section ".signature","a"
+       .balign 4
+GLOBAL(end_signature)
+       .long   REALMODE_END_SIGNATURE
+END(end_signature)
diff --git a/arch/x86/realmode/rm/realmode.h b/arch/x86/realmode/rm/realmode.h
new file mode 100644 (file)
index 0000000..d74cff6
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef ARCH_X86_REALMODE_RM_REALMODE_H
+#define ARCH_X86_REALMODE_RM_REALMODE_H
+
+#ifdef __ASSEMBLY__
+
+/*
+ * 16-bit ljmpw to the real_mode_seg
+ *
+ * This must be open-coded since gas will choke on using a
+ * relocatable symbol for the segment portion.
+ */
+#define LJMPW_RM(to)   .byte 0xea ; .word (to), real_mode_seg
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Signature at the end of the realmode region
+ */
+#define REALMODE_END_SIGNATURE 0x65a22c82
+
+#endif /* ARCH_X86_REALMODE_RM_REALMODE_H */
diff --git a/arch/x86/realmode/rm/realmode.lds.S b/arch/x86/realmode/rm/realmode.lds.S
new file mode 100644 (file)
index 0000000..86b2e8d
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * realmode.lds.S
+ *
+ * Linker script for the real-mode code
+ */
+
+#include <asm/page_types.h>
+
+#undef i386
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+
+SECTIONS
+{
+       real_mode_seg = 0;
+
+       . = 0;
+       .header : {
+               pa_real_mode_base = .;
+               *(.header)
+       }
+
+       . = ALIGN(4);
+       .rodata : {
+               *(.rodata)
+               *(.rodata.*)
+               . = ALIGN(16);
+               video_cards = .;
+               *(.videocards)
+               video_cards_end = .;
+       }
+
+       . = ALIGN(PAGE_SIZE);
+       pa_text_start = .;
+       .text : {
+               *(.text)
+               *(.text.*)
+       }
+
+       .text32 : {
+               *(.text32)
+               *(.text32.*)
+       }
+
+       .text64 : {
+               *(.text64)
+               *(.text64.*)
+       }
+       pa_ro_end = .;
+
+       . = ALIGN(PAGE_SIZE);
+       .data : {
+               *(.data)
+               *(.data.*)
+       }
+
+       . = ALIGN(128);
+       .bss : {
+               *(.bss*)
+       }
+
+       /* End signature for integrity checking */
+       . = ALIGN(4);
+       .signature : {
+               *(.signature)
+       }
+
+       /DISCARD/ : {
+               *(.note*)
+               *(.debug*)
+               *(.eh_frame*)
+       }
+
+#include "pasyms.h"
+}
diff --git a/arch/x86/realmode/rm/reboot_32.S b/arch/x86/realmode/rm/reboot_32.S
new file mode 100644 (file)
index 0000000..1140448
--- /dev/null
@@ -0,0 +1,132 @@
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/segment.h>
+#include <asm/page_types.h>
+#include "realmode.h"
+
+/*
+ * The following code and data reboots the machine by switching to real
+ * mode and jumping to the BIOS reset entry point, as if the CPU has
+ * really been reset.  The previous version asked the keyboard
+ * controller to pulse the CPU reset line, which is more thorough, but
+ * doesn't work with at least one type of 486 motherboard.  It is easy
+ * to stop this code working; hence the copious comments.
+ *
+ * This code is called with the restart type (0 = BIOS, 1 = APM) in %eax.
+ */
+       .section ".text32", "ax"
+       .code32
+
+       .balign 16
+ENTRY(machine_real_restart_asm)
+       /* Set up the IDT for real mode. */
+       lidtl   pa_machine_real_restart_idt
+
+       /*
+        * Set up a GDT from which we can load segment descriptors for real
+        * mode.  The GDT is not used in real mode; it is just needed here to
+        * prepare the descriptors.
+        */
+       lgdtl   pa_machine_real_restart_gdt
+
+       /*
+        * Load the data segment registers with 16-bit compatible values
+        */
+       movl    $16, %ecx
+       movl    %ecx, %ds
+       movl    %ecx, %es
+       movl    %ecx, %fs
+       movl    %ecx, %gs
+       movl    %ecx, %ss
+       ljmpw   $8, $1f
+
+/*
+ * This is 16-bit protected mode code to disable paging and the cache,
+ * switch to real mode and jump to the BIOS reset code.
+ *
+ * The instruction that switches to real mode by writing to CR0 must be
+ * followed immediately by a far jump instruction, which set CS to a
+ * valid value for real mode, and flushes the prefetch queue to avoid
+ * running instructions that have already been decoded in protected
+ * mode.
+ *
+ * Clears all the flags except ET, especially PG (paging), PE
+ * (protected-mode enable) and TS (task switch for coprocessor state
+ * save).  Flushes the TLB after paging has been disabled.  Sets CD and
+ * NW, to disable the cache on a 486, and invalidates the cache.  This
+ * is more like the state of a 486 after reset.  I don't know if
+ * something else should be done for other chips.
+ *
+ * More could be done here to set up the registers as if a CPU reset had
+ * occurred; hopefully real BIOSs don't assume much.  This is not the
+ * actual BIOS entry point, anyway (that is at 0xfffffff0).
+ *
+ * Most of this work is probably excessive, but it is what is tested.
+ */
+       .text
+       .code16
+
+       .balign 16
+machine_real_restart_asm16:
+1:
+       xorl    %ecx, %ecx
+       movl    %cr0, %edx
+       andl    $0x00000011, %edx
+       orl     $0x60000000, %edx
+       movl    %edx, %cr0
+       movl    %ecx, %cr3
+       movl    %cr0, %edx
+       testl   $0x60000000, %edx       /* If no cache bits -> no wbinvd */
+       jz      2f
+       wbinvd
+2:
+       andb    $0x10, %dl
+       movl    %edx, %cr0
+       LJMPW_RM(3f)
+3:
+       andw    %ax, %ax
+       jz      bios
+
+apm:
+       movw    $0x1000, %ax
+       movw    %ax, %ss
+       movw    $0xf000, %sp
+       movw    $0x5307, %ax
+       movw    $0x0001, %bx
+       movw    $0x0003, %cx
+       int     $0x15
+       /* This should never return... */
+
+bios:
+       ljmpw   $0xf000, $0xfff0
+
+       .section ".rodata", "a"
+
+       .balign 16
+GLOBAL(machine_real_restart_idt)
+       .word   0xffff          /* Length - real mode default value */
+       .long   0               /* Base - real mode default value */
+END(machine_real_restart_idt)
+
+       .balign 16
+GLOBAL(machine_real_restart_gdt)
+       /* Self-pointer */
+       .word   0xffff          /* Length - real mode default value */
+       .long   pa_machine_real_restart_gdt
+       .word   0
+
+       /*
+        * 16-bit code segment pointing to real_mode_seg
+        * Selector value 8
+        */
+       .word   0xffff          /* Limit */
+       .long   0x9b000000 + pa_real_mode_base
+       .word   0
+
+       /*
+        * 16-bit data segment with the selector value 16 = 0x10 and
+        * base value 0x100; since this is consistent with real mode
+        * semantics we don't have to reload the segments once CR0.PE = 0.
+        */
+       .quad   GDT_ENTRY(0x0093, 0x100, 0xffff)
+END(machine_real_restart_gdt)
diff --git a/arch/x86/realmode/rm/regs.c b/arch/x86/realmode/rm/regs.c
new file mode 100644 (file)
index 0000000..fbb15b9
--- /dev/null
@@ -0,0 +1 @@
+#include "../../boot/regs.c"
diff --git a/arch/x86/realmode/rm/stack.S b/arch/x86/realmode/rm/stack.S
new file mode 100644 (file)
index 0000000..867ae87
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Common heap and stack allocations
+ */
+
+#include <linux/linkage.h>
+
+       .data
+GLOBAL(HEAP)
+       .long   rm_heap
+GLOBAL(heap_end)
+       .long   rm_stack
+
+       .bss
+       .balign 16
+GLOBAL(rm_heap)
+       .space  2048
+GLOBAL(rm_stack)
+       .space  2048
+GLOBAL(rm_stack_end)
diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
new file mode 100644 (file)
index 0000000..c1b2791
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ *
+ *     Trampoline.S    Derived from Setup.S by Linus Torvalds
+ *
+ *     4 Jan 1997 Michael Chastain: changed to gnu as.
+ *
+ *     This is only used for booting secondary CPUs in SMP machine
+ *
+ *     Entry: CS:IP point to the start of our code, we are
+ *     in real mode with no stack, but the rest of the
+ *     trampoline page to make our stack and everything else
+ *     is a mystery.
+ *
+ *     We jump into arch/x86/kernel/head_32.S.
+ *
+ *     On entry to trampoline_start, the processor is in real mode
+ *     with 16-bit addressing and 16-bit data.  CS has some value
+ *     and IP is zero.  Thus, we load CS to the physical segment
+ *     of the real mode code before doing anything further.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/segment.h>
+#include <asm/page_types.h>
+#include "realmode.h"
+
+       .text
+       .code16
+
+       .balign PAGE_SIZE
+ENTRY(trampoline_start)
+       wbinvd                  # Needed for NUMA-Q should be harmless for others
+
+       LJMPW_RM(1f)
+1:
+       mov     %cs, %ax        # Code and data in the same place
+       mov     %ax, %ds
+
+       cli                     # We should be safe anyway
+
+       movl    tr_start, %eax  # where we need to go
+
+       movl    $0xA5A5A5A5, trampoline_status
+                               # write marker for master knows we're running
+
+       /*
+        * GDT tables in non default location kernel can be beyond 16MB and
+        * lgdt will not be able to load the address as in real mode default
+        * operand size is 16bit. Use lgdtl instead to force operand size
+        * to 32 bit.
+        */
+       lidtl   tr_idt                  # load idt with 0, 0
+       lgdtl   tr_gdt                  # load gdt with whatever is appropriate
+
+       movw    $1, %dx                 # protected mode (PE) bit
+       lmsw    %dx                     # into protected mode
+
+       ljmpl   $__BOOT_CS, $pa_startup_32
+
+       .section ".text32","ax"
+       .code32
+ENTRY(startup_32)                      # note: also used from wakeup_asm.S
+       jmp     *%eax
+
+       .bss
+       .balign 8
+GLOBAL(trampoline_header)
+       tr_start:               .space  4
+       tr_gdt_pad:             .space  2
+       tr_gdt:                 .space  6
+END(trampoline_header)
+       
+#include "trampoline_common.S"
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
new file mode 100644 (file)
index 0000000..bb360dc
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ *
+ *     Trampoline.S    Derived from Setup.S by Linus Torvalds
+ *
+ *     4 Jan 1997 Michael Chastain: changed to gnu as.
+ *     15 Sept 2005 Eric Biederman: 64bit PIC support
+ *
+ *     Entry: CS:IP point to the start of our code, we are
+ *     in real mode with no stack, but the rest of the
+ *     trampoline page to make our stack and everything else
+ *     is a mystery.
+ *
+ *     On entry to trampoline_start, the processor is in real mode
+ *     with 16-bit addressing and 16-bit data.  CS has some value
+ *     and IP is zero.  Thus, data addresses need to be absolute
+ *     (no relocation) and are taken with regard to r_base.
+ *
+ *     With the addition of trampoline_level4_pgt this code can
+ *     now enter a 64bit kernel that lives at arbitrary 64bit
+ *     physical addresses.
+ *
+ *     If you work on this file, check the object module with objdump
+ *     --full-contents --reloc to make sure there are no relocation
+ *     entries.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/pgtable_types.h>
+#include <asm/page_types.h>
+#include <asm/msr.h>
+#include <asm/segment.h>
+#include <asm/processor-flags.h>
+#include "realmode.h"
+
+       .text
+       .code16
+
+       .balign PAGE_SIZE
+ENTRY(trampoline_start)
+       cli                     # We should be safe anyway
+       wbinvd
+
+       LJMPW_RM(1f)
+1:
+       mov     %cs, %ax        # Code and data in the same place
+       mov     %ax, %ds
+       mov     %ax, %es
+       mov     %ax, %ss
+
+       movl    $0xA5A5A5A5, trampoline_status
+       # write marker for master knows we're running
+
+       # Setup stack
+       movl    $rm_stack_end, %esp
+
+       call    verify_cpu              # Verify the cpu supports long mode
+       testl   %eax, %eax              # Check for return code
+       jnz     no_longmode
+
+       /*
+        * GDT tables in non default location kernel can be beyond 16MB and
+        * lgdt will not be able to load the address as in real mode default
+        * operand size is 16bit. Use lgdtl instead to force operand size
+        * to 32 bit.
+        */
+
+       lidtl   tr_idt  # load idt with 0, 0
+       lgdtl   tr_gdt  # load gdt with whatever is appropriate
+
+       movw    $__KERNEL_DS, %dx       # Data segment descriptor
+
+       # Enable protected mode
+       movl    $X86_CR0_PE, %eax       # protected mode (PE) bit
+       movl    %eax, %cr0              # into protected mode
+
+       # flush prefetch and jump to startup_32
+       ljmpl   $__KERNEL32_CS, $pa_startup_32
+
+no_longmode:
+       hlt
+       jmp no_longmode
+#include "../kernel/verify_cpu.S"
+
+       .section ".text32","ax"
+       .code32
+       .balign 4
+ENTRY(startup_32)
+       movl    %edx, %ss
+       addl    $pa_real_mode_base, %esp
+       movl    %edx, %ds
+       movl    %edx, %es
+       movl    %edx, %fs
+       movl    %edx, %gs
+
+       movl    pa_tr_cr4, %eax
+       movl    %eax, %cr4              # Enable PAE mode
+
+       # Setup trampoline 4 level pagetables
+       movl    $pa_trampoline_pgd, %eax
+       movl    %eax, %cr3
+
+       # Set up EFER
+       movl    pa_tr_efer, %eax
+       movl    pa_tr_efer + 4, %edx
+       movl    $MSR_EFER, %ecx
+       wrmsr
+
+       # Enable paging and in turn activate Long Mode
+       movl    $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
+       movl    %eax, %cr0
+
+       /*
+        * At this point we're in long mode but in 32bit compatibility mode
+        * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
+        * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
+        * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
+        */
+       ljmpl   $__KERNEL_CS, $pa_startup_64
+
+       .section ".text64","ax"
+       .code64
+       .balign 4
+ENTRY(startup_64)
+       # Now jump into the kernel using virtual addresses
+       jmpq    *tr_start(%rip)
+
+       .section ".rodata","a"
+       # Duplicate the global descriptor table
+       # so the kernel can live anywhere
+       .balign 16
+       .globl tr_gdt
+tr_gdt:
+       .short  tr_gdt_end - tr_gdt - 1 # gdt limit
+       .long   pa_tr_gdt
+       .short  0
+       .quad   0x00cf9b000000ffff      # __KERNEL32_CS
+       .quad   0x00af9b000000ffff      # __KERNEL_CS
+       .quad   0x00cf93000000ffff      # __KERNEL_DS
+tr_gdt_end:
+
+       .bss
+       .balign PAGE_SIZE
+GLOBAL(trampoline_pgd)         .space  PAGE_SIZE
+
+       .balign 8
+GLOBAL(trampoline_header)
+       tr_start:               .space  8
+       GLOBAL(tr_efer)         .space  8
+       GLOBAL(tr_cr4)          .space  4
+END(trampoline_header)
+
+#include "trampoline_common.S"
diff --git a/arch/x86/realmode/rm/trampoline_common.S b/arch/x86/realmode/rm/trampoline_common.S
new file mode 100644 (file)
index 0000000..b1ecdb9
--- /dev/null
@@ -0,0 +1,7 @@
+       .section ".rodata","a"
+       .balign 16
+tr_idt: .fill 1, 6, 0
+
+       .bss
+       .balign 4
+GLOBAL(trampoline_status)      .space  4
diff --git a/arch/x86/realmode/rm/video-bios.c b/arch/x86/realmode/rm/video-bios.c
new file mode 100644 (file)
index 0000000..848b25a
--- /dev/null
@@ -0,0 +1 @@
+#include "../../boot/video-bios.c"
diff --git a/arch/x86/realmode/rm/video-mode.c b/arch/x86/realmode/rm/video-mode.c
new file mode 100644 (file)
index 0000000..2a98b7e
--- /dev/null
@@ -0,0 +1 @@
+#include "../../boot/video-mode.c"
diff --git a/arch/x86/realmode/rm/video-vesa.c b/arch/x86/realmode/rm/video-vesa.c
new file mode 100644 (file)
index 0000000..413eddd
--- /dev/null
@@ -0,0 +1 @@
+#include "../../boot/video-vesa.c"
diff --git a/arch/x86/realmode/rm/video-vga.c b/arch/x86/realmode/rm/video-vga.c
new file mode 100644 (file)
index 0000000..3085f5c
--- /dev/null
@@ -0,0 +1 @@
+#include "../../boot/video-vga.c"
diff --git a/arch/x86/realmode/rm/wakemain.c b/arch/x86/realmode/rm/wakemain.c
new file mode 100644 (file)
index 0000000..91405d5
--- /dev/null
@@ -0,0 +1,82 @@
+#include "wakeup.h"
+#include "boot.h"
+
+static void udelay(int loops)
+{
+       while (loops--)
+               io_delay();     /* Approximately 1 us */
+}
+
+static void beep(unsigned int hz)
+{
+       u8 enable;
+
+       if (!hz) {
+               enable = 0x00;          /* Turn off speaker */
+       } else {
+               u16 div = 1193181/hz;
+
+               outb(0xb6, 0x43);       /* Ctr 2, squarewave, load, binary */
+               io_delay();
+               outb(div, 0x42);        /* LSB of counter */
+               io_delay();
+               outb(div >> 8, 0x42);   /* MSB of counter */
+               io_delay();
+
+               enable = 0x03;          /* Turn on speaker */
+       }
+       inb(0x61);              /* Dummy read of System Control Port B */
+       io_delay();
+       outb(enable, 0x61);     /* Enable timer 2 output to speaker */
+       io_delay();
+}
+
+#define DOT_HZ         880
+#define DASH_HZ                587
+#define US_PER_DOT     125000
+
+/* Okay, this is totally silly, but it's kind of fun. */
+static void send_morse(const char *pattern)
+{
+       char s;
+
+       while ((s = *pattern++)) {
+               switch (s) {
+               case '.':
+                       beep(DOT_HZ);
+                       udelay(US_PER_DOT);
+                       beep(0);
+                       udelay(US_PER_DOT);
+                       break;
+               case '-':
+                       beep(DASH_HZ);
+                       udelay(US_PER_DOT * 3);
+                       beep(0);
+                       udelay(US_PER_DOT);
+                       break;
+               default:        /* Assume it's a space */
+                       udelay(US_PER_DOT * 3);
+                       break;
+               }
+       }
+}
+
+void main(void)
+{
+       /* Kill machine if structures are wrong */
+       if (wakeup_header.real_magic != 0x12345678)
+               while (1)
+                       ;
+
+       if (wakeup_header.realmode_flags & 4)
+               send_morse("...-");
+
+       if (wakeup_header.realmode_flags & 1)
+               asm volatile("lcallw   $0xc000,$3");
+
+       if (wakeup_header.realmode_flags & 2) {
+               /* Need to call BIOS */
+               probe_cards(0);
+               set_mode(wakeup_header.video_mode);
+       }
+}
diff --git a/arch/x86/realmode/rm/wakeup.h b/arch/x86/realmode/rm/wakeup.h
new file mode 100644 (file)
index 0000000..9317e00
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Definitions for the wakeup data structure at the head of the
+ * wakeup code.
+ */
+
+#ifndef ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H
+#define ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/* This must match data at wakeup.S */
+struct wakeup_header {
+       u16 video_mode;         /* Video mode number */
+       u32 pmode_entry;        /* Protected mode resume point, 32-bit only */
+       u16 pmode_cs;
+       u32 pmode_cr0;          /* Protected mode cr0 */
+       u32 pmode_cr3;          /* Protected mode cr3 */
+       u32 pmode_cr4;          /* Protected mode cr4 */
+       u32 pmode_efer_low;     /* Protected mode EFER */
+       u32 pmode_efer_high;
+       u64 pmode_gdt;
+       u32 pmode_misc_en_low;  /* Protected mode MISC_ENABLE */
+       u32 pmode_misc_en_high;
+       u32 pmode_behavior;     /* Wakeup routine behavior flags */
+       u32 realmode_flags;
+       u32 real_magic;
+       u32 signature;          /* To check we have correct structure */
+} __attribute__((__packed__));
+
+extern struct wakeup_header wakeup_header;
+#endif
+
+#define WAKEUP_HEADER_OFFSET   8
+#define WAKEUP_HEADER_SIGNATURE 0x51ee1111
+
+/* Wakeup behavior bits */
+#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE     0
+
+#endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
new file mode 100644 (file)
index 0000000..8905166
--- /dev/null
@@ -0,0 +1,177 @@
+/*
+ * ACPI wakeup real mode startup stub
+ */
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/msr-index.h>
+#include <asm/page_types.h>
+#include <asm/pgtable_types.h>
+#include <asm/processor-flags.h>
+#include "realmode.h"
+#include "wakeup.h"
+
+       .code16
+
+/* This should match the structure in wakeup.h */
+       .section ".data", "aw"
+
+       .balign 16
+GLOBAL(wakeup_header)
+       video_mode:     .short  0       /* Video mode number */
+       pmode_entry:    .long   0
+       pmode_cs:       .short  __KERNEL_CS
+       pmode_cr0:      .long   0       /* Saved %cr0 */
+       pmode_cr3:      .long   0       /* Saved %cr3 */
+       pmode_cr4:      .long   0       /* Saved %cr4 */
+       pmode_efer:     .quad   0       /* Saved EFER */
+       pmode_gdt:      .quad   0
+       pmode_misc_en:  .quad   0       /* Saved MISC_ENABLE MSR */
+       pmode_behavior: .long   0       /* Wakeup behavior flags */
+       realmode_flags: .long   0
+       real_magic:     .long   0
+       signature:      .long   WAKEUP_HEADER_SIGNATURE
+END(wakeup_header)
+
+       .text
+       .code16
+
+       .balign 16
+ENTRY(wakeup_start)
+       cli
+       cld
+
+       LJMPW_RM(3f)
+3:
+       /* Apparently some dimwit BIOS programmers don't know how to
+          program a PM to RM transition, and we might end up here with
+          junk in the data segment descriptor registers.  The only way
+          to repair that is to go into PM and fix it ourselves... */
+       movw    $16, %cx
+       lgdtl   %cs:wakeup_gdt
+       movl    %cr0, %eax
+       orb     $X86_CR0_PE, %al
+       movl    %eax, %cr0
+       ljmpw   $8, $2f
+2:
+       movw    %cx, %ds
+       movw    %cx, %es
+       movw    %cx, %ss
+       movw    %cx, %fs
+       movw    %cx, %gs
+
+       andb    $~X86_CR0_PE, %al
+       movl    %eax, %cr0
+       LJMPW_RM(3f)
+3:
+       /* Set up segments */
+       movw    %cs, %ax
+       movw    %ax, %ss
+       movl    $rm_stack_end, %esp
+       movw    %ax, %ds
+       movw    %ax, %es
+       movw    %ax, %fs
+       movw    %ax, %gs
+
+       lidtl   wakeup_idt
+
+       /* Clear the EFLAGS */
+       pushl   $0
+       popfl
+
+       /* Check header signature... */
+       movl    signature, %eax
+       cmpl    $WAKEUP_HEADER_SIGNATURE, %eax
+       jne     bogus_real_magic
+
+       /* Check we really have everything... */
+       movl    end_signature, %eax
+       cmpl    $REALMODE_END_SIGNATURE, %eax
+       jne     bogus_real_magic
+
+       /* Call the C code */
+       calll   main
+
+       /* Restore MISC_ENABLE before entering protected mode, in case
+          BIOS decided to clear XD_DISABLE during S3. */
+       movl    pmode_behavior, %eax
+       btl     $WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %eax
+       jnc     1f
+
+       movl    pmode_misc_en, %eax
+       movl    pmode_misc_en + 4, %edx
+       movl    $MSR_IA32_MISC_ENABLE, %ecx
+       wrmsr
+1:
+
+       /* Do any other stuff... */
+
+#ifndef CONFIG_64BIT
+       /* This could also be done in C code... */
+       movl    pmode_cr3, %eax
+       movl    %eax, %cr3
+
+       movl    pmode_cr4, %ecx
+       jecxz   1f
+       movl    %ecx, %cr4
+1:
+       movl    pmode_efer, %eax
+       movl    pmode_efer + 4, %edx
+       movl    %eax, %ecx
+       orl     %edx, %ecx
+       jz      1f
+       movl    $MSR_EFER, %ecx
+       wrmsr
+1:
+
+       lgdtl   pmode_gdt
+
+       /* This really couldn't... */
+       movl    pmode_entry, %eax
+       movl    pmode_cr0, %ecx
+       movl    %ecx, %cr0
+       ljmpl   $__KERNEL_CS, $pa_startup_32
+       /* -> jmp *%eax in trampoline_32.S */
+#else
+       jmp     trampoline_start
+#endif
+
+bogus_real_magic:
+1:
+       hlt
+       jmp     1b
+
+       .section ".rodata","a"
+
+       /*
+        * Set up the wakeup GDT.  We set these up as Big Real Mode,
+        * that is, with limits set to 4 GB.  At least the Lenovo
+        * Thinkpad X61 is known to need this for the video BIOS
+        * initialization quirk to work; this is likely to also
+        * be the case for other laptops or integrated video devices.
+        */
+
+       .balign 16
+GLOBAL(wakeup_gdt)
+       .word   3*8-1           /* Self-descriptor */
+       .long   pa_wakeup_gdt
+       .word   0
+
+       .word   0xffff          /* 16-bit code segment @ real_mode_base */
+       .long   0x9b000000 + pa_real_mode_base
+       .word   0x008f          /* big real mode */
+
+       .word   0xffff          /* 16-bit data segment @ real_mode_base */
+       .long   0x93000000 + pa_real_mode_base
+       .word   0x008f          /* big real mode */
+END(wakeup_gdt)
+
+       .section ".rodata","a"
+       .balign 8
+
+       /* This is the standard real-mode IDT */
+       .balign 16
+GLOBAL(wakeup_idt)
+       .word   0xffff          /* limit */
+       .long   0               /* address */
+       .word   0
+END(wakeup_idt)
diff --git a/arch/x86/realmode/rmpiggy.S b/arch/x86/realmode/rmpiggy.S
new file mode 100644 (file)
index 0000000..204c6ec
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Wrapper script for the realmode binary as a transport object
+ * before copying to low memory.
+ */
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+
+       .section ".init.data","aw"
+
+       .balign PAGE_SIZE
+
+GLOBAL(real_mode_blob)
+       .incbin "arch/x86/realmode/rm/realmode.bin"
+END(real_mode_blob)
+
+GLOBAL(real_mode_blob_end);
+
+GLOBAL(real_mode_relocs)
+       .incbin "arch/x86/realmode/rm/realmode.relocs"
+END(real_mode_relocs)
index 29f9f0554f7de0244e7120ea69fec26640bf7dce..7a35a6e71d44332d351cdeb9ec28e96c6467c7b6 100644 (file)
 346    i386    setns                   sys_setns
 347    i386    process_vm_readv        sys_process_vm_readv            compat_sys_process_vm_readv
 348    i386    process_vm_writev       sys_process_vm_writev           compat_sys_process_vm_writev
+349    i386    kcmp                    sys_kcmp
index dd29a9ea27c560a9d2fcb6e1c2983f8b8e9be407..51171aeff0dc31483cdc6526e641459b0d64deb3 100644 (file)
 309    common  getcpu                  sys_getcpu
 310    64      process_vm_readv        sys_process_vm_readv
 311    64      process_vm_writev       sys_process_vm_writev
+312    64      kcmp                    sys_kcmp
+
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
 # for native 64-bit operation.
index b685296d44641b091f5d9c565e7e596914e97d39..5a1847d619306e5f0ed5ed5570c53c69a8ce2315 100644 (file)
@@ -77,6 +77,13 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
 
 
 static const char * const sym_regex_realmode[S_NSYMTYPES] = {
+/*
+ * These symbols are known to be relative, even if the linker marks them
+ * as absolute (typically defined outside any section in the linker script.)
+ */
+       [S_REL] =
+       "^pa_",
+
 /*
  * These are 16-bit segment symbols when compiling 16-bit code.
  */
index bb0fb03b9f85f268912e7b6851f74787c8ee8e4a..a508cea135033eba3c1c7facba8aebd04a1a0983 100644 (file)
@@ -486,7 +486,6 @@ long sys_sigreturn(struct pt_regs *regs)
            copy_from_user(&set.sig[1], extramask, sig_size))
                goto segfault;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (copy_sc_from_user(&current->thread.regs, sc))
@@ -600,7 +599,6 @@ long sys_rt_sigreturn(struct pt_regs *regs)
        if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
                goto segfault;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
index ef1db1900d86366caa78c84652decb5841f5efbd..c8377fb26cdfc488fa7ea248de8369e3e0464373 100644 (file)
@@ -19,107 +19,3 @@ struct dentry * __init xen_init_debugfs(void)
        return d_xen_debug;
 }
 
-struct array_data
-{
-       void *array;
-       unsigned elements;
-};
-
-static int u32_array_open(struct inode *inode, struct file *file)
-{
-       file->private_data = NULL;
-       return nonseekable_open(inode, file);
-}
-
-static size_t format_array(char *buf, size_t bufsize, const char *fmt,
-                          u32 *array, unsigned array_size)
-{
-       size_t ret = 0;
-       unsigned i;
-
-       for(i = 0; i < array_size; i++) {
-               size_t len;
-
-               len = snprintf(buf, bufsize, fmt, array[i]);
-               len++;  /* ' ' or '\n' */
-               ret += len;
-
-               if (buf) {
-                       buf += len;
-                       bufsize -= len;
-                       buf[-1] = (i == array_size-1) ? '\n' : ' ';
-               }
-       }
-
-       ret++;          /* \0 */
-       if (buf)
-               *buf = '\0';
-
-       return ret;
-}
-
-static char *format_array_alloc(const char *fmt, u32 *array, unsigned array_size)
-{
-       size_t len = format_array(NULL, 0, fmt, array, array_size);
-       char *ret;
-
-       ret = kmalloc(len, GFP_KERNEL);
-       if (ret == NULL)
-               return NULL;
-
-       format_array(ret, len, fmt, array, array_size);
-       return ret;
-}
-
-static ssize_t u32_array_read(struct file *file, char __user *buf, size_t len,
-                             loff_t *ppos)
-{
-       struct inode *inode = file->f_path.dentry->d_inode;
-       struct array_data *data = inode->i_private;
-       size_t size;
-
-       if (*ppos == 0) {
-               if (file->private_data) {
-                       kfree(file->private_data);
-                       file->private_data = NULL;
-               }
-
-               file->private_data = format_array_alloc("%u", data->array, data->elements);
-       }
-
-       size = 0;
-       if (file->private_data)
-               size = strlen(file->private_data);
-
-       return simple_read_from_buffer(buf, len, ppos, file->private_data, size);
-}
-
-static int xen_array_release(struct inode *inode, struct file *file)
-{
-       kfree(file->private_data);
-
-       return 0;
-}
-
-static const struct file_operations u32_array_fops = {
-       .owner  = THIS_MODULE,
-       .open   = u32_array_open,
-       .release= xen_array_release,
-       .read   = u32_array_read,
-       .llseek = no_llseek,
-};
-
-struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
-                                           struct dentry *parent,
-                                           u32 *array, unsigned elements)
-{
-       struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
-
-       if (data == NULL)
-               return NULL;
-
-       data->array = array;
-       data->elements = elements;
-
-       return debugfs_create_file(name, mode, parent, data, &u32_array_fops);
-}
index 78d25499be5ba1662b7c72e1041834863140225f..12ebf3325c7bbd1c324a845792bf02a2806ebb56 100644 (file)
@@ -3,8 +3,4 @@
 
 struct dentry * __init xen_init_debugfs(void);
 
-struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
-                                           struct dentry *parent,
-                                           u32 *array, unsigned elements);
-
 #endif /* _XEN_DEBUGFS_H */
index c0f5facdb10cd83764b22a52537278a9eb7bfd62..e74df9548a025c2d66b3052f8ac0002695e59498 100644 (file)
@@ -42,6 +42,7 @@
 #include <xen/page.h>
 #include <xen/hvm.h>
 #include <xen/hvc-console.h>
+#include <xen/acpi.h>
 
 #include <asm/paravirt.h>
 #include <asm/apic.h>
@@ -75,6 +76,7 @@
 
 #include "xen-ops.h"
 #include "mmu.h"
+#include "smp.h"
 #include "multicalls.h"
 
 EXPORT_SYMBOL_GPL(hypercall_page);
@@ -883,6 +885,14 @@ static void set_xen_basic_apic_ops(void)
        apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
        apic->set_apic_id = xen_set_apic_id;
        apic->get_apic_id = xen_get_apic_id;
+
+#ifdef CONFIG_SMP
+       apic->send_IPI_allbutself = xen_send_IPI_allbutself;
+       apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
+       apic->send_IPI_mask = xen_send_IPI_mask;
+       apic->send_IPI_all = xen_send_IPI_all;
+       apic->send_IPI_self = xen_send_IPI_self;
+#endif
 }
 
 #endif
@@ -1106,7 +1116,10 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
        .wbinvd = native_wbinvd,
 
        .read_msr = native_read_msr_safe,
+       .rdmsr_regs = native_rdmsr_safe_regs,
        .write_msr = xen_write_msr_safe,
+       .wrmsr_regs = native_wrmsr_safe_regs,
+
        .read_tsc = native_read_tsc,
        .read_pmc = native_read_pmc,
 
@@ -1340,7 +1353,6 @@ asmlinkage void __init xen_start_kernel(void)
 
        xen_raw_console_write("mapping kernel into physical memory\n");
        pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
-       xen_ident_map_ISA();
 
        /* Allocate and initialize top and mid mfn levels for p2m structure */
        xen_build_mfn_list_list();
@@ -1400,6 +1412,8 @@ asmlinkage void __init xen_start_kernel(void)
 
                /* Make sure ACS will be enabled */
                pci_request_acs();
+
+               xen_acpi_sleep_register();
        }
 #ifdef CONFIG_PCI
        /* PCI BIOS service won't work from a PV guest. */
index 3506cd4f9a4368243fb89f4b88255276dc7175e1..3a73785631ce5f7ae8dc0dad1119ebf922e233e0 100644 (file)
@@ -1933,29 +1933,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
 }
 
-void __init xen_ident_map_ISA(void)
-{
-       unsigned long pa;
-
-       /*
-        * If we're dom0, then linear map the ISA machine addresses into
-        * the kernel's address space.
-        */
-       if (!xen_initial_domain())
-               return;
-
-       xen_raw_printk("Xen: setup ISA identity maps\n");
-
-       for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
-               pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
-
-               if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
-                       BUG();
-       }
-
-       xen_flush_tlb();
-}
-
 static void __init xen_post_allocator_init(void)
 {
        pv_mmu_ops.set_pte = xen_set_pte;
index 1b267e75158d8d29d888788d0ac12da38a719a34..ffd08c414e91a7cc9f4a507b4ca3bbad9b823523 100644 (file)
@@ -499,16 +499,18 @@ static bool alloc_p2m(unsigned long pfn)
        return true;
 }
 
-static bool __init __early_alloc_p2m(unsigned long pfn)
+static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary)
 {
        unsigned topidx, mididx, idx;
+       unsigned long *p2m;
+       unsigned long *mid_mfn_p;
 
        topidx = p2m_top_index(pfn);
        mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);
 
        /* Pfff.. No boundary cross-over, lets get out. */
-       if (!idx)
+       if (!idx && check_boundary)
                return false;
 
        WARN(p2m_top[topidx][mididx] == p2m_identity,
@@ -522,24 +524,66 @@ static bool __init __early_alloc_p2m(unsigned long pfn)
                return false;
 
        /* Boundary cross-over for the edges: */
-       if (idx) {
-               unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-               unsigned long *mid_mfn_p;
+       p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
 
-               p2m_init(p2m);
+       p2m_init(p2m);
 
-               p2m_top[topidx][mididx] = p2m;
+       p2m_top[topidx][mididx] = p2m;
 
-               /* For save/restore we need to MFN of the P2M saved */
-               
-               mid_mfn_p = p2m_top_mfn_p[topidx];
-               WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
-                       "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
-                       topidx, mididx);
-               mid_mfn_p[mididx] = virt_to_mfn(p2m);
+       /* For save/restore we need to MFN of the P2M saved */
+
+       mid_mfn_p = p2m_top_mfn_p[topidx];
+       WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
+               "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
+               topidx, mididx);
+       mid_mfn_p[mididx] = virt_to_mfn(p2m);
+
+       return true;
+}
+
+static bool __init early_alloc_p2m(unsigned long pfn)
+{
+       unsigned topidx = p2m_top_index(pfn);
+       unsigned long *mid_mfn_p;
+       unsigned long **mid;
+
+       mid = p2m_top[topidx];
+       mid_mfn_p = p2m_top_mfn_p[topidx];
+       if (mid == p2m_mid_missing) {
+               mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+
+               p2m_mid_init(mid);
+
+               p2m_top[topidx] = mid;
 
+               BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
        }
-       return idx != 0;
+       /* And the save/restore P2M tables.. */
+       if (mid_mfn_p == p2m_mid_missing_mfn) {
+               mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+               p2m_mid_mfn_init(mid_mfn_p);
+
+               p2m_top_mfn_p[topidx] = mid_mfn_p;
+               p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+               /* Note: we don't set mid_mfn_p[midix] here,
+                * look in early_alloc_p2m_middle */
+       }
+       return true;
+}
+bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+       if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {
+               if (!early_alloc_p2m(pfn))
+                       return false;
+
+               if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
+                       return false;
+
+               if (!__set_phys_to_machine(pfn, mfn))
+                       return false;
+       }
+
+       return true;
 }
 unsigned long __init set_phys_range_identity(unsigned long pfn_s,
                                      unsigned long pfn_e)
@@ -559,35 +603,11 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
                pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
                pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
        {
-               unsigned topidx = p2m_top_index(pfn);
-               unsigned long *mid_mfn_p;
-               unsigned long **mid;
-
-               mid = p2m_top[topidx];
-               mid_mfn_p = p2m_top_mfn_p[topidx];
-               if (mid == p2m_mid_missing) {
-                       mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
-                       p2m_mid_init(mid);
-
-                       p2m_top[topidx] = mid;
-
-                       BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
-               }
-               /* And the save/restore P2M tables.. */
-               if (mid_mfn_p == p2m_mid_missing_mfn) {
-                       mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
-                       p2m_mid_mfn_init(mid_mfn_p);
-
-                       p2m_top_mfn_p[topidx] = mid_mfn_p;
-                       p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
-                       /* Note: we don't set mid_mfn_p[midix] here,
-                        * look in __early_alloc_p2m */
-               }
+               WARN_ON(!early_alloc_p2m(pfn));
        }
 
-       __early_alloc_p2m(pfn_s);
-       __early_alloc_p2m(pfn_e);
+       early_alloc_p2m_middle(pfn_s, true);
+       early_alloc_p2m_middle(pfn_e, true);
 
        for (pfn = pfn_s; pfn < pfn_e; pfn++)
                if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
index 1ba8dff26753fb99460a3c33b17feab0ce11ed81..3ebba0753d3876b887875aaf53fdd461f544441c 100644 (file)
@@ -26,7 +26,6 @@
 #include <xen/interface/memory.h>
 #include <xen/interface/physdev.h>
 #include <xen/features.h>
-
 #include "xen-ops.h"
 #include "vdso.h"
 
@@ -84,8 +83,8 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
                __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 }
 
-static unsigned long __init xen_release_chunk(unsigned long start,
-                                             unsigned long end)
+static unsigned long __init xen_do_chunk(unsigned long start,
+                                        unsigned long end, bool release)
 {
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
@@ -96,30 +95,138 @@ static unsigned long __init xen_release_chunk(unsigned long start,
        unsigned long pfn;
        int ret;
 
-       for(pfn = start; pfn < end; pfn++) {
+       for (pfn = start; pfn < end; pfn++) {
+               unsigned long frame;
                unsigned long mfn = pfn_to_mfn(pfn);
 
-               /* Make sure pfn exists to start with */
-               if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
-                       continue;
-
-               set_xen_guest_handle(reservation.extent_start, &mfn);
+               if (release) {
+                       /* Make sure pfn exists to start with */
+                       if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
+                               continue;
+                       frame = mfn;
+               } else {
+                       if (mfn != INVALID_P2M_ENTRY)
+                               continue;
+                       frame = pfn;
+               }
+               set_xen_guest_handle(reservation.extent_start, &frame);
                reservation.nr_extents = 1;
 
-               ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+               ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
                                           &reservation);
-               WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
+               WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
+                    release ? "release" : "populate", pfn, ret);
+
                if (ret == 1) {
-                       __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+                       if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
+                               if (release)
+                                       break;
+                               set_xen_guest_handle(reservation.extent_start, &frame);
+                               reservation.nr_extents = 1;
+                               ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+                                                          &reservation);
+                               break;
+                       }
                        len++;
-               }
+               } else
+                       break;
        }
-       printk(KERN_INFO "Freeing  %lx-%lx pfn range: %lu pages freed\n",
-              start, end, len);
+       if (len)
+               printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
+                      release ? "Freeing" : "Populating",
+                      start, end, len,
+                      release ? "freed" : "added");
 
        return len;
 }
 
+static unsigned long __init xen_release_chunk(unsigned long start,
+                                             unsigned long end)
+{
+       return xen_do_chunk(start, end, true);
+}
+
+static unsigned long __init xen_populate_chunk(
+       const struct e820entry *list, size_t map_size,
+       unsigned long max_pfn, unsigned long *last_pfn,
+       unsigned long credits_left)
+{
+       const struct e820entry *entry;
+       unsigned int i;
+       unsigned long done = 0;
+       unsigned long dest_pfn;
+
+       for (i = 0, entry = list; i < map_size; i++, entry++) {
+               unsigned long credits = credits_left;
+               unsigned long s_pfn;
+               unsigned long e_pfn;
+               unsigned long pfns;
+               long capacity;
+
+               if (credits <= 0)
+                       break;
+
+               if (entry->type != E820_RAM)
+                       continue;
+
+               e_pfn = PFN_UP(entry->addr + entry->size);
+
+               /* We only care about E820 after the xen_start_info->nr_pages */
+               if (e_pfn <= max_pfn)
+                       continue;
+
+               s_pfn = PFN_DOWN(entry->addr);
+               /* If the E820 falls within the nr_pages, we want to start
+                * at the nr_pages PFN.
+                * If that would mean going past the E820 entry, skip it
+                */
+               if (s_pfn <= max_pfn) {
+                       capacity = e_pfn - max_pfn;
+                       dest_pfn = max_pfn;
+               } else {
+                       /* last_pfn MUST be within E820_RAM regions */
+                       if (*last_pfn && e_pfn >= *last_pfn)
+                               s_pfn = *last_pfn;
+                       capacity = e_pfn - s_pfn;
+                       dest_pfn = s_pfn;
+               }
+               /* If we had filled this E820_RAM entry, go to the next one. */
+               if (capacity <= 0)
+                       continue;
+
+               if (credits > capacity)
+                       credits = capacity;
+
+               pfns = xen_do_chunk(dest_pfn, dest_pfn + credits, false);
+               done += pfns;
+               credits_left -= pfns;
+               *last_pfn = (dest_pfn + pfns);
+       }
+       return done;
+}
+
+static void __init xen_set_identity_and_release_chunk(
+       unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+       unsigned long *released, unsigned long *identity)
+{
+       unsigned long pfn;
+
+       /*
+        * If the PFNs are currently mapped, the VA mapping also needs
+        * to be updated to be 1:1.
+        */
+       for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
+               (void)HYPERVISOR_update_va_mapping(
+                       (unsigned long)__va(pfn << PAGE_SHIFT),
+                       mfn_pte(pfn, PAGE_KERNEL_IO), 0);
+
+       if (start_pfn < nr_pages)
+               *released += xen_release_chunk(
+                       start_pfn, min(end_pfn, nr_pages));
+
+       *identity += set_phys_range_identity(start_pfn, end_pfn);
+}
+
 static unsigned long __init xen_set_identity_and_release(
        const struct e820entry *list, size_t map_size, unsigned long nr_pages)
 {
@@ -142,7 +249,6 @@ static unsigned long __init xen_set_identity_and_release(
         */
        for (i = 0, entry = list; i < map_size; i++, entry++) {
                phys_addr_t end = entry->addr + entry->size;
-
                if (entry->type == E820_RAM || i == map_size - 1) {
                        unsigned long start_pfn = PFN_DOWN(start);
                        unsigned long end_pfn = PFN_UP(end);
@@ -150,20 +256,19 @@ static unsigned long __init xen_set_identity_and_release(
                        if (entry->type == E820_RAM)
                                end_pfn = PFN_UP(entry->addr);
 
-                       if (start_pfn < end_pfn) {
-                               if (start_pfn < nr_pages)
-                                       released += xen_release_chunk(
-                                               start_pfn, min(end_pfn, nr_pages));
+                       if (start_pfn < end_pfn)
+                               xen_set_identity_and_release_chunk(
+                                       start_pfn, end_pfn, nr_pages,
+                                       &released, &identity);
 
-                               identity += set_phys_range_identity(
-                                       start_pfn, end_pfn);
-                       }
                        start = end;
                }
        }
 
-       printk(KERN_INFO "Released %lu pages of unused memory\n", released);
-       printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+       if (released)
+               printk(KERN_INFO "Released %lu pages of unused memory\n", released);
+       if (identity)
+               printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
 
        return released;
 }
@@ -217,7 +322,9 @@ char * __init xen_memory_setup(void)
        int rc;
        struct xen_memory_map memmap;
        unsigned long max_pages;
+       unsigned long last_pfn = 0;
        unsigned long extra_pages = 0;
+       unsigned long populated;
        int i;
        int op;
 
@@ -257,8 +364,19 @@ char * __init xen_memory_setup(void)
         */
        xen_released_pages = xen_set_identity_and_release(
                map, memmap.nr_entries, max_pfn);
-       extra_pages += xen_released_pages;
 
+       /*
+        * Populate back the non-RAM pages and E820 gaps that had been
+        * released. */
+       populated = xen_populate_chunk(map, memmap.nr_entries,
+                       max_pfn, &last_pfn, xen_released_pages);
+
+       extra_pages += (xen_released_pages - populated);
+
+       if (last_pfn > max_pfn) {
+               max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
+               mem_end = PFN_PHYS(max_pfn);
+       }
        /*
         * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
         * factor the base size.  On non-highmem systems, the base
@@ -272,7 +390,6 @@ char * __init xen_memory_setup(void)
         */
        extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
                          extra_pages);
-
        i = 0;
        while (i < memmap.nr_entries) {
                u64 addr = map[i].addr;
index 3700945ed0d52346839eefc2a0f1bb765c7c5558..afb250d22a6b2e29cf96f54e48bc6f619dab2ca7 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
+#include <linux/irq_work.h>
 
 #include <asm/paravirt.h>
 #include <asm/desc.h>
@@ -41,10 +42,12 @@ cpumask_var_t xen_cpu_initialized_map;
 static DEFINE_PER_CPU(int, xen_resched_irq);
 static DEFINE_PER_CPU(int, xen_callfunc_irq);
 static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
+static DEFINE_PER_CPU(int, xen_irq_work);
 static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
+static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
 
 /*
  * Reschedule call back.
@@ -143,6 +146,17 @@ static int xen_smp_intr_init(unsigned int cpu)
                goto fail;
        per_cpu(xen_callfuncsingle_irq, cpu) = rc;
 
+       callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
+       rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
+                                   cpu,
+                                   xen_irq_work_interrupt,
+                                   IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+                                   callfunc_name,
+                                   NULL);
+       if (rc < 0)
+               goto fail;
+       per_cpu(xen_irq_work, cpu) = rc;
+
        return 0;
 
  fail:
@@ -155,6 +169,8 @@ static int xen_smp_intr_init(unsigned int cpu)
        if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
                                       NULL);
+       if (per_cpu(xen_irq_work, cpu) >= 0)
+               unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
 
        return rc;
 }
@@ -407,6 +423,7 @@ static void xen_cpu_die(unsigned int cpu)
        unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
        xen_uninit_lock_cpu(cpu);
        xen_teardown_timer(cpu);
 
@@ -469,8 +486,8 @@ static void xen_smp_send_reschedule(int cpu)
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-static void xen_send_IPI_mask(const struct cpumask *mask,
-                             enum ipi_vector vector)
+static void __xen_send_IPI_mask(const struct cpumask *mask,
+                             int vector)
 {
        unsigned cpu;
 
@@ -482,7 +499,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
 {
        int cpu;
 
-       xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+       __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
        /* Make sure other vcpus get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
@@ -495,10 +512,86 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
 
 static void xen_smp_send_call_function_single_ipi(int cpu)
 {
-       xen_send_IPI_mask(cpumask_of(cpu),
+       __xen_send_IPI_mask(cpumask_of(cpu),
                          XEN_CALL_FUNCTION_SINGLE_VECTOR);
 }
 
+static inline int xen_map_vector(int vector)
+{
+       int xen_vector;
+
+       switch (vector) {
+       case RESCHEDULE_VECTOR:
+               xen_vector = XEN_RESCHEDULE_VECTOR;
+               break;
+       case CALL_FUNCTION_VECTOR:
+               xen_vector = XEN_CALL_FUNCTION_VECTOR;
+               break;
+       case CALL_FUNCTION_SINGLE_VECTOR:
+               xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
+               break;
+       case IRQ_WORK_VECTOR:
+               xen_vector = XEN_IRQ_WORK_VECTOR;
+               break;
+       default:
+               xen_vector = -1;
+               printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
+                       vector);
+       }
+
+       return xen_vector;
+}
+
+void xen_send_IPI_mask(const struct cpumask *mask,
+                             int vector)
+{
+       int xen_vector = xen_map_vector(vector);
+
+       if (xen_vector >= 0)
+               __xen_send_IPI_mask(mask, xen_vector);
+}
+
+void xen_send_IPI_all(int vector)
+{
+       int xen_vector = xen_map_vector(vector);
+
+       if (xen_vector >= 0)
+               __xen_send_IPI_mask(cpu_online_mask, xen_vector);
+}
+
+void xen_send_IPI_self(int vector)
+{
+       int xen_vector = xen_map_vector(vector);
+
+       if (xen_vector >= 0)
+               xen_send_IPI_one(smp_processor_id(), xen_vector);
+}
+
+void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
+                               int vector)
+{
+       unsigned cpu;
+       unsigned int this_cpu = smp_processor_id();
+
+       if (!(num_online_cpus() > 1))
+               return;
+
+       for_each_cpu_and(cpu, mask, cpu_online_mask) {
+               if (this_cpu == cpu)
+                       continue;
+
+               xen_smp_send_call_function_single_ipi(cpu);
+       }
+}
+
+void xen_send_IPI_allbutself(int vector)
+{
+       int xen_vector = xen_map_vector(vector);
+
+       if (xen_vector >= 0)
+               xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
+}
+
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
        irq_enter();
@@ -519,6 +612,16 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
+{
+       irq_enter();
+       irq_work_run();
+       inc_irq_stat(apic_irq_work_irqs);
+       irq_exit();
+
+       return IRQ_HANDLED;
+}
+
 static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
@@ -565,6 +668,7 @@ static void xen_hvm_cpu_die(unsigned int cpu)
        unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
        native_cpu_die(cpu);
 }
 
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
new file mode 100644 (file)
index 0000000..8981a76
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef _XEN_SMP_H
+
+extern void xen_send_IPI_mask(const struct cpumask *mask,
+                             int vector);
+extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
+                               int vector);
+extern void xen_send_IPI_allbutself(int vector);
+extern void physflat_send_IPI_allbutself(int vector);
+extern void xen_send_IPI_all(int vector);
+extern void xen_send_IPI_self(int vector);
+
+#endif
index d69cc6c3f8080aab34f14e0e0b42b1f6ec8e73f1..83e866d714ce24c8f18868180220c39e838505c6 100644 (file)
@@ -440,12 +440,12 @@ static int __init xen_spinlock_debugfs(void)
        debugfs_create_u64("time_total", 0444, d_spin_debug,
                           &spinlock_stats.time_total);
 
-       xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
-                                    spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
-       xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
-                                    spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
-       xen_debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
-                                    spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
+       debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
+                               spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
+       debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
+                               spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
+       debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
+                               spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
 
        return 0;
 }
index 45c0c0667bd966b28dfc36223f5d1912ab169c94..202d4c150154fb31ddb03da8f8144a45f21c02a7 100644 (file)
@@ -28,7 +28,6 @@ void xen_setup_shared_info(void);
 void xen_build_mfn_list_list(void);
 void xen_setup_machphys_mapping(void);
 pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
-void xen_ident_map_ISA(void);
 void xen_reserve_top(void);
 extern unsigned long xen_max_p2m_pfn;
 
diff --git a/arch/xtensa/include/asm/kvm_para.h b/arch/xtensa/include/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index c5e4ec0598d24b8ce1406eb73ce83cb580da014b..b9f8e5850d3a4e1c9bc1dbe3ff6ee0b7dbc47cc9 100644 (file)
@@ -30,8 +30,6 @@
 
 #define DEBUG_SIG  0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 extern struct task_struct *coproc_owners[];
 
 struct rt_sigframe
@@ -261,7 +259,6 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
 
-       sigdelsetmask(&set, ~_BLOCKABLE);
        set_current_blocked(&set);
 
        if (restore_sigcontext(regs, frame))
@@ -452,15 +449,6 @@ static void do_signal(struct pt_regs *regs)
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       sigset_t oldset;
-
-       if (try_to_freeze())
-               goto no_signal;
-
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-               oldset = &current->saved_sigmask;
-       else
-               oldset = &current->blocked;
 
        task_pt_regs(current)->icountlevel = 0;
 
@@ -501,19 +489,17 @@ static void do_signal(struct pt_regs *regs)
 
                /* Whee!  Actually deliver the signal.  */
                /* Set up the stack frame */
-               ret = setup_frame(signr, &ka, &info, oldset, regs);
+               ret = setup_frame(signr, &ka, &info, sigmask_to_save(), regs);
                if (ret)
                        return;
 
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               block_sigmask(&ka, signr);
+               signal_delivered(signr, info, ka, regs, 0);
                if (current->ptrace & PT_SINGLESTEP)
                        task_pt_regs(current)->icountlevel = 1;
 
                return;
        }
 
-no_signal:
        /* Did we come from a system call? */
        if ((signed) regs->syscall >= 0) {
                /* Restart the system call - no handlers present */
@@ -532,8 +518,7 @@ no_signal:
        }
 
        /* If there's no signal to deliver, we just restore the saved mask.  */
-       if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
-               set_current_blocked(&current->saved_sigmask);
+       restore_saved_sigmask();
 
        if (current->ptrace & PT_SINGLESTEP)
                task_pt_regs(current)->icountlevel = 1;
@@ -548,9 +533,6 @@ void do_notify_resume(struct pt_regs *regs)
        if (test_thread_flag(TIF_SIGPENDING))
                do_signal(regs);
 
-       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
+       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
                tracehook_notify_resume(regs);
-               if (current->replacement_session_keyring)
-                       key_replace_session_keyring();
-       }
 }
index 3199b76f795de33f9db31e140aad75a716a51d44..421bef9c4c48d26c035792716064e9f273253c6e 100644 (file)
@@ -23,8 +23,6 @@ config IOSCHED_DEADLINE
 
 config IOSCHED_CFQ
        tristate "CFQ I/O scheduler"
-       # If BLK_CGROUP is a module, CFQ has to be built as module.
-       depends on (BLK_CGROUP=m && m) || !BLK_CGROUP || BLK_CGROUP=y
        default y
        ---help---
          The CFQ I/O scheduler tries to distribute bandwidth equally
@@ -34,8 +32,6 @@ config IOSCHED_CFQ
 
          This is the default I/O scheduler.
 
-         Note: If BLK_CGROUP=m, then CFQ can be built only as module.
-
 config CFQ_GROUP_IOSCHED
        bool "CFQ Group Scheduling support"
        depends on IOSCHED_CFQ && BLK_CGROUP
index 126c341955de162cba789040d0908b1d147c943c..02cf6335e9bdc5bb940ec89fcdbfe63746f9b5df 100644 (file)
  *                   Nauman Rafique <nauman@google.com>
  */
 #include <linux/ioprio.h>
-#include <linux/seq_file.h>
 #include <linux/kdev_t.h>
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/blkdev.h>
 #include <linux/slab.h>
-#include "blk-cgroup.h"
 #include <linux/genhd.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include "blk-cgroup.h"
+#include "blk.h"
 
 #define MAX_KEY_LEN 100
 
-static DEFINE_SPINLOCK(blkio_list_lock);
-static LIST_HEAD(blkio_list);
+static DEFINE_MUTEX(blkcg_pol_mutex);
 
-struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
-EXPORT_SYMBOL_GPL(blkio_root_cgroup);
+struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
+EXPORT_SYMBOL_GPL(blkcg_root);
 
-/* for encoding cft->private value on file */
-#define BLKIOFILE_PRIVATE(x, val)      (((x) << 16) | (val))
-/* What policy owns the file, proportional or throttle */
-#define BLKIOFILE_POLICY(val)          (((val) >> 16) & 0xffff)
-#define BLKIOFILE_ATTR(val)            ((val) & 0xffff)
+static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
 
-static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
-                                           struct blkio_policy_node *pn)
+struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
 {
-       list_add(&pn->node, &blkcg->policy_list);
+       return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
+                           struct blkcg, css);
 }
+EXPORT_SYMBOL_GPL(cgroup_to_blkcg);
 
-static inline bool cftype_blkg_same_policy(struct cftype *cft,
-                       struct blkio_group *blkg)
+static struct blkcg *task_blkcg(struct task_struct *tsk)
 {
-       enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-
-       if (blkg->plid == plid)
-               return 1;
-
-       return 0;
+       return container_of(task_subsys_state(tsk, blkio_subsys_id),
+                           struct blkcg, css);
 }
 
-/* Determines if policy node matches cgroup file being accessed */
-static inline bool pn_matches_cftype(struct cftype *cft,
-                       struct blkio_policy_node *pn)
+struct blkcg *bio_blkcg(struct bio *bio)
 {
-       enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-       int fileid = BLKIOFILE_ATTR(cft->private);
-
-       return (plid == pn->plid && fileid == pn->fileid);
+       if (bio && bio->bi_css)
+               return container_of(bio->bi_css, struct blkcg, css);
+       return task_blkcg(current);
 }
+EXPORT_SYMBOL_GPL(bio_blkcg);
 
-/* Must be called with blkcg->lock held */
-static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
+static bool blkcg_policy_enabled(struct request_queue *q,
+                                const struct blkcg_policy *pol)
 {
-       list_del(&pn->node);
+       return pol && test_bit(pol->plid, q->blkcg_pols);
 }
 
-/* Must be called with blkcg->lock held */
-static struct blkio_policy_node *
-blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
-               enum blkio_policy_id plid, int fileid)
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkcg_gq *blkg)
 {
-       struct blkio_policy_node *pn;
-
-       list_for_each_entry(pn, &blkcg->policy_list, node) {
-               if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
-                       return pn;
-       }
+       int i;
 
-       return NULL;
-}
+       if (!blkg)
+               return;
 
-struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
-{
-       return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
-                           struct blkio_cgroup, css);
-}
-EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
+       for (i = 0; i < BLKCG_MAX_POLS; i++) {
+               struct blkcg_policy *pol = blkcg_policy[i];
+               struct blkg_policy_data *pd = blkg->pd[i];
 
-struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
-{
-       return container_of(task_subsys_state(tsk, blkio_subsys_id),
-                           struct blkio_cgroup, css);
-}
-EXPORT_SYMBOL_GPL(task_blkio_cgroup);
+               if (!pd)
+                       continue;
 
-static inline void
-blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
-{
-       struct blkio_policy_type *blkiop;
+               if (pol && pol->pd_exit_fn)
+                       pol->pd_exit_fn(blkg);
 
-       list_for_each_entry(blkiop, &blkio_list, list) {
-               /* If this policy does not own the blkg, do not send updates */
-               if (blkiop->plid != blkg->plid)
-                       continue;
-               if (blkiop->ops.blkio_update_group_weight_fn)
-                       blkiop->ops.blkio_update_group_weight_fn(blkg->key,
-                                                       blkg, weight);
+               kfree(pd);
        }
+
+       kfree(blkg);
 }
 
-static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
-                               int fileid)
+/**
+ * blkg_alloc - allocate a blkg
+ * @blkcg: block cgroup the new blkg is associated with
+ * @q: request_queue the new blkg is associated with
+ *
+ * Allocate a new blkg assocating @blkcg and @q.
+ */
+static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
 {
-       struct blkio_policy_type *blkiop;
-
-       list_for_each_entry(blkiop, &blkio_list, list) {
-
-               /* If this policy does not own the blkg, do not send updates */
-               if (blkiop->plid != blkg->plid)
-                       continue;
-
-               if (fileid == BLKIO_THROTL_read_bps_device
-                   && blkiop->ops.blkio_update_group_read_bps_fn)
-                       blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
-                                                               blkg, bps);
+       struct blkcg_gq *blkg;
+       int i;
 
-               if (fileid == BLKIO_THROTL_write_bps_device
-                   && blkiop->ops.blkio_update_group_write_bps_fn)
-                       blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
-                                                               blkg, bps);
-       }
-}
+       /* alloc and init base part */
+       blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
+       if (!blkg)
+               return NULL;
 
-static inline void blkio_update_group_iops(struct blkio_group *blkg,
-                       unsigned int iops, int fileid)
-{
-       struct blkio_policy_type *blkiop;
+       blkg->q = q;
+       INIT_LIST_HEAD(&blkg->q_node);
+       blkg->blkcg = blkcg;
+       blkg->refcnt = 1;
 
-       list_for_each_entry(blkiop, &blkio_list, list) {
+       for (i = 0; i < BLKCG_MAX_POLS; i++) {
+               struct blkcg_policy *pol = blkcg_policy[i];
+               struct blkg_policy_data *pd;
 
-               /* If this policy does not own the blkg, do not send updates */
-               if (blkiop->plid != blkg->plid)
+               if (!blkcg_policy_enabled(q, pol))
                        continue;
 
-               if (fileid == BLKIO_THROTL_read_iops_device
-                   && blkiop->ops.blkio_update_group_read_iops_fn)
-                       blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
-                                                               blkg, iops);
+               /* alloc per-policy data and attach it to blkg */
+               pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node);
+               if (!pd) {
+                       blkg_free(blkg);
+                       return NULL;
+               }
 
-               if (fileid == BLKIO_THROTL_write_iops_device
-                   && blkiop->ops.blkio_update_group_write_iops_fn)
-                       blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
-                                                               blkg,iops);
+               blkg->pd[i] = pd;
+               pd->blkg = blkg;
        }
-}
 
-/*
- * Add to the appropriate stat variable depending on the request type.
- * This should be called with the blkg->stats_lock held.
- */
-static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
-                               bool sync)
-{
-       if (direction)
-               stat[BLKIO_STAT_WRITE] += add;
-       else
-               stat[BLKIO_STAT_READ] += add;
-       if (sync)
-               stat[BLKIO_STAT_SYNC] += add;
-       else
-               stat[BLKIO_STAT_ASYNC] += add;
-}
+       /* invoke per-policy init */
+       for (i = 0; i < BLKCG_MAX_POLS; i++) {
+               struct blkcg_policy *pol = blkcg_policy[i];
 
-/*
- * Decrements the appropriate stat variable if non-zero depending on the
- * request type. Panics on value being zero.
- * This should be called with the blkg->stats_lock held.
- */
-static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
-{
-       if (direction) {
-               BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
-               stat[BLKIO_STAT_WRITE]--;
-       } else {
-               BUG_ON(stat[BLKIO_STAT_READ] == 0);
-               stat[BLKIO_STAT_READ]--;
-       }
-       if (sync) {
-               BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
-               stat[BLKIO_STAT_SYNC]--;
-       } else {
-               BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
-               stat[BLKIO_STAT_ASYNC]--;
+               if (blkcg_policy_enabled(blkg->q, pol))
+                       pol->pd_init_fn(blkg);
        }
-}
 
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
-                                               struct blkio_group *curr_blkg)
-{
-       if (blkio_blkg_waiting(&blkg->stats))
-               return;
-       if (blkg == curr_blkg)
-               return;
-       blkg->stats.start_group_wait_time = sched_clock();
-       blkio_mark_blkg_waiting(&blkg->stats);
+       return blkg;
 }
 
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
+static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
+                                     struct request_queue *q)
 {
-       unsigned long long now;
+       struct blkcg_gq *blkg;
 
-       if (!blkio_blkg_waiting(stats))
-               return;
+       blkg = rcu_dereference(blkcg->blkg_hint);
+       if (blkg && blkg->q == q)
+               return blkg;
 
-       now = sched_clock();
-       if (time_after64(now, stats->start_group_wait_time))
-               stats->group_wait_time += now - stats->start_group_wait_time;
-       blkio_clear_blkg_waiting(stats);
+       /*
+        * Hint didn't match.  Look up from the radix tree.  Note that we
+        * may not be holding queue_lock and thus are not sure whether
+        * @blkg from blkg_tree has already been removed or not, so we
+        * can't update hint to the lookup result.  Leave it to the caller.
+        */
+       blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
+       if (blkg && blkg->q == q)
+               return blkg;
+
+       return NULL;
 }
 
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_end_empty_time(struct blkio_group_stats *stats)
+/**
+ * blkg_lookup - lookup blkg for the specified blkcg - q pair
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for the @blkcg - @q pair.  This function should be called
+ * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
+ * - see blk_queue_bypass_start() for details.
+ */
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
 {
-       unsigned long long now;
-
-       if (!blkio_blkg_empty(stats))
-               return;
+       WARN_ON_ONCE(!rcu_read_lock_held());
 
-       now = sched_clock();
-       if (time_after64(now, stats->start_empty_time))
-               stats->empty_time += now - stats->start_empty_time;
-       blkio_clear_blkg_empty(stats);
+       if (unlikely(blk_queue_bypass(q)))
+               return NULL;
+       return __blkg_lookup(blkcg, q);
 }
+EXPORT_SYMBOL_GPL(blkg_lookup);
 
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+                                            struct request_queue *q)
+       __releases(q->queue_lock) __acquires(q->queue_lock)
 {
-       unsigned long flags;
+       struct blkcg_gq *blkg;
+       int ret;
 
-       spin_lock_irqsave(&blkg->stats_lock, flags);
-       BUG_ON(blkio_blkg_idling(&blkg->stats));
-       blkg->stats.start_idle_time = sched_clock();
-       blkio_mark_blkg_idling(&blkg->stats);
-       spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
+       WARN_ON_ONCE(!rcu_read_lock_held());
+       lockdep_assert_held(q->queue_lock);
 
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
-       unsigned long flags;
-       unsigned long long now;
-       struct blkio_group_stats *stats;
-
-       spin_lock_irqsave(&blkg->stats_lock, flags);
-       stats = &blkg->stats;
-       if (blkio_blkg_idling(stats)) {
-               now = sched_clock();
-               if (time_after64(now, stats->start_idle_time))
-                       stats->idle_time += now - stats->start_idle_time;
-               blkio_clear_blkg_idling(stats);
+       /* lookup and update hint on success, see __blkg_lookup() for details */
+       blkg = __blkg_lookup(blkcg, q);
+       if (blkg) {
+               rcu_assign_pointer(blkcg->blkg_hint, blkg);
+               return blkg;
        }
-       spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
 
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
-{
-       unsigned long flags;
-       struct blkio_group_stats *stats;
-
-       spin_lock_irqsave(&blkg->stats_lock, flags);
-       stats = &blkg->stats;
-       stats->avg_queue_size_sum +=
-                       stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
-                       stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
-       stats->avg_queue_size_samples++;
-       blkio_update_group_wait_time(stats);
-       spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
+       /* blkg holds a reference to blkcg */
+       if (!css_tryget(&blkcg->css))
+               return ERR_PTR(-EINVAL);
 
-void blkiocg_set_start_empty_time(struct blkio_group *blkg)
-{
-       unsigned long flags;
-       struct blkio_group_stats *stats;
+       /* allocate */
+       ret = -ENOMEM;
+       blkg = blkg_alloc(blkcg, q);
+       if (unlikely(!blkg))
+               goto err_put;
 
-       spin_lock_irqsave(&blkg->stats_lock, flags);
-       stats = &blkg->stats;
+       /* insert */
+       ret = radix_tree_preload(GFP_ATOMIC);
+       if (ret)
+               goto err_free;
 
-       if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
-                       stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
-               spin_unlock_irqrestore(&blkg->stats_lock, flags);
-               return;
+       spin_lock(&blkcg->lock);
+       ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
+       if (likely(!ret)) {
+               hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
+               list_add(&blkg->q_node, &q->blkg_list);
        }
+       spin_unlock(&blkcg->lock);
 
-       /*
-        * group is already marked empty. This can happen if cfqq got new
-        * request in parent group and moved to this group while being added
-        * to service tree. Just ignore the event and move on.
-        */
-       if(blkio_blkg_empty(stats)) {
-               spin_unlock_irqrestore(&blkg->stats_lock, flags);
-               return;
-       }
+       radix_tree_preload_end();
 
-       stats->start_empty_time = sched_clock();
-       blkio_mark_blkg_empty(stats);
-       spin_unlock_irqrestore(&blkg->stats_lock, flags);
+       if (!ret)
+               return blkg;
+err_free:
+       blkg_free(blkg);
+err_put:
+       css_put(&blkcg->css);
+       return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
 
-void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-                       unsigned long dequeue)
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+                                   struct request_queue *q)
 {
-       blkg->stats.dequeue += dequeue;
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
-#else
-static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
-                                       struct blkio_group *curr_blkg) {}
-static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
-#endif
-
-void blkiocg_update_io_add_stats(struct blkio_group *blkg,
-                       struct blkio_group *curr_blkg, bool direction,
-                       bool sync)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&blkg->stats_lock, flags);
-       blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
-                       sync);
-       blkio_end_empty_time(&blkg->stats);
-       blkio_set_start_group_wait_time(blkg, curr_blkg);
-       spin_unlock_irqrestore(&blkg->stats_lock, flags);
+       /*
+        * This could be the first entry point of blkcg implementation and
+        * we shouldn't allow anything to go through for a bypassing queue.
+        */
+       if (unlikely(blk_queue_bypass(q)))
+               return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+       return __blkg_lookup_create(blkcg, q);
 }
-EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
+EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
-void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-                                               bool direction, bool sync)
+static void blkg_destroy(struct blkcg_gq *blkg)
 {
-       unsigned long flags;
+       struct request_queue *q = blkg->q;
+       struct blkcg *blkcg = blkg->blkcg;
 
-       spin_lock_irqsave(&blkg->stats_lock, flags);
-       blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
-                                       direction, sync);
-       spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
+       lockdep_assert_held(q->queue_lock);
+       lockdep_assert_held(&blkcg->lock);
 
-void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
-                               unsigned long unaccounted_time)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&blkg->stats_lock, flags);
-       blkg->stats.time += time;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-       blkg->stats.unaccounted_time += unaccounted_time;
-#endif
-       spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
+       /* Something wrong if we are trying to remove same group twice */
+       WARN_ON_ONCE(list_empty(&blkg->q_node));
+       WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 
-/*
- * should be called under rcu read lock or queue lock to make sure blkg pointer
- * is valid.
- */
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-                               uint64_t bytes, bool direction, bool sync)
-{
-       struct blkio_group_stats_cpu *stats_cpu;
-       unsigned long flags;
+       radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
+       list_del_init(&blkg->q_node);
+       hlist_del_init_rcu(&blkg->blkcg_node);
 
        /*
-        * Disabling interrupts to provide mutual exclusion between two
-        * writes on same cpu. It probably is not needed for 64bit. Not
-        * optimizing that case yet.
+        * Both setting lookup hint to and clearing it from @blkg are done
+        * under queue_lock.  If it's not pointing to @blkg now, it never
+        * will.  Hint assignment itself can race safely.
         */
-       local_irq_save(flags);
-
-       stats_cpu = this_cpu_ptr(blkg->stats_cpu);
-
-       u64_stats_update_begin(&stats_cpu->syncp);
-       stats_cpu->sectors += bytes >> 9;
-       blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
-                       1, direction, sync);
-       blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
-                       bytes, direction, sync);
-       u64_stats_update_end(&stats_cpu->syncp);
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
-
-void blkiocg_update_completion_stats(struct blkio_group *blkg,
-       uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
-{
-       struct blkio_group_stats *stats;
-       unsigned long flags;
-       unsigned long long now = sched_clock();
-
-       spin_lock_irqsave(&blkg->stats_lock, flags);
-       stats = &blkg->stats;
-       if (time_after64(now, io_start_time))
-               blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
-                               now - io_start_time, direction, sync);
-       if (time_after64(io_start_time, start_time))
-               blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
-                               io_start_time - start_time, direction, sync);
-       spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
-
-/*  Merged stats are per cpu.  */
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
-                                       bool sync)
-{
-       struct blkio_group_stats_cpu *stats_cpu;
-       unsigned long flags;
+       if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
+               rcu_assign_pointer(blkcg->blkg_hint, NULL);
 
        /*
-        * Disabling interrupts to provide mutual exclusion between two
-        * writes on same cpu. It probably is not needed for 64bit. Not
-        * optimizing that case yet.
+        * Put the reference taken at the time of creation so that when all
+        * queues are gone, group can be destroyed.
         */
-       local_irq_save(flags);
-
-       stats_cpu = this_cpu_ptr(blkg->stats_cpu);
-
-       u64_stats_update_begin(&stats_cpu->syncp);
-       blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
-                               direction, sync);
-       u64_stats_update_end(&stats_cpu->syncp);
-       local_irq_restore(flags);
+       blkg_put(blkg);
 }
-EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
 
-/*
- * This function allocates the per cpu stats for blkio_group. Should be called
- * from sleepable context as alloc_per_cpu() requires that.
+/**
+ * blkg_destroy_all - destroy all blkgs associated with a request_queue
+ * @q: request_queue of interest
+ *
+ * Destroy all blkgs associated with @q.
  */
-int blkio_alloc_blkg_stats(struct blkio_group *blkg)
+static void blkg_destroy_all(struct request_queue *q)
 {
-       /* Allocate memory for per cpu stats */
-       blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
-       if (!blkg->stats_cpu)
-               return -ENOMEM;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
+       struct blkcg_gq *blkg, *n;
 
-void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-               struct blkio_group *blkg, void *key, dev_t dev,
-               enum blkio_policy_id plid)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&blkcg->lock, flags);
-       spin_lock_init(&blkg->stats_lock);
-       rcu_assign_pointer(blkg->key, key);
-       blkg->blkcg_id = css_id(&blkcg->css);
-       hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
-       blkg->plid = plid;
-       spin_unlock_irqrestore(&blkcg->lock, flags);
-       /* Need to take css reference ? */
-       cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
-       blkg->dev = dev;
-}
-EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
+       lockdep_assert_held(q->queue_lock);
 
-static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
-       hlist_del_init_rcu(&blkg->blkcg_node);
-       blkg->blkcg_id = 0;
-}
+       list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
+               struct blkcg *blkcg = blkg->blkcg;
 
-/*
- * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
- * indicating that blk_group was unhashed by the time we got to it.
- */
-int blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
-       struct blkio_cgroup *blkcg;
-       unsigned long flags;
-       struct cgroup_subsys_state *css;
-       int ret = 1;
-
-       rcu_read_lock();
-       css = css_lookup(&blkio_subsys, blkg->blkcg_id);
-       if (css) {
-               blkcg = container_of(css, struct blkio_cgroup, css);
-               spin_lock_irqsave(&blkcg->lock, flags);
-               if (!hlist_unhashed(&blkg->blkcg_node)) {
-                       __blkiocg_del_blkio_group(blkg);
-                       ret = 0;
-               }
-               spin_unlock_irqrestore(&blkcg->lock, flags);
+               spin_lock(&blkcg->lock);
+               blkg_destroy(blkg);
+               spin_unlock(&blkcg->lock);
        }
-
-       rcu_read_unlock();
-       return ret;
 }
-EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
 
-/* called under rcu_read_lock(). */
-struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
+static void blkg_rcu_free(struct rcu_head *rcu_head)
 {
-       struct blkio_group *blkg;
-       struct hlist_node *n;
-       void *__key;
-
-       hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
-               __key = blkg->key;
-               if (__key == key)
-                       return blkg;
-       }
-
-       return NULL;
+       blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
 }
-EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
 
-static void blkio_reset_stats_cpu(struct blkio_group *blkg)
+void __blkg_release(struct blkcg_gq *blkg)
 {
-       struct blkio_group_stats_cpu *stats_cpu;
-       int i, j, k;
+       /* release the extra blkcg reference this blkg has been holding */
+       css_put(&blkg->blkcg->css);
+
        /*
-        * Note: On 64 bit arch this should not be an issue. This has the
-        * possibility of returning some inconsistent value on 32bit arch
-        * as 64bit update on 32bit is non atomic. Taking care of this
-        * corner case makes code very complicated, like sending IPIs to
-        * cpus, taking care of stats of offline cpus etc.
+        * A group is freed in rcu manner. But having an rcu lock does not
+        * mean that one can access all the fields of blkg and assume these
+        * are valid. For example, don't try to follow throtl_data and
+        * request queue links.
         *
-        * reset stats is anyway more of a debug feature and this sounds a
-        * corner case. So I am not complicating the code yet until and
-        * unless this becomes a real issue.
+        * Having a reference to blkg under an rcu allows acess to only
+        * values local to groups like group stats and group rate limits
         */
-       for_each_possible_cpu(i) {
-               stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
-               stats_cpu->sectors = 0;
-               for(j = 0; j < BLKIO_STAT_CPU_NR; j++)
-                       for (k = 0; k < BLKIO_STAT_TOTAL; k++)
-                               stats_cpu->stat_arr_cpu[j][k] = 0;
-       }
+       call_rcu(&blkg->rcu_head, blkg_rcu_free);
 }
+EXPORT_SYMBOL_GPL(__blkg_release);
 
-static int
-blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
+static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
+                            u64 val)
 {
-       struct blkio_cgroup *blkcg;
-       struct blkio_group *blkg;
-       struct blkio_group_stats *stats;
+       struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+       struct blkcg_gq *blkg;
        struct hlist_node *n;
-       uint64_t queued[BLKIO_STAT_TOTAL];
        int i;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-       bool idling, waiting, empty;
-       unsigned long long now = sched_clock();
-#endif
 
-       blkcg = cgroup_to_blkio_cgroup(cgroup);
+       mutex_lock(&blkcg_pol_mutex);
        spin_lock_irq(&blkcg->lock);
-       hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
-               spin_lock(&blkg->stats_lock);
-               stats = &blkg->stats;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-               idling = blkio_blkg_idling(stats);
-               waiting = blkio_blkg_waiting(stats);
-               empty = blkio_blkg_empty(stats);
-#endif
-               for (i = 0; i < BLKIO_STAT_TOTAL; i++)
-                       queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
-               memset(stats, 0, sizeof(struct blkio_group_stats));
-               for (i = 0; i < BLKIO_STAT_TOTAL; i++)
-                       stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-               if (idling) {
-                       blkio_mark_blkg_idling(stats);
-                       stats->start_idle_time = now;
-               }
-               if (waiting) {
-                       blkio_mark_blkg_waiting(stats);
-                       stats->start_group_wait_time = now;
-               }
-               if (empty) {
-                       blkio_mark_blkg_empty(stats);
-                       stats->start_empty_time = now;
-               }
-#endif
-               spin_unlock(&blkg->stats_lock);
-
-               /* Reset Per cpu stats which don't take blkg->stats_lock */
-               blkio_reset_stats_cpu(blkg);
-       }
-
-       spin_unlock_irq(&blkcg->lock);
-       return 0;
-}
-
-static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
-                               int chars_left, bool diskname_only)
-{
-       snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
-       chars_left -= strlen(str);
-       if (chars_left <= 0) {
-               printk(KERN_WARNING
-                       "Possibly incorrect cgroup stat display format");
-               return;
-       }
-       if (diskname_only)
-               return;
-       switch (type) {
-       case BLKIO_STAT_READ:
-               strlcat(str, " Read", chars_left);
-               break;
-       case BLKIO_STAT_WRITE:
-               strlcat(str, " Write", chars_left);
-               break;
-       case BLKIO_STAT_SYNC:
-               strlcat(str, " Sync", chars_left);
-               break;
-       case BLKIO_STAT_ASYNC:
-               strlcat(str, " Async", chars_left);
-               break;
-       case BLKIO_STAT_TOTAL:
-               strlcat(str, " Total", chars_left);
-               break;
-       default:
-               strlcat(str, " Invalid", chars_left);
-       }
-}
-
-static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
-                               struct cgroup_map_cb *cb, dev_t dev)
-{
-       blkio_get_key_name(0, dev, str, chars_left, true);
-       cb->fill(cb, str, val);
-       return val;
-}
-
-
-static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
-                       enum stat_type_cpu type, enum stat_sub_type sub_type)
-{
-       int cpu;
-       struct blkio_group_stats_cpu *stats_cpu;
-       u64 val = 0, tval;
-
-       for_each_possible_cpu(cpu) {
-               unsigned int start;
-               stats_cpu  = per_cpu_ptr(blkg->stats_cpu, cpu);
-
-               do {
-                       start = u64_stats_fetch_begin(&stats_cpu->syncp);
-                       if (type == BLKIO_STAT_CPU_SECTORS)
-                               tval = stats_cpu->sectors;
-                       else
-                               tval = stats_cpu->stat_arr_cpu[type][sub_type];
-               } while(u64_stats_fetch_retry(&stats_cpu->syncp, start));
-
-               val += tval;
-       }
-
-       return val;
-}
-
-static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
-               struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
-{
-       uint64_t disk_total, val;
-       char key_str[MAX_KEY_LEN];
-       enum stat_sub_type sub_type;
 
-       if (type == BLKIO_STAT_CPU_SECTORS) {
-               val = blkio_read_stat_cpu(blkg, type, 0);
-               return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
-       }
-
-       for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
-                       sub_type++) {
-               blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
-               val = blkio_read_stat_cpu(blkg, type, sub_type);
-               cb->fill(cb, key_str, val);
-       }
-
-       disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
-                       blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
-
-       blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
-       cb->fill(cb, key_str, disk_total);
-       return disk_total;
-}
-
-/* This should be called with blkg->stats_lock held */
-static uint64_t blkio_get_stat(struct blkio_group *blkg,
-               struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
-{
-       uint64_t disk_total;
-       char key_str[MAX_KEY_LEN];
-       enum stat_sub_type sub_type;
-
-       if (type == BLKIO_STAT_TIME)
-               return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-                                       blkg->stats.time, cb, dev);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-       if (type == BLKIO_STAT_UNACCOUNTED_TIME)
-               return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-                                       blkg->stats.unaccounted_time, cb, dev);
-       if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
-               uint64_t sum = blkg->stats.avg_queue_size_sum;
-               uint64_t samples = blkg->stats.avg_queue_size_samples;
-               if (samples)
-                       do_div(sum, samples);
-               else
-                       sum = 0;
-               return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
-       }
-       if (type == BLKIO_STAT_GROUP_WAIT_TIME)
-               return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-                                       blkg->stats.group_wait_time, cb, dev);
-       if (type == BLKIO_STAT_IDLE_TIME)
-               return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-                                       blkg->stats.idle_time, cb, dev);
-       if (type == BLKIO_STAT_EMPTY_TIME)
-               return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-                                       blkg->stats.empty_time, cb, dev);
-       if (type == BLKIO_STAT_DEQUEUE)
-               return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-                                       blkg->stats.dequeue, cb, dev);
-#endif
-
-       for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
-                       sub_type++) {
-               blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
-               cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
-       }
-       disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
-                       blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
-       blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
-       cb->fill(cb, key_str, disk_total);
-       return disk_total;
-}
-
-static int blkio_policy_parse_and_set(char *buf,
-       struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
-{
-       struct gendisk *disk = NULL;
-       char *s[4], *p, *major_s = NULL, *minor_s = NULL;
-       unsigned long major, minor;
-       int i = 0, ret = -EINVAL;
-       int part;
-       dev_t dev;
-       u64 temp;
-
-       memset(s, 0, sizeof(s));
-
-       while ((p = strsep(&buf, " ")) != NULL) {
-               if (!*p)
-                       continue;
-
-               s[i++] = p;
-
-               /* Prevent from inputing too many things */
-               if (i == 3)
-                       break;
-       }
-
-       if (i != 2)
-               goto out;
-
-       p = strsep(&s[0], ":");
-       if (p != NULL)
-               major_s = p;
-       else
-               goto out;
-
-       minor_s = s[0];
-       if (!minor_s)
-               goto out;
-
-       if (strict_strtoul(major_s, 10, &major))
-               goto out;
-
-       if (strict_strtoul(minor_s, 10, &minor))
-               goto out;
-
-       dev = MKDEV(major, minor);
-
-       if (strict_strtoull(s[1], 10, &temp))
-               goto out;
-
-       /* For rule removal, do not check for device presence. */
-       if (temp) {
-               disk = get_gendisk(dev, &part);
-               if (!disk || part) {
-                       ret = -ENODEV;
-                       goto out;
-               }
-       }
-
-       newpn->dev = dev;
-
-       switch (plid) {
-       case BLKIO_POLICY_PROP:
-               if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
-                    temp > BLKIO_WEIGHT_MAX)
-                       goto out;
-
-               newpn->plid = plid;
-               newpn->fileid = fileid;
-               newpn->val.weight = temp;
-               break;
-       case BLKIO_POLICY_THROTL:
-               switch(fileid) {
-               case BLKIO_THROTL_read_bps_device:
-               case BLKIO_THROTL_write_bps_device:
-                       newpn->plid = plid;
-                       newpn->fileid = fileid;
-                       newpn->val.bps = temp;
-                       break;
-               case BLKIO_THROTL_read_iops_device:
-               case BLKIO_THROTL_write_iops_device:
-                       if (temp > THROTL_IOPS_MAX)
-                               goto out;
-
-                       newpn->plid = plid;
-                       newpn->fileid = fileid;
-                       newpn->val.iops = (unsigned int)temp;
-                       break;
-               }
-               break;
-       default:
-               BUG();
-       }
-       ret = 0;
-out:
-       put_disk(disk);
-       return ret;
-}
-
-unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
-                             dev_t dev)
-{
-       struct blkio_policy_node *pn;
-       unsigned long flags;
-       unsigned int weight;
-
-       spin_lock_irqsave(&blkcg->lock, flags);
-
-       pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
-                               BLKIO_PROP_weight_device);
-       if (pn)
-               weight = pn->val.weight;
-       else
-               weight = blkcg->weight;
-
-       spin_unlock_irqrestore(&blkcg->lock, flags);
-
-       return weight;
-}
-EXPORT_SYMBOL_GPL(blkcg_get_weight);
-
-uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
-{
-       struct blkio_policy_node *pn;
-       unsigned long flags;
-       uint64_t bps = -1;
-
-       spin_lock_irqsave(&blkcg->lock, flags);
-       pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
-                               BLKIO_THROTL_read_bps_device);
-       if (pn)
-               bps = pn->val.bps;
-       spin_unlock_irqrestore(&blkcg->lock, flags);
-
-       return bps;
-}
-
-uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
-{
-       struct blkio_policy_node *pn;
-       unsigned long flags;
-       uint64_t bps = -1;
-
-       spin_lock_irqsave(&blkcg->lock, flags);
-       pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
-                               BLKIO_THROTL_write_bps_device);
-       if (pn)
-               bps = pn->val.bps;
-       spin_unlock_irqrestore(&blkcg->lock, flags);
-
-       return bps;
-}
-
-unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
-{
-       struct blkio_policy_node *pn;
-       unsigned long flags;
-       unsigned int iops = -1;
-
-       spin_lock_irqsave(&blkcg->lock, flags);
-       pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
-                               BLKIO_THROTL_read_iops_device);
-       if (pn)
-               iops = pn->val.iops;
-       spin_unlock_irqrestore(&blkcg->lock, flags);
-
-       return iops;
-}
-
-unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
-{
-       struct blkio_policy_node *pn;
-       unsigned long flags;
-       unsigned int iops = -1;
-
-       spin_lock_irqsave(&blkcg->lock, flags);
-       pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
-                               BLKIO_THROTL_write_iops_device);
-       if (pn)
-               iops = pn->val.iops;
-       spin_unlock_irqrestore(&blkcg->lock, flags);
-
-       return iops;
-}
+       /*
+        * Note that stat reset is racy - it doesn't synchronize against
+        * stat updates.  This is a debug feature which shouldn't exist
+        * anyway.  If you get hit by a race, retry.
+        */
+       hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+               for (i = 0; i < BLKCG_MAX_POLS; i++) {
+                       struct blkcg_policy *pol = blkcg_policy[i];
 
-/* Checks whether user asked for deleting a policy rule */
-static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
-{
-       switch(pn->plid) {
-       case BLKIO_POLICY_PROP:
-               if (pn->val.weight == 0)
-                       return 1;
-               break;
-       case BLKIO_POLICY_THROTL:
-               switch(pn->fileid) {
-               case BLKIO_THROTL_read_bps_device:
-               case BLKIO_THROTL_write_bps_device:
-                       if (pn->val.bps == 0)
-                               return 1;
-                       break;
-               case BLKIO_THROTL_read_iops_device:
-               case BLKIO_THROTL_write_iops_device:
-                       if (pn->val.iops == 0)
-                               return 1;
+                       if (blkcg_policy_enabled(blkg->q, pol) &&
+                           pol->pd_reset_stats_fn)
+                               pol->pd_reset_stats_fn(blkg);
                }
-               break;
-       default:
-               BUG();
        }
 
+       spin_unlock_irq(&blkcg->lock);
+       mutex_unlock(&blkcg_pol_mutex);
        return 0;
 }
 
-static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
-                                       struct blkio_policy_node *newpn)
-{
-       switch(oldpn->plid) {
-       case BLKIO_POLICY_PROP:
-               oldpn->val.weight = newpn->val.weight;
-               break;
-       case BLKIO_POLICY_THROTL:
-               switch(newpn->fileid) {
-               case BLKIO_THROTL_read_bps_device:
-               case BLKIO_THROTL_write_bps_device:
-                       oldpn->val.bps = newpn->val.bps;
-                       break;
-               case BLKIO_THROTL_read_iops_device:
-               case BLKIO_THROTL_write_iops_device:
-                       oldpn->val.iops = newpn->val.iops;
-               }
-               break;
-       default:
-               BUG();
-       }
-}
-
-/*
- * Some rules/values in blkg have changed. Propagate those to respective
- * policies.
- */
-static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
-               struct blkio_group *blkg, struct blkio_policy_node *pn)
+static const char *blkg_dev_name(struct blkcg_gq *blkg)
 {
-       unsigned int weight, iops;
-       u64 bps;
-
-       switch(pn->plid) {
-       case BLKIO_POLICY_PROP:
-               weight = pn->val.weight ? pn->val.weight :
-                               blkcg->weight;
-               blkio_update_group_weight(blkg, weight);
-               break;
-       case BLKIO_POLICY_THROTL:
-               switch(pn->fileid) {
-               case BLKIO_THROTL_read_bps_device:
-               case BLKIO_THROTL_write_bps_device:
-                       bps = pn->val.bps ? pn->val.bps : (-1);
-                       blkio_update_group_bps(blkg, bps, pn->fileid);
-                       break;
-               case BLKIO_THROTL_read_iops_device:
-               case BLKIO_THROTL_write_iops_device:
-                       iops = pn->val.iops ? pn->val.iops : (-1);
-                       blkio_update_group_iops(blkg, iops, pn->fileid);
-                       break;
-               }
-               break;
-       default:
-               BUG();
-       }
+       /* some drivers (floppy) instantiate a queue w/o disk registered */
+       if (blkg->q->backing_dev_info.dev)
+               return dev_name(blkg->q->backing_dev_info.dev);
+       return NULL;
 }
 
-/*
- * A policy node rule has been updated. Propagate this update to all the
- * block groups which might be affected by this update.
+/**
+ * blkcg_print_blkgs - helper for printing per-blkg data
+ * @sf: seq_file to print to
+ * @blkcg: blkcg of interest
+ * @prfill: fill function to print out a blkg
+ * @pol: policy in question
+ * @data: data to be passed to @prfill
+ * @show_total: to print out sum of prfill return values or not
+ *
+ * This function invokes @prfill on each blkg of @blkcg if pd for the
+ * policy specified by @pol exists.  @prfill is invoked with @sf, the
+ * policy data and @data.  If @show_total is %true, the sum of the return
+ * values from @prfill is printed with "Total" label at the end.
+ *
+ * This is to be used to construct print functions for
+ * cftype->read_seq_string method.
  */
-static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
-                               struct blkio_policy_node *pn)
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+                      u64 (*prfill)(struct seq_file *,
+                                    struct blkg_policy_data *, int),
+                      const struct blkcg_policy *pol, int data,
+                      bool show_total)
 {
-       struct blkio_group *blkg;
+       struct blkcg_gq *blkg;
        struct hlist_node *n;
+       u64 total = 0;
 
-       spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);
-
-       hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
-               if (pn->dev != blkg->dev || pn->plid != blkg->plid)
-                       continue;
-               blkio_update_blkg_policy(blkcg, blkg, pn);
-       }
-
+       hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
+               if (blkcg_policy_enabled(blkg->q, pol))
+                       total += prfill(sf, blkg->pd[pol->plid], data);
        spin_unlock_irq(&blkcg->lock);
-       spin_unlock(&blkio_list_lock);
+
+       if (show_total)
+               seq_printf(sf, "Total %llu\n", (unsigned long long)total);
 }
+EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
 
-static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
-                                      const char *buffer)
+/**
+ * __blkg_prfill_u64 - prfill helper for a single u64 value
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @v: value to print
+ *
+ * Print @v to @sf for the device associated with @pd.
+ */
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
 {
-       int ret = 0;
-       char *buf;
-       struct blkio_policy_node *newpn, *pn;
-       struct blkio_cgroup *blkcg;
-       int keep_newpn = 0;
-       enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-       int fileid = BLKIOFILE_ATTR(cft->private);
-
-       buf = kstrdup(buffer, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
-       if (!newpn) {
-               ret = -ENOMEM;
-               goto free_buf;
-       }
-
-       ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
-       if (ret)
-               goto free_newpn;
-
-       blkcg = cgroup_to_blkio_cgroup(cgrp);
-
-       spin_lock_irq(&blkcg->lock);
-
-       pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
-       if (!pn) {
-               if (!blkio_delete_rule_command(newpn)) {
-                       blkio_policy_insert_node(blkcg, newpn);
-                       keep_newpn = 1;
-               }
-               spin_unlock_irq(&blkcg->lock);
-               goto update_io_group;
-       }
-
-       if (blkio_delete_rule_command(newpn)) {
-               blkio_policy_delete_node(pn);
-               kfree(pn);
-               spin_unlock_irq(&blkcg->lock);
-               goto update_io_group;
-       }
-       spin_unlock_irq(&blkcg->lock);
+       const char *dname = blkg_dev_name(pd->blkg);
 
-       blkio_update_policy_rule(pn, newpn);
+       if (!dname)
+               return 0;
 
-update_io_group:
-       blkio_update_policy_node_blkg(blkcg, newpn);
-
-free_newpn:
-       if (!keep_newpn)
-               kfree(newpn);
-free_buf:
-       kfree(buf);
-       return ret;
+       seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
+       return v;
 }
+EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
 
-static void
-blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
-{
-       switch(pn->plid) {
-               case BLKIO_POLICY_PROP:
-                       if (pn->fileid == BLKIO_PROP_weight_device)
-                               seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
-                                       MINOR(pn->dev), pn->val.weight);
-                       break;
-               case BLKIO_POLICY_THROTL:
-                       switch(pn->fileid) {
-                       case BLKIO_THROTL_read_bps_device:
-                       case BLKIO_THROTL_write_bps_device:
-                               seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
-                                       MINOR(pn->dev), pn->val.bps);
-                               break;
-                       case BLKIO_THROTL_read_iops_device:
-                       case BLKIO_THROTL_write_iops_device:
-                               seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
-                                       MINOR(pn->dev), pn->val.iops);
-                               break;
-                       }
-                       break;
-               default:
-                       BUG();
-       }
-}
+/**
+ * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @rwstat: rwstat to print
+ *
+ * Print @rwstat to @sf for the device associated with @pd.
+ */
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+                        const struct blkg_rwstat *rwstat)
+{
+       static const char *rwstr[] = {
+               [BLKG_RWSTAT_READ]      = "Read",
+               [BLKG_RWSTAT_WRITE]     = "Write",
+               [BLKG_RWSTAT_SYNC]      = "Sync",
+               [BLKG_RWSTAT_ASYNC]     = "Async",
+       };
+       const char *dname = blkg_dev_name(pd->blkg);
+       u64 v;
+       int i;
 
-/* cgroup files which read their data from policy nodes end up here */
-static void blkio_read_policy_node_files(struct cftype *cft,
-                       struct blkio_cgroup *blkcg, struct seq_file *m)
-{
-       struct blkio_policy_node *pn;
-
-       if (!list_empty(&blkcg->policy_list)) {
-               spin_lock_irq(&blkcg->lock);
-               list_for_each_entry(pn, &blkcg->policy_list, node) {
-                       if (!pn_matches_cftype(cft, pn))
-                               continue;
-                       blkio_print_policy_node(m, pn);
-               }
-               spin_unlock_irq(&blkcg->lock);
-       }
-}
+       if (!dname)
+               return 0;
 
-static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
-                               struct seq_file *m)
-{
-       struct blkio_cgroup *blkcg;
-       enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-       int name = BLKIOFILE_ATTR(cft->private);
-
-       blkcg = cgroup_to_blkio_cgroup(cgrp);
-
-       switch(plid) {
-       case BLKIO_POLICY_PROP:
-               switch(name) {
-               case BLKIO_PROP_weight_device:
-                       blkio_read_policy_node_files(cft, blkcg, m);
-                       return 0;
-               default:
-                       BUG();
-               }
-               break;
-       case BLKIO_POLICY_THROTL:
-               switch(name){
-               case BLKIO_THROTL_read_bps_device:
-               case BLKIO_THROTL_write_bps_device:
-               case BLKIO_THROTL_read_iops_device:
-               case BLKIO_THROTL_write_iops_device:
-                       blkio_read_policy_node_files(cft, blkcg, m);
-                       return 0;
-               default:
-                       BUG();
-               }
-               break;
-       default:
-               BUG();
-       }
+       for (i = 0; i < BLKG_RWSTAT_NR; i++)
+               seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
+                          (unsigned long long)rwstat->cnt[i]);
 
-       return 0;
+       v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
+       seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
+       return v;
 }
 
-static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
-               struct cftype *cft, struct cgroup_map_cb *cb,
-               enum stat_type type, bool show_total, bool pcpu)
+/**
+ * blkg_prfill_stat - prfill callback for blkg_stat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_stat in @pd
+ *
+ * prfill callback for printing a blkg_stat.
+ */
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
 {
-       struct blkio_group *blkg;
-       struct hlist_node *n;
-       uint64_t cgroup_total = 0;
-
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
-               if (blkg->dev) {
-                       if (!cftype_blkg_same_policy(cft, blkg))
-                               continue;
-                       if (pcpu)
-                               cgroup_total += blkio_get_stat_cpu(blkg, cb,
-                                               blkg->dev, type);
-                       else {
-                               spin_lock_irq(&blkg->stats_lock);
-                               cgroup_total += blkio_get_stat(blkg, cb,
-                                               blkg->dev, type);
-                               spin_unlock_irq(&blkg->stats_lock);
-                       }
-               }
-       }
-       if (show_total)
-               cb->fill(cb, "Total", cgroup_total);
-       rcu_read_unlock();
-       return 0;
+       return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
 }
+EXPORT_SYMBOL_GPL(blkg_prfill_stat);
 
-/* All map kind of cgroup file get serviced by this function */
-static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
-                               struct cgroup_map_cb *cb)
+/**
+ * blkg_prfill_rwstat - prfill callback for blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_rwstat in @pd
+ *
+ * prfill callback for printing a blkg_rwstat.
+ */
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+                      int off)
 {
-       struct blkio_cgroup *blkcg;
-       enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-       int name = BLKIOFILE_ATTR(cft->private);
-
-       blkcg = cgroup_to_blkio_cgroup(cgrp);
-
-       switch(plid) {
-       case BLKIO_POLICY_PROP:
-               switch(name) {
-               case BLKIO_PROP_time:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                               BLKIO_STAT_TIME, 0, 0);
-               case BLKIO_PROP_sectors:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                               BLKIO_STAT_CPU_SECTORS, 0, 1);
-               case BLKIO_PROP_io_service_bytes:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                       BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
-               case BLKIO_PROP_io_serviced:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                               BLKIO_STAT_CPU_SERVICED, 1, 1);
-               case BLKIO_PROP_io_service_time:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                               BLKIO_STAT_SERVICE_TIME, 1, 0);
-               case BLKIO_PROP_io_wait_time:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                               BLKIO_STAT_WAIT_TIME, 1, 0);
-               case BLKIO_PROP_io_merged:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                               BLKIO_STAT_CPU_MERGED, 1, 1);
-               case BLKIO_PROP_io_queued:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                               BLKIO_STAT_QUEUED, 1, 0);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-               case BLKIO_PROP_unaccounted_time:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                       BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
-               case BLKIO_PROP_dequeue:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                               BLKIO_STAT_DEQUEUE, 0, 0);
-               case BLKIO_PROP_avg_queue_size:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                       BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
-               case BLKIO_PROP_group_wait_time:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                       BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
-               case BLKIO_PROP_idle_time:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                               BLKIO_STAT_IDLE_TIME, 0, 0);
-               case BLKIO_PROP_empty_time:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                               BLKIO_STAT_EMPTY_TIME, 0, 0);
-#endif
-               default:
-                       BUG();
-               }
-               break;
-       case BLKIO_POLICY_THROTL:
-               switch(name){
-               case BLKIO_THROTL_io_service_bytes:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                               BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
-               case BLKIO_THROTL_io_serviced:
-                       return blkio_read_blkg_stats(blkcg, cft, cb,
-                                               BLKIO_STAT_CPU_SERVICED, 1, 1);
-               default:
-                       BUG();
-               }
-               break;
-       default:
-               BUG();
-       }
+       struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
 
-       return 0;
+       return __blkg_prfill_rwstat(sf, pd, &rwstat);
 }
+EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
 
-static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
+/**
+ * blkg_conf_prep - parse and prepare for per-blkg config update
+ * @blkcg: target block cgroup
+ * @pol: target policy
+ * @input: input string
+ * @ctx: blkg_conf_ctx to be filled
+ *
+ * Parse per-blkg config update from @input and initialize @ctx with the
+ * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
+ * value.  This function returns with RCU read lock and queue lock held and
+ * must be paired with blkg_conf_finish().
+ */
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+                  const char *input, struct blkg_conf_ctx *ctx)
+       __acquires(rcu) __acquires(disk->queue->queue_lock)
 {
-       struct blkio_group *blkg;
-       struct hlist_node *n;
-       struct blkio_policy_node *pn;
+       struct gendisk *disk;
+       struct blkcg_gq *blkg;
+       unsigned int major, minor;
+       unsigned long long v;
+       int part, ret;
 
-       if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
+       if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
                return -EINVAL;
 
-       spin_lock(&blkio_list_lock);
-       spin_lock_irq(&blkcg->lock);
-       blkcg->weight = (unsigned int)val;
-
-       hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
-               pn = blkio_policy_search_node(blkcg, blkg->dev,
-                               BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
-               if (pn)
-                       continue;
-
-               blkio_update_group_weight(blkg, blkcg->weight);
-       }
-       spin_unlock_irq(&blkcg->lock);
-       spin_unlock(&blkio_list_lock);
-       return 0;
-}
+       disk = get_gendisk(MKDEV(major, minor), &part);
+       if (!disk || part)
+               return -EINVAL;
 
-static u64 blkiocg_file_read_u64 (struct cgroup *cgrp, struct cftype *cft) {
-       struct blkio_cgroup *blkcg;
-       enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-       int name = BLKIOFILE_ATTR(cft->private);
+       rcu_read_lock();
+       spin_lock_irq(disk->queue->queue_lock);
 
-       blkcg = cgroup_to_blkio_cgroup(cgrp);
+       if (blkcg_policy_enabled(disk->queue, pol))
+               blkg = blkg_lookup_create(blkcg, disk->queue);
+       else
+               blkg = ERR_PTR(-EINVAL);
 
-       switch(plid) {
-       case BLKIO_POLICY_PROP:
-               switch(name) {
-               case BLKIO_PROP_weight:
-                       return (u64)blkcg->weight;
+       if (IS_ERR(blkg)) {
+               ret = PTR_ERR(blkg);
+               rcu_read_unlock();
+               spin_unlock_irq(disk->queue->queue_lock);
+               put_disk(disk);
+               /*
+                * If queue was bypassing, we should retry.  Do so after a
+                * short msleep().  It isn't strictly necessary but queue
+                * can be bypassing for some time and it's always nice to
+                * avoid busy looping.
+                */
+               if (ret == -EBUSY) {
+                       msleep(10);
+                       ret = restart_syscall();
                }
-               break;
-       default:
-               BUG();
+               return ret;
        }
+
+       ctx->disk = disk;
+       ctx->blkg = blkg;
+       ctx->v = v;
        return 0;
 }
+EXPORT_SYMBOL_GPL(blkg_conf_prep);
 
-static int
-blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
+/**
+ * blkg_conf_finish - finish up per-blkg config update
+ * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
+ *
+ * Finish up after per-blkg config update.  This function must be paired
+ * with blkg_conf_prep().
+ */
+void blkg_conf_finish(struct blkg_conf_ctx *ctx)
+       __releases(ctx->disk->queue->queue_lock) __releases(rcu)
 {
-       struct blkio_cgroup *blkcg;
-       enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-       int name = BLKIOFILE_ATTR(cft->private);
-
-       blkcg = cgroup_to_blkio_cgroup(cgrp);
-
-       switch(plid) {
-       case BLKIO_POLICY_PROP:
-               switch(name) {
-               case BLKIO_PROP_weight:
-                       return blkio_weight_write(blkcg, val);
-               }
-               break;
-       default:
-               BUG();
-       }
-
-       return 0;
+       spin_unlock_irq(ctx->disk->queue->queue_lock);
+       rcu_read_unlock();
+       put_disk(ctx->disk);
 }
+EXPORT_SYMBOL_GPL(blkg_conf_finish);
 
-struct cftype blkio_files[] = {
-       {
-               .name = "weight_device",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_weight_device),
-               .read_seq_string = blkiocg_file_read,
-               .write_string = blkiocg_file_write,
-               .max_write_len = 256,
-       },
-       {
-               .name = "weight",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_weight),
-               .read_u64 = blkiocg_file_read_u64,
-               .write_u64 = blkiocg_file_write_u64,
-       },
-       {
-               .name = "time",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_time),
-               .read_map = blkiocg_file_read_map,
-       },
-       {
-               .name = "sectors",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_sectors),
-               .read_map = blkiocg_file_read_map,
-       },
-       {
-               .name = "io_service_bytes",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_io_service_bytes),
-               .read_map = blkiocg_file_read_map,
-       },
-       {
-               .name = "io_serviced",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_io_serviced),
-               .read_map = blkiocg_file_read_map,
-       },
-       {
-               .name = "io_service_time",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_io_service_time),
-               .read_map = blkiocg_file_read_map,
-       },
-       {
-               .name = "io_wait_time",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_io_wait_time),
-               .read_map = blkiocg_file_read_map,
-       },
-       {
-               .name = "io_merged",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_io_merged),
-               .read_map = blkiocg_file_read_map,
-       },
-       {
-               .name = "io_queued",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_io_queued),
-               .read_map = blkiocg_file_read_map,
-       },
+struct cftype blkcg_files[] = {
        {
                .name = "reset_stats",
-               .write_u64 = blkiocg_reset_stats,
-       },
-#ifdef CONFIG_BLK_DEV_THROTTLING
-       {
-               .name = "throttle.read_bps_device",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-                               BLKIO_THROTL_read_bps_device),
-               .read_seq_string = blkiocg_file_read,
-               .write_string = blkiocg_file_write,
-               .max_write_len = 256,
-       },
-
-       {
-               .name = "throttle.write_bps_device",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-                               BLKIO_THROTL_write_bps_device),
-               .read_seq_string = blkiocg_file_read,
-               .write_string = blkiocg_file_write,
-               .max_write_len = 256,
-       },
-
-       {
-               .name = "throttle.read_iops_device",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-                               BLKIO_THROTL_read_iops_device),
-               .read_seq_string = blkiocg_file_read,
-               .write_string = blkiocg_file_write,
-               .max_write_len = 256,
-       },
-
-       {
-               .name = "throttle.write_iops_device",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-                               BLKIO_THROTL_write_iops_device),
-               .read_seq_string = blkiocg_file_read,
-               .write_string = blkiocg_file_write,
-               .max_write_len = 256,
-       },
-       {
-               .name = "throttle.io_service_bytes",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-                               BLKIO_THROTL_io_service_bytes),
-               .read_map = blkiocg_file_read_map,
-       },
-       {
-               .name = "throttle.io_serviced",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
-                               BLKIO_THROTL_io_serviced),
-               .read_map = blkiocg_file_read_map,
-       },
-#endif /* CONFIG_BLK_DEV_THROTTLING */
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-       {
-               .name = "avg_queue_size",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_avg_queue_size),
-               .read_map = blkiocg_file_read_map,
+               .write_u64 = blkcg_reset_stats,
        },
-       {
-               .name = "group_wait_time",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_group_wait_time),
-               .read_map = blkiocg_file_read_map,
-       },
-       {
-               .name = "idle_time",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_idle_time),
-               .read_map = blkiocg_file_read_map,
-       },
-       {
-               .name = "empty_time",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_empty_time),
-               .read_map = blkiocg_file_read_map,
-       },
-       {
-               .name = "dequeue",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_dequeue),
-               .read_map = blkiocg_file_read_map,
-       },
-       {
-               .name = "unaccounted_time",
-               .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
-                               BLKIO_PROP_unaccounted_time),
-               .read_map = blkiocg_file_read_map,
-       },
-#endif
        { }     /* terminate */
 };
 
-static void blkiocg_destroy(struct cgroup *cgroup)
+/**
+ * blkcg_pre_destroy - cgroup pre_destroy callback
+ * @cgroup: cgroup of interest
+ *
+ * This function is called when @cgroup is about to go away and responsible
+ * for shooting down all blkgs associated with @cgroup.  blkgs should be
+ * removed while holding both q and blkcg locks.  As blkcg lock is nested
+ * inside q lock, this function performs reverse double lock dancing.
+ *
+ * This is the blkcg counterpart of ioc_release_fn().
+ */
+static int blkcg_pre_destroy(struct cgroup *cgroup)
 {
-       struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
-       unsigned long flags;
-       struct blkio_group *blkg;
-       void *key;
-       struct blkio_policy_type *blkiop;
-       struct blkio_policy_node *pn, *pntmp;
+       struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
 
-       rcu_read_lock();
-       do {
-               spin_lock_irqsave(&blkcg->lock, flags);
+       spin_lock_irq(&blkcg->lock);
 
-               if (hlist_empty(&blkcg->blkg_list)) {
-                       spin_unlock_irqrestore(&blkcg->lock, flags);
-                       break;
+       while (!hlist_empty(&blkcg->blkg_list)) {
+               struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
+                                               struct blkcg_gq, blkcg_node);
+               struct request_queue *q = blkg->q;
+
+               if (spin_trylock(q->queue_lock)) {
+                       blkg_destroy(blkg);
+                       spin_unlock(q->queue_lock);
+               } else {
+                       spin_unlock_irq(&blkcg->lock);
+                       cpu_relax();
+                       spin_lock_irq(&blkcg->lock);
                }
+       }
 
-               blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
-                                       blkcg_node);
-               key = rcu_dereference(blkg->key);
-               __blkiocg_del_blkio_group(blkg);
-
-               spin_unlock_irqrestore(&blkcg->lock, flags);
-
-               /*
-                * This blkio_group is being unlinked as associated cgroup is
-                * going away. Let all the IO controlling policies know about
-                * this event.
-                */
-               spin_lock(&blkio_list_lock);
-               list_for_each_entry(blkiop, &blkio_list, list) {
-                       if (blkiop->plid != blkg->plid)
-                               continue;
-                       blkiop->ops.blkio_unlink_group_fn(key, blkg);
-               }
-               spin_unlock(&blkio_list_lock);
-       } while (1);
+       spin_unlock_irq(&blkcg->lock);
+       return 0;
+}
 
-       list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
-               blkio_policy_delete_node(pn);
-               kfree(pn);
-       }
+static void blkcg_destroy(struct cgroup *cgroup)
+{
+       struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
 
-       free_css_id(&blkio_subsys, &blkcg->css);
-       rcu_read_unlock();
-       if (blkcg != &blkio_root_cgroup)
+       if (blkcg != &blkcg_root)
                kfree(blkcg);
 }
 
-static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
+static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
 {
-       struct blkio_cgroup *blkcg;
+       static atomic64_t id_seq = ATOMIC64_INIT(0);
+       struct blkcg *blkcg;
        struct cgroup *parent = cgroup->parent;
 
        if (!parent) {
-               blkcg = &blkio_root_cgroup;
+               blkcg = &blkcg_root;
                goto done;
        }
 
@@ -1582,22 +624,68 @@ static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
        if (!blkcg)
                return ERR_PTR(-ENOMEM);
 
-       blkcg->weight = BLKIO_WEIGHT_DEFAULT;
+       blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
+       blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
 done:
        spin_lock_init(&blkcg->lock);
+       INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
        INIT_HLIST_HEAD(&blkcg->blkg_list);
 
-       INIT_LIST_HEAD(&blkcg->policy_list);
        return &blkcg->css;
 }
 
+/**
+ * blkcg_init_queue - initialize blkcg part of request queue
+ * @q: request_queue to initialize
+ *
+ * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
+ * part of new request_queue @q.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int blkcg_init_queue(struct request_queue *q)
+{
+       might_sleep();
+
+       return blk_throtl_init(q);
+}
+
+/**
+ * blkcg_drain_queue - drain blkcg part of request_queue
+ * @q: request_queue to drain
+ *
+ * Called from blk_drain_queue().  Responsible for draining blkcg part.
+ */
+void blkcg_drain_queue(struct request_queue *q)
+{
+       lockdep_assert_held(q->queue_lock);
+
+       blk_throtl_drain(q);
+}
+
+/**
+ * blkcg_exit_queue - exit and release blkcg part of request_queue
+ * @q: request_queue being released
+ *
+ * Called from blk_release_queue().  Responsible for exiting blkcg part.
+ */
+void blkcg_exit_queue(struct request_queue *q)
+{
+       spin_lock_irq(q->queue_lock);
+       blkg_destroy_all(q);
+       spin_unlock_irq(q->queue_lock);
+
+       blk_throtl_exit(q);
+}
+
 /*
  * We cannot support shared io contexts, as we have no mean to support
  * two tasks with the same ioc in two different groups without major rework
  * of the main cic data structures.  For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
        struct task_struct *task;
        struct io_context *ioc;
@@ -1616,63 +704,213 @@ static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
        return ret;
 }
 
-static void blkiocg_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
-{
-       struct task_struct *task;
-       struct io_context *ioc;
-
-       cgroup_taskset_for_each(task, cgrp, tset) {
-               /* we don't lose anything even if ioc allocation fails */
-               ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
-               if (ioc) {
-                       ioc_cgroup_changed(ioc);
-                       put_io_context(ioc);
-               }
-       }
-}
-
 struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
-       .create = blkiocg_create,
-       .can_attach = blkiocg_can_attach,
-       .attach = blkiocg_attach,
-       .destroy = blkiocg_destroy,
-#ifdef CONFIG_BLK_CGROUP
-       /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
+       .create = blkcg_create,
+       .can_attach = blkcg_can_attach,
+       .pre_destroy = blkcg_pre_destroy,
+       .destroy = blkcg_destroy,
        .subsys_id = blkio_subsys_id,
-#endif
-       .base_cftypes = blkio_files,
-       .use_id = 1,
+       .base_cftypes = blkcg_files,
        .module = THIS_MODULE,
 };
 EXPORT_SYMBOL_GPL(blkio_subsys);
 
-void blkio_policy_register(struct blkio_policy_type *blkiop)
+/**
+ * blkcg_activate_policy - activate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to activate
+ *
+ * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
+ * bypass mode to populate its blkgs with policy_data for @pol.
+ *
+ * Activation happens with @q bypassed, so nobody would be accessing blkgs
+ * from IO path.  Update of each blkg is protected by both queue and blkcg
+ * locks so that holding either lock and testing blkcg_policy_enabled() is
+ * always enough for dereferencing policy data.
+ *
+ * The caller is responsible for synchronizing [de]activations and policy
+ * [un]registrations.  Returns 0 on success, -errno on failure.
+ */
+int blkcg_activate_policy(struct request_queue *q,
+                         const struct blkcg_policy *pol)
 {
-       spin_lock(&blkio_list_lock);
-       list_add_tail(&blkiop->list, &blkio_list);
-       spin_unlock(&blkio_list_lock);
+       LIST_HEAD(pds);
+       struct blkcg_gq *blkg;
+       struct blkg_policy_data *pd, *n;
+       int cnt = 0, ret;
+
+       if (blkcg_policy_enabled(q, pol))
+               return 0;
+
+       blk_queue_bypass_start(q);
+
+       /* make sure the root blkg exists and count the existing blkgs */
+       spin_lock_irq(q->queue_lock);
+
+       rcu_read_lock();
+       blkg = __blkg_lookup_create(&blkcg_root, q);
+       rcu_read_unlock();
+
+       if (IS_ERR(blkg)) {
+               ret = PTR_ERR(blkg);
+               goto out_unlock;
+       }
+       q->root_blkg = blkg;
+
+       list_for_each_entry(blkg, &q->blkg_list, q_node)
+               cnt++;
+
+       spin_unlock_irq(q->queue_lock);
+
+       /* allocate policy_data for all existing blkgs */
+       while (cnt--) {
+               pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
+               if (!pd) {
+                       ret = -ENOMEM;
+                       goto out_free;
+               }
+               list_add_tail(&pd->alloc_node, &pds);
+       }
+
+       /*
+        * Install the allocated pds.  With @q bypassing, no new blkg
+        * should have been created while the queue lock was dropped.
+        */
+       spin_lock_irq(q->queue_lock);
+
+       list_for_each_entry(blkg, &q->blkg_list, q_node) {
+               if (WARN_ON(list_empty(&pds))) {
+                       /* umm... this shouldn't happen, just abort */
+                       ret = -ENOMEM;
+                       goto out_unlock;
+               }
+               pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
+               list_del_init(&pd->alloc_node);
+
+               /* grab blkcg lock too while installing @pd on @blkg */
+               spin_lock(&blkg->blkcg->lock);
+
+               blkg->pd[pol->plid] = pd;
+               pd->blkg = blkg;
+               pol->pd_init_fn(blkg);
+
+               spin_unlock(&blkg->blkcg->lock);
+       }
+
+       __set_bit(pol->plid, q->blkcg_pols);
+       ret = 0;
+out_unlock:
+       spin_unlock_irq(q->queue_lock);
+out_free:
+       blk_queue_bypass_end(q);
+       list_for_each_entry_safe(pd, n, &pds, alloc_node)
+               kfree(pd);
+       return ret;
 }
-EXPORT_SYMBOL_GPL(blkio_policy_register);
+EXPORT_SYMBOL_GPL(blkcg_activate_policy);
 
-void blkio_policy_unregister(struct blkio_policy_type *blkiop)
+/**
+ * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to deactivate
+ *
+ * Deactivate @pol on @q.  Follows the same synchronization rules as
+ * blkcg_activate_policy().
+ */
+void blkcg_deactivate_policy(struct request_queue *q,
+                            const struct blkcg_policy *pol)
 {
-       spin_lock(&blkio_list_lock);
-       list_del_init(&blkiop->list);
-       spin_unlock(&blkio_list_lock);
+       struct blkcg_gq *blkg;
+
+       if (!blkcg_policy_enabled(q, pol))
+               return;
+
+       blk_queue_bypass_start(q);
+       spin_lock_irq(q->queue_lock);
+
+       __clear_bit(pol->plid, q->blkcg_pols);
+
+       /* if no policy is left, no need for blkgs - shoot them down */
+       if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
+               blkg_destroy_all(q);
+
+       list_for_each_entry(blkg, &q->blkg_list, q_node) {
+               /* grab blkcg lock too while removing @pd from @blkg */
+               spin_lock(&blkg->blkcg->lock);
+
+               if (pol->pd_exit_fn)
+                       pol->pd_exit_fn(blkg);
+
+               kfree(blkg->pd[pol->plid]);
+               blkg->pd[pol->plid] = NULL;
+
+               spin_unlock(&blkg->blkcg->lock);
+       }
+
+       spin_unlock_irq(q->queue_lock);
+       blk_queue_bypass_end(q);
 }
-EXPORT_SYMBOL_GPL(blkio_policy_unregister);
+EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
 
-static int __init init_cgroup_blkio(void)
+/**
+ * blkcg_policy_register - register a blkcg policy
+ * @pol: blkcg policy to register
+ *
+ * Register @pol with blkcg core.  Might sleep and @pol may be modified on
+ * successful registration.  Returns 0 on success and -errno on failure.
+ */
+int blkcg_policy_register(struct blkcg_policy *pol)
 {
-       return cgroup_load_subsys(&blkio_subsys);
+       int i, ret;
+
+       if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
+               return -EINVAL;
+
+       mutex_lock(&blkcg_pol_mutex);
+
+       /* find an empty slot */
+       ret = -ENOSPC;
+       for (i = 0; i < BLKCG_MAX_POLS; i++)
+               if (!blkcg_policy[i])
+                       break;
+       if (i >= BLKCG_MAX_POLS)
+               goto out_unlock;
+
+       /* register and update blkgs */
+       pol->plid = i;
+       blkcg_policy[i] = pol;
+
+       /* everything is in place, add intf files for the new policy */
+       if (pol->cftypes)
+               WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
+       ret = 0;
+out_unlock:
+       mutex_unlock(&blkcg_pol_mutex);
+       return ret;
 }
+EXPORT_SYMBOL_GPL(blkcg_policy_register);
 
-static void __exit exit_cgroup_blkio(void)
+/**
+ * blkcg_policy_unregister - unregister a blkcg policy
+ * @pol: blkcg policy to unregister
+ *
+ * Undo blkcg_policy_register(@pol).  Might sleep.
+ */
+void blkcg_policy_unregister(struct blkcg_policy *pol)
 {
-       cgroup_unload_subsys(&blkio_subsys);
-}
+       mutex_lock(&blkcg_pol_mutex);
 
-module_init(init_cgroup_blkio);
-module_exit(exit_cgroup_blkio);
-MODULE_LICENSE("GPL");
+       if (WARN_ON(blkcg_policy[pol->plid] != pol))
+               goto out_unlock;
+
+       /* kill the intf files first */
+       if (pol->cftypes)
+               cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);
+
+       /* unregister and update blkgs */
+       blkcg_policy[pol->plid] = NULL;
+out_unlock:
+       mutex_unlock(&blkcg_pol_mutex);
+}
+EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
index 6f3ace7e792ff4336fe74ac2d796ec1fa1f5df68..8ac457ce7783847522c1340c008d3c7789f3a1b2 100644 (file)
 
 #include <linux/cgroup.h>
 #include <linux/u64_stats_sync.h>
-
-enum blkio_policy_id {
-       BLKIO_POLICY_PROP = 0,          /* Proportional Bandwidth division */
-       BLKIO_POLICY_THROTL,            /* Throttling */
-};
+#include <linux/seq_file.h>
+#include <linux/radix-tree.h>
 
 /* Max limits for throttle policy */
 #define THROTL_IOPS_MAX                UINT_MAX
 
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-
-#ifndef CONFIG_BLK_CGROUP
-/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
-extern struct cgroup_subsys blkio_subsys;
-#define blkio_subsys_id blkio_subsys.subsys_id
-#endif
-
-enum stat_type {
-       /* Total time spent (in ns) between request dispatch to the driver and
-        * request completion for IOs doen by this cgroup. This may not be
-        * accurate when NCQ is turned on. */
-       BLKIO_STAT_SERVICE_TIME = 0,
-       /* Total time spent waiting in scheduler queue in ns */
-       BLKIO_STAT_WAIT_TIME,
-       /* Number of IOs queued up */
-       BLKIO_STAT_QUEUED,
-       /* All the single valued stats go below this */
-       BLKIO_STAT_TIME,
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-       /* Time not charged to this cgroup */
-       BLKIO_STAT_UNACCOUNTED_TIME,
-       BLKIO_STAT_AVG_QUEUE_SIZE,
-       BLKIO_STAT_IDLE_TIME,
-       BLKIO_STAT_EMPTY_TIME,
-       BLKIO_STAT_GROUP_WAIT_TIME,
-       BLKIO_STAT_DEQUEUE
-#endif
-};
+/* CFQ specific, out here for blkcg->cfq_weight */
+#define CFQ_WEIGHT_MIN         10
+#define CFQ_WEIGHT_MAX         1000
+#define CFQ_WEIGHT_DEFAULT     500
 
-/* Per cpu stats */
-enum stat_type_cpu {
-       BLKIO_STAT_CPU_SECTORS,
-       /* Total bytes transferred */
-       BLKIO_STAT_CPU_SERVICE_BYTES,
-       /* Total IOs serviced, post merge */
-       BLKIO_STAT_CPU_SERVICED,
-       /* Number of IOs merged */
-       BLKIO_STAT_CPU_MERGED,
-       BLKIO_STAT_CPU_NR
-};
+#ifdef CONFIG_BLK_CGROUP
 
-enum stat_sub_type {
-       BLKIO_STAT_READ = 0,
-       BLKIO_STAT_WRITE,
-       BLKIO_STAT_SYNC,
-       BLKIO_STAT_ASYNC,
-       BLKIO_STAT_TOTAL
-};
+enum blkg_rwstat_type {
+       BLKG_RWSTAT_READ,
+       BLKG_RWSTAT_WRITE,
+       BLKG_RWSTAT_SYNC,
+       BLKG_RWSTAT_ASYNC,
 
-/* blkg state flags */
-enum blkg_state_flags {
-       BLKG_waiting = 0,
-       BLKG_idling,
-       BLKG_empty,
+       BLKG_RWSTAT_NR,
+       BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
 };
 
-/* cgroup files owned by proportional weight policy */
-enum blkcg_file_name_prop {
-       BLKIO_PROP_weight = 1,
-       BLKIO_PROP_weight_device,
-       BLKIO_PROP_io_service_bytes,
-       BLKIO_PROP_io_serviced,
-       BLKIO_PROP_time,
-       BLKIO_PROP_sectors,
-       BLKIO_PROP_unaccounted_time,
-       BLKIO_PROP_io_service_time,
-       BLKIO_PROP_io_wait_time,
-       BLKIO_PROP_io_merged,
-       BLKIO_PROP_io_queued,
-       BLKIO_PROP_avg_queue_size,
-       BLKIO_PROP_group_wait_time,
-       BLKIO_PROP_idle_time,
-       BLKIO_PROP_empty_time,
-       BLKIO_PROP_dequeue,
-};
+struct blkcg_gq;
 
-/* cgroup files owned by throttle policy */
-enum blkcg_file_name_throtl {
-       BLKIO_THROTL_read_bps_device,
-       BLKIO_THROTL_write_bps_device,
-       BLKIO_THROTL_read_iops_device,
-       BLKIO_THROTL_write_iops_device,
-       BLKIO_THROTL_io_service_bytes,
-       BLKIO_THROTL_io_serviced,
-};
+struct blkcg {
+       struct cgroup_subsys_state      css;
+       spinlock_t                      lock;
 
-struct blkio_cgroup {
-       struct cgroup_subsys_state css;
-       unsigned int weight;
-       spinlock_t lock;
-       struct hlist_head blkg_list;
-       struct list_head policy_list; /* list of blkio_policy_node */
-};
+       struct radix_tree_root          blkg_tree;
+       struct blkcg_gq                 *blkg_hint;
+       struct hlist_head               blkg_list;
+
+       /* for policies to test whether associated blkcg has changed */
+       uint64_t                        id;
 
-struct blkio_group_stats {
-       /* total disk time and nr sectors dispatched by this group */
-       uint64_t time;
-       uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-       /* Time not charged to this cgroup */
-       uint64_t unaccounted_time;
-
-       /* Sum of number of IOs queued across all samples */
-       uint64_t avg_queue_size_sum;
-       /* Count of samples taken for average */
-       uint64_t avg_queue_size_samples;
-       /* How many times this group has been removed from service tree */
-       unsigned long dequeue;
-
-       /* Total time spent waiting for it to be assigned a timeslice. */
-       uint64_t group_wait_time;
-       uint64_t start_group_wait_time;
-
-       /* Time spent idling for this blkio_group */
-       uint64_t idle_time;
-       uint64_t start_idle_time;
-       /*
-        * Total time when we have requests queued and do not contain the
-        * current active queue.
-        */
-       uint64_t empty_time;
-       uint64_t start_empty_time;
-       uint16_t flags;
-#endif
+       /* TODO: per-policy storage in blkcg */
+       unsigned int                    cfq_weight;     /* belongs to cfq */
 };
 
-/* Per cpu blkio group stats */
-struct blkio_group_stats_cpu {
-       uint64_t sectors;
-       uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
-       struct u64_stats_sync syncp;
+struct blkg_stat {
+       struct u64_stats_sync           syncp;
+       uint64_t                        cnt;
 };
 
-struct blkio_group {
-       /* An rcu protected unique identifier for the group */
-       void *key;
-       struct hlist_node blkcg_node;
-       unsigned short blkcg_id;
-       /* Store cgroup path */
-       char path[128];
-       /* The device MKDEV(major, minor), this group has been created for */
-       dev_t dev;
-       /* policy which owns this blk group */
-       enum blkio_policy_id plid;
-
-       /* Need to serialize the stats in the case of reset/update */
-       spinlock_t stats_lock;
-       struct blkio_group_stats stats;
-       /* Per cpu stats pointer */
-       struct blkio_group_stats_cpu __percpu *stats_cpu;
+struct blkg_rwstat {
+       struct u64_stats_sync           syncp;
+       uint64_t                        cnt[BLKG_RWSTAT_NR];
 };
 
-struct blkio_policy_node {
-       struct list_head node;
-       dev_t dev;
-       /* This node belongs to max bw policy or porportional weight policy */
-       enum blkio_policy_id plid;
-       /* cgroup file to which this rule belongs to */
-       int fileid;
-
-       union {
-               unsigned int weight;
-               /*
-                * Rate read/write in terms of bytes per second
-                * Whether this rate represents read or write is determined
-                * by file type "fileid".
-                */
-               u64 bps;
-               unsigned int iops;
-       } val;
+/*
+ * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
+ * request_queue (q).  This is used by blkcg policies which need to track
+ * information per blkcg - q pair.
+ *
+ * There can be multiple active blkcg policies and each has its private
+ * data on each blkg, the size of which is determined by
+ * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
+ * together with blkg and invokes pd_init/exit_fn() methods.
+ *
+ * Such private data must embed struct blkg_policy_data (pd) at the
+ * beginning and pd_size can't be smaller than pd.
+ */
+struct blkg_policy_data {
+       /* the blkg this per-policy data belongs to */
+       struct blkcg_gq                 *blkg;
+
+       /* used during policy activation */
+       struct list_head                alloc_node;
 };
 
-extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
-                                    dev_t dev);
-extern uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg,
-                                    dev_t dev);
-extern uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg,
-                                    dev_t dev);
-extern unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg,
-                                    dev_t dev);
-extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg,
-                                    dev_t dev);
-
-typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
-
-typedef void (blkio_update_group_weight_fn) (void *key,
-                       struct blkio_group *blkg, unsigned int weight);
-typedef void (blkio_update_group_read_bps_fn) (void * key,
-                       struct blkio_group *blkg, u64 read_bps);
-typedef void (blkio_update_group_write_bps_fn) (void *key,
-                       struct blkio_group *blkg, u64 write_bps);
-typedef void (blkio_update_group_read_iops_fn) (void *key,
-                       struct blkio_group *blkg, unsigned int read_iops);
-typedef void (blkio_update_group_write_iops_fn) (void *key,
-                       struct blkio_group *blkg, unsigned int write_iops);
-
-struct blkio_policy_ops {
-       blkio_unlink_group_fn *blkio_unlink_group_fn;
-       blkio_update_group_weight_fn *blkio_update_group_weight_fn;
-       blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
-       blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
-       blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
-       blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
+/* association between a blk cgroup and a request queue */
+struct blkcg_gq {
+       /* Pointer to the associated request_queue */
+       struct request_queue            *q;
+       struct list_head                q_node;
+       struct hlist_node               blkcg_node;
+       struct blkcg                    *blkcg;
+       /* reference count */
+       int                             refcnt;
+
+       struct blkg_policy_data         *pd[BLKCG_MAX_POLS];
+
+       struct rcu_head                 rcu_head;
 };
 
-struct blkio_policy_type {
-       struct list_head list;
-       struct blkio_policy_ops ops;
-       enum blkio_policy_id plid;
+typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
+
+struct blkcg_policy {
+       int                             plid;
+       /* policy specific private data size */
+       size_t                          pd_size;
+       /* cgroup files for the policy */
+       struct cftype                   *cftypes;
+
+       /* operations */
+       blkcg_pol_init_pd_fn            *pd_init_fn;
+       blkcg_pol_exit_pd_fn            *pd_exit_fn;
+       blkcg_pol_reset_pd_stats_fn     *pd_reset_stats_fn;
 };
 
+extern struct blkcg blkcg_root;
+
+struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup);
+struct blkcg *bio_blkcg(struct bio *bio);
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+                                   struct request_queue *q);
+int blkcg_init_queue(struct request_queue *q);
+void blkcg_drain_queue(struct request_queue *q);
+void blkcg_exit_queue(struct request_queue *q);
+
 /* Blkio controller policy registration */
-extern void blkio_policy_register(struct blkio_policy_type *);
-extern void blkio_policy_unregister(struct blkio_policy_type *);
+int blkcg_policy_register(struct blkcg_policy *pol);
+void blkcg_policy_unregister(struct blkcg_policy *pol);
+int blkcg_activate_policy(struct request_queue *q,
+                         const struct blkcg_policy *pol);
+void blkcg_deactivate_policy(struct request_queue *q,
+                            const struct blkcg_policy *pol);
+
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+                      u64 (*prfill)(struct seq_file *,
+                                    struct blkg_policy_data *, int),
+                      const struct blkcg_policy *pol, int data,
+                      bool show_total);
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+                        const struct blkg_rwstat *rwstat);
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+                      int off);
+
+struct blkg_conf_ctx {
+       struct gendisk                  *disk;
+       struct blkcg_gq                 *blkg;
+       u64                             v;
+};
+
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+                  const char *input, struct blkg_conf_ctx *ctx);
+void blkg_conf_finish(struct blkg_conf_ctx *ctx);
+
+
+/**
+ * blkg_to_pd - get policy private data
+ * @blkg: blkg of interest
+ * @pol: policy of interest
+ *
+ * Return pointer to private data associated with the @blkg-@pol pair.
+ */
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+                                                 struct blkcg_policy *pol)
+{
+       return blkg ? blkg->pd[pol->plid] : NULL;
+}
+
+/**
+ * pd_to_blkg - get blkg associated with policy private data
+ * @pd: policy private data of interest
+ *
+ * @pd is policy private data.  Determine the blkg it's associated with.
+ */
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
+{
+       return pd ? pd->blkg : NULL;
+}
+
+/**
+ * blkg_path - format cgroup path of blkg
+ * @blkg: blkg of interest
+ * @buf: target buffer
+ * @buflen: target buffer length
+ *
+ * Format the path of the cgroup of @blkg into @buf.
+ */
+static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
+{
+       int ret;
+
+       rcu_read_lock();
+       ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
+       rcu_read_unlock();
+       if (ret)
+               strncpy(buf, "<unavailable>", buflen);
+       return ret;
+}
 
-static inline char *blkg_path(struct blkio_group *blkg)
+/**
+ * blkg_get - get a blkg reference
+ * @blkg: blkg to get
+ *
+ * The caller should be holding queue_lock and an existing reference.
+ */
+static inline void blkg_get(struct blkcg_gq *blkg)
 {
-       return blkg->path;
+       lockdep_assert_held(blkg->q->queue_lock);
+       WARN_ON_ONCE(!blkg->refcnt);
+       blkg->refcnt++;
 }
 
-#else
+void __blkg_release(struct blkcg_gq *blkg);
 
-struct blkio_group {
+/**
+ * blkg_put - put a blkg reference
+ * @blkg: blkg to put
+ *
+ * The caller should be holding queue_lock.
+ */
+static inline void blkg_put(struct blkcg_gq *blkg)
+{
+       lockdep_assert_held(blkg->q->queue_lock);
+       WARN_ON_ONCE(blkg->refcnt <= 0);
+       if (!--blkg->refcnt)
+               __blkg_release(blkg);
+}
+
+/**
+ * blkg_stat_add - add a value to a blkg_stat
+ * @stat: target blkg_stat
+ * @val: value to add
+ *
+ * Add @val to @stat.  The caller is responsible for synchronizing calls to
+ * this function.
+ */
+static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
+{
+       u64_stats_update_begin(&stat->syncp);
+       stat->cnt += val;
+       u64_stats_update_end(&stat->syncp);
+}
+
+/**
+ * blkg_stat_read - read the current value of a blkg_stat
+ * @stat: blkg_stat to read
+ *
+ * Read the current value of @stat.  This function can be called without
+ * synchronization and takes care of u64 atomicity.
+ */
+static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
+{
+       unsigned int start;
+       uint64_t v;
+
+       do {
+               start = u64_stats_fetch_begin(&stat->syncp);
+               v = stat->cnt;
+       } while (u64_stats_fetch_retry(&stat->syncp, start));
+
+       return v;
+}
+
+/**
+ * blkg_stat_reset - reset a blkg_stat
+ * @stat: blkg_stat to reset
+ */
+static inline void blkg_stat_reset(struct blkg_stat *stat)
+{
+       stat->cnt = 0;
+}
+
+/**
+ * blkg_rwstat_add - add a value to a blkg_rwstat
+ * @rwstat: target blkg_rwstat
+ * @rw: mask of REQ_{WRITE|SYNC}
+ * @val: value to add
+ *
+ * Add @val to @rwstat.  The counters are chosen according to @rw.  The
+ * caller is responsible for synchronizing calls to this function.
+ */
+static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
+                                  int rw, uint64_t val)
+{
+       u64_stats_update_begin(&rwstat->syncp);
+
+       if (rw & REQ_WRITE)
+               rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
+       else
+               rwstat->cnt[BLKG_RWSTAT_READ] += val;
+       if (rw & REQ_SYNC)
+               rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
+       else
+               rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
+
+       u64_stats_update_end(&rwstat->syncp);
+}
+
+/**
+ * blkg_rwstat_read - read the current values of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Read the current snapshot of @rwstat and return it as the return value.
+ * This function can be called without synchronization and takes care of
+ * u64 atomicity.
+ */
+static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
+{
+       unsigned int start;
+       struct blkg_rwstat tmp;
+
+       do {
+               start = u64_stats_fetch_begin(&rwstat->syncp);
+               tmp = *rwstat;
+       } while (u64_stats_fetch_retry(&rwstat->syncp, start));
+
+       return tmp;
+}
+
+/**
+ * blkg_rwstat_sum - read the total count of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Return the total count of @rwstat regardless of the IO direction.  This
+ * function can be called without synchronization and takes care of u64
+ * atomicity.
+ */
+static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
+{
+       struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
+
+       return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
+}
+
+/**
+ * blkg_rwstat_reset - reset a blkg_rwstat
+ * @rwstat: blkg_rwstat to reset
+ */
+static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
+{
+       memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
+}
+
+#else  /* CONFIG_BLK_CGROUP */
+
+struct cgroup;
+
+struct blkg_policy_data {
 };
 
-struct blkio_policy_type {
+struct blkcg_gq {
 };
 
-static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
-static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
-
-static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
-
-#endif
-
-#define BLKIO_WEIGHT_MIN       10
-#define BLKIO_WEIGHT_MAX       1000
-#define BLKIO_WEIGHT_DEFAULT   500
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
-void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-                               unsigned long dequeue);
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
-void blkiocg_set_start_empty_time(struct blkio_group *blkg);
-
-#define BLKG_FLAG_FNS(name)                                            \
-static inline void blkio_mark_blkg_##name(                             \
-               struct blkio_group_stats *stats)                        \
-{                                                                      \
-       stats->flags |= (1 << BLKG_##name);                             \
-}                                                                      \
-static inline void blkio_clear_blkg_##name(                            \
-               struct blkio_group_stats *stats)                        \
-{                                                                      \
-       stats->flags &= ~(1 << BLKG_##name);                            \
-}                                                                      \
-static inline int blkio_blkg_##name(struct blkio_group_stats *stats)   \
-{                                                                      \
-       return (stats->flags & (1 << BLKG_##name)) != 0;                \
-}                                                                      \
-
-BLKG_FLAG_FNS(waiting)
-BLKG_FLAG_FNS(idling)
-BLKG_FLAG_FNS(empty)
-#undef BLKG_FLAG_FNS
-#else
-static inline void blkiocg_update_avg_queue_size_stats(
-                                               struct blkio_group *blkg) {}
-static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-                                               unsigned long dequeue) {}
-static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{}
-static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
-static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
-#endif
-
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-extern struct blkio_cgroup blkio_root_cgroup;
-extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
-extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
-extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-       struct blkio_group *blkg, void *key, dev_t dev,
-       enum blkio_policy_id plid);
-extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
-extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
-extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
-                                               void *key);
-void blkiocg_update_timeslice_used(struct blkio_group *blkg,
-                                       unsigned long time,
-                                       unsigned long unaccounted_time);
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
-                                               bool direction, bool sync);
-void blkiocg_update_completion_stats(struct blkio_group *blkg,
-       uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
-                                       bool sync);
-void blkiocg_update_io_add_stats(struct blkio_group *blkg,
-               struct blkio_group *curr_blkg, bool direction, bool sync);
-void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-                                       bool direction, bool sync);
-#else
-struct cgroup;
-static inline struct blkio_cgroup *
-cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
-static inline struct blkio_cgroup *
-task_blkio_cgroup(struct task_struct *tsk) { return NULL; }
-
-static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-               struct blkio_group *blkg, void *key, dev_t dev,
-               enum blkio_policy_id plid) {}
-
-static inline int blkio_alloc_blkg_stats(struct blkio_group *blkg) { return 0; }
-
-static inline int
-blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
-
-static inline struct blkio_group *
-blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
-static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
-                                               unsigned long time,
-                                               unsigned long unaccounted_time)
-{}
-static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-                               uint64_t bytes, bool direction, bool sync) {}
-static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
-               uint64_t start_time, uint64_t io_start_time, bool direction,
-               bool sync) {}
-static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
-                                               bool direction, bool sync) {}
-static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
-               struct blkio_group *curr_blkg, bool direction, bool sync) {}
-static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-                                               bool direction, bool sync) {}
-#endif
-#endif /* _BLK_CGROUP_H */
+struct blkcg_policy {
+};
+
+static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
+static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
+static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
+static inline void blkcg_drain_queue(struct request_queue *q) { }
+static inline void blkcg_exit_queue(struct request_queue *q) { }
+static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
+static inline int blkcg_activate_policy(struct request_queue *q,
+                                       const struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_deactivate_policy(struct request_queue *q,
+                                          const struct blkcg_policy *pol) { }
+
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+                                                 struct blkcg_policy *pol) { return NULL; }
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
+static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
+static inline void blkg_get(struct blkcg_gq *blkg) { }
+static inline void blkg_put(struct blkcg_gq *blkg) { }
+
+#endif /* CONFIG_BLK_CGROUP */
+#endif /* _BLK_CGROUP_H */
index 1f61b74867e41d3f74f61aeec539e8b00157dacf..3c923a7aeb56f1658142b091868c3c29ebffd3c5 100644 (file)
 #include <linux/fault-inject.h>
 #include <linux/list_sort.h>
 #include <linux/delay.h>
+#include <linux/ratelimit.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
 
 #include "blk.h"
+#include "blk-cgroup.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -280,7 +282,7 @@ EXPORT_SYMBOL(blk_stop_queue);
  *
  *     This function does not cancel any asynchronous activity arising
  *     out of elevator or throttling code. That would require elevaotor_exit()
- *     and blk_throtl_exit() to be called with queue lock initialized.
+ *     and blkcg_exit_queue() to be called with queue lock initialized.
  *
  */
 void blk_sync_queue(struct request_queue *q)
@@ -365,17 +367,23 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 
                spin_lock_irq(q->queue_lock);
 
-               elv_drain_elevator(q);
-               if (drain_all)
-                       blk_throtl_drain(q);
+               /*
+                * The caller might be trying to drain @q before its
+                * elevator is initialized.
+                */
+               if (q->elevator)
+                       elv_drain_elevator(q);
+
+               blkcg_drain_queue(q);
 
                /*
                 * This function might be called on a queue which failed
-                * driver init after queue creation.  Some drivers
-                * (e.g. fd) get unhappy in such cases.  Kick queue iff
-                * dispatch queue has something on it.
+                * driver init after queue creation or is not yet fully
+                * active.  Some drivers (e.g. fd and loop) get unhappy
+                * in such cases.  Kick queue iff dispatch queue has
+                * something on it and @q has request_fn set.
                 */
-               if (!list_empty(&q->queue_head))
+               if (!list_empty(&q->queue_head) && q->request_fn)
                        __blk_run_queue(q);
 
                drain |= q->rq.elvpriv;
@@ -402,6 +410,49 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
        }
 }
 
+/**
+ * blk_queue_bypass_start - enter queue bypass mode
+ * @q: queue of interest
+ *
+ * In bypass mode, only the dispatch FIFO queue of @q is used.  This
+ * function makes @q enter bypass mode and drains all requests which were
+ * throttled or issued before.  On return, it's guaranteed that no request
+ * is being throttled or has ELVPRIV set and blk_queue_bypass() is %true
+ * inside queue or RCU read lock.
+ */
+void blk_queue_bypass_start(struct request_queue *q)
+{
+       bool drain;
+
+       spin_lock_irq(q->queue_lock);
+       drain = !q->bypass_depth++;
+       queue_flag_set(QUEUE_FLAG_BYPASS, q);
+       spin_unlock_irq(q->queue_lock);
+
+       if (drain) {
+               blk_drain_queue(q, false);
+               /* ensure blk_queue_bypass() is %true inside RCU read lock */
+               synchronize_rcu();
+       }
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
+
+/**
+ * blk_queue_bypass_end - leave queue bypass mode
+ * @q: queue of interest
+ *
+ * Leave bypass mode and restore the normal queueing behavior.
+ */
+void blk_queue_bypass_end(struct request_queue *q)
+{
+       spin_lock_irq(q->queue_lock);
+       if (!--q->bypass_depth)
+               queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+       WARN_ON_ONCE(q->bypass_depth < 0);
+       spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
+
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -418,6 +469,19 @@ void blk_cleanup_queue(struct request_queue *q)
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 
        spin_lock_irq(lock);
+
+       /*
+        * Dead queue is permanently in bypass mode till released.  Note
+        * that, unlike blk_queue_bypass_start(), we aren't performing
+        * synchronize_rcu() after entering bypass mode to avoid the delay
+        * as some drivers create and destroy a lot of queues while
+        * probing.  This is still safe because blk_release_queue() will be
+        * called only after the queue refcnt drops to zero and nothing,
+        * RCU or not, would be traversing the queue by then.
+        */
+       q->bypass_depth++;
+       queue_flag_set(QUEUE_FLAG_BYPASS, q);
+
        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        queue_flag_set(QUEUE_FLAG_DEAD, q);
@@ -428,13 +492,8 @@ void blk_cleanup_queue(struct request_queue *q)
        spin_unlock_irq(lock);
        mutex_unlock(&q->sysfs_lock);
 
-       /*
-        * Drain all requests queued before DEAD marking.  The caller might
-        * be trying to tear down @q before its elevator is initialized, in
-        * which case we don't want to call into draining.
-        */
-       if (q->elevator)
-               blk_drain_queue(q, true);
+       /* drain all requests queued before DEAD marking */
+       blk_drain_queue(q, true);
 
        /* @q won't process any more request, flush async actions */
        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
@@ -498,14 +557,15 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        if (err)
                goto fail_id;
 
-       if (blk_throtl_init(q))
-               goto fail_id;
-
        setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
                    laptop_mode_timer_fn, (unsigned long) q);
        setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+       INIT_LIST_HEAD(&q->queue_head);
        INIT_LIST_HEAD(&q->timeout_list);
        INIT_LIST_HEAD(&q->icq_list);
+#ifdef CONFIG_BLK_CGROUP
+       INIT_LIST_HEAD(&q->blkg_list);
+#endif
        INIT_LIST_HEAD(&q->flush_queue[0]);
        INIT_LIST_HEAD(&q->flush_queue[1]);
        INIT_LIST_HEAD(&q->flush_data_in_flight);
@@ -522,6 +582,18 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
         */
        q->queue_lock = &q->__queue_lock;
 
+       /*
+        * A queue starts its life with bypass turned on to avoid
+        * unnecessary bypass on/off overhead and nasty surprises during
+        * init.  The initial bypass will be finished at the end of
+        * blk_init_allocated_queue().
+        */
+       q->bypass_depth = 1;
+       __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
+       if (blkcg_init_queue(q))
+               goto fail_id;
+
        return q;
 
 fail_id:
@@ -614,15 +686,15 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 
        q->sg_reserved_size = INT_MAX;
 
-       /*
-        * all done
-        */
-       if (!elevator_init(q, NULL)) {
-               blk_queue_congestion_threshold(q);
-               return q;
-       }
+       /* init elevator */
+       if (elevator_init(q, NULL))
+               return NULL;
 
-       return NULL;
+       blk_queue_congestion_threshold(q);
+
+       /* all done, end the initial bypass */
+       blk_queue_bypass_end(q);
+       return q;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
 
@@ -648,33 +720,6 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
        mempool_free(rq, q->rq.rq_pool);
 }
 
-static struct request *
-blk_alloc_request(struct request_queue *q, struct io_cq *icq,
-                 unsigned int flags, gfp_t gfp_mask)
-{
-       struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
-
-       if (!rq)
-               return NULL;
-
-       blk_rq_init(q, rq);
-
-       rq->cmd_flags = flags | REQ_ALLOCED;
-
-       if (flags & REQ_ELVPRIV) {
-               rq->elv.icq = icq;
-               if (unlikely(elv_set_request(q, rq, gfp_mask))) {
-                       mempool_free(rq, q->rq.rq_pool);
-                       return NULL;
-               }
-               /* @rq->elv.icq holds on to io_context until @rq is freed */
-               if (icq)
-                       get_io_context(icq->ioc);
-       }
-
-       return rq;
-}
-
 /*
  * ioc_batching returns true if the ioc is a valid batching request and
  * should be given priority access to a request.
@@ -762,6 +807,22 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
        return true;
 }
 
+/**
+ * rq_ioc - determine io_context for request allocation
+ * @bio: request being allocated is for this bio (can be %NULL)
+ *
+ * Determine io_context to use for request allocation for @bio.  May return
+ * %NULL if %current->io_context doesn't exist.
+ */
+static struct io_context *rq_ioc(struct bio *bio)
+{
+#ifdef CONFIG_BLK_CGROUP
+       if (bio && bio->bi_ioc)
+               return bio->bi_ioc;
+#endif
+       return current->io_context;
+}
+
 /**
  * get_request - get a free request
  * @q: request_queue to allocate request from
@@ -779,7 +840,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 static struct request *get_request(struct request_queue *q, int rw_flags,
                                   struct bio *bio, gfp_t gfp_mask)
 {
-       struct request *rq = NULL;
+       struct request *rq;
        struct request_list *rl = &q->rq;
        struct elevator_type *et;
        struct io_context *ioc;
@@ -789,7 +850,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
        int may_queue;
 retry:
        et = q->elevator->type;
-       ioc = current->io_context;
+       ioc = rq_ioc(bio);
 
        if (unlikely(blk_queue_dead(q)))
                return NULL;
@@ -808,7 +869,7 @@ retry:
                         */
                        if (!ioc && !retried) {
                                spin_unlock_irq(q->queue_lock);
-                               create_io_context(current, gfp_mask, q->node);
+                               create_io_context(gfp_mask, q->node);
                                spin_lock_irq(q->queue_lock);
                                retried = true;
                                goto retry;
@@ -831,7 +892,7 @@ retry:
                                         * process is not a "batcher", and not
                                         * exempted by the IO scheduler
                                         */
-                                       goto out;
+                                       return NULL;
                                }
                        }
                }
@@ -844,7 +905,7 @@ retry:
         * allocated with any setting of ->nr_requests
         */
        if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
-               goto out;
+               return NULL;
 
        rl->count[is_sync]++;
        rl->starved[is_sync] = 0;
@@ -859,8 +920,7 @@ retry:
         * Also, lookup icq while holding queue_lock.  If it doesn't exist,
         * it will be created after releasing queue_lock.
         */
-       if (blk_rq_should_init_elevator(bio) &&
-           !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+       if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
                rw_flags |= REQ_ELVPRIV;
                rl->elvpriv++;
                if (et->icq_cache && ioc)
@@ -871,41 +931,36 @@ retry:
                rw_flags |= REQ_IO_STAT;
        spin_unlock_irq(q->queue_lock);
 
-       /* create icq if missing */
-       if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
-               icq = ioc_create_icq(q, gfp_mask);
-               if (!icq)
-                       goto fail_icq;
-       }
-
-       rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+       /* allocate and init request */
+       rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+       if (!rq)
+               goto fail_alloc;
 
-fail_icq:
-       if (unlikely(!rq)) {
-               /*
-                * Allocation failed presumably due to memory. Undo anything
-                * we might have messed up.
-                *
-                * Allocating task should really be put onto the front of the
-                * wait queue, but this is pretty rare.
-                */
-               spin_lock_irq(q->queue_lock);
-               freed_request(q, rw_flags);
+       blk_rq_init(q, rq);
+       rq->cmd_flags = rw_flags | REQ_ALLOCED;
+
+       /* init elvpriv */
+       if (rw_flags & REQ_ELVPRIV) {
+               if (unlikely(et->icq_cache && !icq)) {
+                       create_io_context(gfp_mask, q->node);
+                       ioc = rq_ioc(bio);
+                       if (!ioc)
+                               goto fail_elvpriv;
+
+                       icq = ioc_create_icq(ioc, q, gfp_mask);
+                       if (!icq)
+                               goto fail_elvpriv;
+               }
 
-               /*
-                * in the very unlikely event that allocation failed and no
-                * requests for this direction was pending, mark us starved
-                * so that freeing of a request in the other direction will
-                * notice us. another possible fix would be to split the
-                * rq mempool into READ and WRITE
-                */
-rq_starved:
-               if (unlikely(rl->count[is_sync] == 0))
-                       rl->starved[is_sync] = 1;
+               rq->elv.icq = icq;
+               if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
+                       goto fail_elvpriv;
 
-               goto out;
+               /* @rq->elv.icq holds io_context until @rq is freed */
+               if (icq)
+                       get_io_context(icq->ioc);
        }
-
+out:
        /*
         * ioc may be NULL here, and ioc_batching will be false. That's
         * OK, if the queue is under the request limit then requests need
@@ -916,8 +971,48 @@ rq_starved:
                ioc->nr_batch_requests--;
 
        trace_block_getrq(q, bio, rw_flags & 1);
-out:
        return rq;
+
+fail_elvpriv:
+       /*
+        * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
+        * and may fail indefinitely under memory pressure and thus
+        * shouldn't stall IO.  Treat this request as !elvpriv.  This will
+        * disturb iosched and blkcg but weird is better than dead.
+        */
+       printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
+                          dev_name(q->backing_dev_info.dev));
+
+       rq->cmd_flags &= ~REQ_ELVPRIV;
+       rq->elv.icq = NULL;
+
+       spin_lock_irq(q->queue_lock);
+       rl->elvpriv--;
+       spin_unlock_irq(q->queue_lock);
+       goto out;
+
+fail_alloc:
+       /*
+        * Allocation failed presumably due to memory. Undo anything we
+        * might have messed up.
+        *
+        * Allocating task should really be put onto the front of the wait
+        * queue, but this is pretty rare.
+        */
+       spin_lock_irq(q->queue_lock);
+       freed_request(q, rw_flags);
+
+       /*
+        * in the very unlikely event that allocation failed and no
+        * requests for this direction was pending, mark us starved so that
+        * freeing of a request in the other direction will notice
+        * us. another possible fix would be to split the rq mempool into
+        * READ and WRITE
+        */
+rq_starved:
+       if (unlikely(rl->count[is_sync] == 0))
+               rl->starved[is_sync] = 1;
+       return NULL;
 }
 
 /**
@@ -961,7 +1056,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                 * up to a big batch of them for a small period time.
                 * See ioc_batching, ioc_set_batching
                 */
-               create_io_context(current, GFP_NOIO, q->node);
+               create_io_context(GFP_NOIO, q->node);
                ioc_set_batching(q, current->io_context);
 
                spin_lock_irq(q->queue_lock);
index fb95dd2f889a6071d85e334d7d5d9342ca56ab75..893b8007c657e8bd0ca93d5ba61e9e7d02aa892b 100644 (file)
@@ -155,20 +155,20 @@ void put_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(put_io_context);
 
-/* Called by the exiting task */
-void exit_io_context(struct task_struct *task)
+/**
+ * put_io_context_active - put active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Undo get_io_context_active().  If active reference reaches zero after
+ * put, @ioc can never issue further IOs and ioscheds are notified.
+ */
+void put_io_context_active(struct io_context *ioc)
 {
-       struct io_context *ioc;
-       struct io_cq *icq;
        struct hlist_node *n;
        unsigned long flags;
+       struct io_cq *icq;
 
-       task_lock(task);
-       ioc = task->io_context;
-       task->io_context = NULL;
-       task_unlock(task);
-
-       if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+       if (!atomic_dec_and_test(&ioc->active_ref)) {
                put_io_context(ioc);
                return;
        }
@@ -197,6 +197,20 @@ retry:
        put_io_context(ioc);
 }
 
+/* Called by the exiting task */
+void exit_io_context(struct task_struct *task)
+{
+       struct io_context *ioc;
+
+       task_lock(task);
+       ioc = task->io_context;
+       task->io_context = NULL;
+       task_unlock(task);
+
+       atomic_dec(&ioc->nr_tasks);
+       put_io_context_active(ioc);
+}
+
 /**
  * ioc_clear_queue - break any ioc association with the specified queue
  * @q: request_queue being cleared
@@ -218,19 +232,19 @@ void ioc_clear_queue(struct request_queue *q)
        }
 }
 
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
-                               int node)
+int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
 {
        struct io_context *ioc;
+       int ret;
 
        ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
                                    node);
        if (unlikely(!ioc))
-               return;
+               return -ENOMEM;
 
        /* initialize */
        atomic_long_set(&ioc->refcount, 1);
-       atomic_set(&ioc->nr_tasks, 1);
+       atomic_set(&ioc->active_ref, 1);
        spin_lock_init(&ioc->lock);
        INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
        INIT_HLIST_HEAD(&ioc->icq_list);
@@ -249,7 +263,12 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
                task->io_context = ioc;
        else
                kmem_cache_free(iocontext_cachep, ioc);
+
+       ret = task->io_context ? 0 : -EBUSY;
+
        task_unlock(task);
+
+       return ret;
 }
 
 /**
@@ -281,7 +300,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
                        return ioc;
                }
                task_unlock(task);
-       } while (create_io_context(task, gfp_flags, node));
+       } while (!create_task_io_context(task, gfp_flags, node));
 
        return NULL;
 }
@@ -325,26 +344,23 @@ EXPORT_SYMBOL(ioc_lookup_icq);
 
 /**
  * ioc_create_icq - create and link io_cq
+ * @ioc: io_context of interest
  * @q: request_queue of interest
  * @gfp_mask: allocation mask
  *
- * Make sure io_cq linking %current->io_context and @q exists.  If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, they
+ * will be created using @gfp_mask.
  *
  * The caller is responsible for ensuring @ioc won't go away and @q is
  * alive and will stay alive until this function returns.
  */
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+                            gfp_t gfp_mask)
 {
        struct elevator_type *et = q->elevator->type;
-       struct io_context *ioc;
        struct io_cq *icq;
 
        /* allocate stuff */
-       ioc = create_io_context(current, gfp_mask, q->node);
-       if (!ioc)
-               return NULL;
-
        icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
                                    q->node);
        if (!icq)
@@ -382,74 +398,6 @@ struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
        return icq;
 }
 
-void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
-{
-       struct io_cq *icq;
-       struct hlist_node *n;
-
-       hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
-               icq->flags |= flags;
-}
-
-/**
- * ioc_ioprio_changed - notify ioprio change
- * @ioc: io_context of interest
- * @ioprio: new ioprio
- *
- * @ioc's ioprio has changed to @ioprio.  Set %ICQ_IOPRIO_CHANGED for all
- * icq's.  iosched is responsible for checking the bit and applying it on
- * request issue path.
- */
-void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&ioc->lock, flags);
-       ioc->ioprio = ioprio;
-       ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
-       spin_unlock_irqrestore(&ioc->lock, flags);
-}
-
-/**
- * ioc_cgroup_changed - notify cgroup change
- * @ioc: io_context of interest
- *
- * @ioc's cgroup has changed.  Set %ICQ_CGROUP_CHANGED for all icq's.
- * iosched is responsible for checking the bit and applying it on request
- * issue path.
- */
-void ioc_cgroup_changed(struct io_context *ioc)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&ioc->lock, flags);
-       ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
-       spin_unlock_irqrestore(&ioc->lock, flags);
-}
-EXPORT_SYMBOL(ioc_cgroup_changed);
-
-/**
- * icq_get_changed - fetch and clear icq changed mask
- * @icq: icq of interest
- *
- * Fetch and clear ICQ_*_CHANGED bits from @icq.  Grabs and releases
- * @icq->ioc->lock.
- */
-unsigned icq_get_changed(struct io_cq *icq)
-{
-       unsigned int changed = 0;
-       unsigned long flags;
-
-       if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
-               spin_lock_irqsave(&icq->ioc->lock, flags);
-               changed = icq->flags & ICQ_CHANGED_MASK;
-               icq->flags &= ~ICQ_CHANGED_MASK;
-               spin_unlock_irqrestore(&icq->ioc->lock, flags);
-       }
-       return changed;
-}
-EXPORT_SYMBOL(icq_get_changed);
-
 static int __init blk_ioc_init(void)
 {
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
index cf150011d808bc71fee8ff894387de0581235f56..aa41b47c22d2e89525a5bd3cfb9501e67634bff7 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/blktrace_api.h>
 
 #include "blk.h"
+#include "blk-cgroup.h"
 
 struct queue_sysfs_entry {
        struct attribute attr;
@@ -479,6 +480,8 @@ static void blk_release_queue(struct kobject *kobj)
 
        blk_sync_queue(q);
 
+       blkcg_exit_queue(q);
+
        if (q->elevator) {
                spin_lock_irq(q->queue_lock);
                ioc_clear_queue(q);
@@ -486,15 +489,12 @@ static void blk_release_queue(struct kobject *kobj)
                elevator_exit(q->elevator);
        }
 
-       blk_throtl_exit(q);
-
        if (rl->rq_pool)
                mempool_destroy(rl->rq_pool);
 
        if (q->queue_tags)
                __blk_queue_free_tags(q);
 
-       blk_throtl_release(q);
        blk_trace_shutdown(q);
 
        bdi_destroy(&q->backing_dev_info);
index f2ddb94626bd49df5e942591998d19ff8665a0eb..5b0659512047208efdcc3db7d714bc72dfd456f5 100644 (file)
@@ -21,6 +21,8 @@ static int throtl_quantum = 32;
 /* Throttling is performed over 100ms slice and after that slice is renewed */
 static unsigned long throtl_slice = HZ/10;     /* 100 ms */
 
+static struct blkcg_policy blkcg_policy_throtl;
+
 /* A workqueue to queue throttle related work */
 static struct workqueue_struct *kthrotld_workqueue;
 static void throtl_schedule_delayed_work(struct throtl_data *td,
@@ -38,9 +40,17 @@ struct throtl_rb_root {
 
 #define rb_entry_tg(node)      rb_entry((node), struct throtl_grp, rb_node)
 
+/* Per-cpu group stats */
+struct tg_stats_cpu {
+       /* total bytes transferred */
+       struct blkg_rwstat              service_bytes;
+       /* total IOs serviced, post merge */
+       struct blkg_rwstat              serviced;
+};
+
 struct throtl_grp {
-       /* List of throtl groups on the request queue*/
-       struct hlist_node tg_node;
+       /* must be the first member */
+       struct blkg_policy_data pd;
 
        /* active throtl group service_tree member */
        struct rb_node rb_node;
@@ -52,8 +62,6 @@ struct throtl_grp {
         */
        unsigned long disptime;
 
-       struct blkio_group blkg;
-       atomic_t ref;
        unsigned int flags;
 
        /* Two lists for READ and WRITE */
@@ -80,18 +88,18 @@ struct throtl_grp {
        /* Some throttle limits got updated for the group */
        int limits_changed;
 
-       struct rcu_head rcu_head;
+       /* Per cpu stats pointer */
+       struct tg_stats_cpu __percpu *stats_cpu;
+
+       /* List of tgs waiting for per cpu stats memory to be allocated */
+       struct list_head stats_alloc_node;
 };
 
 struct throtl_data
 {
-       /* List of throtl groups */
-       struct hlist_head tg_list;
-
        /* service tree for active throtl groups */
        struct throtl_rb_root tg_service_tree;
 
-       struct throtl_grp *root_tg;
        struct request_queue *queue;
 
        /* Total Number of queued bios on READ and WRITE lists */
@@ -108,6 +116,33 @@ struct throtl_data
        int limits_changed;
 };
 
+/* list and work item to allocate percpu group stats */
+static DEFINE_SPINLOCK(tg_stats_alloc_lock);
+static LIST_HEAD(tg_stats_alloc_list);
+
+static void tg_stats_alloc_fn(struct work_struct *);
+static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
+
+static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
+{
+       return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
+}
+
+static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
+{
+       return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
+}
+
+static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
+{
+       return pd_to_blkg(&tg->pd);
+}
+
+static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
+{
+       return blkg_to_tg(td->queue->root_blkg);
+}
+
 enum tg_state_flags {
        THROTL_TG_FLAG_on_rr = 0,       /* on round-robin busy list */
 };
@@ -128,244 +163,150 @@ static inline int throtl_tg_##name(const struct throtl_grp *tg)         \
 
 THROTL_TG_FNS(on_rr);
 
-#define throtl_log_tg(td, tg, fmt, args...)                            \
-       blk_add_trace_msg((td)->queue, "throtl %s " fmt,                \
-                               blkg_path(&(tg)->blkg), ##args);        \
+#define throtl_log_tg(td, tg, fmt, args...)    do {                    \
+       char __pbuf[128];                                               \
+                                                                       \
+       blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));              \
+       blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
+} while (0)
 
 #define throtl_log(td, fmt, args...)   \
        blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
 
-static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
-{
-       if (blkg)
-               return container_of(blkg, struct throtl_grp, blkg);
-
-       return NULL;
-}
-
 static inline unsigned int total_nr_queued(struct throtl_data *td)
 {
        return td->nr_queued[0] + td->nr_queued[1];
 }
 
-static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
-{
-       atomic_inc(&tg->ref);
-       return tg;
-}
-
-static void throtl_free_tg(struct rcu_head *head)
+/*
+ * Worker for allocating per cpu stat for tgs. This is scheduled on the
+ * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * allocation.
+ */
+static void tg_stats_alloc_fn(struct work_struct *work)
 {
-       struct throtl_grp *tg;
+       static struct tg_stats_cpu *stats_cpu;  /* this fn is non-reentrant */
+       struct delayed_work *dwork = to_delayed_work(work);
+       bool empty = false;
+
+alloc_stats:
+       if (!stats_cpu) {
+               stats_cpu = alloc_percpu(struct tg_stats_cpu);
+               if (!stats_cpu) {
+                       /* allocation failed, try again after some time */
+                       queue_delayed_work(system_nrt_wq, dwork,
+                                          msecs_to_jiffies(10));
+                       return;
+               }
+       }
 
-       tg = container_of(head, struct throtl_grp, rcu_head);
-       free_percpu(tg->blkg.stats_cpu);
-       kfree(tg);
-}
+       spin_lock_irq(&tg_stats_alloc_lock);
 
-static void throtl_put_tg(struct throtl_grp *tg)
-{
-       BUG_ON(atomic_read(&tg->ref) <= 0);
-       if (!atomic_dec_and_test(&tg->ref))
-               return;
+       if (!list_empty(&tg_stats_alloc_list)) {
+               struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
+                                                        struct throtl_grp,
+                                                        stats_alloc_node);
+               swap(tg->stats_cpu, stats_cpu);
+               list_del_init(&tg->stats_alloc_node);
+       }
 
-       /*
-        * A group is freed in rcu manner. But having an rcu lock does not
-        * mean that one can access all the fields of blkg and assume these
-        * are valid. For example, don't try to follow throtl_data and
-        * request queue links.
-        *
-        * Having a reference to blkg under an rcu allows acess to only
-        * values local to groups like group stats and group rate limits
-        */
-       call_rcu(&tg->rcu_head, throtl_free_tg);
+       empty = list_empty(&tg_stats_alloc_list);
+       spin_unlock_irq(&tg_stats_alloc_lock);
+       if (!empty)
+               goto alloc_stats;
 }
 
-static void throtl_init_group(struct throtl_grp *tg)
+static void throtl_pd_init(struct blkcg_gq *blkg)
 {
-       INIT_HLIST_NODE(&tg->tg_node);
+       struct throtl_grp *tg = blkg_to_tg(blkg);
+       unsigned long flags;
+
        RB_CLEAR_NODE(&tg->rb_node);
        bio_list_init(&tg->bio_lists[0]);
        bio_list_init(&tg->bio_lists[1]);
        tg->limits_changed = false;
 
-       /* Practically unlimited BW */
-       tg->bps[0] = tg->bps[1] = -1;
-       tg->iops[0] = tg->iops[1] = -1;
+       tg->bps[READ] = -1;
+       tg->bps[WRITE] = -1;
+       tg->iops[READ] = -1;
+       tg->iops[WRITE] = -1;
 
        /*
-        * Take the initial reference that will be released on destroy
-        * This can be thought of a joint reference by cgroup and
-        * request queue which will be dropped by either request queue
-        * exit or cgroup deletion path depending on who is exiting first.
+        * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
+        * but percpu allocator can't be called from IO path.  Queue tg on
+        * tg_stats_alloc_list and allocate from work item.
         */
-       atomic_set(&tg->ref, 1);
+       spin_lock_irqsave(&tg_stats_alloc_lock, flags);
+       list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
+       queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+       spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
-/* Should be called with rcu read lock held (needed for blkcg) */
-static void
-throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
+static void throtl_pd_exit(struct blkcg_gq *blkg)
 {
-       hlist_add_head(&tg->tg_node, &td->tg_list);
-       td->nr_undestroyed_grps++;
-}
-
-static void
-__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
-{
-       struct backing_dev_info *bdi = &td->queue->backing_dev_info;
-       unsigned int major, minor;
-
-       if (!tg || tg->blkg.dev)
-               return;
-
-       /*
-        * Fill in device details for a group which might not have been
-        * filled at group creation time as queue was being instantiated
-        * and driver had not attached a device yet
-        */
-       if (bdi->dev && dev_name(bdi->dev)) {
-               sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-               tg->blkg.dev = MKDEV(major, minor);
-       }
-}
-
-/*
- * Should be called with without queue lock held. Here queue lock will be
- * taken rarely. It will be taken only once during life time of a group
- * if need be
- */
-static void
-throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
-{
-       if (!tg || tg->blkg.dev)
-               return;
-
-       spin_lock_irq(td->queue->queue_lock);
-       __throtl_tg_fill_dev_details(td, tg);
-       spin_unlock_irq(td->queue->queue_lock);
-}
-
-static void throtl_init_add_tg_lists(struct throtl_data *td,
-                       struct throtl_grp *tg, struct blkio_cgroup *blkcg)
-{
-       __throtl_tg_fill_dev_details(td, tg);
-
-       /* Add group onto cgroup list */
-       blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
-                               tg->blkg.dev, BLKIO_POLICY_THROTL);
+       struct throtl_grp *tg = blkg_to_tg(blkg);
+       unsigned long flags;
 
-       tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
-       tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
-       tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
-       tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
+       spin_lock_irqsave(&tg_stats_alloc_lock, flags);
+       list_del_init(&tg->stats_alloc_node);
+       spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 
-       throtl_add_group_to_td_list(td, tg);
+       free_percpu(tg->stats_cpu);
 }
 
-/* Should be called without queue lock and outside of rcu period */
-static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
+static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
 {
-       struct throtl_grp *tg = NULL;
-       int ret;
+       struct throtl_grp *tg = blkg_to_tg(blkg);
+       int cpu;
 
-       tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
-       if (!tg)
-               return NULL;
+       if (tg->stats_cpu == NULL)
+               return;
 
-       ret = blkio_alloc_blkg_stats(&tg->blkg);
+       for_each_possible_cpu(cpu) {
+               struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
 
-       if (ret) {
-               kfree(tg);
-               return NULL;
+               blkg_rwstat_reset(&sc->service_bytes);
+               blkg_rwstat_reset(&sc->serviced);
        }
-
-       throtl_init_group(tg);
-       return tg;
 }
 
-static struct
-throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
+static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
+                                          struct blkcg *blkcg)
 {
-       struct throtl_grp *tg = NULL;
-       void *key = td;
-
        /*
-        * This is the common case when there are no blkio cgroups.
-        * Avoid lookup in this case
-        */
-       if (blkcg == &blkio_root_cgroup)
-               tg = td->root_tg;
-       else
-               tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+        * This is the common case when there are no blkcgs.  Avoid lookup
+        * in this case
+        */
+       if (blkcg == &blkcg_root)
+               return td_root_tg(td);
 
-       __throtl_tg_fill_dev_details(td, tg);
-       return tg;
+       return blkg_to_tg(blkg_lookup(blkcg, td->queue));
 }
 
-static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
+                                                 struct blkcg *blkcg)
 {
-       struct throtl_grp *tg = NULL, *__tg = NULL;
-       struct blkio_cgroup *blkcg;
        struct request_queue *q = td->queue;
-
-       /* no throttling for dead queue */
-       if (unlikely(blk_queue_dead(q)))
-               return NULL;
-
-       rcu_read_lock();
-       blkcg = task_blkio_cgroup(current);
-       tg = throtl_find_tg(td, blkcg);
-       if (tg) {
-               rcu_read_unlock();
-               return tg;
-       }
-
-       /*
-        * Need to allocate a group. Allocation of group also needs allocation
-        * of per cpu stats which in-turn takes a mutex() and can block. Hence
-        * we need to drop rcu lock and queue_lock before we call alloc.
-        */
-       rcu_read_unlock();
-       spin_unlock_irq(q->queue_lock);
-
-       tg = throtl_alloc_tg(td);
-
-       /* Group allocated and queue is still alive. take the lock */
-       spin_lock_irq(q->queue_lock);
-
-       /* Make sure @q is still alive */
-       if (unlikely(blk_queue_dead(q))) {
-               kfree(tg);
-               return NULL;
-       }
-
-       /*
-        * Initialize the new group. After sleeping, read the blkcg again.
-        */
-       rcu_read_lock();
-       blkcg = task_blkio_cgroup(current);
+       struct throtl_grp *tg = NULL;
 
        /*
-        * If some other thread already allocated the group while we were
-        * not holding queue lock, free up the group
+        * This is the common case when there are no blkcgs.  Avoid lookup
+        * in this case
         */
-       __tg = throtl_find_tg(td, blkcg);
-
-       if (__tg) {
-               kfree(tg);
-               rcu_read_unlock();
-               return __tg;
-       }
-
-       /* Group allocation failed. Account the IO to root group */
-       if (!tg) {
-               tg = td->root_tg;
-               return tg;
+       if (blkcg == &blkcg_root) {
+               tg = td_root_tg(td);
+       } else {
+               struct blkcg_gq *blkg;
+
+               blkg = blkg_lookup_create(blkcg, q);
+
+               /* if %NULL and @q is alive, fall back to root_tg */
+               if (!IS_ERR(blkg))
+                       tg = blkg_to_tg(blkg);
+               else if (!blk_queue_dead(q))
+                       tg = td_root_tg(td);
        }
 
-       throtl_init_add_tg_lists(td, tg, blkcg);
-       rcu_read_unlock();
        return tg;
 }
 
@@ -734,16 +675,41 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
        return 0;
 }
 
+static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
+                                        int rw)
+{
+       struct throtl_grp *tg = blkg_to_tg(blkg);
+       struct tg_stats_cpu *stats_cpu;
+       unsigned long flags;
+
+       /* If per cpu stats are not allocated yet, don't do any accounting. */
+       if (tg->stats_cpu == NULL)
+               return;
+
+       /*
+        * Disabling interrupts to provide mutual exclusion between two
+        * writes on same cpu. It probably is not needed for 64bit. Not
+        * optimizing that case yet.
+        */
+       local_irq_save(flags);
+
+       stats_cpu = this_cpu_ptr(tg->stats_cpu);
+
+       blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
+       blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
+
+       local_irq_restore(flags);
+}
+
 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 {
        bool rw = bio_data_dir(bio);
-       bool sync = rw_is_sync(bio->bi_rw);
 
        /* Charge the bio to the group */
        tg->bytes_disp[rw] += bio->bi_size;
        tg->io_disp[rw]++;
 
-       blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
+       throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
 }
 
 static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
@@ -753,7 +719,7 @@ static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
 
        bio_list_add(&tg->bio_lists[rw], bio);
        /* Take a bio reference on tg */
-       throtl_ref_get_tg(tg);
+       blkg_get(tg_to_blkg(tg));
        tg->nr_queued[rw]++;
        td->nr_queued[rw]++;
        throtl_enqueue_tg(td, tg);
@@ -786,8 +752,8 @@ static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
 
        bio = bio_list_pop(&tg->bio_lists[rw]);
        tg->nr_queued[rw]--;
-       /* Drop bio reference on tg */
-       throtl_put_tg(tg);
+       /* Drop bio reference on blkg */
+       blkg_put(tg_to_blkg(tg));
 
        BUG_ON(td->nr_queued[rw] <= 0);
        td->nr_queued[rw]--;
@@ -865,8 +831,8 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
 
 static void throtl_process_limit_change(struct throtl_data *td)
 {
-       struct throtl_grp *tg;
-       struct hlist_node *pos, *n;
+       struct request_queue *q = td->queue;
+       struct blkcg_gq *blkg, *n;
 
        if (!td->limits_changed)
                return;
@@ -875,7 +841,9 @@ static void throtl_process_limit_change(struct throtl_data *td)
 
        throtl_log(td, "limits changed");
 
-       hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
+       list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
+               struct throtl_grp *tg = blkg_to_tg(blkg);
+
                if (!tg->limits_changed)
                        continue;
 
@@ -973,120 +941,159 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
        }
 }
 
-static void
-throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
+static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
+                               struct blkg_policy_data *pd, int off)
 {
-       /* Something wrong if we are trying to remove same group twice */
-       BUG_ON(hlist_unhashed(&tg->tg_node));
+       struct throtl_grp *tg = pd_to_tg(pd);
+       struct blkg_rwstat rwstat = { }, tmp;
+       int i, cpu;
 
-       hlist_del_init(&tg->tg_node);
+       for_each_possible_cpu(cpu) {
+               struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
 
-       /*
-        * Put the reference taken at the time of creation so that when all
-        * queues are gone, group can be destroyed.
-        */
-       throtl_put_tg(tg);
-       td->nr_undestroyed_grps--;
+               tmp = blkg_rwstat_read((void *)sc + off);
+               for (i = 0; i < BLKG_RWSTAT_NR; i++)
+                       rwstat.cnt[i] += tmp.cnt[i];
+       }
+
+       return __blkg_prfill_rwstat(sf, pd, &rwstat);
 }
 
-static void throtl_release_tgs(struct throtl_data *td)
+static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
+                              struct seq_file *sf)
 {
-       struct hlist_node *pos, *n;
-       struct throtl_grp *tg;
+       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 
-       hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
-               /*
-                * If cgroup removal path got to blk_group first and removed
-                * it from cgroup list, then it will take care of destroying
-                * cfqg also.
-                */
-               if (!blkiocg_del_blkio_group(&tg->blkg))
-                       throtl_destroy_tg(td, tg);
-       }
+       blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
+                         cft->private, true);
+       return 0;
 }
 
-/*
- * Blk cgroup controller notification saying that blkio_group object is being
- * delinked as associated cgroup object is going away. That also means that
- * no new IO will come in this group. So get rid of this group as soon as
- * any pending IO in the group is finished.
- *
- * This function is called under rcu_read_lock(). key is the rcu protected
- * pointer. That means "key" is a valid throtl_data pointer as long as we are
- * rcu read lock.
- *
- * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
- * it should not be NULL as even if queue was going away, cgroup deltion
- * path got to it first.
- */
-void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
+static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
+                             int off)
 {
-       unsigned long flags;
-       struct throtl_data *td = key;
+       struct throtl_grp *tg = pd_to_tg(pd);
+       u64 v = *(u64 *)((void *)tg + off);
 
-       spin_lock_irqsave(td->queue->queue_lock, flags);
-       throtl_destroy_tg(td, tg_of_blkg(blkg));
-       spin_unlock_irqrestore(td->queue->queue_lock, flags);
+       if (v == -1)
+               return 0;
+       return __blkg_prfill_u64(sf, pd, v);
 }
 
-static void throtl_update_blkio_group_common(struct throtl_data *td,
-                               struct throtl_grp *tg)
+static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
+                              int off)
 {
-       xchg(&tg->limits_changed, true);
-       xchg(&td->limits_changed, true);
-       /* Schedule a work now to process the limit change */
-       throtl_schedule_delayed_work(td, 0);
+       struct throtl_grp *tg = pd_to_tg(pd);
+       unsigned int v = *(unsigned int *)((void *)tg + off);
+
+       if (v == -1)
+               return 0;
+       return __blkg_prfill_u64(sf, pd, v);
 }
 
-/*
- * For all update functions, key should be a valid pointer because these
- * update functions are called under blkcg_lock, that means, blkg is
- * valid and in turn key is valid. queue exit path can not race because
- * of blkcg_lock
- *
- * Can not take queue lock in update functions as queue lock under blkcg_lock
- * is not allowed. Under other paths we take blkcg_lock under queue_lock.
- */
-static void throtl_update_blkio_group_read_bps(void *key,
-                               struct blkio_group *blkg, u64 read_bps)
+static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+                            struct seq_file *sf)
 {
-       struct throtl_data *td = key;
-       struct throtl_grp *tg = tg_of_blkg(blkg);
-
-       tg->bps[READ] = read_bps;
-       throtl_update_blkio_group_common(td, tg);
+       blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
+                         &blkcg_policy_throtl, cft->private, false);
+       return 0;
 }
 
-static void throtl_update_blkio_group_write_bps(void *key,
-                               struct blkio_group *blkg, u64 write_bps)
+static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
+                             struct seq_file *sf)
 {
-       struct throtl_data *td = key;
-       struct throtl_grp *tg = tg_of_blkg(blkg);
-
-       tg->bps[WRITE] = write_bps;
-       throtl_update_blkio_group_common(td, tg);
+       blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
+                         &blkcg_policy_throtl, cft->private, false);
+       return 0;
 }
 
-static void throtl_update_blkio_group_read_iops(void *key,
-                       struct blkio_group *blkg, unsigned int read_iops)
+static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
+                      bool is_u64)
 {
-       struct throtl_data *td = key;
-       struct throtl_grp *tg = tg_of_blkg(blkg);
+       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+       struct blkg_conf_ctx ctx;
+       struct throtl_grp *tg;
+       struct throtl_data *td;
+       int ret;
+
+       ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
+       if (ret)
+               return ret;
+
+       tg = blkg_to_tg(ctx.blkg);
+       td = ctx.blkg->q->td;
+
+       if (!ctx.v)
+               ctx.v = -1;
+
+       if (is_u64)
+               *(u64 *)((void *)tg + cft->private) = ctx.v;
+       else
+               *(unsigned int *)((void *)tg + cft->private) = ctx.v;
+
+       /* XXX: we don't need the following deferred processing */
+       xchg(&tg->limits_changed, true);
+       xchg(&td->limits_changed, true);
+       throtl_schedule_delayed_work(td, 0);
 
-       tg->iops[READ] = read_iops;
-       throtl_update_blkio_group_common(td, tg);
+       blkg_conf_finish(&ctx);
+       return 0;
 }
 
-static void throtl_update_blkio_group_write_iops(void *key,
-                       struct blkio_group *blkg, unsigned int write_iops)
+static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+                          const char *buf)
 {
-       struct throtl_data *td = key;
-       struct throtl_grp *tg = tg_of_blkg(blkg);
+       return tg_set_conf(cgrp, cft, buf, true);
+}
 
-       tg->iops[WRITE] = write_iops;
-       throtl_update_blkio_group_common(td, tg);
+static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
+                           const char *buf)
+{
+       return tg_set_conf(cgrp, cft, buf, false);
 }
 
+static struct cftype throtl_files[] = {
+       {
+               .name = "throttle.read_bps_device",
+               .private = offsetof(struct throtl_grp, bps[READ]),
+               .read_seq_string = tg_print_conf_u64,
+               .write_string = tg_set_conf_u64,
+               .max_write_len = 256,
+       },
+       {
+               .name = "throttle.write_bps_device",
+               .private = offsetof(struct throtl_grp, bps[WRITE]),
+               .read_seq_string = tg_print_conf_u64,
+               .write_string = tg_set_conf_u64,
+               .max_write_len = 256,
+       },
+       {
+               .name = "throttle.read_iops_device",
+               .private = offsetof(struct throtl_grp, iops[READ]),
+               .read_seq_string = tg_print_conf_uint,
+               .write_string = tg_set_conf_uint,
+               .max_write_len = 256,
+       },
+       {
+               .name = "throttle.write_iops_device",
+               .private = offsetof(struct throtl_grp, iops[WRITE]),
+               .read_seq_string = tg_print_conf_uint,
+               .write_string = tg_set_conf_uint,
+               .max_write_len = 256,
+       },
+       {
+               .name = "throttle.io_service_bytes",
+               .private = offsetof(struct tg_stats_cpu, service_bytes),
+               .read_seq_string = tg_print_cpu_rwstat,
+       },
+       {
+               .name = "throttle.io_serviced",
+               .private = offsetof(struct tg_stats_cpu, serviced),
+               .read_seq_string = tg_print_cpu_rwstat,
+       },
+       { }     /* terminate */
+};
+
 static void throtl_shutdown_wq(struct request_queue *q)
 {
        struct throtl_data *td = q->td;
@@ -1094,19 +1101,13 @@ static void throtl_shutdown_wq(struct request_queue *q)
        cancel_delayed_work_sync(&td->throtl_work);
 }
 
-static struct blkio_policy_type blkio_policy_throtl = {
-       .ops = {
-               .blkio_unlink_group_fn = throtl_unlink_blkio_group,
-               .blkio_update_group_read_bps_fn =
-                                       throtl_update_blkio_group_read_bps,
-               .blkio_update_group_write_bps_fn =
-                                       throtl_update_blkio_group_write_bps,
-               .blkio_update_group_read_iops_fn =
-                                       throtl_update_blkio_group_read_iops,
-               .blkio_update_group_write_iops_fn =
-                                       throtl_update_blkio_group_write_iops,
-       },
-       .plid = BLKIO_POLICY_THROTL,
+static struct blkcg_policy blkcg_policy_throtl = {
+       .pd_size                = sizeof(struct throtl_grp),
+       .cftypes                = throtl_files,
+
+       .pd_init_fn             = throtl_pd_init,
+       .pd_exit_fn             = throtl_pd_exit,
+       .pd_reset_stats_fn      = throtl_pd_reset_stats,
 };
 
 bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
@@ -1114,7 +1115,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
        struct throtl_data *td = q->td;
        struct throtl_grp *tg;
        bool rw = bio_data_dir(bio), update_disptime = true;
-       struct blkio_cgroup *blkcg;
+       struct blkcg *blkcg;
        bool throttled = false;
 
        if (bio->bi_rw & REQ_THROTTLED) {
@@ -1122,33 +1123,31 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
                goto out;
        }
 
+       /* bio_associate_current() needs ioc, try creating */
+       create_io_context(GFP_ATOMIC, q->node);
+
        /*
         * A throtl_grp pointer retrieved under rcu can be used to access
         * basic fields like stats and io rates. If a group has no rules,
         * just update the dispatch stats in lockless manner and return.
         */
-
        rcu_read_lock();
-       blkcg = task_blkio_cgroup(current);
-       tg = throtl_find_tg(td, blkcg);
+       blkcg = bio_blkcg(bio);
+       tg = throtl_lookup_tg(td, blkcg);
        if (tg) {
-               throtl_tg_fill_dev_details(td, tg);
-
                if (tg_no_rule_group(tg, rw)) {
-                       blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
-                                       rw, rw_is_sync(bio->bi_rw));
-                       rcu_read_unlock();
-                       goto out;
+                       throtl_update_dispatch_stats(tg_to_blkg(tg),
+                                                    bio->bi_size, bio->bi_rw);
+                       goto out_unlock_rcu;
                }
        }
-       rcu_read_unlock();
 
        /*
         * Either group has not been allocated yet or it is not an unlimited
         * IO group
         */
        spin_lock_irq(q->queue_lock);
-       tg = throtl_get_tg(td);
+       tg = throtl_lookup_create_tg(td, blkcg);
        if (unlikely(!tg))
                goto out_unlock;
 
@@ -1189,6 +1188,7 @@ queue_bio:
                        tg->io_disp[rw], tg->iops[rw],
                        tg->nr_queued[READ], tg->nr_queued[WRITE]);
 
+       bio_associate_current(bio);
        throtl_add_bio_tg(q->td, tg, bio);
        throttled = true;
 
@@ -1199,6 +1199,8 @@ queue_bio:
 
 out_unlock:
        spin_unlock_irq(q->queue_lock);
+out_unlock_rcu:
+       rcu_read_unlock();
 out:
        return throttled;
 }
@@ -1241,79 +1243,31 @@ void blk_throtl_drain(struct request_queue *q)
 int blk_throtl_init(struct request_queue *q)
 {
        struct throtl_data *td;
-       struct throtl_grp *tg;
+       int ret;
 
        td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
        if (!td)
                return -ENOMEM;
 
-       INIT_HLIST_HEAD(&td->tg_list);
        td->tg_service_tree = THROTL_RB_ROOT;
        td->limits_changed = false;
        INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
 
-       /* alloc and Init root group. */
+       q->td = td;
        td->queue = q;
-       tg = throtl_alloc_tg(td);
 
-       if (!tg) {
+       /* activate policy */
+       ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
+       if (ret)
                kfree(td);
-               return -ENOMEM;
-       }
-
-       td->root_tg = tg;
-
-       rcu_read_lock();
-       throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
-       rcu_read_unlock();
-
-       /* Attach throtl data to request queue */
-       q->td = td;
-       return 0;
+       return ret;
 }
 
 void blk_throtl_exit(struct request_queue *q)
 {
-       struct throtl_data *td = q->td;
-       bool wait = false;
-
-       BUG_ON(!td);
-
-       throtl_shutdown_wq(q);
-
-       spin_lock_irq(q->queue_lock);
-       throtl_release_tgs(td);
-
-       /* If there are other groups */
-       if (td->nr_undestroyed_grps > 0)
-               wait = true;
-
-       spin_unlock_irq(q->queue_lock);
-
-       /*
-        * Wait for tg->blkg->key accessors to exit their grace periods.
-        * Do this wait only if there are other undestroyed groups out
-        * there (other than root group). This can happen if cgroup deletion
-        * path claimed the responsibility of cleaning up a group before
-        * queue cleanup code get to the group.
-        *
-        * Do not call synchronize_rcu() unconditionally as there are drivers
-        * which create/delete request queue hundreds of times during scan/boot
-        * and synchronize_rcu() can take significant time and slow down boot.
-        */
-       if (wait)
-               synchronize_rcu();
-
-       /*
-        * Just being safe to make sure after previous flush if some body did
-        * update limits through cgroup and another work got queued, cancel
-        * it.
-        */
+       BUG_ON(!q->td);
        throtl_shutdown_wq(q);
-}
-
-void blk_throtl_release(struct request_queue *q)
-{
+       blkcg_deactivate_policy(q, &blkcg_policy_throtl);
        kfree(q->td);
 }
 
@@ -1323,8 +1277,7 @@ static int __init throtl_init(void)
        if (!kthrotld_workqueue)
                panic("Failed to create kthrotld\n");
 
-       blkio_policy_register(&blkio_policy_throtl);
-       return 0;
+       return blkcg_policy_register(&blkcg_policy_throtl);
 }
 
 module_init(throtl_init);
index d45be871329ec67121d2fd41a53289a593f72eb7..85f6ae42f7d3f698e9e82c75064f428065953e70 100644 (file)
@@ -23,7 +23,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                        struct bio *bio);
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio);
-void blk_drain_queue(struct request_queue *q, bool drain_all);
+void blk_queue_bypass_start(struct request_queue *q);
+void blk_queue_bypass_end(struct request_queue *q);
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
@@ -144,9 +145,6 @@ void blk_queue_congestion_threshold(struct request_queue *q);
 
 int blk_dev_init(void);
 
-void elv_quiesce_start(struct request_queue *q);
-void elv_quiesce_end(struct request_queue *q);
-
 
 /*
  * Return the threshold (number of used requests) at which the queue is
@@ -186,32 +184,30 @@ static inline int blk_do_io_stat(struct request *rq)
  */
 void get_io_context(struct io_context *ioc);
 struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+                            gfp_t gfp_mask);
 void ioc_clear_queue(struct request_queue *q);
 
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
-                               int node);
+int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
 
 /**
  * create_io_context - try to create task->io_context
- * @task: target task
  * @gfp_mask: allocation mask
  * @node: allocation node
  *
- * If @task->io_context is %NULL, allocate a new io_context and install it.
- * Returns the current @task->io_context which may be %NULL if allocation
- * failed.
+ * If %current->io_context is %NULL, allocate a new io_context and install
+ * it.  Returns the current %current->io_context which may be %NULL if
+ * allocation failed.
  *
  * Note that this function can't be called with IRQ disabled because
- * task_lock which protects @task->io_context is IRQ-unsafe.
+ * task_lock which protects %current->io_context is IRQ-unsafe.
  */
-static inline struct io_context *create_io_context(struct task_struct *task,
-                                                  gfp_t gfp_mask, int node)
+static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
 {
        WARN_ON_ONCE(irqs_disabled());
-       if (unlikely(!task->io_context))
-               create_io_context_slowpath(task, gfp_mask, node);
-       return task->io_context;
+       if (unlikely(!current->io_context))
+               create_task_io_context(current, gfp_mask, node);
+       return current->io_context;
 }
 
 /*
@@ -222,7 +218,6 @@ extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
 extern void blk_throtl_drain(struct request_queue *q);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
-extern void blk_throtl_release(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 {
@@ -231,7 +226,6 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 static inline void blk_throtl_drain(struct request_queue *q) { }
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
-static inline void blk_throtl_release(struct request_queue *q) { }
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 
 #endif /* BLK_INTERNAL_H */
index 3c38536bd52c3e3a25f1b8152e40a6ebe5192d36..673c977cc2bfa238e0fe0efa6dddac5193fbccd8 100644 (file)
@@ -15,7 +15,9 @@
 #include <linux/ioprio.h>
 #include <linux/blktrace_api.h>
 #include "blk.h"
-#include "cfq.h"
+#include "blk-cgroup.h"
+
+static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
 
 /*
  * tunables
@@ -171,8 +173,53 @@ enum wl_type_t {
        SYNC_WORKLOAD = 2
 };
 
+struct cfqg_stats {
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+       /* total bytes transferred */
+       struct blkg_rwstat              service_bytes;
+       /* total IOs serviced, post merge */
+       struct blkg_rwstat              serviced;
+       /* number of ios merged */
+       struct blkg_rwstat              merged;
+       /* total time spent on device in ns, may not be accurate w/ queueing */
+       struct blkg_rwstat              service_time;
+       /* total time spent waiting in scheduler queue in ns */
+       struct blkg_rwstat              wait_time;
+       /* number of IOs queued up */
+       struct blkg_rwstat              queued;
+       /* total sectors transferred */
+       struct blkg_stat                sectors;
+       /* total disk time and nr sectors dispatched by this group */
+       struct blkg_stat                time;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+       /* time not charged to this cgroup */
+       struct blkg_stat                unaccounted_time;
+       /* sum of number of ios queued across all samples */
+       struct blkg_stat                avg_queue_size_sum;
+       /* count of samples taken for average */
+       struct blkg_stat                avg_queue_size_samples;
+       /* how many times this group has been removed from service tree */
+       struct blkg_stat                dequeue;
+       /* total time spent waiting for it to be assigned a timeslice. */
+       struct blkg_stat                group_wait_time;
+       /* time spent idling for this blkcg_gq */
+       struct blkg_stat                idle_time;
+       /* total time with empty current active q with other requests queued */
+       struct blkg_stat                empty_time;
+       /* fields after this shouldn't be cleared on stat reset */
+       uint64_t                        start_group_wait_time;
+       uint64_t                        start_idle_time;
+       uint64_t                        start_empty_time;
+       uint16_t                        flags;
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
+#endif /* CONFIG_CFQ_GROUP_IOSCHED */
+};
+
 /* This is per cgroup per device grouping structure */
 struct cfq_group {
+       /* must be the first member */
+       struct blkg_policy_data pd;
+
        /* group service_tree member */
        struct rb_node rb_node;
 
@@ -180,7 +227,7 @@ struct cfq_group {
        u64 vdisktime;
        unsigned int weight;
        unsigned int new_weight;
-       bool needs_update;
+       unsigned int dev_weight;
 
        /* number of cfqq currently on this group */
        int nr_cfqq;
@@ -206,20 +253,21 @@ struct cfq_group {
        unsigned long saved_workload_slice;
        enum wl_type_t saved_workload;
        enum wl_prio_t saved_serving_prio;
-       struct blkio_group blkg;
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-       struct hlist_node cfqd_node;
-       int ref;
-#endif
+
        /* number of requests that are on the dispatch list or inside driver */
        int dispatched;
        struct cfq_ttime ttime;
+       struct cfqg_stats stats;
 };
 
 struct cfq_io_cq {
        struct io_cq            icq;            /* must be the first member */
        struct cfq_queue        *cfqq[2];
        struct cfq_ttime        ttime;
+       int                     ioprio;         /* the current ioprio */
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+       uint64_t                blkcg_id;       /* the current blkcg ID */
+#endif
 };
 
 /*
@@ -229,7 +277,7 @@ struct cfq_data {
        struct request_queue *queue;
        /* Root service tree for cfq_groups */
        struct cfq_rb_root grp_service_tree;
-       struct cfq_group root_group;
+       struct cfq_group *root_group;
 
        /*
         * The priority currently being served
@@ -303,12 +351,6 @@ struct cfq_data {
        struct cfq_queue oom_cfqq;
 
        unsigned long last_delayed_sync;
-
-       /* List of cfq groups being managed on this device*/
-       struct hlist_head cfqg_list;
-
-       /* Number of groups which are on blkcg->blkg_list */
-       unsigned int nr_blkcg_linked_grps;
 };
 
 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
@@ -371,21 +413,284 @@ CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
 #undef CFQ_CFQQ_FNS
 
+static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
+{
+       return pd ? container_of(pd, struct cfq_group, pd) : NULL;
+}
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+       return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
+static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
+{
+       return pd_to_blkg(&cfqg->pd);
+}
+
+#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+
+/* cfqg stats flags */
+enum cfqg_stats_flags {
+       CFQG_stats_waiting = 0,
+       CFQG_stats_idling,
+       CFQG_stats_empty,
+};
+
+#define CFQG_FLAG_FNS(name)                                            \
+static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)    \
+{                                                                      \
+       stats->flags |= (1 << CFQG_stats_##name);                       \
+}                                                                      \
+static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)   \
+{                                                                      \
+       stats->flags &= ~(1 << CFQG_stats_##name);                      \
+}                                                                      \
+static inline int cfqg_stats_##name(struct cfqg_stats *stats)          \
+{                                                                      \
+       return (stats->flags & (1 << CFQG_stats_##name)) != 0;          \
+}                                                                      \
+
+CFQG_FLAG_FNS(waiting)
+CFQG_FLAG_FNS(idling)
+CFQG_FLAG_FNS(empty)
+#undef CFQG_FLAG_FNS
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
+{
+       unsigned long long now;
+
+       if (!cfqg_stats_waiting(stats))
+               return;
+
+       now = sched_clock();
+       if (time_after64(now, stats->start_group_wait_time))
+               blkg_stat_add(&stats->group_wait_time,
+                             now - stats->start_group_wait_time);
+       cfqg_stats_clear_waiting(stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
+                                                struct cfq_group *curr_cfqg)
+{
+       struct cfqg_stats *stats = &cfqg->stats;
+
+       if (cfqg_stats_waiting(stats))
+               return;
+       if (cfqg == curr_cfqg)
+               return;
+       stats->start_group_wait_time = sched_clock();
+       cfqg_stats_mark_waiting(stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
+{
+       unsigned long long now;
+
+       if (!cfqg_stats_empty(stats))
+               return;
+
+       now = sched_clock();
+       if (time_after64(now, stats->start_empty_time))
+               blkg_stat_add(&stats->empty_time,
+                             now - stats->start_empty_time);
+       cfqg_stats_clear_empty(stats);
+}
+
+static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
+{
+       blkg_stat_add(&cfqg->stats.dequeue, 1);
+}
+
+static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
+{
+       struct cfqg_stats *stats = &cfqg->stats;
+
+       if (blkg_rwstat_sum(&stats->queued))
+               return;
+
+       /*
+        * group is already marked empty. This can happen if cfqq got new
+        * request in parent group and moved to this group while being added
+        * to service tree. Just ignore the event and move on.
+        */
+       if (cfqg_stats_empty(stats))
+               return;
+
+       stats->start_empty_time = sched_clock();
+       cfqg_stats_mark_empty(stats);
+}
+
+static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
+{
+       struct cfqg_stats *stats = &cfqg->stats;
+
+       if (cfqg_stats_idling(stats)) {
+               unsigned long long now = sched_clock();
+
+               if (time_after64(now, stats->start_idle_time))
+                       blkg_stat_add(&stats->idle_time,
+                                     now - stats->start_idle_time);
+               cfqg_stats_clear_idling(stats);
+       }
+}
+
+static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
+{
+       struct cfqg_stats *stats = &cfqg->stats;
+
+       BUG_ON(cfqg_stats_idling(stats));
+
+       stats->start_idle_time = sched_clock();
+       cfqg_stats_mark_idling(stats);
+}
+
+static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
+{
+       struct cfqg_stats *stats = &cfqg->stats;
+
+       blkg_stat_add(&stats->avg_queue_size_sum,
+                     blkg_rwstat_sum(&stats->queued));
+       blkg_stat_add(&stats->avg_queue_size_samples, 1);
+       cfqg_stats_update_group_wait_time(stats);
+}
+
+#else  /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
+static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
+static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
+static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
+
+#endif /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
+
+static inline void cfqg_get(struct cfq_group *cfqg)
+{
+       return blkg_get(cfqg_to_blkg(cfqg));
+}
+
+static inline void cfqg_put(struct cfq_group *cfqg)
+{
+       return blkg_put(cfqg_to_blkg(cfqg));
+}
+
+#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do {                    \
+       char __pbuf[128];                                               \
+                                                                       \
+       blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));  \
        blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
-                       cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
-                       blkg_path(&(cfqq)->cfqg->blkg), ##args)
+                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A',            \
+                         __pbuf, ##args);                              \
+} while (0)
 
-#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)                         \
-       blk_add_trace_msg((cfqd)->queue, "%s " fmt,                     \
-                               blkg_path(&(cfqg)->blkg), ##args)       \
+#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {                    \
+       char __pbuf[128];                                               \
+                                                                       \
+       blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));          \
+       blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);    \
+} while (0)
+
+static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
+                                           struct cfq_group *curr_cfqg, int rw)
+{
+       blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
+       cfqg_stats_end_empty_time(&cfqg->stats);
+       cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
+}
+
+static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
+                       unsigned long time, unsigned long unaccounted_time)
+{
+       blkg_stat_add(&cfqg->stats.time, time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+       blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
+#endif
+}
+
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
+{
+       blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
+}
+
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
+{
+       blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
+}
+
+static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
+                                             uint64_t bytes, int rw)
+{
+       blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
+       blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
+       blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
+}
+
+static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
+                       uint64_t start_time, uint64_t io_start_time, int rw)
+{
+       struct cfqg_stats *stats = &cfqg->stats;
+       unsigned long long now = sched_clock();
+
+       if (time_after64(now, io_start_time))
+               blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
+       if (time_after64(io_start_time, start_time))
+               blkg_rwstat_add(&stats->wait_time, rw,
+                               io_start_time - start_time);
+}
+
+static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
+{
+       struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+       struct cfqg_stats *stats = &cfqg->stats;
+
+       /* queued stats shouldn't be cleared */
+       blkg_rwstat_reset(&stats->service_bytes);
+       blkg_rwstat_reset(&stats->serviced);
+       blkg_rwstat_reset(&stats->merged);
+       blkg_rwstat_reset(&stats->service_time);
+       blkg_rwstat_reset(&stats->wait_time);
+       blkg_stat_reset(&stats->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+       blkg_stat_reset(&stats->unaccounted_time);
+       blkg_stat_reset(&stats->avg_queue_size_sum);
+       blkg_stat_reset(&stats->avg_queue_size_samples);
+       blkg_stat_reset(&stats->dequeue);
+       blkg_stat_reset(&stats->group_wait_time);
+       blkg_stat_reset(&stats->idle_time);
+       blkg_stat_reset(&stats->empty_time);
+#endif
+}
+
+#else  /* CONFIG_CFQ_GROUP_IOSCHED */
+
+static inline void cfqg_get(struct cfq_group *cfqg) { }
+static inline void cfqg_put(struct cfq_group *cfqg) { }
 
-#else
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
        blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)         do {} while (0)
-#endif
+
+static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
+                       struct cfq_group *curr_cfqg, int rw) { }
+static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
+                       unsigned long time, unsigned long unaccounted_time) { }
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
+                                             uint64_t bytes, int rw) { }
+static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
+                       uint64_t start_time, uint64_t io_start_time, int rw) { }
+
+#endif /* CONFIG_CFQ_GROUP_IOSCHED */
+
 #define cfq_log(cfqd, fmt, args...)    \
        blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
@@ -466,8 +771,9 @@ static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
 }
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
-                                      struct io_context *, gfp_t);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
+                                      struct cfq_io_cq *cic, struct bio *bio,
+                                      gfp_t gfp_mask);
 
 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
 {
@@ -545,7 +851,7 @@ static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
 {
        u64 d = delta << CFQ_SERVICE_SHIFT;
 
-       d = d * BLKIO_WEIGHT_DEFAULT;
+       d = d * CFQ_WEIGHT_DEFAULT;
        do_div(d, cfqg->weight);
        return d;
 }
@@ -872,9 +1178,9 @@ static void
 cfq_update_group_weight(struct cfq_group *cfqg)
 {
        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
-       if (cfqg->needs_update) {
+       if (cfqg->new_weight) {
                cfqg->weight = cfqg->new_weight;
-               cfqg->needs_update = false;
+               cfqg->new_weight = 0;
        }
 }
 
@@ -936,7 +1242,7 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
        cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
        cfq_group_service_tree_del(st, cfqg);
        cfqg->saved_workload_slice = 0;
-       cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
+       cfqg_stats_update_dequeue(cfqg);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
@@ -1008,178 +1314,59 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
                     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
                     used_sl, cfqq->slice_dispatch, charge,
                     iops_mode(cfqd), cfqq->nr_sectors);
-       cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
-                                         unaccounted_sl);
-       cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
+       cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
+       cfqg_stats_set_start_empty_time(cfqg);
 }
 
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
-{
-       if (blkg)
-               return container_of(blkg, struct cfq_group, blkg);
-       return NULL;
-}
-
-static void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
-                                         unsigned int weight)
-{
-       struct cfq_group *cfqg = cfqg_of_blkg(blkg);
-       cfqg->new_weight = weight;
-       cfqg->needs_update = true;
-}
-
-static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
-                       struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
-{
-       struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
-       unsigned int major, minor;
-
-       /*
-        * Add group onto cgroup list. It might happen that bdi->dev is
-        * not initialized yet. Initialize this new group without major
-        * and minor info and this info will be filled in once a new thread
-        * comes for IO.
-        */
-       if (bdi->dev) {
-               sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-               cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
-                                       (void *)cfqd, MKDEV(major, minor));
-       } else
-               cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
-                                       (void *)cfqd, 0);
-
-       cfqd->nr_blkcg_linked_grps++;
-       cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
-
-       /* Add group on cfqd list */
-       hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
-}
-
-/*
- * Should be called from sleepable context. No request queue lock as per
- * cpu stats are allocated dynamically and alloc_percpu needs to be called
- * from sleepable context.
+/**
+ * cfq_init_cfqg_base - initialize base part of a cfq_group
+ * @cfqg: cfq_group to initialize
+ *
+ * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
+ * is enabled or not.
  */
-static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
+static void cfq_init_cfqg_base(struct cfq_group *cfqg)
 {
-       struct cfq_group *cfqg = NULL;
-       int i, j, ret;
        struct cfq_rb_root *st;
-
-       cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
-       if (!cfqg)
-               return NULL;
+       int i, j;
 
        for_each_cfqg_st(cfqg, i, j, st)
                *st = CFQ_RB_ROOT;
        RB_CLEAR_NODE(&cfqg->rb_node);
 
        cfqg->ttime.last_end_request = jiffies;
-
-       /*
-        * Take the initial reference that will be released on destroy
-        * This can be thought of a joint reference by cgroup and
-        * elevator which will be dropped by either elevator exit
-        * or cgroup deletion path depending on who is exiting first.
-        */
-       cfqg->ref = 1;
-
-       ret = blkio_alloc_blkg_stats(&cfqg->blkg);
-       if (ret) {
-               kfree(cfqg);
-               return NULL;
-       }
-
-       return cfqg;
 }
 
-static struct cfq_group *
-cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static void cfq_pd_init(struct blkcg_gq *blkg)
 {
-       struct cfq_group *cfqg = NULL;
-       void *key = cfqd;
-       struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
-       unsigned int major, minor;
-
-       /*
-        * This is the common case when there are no blkio cgroups.
-        * Avoid lookup in this case
-        */
-       if (blkcg == &blkio_root_cgroup)
-               cfqg = &cfqd->root_group;
-       else
-               cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
-
-       if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
-               sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-               cfqg->blkg.dev = MKDEV(major, minor);
-       }
+       struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
-       return cfqg;
+       cfq_init_cfqg_base(cfqg);
+       cfqg->weight = blkg->blkcg->cfq_weight;
 }
 
 /*
  * Search for the cfq group current task belongs to. request_queue lock must
  * be held.
  */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+                                               struct blkcg *blkcg)
 {
-       struct blkio_cgroup *blkcg;
-       struct cfq_group *cfqg = NULL, *__cfqg = NULL;
        struct request_queue *q = cfqd->queue;
+       struct cfq_group *cfqg = NULL;
 
-       rcu_read_lock();
-       blkcg = task_blkio_cgroup(current);
-       cfqg = cfq_find_cfqg(cfqd, blkcg);
-       if (cfqg) {
-               rcu_read_unlock();
-               return cfqg;
-       }
-
-       /*
-        * Need to allocate a group. Allocation of group also needs allocation
-        * of per cpu stats which in-turn takes a mutex() and can block. Hence
-        * we need to drop rcu lock and queue_lock before we call alloc.
-        *
-        * Not taking any queue reference here and assuming that queue is
-        * around by the time we return. CFQ queue allocation code does
-        * the same. It might be racy though.
-        */
-
-       rcu_read_unlock();
-       spin_unlock_irq(q->queue_lock);
-
-       cfqg = cfq_alloc_cfqg(cfqd);
-
-       spin_lock_irq(q->queue_lock);
-
-       rcu_read_lock();
-       blkcg = task_blkio_cgroup(current);
-
-       /*
-        * If some other thread already allocated the group while we were
-        * not holding queue lock, free up the group
-        */
-       __cfqg = cfq_find_cfqg(cfqd, blkcg);
+       /* avoid lookup for the common case where there's no blkcg */
+       if (blkcg == &blkcg_root) {
+               cfqg = cfqd->root_group;
+       } else {
+               struct blkcg_gq *blkg;
 
-       if (__cfqg) {
-               kfree(cfqg);
-               rcu_read_unlock();
-               return __cfqg;
+               blkg = blkg_lookup_create(blkcg, q);
+               if (!IS_ERR(blkg))
+                       cfqg = blkg_to_cfqg(blkg);
        }
 
-       if (!cfqg)
-               cfqg = &cfqd->root_group;
-
-       cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
-       rcu_read_unlock();
-       return cfqg;
-}
-
-static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
-{
-       cfqg->ref++;
        return cfqg;
 }
 
@@ -1187,94 +1374,224 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 {
        /* Currently, all async queues are mapped to root group */
        if (!cfq_cfqq_sync(cfqq))
-               cfqg = &cfqq->cfqd->root_group;
+               cfqg = cfqq->cfqd->root_group;
 
        cfqq->cfqg = cfqg;
        /* cfqq reference on cfqg */
-       cfqq->cfqg->ref++;
+       cfqg_get(cfqg);
 }
 
-static void cfq_put_cfqg(struct cfq_group *cfqg)
+static u64 cfqg_prfill_weight_device(struct seq_file *sf,
+                                    struct blkg_policy_data *pd, int off)
 {
-       struct cfq_rb_root *st;
-       int i, j;
+       struct cfq_group *cfqg = pd_to_cfqg(pd);
 
-       BUG_ON(cfqg->ref <= 0);
-       cfqg->ref--;
-       if (cfqg->ref)
-               return;
-       for_each_cfqg_st(cfqg, i, j, st)
-               BUG_ON(!RB_EMPTY_ROOT(&st->rb));
-       free_percpu(cfqg->blkg.stats_cpu);
-       kfree(cfqg);
+       if (!cfqg->dev_weight)
+               return 0;
+       return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
 }
 
-static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
+static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
+                                   struct seq_file *sf)
 {
-       /* Something wrong if we are trying to remove same group twice */
-       BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
+       blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
+                         cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
+                         false);
+       return 0;
+}
 
-       hlist_del_init(&cfqg->cfqd_node);
+static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
+                           struct seq_file *sf)
+{
+       seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
+       return 0;
+}
 
-       BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
-       cfqd->nr_blkcg_linked_grps--;
+static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
+                                 const char *buf)
+{
+       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+       struct blkg_conf_ctx ctx;
+       struct cfq_group *cfqg;
+       int ret;
 
-       /*
-        * Put the reference taken at the time of creation so that when all
-        * queues are gone, group can be destroyed.
-        */
-       cfq_put_cfqg(cfqg);
+       ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
+       if (ret)
+               return ret;
+
+       ret = -EINVAL;
+       cfqg = blkg_to_cfqg(ctx.blkg);
+       if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
+               cfqg->dev_weight = ctx.v;
+               cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
+               ret = 0;
+       }
+
+       blkg_conf_finish(&ctx);
+       return ret;
 }
 
-static void cfq_release_cfq_groups(struct cfq_data *cfqd)
+static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
 {
-       struct hlist_node *pos, *n;
-       struct cfq_group *cfqg;
+       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+       struct blkcg_gq *blkg;
+       struct hlist_node *n;
 
-       hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
-               /*
-                * If cgroup removal path got to blk_group first and removed
-                * it from cgroup list, then it will take care of destroying
-                * cfqg also.
-                */
-               if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
-                       cfq_destroy_cfqg(cfqd, cfqg);
+       if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
+               return -EINVAL;
+
+       spin_lock_irq(&blkcg->lock);
+       blkcg->cfq_weight = (unsigned int)val;
+
+       hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+               struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+               if (cfqg && !cfqg->dev_weight)
+                       cfqg->new_weight = blkcg->cfq_weight;
        }
+
+       spin_unlock_irq(&blkcg->lock);
+       return 0;
 }
 
-/*
- * Blk cgroup controller notification saying that blkio_group object is being
- * delinked as associated cgroup object is going away. That also means that
- * no new IO will come in this group. So get rid of this group as soon as
- * any pending IO in the group is finished.
- *
- * This function is called under rcu_read_lock(). key is the rcu protected
- * pointer. That means "key" is a valid cfq_data pointer as long as we are rcu
- * read lock.
- *
- * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
- * it should not be NULL as even if elevator was exiting, cgroup deltion
- * path got to it first.
- */
-static void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
+static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
+                          struct seq_file *sf)
 {
-       unsigned long  flags;
-       struct cfq_data *cfqd = key;
+       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 
-       spin_lock_irqsave(cfqd->queue->queue_lock, flags);
-       cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
-       spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+       blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
+                         cft->private, false);
+       return 0;
 }
 
-#else /* GROUP_IOSCHED */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
+                            struct seq_file *sf)
 {
-       return &cfqd->root_group;
+       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+       blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
+                         cft->private, true);
+       return 0;
 }
 
-static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
+                                     struct blkg_policy_data *pd, int off)
 {
-       return cfqg;
+       struct cfq_group *cfqg = pd_to_cfqg(pd);
+       u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
+       u64 v = 0;
+
+       if (samples) {
+               v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
+               do_div(v, samples);
+       }
+       __blkg_prfill_u64(sf, pd, v);
+       return 0;
+}
+
+/* print avg_queue_size */
+static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
+                                    struct seq_file *sf)
+{
+       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+       blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
+                         &blkcg_policy_cfq, 0, false);
+       return 0;
+}
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
+
+static struct cftype cfq_blkcg_files[] = {
+       {
+               .name = "weight_device",
+               .read_seq_string = cfqg_print_weight_device,
+               .write_string = cfqg_set_weight_device,
+               .max_write_len = 256,
+       },
+       {
+               .name = "weight",
+               .read_seq_string = cfq_print_weight,
+               .write_u64 = cfq_set_weight,
+       },
+       {
+               .name = "time",
+               .private = offsetof(struct cfq_group, stats.time),
+               .read_seq_string = cfqg_print_stat,
+       },
+       {
+               .name = "sectors",
+               .private = offsetof(struct cfq_group, stats.sectors),
+               .read_seq_string = cfqg_print_stat,
+       },
+       {
+               .name = "io_service_bytes",
+               .private = offsetof(struct cfq_group, stats.service_bytes),
+               .read_seq_string = cfqg_print_rwstat,
+       },
+       {
+               .name = "io_serviced",
+               .private = offsetof(struct cfq_group, stats.serviced),
+               .read_seq_string = cfqg_print_rwstat,
+       },
+       {
+               .name = "io_service_time",
+               .private = offsetof(struct cfq_group, stats.service_time),
+               .read_seq_string = cfqg_print_rwstat,
+       },
+       {
+               .name = "io_wait_time",
+               .private = offsetof(struct cfq_group, stats.wait_time),
+               .read_seq_string = cfqg_print_rwstat,
+       },
+       {
+               .name = "io_merged",
+               .private = offsetof(struct cfq_group, stats.merged),
+               .read_seq_string = cfqg_print_rwstat,
+       },
+       {
+               .name = "io_queued",
+               .private = offsetof(struct cfq_group, stats.queued),
+               .read_seq_string = cfqg_print_rwstat,
+       },
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+       {
+               .name = "avg_queue_size",
+               .read_seq_string = cfqg_print_avg_queue_size,
+       },
+       {
+               .name = "group_wait_time",
+               .private = offsetof(struct cfq_group, stats.group_wait_time),
+               .read_seq_string = cfqg_print_stat,
+       },
+       {
+               .name = "idle_time",
+               .private = offsetof(struct cfq_group, stats.idle_time),
+               .read_seq_string = cfqg_print_stat,
+       },
+       {
+               .name = "empty_time",
+               .private = offsetof(struct cfq_group, stats.empty_time),
+               .read_seq_string = cfqg_print_stat,
+       },
+       {
+               .name = "dequeue",
+               .private = offsetof(struct cfq_group, stats.dequeue),
+               .read_seq_string = cfqg_print_stat,
+       },
+       {
+               .name = "unaccounted_time",
+               .private = offsetof(struct cfq_group, stats.unaccounted_time),
+               .read_seq_string = cfqg_print_stat,
+       },
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
+       { }     /* terminate */
+};
+#else /* GROUP_IOSCHED */
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+                                               struct blkcg *blkcg)
+{
+       return cfqd->root_group;
 }
 
 static inline void
@@ -1282,9 +1599,6 @@ cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
        cfqq->cfqg = cfqg;
 }
 
-static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
-static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
-
 #endif /* GROUP_IOSCHED */
 
 /*
@@ -1551,12 +1865,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
        elv_rb_del(&cfqq->sort_list, rq);
        cfqq->queued[rq_is_sync(rq)]--;
-       cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
-                                       rq_data_dir(rq), rq_is_sync(rq));
+       cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
        cfq_add_rq_rb(rq);
-       cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
-                       &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
-                       rq_is_sync(rq));
+       cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
+                                rq->cmd_flags);
 }
 
 static struct request *
@@ -1612,8 +1924,7 @@ static void cfq_remove_request(struct request *rq)
        cfq_del_rq_rb(rq);
 
        cfqq->cfqd->rq_queued--;
-       cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
-                                       rq_data_dir(rq), rq_is_sync(rq));
+       cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
        if (rq->cmd_flags & REQ_PRIO) {
                WARN_ON(!cfqq->prio_pending);
                cfqq->prio_pending--;
@@ -1648,8 +1959,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
                                struct bio *bio)
 {
-       cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
-                                       bio_data_dir(bio), cfq_bio_sync(bio));
+       cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
 }
 
 static void
@@ -1671,8 +1981,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
        if (cfqq->next_rq == next)
                cfqq->next_rq = rq;
        cfq_remove_request(next);
-       cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
-                                       rq_data_dir(next), rq_is_sync(next));
+       cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
 
        cfqq = RQ_CFQQ(next);
        /*
@@ -1713,7 +2022,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        del_timer(&cfqd->idle_slice_timer);
-       cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+       cfqg_stats_update_idle_time(cfqq->cfqg);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -1722,7 +2031,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
        if (cfqq) {
                cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
                                cfqd->serving_prio, cfqd->serving_type);
-               cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
+               cfqg_stats_update_avg_queue_size(cfqq->cfqg);
                cfqq->slice_start = 0;
                cfqq->dispatch_start = jiffies;
                cfqq->allocated_slice = 0;
@@ -2043,7 +2352,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
         * task has exited, don't wait
         */
        cic = cfqd->active_cic;
-       if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
+       if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
                return;
 
        /*
@@ -2070,7 +2379,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
                sl = cfqd->cfq_slice_idle;
 
        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-       cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
+       cfqg_stats_set_start_idle_time(cfqq->cfqg);
        cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
                        group_idle ? 1 : 0);
 }
@@ -2093,8 +2402,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 
        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
        cfqq->nr_sectors += blk_rq_sectors(rq);
-       cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
-                                       rq_data_dir(rq), rq_is_sync(rq));
+       cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
 }
 
 /*
@@ -2677,7 +2985,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
        BUG_ON(cfq_cfqq_on_rr(cfqq));
        kmem_cache_free(cfq_pool, cfqq);
-       cfq_put_cfqg(cfqg);
+       cfqg_put(cfqg);
 }
 
 static void cfq_put_cooperator(struct cfq_queue *cfqq)
@@ -2736,7 +3044,7 @@ static void cfq_exit_icq(struct io_cq *icq)
        }
 }
 
-static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
+static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
 {
        struct task_struct *tsk = current;
        int ioprio_class;
@@ -2744,7 +3052,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
        if (!cfq_cfqq_prio_changed(cfqq))
                return;
 
-       ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
+       ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
        switch (ioprio_class) {
        default:
                printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
@@ -2756,11 +3064,11 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
                cfqq->ioprio_class = task_nice_ioclass(tsk);
                break;
        case IOPRIO_CLASS_RT:
-               cfqq->ioprio = task_ioprio(ioc);
+               cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
                cfqq->ioprio_class = IOPRIO_CLASS_RT;
                break;
        case IOPRIO_CLASS_BE:
-               cfqq->ioprio = task_ioprio(ioc);
+               cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
                cfqq->ioprio_class = IOPRIO_CLASS_BE;
                break;
        case IOPRIO_CLASS_IDLE:
@@ -2778,19 +3086,24 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
        cfq_clear_cfqq_prio_changed(cfqq);
 }
 
-static void changed_ioprio(struct cfq_io_cq *cic)
+static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
 {
+       int ioprio = cic->icq.ioc->ioprio;
        struct cfq_data *cfqd = cic_to_cfqd(cic);
        struct cfq_queue *cfqq;
 
-       if (unlikely(!cfqd))
+       /*
+        * Check whether ioprio has changed.  The condition may trigger
+        * spuriously on a newly created cic but there's no harm.
+        */
+       if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
                return;
 
        cfqq = cic->cfqq[BLK_RW_ASYNC];
        if (cfqq) {
                struct cfq_queue *new_cfqq;
-               new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
-                                               GFP_ATOMIC);
+               new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
+                                        GFP_ATOMIC);
                if (new_cfqq) {
                        cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
                        cfq_put_queue(cfqq);
@@ -2800,6 +3113,8 @@ static void changed_ioprio(struct cfq_io_cq *cic)
        cfqq = cic->cfqq[BLK_RW_SYNC];
        if (cfqq)
                cfq_mark_cfqq_prio_changed(cfqq);
+
+       cic->ioprio = ioprio;
 }
 
 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
@@ -2823,17 +3138,24 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void changed_cgroup(struct cfq_io_cq *cic)
+static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
 {
-       struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
        struct cfq_data *cfqd = cic_to_cfqd(cic);
-       struct request_queue *q;
+       struct cfq_queue *sync_cfqq;
+       uint64_t id;
 
-       if (unlikely(!cfqd))
-               return;
+       rcu_read_lock();
+       id = bio_blkcg(bio)->id;
+       rcu_read_unlock();
 
-       q = cfqd->queue;
+       /*
+        * Check whether blkcg has changed.  The condition may trigger
+        * spuriously on a newly created cic but there's no harm.
+        */
+       if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
+               return;
 
+       sync_cfqq = cic_to_cfqq(cic, 1);
        if (sync_cfqq) {
                /*
                 * Drop reference to sync queue. A new sync queue will be
@@ -2843,21 +3165,26 @@ static void changed_cgroup(struct cfq_io_cq *cic)
                cic_set_cfqq(cic, NULL, 1);
                cfq_put_queue(sync_cfqq);
        }
+
+       cic->blkcg_id = id;
 }
+#else
+static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
 
 static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
-                    struct io_context *ioc, gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+                    struct bio *bio, gfp_t gfp_mask)
 {
+       struct blkcg *blkcg;
        struct cfq_queue *cfqq, *new_cfqq = NULL;
-       struct cfq_io_cq *cic;
        struct cfq_group *cfqg;
 
 retry:
-       cfqg = cfq_get_cfqg(cfqd);
-       cic = cfq_cic_lookup(cfqd, ioc);
-       /* cic always exists here */
+       rcu_read_lock();
+
+       blkcg = bio_blkcg(bio);
+       cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
        cfqq = cic_to_cfqq(cic, is_sync);
 
        /*
@@ -2870,6 +3197,7 @@ retry:
                        cfqq = new_cfqq;
                        new_cfqq = NULL;
                } else if (gfp_mask & __GFP_WAIT) {
+                       rcu_read_unlock();
                        spin_unlock_irq(cfqd->queue->queue_lock);
                        new_cfqq = kmem_cache_alloc_node(cfq_pool,
                                        gfp_mask | __GFP_ZERO,
@@ -2885,7 +3213,7 @@ retry:
 
                if (cfqq) {
                        cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
-                       cfq_init_prio_data(cfqq, ioc);
+                       cfq_init_prio_data(cfqq, cic);
                        cfq_link_cfqq_cfqg(cfqq, cfqg);
                        cfq_log_cfqq(cfqd, cfqq, "alloced");
                } else
@@ -2895,6 +3223,7 @@ retry:
        if (new_cfqq)
                kmem_cache_free(cfq_pool, new_cfqq);
 
+       rcu_read_unlock();
        return cfqq;
 }
 
@@ -2904,6 +3233,9 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
        switch (ioprio_class) {
        case IOPRIO_CLASS_RT:
                return &cfqd->async_cfqq[0][ioprio];
+       case IOPRIO_CLASS_NONE:
+               ioprio = IOPRIO_NORM;
+               /* fall through */
        case IOPRIO_CLASS_BE:
                return &cfqd->async_cfqq[1][ioprio];
        case IOPRIO_CLASS_IDLE:
@@ -2914,11 +3246,11 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
-             gfp_t gfp_mask)
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+             struct bio *bio, gfp_t gfp_mask)
 {
-       const int ioprio = task_ioprio(ioc);
-       const int ioprio_class = task_ioprio_class(ioc);
+       const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+       const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
        struct cfq_queue **async_cfqq = NULL;
        struct cfq_queue *cfqq = NULL;
 
@@ -2928,7 +3260,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
        }
 
        if (!cfqq)
-               cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
+               cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
 
        /*
         * pin the queue now that it's allocated, scheduler exit will prune it
@@ -3010,7 +3342,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
        if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
                enable_idle = 0;
-       else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
+       else if (!atomic_read(&cic->icq.ioc->active_ref) ||
                 !cfqd->cfq_slice_idle ||
                 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
                enable_idle = 0;
@@ -3174,8 +3506,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                                cfq_clear_cfqq_wait_request(cfqq);
                                __blk_run_queue(cfqd->queue);
                        } else {
-                               cfq_blkiocg_update_idle_time_stats(
-                                               &cfqq->cfqg->blkg);
+                               cfqg_stats_update_idle_time(cfqq->cfqg);
                                cfq_mark_cfqq_must_dispatch(cfqq);
                        }
                }
@@ -3197,14 +3528,13 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
        cfq_log_cfqq(cfqd, cfqq, "insert_request");
-       cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
+       cfq_init_prio_data(cfqq, RQ_CIC(rq));
 
        rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
        list_add_tail(&rq->queuelist, &cfqq->fifo);
        cfq_add_rq_rb(rq);
-       cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
-                       &cfqd->serving_group->blkg, rq_data_dir(rq),
-                       rq_is_sync(rq));
+       cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
+                                rq->cmd_flags);
        cfq_rq_enqueued(cfqd, cfqq, rq);
 }
 
@@ -3300,9 +3630,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        cfqd->rq_in_driver--;
        cfqq->dispatched--;
        (RQ_CFQG(rq))->dispatched--;
-       cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
-                       rq_start_time_ns(rq), rq_io_start_time_ns(rq),
-                       rq_data_dir(rq), rq_is_sync(rq));
+       cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
+                                    rq_io_start_time_ns(rq), rq->cmd_flags);
 
        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
@@ -3399,7 +3728,7 @@ static int cfq_may_queue(struct request_queue *q, int rw)
 
        cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
        if (cfqq) {
-               cfq_init_prio_data(cfqq, cic->icq.ioc);
+               cfq_init_prio_data(cfqq, cic);
 
                return __cfq_may_queue(cfqq);
        }
@@ -3421,7 +3750,7 @@ static void cfq_put_request(struct request *rq)
                cfqq->allocated[rw]--;
 
                /* Put down rq reference on cfqg */
-               cfq_put_cfqg(RQ_CFQG(rq));
+               cfqg_put(RQ_CFQG(rq));
                rq->elv.priv[0] = NULL;
                rq->elv.priv[1] = NULL;
 
@@ -3465,32 +3794,25 @@ split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
  * Allocate cfq data structures associated with this request.
  */
 static int
-cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
+               gfp_t gfp_mask)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
        const int rw = rq_data_dir(rq);
        const bool is_sync = rq_is_sync(rq);
        struct cfq_queue *cfqq;
-       unsigned int changed;
 
        might_sleep_if(gfp_mask & __GFP_WAIT);
 
        spin_lock_irq(q->queue_lock);
 
-       /* handle changed notifications */
-       changed = icq_get_changed(&cic->icq);
-       if (unlikely(changed & ICQ_IOPRIO_CHANGED))
-               changed_ioprio(cic);
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-       if (unlikely(changed & ICQ_CGROUP_CHANGED))
-               changed_cgroup(cic);
-#endif
-
+       check_ioprio_changed(cic, bio);
+       check_blkcg_changed(cic, bio);
 new_queue:
        cfqq = cic_to_cfqq(cic, is_sync);
        if (!cfqq || cfqq == &cfqd->oom_cfqq) {
-               cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
+               cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
                cic_set_cfqq(cic, cfqq, is_sync);
        } else {
                /*
@@ -3516,8 +3838,9 @@ new_queue:
        cfqq->allocated[rw]++;
 
        cfqq->ref++;
+       cfqg_get(cfqq->cfqg);
        rq->elv.priv[0] = cfqq;
-       rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
+       rq->elv.priv[1] = cfqq->cfqg;
        spin_unlock_irq(q->queue_lock);
        return 0;
 }
@@ -3614,7 +3937,6 @@ static void cfq_exit_queue(struct elevator_queue *e)
 {
        struct cfq_data *cfqd = e->elevator_data;
        struct request_queue *q = cfqd->queue;
-       bool wait = false;
 
        cfq_shutdown_timer_wq(cfqd);
 
@@ -3624,89 +3946,52 @@ static void cfq_exit_queue(struct elevator_queue *e)
                __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
 
        cfq_put_async_queues(cfqd);
-       cfq_release_cfq_groups(cfqd);
-
-       /*
-        * If there are groups which we could not unlink from blkcg list,
-        * wait for a rcu period for them to be freed.
-        */
-       if (cfqd->nr_blkcg_linked_grps)
-               wait = true;
 
        spin_unlock_irq(q->queue_lock);
 
        cfq_shutdown_timer_wq(cfqd);
 
-       /*
-        * Wait for cfqg->blkg->key accessors to exit their grace periods.
-        * Do this wait only if there are other unlinked groups out
-        * there. This can happen if cgroup deletion path claimed the
-        * responsibility of cleaning up a group before queue cleanup code
-        * get to the group.
-        *
-        * Do not call synchronize_rcu() unconditionally as there are drivers
-        * which create/delete request queue hundreds of times during scan/boot
-        * and synchronize_rcu() can take significant time and slow down boot.
-        */
-       if (wait)
-               synchronize_rcu();
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-       /* Free up per cpu stats for root group */
-       free_percpu(cfqd->root_group.blkg.stats_cpu);
+#ifndef CONFIG_CFQ_GROUP_IOSCHED
+       kfree(cfqd->root_group);
 #endif
+       blkcg_deactivate_policy(q, &blkcg_policy_cfq);
        kfree(cfqd);
 }
 
-static void *cfq_init_queue(struct request_queue *q)
+static int cfq_init_queue(struct request_queue *q)
 {
        struct cfq_data *cfqd;
-       int i, j;
-       struct cfq_group *cfqg;
-       struct cfq_rb_root *st;
+       struct blkcg_gq *blkg __maybe_unused;
+       int i, ret;
 
        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
        if (!cfqd)
-               return NULL;
+               return -ENOMEM;
+
+       cfqd->queue = q;
+       q->elevator->elevator_data = cfqd;
 
        /* Init root service tree */
        cfqd->grp_service_tree = CFQ_RB_ROOT;
 
-       /* Init root group */
-       cfqg = &cfqd->root_group;
-       for_each_cfqg_st(cfqg, i, j, st)
-               *st = CFQ_RB_ROOT;
-       RB_CLEAR_NODE(&cfqg->rb_node);
-
-       /* Give preference to root group over other groups */
-       cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
-
+       /* Init root group and prefer root group over other groups by default */
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-       /*
-        * Set root group reference to 2. One reference will be dropped when
-        * all groups on cfqd->cfqg_list are being deleted during queue exit.
-        * Other reference will remain there as we don't want to delete this
-        * group as it is statically allocated and gets destroyed when
-        * throtl_data goes away.
-        */
-       cfqg->ref = 2;
-
-       if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
-               kfree(cfqg);
-               kfree(cfqd);
-               return NULL;
-       }
-
-       rcu_read_lock();
+       ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
+       if (ret)
+               goto out_free;
 
-       cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
-                                       (void *)cfqd, 0);
-       rcu_read_unlock();
-       cfqd->nr_blkcg_linked_grps++;
+       cfqd->root_group = blkg_to_cfqg(q->root_blkg);
+#else
+       ret = -ENOMEM;
+       cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
+                                       GFP_KERNEL, cfqd->queue->node);
+       if (!cfqd->root_group)
+               goto out_free;
 
-       /* Add group on cfqd->cfqg_list */
-       hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
+       cfq_init_cfqg_base(cfqd->root_group);
 #endif
+       cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
+
        /*
         * Not strictly needed (since RB_ROOT just clears the node and we
         * zeroed cfqd on alloc), but better be safe in case someone decides
@@ -3718,13 +4003,17 @@ static void *cfq_init_queue(struct request_queue *q)
        /*
         * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
         * Grab a permanent reference to it, so that the normal code flow
-        * will not attempt to free it.
+        * will not attempt to free it.  oom_cfqq is linked to root_group
+        * but shouldn't hold a reference as it'll never be unlinked.  Lose
+        * the reference from linking right away.
         */
        cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
        cfqd->oom_cfqq.ref++;
-       cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
 
-       cfqd->queue = q;
+       spin_lock_irq(q->queue_lock);
+       cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
+       cfqg_put(cfqd->root_group);
+       spin_unlock_irq(q->queue_lock);
 
        init_timer(&cfqd->idle_slice_timer);
        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
@@ -3750,7 +4039,11 @@ static void *cfq_init_queue(struct request_queue *q)
         * second, in order to have larger depth for async operations.
         */
        cfqd->last_delayed_sync = jiffies - HZ;
-       return cfqd;
+       return 0;
+
+out_free:
+       kfree(cfqd);
+       return ret;
 }
 
 /*
@@ -3877,15 +4170,13 @@ static struct elevator_type iosched_cfq = {
 };
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-static struct blkio_policy_type blkio_policy_cfq = {
-       .ops = {
-               .blkio_unlink_group_fn =        cfq_unlink_blkio_group,
-               .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
-       },
-       .plid = BLKIO_POLICY_PROP,
+static struct blkcg_policy blkcg_policy_cfq = {
+       .pd_size                = sizeof(struct cfq_group),
+       .cftypes                = cfq_blkcg_files,
+
+       .pd_init_fn             = cfq_pd_init,
+       .pd_reset_stats_fn      = cfq_pd_reset_stats,
 };
-#else
-static struct blkio_policy_type blkio_policy_cfq;
 #endif
 
 static int __init cfq_init(void)
@@ -3906,24 +4197,31 @@ static int __init cfq_init(void)
 #else
                cfq_group_idle = 0;
 #endif
+
+       ret = blkcg_policy_register(&blkcg_policy_cfq);
+       if (ret)
+               return ret;
+
        cfq_pool = KMEM_CACHE(cfq_queue, 0);
        if (!cfq_pool)
-               return -ENOMEM;
+               goto err_pol_unreg;
 
        ret = elv_register(&iosched_cfq);
-       if (ret) {
-               kmem_cache_destroy(cfq_pool);
-               return ret;
-       }
-
-       blkio_policy_register(&blkio_policy_cfq);
+       if (ret)
+               goto err_free_pool;
 
        return 0;
+
+err_free_pool:
+       kmem_cache_destroy(cfq_pool);
+err_pol_unreg:
+       blkcg_policy_unregister(&blkcg_policy_cfq);
+       return ret;
 }
 
 static void __exit cfq_exit(void)
 {
-       blkio_policy_unregister(&blkio_policy_cfq);
+       blkcg_policy_unregister(&blkcg_policy_cfq);
        elv_unregister(&iosched_cfq);
        kmem_cache_destroy(cfq_pool);
 }
diff --git a/block/cfq.h b/block/cfq.h
deleted file mode 100644 (file)
index 2a15592..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-#ifndef _CFQ_H
-#define _CFQ_H
-#include "blk-cgroup.h"
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
-       struct blkio_group *curr_blkg, bool direction, bool sync)
-{
-       blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-                       unsigned long dequeue)
-{
-       blkiocg_update_dequeue_stats(blkg, dequeue);
-}
-
-static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
-                       unsigned long time, unsigned long unaccounted_time)
-{
-       blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
-}
-
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
-{
-       blkiocg_set_start_empty_time(blkg);
-}
-
-static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-                               bool direction, bool sync)
-{
-       blkiocg_update_io_remove_stats(blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
-               bool direction, bool sync)
-{
-       blkiocg_update_io_merged_stats(blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
-       blkiocg_update_idle_time_stats(blkg);
-}
-
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
-{
-       blkiocg_update_avg_queue_size_stats(blkg);
-}
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{
-       blkiocg_update_set_idle_time_stats(blkg);
-}
-
-static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-                               uint64_t bytes, bool direction, bool sync)
-{
-       blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
-{
-       blkiocg_update_completion_stats(blkg, start_time, io_start_time,
-                               direction, sync);
-}
-
-static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-                       struct blkio_group *blkg, void *key, dev_t dev) {
-       blkiocg_add_blkio_group(blkcg, blkg, key, dev, BLKIO_POLICY_PROP);
-}
-
-static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
-       return blkiocg_del_blkio_group(blkg);
-}
-
-#else /* CFQ_GROUP_IOSCHED */
-static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
-       struct blkio_group *curr_blkg, bool direction, bool sync) {}
-
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-                       unsigned long dequeue) {}
-
-static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
-                       unsigned long time, unsigned long unaccounted_time) {}
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
-static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-                               bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
-               bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
-}
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
-
-static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-                               uint64_t bytes, bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
-
-static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-                       struct blkio_group *blkg, void *key, dev_t dev) {}
-static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
-       return 0;
-}
-
-#endif /* CFQ_GROUP_IOSCHED */
-#endif
index 7bf12d793fcdee25eb1ba178a59203b72d60ec2e..599b12e5380f50aa32e35ec3096b035437842805 100644 (file)
@@ -337,13 +337,13 @@ static void deadline_exit_queue(struct elevator_queue *e)
 /*
  * initialize elevator private data (deadline_data).
  */
-static void *deadline_init_queue(struct request_queue *q)
+static int deadline_init_queue(struct request_queue *q)
 {
        struct deadline_data *dd;
 
        dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
        if (!dd)
-               return NULL;
+               return -ENOMEM;
 
        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -354,7 +354,9 @@ static void *deadline_init_queue(struct request_queue *q)
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
-       return dd;
+
+       q->elevator->elevator_data = dd;
+       return 0;
 }
 
 /*
index f016855a46b094628190f68e698ae21a936912e6..6a55d418896f5ceee0042da69c0177c495219cbc 100644 (file)
@@ -38,6 +38,7 @@
 #include <trace/events/block.h>
 
 #include "blk.h"
+#include "blk-cgroup.h"
 
 static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
@@ -121,15 +122,6 @@ static struct elevator_type *elevator_get(const char *name)
        return e;
 }
 
-static int elevator_init_queue(struct request_queue *q,
-                              struct elevator_queue *eq)
-{
-       eq->elevator_data = eq->type->ops.elevator_init_fn(q);
-       if (eq->elevator_data)
-               return 0;
-       return -ENOMEM;
-}
-
 static char chosen_elevator[ELV_NAME_MAX];
 
 static int __init elevator_setup(char *str)
@@ -188,7 +180,6 @@ static void elevator_release(struct kobject *kobj)
 int elevator_init(struct request_queue *q, char *name)
 {
        struct elevator_type *e = NULL;
-       struct elevator_queue *eq;
        int err;
 
        if (unlikely(q->elevator))
@@ -222,17 +213,16 @@ int elevator_init(struct request_queue *q, char *name)
                }
        }
 
-       eq = elevator_alloc(q, e);
-       if (!eq)
+       q->elevator = elevator_alloc(q, e);
+       if (!q->elevator)
                return -ENOMEM;
 
-       err = elevator_init_queue(q, eq);
+       err = e->ops.elevator_init_fn(q);
        if (err) {
-               kobject_put(&eq->kobj);
+               kobject_put(&q->elevator->kobj);
                return err;
        }
 
-       q->elevator = eq;
        return 0;
 }
 EXPORT_SYMBOL(elevator_init);
@@ -564,25 +554,6 @@ void elv_drain_elevator(struct request_queue *q)
        }
 }
 
-void elv_quiesce_start(struct request_queue *q)
-{
-       if (!q->elevator)
-               return;
-
-       spin_lock_irq(q->queue_lock);
-       queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
-       spin_unlock_irq(q->queue_lock);
-
-       blk_drain_queue(q, false);
-}
-
-void elv_quiesce_end(struct request_queue *q)
-{
-       spin_lock_irq(q->queue_lock);
-       queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
-       spin_unlock_irq(q->queue_lock);
-}
-
 void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
        trace_block_rq_insert(q, rq);
@@ -692,12 +663,13 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
        return NULL;
 }
 
-int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+int elv_set_request(struct request_queue *q, struct request *rq,
+                   struct bio *bio, gfp_t gfp_mask)
 {
        struct elevator_queue *e = q->elevator;
 
        if (e->type->ops.elevator_set_req_fn)
-               return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask);
+               return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
        return 0;
 }
 
@@ -801,8 +773,9 @@ static struct kobj_type elv_ktype = {
        .release        = elevator_release,
 };
 
-int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
+int elv_register_queue(struct request_queue *q)
 {
+       struct elevator_queue *e = q->elevator;
        int error;
 
        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
@@ -820,11 +793,6 @@ int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
        }
        return error;
 }
-
-int elv_register_queue(struct request_queue *q)
-{
-       return __elv_register_queue(q, q->elevator);
-}
 EXPORT_SYMBOL(elv_register_queue);
 
 void elv_unregister_queue(struct request_queue *q)
@@ -907,53 +875,60 @@ EXPORT_SYMBOL_GPL(elv_unregister);
  */
 static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
-       struct elevator_queue *old_elevator, *e;
+       struct elevator_queue *old = q->elevator;
+       bool registered = old->registered;
        int err;
 
-       /* allocate new elevator */
-       e = elevator_alloc(q, new_e);
-       if (!e)
-               return -ENOMEM;
+       /*
+        * Turn on BYPASS and drain all requests w/ elevator private data.
+        * Block layer doesn't call into a quiesced elevator - all requests
+        * are directly put on the dispatch list without elevator data
+        * using INSERT_BACK.  All requests have SOFTBARRIER set and no
+        * merge happens either.
+        */
+       blk_queue_bypass_start(q);
+
+       /* unregister and clear all auxiliary data of the old elevator */
+       if (registered)
+               elv_unregister_queue(q);
+
+       spin_lock_irq(q->queue_lock);
+       ioc_clear_queue(q);
+       spin_unlock_irq(q->queue_lock);
 
-       err = elevator_init_queue(q, e);
+       /* allocate, init and register new elevator */
+       err = -ENOMEM;
+       q->elevator = elevator_alloc(q, new_e);
+       if (!q->elevator)
+               goto fail_init;
+
+       err = new_e->ops.elevator_init_fn(q);
        if (err) {
-               kobject_put(&e->kobj);
-               return err;
+               kobject_put(&q->elevator->kobj);
+               goto fail_init;
        }
 
-       /* turn on BYPASS and drain all requests w/ elevator private data */
-       elv_quiesce_start(q);
-
-       /* unregister old queue, register new one and kill old elevator */
-       if (q->elevator->registered) {
-               elv_unregister_queue(q);
-               err = __elv_register_queue(q, e);
+       if (registered) {
+               err = elv_register_queue(q);
                if (err)
                        goto fail_register;
        }
 
-       /* done, clear io_cq's, switch elevators and turn off BYPASS */
-       spin_lock_irq(q->queue_lock);
-       ioc_clear_queue(q);
-       old_elevator = q->elevator;
-       q->elevator = e;
-       spin_unlock_irq(q->queue_lock);
-
-       elevator_exit(old_elevator);
-       elv_quiesce_end(q);
+       /* done, kill the old one and finish */
+       elevator_exit(old);
+       blk_queue_bypass_end(q);
 
-       blk_add_trace_msg(q, "elv switch: %s", e->type->elevator_name);
+       blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
 
        return 0;
 
 fail_register:
-       /*
-        * switch failed, exit the new io scheduler and reattach the old
-        * one again (along with re-adding the sysfs dir)
-        */
-       elevator_exit(e);
+       elevator_exit(q->elevator);
+fail_init:
+       /* switch failed, restore and re-register old elevator */
+       q->elevator = old;
        elv_register_queue(q);
-       elv_quiesce_end(q);
+       blk_queue_bypass_end(q);
 
        return err;
 }
index 413a0b1d788c745df932745a65adb36ec119afaa..5d1bf70e33d5a04a5fc994c8a8cb0c9ecdbf0900 100644 (file)
@@ -59,15 +59,17 @@ noop_latter_request(struct request_queue *q, struct request *rq)
        return list_entry(rq->queuelist.next, struct request, queuelist);
 }
 
-static void *noop_init_queue(struct request_queue *q)
+static int noop_init_queue(struct request_queue *q)
 {
        struct noop_data *nd;
 
        nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
        if (!nd)
-               return NULL;
+               return -ENOMEM;
+
        INIT_LIST_HEAD(&nd->queue);
-       return nd;
+       q->elevator->elevator_data = nd;
+       return 0;
 }
 
 static void noop_exit_queue(struct elevator_queue *e)
index 0ee98d50f9752b0a77925dca996f5cd36fee12a3..2ba29ffef2cbd84ff0a55d66a861ab3eee2159fa 100644 (file)
@@ -18,7 +18,7 @@ obj-$(CONFIG_SFI)             += sfi/
 # PnP must come after ACPI since it will eventually need to check if acpi
 # was used and do nothing if so
 obj-$(CONFIG_PNP)              += pnp/
-obj-$(CONFIG_ARM_AMBA)         += amba/
+obj-y                          += amba/
 # Many drivers will want to use DMA so this has to be made available
 # really early.
 obj-$(CONFIG_DMA_ENGINE)       += dma/
index 8cf6c46e99fb538b320b7e63131269ef78accaae..6680df36b9634f414cc7438ca57590bef0c8cc71 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/sysfs.h>
+#include <linux/io.h>
 #include <acpi/acpi.h>
 #include <acpi/acpi_bus.h>
 
index 3188da3df8da945f1c4d5150e0254f592ec6c291..adceafda9c171987a9b0f7dff625e34f53a68c30 100644 (file)
@@ -182,41 +182,66 @@ EXPORT_SYMBOL(acpi_bus_get_private_data);
                                  Power Management
    -------------------------------------------------------------------------- */
 
+static const char *state_string(int state)
+{
+       switch (state) {
+       case ACPI_STATE_D0:
+               return "D0";
+       case ACPI_STATE_D1:
+               return "D1";
+       case ACPI_STATE_D2:
+               return "D2";
+       case ACPI_STATE_D3_HOT:
+               return "D3hot";
+       case ACPI_STATE_D3_COLD:
+               return "D3";
+       default:
+               return "(unknown)";
+       }
+}
+
 static int __acpi_bus_get_power(struct acpi_device *device, int *state)
 {
-       int result = 0;
-       acpi_status status = 0;
-       unsigned long long psc = 0;
+       int result = ACPI_STATE_UNKNOWN;
 
        if (!device || !state)
                return -EINVAL;
 
-       *state = ACPI_STATE_UNKNOWN;
-
-       if (device->flags.power_manageable) {
-               /*
-                * Get the device's power state either directly (via _PSC) or
-                * indirectly (via power resources).
-                */
-               if (device->power.flags.power_resources) {
-                       result = acpi_power_get_inferred_state(device, state);
-                       if (result)
-                               return result;
-               } else if (device->power.flags.explicit_get) {
-                       status = acpi_evaluate_integer(device->handle, "_PSC",
-                                                      NULL, &psc);
-                       if (ACPI_FAILURE(status))
-                               return -ENODEV;
-                       *state = (int)psc;
-               }
-       } else {
+       if (!device->flags.power_manageable) {
                /* TBD: Non-recursive algorithm for walking up hierarchy. */
                *state = device->parent ?
                        device->parent->power.state : ACPI_STATE_D0;
+               goto out;
+       }
+
+       /*
+        * Get the device's power state either directly (via _PSC) or
+        * indirectly (via power resources).
+        */
+       if (device->power.flags.explicit_get) {
+               unsigned long long psc;
+               acpi_status status = acpi_evaluate_integer(device->handle,
+                                                          "_PSC", NULL, &psc);
+               if (ACPI_FAILURE(status))
+                       return -ENODEV;
+
+               result = psc;
+       }
+       /* The test below covers ACPI_STATE_UNKNOWN too. */
+       if (result <= ACPI_STATE_D2) {
+         ; /* Do nothing. */
+       } else if (device->power.flags.power_resources) {
+               int error = acpi_power_get_inferred_state(device, &result);
+               if (error)
+                       return error;
+       } else if (result == ACPI_STATE_D3_HOT) {
+               result = ACPI_STATE_D3;
        }
+       *state = result;
 
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n",
-                         device->pnp.bus_id, *state));
+ out:
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
+                         device->pnp.bus_id, state_string(*state)));
 
        return 0;
 }
@@ -234,13 +259,14 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
        /* Make sure this is a valid target state */
 
        if (state == device->power.state) {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
-                                 state));
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
+                                 state_string(state)));
                return 0;
        }
 
        if (!device->power.states[state].flags.valid) {
-               printk(KERN_WARNING PREFIX "Device does not support D%d\n", state);
+               printk(KERN_WARNING PREFIX "Device does not support %s\n",
+                      state_string(state));
                return -ENODEV;
        }
        if (device->parent && (state < device->parent->power.state)) {
@@ -294,13 +320,13 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
       end:
        if (result)
                printk(KERN_WARNING PREFIX
-                             "Device [%s] failed to transition to D%d\n",
-                             device->pnp.bus_id, state);
+                             "Device [%s] failed to transition to %s\n",
+                             device->pnp.bus_id, state_string(state));
        else {
                device->power.state = state;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "Device [%s] transitioned to D%d\n",
-                                 device->pnp.bus_id, state));
+                                 "Device [%s] transitioned to %s\n",
+                                 device->pnp.bus_id, state_string(state)));
        }
 
        return result;
index 0500f719f63e2aa3d7e8c711e809f84961eb8f4d..dd6d6a3c6780d7e787c1c3427b808618d8c64fa4 100644 (file)
@@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
         * We know a device's inferred power state when all the resources
         * required for a given D-state are 'on'.
         */
-       for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) {
+       for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
                list = &device->power.states[i].resources;
                if (list->count < 1)
                        continue;
index 85cbfdccc97cbfc0dcdcf7c236e57c8c003b0700..c8a1f3b68110b86dd2c6b0ddddc91ee2085e93b1 100644 (file)
@@ -1567,6 +1567,7 @@ static int acpi_bus_scan_fixed(void)
                                                ACPI_BUS_TYPE_POWER_BUTTON,
                                                ACPI_STA_DEFAULT,
                                                &ops);
+               device_init_wakeup(&device->dev, true);
        }
 
        if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
index 06527c526618ce2b9a22b2cd4ffc77449cfbc116..88561029cca83915bffd7cc1d59e03f28d1fbfb4 100644 (file)
@@ -57,6 +57,7 @@ MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
 MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
 
 static u8 sleep_states[ACPI_S_STATE_COUNT];
+static bool pwr_btn_event_pending;
 
 static void acpi_sleep_tts_switch(u32 acpi_state)
 {
@@ -93,11 +94,9 @@ static int acpi_sleep_prepare(u32 acpi_state)
 #ifdef CONFIG_ACPI_SLEEP
        /* do we have a wakeup address for S2 and S3? */
        if (acpi_state == ACPI_STATE_S3) {
-               if (!acpi_wakeup_address) {
+               if (!acpi_wakeup_address)
                        return -EFAULT;
-               }
-               acpi_set_firmware_waking_vector(
-                               (acpi_physical_address)acpi_wakeup_address);
+               acpi_set_firmware_waking_vector(acpi_wakeup_address);
 
        }
        ACPI_FLUSH_CPU_CACHE();
@@ -186,6 +185,14 @@ static int acpi_pm_prepare(void)
        return error;
 }
 
+static int find_powerf_dev(struct device *dev, void *data)
+{
+       struct acpi_device *device = to_acpi_device(dev);
+       const char *hid = acpi_device_hid(device);
+
+       return !strcmp(hid, ACPI_BUTTON_HID_POWERF);
+}
+
 /**
  *     acpi_pm_finish - Instruct the platform to leave a sleep state.
  *
@@ -194,6 +201,7 @@ static int acpi_pm_prepare(void)
  */
 static void acpi_pm_finish(void)
 {
+       struct device *pwr_btn_dev;
        u32 acpi_state = acpi_target_sleep_state;
 
        acpi_ec_unblock_transactions();
@@ -211,6 +219,23 @@ static void acpi_pm_finish(void)
        acpi_set_firmware_waking_vector((acpi_physical_address) 0);
 
        acpi_target_sleep_state = ACPI_STATE_S0;
+
+       /* If we were woken with the fixed power button, provide a small
+        * hint to userspace in the form of a wakeup event on the fixed power
+        * button device (if it can be found).
+        *
+        * We delay the event generation til now, as the PM layer requires
+        * timekeeping to be running before we generate events. */
+       if (!pwr_btn_event_pending)
+               return;
+
+       pwr_btn_event_pending = false;
+       pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL,
+                                     find_powerf_dev);
+       if (pwr_btn_dev) {
+               pm_wakeup_event(pwr_btn_dev, 0);
+               put_device(pwr_btn_dev);
+       }
 }
 
 /**
@@ -300,9 +325,23 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
        /* ACPI 3.0 specs (P62) says that it's the responsibility
         * of the OSPM to clear the status bit [ implying that the
         * POWER_BUTTON event should not reach userspace ]
+        *
+        * However, we do generate a small hint for userspace in the form of
+        * a wakeup event. We flag this condition for now and generate the
+        * event later, as we're currently too early in resume to be able to
+        * generate wakeup events.
         */
-       if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3))
-               acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+       if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
+               acpi_event_status pwr_btn_status;
+
+               acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
+
+               if (pwr_btn_status & ACPI_EVENT_FLAG_SET) {
+                       acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+                       /* Flag for later */
+                       pwr_btn_event_pending = true;
+               }
+       }
 
        /*
         * Disable and clear GPE status before interrupt is enabled. Some GPEs
@@ -732,8 +771,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
         * can wake the system.  _S0W may be valid, too.
         */
        if (acpi_target_sleep_state == ACPI_STATE_S0 ||
-           (device_may_wakeup(dev) &&
-            adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
+           (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
+            adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
                acpi_status status;
 
                acpi_method[3] = 'W';
index 40fe74097be23e827b22d64b2b795204fa3cf540..66e81c2f1e3ca30a1a4e9d498cdfeef16c0b5fbc 100644 (file)
@@ -1,2 +1,2 @@
-obj-y                          += bus.o
-
+obj-$(CONFIG_ARM_AMBA)         += bus.o
+obj-$(CONFIG_TEGRA_AHB)                += tegra-ahb.o
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
new file mode 100644 (file)
index 0000000..aa0b1f1
--- /dev/null
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ *     Jay Cheng <jacheng@nvidia.com>
+ *     James Wylder <james.wylder@motorola.com>
+ *     Benoit Goby <benoit@android.com>
+ *     Colin Cross <ccross@android.com>
+ *     Hiroshi DOYU <hdoyu@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#define DRV_NAME "tegra-ahb"
+
+#define AHB_ARBITRATION_DISABLE                0x00
+#define AHB_ARBITRATION_PRIORITY_CTRL  0x04
+#define   AHB_PRIORITY_WEIGHT(x)       (((x) & 0x7) << 29)
+#define   PRIORITY_SELECT_USB BIT(6)
+#define   PRIORITY_SELECT_USB2 BIT(18)
+#define   PRIORITY_SELECT_USB3 BIT(17)
+
+#define AHB_GIZMO_AHB_MEM              0x0c
+#define   ENB_FAST_REARBITRATE BIT(2)
+#define   DONT_SPLIT_AHB_WR     BIT(7)
+
+#define AHB_GIZMO_APB_DMA              0x10
+#define AHB_GIZMO_IDE                  0x18
+#define AHB_GIZMO_USB                  0x1c
+#define AHB_GIZMO_AHB_XBAR_BRIDGE      0x20
+#define AHB_GIZMO_CPU_AHB_BRIDGE       0x24
+#define AHB_GIZMO_COP_AHB_BRIDGE       0x28
+#define AHB_GIZMO_XBAR_APB_CTLR                0x2c
+#define AHB_GIZMO_VCP_AHB_BRIDGE       0x30
+#define AHB_GIZMO_NAND                 0x3c
+#define AHB_GIZMO_SDMMC4               0x44
+#define AHB_GIZMO_XIO                  0x48
+#define AHB_GIZMO_BSEV                 0x60
+#define AHB_GIZMO_BSEA                 0x70
+#define AHB_GIZMO_NOR                  0x74
+#define AHB_GIZMO_USB2                 0x78
+#define AHB_GIZMO_USB3                 0x7c
+#define   IMMEDIATE    BIT(18)
+
+#define AHB_GIZMO_SDMMC1               0x80
+#define AHB_GIZMO_SDMMC2               0x84
+#define AHB_GIZMO_SDMMC3               0x88
+#define AHB_MEM_PREFETCH_CFG_X         0xd8
+#define AHB_ARBITRATION_XBAR_CTRL      0xdc
+#define AHB_MEM_PREFETCH_CFG3          0xe0
+#define AHB_MEM_PREFETCH_CFG4          0xe4
+#define AHB_MEM_PREFETCH_CFG1          0xec
+#define AHB_MEM_PREFETCH_CFG2          0xf0
+#define   PREFETCH_ENB BIT(31)
+#define   MST_ID(x)    (((x) & 0x1f) << 26)
+#define   AHBDMA_MST_ID        MST_ID(5)
+#define   USB_MST_ID   MST_ID(6)
+#define   USB2_MST_ID  MST_ID(18)
+#define   USB3_MST_ID  MST_ID(17)
+#define   ADDR_BNDRY(x)        (((x) & 0xf) << 21)
+#define   INACTIVITY_TIMEOUT(x)        (((x) & 0xffff) << 0)
+
+#define AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID   0xf8
+
+#define AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE BIT(17)
+
+static struct platform_driver tegra_ahb_driver;
+
+static const u32 tegra_ahb_gizmo[] = {
+       AHB_ARBITRATION_DISABLE,
+       AHB_ARBITRATION_PRIORITY_CTRL,
+       AHB_GIZMO_AHB_MEM,
+       AHB_GIZMO_APB_DMA,
+       AHB_GIZMO_IDE,
+       AHB_GIZMO_USB,
+       AHB_GIZMO_AHB_XBAR_BRIDGE,
+       AHB_GIZMO_CPU_AHB_BRIDGE,
+       AHB_GIZMO_COP_AHB_BRIDGE,
+       AHB_GIZMO_XBAR_APB_CTLR,
+       AHB_GIZMO_VCP_AHB_BRIDGE,
+       AHB_GIZMO_NAND,
+       AHB_GIZMO_SDMMC4,
+       AHB_GIZMO_XIO,
+       AHB_GIZMO_BSEV,
+       AHB_GIZMO_BSEA,
+       AHB_GIZMO_NOR,
+       AHB_GIZMO_USB2,
+       AHB_GIZMO_USB3,
+       AHB_GIZMO_SDMMC1,
+       AHB_GIZMO_SDMMC2,
+       AHB_GIZMO_SDMMC3,
+       AHB_MEM_PREFETCH_CFG_X,
+       AHB_ARBITRATION_XBAR_CTRL,
+       AHB_MEM_PREFETCH_CFG3,
+       AHB_MEM_PREFETCH_CFG4,
+       AHB_MEM_PREFETCH_CFG1,
+       AHB_MEM_PREFETCH_CFG2,
+       AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID,
+};
+
+struct tegra_ahb {
+       void __iomem    *regs;
+       struct device   *dev;
+       u32             ctx[0];
+};
+
+static inline u32 gizmo_readl(struct tegra_ahb *ahb, u32 offset)
+{
+       return readl(ahb->regs + offset);
+}
+
+static inline void gizmo_writel(struct tegra_ahb *ahb, u32 value, u32 offset)
+{
+       writel(value, ahb->regs + offset);
+}
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+static int tegra_ahb_match_by_smmu(struct device *dev, void *data)
+{
+       struct tegra_ahb *ahb = dev_get_drvdata(dev);
+       struct device_node *dn = data;
+
+       return (ahb->dev->of_node == dn) ? 1 : 0;
+}
+
+int tegra_ahb_enable_smmu(struct device_node *dn)
+{
+       struct device *dev;
+       u32 val;
+       struct tegra_ahb *ahb;
+
+       dev = driver_find_device(&tegra_ahb_driver.driver, NULL, dn,
+                                tegra_ahb_match_by_smmu);
+       if (!dev)
+               return -EPROBE_DEFER;
+       ahb = dev_get_drvdata(dev);
+       val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL);
+       val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE;
+       gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL);
+       return 0;
+}
+EXPORT_SYMBOL(tegra_ahb_enable_smmu);
+#endif
+
+static int tegra_ahb_suspend(struct device *dev)
+{
+       int i;
+       struct tegra_ahb *ahb = dev_get_drvdata(dev);
+
+       for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
+               ahb->ctx[i] = gizmo_readl(ahb, tegra_ahb_gizmo[i]);
+       return 0;
+}
+
+static int tegra_ahb_resume(struct device *dev)
+{
+       int i;
+       struct tegra_ahb *ahb = dev_get_drvdata(dev);
+
+       for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
+               gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
+       return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
+                           tegra_ahb_suspend,
+                           tegra_ahb_resume, NULL);
+
+static void tegra_ahb_gizmo_init(struct tegra_ahb *ahb)
+{
+       u32 val;
+
+       val = gizmo_readl(ahb, AHB_GIZMO_AHB_MEM);
+       val |= ENB_FAST_REARBITRATE | IMMEDIATE | DONT_SPLIT_AHB_WR;
+       gizmo_writel(ahb, val, AHB_GIZMO_AHB_MEM);
+
+       val = gizmo_readl(ahb, AHB_GIZMO_USB);
+       val |= IMMEDIATE;
+       gizmo_writel(ahb, val, AHB_GIZMO_USB);
+
+       val = gizmo_readl(ahb, AHB_GIZMO_USB2);
+       val |= IMMEDIATE;
+       gizmo_writel(ahb, val, AHB_GIZMO_USB2);
+
+       val = gizmo_readl(ahb, AHB_GIZMO_USB3);
+       val |= IMMEDIATE;
+       gizmo_writel(ahb, val, AHB_GIZMO_USB3);
+
+       val = gizmo_readl(ahb, AHB_ARBITRATION_PRIORITY_CTRL);
+       val |= PRIORITY_SELECT_USB |
+               PRIORITY_SELECT_USB2 |
+               PRIORITY_SELECT_USB3 |
+               AHB_PRIORITY_WEIGHT(7);
+       gizmo_writel(ahb, val, AHB_ARBITRATION_PRIORITY_CTRL);
+
+       val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG1);
+       val &= ~MST_ID(~0);
+       val |= PREFETCH_ENB |
+               AHBDMA_MST_ID |
+               ADDR_BNDRY(0xc) |
+               INACTIVITY_TIMEOUT(0x1000);
+       gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG1);
+
+       val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG2);
+       val &= ~MST_ID(~0);
+       val |= PREFETCH_ENB |
+               USB_MST_ID |
+               ADDR_BNDRY(0xc) |
+               INACTIVITY_TIMEOUT(0x1000);
+       gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG2);
+
+       val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG3);
+       val &= ~MST_ID(~0);
+       val |= PREFETCH_ENB |
+               USB3_MST_ID |
+               ADDR_BNDRY(0xc) |
+               INACTIVITY_TIMEOUT(0x1000);
+       gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG3);
+
+       val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG4);
+       val &= ~MST_ID(~0);
+       val |= PREFETCH_ENB |
+               USB2_MST_ID |
+               ADDR_BNDRY(0xc) |
+               INACTIVITY_TIMEOUT(0x1000);
+       gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG4);
+}
+
+static int __devinit tegra_ahb_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       struct tegra_ahb *ahb;
+       size_t bytes;
+
+       bytes = sizeof(*ahb) + sizeof(u32) * ARRAY_SIZE(tegra_ahb_gizmo);
+       ahb = devm_kzalloc(&pdev->dev, bytes, GFP_KERNEL);
+       if (!ahb)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENODEV;
+       ahb->regs = devm_request_and_ioremap(&pdev->dev, res);
+       if (!ahb->regs)
+               return -EBUSY;
+
+       ahb->dev = &pdev->dev;
+       platform_set_drvdata(pdev, ahb);
+       tegra_ahb_gizmo_init(ahb);
+       return 0;
+}
+
+static int __devexit tegra_ahb_remove(struct platform_device *pdev)
+{
+       return 0;
+}
+
+static const struct of_device_id tegra_ahb_of_match[] __devinitconst = {
+       { .compatible = "nvidia,tegra30-ahb", },
+       { .compatible = "nvidia,tegra20-ahb", },
+       {},
+};
+
+static struct platform_driver tegra_ahb_driver = {
+       .probe = tegra_ahb_probe,
+       .remove = __devexit_p(tegra_ahb_remove),
+       .driver = {
+               .name = DRV_NAME,
+               .owner = THIS_MODULE,
+               .of_match_table = tegra_ahb_of_match,
+               .pm = &tegra_ahb_pm,
+       },
+};
+module_platform_driver(tegra_ahb_driver);
+
+MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
+MODULE_DESCRIPTION("Tegra AHB driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
index 7336d4a7ab317c091b7b991a1355a712f58023ab..24712adf69dfb8a876ff98d2d39df1182de03942 100644 (file)
@@ -553,6 +553,7 @@ struct mv_host_priv {
 
 #if defined(CONFIG_HAVE_CLK)
        struct clk              *clk;
+       struct clk              **port_clks;
 #endif
        /*
         * These consistent DMA memory pools give us guaranteed
@@ -4027,6 +4028,9 @@ static int mv_platform_probe(struct platform_device *pdev)
        struct resource *res;
        int n_ports = 0;
        int rc;
+#if defined(CONFIG_HAVE_CLK)
+       int port;
+#endif
 
        ata_print_version_once(&pdev->dev, DRV_VERSION);
 
@@ -4054,6 +4058,13 @@ static int mv_platform_probe(struct platform_device *pdev)
 
        if (!host || !hpriv)
                return -ENOMEM;
+#if defined(CONFIG_HAVE_CLK)
+       hpriv->port_clks = devm_kzalloc(&pdev->dev,
+                                       sizeof(struct clk *) * n_ports,
+                                       GFP_KERNEL);
+       if (!hpriv->port_clks)
+               return -ENOMEM;
+#endif
        host->private_data = hpriv;
        hpriv->n_ports = n_ports;
        hpriv->board_idx = chip_soc;
@@ -4066,9 +4077,17 @@ static int mv_platform_probe(struct platform_device *pdev)
 #if defined(CONFIG_HAVE_CLK)
        hpriv->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(hpriv->clk))
-               dev_notice(&pdev->dev, "cannot get clkdev\n");
+               dev_notice(&pdev->dev, "cannot get optional clkdev\n");
        else
-               clk_enable(hpriv->clk);
+               clk_prepare_enable(hpriv->clk);
+
+       for (port = 0; port < n_ports; port++) {
+               char port_number[16];
+               sprintf(port_number, "%d", port);
+               hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
+               if (!IS_ERR(hpriv->port_clks[port]))
+                       clk_prepare_enable(hpriv->port_clks[port]);
+       }
 #endif
 
        /*
@@ -4098,9 +4117,15 @@ static int mv_platform_probe(struct platform_device *pdev)
 err:
 #if defined(CONFIG_HAVE_CLK)
        if (!IS_ERR(hpriv->clk)) {
-               clk_disable(hpriv->clk);
+               clk_disable_unprepare(hpriv->clk);
                clk_put(hpriv->clk);
        }
+       for (port = 0; port < n_ports; port++) {
+               if (!IS_ERR(hpriv->port_clks[port])) {
+                       clk_disable_unprepare(hpriv->port_clks[port]);
+                       clk_put(hpriv->port_clks[port]);
+               }
+       }
 #endif
 
        return rc;
@@ -4119,14 +4144,21 @@ static int __devexit mv_platform_remove(struct platform_device *pdev)
        struct ata_host *host = platform_get_drvdata(pdev);
 #if defined(CONFIG_HAVE_CLK)
        struct mv_host_priv *hpriv = host->private_data;
+       int port;
 #endif
        ata_host_detach(host);
 
 #if defined(CONFIG_HAVE_CLK)
        if (!IS_ERR(hpriv->clk)) {
-               clk_disable(hpriv->clk);
+               clk_disable_unprepare(hpriv->clk);
                clk_put(hpriv->clk);
        }
+       for (port = 0; port < host->n_ports; port++) {
+               if (!IS_ERR(hpriv->port_clks[port])) {
+                       clk_disable_unprepare(hpriv->port_clks[port]);
+                       clk_put(hpriv->port_clks[port]);
+               }
+       }
 #endif
        return 0;
 }
index e8cd652d20178c7c3ff8465aac997f1f65e20f63..98510931c8153110ee0617457fb63cafe652696a 100644 (file)
@@ -984,6 +984,7 @@ static uint32_t fpga_tx(struct solos_card *card)
                        } else if (skb && card->using_dma) {
                                SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
                                                                       skb->len, PCI_DMA_TODEVICE);
+                               card->tx_skb[port] = skb;
                                iowrite32(SKB_CB(skb)->dma_addr,
                                          card->config_regs + TX_DMA_ADDR(port));
                        }
@@ -1152,7 +1153,8 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
                db_fpga_upgrade = db_firmware_upgrade = 0;
        }
 
-       if (card->fpga_version >= DMA_SUPPORTED){
+       if (card->fpga_version >= DMA_SUPPORTED) {
+               pci_set_master(dev);
                card->using_dma = 1;
        } else {
                card->using_dma = 0;
index 9aa618acfe97434dd90f58e37c30ce7b672139f2..9b21469482aead0e69004e3285258fd3a0576796 100644 (file)
@@ -192,4 +192,93 @@ config DMA_SHARED_BUFFER
          APIs extension; the file's descriptor can then be passed on to other
          driver.
 
+config CMA
+       bool "Contiguous Memory Allocator (EXPERIMENTAL)"
+       depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
+       select MIGRATION
+       help
+         This enables the Contiguous Memory Allocator which allows drivers
+         to allocate big physically-contiguous blocks of memory for use with
+         hardware components that do not support I/O map nor scatter-gather.
+
+         For more information see <include/linux/dma-contiguous.h>.
+         If unsure, say "n".
+
+if CMA
+
+config CMA_DEBUG
+       bool "CMA debug messages (DEVELOPMENT)"
+       depends on DEBUG_KERNEL
+       help
+         Turns on debug messages in CMA.  This produces KERN_DEBUG
+         messages for every CMA call as well as various messages while
+         processing calls such as dma_alloc_from_contiguous().
+         This option does not affect warning and error messages.
+
+comment "Default contiguous memory area size:"
+
+config CMA_SIZE_MBYTES
+       int "Size in Mega Bytes"
+       depends on !CMA_SIZE_SEL_PERCENTAGE
+       default 16
+       help
+         Defines the size (in MiB) of the default memory area for Contiguous
+         Memory Allocator.
+
+config CMA_SIZE_PERCENTAGE
+       int "Percentage of total memory"
+       depends on !CMA_SIZE_SEL_MBYTES
+       default 10
+       help
+         Defines the size of the default memory area for Contiguous Memory
+         Allocator as a percentage of the total memory in the system.
+
+choice
+       prompt "Selected region size"
+       default CMA_SIZE_SEL_ABSOLUTE
+
+config CMA_SIZE_SEL_MBYTES
+       bool "Use mega bytes value only"
+
+config CMA_SIZE_SEL_PERCENTAGE
+       bool "Use percentage value only"
+
+config CMA_SIZE_SEL_MIN
+       bool "Use lower value (minimum)"
+
+config CMA_SIZE_SEL_MAX
+       bool "Use higher value (maximum)"
+
+endchoice
+
+config CMA_ALIGNMENT
+       int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
+       range 4 9
+       default 8
+       help
+         DMA mapping framework by default aligns all buffers to the smallest
+         PAGE_SIZE order which is greater than or equal to the requested buffer
+         size. This works well for buffers up to a few hundreds kilobytes, but
+         for larger buffers it is just a memory waste. With this parameter you can
+         specify the maximum PAGE_SIZE order for contiguous buffers. Larger
+         buffers will be aligned only to this specified order. The order is
+         expressed as a power of two multiplied by the PAGE_SIZE.
+
+         For example, if your system defaults to 4KiB pages, the order value
+         of 8 means that the buffers will be aligned up to 1MiB only.
+
+         If unsure, leave the default value "8".
+
+config CMA_AREAS
+       int "Maximum count of the CMA device-private areas"
+       default 7
+       help
+         CMA allows to create CMA areas for particular devices. This parameter
+         sets the maximum number of such device private CMA areas in the
+         system.
+
+         If unsure, leave the default value "7".
+
+endif
+
 endmenu
index b6d1b9c4200ca2756b313e9a55392b9884cde051..5aa2d703d19fac08073c54e27530ef8d5bc5b45d 100644 (file)
@@ -6,6 +6,7 @@ obj-y                   := core.o bus.o dd.o syscore.o \
                           attribute_container.o transport_class.o \
                           topology.o
 obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
+obj-$(CONFIG_CMA) += dma-contiguous.o
 obj-y                  += power/
 obj-$(CONFIG_HAS_DMA)  += dma-mapping.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
index 05c64c11bad20b0bad58d6a2985b7a4258bb74c1..24e88fe29ec19801f627e55b7b454fbbe8268181 100644 (file)
@@ -44,8 +44,26 @@ static int dma_buf_release(struct inode *inode, struct file *file)
        return 0;
 }
 
+static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
+{
+       struct dma_buf *dmabuf;
+
+       if (!is_dma_buf_file(file))
+               return -EINVAL;
+
+       dmabuf = file->private_data;
+
+       /* check for overflowing the buffer's size */
+       if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+           dmabuf->size >> PAGE_SHIFT)
+               return -EINVAL;
+
+       return dmabuf->ops->mmap(dmabuf, vma);
+}
+
 static const struct file_operations dma_buf_fops = {
        .release        = dma_buf_release,
+       .mmap           = dma_buf_mmap_internal,
 };
 
 /*
@@ -82,7 +100,8 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
                          || !ops->unmap_dma_buf
                          || !ops->release
                          || !ops->kmap_atomic
-                         || !ops->kmap)) {
+                         || !ops->kmap
+                         || !ops->mmap)) {
                return ERR_PTR(-EINVAL);
        }
 
@@ -406,3 +425,81 @@ void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
                dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
 }
 EXPORT_SYMBOL_GPL(dma_buf_kunmap);
+
+
+/**
+ * dma_buf_mmap - Setup up a userspace mmap with the given vma
+ * @dmabuf:    [in]    buffer that should back the vma
+ * @vma:       [in]    vma for the mmap
+ * @pgoff:     [in]    offset in pages where this mmap should start within the
+ *                     dma-buf buffer.
+ *
+ * This function adjusts the passed in vma so that it points at the file of the
+ * dma_buf operation. It also adjusts the starting pgoff and does bounds
+ * checking on the size of the vma. Then it calls the exporters mmap function to
+ * set up the mapping.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
+                unsigned long pgoff)
+{
+       if (WARN_ON(!dmabuf || !vma))
+               return -EINVAL;
+
+       /* check for offset overflow */
+       if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+               return -EOVERFLOW;
+
+       /* check for overflowing the buffer's size */
+       if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+           dmabuf->size >> PAGE_SHIFT)
+               return -EINVAL;
+
+       /* readjust the vma */
+       if (vma->vm_file)
+               fput(vma->vm_file);
+
+       vma->vm_file = dmabuf->file;
+       get_file(vma->vm_file);
+
+       vma->vm_pgoff = pgoff;
+
+       return dmabuf->ops->mmap(dmabuf, vma);
+}
+EXPORT_SYMBOL_GPL(dma_buf_mmap);
+
+/**
+ * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @dmabuf:    [in]    buffer to vmap
+ *
+ * This call may fail due to lack of virtual mapping address space.
+ * These calls are optional in drivers. The intended use for them
+ * is for mapping objects linear in kernel space for high use objects.
+ * Please attempt to use kmap/kunmap before thinking about these interfaces.
+ */
+void *dma_buf_vmap(struct dma_buf *dmabuf)
+{
+       if (WARN_ON(!dmabuf))
+               return NULL;
+
+       if (dmabuf->ops->vmap)
+               return dmabuf->ops->vmap(dmabuf);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(dma_buf_vmap);
+
+/**
+ * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
+ * @dmabuf:    [in]    buffer to vunmap
+ */
+void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+       if (WARN_ON(!dmabuf))
+               return;
+
+       if (dmabuf->ops->vunmap)
+               dmabuf->ops->vunmap(dmabuf, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_vunmap);
index bb0025c510b3860e1f84b434c03a4d34a1d64851..1b85949e3d2f3f562ea18c61e890309fe18601af 100644 (file)
@@ -10,6 +10,7 @@
 struct dma_coherent_mem {
        void            *virt_base;
        dma_addr_t      device_base;
+       phys_addr_t     pfn_base;
        int             size;
        int             flags;
        unsigned long   *bitmap;
@@ -44,6 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
 
        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
+       dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;
 
@@ -176,3 +178,43 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
        return 0;
 }
 EXPORT_SYMBOL(dma_release_from_coherent);
+
+/**
+ * dma_mmap_from_coherent() - try to mmap the memory allocated from
+ * per-device coherent memory pool to userspace
+ * @dev:       device from which the memory was allocated
+ * @vma:       vm_area for the userspace memory
+ * @vaddr:     cpu address returned by dma_alloc_from_coherent
+ * @size:      size of the memory buffer allocated by dma_alloc_from_coherent
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if
+ * dma_release_coherent() should proceed with mapping memory from
+ * generic pools.
+ */
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+                          void *vaddr, size_t size, int *ret)
+{
+       struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+       if (mem && vaddr >= mem->virt_base && vaddr + size <=
+                  (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+               unsigned long off = vma->vm_pgoff;
+               int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+               int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+               int count = size >> PAGE_SHIFT;
+
+               *ret = -ENXIO;
+               if (off < count && user_count <= count - off) {
+                       unsigned pfn = mem->pfn_base + start + off;
+                       *ret = remap_pfn_range(vma, vma->vm_start, pfn,
+                                              user_count << PAGE_SHIFT,
+                                              vma->vm_page_prot);
+               }
+               return 1;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(dma_mmap_from_coherent);
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
new file mode 100644 (file)
index 0000000..78efb03
--- /dev/null
@@ -0,0 +1,401 @@
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ *     Marek Szyprowski <m.szyprowski@samsung.com>
+ *     Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your option) any later version of the license.
+ */
+
+#define pr_fmt(fmt) "cma: " fmt
+
+#ifdef CONFIG_CMA_DEBUG
+#ifndef DEBUG
+#  define DEBUG
+#endif
+#endif
+
+#include <asm/page.h>
+#include <asm/dma-contiguous.h>
+
+#include <linux/memblock.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-isolation.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/mm_types.h>
+#include <linux/dma-contiguous.h>
+
+#ifndef SZ_1M
+#define SZ_1M (1 << 20)
+#endif
+
+struct cma {
+       unsigned long   base_pfn;
+       unsigned long   count;
+       unsigned long   *bitmap;
+};
+
+struct cma *dma_contiguous_default_area;
+
+#ifdef CONFIG_CMA_SIZE_MBYTES
+#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
+#else
+#define CMA_SIZE_MBYTES 0
+#endif
+
+/*
+ * Default global CMA area size can be defined in kernel's .config.
+ * This is useful mainly for distro maintainers to create a kernel
+ * that works correctly for most supported systems.
+ * The size can be set in bytes or as a percentage of the total memory
+ * in the system.
+ *
+ * Users, who want to set the size of global CMA area for their system
+ * should use cma= kernel parameter.
+ */
+static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static long size_cmdline = -1;
+
+static int __init early_cma(char *p)
+{
+       pr_debug("%s(%s)\n", __func__, p);
+       size_cmdline = memparse(p, &p);
+       return 0;
+}
+early_param("cma", early_cma);
+
+#ifdef CONFIG_CMA_SIZE_PERCENTAGE
+
+static unsigned long __init __maybe_unused cma_early_percent_memory(void)
+{
+       struct memblock_region *reg;
+       unsigned long total_pages = 0;
+
+       /*
+        * We cannot use memblock_phys_mem_size() here, because
+        * memblock_analyze() has not been called yet.
+        */
+       for_each_memblock(memory, reg)
+               total_pages += memblock_region_memory_end_pfn(reg) -
+                              memblock_region_memory_base_pfn(reg);
+
+       return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
+}
+
+#else
+
+static inline __maybe_unused unsigned long cma_early_percent_memory(void)
+{
+       return 0;
+}
+
+#endif
+
+/**
+ * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory.
+ */
+void __init dma_contiguous_reserve(phys_addr_t limit)
+{
+       unsigned long selected_size = 0;
+
+       pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
+       if (size_cmdline != -1) {
+               selected_size = size_cmdline;
+       } else {
+#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
+               selected_size = size_bytes;
+#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
+               selected_size = cma_early_percent_memory();
+#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
+               selected_size = min(size_bytes, cma_early_percent_memory());
+#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
+               selected_size = max(size_bytes, cma_early_percent_memory());
+#endif
+       }
+
+       if (selected_size) {
+               pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+                        selected_size / SZ_1M);
+
+               dma_declare_contiguous(NULL, selected_size, 0, limit);
+       }
+};
+
+static DEFINE_MUTEX(cma_mutex);
+
+static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+{
+       unsigned long pfn = base_pfn;
+       unsigned i = count >> pageblock_order;
+       struct zone *zone;
+
+       WARN_ON_ONCE(!pfn_valid(pfn));
+       zone = page_zone(pfn_to_page(pfn));
+
+       do {
+               unsigned j;
+               base_pfn = pfn;
+               for (j = pageblock_nr_pages; j; --j, pfn++) {
+                       WARN_ON_ONCE(!pfn_valid(pfn));
+                       if (page_zone(pfn_to_page(pfn)) != zone)
+                               return -EINVAL;
+               }
+               init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+       } while (--i);
+       return 0;
+}
+
+static __init struct cma *cma_create_area(unsigned long base_pfn,
+                                    unsigned long count)
+{
+       int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+       struct cma *cma;
+       int ret = -ENOMEM;
+
+       pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
+
+       cma = kmalloc(sizeof *cma, GFP_KERNEL);
+       if (!cma)
+               return ERR_PTR(-ENOMEM);
+
+       cma->base_pfn = base_pfn;
+       cma->count = count;
+       cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+       if (!cma->bitmap)
+               goto no_mem;
+
+       ret = cma_activate_area(base_pfn, count);
+       if (ret)
+               goto error;
+
+       pr_debug("%s: returned %p\n", __func__, (void *)cma);
+       return cma;
+
+error:
+       kfree(cma->bitmap);
+no_mem:
+       kfree(cma);
+       return ERR_PTR(ret);
+}
+
+static struct cma_reserved {
+       phys_addr_t start;
+       unsigned long size;
+       struct device *dev;
+} cma_reserved[MAX_CMA_AREAS] __initdata;
+static unsigned cma_reserved_count __initdata;
+
+static int __init cma_init_reserved_areas(void)
+{
+       struct cma_reserved *r = cma_reserved;
+       unsigned i = cma_reserved_count;
+
+       pr_debug("%s()\n", __func__);
+
+       for (; i; --i, ++r) {
+               struct cma *cma;
+               cma = cma_create_area(PFN_DOWN(r->start),
+                                     r->size >> PAGE_SHIFT);
+               if (!IS_ERR(cma))
+                       dev_set_cma_area(r->dev, cma);
+       }
+       return 0;
+}
+core_initcall(cma_init_reserved_areas);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ *                           for particular device
+ * @dev:   Pointer to device structure.
+ * @size:  Size of the reserved memory.
+ * @base:  Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for specified device. It should be
+ * called by board specific code when early allocator (memblock or bootmem)
+ * is still active.
+ */
+int __init dma_declare_contiguous(struct device *dev, unsigned long size,
+                                 phys_addr_t base, phys_addr_t limit)
+{
+       struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+       unsigned long alignment;
+
+       pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
+                (unsigned long)size, (unsigned long)base,
+                (unsigned long)limit);
+
+       /* Sanity checks */
+       if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+               pr_err("Not enough slots for CMA reserved regions!\n");
+               return -ENOSPC;
+       }
+
+       if (!size)
+               return -EINVAL;
+
+       /* Sanitise input arguments */
+       alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+       base = ALIGN(base, alignment);
+       size = ALIGN(size, alignment);
+       limit &= ~(alignment - 1);
+
+       /* Reserve memory */
+       if (base) {
+               if (memblock_is_region_reserved(base, size) ||
+                   memblock_reserve(base, size) < 0) {
+                       base = -EBUSY;
+                       goto err;
+               }
+       } else {
+               /*
+                * Use __memblock_alloc_base() since
+                * memblock_alloc_base() panic()s.
+                */
+               phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
+               if (!addr) {
+                       base = -ENOMEM;
+                       goto err;
+               } else if (addr + size > ~(unsigned long)0) {
+                       memblock_free(addr, size);
+                       base = -EINVAL;
+                       goto err;
+               } else {
+                       base = addr;
+               }
+       }
+
+       /*
+        * Each reserved area must be initialised later, when more kernel
+        * subsystems (like slab allocator) are available.
+        */
+       r->start = base;
+       r->size = size;
+       r->dev = dev;
+       cma_reserved_count++;
+       pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
+               (unsigned long)base);
+
+       /* Architecture specific contiguous memory fixup. */
+       dma_contiguous_early_fixup(base, size);
+       return 0;
+err:
+       pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
+       return base;
+}
+
+/**
+ * dma_alloc_from_contiguous() - allocate pages from contiguous area
+ * @dev:   Pointer to device for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
+ *
+ * This function allocates memory buffer for specified device. It uses
+ * device specific contiguous memory area if available or the default
+ * global one. Requires architecture specific get_dev_cma_area() helper
+ * function.
+ */
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+                                      unsigned int align)
+{
+       unsigned long mask, pfn, pageno, start = 0;
+       struct cma *cma = dev_get_cma_area(dev);
+       int ret;
+
+       if (!cma || !cma->count)
+               return NULL;
+
+       if (align > CONFIG_CMA_ALIGNMENT)
+               align = CONFIG_CMA_ALIGNMENT;
+
+       pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
+                count, align);
+
+       if (!count)
+               return NULL;
+
+       mask = (1 << align) - 1;
+
+       mutex_lock(&cma_mutex);
+
+       for (;;) {
+               pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
+                                                   start, count, mask);
+               if (pageno >= cma->count) {
+                       ret = -ENOMEM;
+                       goto error;
+               }
+
+               pfn = cma->base_pfn + pageno;
+               ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+               if (ret == 0) {
+                       bitmap_set(cma->bitmap, pageno, count);
+                       break;
+               } else if (ret != -EBUSY) {
+                       goto error;
+               }
+               pr_debug("%s(): memory range at %p is busy, retrying\n",
+                        __func__, pfn_to_page(pfn));
+               /* try again with a bit different memory target */
+               start = pageno + mask + 1;
+       }
+
+       mutex_unlock(&cma_mutex);
+
+       pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
+       return pfn_to_page(pfn);
+error:
+       mutex_unlock(&cma_mutex);
+       return NULL;
+}
+
+/**
+ * dma_release_from_contiguous() - release allocated pages
+ * @dev:   Pointer to device for which the pages were allocated.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_from_contiguous().
+ * It returns false when provided pages do not belong to contiguous area and
+ * true otherwise.
+ */
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+                                int count)
+{
+       struct cma *cma = dev_get_cma_area(dev);
+       unsigned long pfn;
+
+       if (!cma || !pages)
+               return false;
+
+       pr_debug("%s(page %p)\n", __func__, (void *)pages);
+
+       pfn = page_to_pfn(pages);
+
+       if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+               return false;
+
+       VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
+
+       mutex_lock(&cma_mutex);
+       bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+       free_contig_range(pfn, count);
+       mutex_unlock(&cma_mutex);
+
+       return true;
+}
index 90aa2a11a933b448a536a353c5d932668c3f8e11..af1a177216f12573c7e481fea0a568c09b97ef12 100644 (file)
@@ -592,11 +592,9 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
 {
        int n;
 
-       n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
-       if (n > 0 && PAGE_SIZE > n + 1) {
-               *(buf + n++) = '\n';
-               *(buf + n++) = '\0';
-       }
+       n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
+       buf[n++] = '\n';
+       buf[n] = '\0';
        return n;
 }
 
index 5f6b2478bf1759717e9c85f1958207364f3f7520..fa6bf5279d28465f095c0829381854a718313e5e 100644 (file)
@@ -42,7 +42,7 @@ static int regmap_i2c_gather_write(void *context,
        /* If the I2C controller can't do a gather tell the core, it
         * will substitute in a linear write for us.
         */
-       if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_PROTOCOL_MANGLING))
+       if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_NOSTART))
                return -ENOTSUPP;
 
        xfer[0].addr = i2c->addr;
index ba29b2e73d48936ab9a93a028abd0b0d9cd29691..72b5e7280d14792e6d83f3a59e4d4793d20fe28c 100644 (file)
@@ -42,7 +42,7 @@ struct device *soc_device_to_device(struct soc_device *soc_dev)
        return &soc_dev->dev;
 }
 
-static mode_t soc_attribute_mode(struct kobject *kobj,
+static umode_t soc_attribute_mode(struct kobject *kobj,
                                  struct attribute *attr,
                                  int index)
 {
index cf0e63dd97da9bf09a88bf364cba87956cea4f42..e54e31b02b88eb6e927072745f345871a8d96203 100644 (file)
@@ -65,39 +65,80 @@ struct drbd_atodb_wait {
 
 int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);
 
+void *drbd_md_get_buffer(struct drbd_conf *mdev)
+{
+       int r;
+
+       wait_event(mdev->misc_wait,
+                  (r = atomic_cmpxchg(&mdev->md_io_in_use, 0, 1)) == 0 ||
+                  mdev->state.disk <= D_FAILED);
+
+       return r ? NULL : page_address(mdev->md_io_page);
+}
+
+void drbd_md_put_buffer(struct drbd_conf *mdev)
+{
+       if (atomic_dec_and_test(&mdev->md_io_in_use))
+               wake_up(&mdev->misc_wait);
+}
+
+static bool md_io_allowed(struct drbd_conf *mdev)
+{
+       enum drbd_disk_state ds = mdev->state.disk;
+       return ds >= D_NEGOTIATING || ds == D_ATTACHING;
+}
+
+void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+                                    unsigned int *done)
+{
+       long dt = bdev->dc.disk_timeout * HZ / 10;
+       if (dt == 0)
+               dt = MAX_SCHEDULE_TIMEOUT;
+
+       dt = wait_event_timeout(mdev->misc_wait, *done || !md_io_allowed(mdev), dt);
+       if (dt == 0)
+               dev_err(DEV, "meta-data IO operation timed out\n");
+}
+
 static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
                                 struct drbd_backing_dev *bdev,
                                 struct page *page, sector_t sector,
                                 int rw, int size)
 {
        struct bio *bio;
-       struct drbd_md_io md_io;
        int ok;
 
-       md_io.mdev = mdev;
-       init_completion(&md_io.event);
-       md_io.error = 0;
+       mdev->md_io.done = 0;
+       mdev->md_io.error = -ENODEV;
 
        if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
                rw |= REQ_FUA | REQ_FLUSH;
        rw |= REQ_SYNC;
 
-       bio = bio_alloc(GFP_NOIO, 1);
+       bio = bio_alloc_drbd(GFP_NOIO);
        bio->bi_bdev = bdev->md_bdev;
        bio->bi_sector = sector;
        ok = (bio_add_page(bio, page, size, 0) == size);
        if (!ok)
                goto out;
-       bio->bi_private = &md_io;
+       bio->bi_private = &mdev->md_io;
        bio->bi_end_io = drbd_md_io_complete;
        bio->bi_rw = rw;
 
+       if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* Corresponding put_ldev in drbd_md_io_complete() */
+               dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
+               ok = 0;
+               goto out;
+       }
+
+       bio_get(bio); /* one bio_put() is in the completion handler */
+       atomic_inc(&mdev->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */
        if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
                bio_endio(bio, -EIO);
        else
                submit_bio(rw, bio);
-       wait_for_completion(&md_io.event);
-       ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
+       wait_until_done_or_disk_failure(mdev, bdev, &mdev->md_io.done);
+       ok = bio_flagged(bio, BIO_UPTODATE) && mdev->md_io.error == 0;
 
  out:
        bio_put(bio);
@@ -111,7 +152,7 @@ int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
        int offset = 0;
        struct page *iop = mdev->md_io_page;
 
-       D_ASSERT(mutex_is_locked(&mdev->md_io_mutex));
+       D_ASSERT(atomic_read(&mdev->md_io_in_use) == 1);
 
        BUG_ON(!bdev->md_bdev);
 
@@ -328,8 +369,13 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
                return 1;
        }
 
-       mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
-       buffer = (struct al_transaction *)page_address(mdev->md_io_page);
+       buffer = drbd_md_get_buffer(mdev); /* protects md_io_buffer, al_tr_cycle, ... */
+       if (!buffer) {
+               dev_err(DEV, "disk failed while waiting for md_io buffer\n");
+               complete(&((struct update_al_work *)w)->event);
+               put_ldev(mdev);
+               return 1;
+       }
 
        buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
        buffer->tr_number = cpu_to_be32(mdev->al_tr_number);
@@ -374,7 +420,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
        D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
        mdev->al_tr_number++;
 
-       mutex_unlock(&mdev->md_io_mutex);
+       drbd_md_put_buffer(mdev);
 
        complete(&((struct update_al_work *)w)->event);
        put_ldev(mdev);
@@ -443,8 +489,9 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
        /* lock out all other meta data io for now,
         * and make sure the page is mapped.
         */
-       mutex_lock(&mdev->md_io_mutex);
-       buffer = page_address(mdev->md_io_page);
+       buffer = drbd_md_get_buffer(mdev);
+       if (!buffer)
+               return 0;
 
        /* Find the valid transaction in the log */
        for (i = 0; i <= mx; i++) {
@@ -452,7 +499,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
                if (rv == 0)
                        continue;
                if (rv == -1) {
-                       mutex_unlock(&mdev->md_io_mutex);
+                       drbd_md_put_buffer(mdev);
                        return 0;
                }
                cnr = be32_to_cpu(buffer->tr_number);
@@ -478,7 +525,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 
        if (!found_valid) {
                dev_warn(DEV, "No usable activity log found.\n");
-               mutex_unlock(&mdev->md_io_mutex);
+               drbd_md_put_buffer(mdev);
                return 1;
        }
 
@@ -493,7 +540,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
                rv = drbd_al_read_tr(mdev, bdev, buffer, i);
                ERR_IF(rv == 0) goto cancel;
                if (rv == -1) {
-                       mutex_unlock(&mdev->md_io_mutex);
+                       drbd_md_put_buffer(mdev);
                        return 0;
                }
 
@@ -534,7 +581,7 @@ cancel:
                mdev->al_tr_pos = 0;
 
        /* ok, we are done with it */
-       mutex_unlock(&mdev->md_io_mutex);
+       drbd_md_put_buffer(mdev);
 
        dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
             transactions, active_extents);
@@ -671,16 +718,20 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
                        else
                                ext->rs_failed += count;
                        if (ext->rs_left < ext->rs_failed) {
-                               dev_err(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
-                                   "rs_failed=%d count=%d\n",
+                               dev_warn(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
+                                   "rs_failed=%d count=%d cstate=%s\n",
                                     (unsigned long long)sector,
                                     ext->lce.lc_number, ext->rs_left,
-                                    ext->rs_failed, count);
-                               dump_stack();
-
-                               lc_put(mdev->resync, &ext->lce);
-                               drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-                               return;
+                                    ext->rs_failed, count,
+                                    drbd_conn_str(mdev->state.conn));
+
+                               /* We don't expect to be able to clear more bits
+                                * than have been set when we originally counted
+                                * the set bits to cache that value in ext->rs_left.
+                                * Whatever the reason (disconnect during resync,
+                                * delayed local completion of an application write),
+                                * try to fix it up by recounting here. */
+                               ext->rs_left = drbd_bm_e_weight(mdev, enr);
                        }
                } else {
                        /* Normally this element should be in the cache,
@@ -1192,6 +1243,7 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
                put_ldev(mdev);
        }
        spin_unlock_irq(&mdev->al_lock);
+       wake_up(&mdev->al_wait);
 
        return 0;
 }
index 3030201c69d89e7407230bc8fb2d48dd1b2fbdf3..b5c5ff53cb57f74e89cc59bad8ca6e29df5e1db5 100644 (file)
@@ -205,7 +205,7 @@ void drbd_bm_unlock(struct drbd_conf *mdev)
 static void bm_store_page_idx(struct page *page, unsigned long idx)
 {
        BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
-       page_private(page) |= idx;
+       set_page_private(page, idx);
 }
 
 static unsigned long bm_page_to_idx(struct page *page)
@@ -886,12 +886,21 @@ void drbd_bm_clear_all(struct drbd_conf *mdev)
 struct bm_aio_ctx {
        struct drbd_conf *mdev;
        atomic_t in_flight;
-       struct completion done;
+       unsigned int done;
        unsigned flags;
 #define BM_AIO_COPY_PAGES      1
        int error;
+       struct kref kref;
 };
 
+static void bm_aio_ctx_destroy(struct kref *kref)
+{
+       struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);
+
+       put_ldev(ctx->mdev);
+       kfree(ctx);
+}
+
 /* bv_page may be a copy, or may be the original */
 static void bm_async_io_complete(struct bio *bio, int error)
 {
@@ -930,20 +939,21 @@ static void bm_async_io_complete(struct bio *bio, int error)
 
        bm_page_unlock_io(mdev, idx);
 
-       /* FIXME give back to page pool */
        if (ctx->flags & BM_AIO_COPY_PAGES)
-               put_page(bio->bi_io_vec[0].bv_page);
+               mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);
 
        bio_put(bio);
 
-       if (atomic_dec_and_test(&ctx->in_flight))
-               complete(&ctx->done);
+       if (atomic_dec_and_test(&ctx->in_flight)) {
+               ctx->done = 1;
+               wake_up(&mdev->misc_wait);
+               kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+       }
 }
 
 static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
 {
-       /* we are process context. we always get a bio */
-       struct bio *bio = bio_alloc(GFP_KERNEL, 1);
+       struct bio *bio = bio_alloc_drbd(GFP_NOIO);
        struct drbd_conf *mdev = ctx->mdev;
        struct drbd_bitmap *b = mdev->bitmap;
        struct page *page;
@@ -966,10 +976,8 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
        bm_set_page_unchanged(b->bm_pages[page_nr]);
 
        if (ctx->flags & BM_AIO_COPY_PAGES) {
-               /* FIXME alloc_page is good enough for now, but actually needs
-                * to use pre-allocated page pool */
                void *src, *dest;
-               page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
+               page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
                dest = kmap_atomic(page);
                src = kmap_atomic(b->bm_pages[page_nr]);
                memcpy(dest, src, PAGE_SIZE);
@@ -981,6 +989,8 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
 
        bio->bi_bdev = mdev->ldev->md_bdev;
        bio->bi_sector = on_disk_sector;
+       /* bio_add_page of a single page to an empty bio will always succeed,
+        * according to api.  Do we want to assert that? */
        bio_add_page(bio, page, len, 0);
        bio->bi_private = ctx;
        bio->bi_end_io = bm_async_io_complete;
@@ -999,14 +1009,9 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
 /*
  * bm_rw: read/write the whole bitmap from/to its on disk location.
  */
-static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local)
+static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
 {
-       struct bm_aio_ctx ctx = {
-               .mdev = mdev,
-               .in_flight = ATOMIC_INIT(1),
-               .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
-               .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0,
-       };
+       struct bm_aio_ctx *ctx;
        struct drbd_bitmap *b = mdev->bitmap;
        int num_pages, i, count = 0;
        unsigned long now;
@@ -1021,7 +1026,27 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
         * For lazy writeout, we don't care for ongoing changes to the bitmap,
         * as we submit copies of pages anyways.
         */
-       if (!ctx.flags)
+
+       ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
+       if (!ctx)
+               return -ENOMEM;
+
+       *ctx = (struct bm_aio_ctx) {
+               .mdev = mdev,
+               .in_flight = ATOMIC_INIT(1),
+               .done = 0,
+               .flags = flags,
+               .error = 0,
+               .kref = { ATOMIC_INIT(2) },
+       };
+
+       if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
+               dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
+               kfree(ctx);
+               return -ENODEV;
+       }
+
+       if (!ctx->flags)
                WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));
 
        num_pages = b->bm_number_of_pages;
@@ -1046,29 +1071,38 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
                                continue;
                        }
                }
-               atomic_inc(&ctx.in_flight);
-               bm_page_io_async(&ctx, i, rw);
+               atomic_inc(&ctx->in_flight);
+               bm_page_io_async(ctx, i, rw);
                ++count;
                cond_resched();
        }
 
        /*
-        * We initialize ctx.in_flight to one to make sure bm_async_io_complete
-        * will not complete() early, and decrement / test it here.  If there
+        * We initialize ctx->in_flight to one to make sure bm_async_io_complete
+        * will not set ctx->done early, and decrement / test it here.  If there
         * are still some bios in flight, we need to wait for them here.
+        * If all IO is done already (or nothing had been submitted), there is
+        * no need to wait.  Still, we need to put the kref associated with the
+        * "in_flight reached zero, all done" event.
         */
-       if (!atomic_dec_and_test(&ctx.in_flight))
-               wait_for_completion(&ctx.done);
+       if (!atomic_dec_and_test(&ctx->in_flight))
+               wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+       else
+               kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+
        dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
                        rw == WRITE ? "WRITE" : "READ",
                        count, jiffies - now);
 
-       if (ctx.error) {
+       if (ctx->error) {
                dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
                drbd_chk_io_error(mdev, 1, true);
-               err = -EIO; /* ctx.error ? */
+               err = -EIO; /* ctx->error ? */
        }
 
+       if (atomic_read(&ctx->in_flight))
+               err = -EIO; /* Disk failed during IO... */
+
        now = jiffies;
        if (rw == WRITE) {
                drbd_md_flush(mdev);
@@ -1082,6 +1116,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
        dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
             ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
 
+       kref_put(&ctx->kref, &bm_aio_ctx_destroy);
        return err;
 }
 
@@ -1091,7 +1126,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
  */
 int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
 {
-       return bm_rw(mdev, READ, 0);
+       return bm_rw(mdev, READ, 0, 0);
 }
 
 /**
@@ -1102,7 +1137,7 @@ int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
  */
 int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
 {
-       return bm_rw(mdev, WRITE, 0);
+       return bm_rw(mdev, WRITE, 0, 0);
 }
 
 /**
@@ -1112,7 +1147,23 @@ int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
  */
 int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
 {
-       return bm_rw(mdev, WRITE, upper_idx);
+       return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, upper_idx);
+}
+
+/**
+ * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
+ * @mdev:      DRBD device.
+ *
+ * Will only write pages that have changed since last IO.
+ * In contrast to drbd_bm_write(), this will copy the bitmap pages
+ * to temporary writeout pages. It is intended to trigger a full write-out
+ * while still allowing the bitmap to change, for example if a resync or online
+ * verify is aborted due to a failed peer disk, while local IO continues, or
+ * pending resync acks are still being processed.
+ */
+int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
+{
+       return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
 }
 
 
@@ -1130,28 +1181,45 @@ int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(l
  */
 int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
 {
-       struct bm_aio_ctx ctx = {
+       struct bm_aio_ctx *ctx;
+       int err;
+
+       if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
+               dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
+               return 0;
+       }
+
+       ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
+       if (!ctx)
+               return -ENOMEM;
+
+       *ctx = (struct bm_aio_ctx) {
                .mdev = mdev,
                .in_flight = ATOMIC_INIT(1),
-               .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
+               .done = 0,
                .flags = BM_AIO_COPY_PAGES,
+               .error = 0,
+               .kref = { ATOMIC_INIT(2) },
        };
 
-       if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
-               dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
-               return 0;
+       if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
+               dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
+               kfree(ctx);
+               return -ENODEV;
        }
 
-       bm_page_io_async(&ctx, idx, WRITE_SYNC);
-       wait_for_completion(&ctx.done);
+       bm_page_io_async(ctx, idx, WRITE_SYNC);
+       wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
 
-       if (ctx.error)
+       if (ctx->error)
                drbd_chk_io_error(mdev, 1, true);
                /* that should force detach, so the in memory bitmap will be
                 * gone in a moment as well. */
 
        mdev->bm_writ_cnt++;
-       return ctx.error;
+       err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error;
+       kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+       return err;
 }
 
 /* NOTE
index 8d680562ba73a1ca17120779c06aa834eea18f39..02f013a073a75b66fe73c8658f1cde96ea7102a7 100644 (file)
@@ -712,7 +712,6 @@ struct drbd_request {
        struct list_head tl_requests; /* ring list in the transfer log */
        struct bio *master_bio;       /* master bio pointer */
        unsigned long rq_state; /* see comments above _req_mod() */
-       int seq_num;
        unsigned long start_time;
 };
 
@@ -851,6 +850,7 @@ enum {
        NEW_CUR_UUID,           /* Create new current UUID when thawing IO */
        AL_SUSPENDED,           /* Activity logging is currently suspended. */
        AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
+       STATE_SENT,             /* Do not change state/UUIDs while this is set */
 };
 
 struct drbd_bitmap; /* opaque for drbd_conf */
@@ -862,31 +862,30 @@ enum bm_flag {
        BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */
 
        /* currently locked for bulk operation */
-       BM_LOCKED_MASK = 0x7,
+       BM_LOCKED_MASK = 0xf,
 
        /* in detail, that is: */
        BM_DONT_CLEAR = 0x1,
        BM_DONT_SET   = 0x2,
        BM_DONT_TEST  = 0x4,
 
+       /* so we can mark it locked for bulk operation,
+        * and still allow all non-bulk operations */
+       BM_IS_LOCKED  = 0x8,
+
        /* (test bit, count bit) allowed (common case) */
-       BM_LOCKED_TEST_ALLOWED = 0x3,
+       BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,
 
        /* testing bits, as well as setting new bits allowed, but clearing bits
         * would be unexpected.  Used during bitmap receive.  Setting new bits
         * requires sending of "out-of-sync" information, though. */
-       BM_LOCKED_SET_ALLOWED = 0x1,
+       BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,
 
-       /* clear is not expected while bitmap is locked for bulk operation */
+       /* for drbd_bm_write_copy_pages, everything is allowed,
+        * only concurrent bulk operations are locked out. */
+       BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
 };
 
-
-/* TODO sort members for performance
- * MAYBE group them further */
-
-/* THINK maybe we actually want to use the default "event/%s" worker threads
- * or similar in linux 2.6, which uses per cpu data and threads.
- */
 struct drbd_work_queue {
        struct list_head q;
        struct semaphore s; /* producers up it, worker down()s it */
@@ -938,8 +937,7 @@ struct drbd_backing_dev {
 };
 
 struct drbd_md_io {
-       struct drbd_conf *mdev;
-       struct completion event;
+       unsigned int done;
        int error;
 };
 
@@ -1022,6 +1020,7 @@ struct drbd_conf {
        struct drbd_tl_epoch *newest_tle;
        struct drbd_tl_epoch *oldest_tle;
        struct list_head out_of_sequence_requests;
+       struct list_head barrier_acked_requests;
        struct hlist_head *tl_hash;
        unsigned int tl_hash_s;
 
@@ -1056,6 +1055,8 @@ struct drbd_conf {
        struct crypto_hash *csums_tfm;
        struct crypto_hash *verify_tfm;
 
+       unsigned long last_reattach_jif;
+       unsigned long last_reconnect_jif;
        struct drbd_thread receiver;
        struct drbd_thread worker;
        struct drbd_thread asender;
@@ -1094,7 +1095,8 @@ struct drbd_conf {
        wait_queue_head_t ee_wait;
        struct page *md_io_page;        /* one page buffer for md_io */
        struct page *md_io_tmpp;        /* for logical_block_size != 512 */
-       struct mutex md_io_mutex;       /* protects the md_io_buffer */
+       struct drbd_md_io md_io;
+       atomic_t md_io_in_use;          /* protects the md_io, md_io_page and md_io_tmpp */
        spinlock_t al_lock;
        wait_queue_head_t al_wait;
        struct lru_cache *act_log;      /* activity log */
@@ -1228,8 +1230,8 @@ extern int drbd_send_uuids(struct drbd_conf *mdev);
 extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
 extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
 extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
-extern int _drbd_send_state(struct drbd_conf *mdev);
-extern int drbd_send_state(struct drbd_conf *mdev);
+extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s);
+extern int drbd_send_current_state(struct drbd_conf *mdev);
 extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
                        enum drbd_packets cmd, struct p_header80 *h,
                        size_t size, unsigned msg_flags);
@@ -1461,6 +1463,7 @@ extern int  drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
 extern int  drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
 extern int  drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
 extern int  drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
+extern int  drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
 extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
                unsigned long al_enr);
 extern size_t       drbd_bm_words(struct drbd_conf *mdev);
@@ -1493,11 +1496,38 @@ extern struct kmem_cache *drbd_al_ext_cache;    /* activity log extents */
 extern mempool_t *drbd_request_mempool;
 extern mempool_t *drbd_ee_mempool;
 
-extern struct page *drbd_pp_pool; /* drbd's page pool */
+/* drbd's page pool, used to buffer data received from the peer,
+ * or data requested by the peer.
+ *
+ * This does not have an emergency reserve.
+ *
+ * When allocating from this pool, it first takes pages from the pool.
+ * Only if the pool is depleted will try to allocate from the system.
+ *
+ * The assumption is that pages taken from this pool will be processed,
+ * and given back, "quickly", and then can be recycled, so we can avoid
+ * frequent calls to alloc_page(), and still will be able to make progress even
+ * under memory pressure.
+ */
+extern struct page *drbd_pp_pool;
 extern spinlock_t   drbd_pp_lock;
 extern int         drbd_pp_vacant;
 extern wait_queue_head_t drbd_pp_wait;
 
+/* We also need a standard (emergency-reserve backed) page pool
+ * for meta data IO (activity log, bitmap).
+ * We can keep it global, as long as it is used as "N pages at a time".
+ * 128 should be plenty, currently we probably can get away with as few as 1.
+ */
+#define DRBD_MIN_POOL_PAGES    128
+extern mempool_t *drbd_md_io_page_pool;
+
+/* We also need to make sure we get a bio
+ * when we need it for housekeeping purposes */
+extern struct bio_set *drbd_md_io_bio_set;
+/* to allocate from that set */
+extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
+
 extern rwlock_t global_state_lock;
 
 extern struct drbd_conf *drbd_new_device(unsigned int minor);
@@ -1536,8 +1566,12 @@ extern void resume_next_sg(struct drbd_conf *mdev);
 extern void suspend_other_sg(struct drbd_conf *mdev);
 extern int drbd_resync_finished(struct drbd_conf *mdev);
 /* maybe rather drbd_main.c ? */
+extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
+extern void drbd_md_put_buffer(struct drbd_conf *mdev);
 extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
-               struct drbd_backing_dev *bdev, sector_t sector, int rw);
+                               struct drbd_backing_dev *bdev, sector_t sector, int rw);
+extern void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+                                           unsigned int *done);
 extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
 extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
 
@@ -1754,19 +1788,6 @@ static inline struct page *page_chain_next(struct page *page)
 #define page_chain_for_each_safe(page, n) \
        for (; page && ({ n = page_chain_next(page); 1; }); page = n)
 
-static inline int drbd_bio_has_active_page(struct bio *bio)
-{
-       struct bio_vec *bvec;
-       int i;
-
-       __bio_for_each_segment(bvec, bio, i, 0) {
-               if (page_count(bvec->bv_page) > 1)
-                       return 1;
-       }
-
-       return 0;
-}
-
 static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
 {
        struct page *page = e->pages;
@@ -1777,7 +1798,6 @@ static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
        return 0;
 }
 
-
 static inline void drbd_state_lock(struct drbd_conf *mdev)
 {
        wait_event(mdev->misc_wait,
@@ -2230,7 +2250,7 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
                 * Note: currently we don't support such large bitmaps on 32bit
                 * arch anyways, but no harm done to be prepared for it here.
                 */
-               unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10;
+               unsigned int shift = mdev->rs_total > UINT_MAX ? 16 : 10;
                unsigned long left = *bits_left >> shift;
                unsigned long total = 1UL + (mdev->rs_total >> shift);
                unsigned long tmp = 1000UL - left * 1000UL/total;
@@ -2306,12 +2326,12 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
        case D_OUTDATED:
        case D_CONSISTENT:
        case D_UP_TO_DATE:
+       case D_FAILED:
                /* disk state is stable as well. */
                break;
 
        /* no new io accepted during tansitional states */
        case D_ATTACHING:
-       case D_FAILED:
        case D_NEGOTIATING:
        case D_UNKNOWN:
        case D_MASK:
index 211fc44f84be6ddda0112dbf2ad8a55ca1810c47..920ede2829d6c5e467e177ac43a3e97e9f550aac 100644 (file)
@@ -139,6 +139,8 @@ struct kmem_cache *drbd_bm_ext_cache;       /* bitmap extents */
 struct kmem_cache *drbd_al_ext_cache;  /* activity log extents */
 mempool_t *drbd_request_mempool;
 mempool_t *drbd_ee_mempool;
+mempool_t *drbd_md_io_page_pool;
+struct bio_set *drbd_md_io_bio_set;
 
 /* I do not use a standard mempool, because:
    1) I want to hand out the pre-allocated objects first.
@@ -159,7 +161,24 @@ static const struct block_device_operations drbd_ops = {
        .release = drbd_release,
 };
 
-#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
+static void bio_destructor_drbd(struct bio *bio)
+{
+       bio_free(bio, drbd_md_io_bio_set);
+}
+
+struct bio *bio_alloc_drbd(gfp_t gfp_mask)
+{
+       struct bio *bio;
+
+       if (!drbd_md_io_bio_set)
+               return bio_alloc(gfp_mask, 1);
+
+       bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
+       if (!bio)
+               return NULL;
+       bio->bi_destructor = bio_destructor_drbd;
+       return bio;
+}
 
 #ifdef __CHECKER__
 /* When checking with sparse, and this is an inline function, sparse will
@@ -208,6 +227,7 @@ static int tl_init(struct drbd_conf *mdev)
        mdev->oldest_tle = b;
        mdev->newest_tle = b;
        INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
+       INIT_LIST_HEAD(&mdev->barrier_acked_requests);
 
        mdev->tl_hash = NULL;
        mdev->tl_hash_s = 0;
@@ -246,9 +266,7 @@ void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
        new->n_writes = 0;
 
        newest_before = mdev->newest_tle;
-       /* never send a barrier number == 0, because that is special-cased
-        * when using TCQ for our write ordering code */
-       new->br_number = (newest_before->br_number+1) ?: 1;
+       new->br_number = newest_before->br_number+1;
        if (mdev->newest_tle != new) {
                mdev->newest_tle->next = new;
                mdev->newest_tle = new;
@@ -311,7 +329,7 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
           These have been list_move'd to the out_of_sequence_requests list in
           _req_mod(, barrier_acked) above.
           */
-       list_del_init(&b->requests);
+       list_splice_init(&b->requests, &mdev->barrier_acked_requests);
 
        nob = b->next;
        if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
@@ -411,6 +429,23 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
                b = tmp;
                list_splice(&carry_reads, &b->requests);
        }
+
+       /* Actions operating on the disk state, also want to work on
+          requests that got barrier acked. */
+       switch (what) {
+       case fail_frozen_disk_io:
+       case restart_frozen_disk_io:
+               list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
+                       req = list_entry(le, struct drbd_request, tl_requests);
+                       _req_mod(req, what);
+               }
+
+       case connection_lost_while_pending:
+       case resend:
+               break;
+       default:
+               dev_err(DEV, "what = %d in _tl_restart()\n", what);
+       }
 }
 
 
@@ -457,6 +492,38 @@ void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
        spin_unlock_irq(&mdev->req_lock);
 }
 
+/**
+ * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
+ * @mdev:      DRBD device.
+ */
+void tl_abort_disk_io(struct drbd_conf *mdev)
+{
+       struct drbd_tl_epoch *b;
+       struct list_head *le, *tle;
+       struct drbd_request *req;
+
+       spin_lock_irq(&mdev->req_lock);
+       b = mdev->oldest_tle;
+       while (b) {
+               list_for_each_safe(le, tle, &b->requests) {
+                       req = list_entry(le, struct drbd_request, tl_requests);
+                       if (!(req->rq_state & RQ_LOCAL_PENDING))
+                               continue;
+                       _req_mod(req, abort_disk_io);
+               }
+               b = b->next;
+       }
+
+       list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
+               req = list_entry(le, struct drbd_request, tl_requests);
+               if (!(req->rq_state & RQ_LOCAL_PENDING))
+                       continue;
+               _req_mod(req, abort_disk_io);
+       }
+
+       spin_unlock_irq(&mdev->req_lock);
+}
+
 /**
  * cl_wide_st_chg() - true if the state change is a cluster wide one
  * @mdev:      DRBD device.
@@ -470,7 +537,7 @@ static int cl_wide_st_chg(struct drbd_conf *mdev,
                 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
                  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
                  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
-                 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
+                 (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
                (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
                (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
 }
@@ -509,8 +576,16 @@ static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
 static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
                                                    union drbd_state,
                                                    union drbd_state);
+enum sanitize_state_warnings {
+       NO_WARNING,
+       ABORTED_ONLINE_VERIFY,
+       ABORTED_RESYNC,
+       CONNECTION_LOST_NEGOTIATING,
+       IMPLICITLY_UPGRADED_DISK,
+       IMPLICITLY_UPGRADED_PDSK,
+};
 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
-                                      union drbd_state ns, const char **warn_sync_abort);
+                                      union drbd_state ns, enum sanitize_state_warnings *warn);
 int drbd_send_state_req(struct drbd_conf *,
                        union drbd_state, union drbd_state);
 
@@ -785,6 +860,13 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
        if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
                rv = SS_IN_TRANSIENT_STATE;
 
+       /* While establishing a connection only allow cstate to change.
+          Delay/refuse role changes, detach attach etc... */
+       if (test_bit(STATE_SENT, &mdev->flags) &&
+           !(os.conn == C_WF_REPORT_PARAMS ||
+             (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
+               rv = SS_IN_TRANSIENT_STATE;
+
        if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
                rv = SS_NEED_CONNECTION;
 
@@ -803,6 +885,21 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
        return rv;
 }
 
+static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
+{
+       static const char *msg_table[] = {
+               [NO_WARNING] = "",
+               [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
+               [ABORTED_RESYNC] = "Resync aborted.",
+               [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
+               [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
+               [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
+       };
+
+       if (warn != NO_WARNING)
+               dev_warn(DEV, "%s\n", msg_table[warn]);
+}
+
 /**
  * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
  * @mdev:      DRBD device.
@@ -814,11 +911,14 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
  * to D_UNKNOWN. This rule and many more along those lines are in this function.
  */
 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
-                                      union drbd_state ns, const char **warn_sync_abort)
+                                      union drbd_state ns, enum sanitize_state_warnings *warn)
 {
        enum drbd_fencing_p fp;
        enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
 
+       if (warn)
+               *warn = NO_WARNING;
+
        fp = FP_DONT_CARE;
        if (get_ldev(mdev)) {
                fp = mdev->ldev->dc.fencing;
@@ -833,18 +933,13 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
        /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
         * If you try to go into some Sync* state, that shall fail (elsewhere). */
        if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
-           ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
+           ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_CONNECTED)
                ns.conn = os.conn;
 
        /* we cannot fail (again) if we already detached */
        if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
                ns.disk = D_DISKLESS;
 
-       /* if we are only D_ATTACHING yet,
-        * we can (and should) go directly to D_DISKLESS. */
-       if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
-               ns.disk = D_DISKLESS;
-
        /* After C_DISCONNECTING only C_STANDALONE may follow */
        if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
                ns.conn = os.conn;
@@ -863,10 +958,9 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
        /* Abort resync if a disk fails/detaches */
        if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
            (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
-               if (warn_sync_abort)
-                       *warn_sync_abort =
-                               os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
-                               "Online-verify" : "Resync";
+               if (warn)
+                       *warn = os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
+                               ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
                ns.conn = C_CONNECTED;
        }
 
@@ -877,7 +971,8 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
                        ns.disk = mdev->new_state_tmp.disk;
                        ns.pdsk = mdev->new_state_tmp.pdsk;
                } else {
-                       dev_alert(DEV, "Connection lost while negotiating, no data!\n");
+                       if (warn)
+                               *warn = CONNECTION_LOST_NEGOTIATING;
                        ns.disk = D_DISKLESS;
                        ns.pdsk = D_UNKNOWN;
                }
@@ -959,16 +1054,16 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
                ns.disk = disk_max;
 
        if (ns.disk < disk_min) {
-               dev_warn(DEV, "Implicitly set disk from %s to %s\n",
-                        drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
+               if (warn)
+                       *warn = IMPLICITLY_UPGRADED_DISK;
                ns.disk = disk_min;
        }
        if (ns.pdsk > pdsk_max)
                ns.pdsk = pdsk_max;
 
        if (ns.pdsk < pdsk_min) {
-               dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
-                        drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
+               if (warn)
+                       *warn = IMPLICITLY_UPGRADED_PDSK;
                ns.pdsk = pdsk_min;
        }
 
@@ -1045,12 +1140,12 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
 {
        union drbd_state os;
        enum drbd_state_rv rv = SS_SUCCESS;
-       const char *warn_sync_abort = NULL;
+       enum sanitize_state_warnings ssw;
        struct after_state_chg_work *ascw;
 
        os = mdev->state;
 
-       ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
+       ns = sanitize_state(mdev, os, ns, &ssw);
 
        if (ns.i == os.i)
                return SS_NOTHING_TO_DO;
@@ -1076,8 +1171,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
                return rv;
        }
 
-       if (warn_sync_abort)
-               dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
+       print_sanitize_warnings(mdev, ssw);
 
        {
        char *pbp, pb[300];
@@ -1243,7 +1337,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
                drbd_thread_stop_nowait(&mdev->receiver);
 
        /* Upon network failure, we need to restart the receiver. */
-       if (os.conn > C_TEAR_DOWN &&
+       if (os.conn > C_WF_CONNECTION &&
            ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
                drbd_thread_restart_nowait(&mdev->receiver);
 
@@ -1251,6 +1345,15 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
        if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
                drbd_resume_al(mdev);
 
+       /* remember last connect and attach times so request_timer_fn() won't
+        * kill newly established sessions while we are still trying to thaw
+        * previously frozen IO */
+       if (os.conn != C_WF_REPORT_PARAMS && ns.conn == C_WF_REPORT_PARAMS)
+               mdev->last_reconnect_jif = jiffies;
+       if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+           ns.disk > D_NEGOTIATING)
+               mdev->last_reattach_jif = jiffies;
+
        ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
        if (ascw) {
                ascw->os = os;
@@ -1354,12 +1457,16 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
        /* Here we have the actions that are performed after a
           state change. This function might sleep */
 
+       if (os.disk <= D_NEGOTIATING && ns.disk > D_NEGOTIATING)
+               mod_timer(&mdev->request_timer, jiffies + HZ);
+
        nsm.i = -1;
        if (ns.susp_nod) {
                if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
                        what = resend;
 
-               if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
+               if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+                   ns.disk > D_NEGOTIATING)
                        what = restart_frozen_disk_io;
 
                if (what != nothing)
@@ -1408,7 +1515,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
        /* Do not change the order of the if above and the two below... */
        if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
                drbd_send_uuids(mdev);
-               drbd_send_state(mdev);
+               drbd_send_state(mdev, ns);
        }
        /* No point in queuing send_bitmap if we don't have a connection
         * anymore, so check also the _current_ state, not only the new state
@@ -1441,11 +1548,11 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
        }
 
        if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
-               if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
+               if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
+                   mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
                        drbd_uuid_new_current(mdev);
                        drbd_send_uuids(mdev);
                }
-
                /* D_DISKLESS Peer becomes secondary */
                if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
                        /* We may still be Primary ourselves.
@@ -1473,14 +1580,14 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
            os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
                drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
                drbd_send_uuids(mdev);
-               drbd_send_state(mdev);
+               drbd_send_state(mdev, ns);
        }
 
        /* We want to pause/continue resync, tell peer. */
        if (ns.conn >= C_CONNECTED &&
             ((os.aftr_isp != ns.aftr_isp) ||
              (os.user_isp != ns.user_isp)))
-               drbd_send_state(mdev);
+               drbd_send_state(mdev, ns);
 
        /* In case one of the isp bits got set, suspend other devices. */
        if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
@@ -1490,10 +1597,10 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
        /* Make sure the peer gets informed about eventual state
           changes (ISP bits) while we were in WFReportParams. */
        if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
-               drbd_send_state(mdev);
+               drbd_send_state(mdev, ns);
 
        if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
-               drbd_send_state(mdev);
+               drbd_send_state(mdev, ns);
 
        /* We are in the progress to start a full sync... */
        if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
@@ -1513,33 +1620,38 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
        /* first half of local IO error, failure to attach,
         * or administrative detach */
        if (os.disk != D_FAILED && ns.disk == D_FAILED) {
-               enum drbd_io_error_p eh;
-               int was_io_error;
+               enum drbd_io_error_p eh = EP_PASS_ON;
+               int was_io_error = 0;
                /* corresponding get_ldev was in __drbd_set_state, to serialize
-                * our cleanup here with the transition to D_DISKLESS,
-                * so it is safe to dreference ldev here. */
-               eh = mdev->ldev->dc.on_io_error;
-               was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
-
-               /* current state still has to be D_FAILED,
-                * there is only one way out: to D_DISKLESS,
-                * and that may only happen after our put_ldev below. */
-               if (mdev->state.disk != D_FAILED)
-                       dev_err(DEV,
-                               "ASSERT FAILED: disk is %s during detach\n",
-                               drbd_disk_str(mdev->state.disk));
-
-               if (drbd_send_state(mdev))
-                       dev_warn(DEV, "Notified peer that I am detaching my disk\n");
-               else
-                       dev_err(DEV, "Sending state for detaching disk failed\n");
-
-               drbd_rs_cancel_all(mdev);
-
-               /* In case we want to get something to stable storage still,
-                * this may be the last chance.
-                * Following put_ldev may transition to D_DISKLESS. */
-               drbd_md_sync(mdev);
+                * our cleanup here with the transition to D_DISKLESS.
+                * But it is still not safe to dereference ldev here, since
+                * we might come from a failed Attach before ldev was set. */
+               if (mdev->ldev) {
+                       eh = mdev->ldev->dc.on_io_error;
+                       was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+                       /* Immediately allow completion of all application I/O
+                          that waits for completion from the local disk. */
+                       tl_abort_disk_io(mdev);
+
+                       /* current state still has to be D_FAILED,
+                        * there is only one way out: to D_DISKLESS,
+                        * and that may only happen after our put_ldev below. */
+                       if (mdev->state.disk != D_FAILED)
+                               dev_err(DEV,
+                                       "ASSERT FAILED: disk is %s during detach\n",
+                                       drbd_disk_str(mdev->state.disk));
+
+                       if (ns.conn >= C_CONNECTED)
+                               drbd_send_state(mdev, ns);
+
+                       drbd_rs_cancel_all(mdev);
+
+                       /* In case we want to get something to stable storage still,
+                        * this may be the last chance.
+                        * Following put_ldev may transition to D_DISKLESS. */
+                       drbd_md_sync(mdev);
+               }
                put_ldev(mdev);
 
                if (was_io_error && eh == EP_CALL_HELPER)
@@ -1561,16 +1673,17 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                 mdev->rs_failed = 0;
                 atomic_set(&mdev->rs_pending_cnt, 0);
 
-               if (drbd_send_state(mdev))
-                       dev_warn(DEV, "Notified peer that I'm now diskless.\n");
+               if (ns.conn >= C_CONNECTED)
+                       drbd_send_state(mdev, ns);
+
                /* corresponding get_ldev in __drbd_set_state
                 * this may finally trigger drbd_ldev_destroy. */
                put_ldev(mdev);
        }
 
        /* Notify peer that I had a local IO error, and did not detached.. */
-       if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
-               drbd_send_state(mdev);
+       if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
+               drbd_send_state(mdev, ns);
 
        /* Disks got bigger while they were detached */
        if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
@@ -1588,7 +1701,13 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
        /* sync target done with resync.  Explicitly notify peer, even though
         * it should (at least for non-empty resyncs) already know itself. */
        if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
-               drbd_send_state(mdev);
+               drbd_send_state(mdev, ns);
+
+       /* Wake up role changes, that were delayed because of connection establishing */
+       if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS) {
+               clear_bit(STATE_SENT, &mdev->flags);
+               wake_up(&mdev->state_wait);
+       }
 
        /* This triggers bitmap writeout of potentially still unwritten pages
         * if the resync finished cleanly, or aborted because of peer disk
@@ -1598,8 +1717,8 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
         * No harm done if some bits change during this phase.
         */
        if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
-               drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
-                       "write from resync_finished", BM_LOCKED_SET_ALLOWED);
+               drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
+                       "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
                put_ldev(mdev);
        }
 
@@ -2057,7 +2176,11 @@ int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
 
        D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
 
-       uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
+       uuid = mdev->ldev->md.uuid[UI_BITMAP];
+       if (uuid && uuid != UUID_JUST_CREATED)
+               uuid = uuid + UUID_NEW_BM_OFFSET;
+       else
+               get_random_bytes(&uuid, sizeof(u64));
        drbd_uuid_set(mdev, UI_BITMAP, uuid);
        drbd_print_uuids(mdev, "updated sync UUID");
        drbd_md_sync(mdev);
@@ -2089,6 +2212,10 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
                max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
        }
 
+       /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
+       if (mdev->agreed_pro_version <= 94)
+               max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+
        p.d_size = cpu_to_be64(d_size);
        p.u_size = cpu_to_be64(u_size);
        p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
@@ -2102,10 +2229,10 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
 }
 
 /**
- * drbd_send_state() - Sends the drbd state to the peer
+ * drbd_send_current_state() - Sends the drbd state to the peer
  * @mdev:      DRBD device.
  */
-int drbd_send_state(struct drbd_conf *mdev)
+int drbd_send_current_state(struct drbd_conf *mdev)
 {
        struct socket *sock;
        struct p_state p;
@@ -2131,6 +2258,37 @@ int drbd_send_state(struct drbd_conf *mdev)
        return ok;
 }
 
+/**
+ * drbd_send_state() - After a state change, sends the new state to the peer
+ * @mdev:      DRBD device.
+ * @state:     the state to send, not necessarily the current state.
+ *
+ * Each state change queues an "after_state_ch" work, which will eventually
+ * send the resulting new state to the peer. If more state changes happen
+ * between queuing and processing of the after_state_ch work, we still
+ * want to send each intermediary state in the order it occurred.
+ */
+int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
+{
+       struct socket *sock;
+       struct p_state p;
+       int ok = 0;
+
+       mutex_lock(&mdev->data.mutex);
+
+       p.state = cpu_to_be32(state.i);
+       sock = mdev->data.socket;
+
+       if (likely(sock != NULL)) {
+               ok = _drbd_send_cmd(mdev, sock, P_STATE,
+                                   (struct p_header80 *)&p, sizeof(p), 0);
+       }
+
+       mutex_unlock(&mdev->data.mutex);
+
+       return ok;
+}
+
 int drbd_send_state_req(struct drbd_conf *mdev,
        union drbd_state mask, union drbd_state val)
 {
@@ -2615,7 +2773,7 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
        struct bio_vec *bvec;
        int i;
        /* hint all but last page with MSG_MORE */
-       __bio_for_each_segment(bvec, bio, i, 0) {
+       bio_for_each_segment(bvec, bio, i) {
                if (!_drbd_no_send_page(mdev, bvec->bv_page,
                                     bvec->bv_offset, bvec->bv_len,
                                     i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
@@ -2629,7 +2787,7 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
        struct bio_vec *bvec;
        int i;
        /* hint all but last page with MSG_MORE */
-       __bio_for_each_segment(bvec, bio, i, 0) {
+       bio_for_each_segment(bvec, bio, i) {
                if (!_drbd_send_page(mdev, bvec->bv_page,
                                     bvec->bv_offset, bvec->bv_len,
                                     i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
@@ -2695,8 +2853,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
 
        p.sector   = cpu_to_be64(req->sector);
        p.block_id = (unsigned long)req;
-       p.seq_num  = cpu_to_be32(req->seq_num =
-                                atomic_add_return(1, &mdev->packet_seq));
+       p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
 
        dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
 
@@ -2987,8 +3144,8 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
        atomic_set(&mdev->rs_sect_in, 0);
        atomic_set(&mdev->rs_sect_ev, 0);
        atomic_set(&mdev->ap_in_flight, 0);
+       atomic_set(&mdev->md_io_in_use, 0);
 
-       mutex_init(&mdev->md_io_mutex);
        mutex_init(&mdev->data.mutex);
        mutex_init(&mdev->meta.mutex);
        sema_init(&mdev->data.work.s, 0);
@@ -3126,6 +3283,10 @@ static void drbd_destroy_mempools(void)
 
        /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
 
+       if (drbd_md_io_bio_set)
+               bioset_free(drbd_md_io_bio_set);
+       if (drbd_md_io_page_pool)
+               mempool_destroy(drbd_md_io_page_pool);
        if (drbd_ee_mempool)
                mempool_destroy(drbd_ee_mempool);
        if (drbd_request_mempool)
@@ -3139,6 +3300,8 @@ static void drbd_destroy_mempools(void)
        if (drbd_al_ext_cache)
                kmem_cache_destroy(drbd_al_ext_cache);
 
+       drbd_md_io_bio_set   = NULL;
+       drbd_md_io_page_pool = NULL;
        drbd_ee_mempool      = NULL;
        drbd_request_mempool = NULL;
        drbd_ee_cache        = NULL;
@@ -3162,6 +3325,8 @@ static int drbd_create_mempools(void)
        drbd_bm_ext_cache    = NULL;
        drbd_al_ext_cache    = NULL;
        drbd_pp_pool         = NULL;
+       drbd_md_io_page_pool = NULL;
+       drbd_md_io_bio_set   = NULL;
 
        /* caches */
        drbd_request_cache = kmem_cache_create(
@@ -3185,6 +3350,16 @@ static int drbd_create_mempools(void)
                goto Enomem;
 
        /* mempools */
+#ifdef COMPAT_HAVE_BIOSET_CREATE
+       drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
+       if (drbd_md_io_bio_set == NULL)
+               goto Enomem;
+#endif
+
+       drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
+       if (drbd_md_io_page_pool == NULL)
+               goto Enomem;
+
        drbd_request_mempool = mempool_create(number,
                mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
        if (drbd_request_mempool == NULL)
@@ -3262,6 +3437,8 @@ static void drbd_delete_device(unsigned int minor)
        if (!mdev)
                return;
 
+       del_timer_sync(&mdev->request_timer);
+
        /* paranoia asserts */
        if (mdev->open_cnt != 0)
                dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
@@ -3666,8 +3843,10 @@ void drbd_md_sync(struct drbd_conf *mdev)
        if (!get_ldev_if_state(mdev, D_FAILED))
                return;
 
-       mutex_lock(&mdev->md_io_mutex);
-       buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
+       buffer = drbd_md_get_buffer(mdev);
+       if (!buffer)
+               goto out;
+
        memset(buffer, 0, 512);
 
        buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
@@ -3698,7 +3877,8 @@ void drbd_md_sync(struct drbd_conf *mdev)
         * since we updated it on metadata. */
        mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
 
-       mutex_unlock(&mdev->md_io_mutex);
+       drbd_md_put_buffer(mdev);
+out:
        put_ldev(mdev);
 }
 
@@ -3718,8 +3898,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
        if (!get_ldev_if_state(mdev, D_ATTACHING))
                return ERR_IO_MD_DISK;
 
-       mutex_lock(&mdev->md_io_mutex);
-       buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
+       buffer = drbd_md_get_buffer(mdev);
+       if (!buffer)
+               goto out;
 
        if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
                /* NOTE: can't do normal error processing here as this is
@@ -3780,7 +3961,8 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
                mdev->sync_conf.al_extents = 127;
 
  err:
-       mutex_unlock(&mdev->md_io_mutex);
+       drbd_md_put_buffer(mdev);
+ out:
        put_ldev(mdev);
 
        return rv;
@@ -4183,12 +4365,11 @@ const char *drbd_buildtag(void)
        static char buildtag[38] = "\0uilt-in";
 
        if (buildtag[0] == 0) {
-#ifdef CONFIG_MODULES
-               if (THIS_MODULE != NULL)
-                       sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
-               else
+#ifdef MODULE
+               sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
+#else
+               buildtag[0] = 'b';
 #endif
-                       buildtag[0] = 'b';
        }
 
        return buildtag;
index 946166e13953cca1d6c64d3feb9757d60690922d..6d4de6a72e8069018b2b4d050b7ef2aa8b259259 100644 (file)
@@ -289,7 +289,7 @@ static int _try_outdate_peer_async(void *data)
        */
        spin_lock_irq(&mdev->req_lock);
        ns = mdev->state;
-       if (ns.conn < C_WF_REPORT_PARAMS) {
+       if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
                ns.pdsk = nps;
                _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
        }
@@ -432,7 +432,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                /* if this was forced, we should consider sync */
                if (forced)
                        drbd_send_uuids(mdev);
-               drbd_send_state(mdev);
+               drbd_send_current_state(mdev);
        }
 
        drbd_md_sync(mdev);
@@ -845,9 +845,10 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
           Because new from 8.3.8 onwards the peer can use multiple
           BIOs for a single peer_request */
        if (mdev->state.conn >= C_CONNECTED) {
-               if (mdev->agreed_pro_version < 94)
-                       peer = mdev->peer_max_bio_size;
-               else if (mdev->agreed_pro_version == 94)
+               if (mdev->agreed_pro_version < 94) {
+                       peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+                       /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
+               } else if (mdev->agreed_pro_version == 94)
                        peer = DRBD_MAX_SIZE_H80_PACKET;
                else /* drbd 8.3.8 onwards */
                        peer = DRBD_MAX_BIO_SIZE;
@@ -1032,7 +1033,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
                dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
                        (unsigned long long) drbd_get_max_capacity(nbc),
                        (unsigned long long) nbc->dc.disk_size);
-               retcode = ERR_DISK_TO_SMALL;
+               retcode = ERR_DISK_TOO_SMALL;
                goto fail;
        }
 
@@ -1046,7 +1047,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        }
 
        if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
-               retcode = ERR_MD_DISK_TO_SMALL;
+               retcode = ERR_MD_DISK_TOO_SMALL;
                dev_warn(DEV, "refusing attach: md-device too small, "
                     "at least %llu sectors needed for this meta-disk type\n",
                     (unsigned long long) min_md_device_sectors);
@@ -1057,7 +1058,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
         * (we may currently be R_PRIMARY with no local disk...) */
        if (drbd_get_max_capacity(nbc) <
            drbd_get_capacity(mdev->this_bdev)) {
-               retcode = ERR_DISK_TO_SMALL;
+               retcode = ERR_DISK_TOO_SMALL;
                goto fail;
        }
 
@@ -1138,7 +1139,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
            drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
                dev_warn(DEV, "refusing to truncate a consistent device\n");
-               retcode = ERR_DISK_TO_SMALL;
+               retcode = ERR_DISK_TOO_SMALL;
                goto force_diskless_dec;
        }
 
@@ -1336,17 +1337,34 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 {
        enum drbd_ret_code retcode;
        int ret;
+       struct detach dt = {};
+
+       if (!detach_from_tags(mdev, nlp->tag_list, &dt)) {
+               reply->ret_code = ERR_MANDATORY_TAG;
+               goto out;
+       }
+
+       if (dt.detach_force) {
+               drbd_force_state(mdev, NS(disk, D_FAILED));
+               reply->ret_code = SS_SUCCESS;
+               goto out;
+       }
+
        drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
+       drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
        retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
+       drbd_md_put_buffer(mdev);
        /* D_FAILED will transition to DISKLESS. */
        ret = wait_event_interruptible(mdev->misc_wait,
                        mdev->state.disk != D_FAILED);
        drbd_resume_io(mdev);
+
        if ((int)retcode == (int)SS_IS_DISKLESS)
                retcode = SS_NOTHING_TO_DO;
        if (ret)
                retcode = ERR_INTR;
        reply->ret_code = retcode;
+out:
        return 0;
 }
 
@@ -1711,7 +1729,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 
        if (rs.no_resync && mdev->agreed_pro_version < 93) {
                retcode = ERR_NEED_APV_93;
-               goto fail;
+               goto fail_ldev;
        }
 
        if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
@@ -1738,6 +1756,10 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
  fail:
        reply->ret_code = retcode;
        return 0;
+
+ fail_ldev:
+       put_ldev(mdev);
+       goto fail;
 }
 
 static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
@@ -1941,6 +1963,7 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 
        /* If there is still bitmap IO pending, probably because of a previous
         * resync just being finished, wait for it before requesting a new resync. */
+       drbd_suspend_io(mdev);
        wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
 
        retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
@@ -1959,6 +1982,7 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 
                retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
        }
+       drbd_resume_io(mdev);
 
        reply->ret_code = retcode;
        return 0;
@@ -1980,6 +2004,7 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
 
        /* If there is still bitmap IO pending, probably because of a previous
         * resync just being finished, wait for it before requesting a new resync. */
+       drbd_suspend_io(mdev);
        wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
 
        retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
@@ -1998,6 +2023,7 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
                } else
                        retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
        }
+       drbd_resume_io(mdev);
 
        reply->ret_code = retcode;
        return 0;
@@ -2170,11 +2196,13 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 
        /* If there is still bitmap IO pending, e.g. previous resync or verify
         * just being finished, wait for it before requesting a new resync. */
+       drbd_suspend_io(mdev);
        wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
 
        /* w_make_ov_request expects position to be aligned */
        mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
        reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
+       drbd_resume_io(mdev);
        return 0;
 }
 
index 2959cdfb77f556e0bed2a8131eb69c69cbfed84f..869bada2ed06838a656d431584afde516a9cd115 100644 (file)
@@ -52,7 +52,7 @@ void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
        if (unlikely(v >= 1000000)) {
                /* cool: > GiByte/s */
                seq_printf(seq, "%ld,", v / 1000000);
-               v /= 1000000;
+               v %= 1000000;
                seq_printf(seq, "%03ld,%03ld", v/1000, v % 1000);
        } else if (likely(v >= 1000))
                seq_printf(seq, "%ld,%03ld", v/1000, v % 1000);
index 436f519bed1c4190168d3f847a9dc692153ca89a..ea4836e0ae9829e12206e482cc50b70678a3e4aa 100644 (file)
@@ -466,6 +466,7 @@ static int drbd_accept(struct drbd_conf *mdev, const char **what,
                goto out;
        }
        (*newsock)->ops  = sock->ops;
+       __module_get((*newsock)->ops->owner);
 
 out:
        return err;
@@ -750,6 +751,7 @@ static int drbd_connect(struct drbd_conf *mdev)
 {
        struct socket *s, *sock, *msock;
        int try, h, ok;
+       enum drbd_state_rv rv;
 
        D_ASSERT(!mdev->data.socket);
 
@@ -888,25 +890,32 @@ retry:
                }
        }
 
-       if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
-               return 0;
-
        sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
        sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
 
        atomic_set(&mdev->packet_seq, 0);
        mdev->peer_seq = 0;
 
-       drbd_thread_start(&mdev->asender);
-
        if (drbd_send_protocol(mdev) == -1)
                return -1;
+       set_bit(STATE_SENT, &mdev->flags);
        drbd_send_sync_param(mdev, &mdev->sync_conf);
        drbd_send_sizes(mdev, 0, 0);
        drbd_send_uuids(mdev);
-       drbd_send_state(mdev);
+       drbd_send_current_state(mdev);
        clear_bit(USE_DEGR_WFC_T, &mdev->flags);
        clear_bit(RESIZE_PENDING, &mdev->flags);
+
+       spin_lock_irq(&mdev->req_lock);
+       rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
+       if (mdev->state.conn != C_WF_REPORT_PARAMS)
+               clear_bit(STATE_SENT, &mdev->flags);
+       spin_unlock_irq(&mdev->req_lock);
+
+       if (rv < SS_SUCCESS)
+               return 0;
+
+       drbd_thread_start(&mdev->asender);
        mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
 
        return 1;
@@ -957,7 +966,7 @@ static void drbd_flush(struct drbd_conf *mdev)
                rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
                                        NULL);
                if (rv) {
-                       dev_err(DEV, "local disk flush failed with status %d\n", rv);
+                       dev_info(DEV, "local disk flush failed with status %d\n", rv);
                        /* would rather check on EOPNOTSUPP, but that is not reliable.
                         * don't try again for ANY return value != 0
                         * if (rv == -EOPNOTSUPP) */
@@ -1001,13 +1010,14 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 
                if (epoch_size != 0 &&
                    atomic_read(&epoch->active) == 0 &&
-                   test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
+                   (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
                        if (!(ev & EV_CLEANUP)) {
                                spin_unlock(&mdev->epoch_lock);
                                drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
                                spin_lock(&mdev->epoch_lock);
                        }
-                       dec_unacked(mdev);
+                       if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
+                               dec_unacked(mdev);
 
                        if (mdev->current_epoch != epoch) {
                                next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
@@ -1096,7 +1106,11 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
        /* In most cases, we will only need one bio.  But in case the lower
         * level restrictions happen to be different at this offset on this
         * side than those of the sending peer, we may need to submit the
-        * request in more than one bio. */
+        * request in more than one bio.
+        *
+        * Plain bio_alloc is good enough here; this is not a DRBD-internally
+        * generated bio, but a bio allocated on behalf of the peer.
+        */
 next_bio:
        bio = bio_alloc(GFP_NOIO, nr_pages);
        if (!bio) {
@@ -1583,6 +1597,24 @@ static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int u
        return ok;
 }
 
+static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_entry *data_e)
+{
+
+       struct drbd_epoch_entry *rs_e;
+       bool rv = 0;
+
+       spin_lock_irq(&mdev->req_lock);
+       list_for_each_entry(rs_e, &mdev->sync_ee, w.list) {
+               if (overlaps(data_e->sector, data_e->size, rs_e->sector, rs_e->size)) {
+                       rv = 1;
+                       break;
+               }
+       }
+       spin_unlock_irq(&mdev->req_lock);
+
+       return rv;
+}
+
 /* Called from receive_Data.
  * Synchronize packets on sock with packets on msock.
  *
@@ -1826,6 +1858,9 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
        list_add(&e->w.list, &mdev->active_ee);
        spin_unlock_irq(&mdev->req_lock);
 
+       if (mdev->state.conn == C_SYNC_TARGET)
+               wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, e));
+
        switch (mdev->net_conf->wire_protocol) {
        case DRBD_PROT_C:
                inc_unacked(mdev);
@@ -2420,7 +2455,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
                        mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
                        mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
 
-                       dev_info(DEV, "Did not got last syncUUID packet, corrected:\n");
+                       dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
                        drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
 
                        return -1;
@@ -2806,10 +2841,10 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 
        if (apv >= 88) {
                if (apv == 88) {
-                       if (data_size > SHARED_SECRET_MAX) {
-                               dev_err(DEV, "verify-alg too long, "
-                                   "peer wants %u, accepting only %u byte\n",
-                                               data_size, SHARED_SECRET_MAX);
+                       if (data_size > SHARED_SECRET_MAX || data_size == 0) {
+                               dev_err(DEV, "verify-alg of wrong size, "
+                                       "peer wants %u, accepting only up to %u byte\n",
+                                       data_size, SHARED_SECRET_MAX);
                                return false;
                        }
 
@@ -3168,9 +3203,20 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
        os = ns = mdev->state;
        spin_unlock_irq(&mdev->req_lock);
 
-       /* peer says his disk is uptodate, while we think it is inconsistent,
-        * and this happens while we think we have a sync going on. */
-       if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
+       /* If some other part of the code (asender thread, timeout)
+        * already decided to close the connection again,
+        * we must not "re-establish" it here. */
+       if (os.conn <= C_TEAR_DOWN)
+               return false;
+
+       /* If this is the "end of sync" confirmation, usually the peer disk
+        * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
+        * set) resync started in PausedSyncT, or if the timing of pause-/
+        * unpause-sync events has been "just right", the peer disk may
+        * transition from D_CONSISTENT to D_UP_TO_DATE as well.
+        */
+       if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
+           real_peer_disk == D_UP_TO_DATE &&
            os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
                /* If we are (becoming) SyncSource, but peer is still in sync
                 * preparation, ignore its uptodate-ness to avoid flapping, it
@@ -3288,7 +3334,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                        /* Nowadays only used when forcing a node into primary role and
                           setting its disk to UpToDate with that */
                        drbd_send_uuids(mdev);
-                       drbd_send_state(mdev);
+                       drbd_send_current_state(mdev);
                }
        }
 
@@ -3776,6 +3822,13 @@ static void drbd_disconnect(struct drbd_conf *mdev)
        if (mdev->state.conn == C_STANDALONE)
                return;
 
+       /* We are about to start the cleanup after connection loss.
+        * Make sure drbd_make_request knows about that.
+        * Usually we should be in some network failure state already,
+        * but just in case we are not, we fix it up here.
+        */
+       drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
+
        /* asender does not clean up anything. it must not interfere, either */
        drbd_thread_stop(&mdev->asender);
        drbd_free_sock(mdev);
@@ -3803,8 +3856,6 @@ static void drbd_disconnect(struct drbd_conf *mdev)
        atomic_set(&mdev->rs_pending_cnt, 0);
        wake_up(&mdev->misc_wait);
 
-       del_timer(&mdev->request_timer);
-
        /* make sure syncer is stopped and w_resume_next_sg queued */
        del_timer_sync(&mdev->resync_timer);
        resync_timer_fn((unsigned long)mdev);
@@ -4433,7 +4484,7 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
 
        if (mdev->state.conn == C_AHEAD &&
            atomic_read(&mdev->ap_in_flight) == 0 &&
-           !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
+           !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
                mdev->start_resync_timer.expires = jiffies + HZ;
                add_timer(&mdev->start_resync_timer);
        }
index 4a0f314086e522f7c6172355d3c0c3b1d757c2ec..9c5c84946b056792fa45d99051bd5c28750db7e1 100644 (file)
@@ -37,6 +37,7 @@ static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req
        const int rw = bio_data_dir(bio);
        int cpu;
        cpu = part_stat_lock();
+       part_round_stats(cpu, &mdev->vdisk->part0);
        part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
        part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
        part_inc_in_flight(&mdev->vdisk->part0, rw);
@@ -214,8 +215,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
 {
        const unsigned long s = req->rq_state;
        struct drbd_conf *mdev = req->mdev;
-       /* only WRITES may end up here without a master bio (on barrier ack) */
-       int rw = req->master_bio ? bio_data_dir(req->master_bio) : WRITE;
+       int rw = req->rq_state & RQ_WRITE ? WRITE : READ;
 
        /* we must not complete the master bio, while it is
         *      still being processed by _drbd_send_zc_bio (drbd_send_dblock)
@@ -230,7 +230,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
                return;
        if (s & RQ_NET_PENDING)
                return;
-       if (s & RQ_LOCAL_PENDING)
+       if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
                return;
 
        if (req->master_bio) {
@@ -277,6 +277,9 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
                req->master_bio = NULL;
        }
 
+       if (s & RQ_LOCAL_PENDING)
+               return;
+
        if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
                /* this is disconnected (local only) operation,
                 * or protocol C P_WRITE_ACK,
@@ -429,7 +432,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                break;
 
        case completed_ok:
-               if (bio_data_dir(req->master_bio) == WRITE)
+               if (req->rq_state & RQ_WRITE)
                        mdev->writ_cnt += req->size>>9;
                else
                        mdev->read_cnt += req->size>>9;
@@ -438,7 +441,14 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                req->rq_state &= ~RQ_LOCAL_PENDING;
 
                _req_may_be_done_not_susp(req, m);
-               put_ldev(mdev);
+               break;
+
+       case abort_disk_io:
+               req->rq_state |= RQ_LOCAL_ABORTED;
+               if (req->rq_state & RQ_WRITE)
+                       _req_may_be_done_not_susp(req, m);
+               else
+                       goto goto_queue_for_net_read;
                break;
 
        case write_completed_with_error:
@@ -447,7 +457,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
                __drbd_chk_io_error(mdev, false);
                _req_may_be_done_not_susp(req, m);
-               put_ldev(mdev);
                break;
 
        case read_ahead_completed_with_error:
@@ -455,7 +464,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                req->rq_state |= RQ_LOCAL_COMPLETED;
                req->rq_state &= ~RQ_LOCAL_PENDING;
                _req_may_be_done_not_susp(req, m);
-               put_ldev(mdev);
                break;
 
        case read_completed_with_error:
@@ -467,7 +475,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                D_ASSERT(!(req->rq_state & RQ_NET_MASK));
 
                __drbd_chk_io_error(mdev, false);
-               put_ldev(mdev);
+
+       goto_queue_for_net_read:
 
                /* no point in retrying if there is no good remote data,
                 * or we have no connection. */
@@ -556,10 +565,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                drbd_queue_work(&mdev->data.work, &req->w);
                break;
 
-       case oos_handed_to_network:
-               /* actually the same */
+       case read_retry_remote_canceled:
        case send_canceled:
-               /* treat it the same */
        case send_failed:
                /* real cleanup will be done from tl_clear.  just update flags
                 * so it is no longer marked as on the worker queue */
@@ -589,17 +596,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                }
                req->rq_state &= ~RQ_NET_QUEUED;
                req->rq_state |= RQ_NET_SENT;
-               /* because _drbd_send_zc_bio could sleep, and may want to
-                * dereference the bio even after the "write_acked_by_peer" and
-                * "completed_ok" events came in, once we return from
-                * _drbd_send_zc_bio (drbd_send_dblock), we have to check
-                * whether it is done already, and end it.  */
                _req_may_be_done_not_susp(req, m);
                break;
 
-       case read_retry_remote_canceled:
+       case oos_handed_to_network:
+               /* Was not set PENDING, no longer QUEUED, so is now DONE
+                * as far as this connection is concerned. */
                req->rq_state &= ~RQ_NET_QUEUED;
-               /* fall through, in case we raced with drbd_disconnect */
+               req->rq_state |= RQ_NET_DONE;
+               _req_may_be_done_not_susp(req, m);
+               break;
+
        case connection_lost_while_pending:
                /* transfer log cleanup after connection loss */
                /* assert something? */
@@ -616,8 +623,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                        _req_may_be_done(req, m); /* Allowed while state.susp */
                break;
 
-       case write_acked_by_peer_and_sis:
-               req->rq_state |= RQ_NET_SIS;
        case conflict_discarded_by_peer:
                /* for discarded conflicting writes of multiple primaries,
                 * there is no need to keep anything in the tl, potential
@@ -628,18 +633,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                              (unsigned long long)req->sector, req->size);
                req->rq_state |= RQ_NET_DONE;
                /* fall through */
+       case write_acked_by_peer_and_sis:
        case write_acked_by_peer:
+               if (what == write_acked_by_peer_and_sis)
+                       req->rq_state |= RQ_NET_SIS;
                /* protocol C; successfully written on peer.
-                * Nothing to do here.
+                * Nothing more to do here.
                 * We want to keep the tl in place for all protocols, to cater
-                * for volatile write-back caches on lower level devices.
-                *
-                * A barrier request is expected to have forced all prior
-                * requests onto stable storage, so completion of a barrier
-                * request could set NET_DONE right here, and not wait for the
-                * P_BARRIER_ACK, but that is an unnecessary optimization. */
+                * for volatile write-back caches on lower level devices. */
 
-               /* this makes it effectively the same as for: */
        case recv_acked_by_peer:
                /* protocol B; pretends to be successfully written on peer.
                 * see also notes above in handed_over_to_network about
@@ -773,6 +775,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
        int local, remote, send_oos = 0;
        int err = -EIO;
        int ret = 0;
+       union drbd_state s;
 
        /* allocate outside of all locks; */
        req = drbd_req_new(mdev, bio);
@@ -834,8 +837,9 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
                drbd_al_begin_io(mdev, sector);
        }
 
-       remote = remote && drbd_should_do_remote(mdev->state);
-       send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
+       s = mdev->state;
+       remote = remote && drbd_should_do_remote(s);
+       send_oos = rw == WRITE && drbd_should_send_oos(s);
        D_ASSERT(!(remote && send_oos));
 
        if (!(local || remote) && !is_susp(mdev->state)) {
@@ -867,7 +871,7 @@ allocate_barrier:
 
        if (is_susp(mdev->state)) {
                /* If we got suspended, use the retry mechanism of
-                  generic_make_request() to restart processing of this
+                  drbd_make_request() to restart processing of this
                   bio. In the next call to drbd_make_request
                   we sleep in inc_ap_bio() */
                ret = 1;
@@ -1091,7 +1095,6 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
         */
        D_ASSERT(bio->bi_size > 0);
        D_ASSERT((bio->bi_size & 0x1ff) == 0);
-       D_ASSERT(bio->bi_idx == 0);
 
        /* to make some things easier, force alignment of requests within the
         * granularity of our hash tables */
@@ -1099,8 +1102,9 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
        e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT;
 
        if (likely(s_enr == e_enr)) {
-               inc_ap_bio(mdev, 1);
-               drbd_make_request_common(mdev, bio, start_time);
+               do {
+                       inc_ap_bio(mdev, 1);
+               } while (drbd_make_request_common(mdev, bio, start_time));
                return;
        }
 
@@ -1196,36 +1200,66 @@ void request_timer_fn(unsigned long data)
        struct drbd_conf *mdev = (struct drbd_conf *) data;
        struct drbd_request *req; /* oldest request */
        struct list_head *le;
-       unsigned long et = 0; /* effective timeout = ko_count * timeout */
+       unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
+       unsigned long now;
 
        if (get_net_conf(mdev)) {
-               et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count;
+               if (mdev->state.conn >= C_WF_REPORT_PARAMS)
+                       ent = mdev->net_conf->timeout*HZ/10
+                               * mdev->net_conf->ko_count;
                put_net_conf(mdev);
        }
-       if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
+       if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */
+               dt = mdev->ldev->dc.disk_timeout * HZ / 10;
+               put_ldev(mdev);
+       }
+       et = min_not_zero(dt, ent);
+
+       if (!et)
                return; /* Recurring timer stopped */
 
+       now = jiffies;
+
        spin_lock_irq(&mdev->req_lock);
        le = &mdev->oldest_tle->requests;
        if (list_empty(le)) {
                spin_unlock_irq(&mdev->req_lock);
-               mod_timer(&mdev->request_timer, jiffies + et);
+               mod_timer(&mdev->request_timer, now + et);
                return;
        }
 
        le = le->prev;
        req = list_entry(le, struct drbd_request, tl_requests);
-       if (time_is_before_eq_jiffies(req->start_time + et)) {
-               if (req->rq_state & RQ_NET_PENDING) {
-                       dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
-                       _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE, NULL);
-               } else {
-                       dev_warn(DEV, "Local backing block device frozen?\n");
-                       mod_timer(&mdev->request_timer, jiffies + et);
-               }
-       } else {
-               mod_timer(&mdev->request_timer, req->start_time + et);
-       }
 
+       /* The request is considered timed out, if
+        * - we have some effective timeout from the configuration,
+        *   with above state restrictions applied,
+        * - the oldest request is waiting for a response from the network
+        *   resp. the local disk,
+        * - the oldest request is in fact older than the effective timeout,
+        * - the connection was established (resp. disk was attached)
+        *   for longer than the timeout already.
+        * Note that for 32bit jiffies and very stable connections/disks,
+        * we may have a wrap around, which is catched by
+        *   !time_in_range(now, last_..._jif, last_..._jif + timeout).
+        *
+        * Side effect: once per 32bit wrap-around interval, which means every
+        * ~198 days with 250 HZ, we have a window where the timeout would need
+        * to expire twice (worst case) to become effective. Good enough.
+        */
+       if (ent && req->rq_state & RQ_NET_PENDING &&
+                time_after(now, req->start_time + ent) &&
+               !time_in_range(now, mdev->last_reconnect_jif, mdev->last_reconnect_jif + ent)) {
+               dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
+               _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
+       }
+       if (dt && req->rq_state & RQ_LOCAL_PENDING &&
+                time_after(now, req->start_time + dt) &&
+               !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
+               dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
+               __drbd_chk_io_error(mdev, 1);
+       }
+       nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
        spin_unlock_irq(&mdev->req_lock);
+       mod_timer(&mdev->request_timer, nt);
 }
index 68a234a5fdc5bc4ab19e2a5ab85cd198e588b9bd..3d211191948613c47736ee2f405918fb52df7392 100644 (file)
@@ -105,6 +105,7 @@ enum drbd_req_event {
        read_completed_with_error,
        read_ahead_completed_with_error,
        write_completed_with_error,
+       abort_disk_io,
        completed_ok,
        resend,
        fail_frozen_disk_io,
@@ -118,18 +119,21 @@ enum drbd_req_event {
  * same time, so we should hold the request lock anyways.
  */
 enum drbd_req_state_bits {
-       /* 210
-        * 000: no local possible
-        * 001: to be submitted
+       /* 3210
+        * 0000: no local possible
+        * 0001: to be submitted
         *    UNUSED, we could map: 011: submitted, completion still pending
-        * 110: completed ok
-        * 010: completed with error
+        * 0110: completed ok
+        * 0010: completed with error
+        * 1001: Aborted (before completion)
+        * 1x10: Aborted and completed -> free
         */
        __RQ_LOCAL_PENDING,
        __RQ_LOCAL_COMPLETED,
        __RQ_LOCAL_OK,
+       __RQ_LOCAL_ABORTED,
 
-       /* 76543
+       /* 87654
         * 00000: no network possible
         * 00001: to be send
         * 00011: to be send, on worker queue
@@ -199,8 +203,9 @@ enum drbd_req_state_bits {
 #define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
 #define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
 #define RQ_LOCAL_OK        (1UL << __RQ_LOCAL_OK)
+#define RQ_LOCAL_ABORTED   (1UL << __RQ_LOCAL_ABORTED)
 
-#define RQ_LOCAL_MASK      ((RQ_LOCAL_OK << 1)-1) /* 0x07 */
+#define RQ_LOCAL_MASK      ((RQ_LOCAL_ABORTED << 1)-1)
 
 #define RQ_NET_PENDING     (1UL << __RQ_NET_PENDING)
 #define RQ_NET_QUEUED      (1UL << __RQ_NET_QUEUED)
index 4d3e6f6213ba0436cc2eceff16b7876e58d952b6..620c70ff223118e6f259a200512203f5010e2cda 100644 (file)
@@ -70,11 +70,29 @@ rwlock_t global_state_lock;
 void drbd_md_io_complete(struct bio *bio, int error)
 {
        struct drbd_md_io *md_io;
+       struct drbd_conf *mdev;
 
        md_io = (struct drbd_md_io *)bio->bi_private;
+       mdev = container_of(md_io, struct drbd_conf, md_io);
+
        md_io->error = error;
 
-       complete(&md_io->event);
+       /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
+        * to timeout on the lower level device, and eventually detach from it.
+        * If this io completion runs after that timeout expired, this
+        * drbd_md_put_buffer() may allow us to finally try and re-attach.
+        * During normal operation, this only puts that extra reference
+        * down to 1 again.
+        * Make sure we first drop the reference, and only then signal
+        * completion, or we may (in drbd_al_read_log()) cycle so fast into the
+        * next drbd_md_sync_page_io(), that we trigger the
+        * ASSERT(atomic_read(&mdev->md_io_in_use) == 1) there.
+        */
+       drbd_md_put_buffer(mdev);
+       md_io->done = 1;
+       wake_up(&mdev->misc_wait);
+       bio_put(bio);
+       put_ldev(mdev);
 }
 
 /* reads on behalf of the partner,
@@ -226,6 +244,7 @@ void drbd_endio_pri(struct bio *bio, int error)
        spin_lock_irqsave(&mdev->req_lock, flags);
        __req_mod(req, what, &m);
        spin_unlock_irqrestore(&mdev->req_lock, flags);
+       put_ldev(mdev);
 
        if (m.bio)
                complete_master_bio(mdev, &m);
@@ -290,7 +309,7 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
        sg_init_table(&sg, 1);
        crypto_hash_init(&desc);
 
-       __bio_for_each_segment(bvec, bio, i, 0) {
+       bio_for_each_segment(bvec, bio, i) {
                sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
                crypto_hash_update(&desc, &sg, sg.length);
        }
@@ -728,7 +747,7 @@ int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
        }
 
        drbd_start_resync(mdev, C_SYNC_SOURCE);
-       clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
+       clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
        return 1;
 }
 
@@ -1519,14 +1538,14 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
        }
 
        drbd_state_lock(mdev);
-
+       write_lock_irq(&global_state_lock);
        if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
+               write_unlock_irq(&global_state_lock);
                drbd_state_unlock(mdev);
                return;
        }
 
-       write_lock_irq(&global_state_lock);
-       ns = mdev->state;
+       ns.i = mdev->state.i;
 
        ns.aftr_isp = !_drbd_may_sync_now(mdev);
 
index b0b00d70c1669d6ef05993deb313dc516a23d552..cce7df367b793d111ef875bd5cd30a6f5e08782c 100644 (file)
@@ -551,7 +551,7 @@ static void floppy_ready(void);
 static void floppy_start(void);
 static void process_fd_request(void);
 static void recalibrate_floppy(void);
-static void floppy_shutdown(unsigned long);
+static void floppy_shutdown(struct work_struct *);
 
 static int floppy_request_regions(int);
 static void floppy_release_regions(int);
@@ -588,6 +588,8 @@ static int buffer_max = -1;
 static struct floppy_fdc_state fdc_state[N_FDC];
 static int fdc;                        /* current fdc */
 
+static struct workqueue_struct *floppy_wq;
+
 static struct floppy_struct *_floppy = floppy_type;
 static unsigned char current_drive;
 static long current_count_sectors;
@@ -629,16 +631,15 @@ static inline void set_debugt(void) { }
 static inline void debugt(const char *func, const char *msg) { }
 #endif /* DEBUGT */
 
-typedef void (*timeout_fn)(unsigned long);
-static DEFINE_TIMER(fd_timeout, floppy_shutdown, 0, 0);
 
+static DECLARE_DELAYED_WORK(fd_timeout, floppy_shutdown);
 static const char *timeout_message;
 
 static void is_alive(const char *func, const char *message)
 {
        /* this routine checks whether the floppy driver is "alive" */
        if (test_bit(0, &fdc_busy) && command_status < 2 &&
-           !timer_pending(&fd_timeout)) {
+           !delayed_work_pending(&fd_timeout)) {
                DPRINT("%s: timeout handler died.  %s\n", func, message);
        }
 }
@@ -666,15 +667,18 @@ static int output_log_pos;
 
 static void __reschedule_timeout(int drive, const char *message)
 {
+       unsigned long delay;
+
        if (drive == current_reqD)
                drive = current_drive;
-       del_timer(&fd_timeout);
+
        if (drive < 0 || drive >= N_DRIVE) {
-               fd_timeout.expires = jiffies + 20UL * HZ;
+               delay = 20UL * HZ;
                drive = 0;
        } else
-               fd_timeout.expires = jiffies + UDP->timeout;
-       add_timer(&fd_timeout);
+               delay = UDP->timeout;
+
+       queue_delayed_work(floppy_wq, &fd_timeout, delay);
        if (UDP->flags & FD_DEBUG)
                DPRINT("reschedule timeout %s\n", message);
        timeout_message = message;
@@ -872,7 +876,7 @@ static int lock_fdc(int drive, bool interruptible)
 
        command_status = FD_COMMAND_NONE;
 
-       __reschedule_timeout(drive, "lock fdc");
+       reschedule_timeout(drive, "lock fdc");
        set_fdc(drive);
        return 0;
 }
@@ -880,23 +884,15 @@ static int lock_fdc(int drive, bool interruptible)
 /* unlocks the driver */
 static void unlock_fdc(void)
 {
-       unsigned long flags;
-
-       raw_cmd = NULL;
        if (!test_bit(0, &fdc_busy))
                DPRINT("FDC access conflict!\n");
 
-       if (do_floppy)
-               DPRINT("device interrupt still active at FDC release: %pf!\n",
-                      do_floppy);
+       raw_cmd = NULL;
        command_status = FD_COMMAND_NONE;
-       spin_lock_irqsave(&floppy_lock, flags);
-       del_timer(&fd_timeout);
+       __cancel_delayed_work(&fd_timeout);
+       do_floppy = NULL;
        cont = NULL;
        clear_bit(0, &fdc_busy);
-       if (current_req || set_next_request())
-               do_fd_request(current_req->q);
-       spin_unlock_irqrestore(&floppy_lock, flags);
        wake_up(&fdc_wait);
 }
 
@@ -968,26 +964,24 @@ static DECLARE_WORK(floppy_work, NULL);
 
 static void schedule_bh(void (*handler)(void))
 {
+       WARN_ON(work_pending(&floppy_work));
+
        PREPARE_WORK(&floppy_work, (work_func_t)handler);
-       schedule_work(&floppy_work);
+       queue_work(floppy_wq, &floppy_work);
 }
 
-static DEFINE_TIMER(fd_timer, NULL, 0, 0);
+static DECLARE_DELAYED_WORK(fd_timer, NULL);
 
 static void cancel_activity(void)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&floppy_lock, flags);
        do_floppy = NULL;
-       PREPARE_WORK(&floppy_work, (work_func_t)empty);
-       del_timer(&fd_timer);
-       spin_unlock_irqrestore(&floppy_lock, flags);
+       cancel_delayed_work_sync(&fd_timer);
+       cancel_work_sync(&floppy_work);
 }
 
 /* this function makes sure that the disk stays in the drive during the
  * transfer */
-static void fd_watchdog(void)
+static void fd_watchdog(struct work_struct *arg)
 {
        debug_dcl(DP->flags, "calling disk change from watchdog\n");
 
@@ -997,21 +991,20 @@ static void fd_watchdog(void)
                cont->done(0);
                reset_fdc();
        } else {
-               del_timer(&fd_timer);
-               fd_timer.function = (timeout_fn)fd_watchdog;
-               fd_timer.expires = jiffies + HZ / 10;
-               add_timer(&fd_timer);
+               cancel_delayed_work(&fd_timer);
+               PREPARE_DELAYED_WORK(&fd_timer, fd_watchdog);
+               queue_delayed_work(floppy_wq, &fd_timer, HZ / 10);
        }
 }
 
 static void main_command_interrupt(void)
 {
-       del_timer(&fd_timer);
+       cancel_delayed_work(&fd_timer);
        cont->interrupt();
 }
 
 /* waits for a delay (spinup or select) to pass */
-static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
+static int fd_wait_for_completion(unsigned long expires, work_func_t function)
 {
        if (FDCS->reset) {
                reset_fdc();    /* do the reset during sleep to win time
@@ -1020,11 +1013,10 @@ static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
                return 1;
        }
 
-       if (time_before(jiffies, delay)) {
-               del_timer(&fd_timer);
-               fd_timer.function = function;
-               fd_timer.expires = delay;
-               add_timer(&fd_timer);
+       if (time_before(jiffies, expires)) {
+               cancel_delayed_work(&fd_timer);
+               PREPARE_DELAYED_WORK(&fd_timer, function);
+               queue_delayed_work(floppy_wq, &fd_timer, expires - jiffies);
                return 1;
        }
        return 0;
@@ -1342,7 +1334,7 @@ static int fdc_dtr(void)
         */
        FDCS->dtr = raw_cmd->rate & 3;
        return fd_wait_for_completion(jiffies + 2UL * HZ / 100,
-                                     (timeout_fn)floppy_ready);
+                                     (work_func_t)floppy_ready);
 }                              /* fdc_dtr */
 
 static void tell_sector(void)
@@ -1447,7 +1439,7 @@ static void setup_rw_floppy(void)
        int flags;
        int dflags;
        unsigned long ready_date;
-       timeout_fn function;
+       work_func_t function;
 
        flags = raw_cmd->flags;
        if (flags & (FD_RAW_READ | FD_RAW_WRITE))
@@ -1461,9 +1453,9 @@ static void setup_rw_floppy(void)
                 */
                if (time_after(ready_date, jiffies + DP->select_delay)) {
                        ready_date -= DP->select_delay;
-                       function = (timeout_fn)floppy_start;
+                       function = (work_func_t)floppy_start;
                } else
-                       function = (timeout_fn)setup_rw_floppy;
+                       function = (work_func_t)setup_rw_floppy;
 
                /* wait until the floppy is spinning fast enough */
                if (fd_wait_for_completion(ready_date, function))
@@ -1493,7 +1485,7 @@ static void setup_rw_floppy(void)
                inr = result();
                cont->interrupt();
        } else if (flags & FD_RAW_NEED_DISK)
-               fd_watchdog();
+               fd_watchdog(NULL);
 }
 
 static int blind_seek;
@@ -1802,20 +1794,22 @@ static void show_floppy(void)
                pr_info("do_floppy=%pf\n", do_floppy);
        if (work_pending(&floppy_work))
                pr_info("floppy_work.func=%pf\n", floppy_work.func);
-       if (timer_pending(&fd_timer))
-               pr_info("fd_timer.function=%pf\n", fd_timer.function);
-       if (timer_pending(&fd_timeout)) {
-               pr_info("timer_function=%pf\n", fd_timeout.function);
-               pr_info("expires=%lu\n", fd_timeout.expires - jiffies);
-               pr_info("now=%lu\n", jiffies);
-       }
+       if (delayed_work_pending(&fd_timer))
+               pr_info("delayed work.function=%p expires=%ld\n",
+                      fd_timer.work.func,
+                      fd_timer.timer.expires - jiffies);
+       if (delayed_work_pending(&fd_timeout))
+               pr_info("timer_function=%p expires=%ld\n",
+                      fd_timeout.work.func,
+                      fd_timeout.timer.expires - jiffies);
+
        pr_info("cont=%p\n", cont);
        pr_info("current_req=%p\n", current_req);
        pr_info("command_status=%d\n", command_status);
        pr_info("\n");
 }
 
-static void floppy_shutdown(unsigned long data)
+static void floppy_shutdown(struct work_struct *arg)
 {
        unsigned long flags;
 
@@ -1868,7 +1862,7 @@ static int start_motor(void (*function)(void))
 
        /* wait_for_completion also schedules reset if needed. */
        return fd_wait_for_completion(DRS->select_date + DP->select_delay,
-                                     (timeout_fn)function);
+                                     (work_func_t)function);
 }
 
 static void floppy_ready(void)
@@ -2821,7 +2815,6 @@ do_request:
                spin_lock_irq(&floppy_lock);
                pending = set_next_request();
                spin_unlock_irq(&floppy_lock);
-
                if (!pending) {
                        do_floppy = NULL;
                        unlock_fdc();
@@ -2898,13 +2891,15 @@ static void do_fd_request(struct request_queue *q)
                 current_req->cmd_flags))
                return;
 
-       if (test_bit(0, &fdc_busy)) {
+       if (test_and_set_bit(0, &fdc_busy)) {
                /* fdc busy, this new request will be treated when the
                   current one is done */
                is_alive(__func__, "old request running");
                return;
        }
-       lock_fdc(MAXTIMEOUT, false);
+       command_status = FD_COMMAND_NONE;
+       __reschedule_timeout(MAXTIMEOUT, "fd_request");
+       set_fdc(0);
        process_fd_request();
        is_alive(__func__, "");
 }
@@ -3612,9 +3607,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 
        mutex_lock(&floppy_mutex);
        mutex_lock(&open_lock);
-       if (UDRS->fd_ref < 0)
-               UDRS->fd_ref = 0;
-       else if (!UDRS->fd_ref--) {
+       if (!UDRS->fd_ref--) {
                DPRINT("floppy_release with fd_ref == 0");
                UDRS->fd_ref = 0;
        }
@@ -3650,13 +3643,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
                set_bit(FD_VERIFY_BIT, &UDRS->flags);
        }
 
-       if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (mode & FMODE_EXCL)))
-               goto out2;
-
-       if (mode & FMODE_EXCL)
-               UDRS->fd_ref = -1;
-       else
-               UDRS->fd_ref++;
+       UDRS->fd_ref++;
 
        opened_bdev[drive] = bdev;
 
@@ -3719,10 +3706,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
        mutex_unlock(&floppy_mutex);
        return 0;
 out:
-       if (UDRS->fd_ref < 0)
-               UDRS->fd_ref = 0;
-       else
-               UDRS->fd_ref--;
+       UDRS->fd_ref--;
+
        if (!UDRS->fd_ref)
                opened_bdev[drive] = NULL;
 out2:
@@ -4159,10 +4144,16 @@ static int __init floppy_init(void)
                        goto out_put_disk;
                }
 
+               floppy_wq = alloc_ordered_workqueue("floppy", 0);
+               if (!floppy_wq) {
+                       err = -ENOMEM;
+                       goto out_put_disk;
+               }
+
                disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock);
                if (!disks[dr]->queue) {
                        err = -ENOMEM;
-                       goto out_put_disk;
+                       goto out_destroy_workq;
                }
 
                blk_queue_max_hw_sectors(disks[dr]->queue, 64);
@@ -4213,7 +4204,7 @@ static int __init floppy_init(void)
        use_virtual_dma = can_use_virtual_dma & 1;
        fdc_state[0].address = FDC1;
        if (fdc_state[0].address == -1) {
-               del_timer_sync(&fd_timeout);
+               cancel_delayed_work(&fd_timeout);
                err = -ENODEV;
                goto out_unreg_region;
        }
@@ -4224,7 +4215,7 @@ static int __init floppy_init(void)
        fdc = 0;                /* reset fdc in case of unexpected interrupt */
        err = floppy_grab_irq_and_dma();
        if (err) {
-               del_timer_sync(&fd_timeout);
+               cancel_delayed_work(&fd_timeout);
                err = -EBUSY;
                goto out_unreg_region;
        }
@@ -4281,13 +4272,13 @@ static int __init floppy_init(void)
                user_reset_fdc(-1, FD_RESET_ALWAYS, false);
        }
        fdc = 0;
-       del_timer_sync(&fd_timeout);
+       cancel_delayed_work(&fd_timeout);
        current_drive = 0;
        initialized = true;
        if (have_no_fdc) {
                DPRINT("no floppy controllers found\n");
                err = have_no_fdc;
-               goto out_flush_work;
+               goto out_release_dma;
        }
 
        for (drive = 0; drive < N_DRIVE; drive++) {
@@ -4302,7 +4293,7 @@ static int __init floppy_init(void)
 
                err = platform_device_register(&floppy_device[drive]);
                if (err)
-                       goto out_flush_work;
+                       goto out_release_dma;
 
                err = device_create_file(&floppy_device[drive].dev,
                                         &dev_attr_cmos);
@@ -4320,13 +4311,14 @@ static int __init floppy_init(void)
 
 out_unreg_platform_dev:
        platform_device_unregister(&floppy_device[drive]);
-out_flush_work:
-       flush_work_sync(&floppy_work);
+out_release_dma:
        if (atomic_read(&usage_count))
                floppy_release_irq_and_dma();
 out_unreg_region:
        blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
        platform_driver_unregister(&floppy_driver);
+out_destroy_workq:
+       destroy_workqueue(floppy_wq);
 out_unreg_blkdev:
        unregister_blkdev(FLOPPY_MAJOR, "fd");
 out_put_disk:
@@ -4397,7 +4389,7 @@ static int floppy_grab_irq_and_dma(void)
         * We might have scheduled a free_irq(), wait it to
         * drain first:
         */
-       flush_work_sync(&floppy_work);
+       flush_workqueue(floppy_wq);
 
        if (fd_request_irq()) {
                DPRINT("Unable to grab IRQ%d for the floppy driver\n",
@@ -4488,9 +4480,9 @@ static void floppy_release_irq_and_dma(void)
                        pr_info("motor off timer %d still active\n", drive);
 #endif
 
-       if (timer_pending(&fd_timeout))
+       if (delayed_work_pending(&fd_timeout))
                pr_info("floppy timer still active:%s\n", timeout_message);
-       if (timer_pending(&fd_timer))
+       if (delayed_work_pending(&fd_timer))
                pr_info("auxiliary floppy timer still active\n");
        if (work_pending(&floppy_work))
                pr_info("work still pending\n");
@@ -4560,8 +4552,9 @@ static void __exit floppy_module_exit(void)
                put_disk(disks[drive]);
        }
 
-       del_timer_sync(&fd_timeout);
-       del_timer_sync(&fd_timer);
+       cancel_delayed_work_sync(&fd_timeout);
+       cancel_delayed_work_sync(&fd_timer);
+       destroy_workqueue(floppy_wq);
 
        if (atomic_read(&usage_count))
                floppy_release_irq_and_dma();
index 304000c3d433f1d98afe6c0f0feea8553dc7bd04..264bc77dcb911c7030c787d3ad87cd79b654ce81 100644 (file)
@@ -294,18 +294,16 @@ static int hba_reset_nosleep(struct driver_data *dd)
  */
 static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
 {
-       unsigned long flags = 0;
-
        atomic_set(&port->commands[tag].active, 1);
 
-       spin_lock_irqsave(&port->cmd_issue_lock, flags);
+       spin_lock(&port->cmd_issue_lock);
 
        writel((1 << MTIP_TAG_BIT(tag)),
                        port->s_active[MTIP_TAG_INDEX(tag)]);
        writel((1 << MTIP_TAG_BIT(tag)),
                        port->cmd_issue[MTIP_TAG_INDEX(tag)]);
 
-       spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
+       spin_unlock(&port->cmd_issue_lock);
 
        /* Set the command's timeout value.*/
        port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
@@ -436,8 +434,7 @@ static void mtip_init_port(struct mtip_port *port)
                writel(0xFFFFFFFF, port->completed[i]);
 
        /* Clear any pending interrupts for this port */
-       writel(readl(port->dd->mmio + PORT_IRQ_STAT),
-                                       port->dd->mmio + PORT_IRQ_STAT);
+       writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
 
        /* Clear any pending interrupts on the HBA. */
        writel(readl(port->dd->mmio + HOST_IRQ_STAT),
@@ -782,13 +779,24 @@ static void mtip_handle_tfe(struct driver_data *dd)
 
        /* Stop the timer to prevent command timeouts. */
        del_timer(&port->cmd_timer);
+       set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
+
+       if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
+                       test_bit(MTIP_TAG_INTERNAL, port->allocated)) {
+               cmd = &port->commands[MTIP_TAG_INTERNAL];
+               dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
+
+               atomic_inc(&cmd->active); /* active > 1 indicates error */
+               if (cmd->comp_data && cmd->comp_func) {
+                       cmd->comp_func(port, MTIP_TAG_INTERNAL,
+                                       cmd->comp_data, PORT_IRQ_TF_ERR);
+               }
+               goto handle_tfe_exit;
+       }
 
        /* clear the tag accumulator */
        memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
 
-       /* Set eh_active */
-       set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-
        /* Loop through all the groups */
        for (group = 0; group < dd->slot_groups; group++) {
                completed = readl(port->completed[group]);
@@ -940,6 +948,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
        }
        print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
 
+handle_tfe_exit:
        /* clear eh_active */
        clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
        wake_up_interruptible(&port->svc_wait);
@@ -961,6 +970,8 @@ static inline void mtip_process_sdbf(struct driver_data *dd)
        /* walk all bits in all slot groups */
        for (group = 0; group < dd->slot_groups; group++) {
                completed = readl(port->completed[group]);
+               if (!completed)
+                       continue;
 
                /* clear completed status register in the hardware.*/
                writel(completed, port->completed[group]);
@@ -1329,22 +1340,6 @@ static int mtip_exec_internal_command(struct mtip_port *port,
                        }
                        rv = -EAGAIN;
                }
-
-               if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
-                       & (1 << MTIP_TAG_INTERNAL)) {
-                       dev_warn(&port->dd->pdev->dev,
-                               "Retiring internal command but CI is 1.\n");
-                       if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
-                                               &port->dd->dd_flag)) {
-                               hba_reset_nosleep(port->dd);
-                               rv = -ENXIO;
-                       } else {
-                               mtip_restart_port(port);
-                               rv = -EAGAIN;
-                       }
-                       goto exec_ic_exit;
-               }
-
        } else {
                /* Spin for <timeout> checking if command still outstanding */
                timeout = jiffies + msecs_to_jiffies(timeout);
@@ -1361,21 +1356,25 @@ static int mtip_exec_internal_command(struct mtip_port *port,
                                rv = -ENXIO;
                                goto exec_ic_exit;
                        }
+                       if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) {
+                               atomic_inc(&int_cmd->active); /* error */
+                               break;
+                       }
                }
+       }
 
-               if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+       if (atomic_read(&int_cmd->active) > 1) {
+               dev_err(&port->dd->pdev->dev,
+                       "Internal command [%02X] failed\n", fis->command);
+               rv = -EIO;
+       }
+       if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
                        & (1 << MTIP_TAG_INTERNAL)) {
-                       dev_err(&port->dd->pdev->dev,
-                               "Internal command did not complete [atomic]\n");
+               rv = -ENXIO;
+               if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+                                       &port->dd->dd_flag)) {
+                       mtip_restart_port(port);
                        rv = -EAGAIN;
-                       if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
-                                               &port->dd->dd_flag)) {
-                               hba_reset_nosleep(port->dd);
-                               rv = -ENXIO;
-                       } else {
-                               mtip_restart_port(port);
-                               rv = -EAGAIN;
-                       }
                }
        }
 exec_ic_exit:
@@ -1893,13 +1892,33 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
                                void __user *user_buffer)
 {
        struct host_to_dev_fis  fis;
-       struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
+       struct host_to_dev_fis *reply;
+       u8 *buf = NULL;
+       dma_addr_t dma_addr = 0;
+       int rv = 0, xfer_sz = command[3];
+
+       if (xfer_sz) {
+               if (user_buffer)
+                       return -EFAULT;
+
+               buf = dmam_alloc_coherent(&port->dd->pdev->dev,
+                               ATA_SECT_SIZE * xfer_sz,
+                               &dma_addr,
+                               GFP_KERNEL);
+               if (!buf) {
+                       dev_err(&port->dd->pdev->dev,
+                               "Memory allocation failed (%d bytes)\n",
+                               ATA_SECT_SIZE * xfer_sz);
+                       return -ENOMEM;
+               }
+               memset(buf, 0, ATA_SECT_SIZE * xfer_sz);
+       }
 
        /* Build the FIS. */
        memset(&fis, 0, sizeof(struct host_to_dev_fis));
-       fis.type                = 0x27;
-       fis.opts                = 1 << 7;
-       fis.command             = command[0];
+       fis.type        = 0x27;
+       fis.opts        = 1 << 7;
+       fis.command     = command[0];
        fis.features    = command[2];
        fis.sect_count  = command[3];
        if (fis.command == ATA_CMD_SMART) {
@@ -1908,6 +1927,11 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
                fis.cyl_hi      = 0xC2;
        }
 
+       if (xfer_sz)
+               reply = (port->rxfis + RX_FIS_PIO_SETUP);
+       else
+               reply = (port->rxfis + RX_FIS_D2H_REG);
+
        dbg_printk(MTIP_DRV_NAME
                " %s: User Command: cmd %x, sect %x, "
                "feat %x, sectcnt %x\n",
@@ -1917,43 +1941,46 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
                command[2],
                command[3]);
 
-       memset(port->sector_buffer, 0x00, ATA_SECT_SIZE);
-
        /* Execute the command. */
        if (mtip_exec_internal_command(port,
                                &fis,
                                 5,
-                                port->sector_buffer_dma,
-                                (command[3] != 0) ? ATA_SECT_SIZE : 0,
+                                (xfer_sz ? dma_addr : 0),
+                                (xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
                                 0,
                                 GFP_KERNEL,
                                 MTIP_IOCTL_COMMAND_TIMEOUT_MS)
                                 < 0) {
-               return -1;
+               rv = -EFAULT;
+               goto exit_drive_command;
        }
 
        /* Collect the completion status. */
        command[0] = reply->command; /* Status*/
        command[1] = reply->features; /* Error*/
-       command[2] = command[3];
+       command[2] = reply->sect_count;
 
        dbg_printk(MTIP_DRV_NAME
                " %s: Completion Status: stat %x, "
-               "err %x, cmd %x\n",
+               "err %x, nsect %x\n",
                __func__,
                command[0],
                command[1],
                command[2]);
 
-       if (user_buffer && command[3]) {
+       if (xfer_sz) {
                if (copy_to_user(user_buffer,
-                                port->sector_buffer,
+                                buf,
                                 ATA_SECT_SIZE * command[3])) {
-                       return -EFAULT;
+                       rv = -EFAULT;
+                       goto exit_drive_command;
                }
        }
-
-       return 0;
+exit_drive_command:
+       if (buf)
+               dmam_free_coherent(&port->dd->pdev->dev,
+                               ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
+       return rv;
 }
 
 /*
@@ -2003,6 +2030,32 @@ static unsigned int implicit_sector(unsigned char command,
        return rv;
 }
 
+static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
+{
+       switch (fis->command) {
+       case ATA_CMD_DOWNLOAD_MICRO:
+               *timeout = 120000; /* 2 minutes */
+               break;
+       case ATA_CMD_SEC_ERASE_UNIT:
+       case 0xFC:
+               *timeout = 240000; /* 4 minutes */
+               break;
+       case ATA_CMD_STANDBYNOW1:
+               *timeout = 10000;  /* 10 seconds */
+               break;
+       case 0xF7:
+       case 0xFA:
+               *timeout = 60000;  /* 60 seconds */
+               break;
+       case ATA_CMD_SMART:
+               *timeout = 15000;  /* 15 seconds */
+               break;
+       default:
+               *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+               break;
+       }
+}
+
 /*
  * Executes a taskfile
  * See ide_taskfile_ioctl() for derivation
@@ -2023,7 +2076,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
        unsigned int taskin = 0;
        unsigned int taskout = 0;
        u8 nsect = 0;
-       unsigned int timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+       unsigned int timeout;
        unsigned int force_single_sector;
        unsigned int transfer_size;
        unsigned long task_file_data;
@@ -2153,32 +2206,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
                fis.lba_hi,
                fis.device);
 
-       switch (fis.command) {
-       case ATA_CMD_DOWNLOAD_MICRO:
-               /* Change timeout for Download Microcode to 2 minutes */
-               timeout = 120000;
-               break;
-       case ATA_CMD_SEC_ERASE_UNIT:
-               /* Change timeout for Security Erase Unit to 4 minutes.*/
-               timeout = 240000;
-               break;
-       case ATA_CMD_STANDBYNOW1:
-               /* Change timeout for standby immediate to 10 seconds.*/
-               timeout = 10000;
-               break;
-       case 0xF7:
-       case 0xFA:
-               /* Change timeout for vendor unique command to 10 secs */
-               timeout = 10000;
-               break;
-       case ATA_CMD_SMART:
-               /* Change timeout for vendor unique command to 15 secs */
-               timeout = 15000;
-               break;
-       default:
-               timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
-               break;
-       }
+       mtip_set_timeout(&fis, &timeout);
 
        /* Determine the correct transfer size.*/
        if (force_single_sector)
@@ -2295,13 +2323,12 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
 {
        switch (cmd) {
        case HDIO_GET_IDENTITY:
-               if (mtip_get_identify(dd->port, (void __user *) arg) < 0) {
-                       dev_warn(&dd->pdev->dev,
-                               "Unable to read identity\n");
-                       return -EIO;
-               }
-
+       {
+               if (copy_to_user((void __user *)arg, dd->port->identify,
+                                               sizeof(u16) * ATA_ID_WORDS))
+                       return -EFAULT;
                break;
+       }
        case HDIO_DRIVE_CMD:
        {
                u8 drive_command[4];
@@ -2537,40 +2564,58 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
        int size = 0;
        int n;
 
-       size += sprintf(&buf[size], "S ACTive:\n");
+       size += sprintf(&buf[size], "Hardware\n--------\n");
+       size += sprintf(&buf[size], "S ACTive      : [ 0x");
 
-       for (n = 0; n < dd->slot_groups; n++)
-               size += sprintf(&buf[size], "0x%08x\n",
+       for (n = dd->slot_groups-1; n >= 0; n--)
+               size += sprintf(&buf[size], "%08X ",
                                         readl(dd->port->s_active[n]));
 
-       size += sprintf(&buf[size], "Command Issue:\n");
+       size += sprintf(&buf[size], "]\n");
+       size += sprintf(&buf[size], "Command Issue : [ 0x");
 
-       for (n = 0; n < dd->slot_groups; n++)
-               size += sprintf(&buf[size], "0x%08x\n",
+       for (n = dd->slot_groups-1; n >= 0; n--)
+               size += sprintf(&buf[size], "%08X ",
                                        readl(dd->port->cmd_issue[n]));
 
-       size += sprintf(&buf[size], "Allocated:\n");
+       size += sprintf(&buf[size], "]\n");
+       size += sprintf(&buf[size], "Completed     : [ 0x");
+
+       for (n = dd->slot_groups-1; n >= 0; n--)
+               size += sprintf(&buf[size], "%08X ",
+                               readl(dd->port->completed[n]));
+
+       size += sprintf(&buf[size], "]\n");
+       size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
+                               readl(dd->port->mmio + PORT_IRQ_STAT));
+       size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
+                               readl(dd->mmio + HOST_IRQ_STAT));
+       size += sprintf(&buf[size], "\n");
 
-       for (n = 0; n < dd->slot_groups; n++) {
+       size += sprintf(&buf[size], "Local\n-----\n");
+       size += sprintf(&buf[size], "Allocated    : [ 0x");
+
+       for (n = dd->slot_groups-1; n >= 0; n--) {
                if (sizeof(long) > sizeof(u32))
                        group_allocated =
                                dd->port->allocated[n/2] >> (32*(n&1));
                else
                        group_allocated = dd->port->allocated[n];
-               size += sprintf(&buf[size], "0x%08x\n",
-                                group_allocated);
+               size += sprintf(&buf[size], "%08X ", group_allocated);
        }
+       size += sprintf(&buf[size], "]\n");
 
-       size += sprintf(&buf[size], "Completed:\n");
-
-       for (n = 0; n < dd->slot_groups; n++)
-               size += sprintf(&buf[size], "0x%08x\n",
-                               readl(dd->port->completed[n]));
+       size += sprintf(&buf[size], "Commands in Q: [ 0x");
 
-       size += sprintf(&buf[size], "PORT IRQ STAT : 0x%08x\n",
-                               readl(dd->port->mmio + PORT_IRQ_STAT));
-       size += sprintf(&buf[size], "HOST IRQ STAT : 0x%08x\n",
-                               readl(dd->mmio + HOST_IRQ_STAT));
+       for (n = dd->slot_groups-1; n >= 0; n--) {
+               if (sizeof(long) > sizeof(u32))
+                       group_allocated =
+                               dd->port->cmds_to_issue[n/2] >> (32*(n&1));
+               else
+                       group_allocated = dd->port->cmds_to_issue[n];
+               size += sprintf(&buf[size], "%08X ", group_allocated);
+       }
+       size += sprintf(&buf[size], "]\n");
 
        return size;
 }
@@ -2592,8 +2637,24 @@ static ssize_t mtip_hw_show_status(struct device *dev,
        return size;
 }
 
+static ssize_t mtip_hw_show_flags(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       struct driver_data *dd = dev_to_disk(dev)->private_data;
+       int size = 0;
+
+       size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
+                                                       dd->port->flags);
+       size += sprintf(&buf[size], "Flag in dd struct   : [ %08lX ]\n",
+                                                       dd->dd_flag);
+
+       return size;
+}
+
 static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
 static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
 
 /*
  * Create the sysfs related attributes.
@@ -2616,6 +2677,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
        if (sysfs_create_file(kobj, &dev_attr_status.attr))
                dev_warn(&dd->pdev->dev,
                        "Error creating 'status' sysfs entry\n");
+       if (sysfs_create_file(kobj, &dev_attr_flags.attr))
+               dev_warn(&dd->pdev->dev,
+                       "Error creating 'flags' sysfs entry\n");
        return 0;
 }
 
@@ -2636,6 +2700,7 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
 
        sysfs_remove_file(kobj, &dev_attr_registers.attr);
        sysfs_remove_file(kobj, &dev_attr_status.attr);
+       sysfs_remove_file(kobj, &dev_attr_flags.attr);
 
        return 0;
 }
@@ -3634,7 +3699,10 @@ skip_create_disk:
        set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
        blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
        blk_queue_physical_block_size(dd->queue, 4096);
+       blk_queue_max_hw_sectors(dd->queue, 0xffff);
+       blk_queue_max_segment_size(dd->queue, 0x400000);
        blk_queue_io_min(dd->queue, 4096);
+
        /*
         * write back cache is not supported in the device. FUA depends on
         * write back cache support, hence setting flush support to zero.
index 4ef58336310a126af9b4d0847dc4099e4af23610..b2c88da26b2a7b7f94ff77b6f1a1d2047f6b45f3 100644 (file)
 
 #define __force_bit2int (unsigned int __force)
 
-/* below are bit numbers in 'flags' defined in mtip_port */
-#define MTIP_PF_IC_ACTIVE_BIT          0 /* pio/ioctl */
-#define MTIP_PF_EH_ACTIVE_BIT          1 /* error handling */
-#define MTIP_PF_SE_ACTIVE_BIT          2 /* secure erase */
-#define MTIP_PF_DM_ACTIVE_BIT          3 /* download microcde */
-#define MTIP_PF_PAUSE_IO       ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
+enum {
+       /* below are bit numbers in 'flags' defined in mtip_port */
+       MTIP_PF_IC_ACTIVE_BIT       = 0, /* pio/ioctl */
+       MTIP_PF_EH_ACTIVE_BIT       = 1, /* error handling */
+       MTIP_PF_SE_ACTIVE_BIT       = 2, /* secure erase */
+       MTIP_PF_DM_ACTIVE_BIT       = 3, /* download microcde */
+       MTIP_PF_PAUSE_IO      = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
                                (1 << MTIP_PF_EH_ACTIVE_BIT) | \
                                (1 << MTIP_PF_SE_ACTIVE_BIT) | \
-                               (1 << MTIP_PF_DM_ACTIVE_BIT))
-
-#define MTIP_PF_SVC_THD_ACTIVE_BIT     4
-#define MTIP_PF_ISSUE_CMDS_BIT         5
-#define MTIP_PF_REBUILD_BIT            6
-#define MTIP_PF_SVC_THD_STOP_BIT       8
-
-/* below are bit numbers in 'dd_flag' defined in driver_data */
-#define MTIP_DDF_REMOVE_PENDING_BIT    1
-#define MTIP_DDF_OVER_TEMP_BIT         2
-#define MTIP_DDF_WRITE_PROTECT_BIT     3
-#define MTIP_DDF_STOP_IO       ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
+                               (1 << MTIP_PF_DM_ACTIVE_BIT)),
+
+       MTIP_PF_SVC_THD_ACTIVE_BIT  = 4,
+       MTIP_PF_ISSUE_CMDS_BIT      = 5,
+       MTIP_PF_REBUILD_BIT         = 6,
+       MTIP_PF_SVC_THD_STOP_BIT    = 8,
+
+       /* below are bit numbers in 'dd_flag' defined in driver_data */
+       MTIP_DDF_REMOVE_PENDING_BIT = 1,
+       MTIP_DDF_OVER_TEMP_BIT      = 2,
+       MTIP_DDF_WRITE_PROTECT_BIT  = 3,
+       MTIP_DDF_STOP_IO      = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
                                (1 << MTIP_DDF_OVER_TEMP_BIT) | \
-                               (1 << MTIP_DDF_WRITE_PROTECT_BIT))
+                               (1 << MTIP_DDF_WRITE_PROTECT_BIT)),
 
-#define MTIP_DDF_CLEANUP_BIT           5
-#define MTIP_DDF_RESUME_BIT            6
-#define MTIP_DDF_INIT_DONE_BIT         7
-#define MTIP_DDF_REBUILD_FAILED_BIT    8
+       MTIP_DDF_CLEANUP_BIT        = 5,
+       MTIP_DDF_RESUME_BIT         = 6,
+       MTIP_DDF_INIT_DONE_BIT      = 7,
+       MTIP_DDF_REBUILD_FAILED_BIT = 8,
+};
 
 __packed struct smart_attr{
        u8 attr_id;
index 013c7a549fb6dbc3d5d1afe2e01730e951846507..65665c9c42c62ba5805324df96292e560f16b04a 100644 (file)
@@ -141,7 +141,7 @@ struct rbd_request {
 struct rbd_snap {
        struct  device          dev;
        const char              *name;
-       size_t                  size;
+       u64                     size;
        struct list_head        node;
        u64                     id;
 };
@@ -175,8 +175,7 @@ struct rbd_device {
        /* protects updating the header */
        struct rw_semaphore     header_rwsem;
        char                    snap_name[RBD_MAX_SNAP_NAME_LEN];
-       u32 cur_snap;   /* index+1 of current snapshot within snap context
-                          0 - for the head */
+       u64                     snap_id;        /* current snapshot id */
        int read_only;
 
        struct list_head        node;
@@ -241,7 +240,7 @@ static void rbd_put_dev(struct rbd_device *rbd_dev)
        put_device(&rbd_dev->dev);
 }
 
-static int __rbd_update_snaps(struct rbd_device *rbd_dev);
+static int __rbd_refresh_header(struct rbd_device *rbd_dev);
 
 static int rbd_open(struct block_device *bdev, fmode_t mode)
 {
@@ -450,7 +449,9 @@ static void rbd_client_release(struct kref *kref)
        struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
 
        dout("rbd_release_client %p\n", rbdc);
+       spin_lock(&rbd_client_list_lock);
        list_del(&rbdc->node);
+       spin_unlock(&rbd_client_list_lock);
 
        ceph_destroy_client(rbdc->client);
        kfree(rbdc->rbd_opts);
@@ -463,9 +464,7 @@ static void rbd_client_release(struct kref *kref)
  */
 static void rbd_put_client(struct rbd_device *rbd_dev)
 {
-       spin_lock(&rbd_client_list_lock);
        kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
-       spin_unlock(&rbd_client_list_lock);
        rbd_dev->rbd_client = NULL;
 }
 
@@ -487,16 +486,18 @@ static void rbd_coll_release(struct kref *kref)
  */
 static int rbd_header_from_disk(struct rbd_image_header *header,
                                 struct rbd_image_header_ondisk *ondisk,
-                                int allocated_snaps,
+                                u32 allocated_snaps,
                                 gfp_t gfp_flags)
 {
-       int i;
-       u32 snap_count;
+       u32 i, snap_count;
 
        if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT)))
                return -ENXIO;
 
        snap_count = le32_to_cpu(ondisk->snap_count);
+       if (snap_count > (UINT_MAX - sizeof(struct ceph_snap_context))
+                        / sizeof (*ondisk))
+               return -EINVAL;
        header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
                                snap_count * sizeof (*ondisk),
                                gfp_flags);
@@ -506,11 +507,11 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
        header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
        if (snap_count) {
                header->snap_names = kmalloc(header->snap_names_len,
-                                            GFP_KERNEL);
+                                            gfp_flags);
                if (!header->snap_names)
                        goto err_snapc;
                header->snap_sizes = kmalloc(snap_count * sizeof(u64),
-                                            GFP_KERNEL);
+                                            gfp_flags);
                if (!header->snap_sizes)
                        goto err_names;
        } else {
@@ -552,21 +553,6 @@ err_snapc:
        return -ENOMEM;
 }
 
-static int snap_index(struct rbd_image_header *header, int snap_num)
-{
-       return header->total_snaps - snap_num;
-}
-
-static u64 cur_snap_id(struct rbd_device *rbd_dev)
-{
-       struct rbd_image_header *header = &rbd_dev->header;
-
-       if (!rbd_dev->cur_snap)
-               return 0;
-
-       return header->snapc->snaps[snap_index(header, rbd_dev->cur_snap)];
-}
-
 static int snap_by_name(struct rbd_image_header *header, const char *snap_name,
                        u64 *seq, u64 *size)
 {
@@ -605,7 +591,7 @@ static int rbd_header_set_snap(struct rbd_device *dev, u64 *size)
                        snapc->seq = header->snap_seq;
                else
                        snapc->seq = 0;
-               dev->cur_snap = 0;
+               dev->snap_id = CEPH_NOSNAP;
                dev->read_only = 0;
                if (size)
                        *size = header->image_size;
@@ -613,8 +599,7 @@ static int rbd_header_set_snap(struct rbd_device *dev, u64 *size)
                ret = snap_by_name(header, dev->snap_name, &snapc->seq, size);
                if (ret < 0)
                        goto done;
-
-               dev->cur_snap = header->total_snaps - ret;
+               dev->snap_id = snapc->seq;
                dev->read_only = 1;
        }
 
@@ -935,7 +920,6 @@ static int rbd_do_request(struct request *rq,
        layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
        layout->fl_stripe_count = cpu_to_le32(1);
        layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
-       layout->fl_pg_preferred = cpu_to_le32(-1);
        layout->fl_pg_pool = cpu_to_le32(dev->poolid);
        ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
                                req, ops);
@@ -1168,7 +1152,7 @@ static int rbd_req_read(struct request *rq,
                         int coll_index)
 {
        return rbd_do_op(rq, rbd_dev, NULL,
-                        (snapid ? snapid : CEPH_NOSNAP),
+                        snapid,
                         CEPH_OSD_OP_READ,
                         CEPH_OSD_FLAG_READ,
                         2,
@@ -1187,7 +1171,7 @@ static int rbd_req_sync_read(struct rbd_device *dev,
                          u64 *ver)
 {
        return rbd_req_sync_op(dev, NULL,
-                              (snapid ? snapid : CEPH_NOSNAP),
+                              snapid,
                               CEPH_OSD_OP_READ,
                               CEPH_OSD_FLAG_READ,
                               NULL,
@@ -1238,7 +1222,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
        dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
                notify_id, (int)opcode);
        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-       rc = __rbd_update_snaps(dev);
+       rc = __rbd_refresh_header(dev);
        mutex_unlock(&ctl_mutex);
        if (rc)
                pr_warning(RBD_DRV_NAME "%d got notification but failed to "
@@ -1521,7 +1505,7 @@ static void rbd_rq_fn(struct request_queue *q)
                                              coll, cur_seg);
                        else
                                rbd_req_read(rq, rbd_dev,
-                                            cur_snap_id(rbd_dev),
+                                            rbd_dev->snap_id,
                                             ofs,
                                             op_size, bio,
                                             coll, cur_seg);
@@ -1592,7 +1576,7 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
 {
        ssize_t rc;
        struct rbd_image_header_ondisk *dh;
-       int snap_count = 0;
+       u32 snap_count = 0;
        u64 ver;
        size_t len;
 
@@ -1656,7 +1640,7 @@ static int rbd_header_add_snap(struct rbd_device *dev,
        struct ceph_mon_client *monc;
 
        /* we should create a snapshot only if we're pointing at the head */
-       if (dev->cur_snap)
+       if (dev->snap_id != CEPH_NOSNAP)
                return -EINVAL;
 
        monc = &dev->rbd_client->client->monc;
@@ -1683,7 +1667,9 @@ static int rbd_header_add_snap(struct rbd_device *dev,
        if (ret < 0)
                return ret;
 
-       dev->header.snapc->seq =  new_snapid;
+       down_write(&dev->header_rwsem);
+       dev->header.snapc->seq = new_snapid;
+       up_write(&dev->header_rwsem);
 
        return 0;
 bad:
@@ -1703,7 +1689,7 @@ static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
 /*
  * only read the first part of the ondisk header, without the snaps info
  */
-static int __rbd_update_snaps(struct rbd_device *rbd_dev)
+static int __rbd_refresh_header(struct rbd_device *rbd_dev)
 {
        int ret;
        struct rbd_image_header h;
@@ -1890,7 +1876,7 @@ static ssize_t rbd_image_refresh(struct device *dev,
 
        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
 
-       rc = __rbd_update_snaps(rbd_dev);
+       rc = __rbd_refresh_header(rbd_dev);
        if (rc < 0)
                ret = rc;
 
@@ -1949,7 +1935,7 @@ static ssize_t rbd_snap_size_show(struct device *dev,
 {
        struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
 
-       return sprintf(buf, "%zd\n", snap->size);
+       return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
 }
 
 static ssize_t rbd_snap_id_show(struct device *dev,
@@ -1958,7 +1944,7 @@ static ssize_t rbd_snap_id_show(struct device *dev,
 {
        struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
 
-       return sprintf(buf, "%llu\n", (unsigned long long) snap->id);
+       return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
 }
 
 static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
@@ -2173,7 +2159,7 @@ static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
                                         rbd_dev->header.obj_version);
                if (ret == -ERANGE) {
                        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-                       rc = __rbd_update_snaps(rbd_dev);
+                       rc = __rbd_refresh_header(rbd_dev);
                        mutex_unlock(&ctl_mutex);
                        if (rc < 0)
                                return rc;
@@ -2558,7 +2544,7 @@ static ssize_t rbd_snap_add(struct device *dev,
        if (ret < 0)
                goto err_unlock;
 
-       ret = __rbd_update_snaps(rbd_dev);
+       ret = __rbd_refresh_header(rbd_dev);
        if (ret < 0)
                goto err_unlock;
 
index 4e86393a09cf5c880917fd81d3e67e507a83c0f6..60eed4bdd2e4528ae3c3b8871cd65f85d3c34952 100644 (file)
@@ -526,6 +526,14 @@ static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
        return 0;
 }
 
+static char *encode_disk_name(char *ptr, unsigned int n)
+{
+       if (n >= 26)
+               ptr = encode_disk_name(ptr, n / 26 - 1);
+       *ptr = 'a' + n % 26;
+       return ptr + 1;
+}
+
 static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
                               struct blkfront_info *info,
                               u16 vdisk_info, u16 sector_size)
@@ -536,6 +544,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
        unsigned int offset;
        int minor;
        int nr_parts;
+       char *ptr;
 
        BUG_ON(info->gd != NULL);
        BUG_ON(info->rq != NULL);
@@ -560,7 +569,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
                                        "emulated IDE disks,\n\t choose an xvd device name"
                                        "from xvde on\n", info->vdevice);
        }
-       err = -ENODEV;
+       if (minor >> MINORBITS) {
+               pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
+                       info->vdevice, minor);
+               return -ENODEV;
+       }
 
        if ((minor % nr_parts) == 0)
                nr_minors = nr_parts;
@@ -574,23 +587,14 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
        if (gd == NULL)
                goto release;
 
-       if (nr_minors > 1) {
-               if (offset < 26)
-                       sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
-               else
-                       sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
-                               'a' + ((offset / 26)-1), 'a' + (offset % 26));
-       } else {
-               if (offset < 26)
-                       sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
-                               'a' + offset,
-                               minor & (nr_parts - 1));
-               else
-                       sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
-                               'a' + ((offset / 26) - 1),
-                               'a' + (offset % 26),
-                               minor & (nr_parts - 1));
-       }
+       strcpy(gd->disk_name, DEV_NAME);
+       ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
+       BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
+       if (nr_minors > 1)
+               *ptr = 0;
+       else
+               snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
+                        "%d", minor & (nr_parts - 1));
 
        gd->major = XENVBD_MAJOR;
        gd->first_minor = minor;
@@ -1496,7 +1500,9 @@ module_init(xlblk_init);
 
 static void __exit xlblk_exit(void)
 {
-       return xenbus_unregister_driver(&blkfront_driver);
+       xenbus_unregister_driver(&blkfront_driver);
+       unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
+       kfree(minors);
 }
 module_exit(xlblk_exit);
 
index 165e1febae53676458e3d24256f08b547d14d0d8..4864407e3fc424814d5c5f46450b45c143c375f5 100644 (file)
@@ -12,6 +12,7 @@ config HAVE_MACH_CLKDEV
 config COMMON_CLK
        bool
        select HAVE_CLK_PREPARE
+       select CLKDEV_LOOKUP
        ---help---
          The common clock framework is a single definition of struct
          clk, useful across many platforms, as well as an
@@ -22,17 +23,6 @@ config COMMON_CLK
 menu "Common Clock Framework"
        depends on COMMON_CLK
 
-config COMMON_CLK_DISABLE_UNUSED
-       bool "Disabled unused clocks at boot"
-       depends on COMMON_CLK
-       ---help---
-         Traverses the entire clock tree and disables any clocks that are
-         enabled in hardware but have not been enabled by any device drivers.
-         This saves power and keeps the software model of the clock in line
-         with reality.
-
-         If in doubt, say "N".
-
 config COMMON_CLK_DEBUG
        bool "DebugFS representation of clock tree"
        depends on COMMON_CLK
index 1f736bc11c4bb26f887c4480d9484c64274a0d90..b9a5158a30b1272452dd99e543c18781d797d404 100644 (file)
@@ -1,4 +1,7 @@
 
 obj-$(CONFIG_CLKDEV_LOOKUP)    += clkdev.o
 obj-$(CONFIG_COMMON_CLK)       += clk.o clk-fixed-rate.o clk-gate.o \
-                                  clk-mux.o clk-divider.o
+                                  clk-mux.o clk-divider.o clk-fixed-factor.o
+# SoCs specific
+obj-$(CONFIG_ARCH_MXS)         += mxs/
+obj-$(CONFIG_PLAT_SPEAR)       += spear/
index d5ac6a75ea57d832c068686bd32e5705f2856c60..8ea11b444528f3191b417ee651551a6abce64721 100644 (file)
@@ -45,7 +45,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
 
        return parent_rate / div;
 }
-EXPORT_SYMBOL_GPL(clk_divider_recalc_rate);
 
 /*
  * The reverse of DIV_ROUND_UP: The maximum number which
@@ -68,8 +67,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
        if (divider->flags & CLK_DIVIDER_ONE_BASED)
                maxdiv--;
 
-       if (!best_parent_rate) {
-               parent_rate = __clk_get_rate(__clk_get_parent(hw->clk));
+       if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
+               parent_rate = *best_parent_rate;
                bestdiv = DIV_ROUND_UP(parent_rate, rate);
                bestdiv = bestdiv == 0 ? 1 : bestdiv;
                bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
@@ -109,24 +108,18 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
        int div;
        div = clk_divider_bestdiv(hw, rate, prate);
 
-       if (prate)
-               return *prate / div;
-       else {
-               unsigned long r;
-               r = __clk_get_rate(__clk_get_parent(hw->clk));
-               return r / div;
-       }
+       return *prate / div;
 }
-EXPORT_SYMBOL_GPL(clk_divider_round_rate);
 
-static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate)
+static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long parent_rate)
 {
        struct clk_divider *divider = to_clk_divider(hw);
        unsigned int div;
        unsigned long flags = 0;
        u32 val;
 
-       div = __clk_get_rate(__clk_get_parent(hw->clk)) / rate;
+       div = parent_rate / rate;
 
        if (!(divider->flags & CLK_DIVIDER_ONE_BASED))
                div--;
@@ -147,15 +140,26 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(clk_divider_set_rate);
 
-struct clk_ops clk_divider_ops = {
+const struct clk_ops clk_divider_ops = {
        .recalc_rate = clk_divider_recalc_rate,
        .round_rate = clk_divider_round_rate,
        .set_rate = clk_divider_set_rate,
 };
 EXPORT_SYMBOL_GPL(clk_divider_ops);
 
+/**
+ * clk_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
 struct clk *clk_register_divider(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
@@ -163,38 +167,34 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
 {
        struct clk_divider *div;
        struct clk *clk;
+       struct clk_init_data init;
 
+       /* allocate the divider */
        div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
-
        if (!div) {
                pr_err("%s: could not allocate divider clk\n", __func__);
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
 
+       init.name = name;
+       init.ops = &clk_divider_ops;
+       init.flags = flags;
+       init.parent_names = (parent_name ? &parent_name: NULL);
+       init.num_parents = (parent_name ? 1 : 0);
+
        /* struct clk_divider assignments */
        div->reg = reg;
        div->shift = shift;
        div->width = width;
        div->flags = clk_divider_flags;
        div->lock = lock;
+       div->hw.init = &init;
 
-       if (parent_name) {
-               div->parent[0] = kstrdup(parent_name, GFP_KERNEL);
-               if (!div->parent[0])
-                       goto out;
-       }
-
-       clk = clk_register(dev, name,
-                       &clk_divider_ops, &div->hw,
-                       div->parent,
-                       (parent_name ? 1 : 0),
-                       flags);
-       if (clk)
-               return clk;
+       /* register the clock */
+       clk = clk_register(dev, &div->hw);
 
-out:
-       kfree(div->parent[0]);
-       kfree(div);
+       if (IS_ERR(clk))
+               kfree(div);
 
-       return NULL;
+       return clk;
 }
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
new file mode 100644 (file)
index 0000000..c8c003e
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Standard functionality for the common clock API.
+ */
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+/*
+ * DOC: basic fixed multiplier and divider clock that cannot gate
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is fixed.  clk->rate = parent->rate / div * mult
+ * parent - fixed parent.  No clk_set_parent support
+ */
+
+#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
+
+static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
+
+       return parent_rate * fix->mult / fix->div;
+}
+
+static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long *prate)
+{
+       struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
+
+       if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+               unsigned long best_parent;
+
+               best_parent = (rate / fix->mult) * fix->div;
+               *prate = __clk_round_rate(__clk_get_parent(hw->clk),
+                               best_parent);
+       }
+
+       return (*prate / fix->div) * fix->mult;
+}
+
+static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long parent_rate)
+{
+       return 0;
+}
+
+struct clk_ops clk_fixed_factor_ops = {
+       .round_rate = clk_factor_round_rate,
+       .set_rate = clk_factor_set_rate,
+       .recalc_rate = clk_factor_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
+
+struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
+               const char *parent_name, unsigned long flags,
+               unsigned int mult, unsigned int div)
+{
+       struct clk_fixed_factor *fix;
+       struct clk_init_data init;
+       struct clk *clk;
+
+       fix = kmalloc(sizeof(*fix), GFP_KERNEL);
+       if (!fix) {
+               pr_err("%s: could not allocate fixed factor clk\n", __func__);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* struct clk_fixed_factor assignments */
+       fix->mult = mult;
+       fix->div = div;
+       fix->hw.init = &init;
+
+       init.name = name;
+       init.ops = &clk_fixed_factor_ops;
+       init.flags = flags;
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+
+       clk = clk_register(dev, &fix->hw);
+
+       if (IS_ERR(clk))
+               kfree(fix);
+
+       return clk;
+}
index 90c79fb5d1bd61525090f684471edda1461a9685..cbd24622978660d3cd68899e91add03e3f80852d 100644 (file)
@@ -32,51 +32,50 @@ static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
 {
        return to_clk_fixed_rate(hw)->fixed_rate;
 }
-EXPORT_SYMBOL_GPL(clk_fixed_rate_recalc_rate);
 
-struct clk_ops clk_fixed_rate_ops = {
+const struct clk_ops clk_fixed_rate_ops = {
        .recalc_rate = clk_fixed_rate_recalc_rate,
 };
 EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
 
+/**
+ * clk_register_fixed_rate - register fixed-rate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
 struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                unsigned long fixed_rate)
 {
        struct clk_fixed_rate *fixed;
-       char **parent_names = NULL;
-       u8 len;
+       struct clk *clk;
+       struct clk_init_data init;
 
+       /* allocate fixed-rate clock */
        fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
-
        if (!fixed) {
                pr_err("%s: could not allocate fixed clk\n", __func__);
                return ERR_PTR(-ENOMEM);
        }
 
+       init.name = name;
+       init.ops = &clk_fixed_rate_ops;
+       init.flags = flags;
+       init.parent_names = (parent_name ? &parent_name: NULL);
+       init.num_parents = (parent_name ? 1 : 0);
+
        /* struct clk_fixed_rate assignments */
        fixed->fixed_rate = fixed_rate;
+       fixed->hw.init = &init;
 
-       if (parent_name) {
-               parent_names = kmalloc(sizeof(char *), GFP_KERNEL);
-
-               if (! parent_names)
-                       goto out;
+       /* register the clock */
+       clk = clk_register(dev, &fixed->hw);
 
-               len = sizeof(char) * strlen(parent_name);
-
-               parent_names[0] = kmalloc(len, GFP_KERNEL);
-
-               if (!parent_names[0])
-                       goto out;
-
-               strncpy(parent_names[0], parent_name, len);
-       }
+       if (IS_ERR(clk))
+               kfree(fixed);
 
-out:
-       return clk_register(dev, name,
-                       &clk_fixed_rate_ops, &fixed->hw,
-                       parent_names,
-                       (parent_name ? 1 : 0),
-                       flags);
+       return clk;
 }
index b5902e2ef2fd72f73ce44166222d6d3ebb76a5c6..578465e04be6b900c37009a7bfe3feb3c7506728 100644 (file)
 
 #define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
 
-static void clk_gate_set_bit(struct clk_gate *gate)
+/*
+ * It works on following logic:
+ *
+ * For enabling clock, enable = 1
+ *     set2dis = 1     -> clear bit    -> set = 0
+ *     set2dis = 0     -> set bit      -> set = 1
+ *
+ * For disabling clock, enable = 0
+ *     set2dis = 1     -> set bit      -> set = 1
+ *     set2dis = 0     -> clear bit    -> set = 0
+ *
+ * So, result is always: enable xor set2dis.
+ */
+static void clk_gate_endisable(struct clk_hw *hw, int enable)
 {
-       u32 reg;
+       struct clk_gate *gate = to_clk_gate(hw);
+       int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
        unsigned long flags = 0;
+       u32 reg;
+
+       set ^= enable;
 
        if (gate->lock)
                spin_lock_irqsave(gate->lock, flags);
 
        reg = readl(gate->reg);
-       reg |= BIT(gate->bit_idx);
-       writel(reg, gate->reg);
-
-       if (gate->lock)
-               spin_unlock_irqrestore(gate->lock, flags);
-}
-
-static void clk_gate_clear_bit(struct clk_gate *gate)
-{
-       u32 reg;
-       unsigned long flags = 0;
 
-       if (gate->lock)
-               spin_lock_irqsave(gate->lock, flags);
+       if (set)
+               reg |= BIT(gate->bit_idx);
+       else
+               reg &= ~BIT(gate->bit_idx);
 
-       reg = readl(gate->reg);
-       reg &= ~BIT(gate->bit_idx);
        writel(reg, gate->reg);
 
        if (gate->lock)
@@ -62,27 +68,15 @@ static void clk_gate_clear_bit(struct clk_gate *gate)
 
 static int clk_gate_enable(struct clk_hw *hw)
 {
-       struct clk_gate *gate = to_clk_gate(hw);
-
-       if (gate->flags & CLK_GATE_SET_TO_DISABLE)
-               clk_gate_clear_bit(gate);
-       else
-               clk_gate_set_bit(gate);
+       clk_gate_endisable(hw, 1);
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(clk_gate_enable);
 
 static void clk_gate_disable(struct clk_hw *hw)
 {
-       struct clk_gate *gate = to_clk_gate(hw);
-
-       if (gate->flags & CLK_GATE_SET_TO_DISABLE)
-               clk_gate_set_bit(gate);
-       else
-               clk_gate_clear_bit(gate);
+       clk_gate_endisable(hw, 0);
 }
-EXPORT_SYMBOL_GPL(clk_gate_disable);
 
 static int clk_gate_is_enabled(struct clk_hw *hw)
 {
@@ -99,15 +93,25 @@ static int clk_gate_is_enabled(struct clk_hw *hw)
 
        return reg ? 1 : 0;
 }
-EXPORT_SYMBOL_GPL(clk_gate_is_enabled);
 
-struct clk_ops clk_gate_ops = {
+const struct clk_ops clk_gate_ops = {
        .enable = clk_gate_enable,
        .disable = clk_gate_disable,
        .is_enabled = clk_gate_is_enabled,
 };
 EXPORT_SYMBOL_GPL(clk_gate_ops);
 
+/**
+ * clk_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
 struct clk *clk_register_gate(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 bit_idx,
@@ -115,36 +119,32 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
 {
        struct clk_gate *gate;
        struct clk *clk;
+       struct clk_init_data init;
 
+       /* allocate the gate */
        gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
-
        if (!gate) {
                pr_err("%s: could not allocate gated clk\n", __func__);
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
 
+       init.name = name;
+       init.ops = &clk_gate_ops;
+       init.flags = flags;
+       init.parent_names = (parent_name ? &parent_name: NULL);
+       init.num_parents = (parent_name ? 1 : 0);
+
        /* struct clk_gate assignments */
        gate->reg = reg;
        gate->bit_idx = bit_idx;
        gate->flags = clk_gate_flags;
        gate->lock = lock;
+       gate->hw.init = &init;
 
-       if (parent_name) {
-               gate->parent[0] = kstrdup(parent_name, GFP_KERNEL);
-               if (!gate->parent[0])
-                       goto out;
-       }
+       clk = clk_register(dev, &gate->hw);
+
+       if (IS_ERR(clk))
+               kfree(gate);
 
-       clk = clk_register(dev, name,
-                       &clk_gate_ops, &gate->hw,
-                       gate->parent,
-                       (parent_name ? 1 : 0),
-                       flags);
-       if (clk)
-               return clk;
-out:
-       kfree(gate->parent[0]);
-       kfree(gate);
-
-       return NULL;
+       return clk;
 }
index c71ad1f41a973fbfced6c9d6e603ec38137bf181..fd36a8ea73d9968455b7edc565975c5fa590b88f 100644 (file)
@@ -55,7 +55,6 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
 
        return val;
 }
-EXPORT_SYMBOL_GPL(clk_mux_get_parent);
 
 static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
 {
@@ -82,35 +81,47 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(clk_mux_set_parent);
 
-struct clk_ops clk_mux_ops = {
+const struct clk_ops clk_mux_ops = {
        .get_parent = clk_mux_get_parent,
        .set_parent = clk_mux_set_parent,
 };
 EXPORT_SYMBOL_GPL(clk_mux_ops);
 
 struct clk *clk_register_mux(struct device *dev, const char *name,
-               char **parent_names, u8 num_parents, unsigned long flags,
+               const char **parent_names, u8 num_parents, unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
                u8 clk_mux_flags, spinlock_t *lock)
 {
        struct clk_mux *mux;
+       struct clk *clk;
+       struct clk_init_data init;
 
-       mux = kmalloc(sizeof(struct clk_mux), GFP_KERNEL);
-
+       /* allocate the mux */
+       mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
        if (!mux) {
                pr_err("%s: could not allocate mux clk\n", __func__);
                return ERR_PTR(-ENOMEM);
        }
 
+       init.name = name;
+       init.ops = &clk_mux_ops;
+       init.flags = flags;
+       init.parent_names = parent_names;
+       init.num_parents = num_parents;
+
        /* struct clk_mux assignments */
        mux->reg = reg;
        mux->shift = shift;
        mux->width = width;
        mux->flags = clk_mux_flags;
        mux->lock = lock;
+       mux->hw.init = &init;
+
+       clk = clk_register(dev, &mux->hw);
+
+       if (IS_ERR(clk))
+               kfree(mux);
 
-       return clk_register(dev, name, &clk_mux_ops, &mux->hw,
-                       parent_names, num_parents, flags);
+       return clk;
 }
index 9cf6f59e3e19c34329d0ef991e6e44bec78d4260..687b00d67c8a77a88ad4e135605c1ed11a319efb 100644 (file)
@@ -194,9 +194,8 @@ static int __init clk_debug_init(void)
 late_initcall(clk_debug_init);
 #else
 static inline int clk_debug_register(struct clk *clk) { return 0; }
-#endif /* CONFIG_COMMON_CLK_DEBUG */
+#endif
 
-#ifdef CONFIG_COMMON_CLK_DISABLE_UNUSED
 /* caller must hold prepare_lock */
 static void clk_disable_unused_subtree(struct clk *clk)
 {
@@ -246,9 +245,6 @@ static int clk_disable_unused(void)
        return 0;
 }
 late_initcall(clk_disable_unused);
-#else
-static inline int clk_disable_unused(struct clk *clk) { return 0; }
-#endif /* CONFIG_COMMON_CLK_DISABLE_UNUSED */
 
 /***    helper functions   ***/
 
@@ -287,7 +283,7 @@ unsigned long __clk_get_rate(struct clk *clk)
        unsigned long ret;
 
        if (!clk) {
-               ret = -EINVAL;
+               ret = 0;
                goto out;
        }
 
@@ -297,7 +293,7 @@ unsigned long __clk_get_rate(struct clk *clk)
                goto out;
 
        if (!clk->parent)
-               ret = -ENODEV;
+               ret = 0;
 
 out:
        return ret;
@@ -562,7 +558,7 @@ EXPORT_SYMBOL_GPL(clk_enable);
  * @clk: the clk whose rate is being returned
  *
  * Simply returns the cached rate of the clk.  Does not query the hardware.  If
- * clk is NULL then returns -EINVAL.
+ * clk is NULL then returns 0.
  */
 unsigned long clk_get_rate(struct clk *clk)
 {
@@ -584,18 +580,22 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
  */
 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
 {
-       unsigned long unused;
+       unsigned long parent_rate = 0;
 
        if (!clk)
                return -EINVAL;
 
-       if (!clk->ops->round_rate)
-               return clk->rate;
+       if (!clk->ops->round_rate) {
+               if (clk->flags & CLK_SET_RATE_PARENT)
+                       return __clk_round_rate(clk->parent, rate);
+               else
+                       return clk->rate;
+       }
 
-       if (clk->flags & CLK_SET_RATE_PARENT)
-               return clk->ops->round_rate(clk->hw, rate, &unused);
-       else
-               return clk->ops->round_rate(clk->hw, rate, NULL);
+       if (clk->parent)
+               parent_rate = clk->parent->rate;
+
+       return clk->ops->round_rate(clk->hw, rate, &parent_rate);
 }
 
 /**
@@ -765,25 +765,41 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
 static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
 {
        struct clk *top = clk;
-       unsigned long best_parent_rate = clk->parent->rate;
+       unsigned long best_parent_rate = 0;
        unsigned long new_rate;
 
-       if (!clk->ops->round_rate && !(clk->flags & CLK_SET_RATE_PARENT)) {
-               clk->new_rate = clk->rate;
+       /* sanity */
+       if (IS_ERR_OR_NULL(clk))
+               return NULL;
+
+       /* save parent rate, if it exists */
+       if (clk->parent)
+               best_parent_rate = clk->parent->rate;
+
+       /* never propagate up to the parent */
+       if (!(clk->flags & CLK_SET_RATE_PARENT)) {
+               if (!clk->ops->round_rate) {
+                       clk->new_rate = clk->rate;
+                       return NULL;
+               }
+               new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
+               goto out;
+       }
+
+       /* need clk->parent from here on out */
+       if (!clk->parent) {
+               pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
                return NULL;
        }
 
-       if (!clk->ops->round_rate && (clk->flags & CLK_SET_RATE_PARENT)) {
+       if (!clk->ops->round_rate) {
                top = clk_calc_new_rates(clk->parent, rate);
-               new_rate = clk->new_rate = clk->parent->new_rate;
+               new_rate = clk->parent->new_rate;
 
                goto out;
        }
 
-       if (clk->flags & CLK_SET_RATE_PARENT)
-               new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
-       else
-               new_rate = clk->ops->round_rate(clk->hw, rate, NULL);
+       new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
 
        if (best_parent_rate != clk->parent->rate) {
                top = clk_calc_new_rates(clk->parent, best_parent_rate);
@@ -839,7 +855,7 @@ static void clk_change_rate(struct clk *clk)
        old_rate = clk->rate;
 
        if (clk->ops->set_rate)
-               clk->ops->set_rate(clk->hw, clk->new_rate);
+               clk->ops->set_rate(clk->hw, clk->new_rate, clk->parent->rate);
 
        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw,
@@ -859,38 +875,19 @@ static void clk_change_rate(struct clk *clk)
  * @clk: the clk whose rate is being changed
  * @rate: the new rate for clk
  *
- * In the simplest case clk_set_rate will only change the rate of clk.
- *
- * If clk has the CLK_SET_RATE_GATE flag set and it is enabled this call
- * will fail; only when the clk is disabled will it be able to change
- * its rate.
+ * In the simplest case clk_set_rate will only adjust the rate of clk.
  *
- * Setting the CLK_SET_RATE_PARENT flag allows clk_set_rate to
- * recursively propagate up to clk's parent; whether or not this happens
- * depends on the outcome of clk's .round_rate implementation.  If
- * *parent_rate is 0 after calling .round_rate then upstream parent
- * propagation is ignored.  If *parent_rate comes back with a new rate
- * for clk's parent then we propagate up to clk's parent and set it's
- * rate.  Upward propagation will continue until either a clk does not
- * support the CLK_SET_RATE_PARENT flag or .round_rate stops requesting
- * changes to clk's parent_rate.  If there is a failure during upstream
- * propagation then clk_set_rate will unwind and restore each clk's rate
- * that had been successfully changed.  Afterwards a rate change abort
- * notification will be propagated downstream, starting from the clk
- * that failed.
+ * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
+ * propagate up to clk's parent; whether or not this happens depends on the
+ * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
+ * after calling .round_rate then upstream parent propagation is ignored.  If
+ * *parent_rate comes back with a new rate for clk's parent then we propagate
+ * up to clk's parent and set it's rate.  Upward propagation will continue
+ * until either a clk does not support the CLK_SET_RATE_PARENT flag or
+ * .round_rate stops requesting changes to clk's parent_rate.
  *
- * At the end of all of the rate setting, clk_set_rate internally calls
- * __clk_recalc_rates and propagates the rate changes downstream,
- * starting from the highest clk whose rate was changed.  This has the
- * added benefit of propagating post-rate change notifiers.
- *
- * Note that while post-rate change and rate change abort notifications
- * are guaranteed to be sent to a clk only once per call to
- * clk_set_rate, pre-change notifications will be sent for every clk
- * whose rate is changed.  Stacking pre-change notifications is noisy
- * for the drivers subscribed to them, but this allows drivers to react
- * to intermediate clk rate changes up until the point where the final
- * rate is achieved at the end of upstream propagation.
+ * Rate changes are accomplished via tree traversal that also recalculates the
+ * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
  *
  * Returns 0 on success, -EERROR otherwise.
  */
@@ -906,6 +903,11 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
        if (rate == clk->rate)
                goto out;
 
+       if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        /* calculate new rates and get the topmost changed clock */
        top = clk_calc_new_rates(clk, rate);
        if (!top) {
@@ -1175,40 +1177,41 @@ EXPORT_SYMBOL_GPL(clk_set_parent);
  *
  * Initializes the lists in struct clk, queries the hardware for the
  * parent and rate and sets them both.
- *
- * Any struct clk passed into __clk_init must have the following members
- * populated:
- *     .name
- *     .ops
- *     .hw
- *     .parent_names
- *     .num_parents
- *     .flags
- *
- * Essentially, everything that would normally be passed into clk_register is
- * assumed to be initialized already in __clk_init.  The other members may be
- * populated, but are optional.
- *
- * __clk_init is only exposed via clk-private.h and is intended for use with
- * very large numbers of clocks that need to be statically initialized.  It is
- * a layering violation to include clk-private.h from any code which implements
- * a clock's .ops; as such any statically initialized clock data MUST be in a
- * separate C file from the logic that implements it's operations.
  */
-void __clk_init(struct device *dev, struct clk *clk)
+int __clk_init(struct device *dev, struct clk *clk)
 {
-       int i;
+       int i, ret = 0;
        struct clk *orphan;
        struct hlist_node *tmp, *tmp2;
 
        if (!clk)
-               return;
+               return -EINVAL;
 
        mutex_lock(&prepare_lock);
 
        /* check to see if a clock with this name is already registered */
-       if (__clk_lookup(clk->name))
+       if (__clk_lookup(clk->name)) {
+               pr_debug("%s: clk %s already initialized\n",
+                               __func__, clk->name);
+               ret = -EEXIST;
+               goto out;
+       }
+
+       /* check that clk_ops are sane.  See Documentation/clk.txt */
+       if (clk->ops->set_rate &&
+                       !(clk->ops->round_rate && clk->ops->recalc_rate)) {
+               pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
+                               __func__, clk->name);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (clk->ops->set_parent && !clk->ops->get_parent) {
+               pr_warning("%s: %s must implement .get_parent & .set_parent\n",
+                               __func__, clk->name);
+               ret = -EINVAL;
                goto out;
+       }
 
        /* throw a WARN if any entries in parent_names are NULL */
        for (i = 0; i < clk->num_parents; i++)
@@ -1302,48 +1305,130 @@ void __clk_init(struct device *dev, struct clk *clk)
 out:
        mutex_unlock(&prepare_lock);
 
-       return;
+       return ret;
 }
 
+/**
+ * __clk_register - register a clock and return a cookie.
+ *
+ * Same as clk_register, except that the .clk field inside hw shall point to a
+ * preallocated (generally statically allocated) struct clk. None of the fields
+ * of the struct clk need to be initialized.
+ *
+ * The data pointed to by .init and .clk field shall NOT be marked as init
+ * data.
+ *
+ * __clk_register is only exposed via clk-private.h and is intended for use with
+ * very large numbers of clocks that need to be statically initialized.  It is
+ * a layering violation to include clk-private.h from any code which implements
+ * a clock's .ops; as such any statically initialized clock data MUST be in a
+ * separate C file from the logic that implements it's operations.  Returns 0
+ * on success, otherwise an error code.
+ */
+struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
+{
+       int ret;
+       struct clk *clk;
+
+       clk = hw->clk;
+       clk->name = hw->init->name;
+       clk->ops = hw->init->ops;
+       clk->hw = hw;
+       clk->flags = hw->init->flags;
+       clk->parent_names = hw->init->parent_names;
+       clk->num_parents = hw->init->num_parents;
+
+       ret = __clk_init(dev, clk);
+       if (ret)
+               return ERR_PTR(ret);
+
+       return clk;
+}
+EXPORT_SYMBOL_GPL(__clk_register);
+
 /**
  * clk_register - allocate a new clock, register it and return an opaque cookie
  * @dev: device that is registering this clock
- * @name: clock name
- * @ops: operations this clock supports
  * @hw: link to hardware-specific clock data
- * @parent_names: array of string names for all possible parents
- * @num_parents: number of possible parents
- * @flags: framework-level hints and quirks
  *
  * clk_register is the primary interface for populating the clock tree with new
  * clock nodes.  It returns a pointer to the newly allocated struct clk which
  * cannot be dereferenced by driver code but may be used in conjuction with the
- * rest of the clock API.
+ * rest of the clock API.  In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
  */
-struct clk *clk_register(struct device *dev, const char *name,
-               const struct clk_ops *ops, struct clk_hw *hw,
-               char **parent_names, u8 num_parents, unsigned long flags)
+struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 {
+       int i, ret;
        struct clk *clk;
 
        clk = kzalloc(sizeof(*clk), GFP_KERNEL);
-       if (!clk)
-               return NULL;
+       if (!clk) {
+               pr_err("%s: could not allocate clk\n", __func__);
+               ret = -ENOMEM;
+               goto fail_out;
+       }
 
-       clk->name = name;
-       clk->ops = ops;
+       clk->name = kstrdup(hw->init->name, GFP_KERNEL);
+       if (!clk->name) {
+               pr_err("%s: could not allocate clk->name\n", __func__);
+               ret = -ENOMEM;
+               goto fail_name;
+       }
+       clk->ops = hw->init->ops;
        clk->hw = hw;
-       clk->flags = flags;
-       clk->parent_names = parent_names;
-       clk->num_parents = num_parents;
+       clk->flags = hw->init->flags;
+       clk->num_parents = hw->init->num_parents;
        hw->clk = clk;
 
-       __clk_init(dev, clk);
+       /* allocate local copy in case parent_names is __initdata */
+       clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
+                       GFP_KERNEL);
 
-       return clk;
+       if (!clk->parent_names) {
+               pr_err("%s: could not allocate clk->parent_names\n", __func__);
+               ret = -ENOMEM;
+               goto fail_parent_names;
+       }
+
+
+       /* copy each string name in case parent_names is __initdata */
+       for (i = 0; i < clk->num_parents; i++) {
+               clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
+                                               GFP_KERNEL);
+               if (!clk->parent_names[i]) {
+                       pr_err("%s: could not copy parent_names\n", __func__);
+                       ret = -ENOMEM;
+                       goto fail_parent_names_copy;
+               }
+       }
+
+       ret = __clk_init(dev, clk);
+       if (!ret)
+               return clk;
+
+fail_parent_names_copy:
+       while (--i >= 0)
+               kfree(clk->parent_names[i]);
+       kfree(clk->parent_names);
+fail_parent_names:
+       kfree(clk->name);
+fail_name:
+       kfree(clk);
+fail_out:
+       return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(clk_register);
 
+/**
+ * clk_unregister - unregister a currently registered clock
+ * @clk: clock to unregister
+ *
+ * Currently unimplemented.
+ */
+void clk_unregister(struct clk *clk) {}
+EXPORT_SYMBOL_GPL(clk_unregister);
+
 /***        clk rate change notifiers        ***/
 
 /**
diff --git a/drivers/clk/mxs/Makefile b/drivers/clk/mxs/Makefile
new file mode 100644 (file)
index 0000000..7bedeec
--- /dev/null
@@ -0,0 +1,8 @@
+#
+# Makefile for mxs specific clk
+#
+
+obj-y += clk.o clk-pll.o clk-ref.o clk-div.o clk-frac.o
+
+obj-$(CONFIG_SOC_IMX23) += clk-imx23.o
+obj-$(CONFIG_SOC_IMX28) += clk-imx28.o
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
new file mode 100644 (file)
index 0000000..90e1da9
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_div - mxs integer divider clock
+ * @divider: the parent class
+ * @ops: pointer to clk_ops of parent class
+ * @reg: register address
+ * @busy: busy bit shift
+ *
+ * The mxs divider clock is a subclass of basic clk_divider with an
+ * addtional busy bit.
+ */
+struct clk_div {
+       struct clk_divider divider;
+       const struct clk_ops *ops;
+       void __iomem *reg;
+       u8 busy;
+};
+
+static inline struct clk_div *to_clk_div(struct clk_hw *hw)
+{
+       struct clk_divider *divider = container_of(hw, struct clk_divider, hw);
+
+       return container_of(divider, struct clk_div, divider);
+}
+
+static unsigned long clk_div_recalc_rate(struct clk_hw *hw,
+                                        unsigned long parent_rate)
+{
+       struct clk_div *div = to_clk_div(hw);
+
+       return div->ops->recalc_rate(&div->divider.hw, parent_rate);
+}
+
+static long clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+                              unsigned long *prate)
+{
+       struct clk_div *div = to_clk_div(hw);
+
+       return div->ops->round_rate(&div->divider.hw, rate, prate);
+}
+
+static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+                           unsigned long parent_rate)
+{
+       struct clk_div *div = to_clk_div(hw);
+       int ret;
+
+       ret = div->ops->set_rate(&div->divider.hw, rate, parent_rate);
+       if (!ret)
+               ret = mxs_clk_wait(div->reg, div->busy);
+
+       return ret;
+}
+
+static struct clk_ops clk_div_ops = {
+       .recalc_rate = clk_div_recalc_rate,
+       .round_rate = clk_div_round_rate,
+       .set_rate = clk_div_set_rate,
+};
+
+struct clk *mxs_clk_div(const char *name, const char *parent_name,
+                       void __iomem *reg, u8 shift, u8 width, u8 busy)
+{
+       struct clk_div *div;
+       struct clk *clk;
+       struct clk_init_data init;
+
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div)
+               return ERR_PTR(-ENOMEM);
+
+       init.name = name;
+       init.ops = &clk_div_ops;
+       init.flags = CLK_SET_RATE_PARENT;
+       init.parent_names = (parent_name ? &parent_name: NULL);
+       init.num_parents = (parent_name ? 1 : 0);
+
+       div->reg = reg;
+       div->busy = busy;
+
+       div->divider.reg = reg;
+       div->divider.shift = shift;
+       div->divider.width = width;
+       div->divider.flags = CLK_DIVIDER_ONE_BASED;
+       div->divider.lock = &mxs_lock;
+       div->divider.hw.init = &init;
+       div->ops = &clk_divider_ops;
+
+       clk = clk_register(NULL, &div->divider.hw);
+       if (IS_ERR(clk))
+               kfree(div);
+
+       return clk;
+}
diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c
new file mode 100644 (file)
index 0000000..e6aa6b5
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_frac - mxs fractional divider clock
+ * @hw: clk_hw for the fractional divider clock
+ * @reg: register address
+ * @shift: the divider bit shift
+ * @width: the divider bit width
+ * @busy: busy bit shift
+ *
+ * The clock is an adjustable fractional divider with a busy bit to wait
+ * when the divider is adjusted.
+ */
+struct clk_frac {
+       struct clk_hw hw;
+       void __iomem *reg;
+       u8 shift;
+       u8 width;
+       u8 busy;
+};
+
+#define to_clk_frac(_hw) container_of(_hw, struct clk_frac, hw)
+
+static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
+                                         unsigned long parent_rate)
+{
+       struct clk_frac *frac = to_clk_frac(hw);
+       u32 div;
+
+       div = readl_relaxed(frac->reg) >> frac->shift;
+       div &= (1 << frac->width) - 1;
+
+       return (parent_rate >> frac->width) * div;
+}
+
+static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long *prate)
+{
+       struct clk_frac *frac = to_clk_frac(hw);
+       unsigned long parent_rate = *prate;
+       u32 div;
+       u64 tmp;
+
+       if (rate > parent_rate)
+               return -EINVAL;
+
+       tmp = rate;
+       tmp <<= frac->width;
+       do_div(tmp, parent_rate);
+       div = tmp;
+
+       if (!div)
+               return -EINVAL;
+
+       return (parent_rate >> frac->width) * div;
+}
+
+static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
+                            unsigned long parent_rate)
+{
+       struct clk_frac *frac = to_clk_frac(hw);
+       unsigned long flags;
+       u32 div, val;
+       u64 tmp;
+
+       if (rate > parent_rate)
+               return -EINVAL;
+
+       tmp = rate;
+       tmp <<= frac->width;
+       do_div(tmp, parent_rate);
+       div = tmp;
+
+       if (!div)
+               return -EINVAL;
+
+       spin_lock_irqsave(&mxs_lock, flags);
+
+       val = readl_relaxed(frac->reg);
+       val &= ~(((1 << frac->width) - 1) << frac->shift);
+       val |= div << frac->shift;
+       writel_relaxed(val, frac->reg);
+
+       spin_unlock_irqrestore(&mxs_lock, flags);
+
+       return mxs_clk_wait(frac->reg, frac->busy);
+}
+
+static struct clk_ops clk_frac_ops = {
+       .recalc_rate = clk_frac_recalc_rate,
+       .round_rate = clk_frac_round_rate,
+       .set_rate = clk_frac_set_rate,
+};
+
+struct clk *mxs_clk_frac(const char *name, const char *parent_name,
+                        void __iomem *reg, u8 shift, u8 width, u8 busy)
+{
+       struct clk_frac *frac;
+       struct clk *clk;
+       struct clk_init_data init;
+
+       frac = kzalloc(sizeof(*frac), GFP_KERNEL);
+       if (!frac)
+               return ERR_PTR(-ENOMEM);
+
+       init.name = name;
+       init.ops = &clk_frac_ops;
+       init.flags = CLK_SET_RATE_PARENT;
+       init.parent_names = (parent_name ? &parent_name: NULL);
+       init.num_parents = (parent_name ? 1 : 0);
+
+       frac->reg = reg;
+       frac->shift = shift;
+       frac->width = width;
+       frac->busy = busy;
+       frac->hw.init = &init;
+
+       clk = clk_register(NULL, &frac->hw);
+       if (IS_ERR(clk))
+               kfree(frac);
+
+       return clk;
+}
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
new file mode 100644 (file)
index 0000000..f7be225
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <mach/common.h>
+#include <mach/mx23.h>
+#include "clk.h"
+
+#define DIGCTRL                        MX23_IO_ADDRESS(MX23_DIGCTL_BASE_ADDR)
+#define CLKCTRL                        MX23_IO_ADDRESS(MX23_CLKCTRL_BASE_ADDR)
+#define PLLCTRL0               (CLKCTRL + 0x0000)
+#define CPU                    (CLKCTRL + 0x0020)
+#define HBUS                   (CLKCTRL + 0x0030)
+#define XBUS                   (CLKCTRL + 0x0040)
+#define XTAL                   (CLKCTRL + 0x0050)
+#define PIX                    (CLKCTRL + 0x0060)
+#define SSP                    (CLKCTRL + 0x0070)
+#define GPMI                   (CLKCTRL + 0x0080)
+#define SPDIF                  (CLKCTRL + 0x0090)
+#define EMI                    (CLKCTRL + 0x00a0)
+#define SAIF                   (CLKCTRL + 0x00c0)
+#define TV                     (CLKCTRL + 0x00d0)
+#define ETM                    (CLKCTRL + 0x00e0)
+#define FRAC                   (CLKCTRL + 0x00f0)
+#define CLKSEQ                 (CLKCTRL + 0x0110)
+
+#define BP_CPU_INTERRUPT_WAIT  12
+#define BP_CLKSEQ_BYPASS_SAIF  0
+#define BP_CLKSEQ_BYPASS_SSP   5
+#define BP_SAIF_DIV_FRAC_EN    16
+#define BP_FRAC_IOFRAC         24
+
+static void __init clk_misc_init(void)
+{
+       u32 val;
+
+       /* Gate off cpu clock in WFI for power saving */
+       __mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);
+
+       /* Clear BYPASS for SAIF */
+       __mxs_clrl(1 << BP_CLKSEQ_BYPASS_SAIF, CLKSEQ);
+
+       /* SAIF has to use frac div for functional operation */
+       val = readl_relaxed(SAIF);
+       val |= 1 << BP_SAIF_DIV_FRAC_EN;
+       writel_relaxed(val, SAIF);
+
+       /*
+        * Source ssp clock from ref_io than ref_xtal,
+        * as ref_xtal only provides 24 MHz as maximum.
+        */
+       __mxs_clrl(1 << BP_CLKSEQ_BYPASS_SSP, CLKSEQ);
+
+       /*
+        * 480 MHz seems too high to be ssp clock source directly,
+        * so set frac to get a 288 MHz ref_io.
+        */
+       __mxs_clrl(0x3f << BP_FRAC_IOFRAC, FRAC);
+       __mxs_setl(30 << BP_FRAC_IOFRAC, FRAC);
+}
+
+static struct clk_lookup uart_lookups[] __initdata = {
+       { .dev_id = "duart", },
+       { .dev_id = "mxs-auart.0", },
+       { .dev_id = "mxs-auart.1", },
+       { .dev_id = "8006c000.serial", },
+       { .dev_id = "8006e000.serial", },
+       { .dev_id = "80070000.serial", },
+};
+
+static struct clk_lookup hbus_lookups[] __initdata = {
+       { .dev_id = "imx23-dma-apbh", },
+       { .dev_id = "80004000.dma-apbh", },
+};
+
+static struct clk_lookup xbus_lookups[] __initdata = {
+       { .dev_id = "duart", .con_id = "apb_pclk"},
+       { .dev_id = "80070000.serial", .con_id = "apb_pclk"},
+       { .dev_id = "imx23-dma-apbx", },
+       { .dev_id = "80024000.dma-apbx", },
+};
+
+static struct clk_lookup ssp_lookups[] __initdata = {
+       { .dev_id = "imx23-mmc.0", },
+       { .dev_id = "imx23-mmc.1", },
+       { .dev_id = "80010000.ssp", },
+       { .dev_id = "80034000.ssp", },
+};
+
+static struct clk_lookup lcdif_lookups[] __initdata = {
+       { .dev_id = "imx23-fb", },
+       { .dev_id = "80030000.lcdif", },
+};
+
+static struct clk_lookup gpmi_lookups[] __initdata = {
+       { .dev_id = "imx23-gpmi-nand", },
+       { .dev_id = "8000c000.gpmi", },
+};
+
+static const char *sel_pll[]  __initconst = { "pll", "ref_xtal", };
+static const char *sel_cpu[]  __initconst = { "ref_cpu", "ref_xtal", };
+static const char *sel_pix[]  __initconst = { "ref_pix", "ref_xtal", };
+static const char *sel_io[]   __initconst = { "ref_io", "ref_xtal", };
+static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
+static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", };
+
+enum imx23_clk {
+       ref_xtal, pll, ref_cpu, ref_emi, ref_pix, ref_io, saif_sel,
+       lcdif_sel, gpmi_sel, ssp_sel, emi_sel, cpu, etm_sel, cpu_pll,
+       cpu_xtal, hbus, xbus, lcdif_div, ssp_div, gpmi_div, emi_pll,
+       emi_xtal, etm_div, saif_div, clk32k_div, rtc, adc, spdif_div,
+       clk32k, dri, pwm, filt, uart, ssp, gpmi, spdif, emi, saif,
+       lcdif, etm, usb, usb_pwr,
+       clk_max
+};
+
+static struct clk *clks[clk_max];
+
+static enum imx23_clk clks_init_on[] __initdata = {
+       cpu, hbus, xbus, emi, uart,
+};
+
+int __init mx23_clocks_init(void)
+{
+       int i;
+
+       clk_misc_init();
+
+       clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000);
+       clks[pll] = mxs_clk_pll("pll", "ref_xtal", PLLCTRL0, 16, 480000000);
+       clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll", FRAC, 0);
+       clks[ref_emi] = mxs_clk_ref("ref_emi", "pll", FRAC, 1);
+       clks[ref_pix] = mxs_clk_ref("ref_pix", "pll", FRAC, 2);
+       clks[ref_io] = mxs_clk_ref("ref_io", "pll", FRAC, 3);
+       clks[saif_sel] = mxs_clk_mux("saif_sel", CLKSEQ, 0, 1, sel_pll, ARRAY_SIZE(sel_pll));
+       clks[lcdif_sel] = mxs_clk_mux("lcdif_sel", CLKSEQ, 1, 1, sel_pix, ARRAY_SIZE(sel_pix));
+       clks[gpmi_sel] = mxs_clk_mux("gpmi_sel", CLKSEQ, 4, 1, sel_io, ARRAY_SIZE(sel_io));
+       clks[ssp_sel] = mxs_clk_mux("ssp_sel", CLKSEQ, 5, 1, sel_io, ARRAY_SIZE(sel_io));
+       clks[emi_sel] = mxs_clk_mux("emi_sel", CLKSEQ, 6, 1, emi_sels, ARRAY_SIZE(emi_sels));
+       clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 7, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
+       clks[etm_sel] = mxs_clk_mux("etm_sel", CLKSEQ, 8, 1, sel_cpu, ARRAY_SIZE(sel_cpu));
+       clks[cpu_pll] = mxs_clk_div("cpu_pll", "ref_cpu", CPU, 0, 6, 28);
+       clks[cpu_xtal] = mxs_clk_div("cpu_xtal", "ref_xtal", CPU, 16, 10, 29);
+       clks[hbus] = mxs_clk_div("hbus", "cpu", HBUS, 0, 5, 29);
+       clks[xbus] = mxs_clk_div("xbus", "ref_xtal", XBUS, 0, 10, 31);
+       clks[lcdif_div] = mxs_clk_div("lcdif_div", "lcdif_sel", PIX, 0, 12, 29);
+       clks[ssp_div] = mxs_clk_div("ssp_div", "ssp_sel", SSP, 0, 9, 29);
+       clks[gpmi_div] = mxs_clk_div("gpmi_div", "gpmi_sel", GPMI, 0, 10, 29);
+       clks[emi_pll] = mxs_clk_div("emi_pll", "ref_emi", EMI, 0, 6, 28);
+       clks[emi_xtal] = mxs_clk_div("emi_xtal", "ref_xtal", EMI, 8, 4, 29);
+       clks[etm_div] = mxs_clk_div("etm_div", "etm_sel", ETM, 0, 6, 29);
+       clks[saif_div] = mxs_clk_frac("saif_div", "saif_sel", SAIF, 0, 16, 29);
+       clks[clk32k_div] = mxs_clk_fixed_factor("clk32k_div", "ref_xtal", 1, 750);
+       clks[rtc] = mxs_clk_fixed_factor("rtc", "ref_xtal", 1, 768);
+       clks[adc] = mxs_clk_fixed_factor("adc", "clk32k", 1, 16);
+       clks[spdif_div] = mxs_clk_fixed_factor("spdif_div", "pll", 1, 4);
+       clks[clk32k] = mxs_clk_gate("clk32k", "clk32k_div", XTAL, 26);
+       clks[dri] = mxs_clk_gate("dri", "ref_xtal", XTAL, 28);
+       clks[pwm] = mxs_clk_gate("pwm", "ref_xtal", XTAL, 29);
+       clks[filt] = mxs_clk_gate("filt", "ref_xtal", XTAL, 30);
+       clks[uart] = mxs_clk_gate("uart", "ref_xtal", XTAL, 31);
+       clks[ssp] = mxs_clk_gate("ssp", "ssp_div", SSP, 31);
+       clks[gpmi] = mxs_clk_gate("gpmi", "gpmi_div", GPMI, 31);
+       clks[spdif] = mxs_clk_gate("spdif", "spdif_div", SPDIF, 31);
+       clks[emi] = mxs_clk_gate("emi", "emi_sel", EMI, 31);
+       clks[saif] = mxs_clk_gate("saif", "saif_div", SAIF, 31);
+       clks[lcdif] = mxs_clk_gate("lcdif", "lcdif_div", PIX, 31);
+       clks[etm] = mxs_clk_gate("etm", "etm_div", ETM, 31);
+       clks[usb] = mxs_clk_gate("usb", "usb_pwr", DIGCTRL, 2);
+       clks[usb_pwr] = clk_register_gate(NULL, "usb_pwr", "pll", 0, PLLCTRL0, 18, 0, &mxs_lock);
+
+       for (i = 0; i < ARRAY_SIZE(clks); i++)
+               if (IS_ERR(clks[i])) {
+                       pr_err("i.MX23 clk %d: register failed with %ld\n",
+                               i, PTR_ERR(clks[i]));
+                       return PTR_ERR(clks[i]);
+               }
+
+       clk_register_clkdev(clks[clk32k], NULL, "timrot");
+       clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
+       clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
+       clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
+       clk_register_clkdevs(clks[ssp], ssp_lookups, ARRAY_SIZE(ssp_lookups));
+       clk_register_clkdevs(clks[gpmi], gpmi_lookups, ARRAY_SIZE(gpmi_lookups));
+       clk_register_clkdevs(clks[lcdif], lcdif_lookups, ARRAY_SIZE(lcdif_lookups));
+
+       for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+               clk_prepare_enable(clks[clks_init_on[i]]);
+
+       mxs_timer_init(MX23_INT_TIMER0);
+
+       return 0;
+}
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
new file mode 100644 (file)
index 0000000..2826a26
--- /dev/null
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <mach/common.h>
+#include <mach/mx28.h>
+#include "clk.h"
+
+#define CLKCTRL                        MX28_IO_ADDRESS(MX28_CLKCTRL_BASE_ADDR)
+#define PLL0CTRL0              (CLKCTRL + 0x0000)
+#define PLL1CTRL0              (CLKCTRL + 0x0020)
+#define PLL2CTRL0              (CLKCTRL + 0x0040)
+#define CPU                    (CLKCTRL + 0x0050)
+#define HBUS                   (CLKCTRL + 0x0060)
+#define XBUS                   (CLKCTRL + 0x0070)
+#define XTAL                   (CLKCTRL + 0x0080)
+#define SSP0                   (CLKCTRL + 0x0090)
+#define SSP1                   (CLKCTRL + 0x00a0)
+#define SSP2                   (CLKCTRL + 0x00b0)
+#define SSP3                   (CLKCTRL + 0x00c0)
+#define GPMI                   (CLKCTRL + 0x00d0)
+#define SPDIF                  (CLKCTRL + 0x00e0)
+#define EMI                    (CLKCTRL + 0x00f0)
+#define SAIF0                  (CLKCTRL + 0x0100)
+#define SAIF1                  (CLKCTRL + 0x0110)
+#define LCDIF                  (CLKCTRL + 0x0120)
+#define ETM                    (CLKCTRL + 0x0130)
+#define ENET                   (CLKCTRL + 0x0140)
+#define FLEXCAN                        (CLKCTRL + 0x0160)
+#define FRAC0                  (CLKCTRL + 0x01b0)
+#define FRAC1                  (CLKCTRL + 0x01c0)
+#define CLKSEQ                 (CLKCTRL + 0x01d0)
+
+#define BP_CPU_INTERRUPT_WAIT  12
+#define BP_SAIF_DIV_FRAC_EN    16
+#define BP_ENET_DIV_TIME       21
+#define BP_ENET_SLEEP          31
+#define BP_CLKSEQ_BYPASS_SAIF0 0
+#define BP_CLKSEQ_BYPASS_SSP0  3
+#define BP_FRAC0_IO1FRAC       16
+#define BP_FRAC0_IO0FRAC       24
+
+#define DIGCTRL                        MX28_IO_ADDRESS(MX28_DIGCTL_BASE_ADDR)
+#define BP_SAIF_CLKMUX         10
+
+/*
+ * HW_SAIF_CLKMUX_SEL:
+ *  DIRECT(0x0): SAIF0 clock pins selected for SAIF0 input clocks, and SAIF1
+ *             clock pins selected for SAIF1 input clocks.
+ *  CROSSINPUT(0x1): SAIF1 clock inputs selected for SAIF0 input clocks, and
+ *             SAIF0 clock inputs selected for SAIF1 input clocks.
+ *  EXTMSTR0(0x2): SAIF0 clock pin selected for both SAIF0 and SAIF1 input
+ *             clocks.
+ *  EXTMSTR1(0x3): SAIF1 clock pin selected for both SAIF0 and SAIF1 input
+ *             clocks.
+ */
+int mxs_saif_clkmux_select(unsigned int clkmux)
+{
+       /* Only the four HW_SAIF_CLKMUX_SEL modes documented above are valid */
+       if (clkmux > 0x3)
+               return -EINVAL;
+
+       /* Clear the 2-bit mux field, then program the requested mode */
+       __mxs_clrl(0x3 << BP_SAIF_CLKMUX, DIGCTRL);
+       __mxs_setl(clkmux << BP_SAIF_CLKMUX, DIGCTRL);
+
+       return 0;
+}
+
+/* One-time i.MX28 clock-control register fixups applied before registration */
+static void __init clk_misc_init(void)
+{
+       u32 val;
+
+       /* Gate off cpu clock in WFI for power saving */
+       __mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);
+
+       /* 0 is a bad default value for a divider */
+       __mxs_setl(1 << BP_ENET_DIV_TIME, ENET);
+
+       /* Clear BYPASS for SAIF */
+       __mxs_clrl(0x3 << BP_CLKSEQ_BYPASS_SAIF0, CLKSEQ);
+
+       /* SAIF has to use frac div for functional operation */
+       val = readl_relaxed(SAIF0);
+       val |= 1 << BP_SAIF_DIV_FRAC_EN;
+       writel_relaxed(val, SAIF0);
+
+       val = readl_relaxed(SAIF1);
+       val |= 1 << BP_SAIF_DIV_FRAC_EN;
+       writel_relaxed(val, SAIF1);
+
+       /* Extra fec clock setting: take the ENET block out of sleep */
+       val = readl_relaxed(ENET);
+       val &= ~(1 << BP_ENET_SLEEP);
+       writel_relaxed(val, ENET);
+
+       /*
+        * Source ssp clock from ref_io rather than ref_xtal,
+        * as ref_xtal only provides 24 MHz as maximum.
+        */
+       __mxs_clrl(0xf << BP_CLKSEQ_BYPASS_SSP0, CLKSEQ);
+
+       /*
+        * 480 MHz seems too high to be ssp clock source directly,
+        * so set frac0 to get a 288 MHz ref_io0.
+        */
+       val = readl_relaxed(FRAC0);
+       val &= ~(0x3f << BP_FRAC0_IO0FRAC);
+       val |= 30 << BP_FRAC0_IO0FRAC;
+       writel_relaxed(val, FRAC0);
+}
+
+static struct clk_lookup uart_lookups[] __initdata = {
+       { .dev_id = "duart", },
+       { .dev_id = "mxs-auart.0", },
+       { .dev_id = "mxs-auart.1", },
+       { .dev_id = "mxs-auart.2", },
+       { .dev_id = "mxs-auart.3", },
+       { .dev_id = "mxs-auart.4", },
+       { .dev_id = "8006a000.serial", },
+       { .dev_id = "8006c000.serial", },
+       { .dev_id = "8006e000.serial", },
+       { .dev_id = "80070000.serial", },
+       { .dev_id = "80072000.serial", },
+       { .dev_id = "80074000.serial", },
+};
+
+static struct clk_lookup hbus_lookups[] __initdata = {
+       { .dev_id = "imx28-dma-apbh", },
+       { .dev_id = "80004000.dma-apbh", },
+};
+
+static struct clk_lookup xbus_lookups[] __initdata = {
+       { .dev_id = "duart", .con_id = "apb_pclk"},
+       { .dev_id = "80074000.serial", .con_id = "apb_pclk"},
+       { .dev_id = "imx28-dma-apbx", },
+       { .dev_id = "80024000.dma-apbx", },
+};
+
+static struct clk_lookup ssp0_lookups[] __initdata = {
+       { .dev_id = "imx28-mmc.0", },
+       { .dev_id = "80010000.ssp", },
+};
+
+static struct clk_lookup ssp1_lookups[] __initdata = {
+       { .dev_id = "imx28-mmc.1", },
+       { .dev_id = "80012000.ssp", },
+};
+
+static struct clk_lookup ssp2_lookups[] __initdata = {
+       { .dev_id = "imx28-mmc.2", },
+       { .dev_id = "80014000.ssp", },
+};
+
+static struct clk_lookup ssp3_lookups[] __initdata = {
+       { .dev_id = "imx28-mmc.3", },
+       { .dev_id = "80016000.ssp", },
+};
+
+static struct clk_lookup lcdif_lookups[] __initdata = {
+       { .dev_id = "imx28-fb", },
+       { .dev_id = "80030000.lcdif", },
+};
+
+static struct clk_lookup gpmi_lookups[] __initdata = {
+       { .dev_id = "imx28-gpmi-nand", },
+       { .dev_id = "8000c000.gpmi", },
+};
+
+static struct clk_lookup fec_lookups[] __initdata = {
+       { .dev_id = "imx28-fec.0", },
+       { .dev_id = "imx28-fec.1", },
+       { .dev_id = "800f0000.ethernet", },
+       { .dev_id = "800f4000.ethernet", },
+};
+
+static struct clk_lookup can0_lookups[] __initdata = {
+       { .dev_id = "flexcan.0", },
+       { .dev_id = "80032000.can", },
+};
+
+static struct clk_lookup can1_lookups[] __initdata = {
+       { .dev_id = "flexcan.1", },
+       { .dev_id = "80034000.can", },
+};
+
+static struct clk_lookup saif0_lookups[] __initdata = {
+       { .dev_id = "mxs-saif.0", },
+       { .dev_id = "80042000.saif", },
+};
+
+static struct clk_lookup saif1_lookups[] __initdata = {
+       { .dev_id = "mxs-saif.1", },
+       { .dev_id = "80046000.saif", },
+};
+
+static const char *sel_cpu[]  __initconst = { "ref_cpu", "ref_xtal", };
+static const char *sel_io0[]  __initconst = { "ref_io0", "ref_xtal", };
+static const char *sel_io1[]  __initconst = { "ref_io1", "ref_xtal", };
+static const char *sel_pix[]  __initconst = { "ref_pix", "ref_xtal", };
+static const char *sel_gpmi[] __initconst = { "ref_gpmi", "ref_xtal", };
+static const char *sel_pll0[] __initconst = { "pll0", "ref_xtal", };
+static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
+static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", };
+static const char *ptp_sels[] __initconst = { "ref_xtal", "pll0", };
+
+/*
+ * Index of every i.MX28 clock in the clks[] array below; clk_max is the
+ * array size, not a clock.
+ */
+enum imx28_clk {
+       ref_xtal, pll0, pll1, pll2, ref_cpu, ref_emi, ref_io0, ref_io1,
+       ref_pix, ref_hsadc, ref_gpmi, saif0_sel, saif1_sel, gpmi_sel,
+       ssp0_sel, ssp1_sel, ssp2_sel, ssp3_sel, emi_sel, etm_sel,
+       lcdif_sel, cpu, ptp_sel, cpu_pll, cpu_xtal, hbus, xbus,
+       ssp0_div, ssp1_div, ssp2_div, ssp3_div, gpmi_div, emi_pll,
+       emi_xtal, lcdif_div, etm_div, ptp, saif0_div, saif1_div,
+       clk32k_div, rtc, lradc, spdif_div, clk32k, pwm, uart, ssp0,
+       ssp1, ssp2, ssp3, gpmi, spdif, emi, saif0, saif1, lcdif, etm,
+       fec, can0, can1, usb0, usb1, usb0_pwr, usb1_pwr, enet_out,
+       clk_max
+};
+
+static struct clk *clks[clk_max];
+
+static enum imx28_clk clks_init_on[] __initdata = {
+       cpu, hbus, xbus, emi, uart,
+};
+
+/*
+ * mx28_clocks_init - register the whole i.MX28 clock tree
+ *
+ * Applies register fixups, registers every clock in enum imx28_clk order
+ * (plls, fractional refs, muxes, dividers, fixed factors, gates), bails
+ * out with the first registration error, wires up clkdev lookups, enables
+ * the always-on clocks and finally starts the system timer.
+ *
+ * Returns 0 on success or the PTR_ERR() of the first failed clock.
+ */
+int __init mx28_clocks_init(void)
+{
+       int i;
+
+       clk_misc_init();
+
+       clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000);
+       clks[pll0] = mxs_clk_pll("pll0", "ref_xtal", PLL0CTRL0, 17, 480000000);
+       clks[pll1] = mxs_clk_pll("pll1", "ref_xtal", PLL1CTRL0, 17, 480000000);
+       clks[pll2] = mxs_clk_pll("pll2", "ref_xtal", PLL2CTRL0, 23, 50000000);
+       clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll0", FRAC0, 0);
+       clks[ref_emi] = mxs_clk_ref("ref_emi", "pll0", FRAC0, 1);
+       clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 2);
+       clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 3);
+       clks[ref_pix] = mxs_clk_ref("ref_pix", "pll0", FRAC1, 0);
+       clks[ref_hsadc] = mxs_clk_ref("ref_hsadc", "pll0", FRAC1, 1);
+       clks[ref_gpmi] = mxs_clk_ref("ref_gpmi", "pll0", FRAC1, 2);
+       clks[saif0_sel] = mxs_clk_mux("saif0_sel", CLKSEQ, 0, 1, sel_pll0, ARRAY_SIZE(sel_pll0));
+       clks[saif1_sel] = mxs_clk_mux("saif1_sel", CLKSEQ, 1, 1, sel_pll0, ARRAY_SIZE(sel_pll0));
+       clks[gpmi_sel] = mxs_clk_mux("gpmi_sel", CLKSEQ, 2, 1, sel_gpmi, ARRAY_SIZE(sel_gpmi));
+       clks[ssp0_sel] = mxs_clk_mux("ssp0_sel", CLKSEQ, 3, 1, sel_io0, ARRAY_SIZE(sel_io0));
+       clks[ssp1_sel] = mxs_clk_mux("ssp1_sel", CLKSEQ, 4, 1, sel_io0, ARRAY_SIZE(sel_io0));
+       clks[ssp2_sel] = mxs_clk_mux("ssp2_sel", CLKSEQ, 5, 1, sel_io1, ARRAY_SIZE(sel_io1));
+       clks[ssp3_sel] = mxs_clk_mux("ssp3_sel", CLKSEQ, 6, 1, sel_io1, ARRAY_SIZE(sel_io1));
+       clks[emi_sel] = mxs_clk_mux("emi_sel", CLKSEQ, 7, 1, emi_sels, ARRAY_SIZE(emi_sels));
+       clks[etm_sel] = mxs_clk_mux("etm_sel", CLKSEQ, 8, 1, sel_cpu, ARRAY_SIZE(sel_cpu));
+       clks[lcdif_sel] = mxs_clk_mux("lcdif_sel", CLKSEQ, 14, 1, sel_pix, ARRAY_SIZE(sel_pix));
+       clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 18, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
+       clks[ptp_sel] = mxs_clk_mux("ptp_sel", ENET, 19, 1, ptp_sels, ARRAY_SIZE(ptp_sels));
+       clks[cpu_pll] = mxs_clk_div("cpu_pll", "ref_cpu", CPU, 0, 6, 28);
+       clks[cpu_xtal] = mxs_clk_div("cpu_xtal", "ref_xtal", CPU, 16, 10, 29);
+       clks[hbus] = mxs_clk_div("hbus", "cpu", HBUS, 0, 5, 31);
+       clks[xbus] = mxs_clk_div("xbus", "ref_xtal", XBUS, 0, 10, 31);
+       clks[ssp0_div] = mxs_clk_div("ssp0_div", "ssp0_sel", SSP0, 0, 9, 29);
+       clks[ssp1_div] = mxs_clk_div("ssp1_div", "ssp1_sel", SSP1, 0, 9, 29);
+       clks[ssp2_div] = mxs_clk_div("ssp2_div", "ssp2_sel", SSP2, 0, 9, 29);
+       clks[ssp3_div] = mxs_clk_div("ssp3_div", "ssp3_sel", SSP3, 0, 9, 29);
+       clks[gpmi_div] = mxs_clk_div("gpmi_div", "gpmi_sel", GPMI, 0, 10, 29);
+       clks[emi_pll] = mxs_clk_div("emi_pll", "ref_emi", EMI, 0, 6, 28);
+       clks[emi_xtal] = mxs_clk_div("emi_xtal", "ref_xtal", EMI, 8, 4, 29);
+       clks[lcdif_div] = mxs_clk_div("lcdif_div", "lcdif_sel", LCDIF, 0, 13, 29);
+       clks[etm_div] = mxs_clk_div("etm_div", "etm_sel", ETM, 0, 7, 29);
+       clks[ptp] = mxs_clk_div("ptp", "ptp_sel", ENET, 21, 6, 27);
+       clks[saif0_div] = mxs_clk_frac("saif0_div", "saif0_sel", SAIF0, 0, 16, 29);
+       clks[saif1_div] = mxs_clk_frac("saif1_div", "saif1_sel", SAIF1, 0, 16, 29);
+       clks[clk32k_div] = mxs_clk_fixed_factor("clk32k_div", "ref_xtal", 1, 750);
+       clks[rtc] = mxs_clk_fixed_factor("rtc", "ref_xtal", 1, 768);
+       clks[lradc] = mxs_clk_fixed_factor("lradc", "clk32k", 1, 16);
+       clks[spdif_div] = mxs_clk_fixed_factor("spdif_div", "pll0", 1, 4);
+       clks[clk32k] = mxs_clk_gate("clk32k", "clk32k_div", XTAL, 26);
+       clks[pwm] = mxs_clk_gate("pwm", "ref_xtal", XTAL, 29);
+       clks[uart] = mxs_clk_gate("uart", "ref_xtal", XTAL, 31);
+       clks[ssp0] = mxs_clk_gate("ssp0", "ssp0_div", SSP0, 31);
+       clks[ssp1] = mxs_clk_gate("ssp1", "ssp1_div", SSP1, 31);
+       clks[ssp2] = mxs_clk_gate("ssp2", "ssp2_div", SSP2, 31);
+       clks[ssp3] = mxs_clk_gate("ssp3", "ssp3_div", SSP3, 31);
+       clks[gpmi] = mxs_clk_gate("gpmi", "gpmi_div", GPMI, 31);
+       clks[spdif] = mxs_clk_gate("spdif", "spdif_div", SPDIF, 31);
+       clks[emi] = mxs_clk_gate("emi", "emi_sel", EMI, 31);
+       clks[saif0] = mxs_clk_gate("saif0", "saif0_div", SAIF0, 31);
+       clks[saif1] = mxs_clk_gate("saif1", "saif1_div", SAIF1, 31);
+       clks[lcdif] = mxs_clk_gate("lcdif", "lcdif_div", LCDIF, 31);
+       clks[etm] = mxs_clk_gate("etm", "etm_div", ETM, 31);
+       clks[fec] = mxs_clk_gate("fec", "hbus", ENET, 30);
+       clks[can0] = mxs_clk_gate("can0", "ref_xtal", FLEXCAN, 30);
+       clks[can1] = mxs_clk_gate("can1", "ref_xtal", FLEXCAN, 28);
+       clks[usb0] = mxs_clk_gate("usb0", "usb0_pwr", DIGCTRL, 2);
+       clks[usb1] = mxs_clk_gate("usb1", "usb1_pwr", DIGCTRL, 16);
+       clks[usb0_pwr] = clk_register_gate(NULL, "usb0_pwr", "pll0", 0, PLL0CTRL0, 18, 0, &mxs_lock);
+       clks[usb1_pwr] = clk_register_gate(NULL, "usb1_pwr", "pll1", 0, PLL1CTRL0, 18, 0, &mxs_lock);
+       clks[enet_out] = clk_register_gate(NULL, "enet_out", "pll2", 0, ENET, 18, 0, &mxs_lock);
+
+       /* Abort on the first clock that failed to register */
+       for (i = 0; i < ARRAY_SIZE(clks); i++)
+               if (IS_ERR(clks[i])) {
+                       pr_err("i.MX28 clk %d: register failed with %ld\n",
+                               i, PTR_ERR(clks[i]));
+                       return PTR_ERR(clks[i]);
+               }
+
+       /* clkdev lookups so devices can clk_get() their clocks by dev_id */
+       clk_register_clkdev(clks[clk32k], NULL, "timrot");
+       clk_register_clkdev(clks[enet_out], NULL, "enet_out");
+       clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
+       clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
+       clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
+       clk_register_clkdevs(clks[ssp0], ssp0_lookups, ARRAY_SIZE(ssp0_lookups));
+       clk_register_clkdevs(clks[ssp1], ssp1_lookups, ARRAY_SIZE(ssp1_lookups));
+       clk_register_clkdevs(clks[ssp2], ssp2_lookups, ARRAY_SIZE(ssp2_lookups));
+       clk_register_clkdevs(clks[ssp3], ssp3_lookups, ARRAY_SIZE(ssp3_lookups));
+       clk_register_clkdevs(clks[gpmi], gpmi_lookups, ARRAY_SIZE(gpmi_lookups));
+       clk_register_clkdevs(clks[saif0], saif0_lookups, ARRAY_SIZE(saif0_lookups));
+       clk_register_clkdevs(clks[saif1], saif1_lookups, ARRAY_SIZE(saif1_lookups));
+       clk_register_clkdevs(clks[lcdif], lcdif_lookups, ARRAY_SIZE(lcdif_lookups));
+       clk_register_clkdevs(clks[fec], fec_lookups, ARRAY_SIZE(fec_lookups));
+       clk_register_clkdevs(clks[can0], can0_lookups, ARRAY_SIZE(can0_lookups));
+       clk_register_clkdevs(clks[can1], can1_lookups, ARRAY_SIZE(can1_lookups));
+
+       /* Clocks that must stay on from boot (cpu, buses, emi, uart) */
+       for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+               clk_prepare_enable(clks[clks_init_on[i]]);
+
+       mxs_timer_init(MX28_INT_TIMER0);
+
+       return 0;
+}
diff --git a/drivers/clk/mxs/clk-pll.c b/drivers/clk/mxs/clk-pll.c
new file mode 100644 (file)
index 0000000..fadae41
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_pll - mxs pll clock
+ * @hw: clk_hw for the pll
+ * @base: base address of the pll
+ * @power: the shift of power bit
+ * @rate: the clock rate of the pll
+ *
+ * The mxs pll is a fixed rate clock with power and gate control,
+ * and the shift of gate bit is always 31.
+ */
+struct clk_pll {
+       struct clk_hw hw;
+       void __iomem *base;
+       u8 power;
+       unsigned long rate;
+};
+
+#define to_clk_pll(_hw) container_of(_hw, struct clk_pll, hw)
+
+/* Power up the pll via the SET register of its power bit */
+static int clk_pll_prepare(struct clk_hw *hw)
+{
+       struct clk_pll *pll = to_clk_pll(hw);
+
+       writel_relaxed(1 << pll->power, pll->base + SET);
+
+       /* presumably the pll lock/settle time — TODO confirm against datasheet */
+       udelay(10);
+
+       return 0;
+}
+
+/* Power down the pll by clearing its power bit */
+static void clk_pll_unprepare(struct clk_hw *hw)
+{
+       struct clk_pll *pll = to_clk_pll(hw);
+
+       writel_relaxed(1 << pll->power, pll->base + CLR);
+}
+
+/* The gate bit (31) is active-low: clearing it enables the pll output */
+static int clk_pll_enable(struct clk_hw *hw)
+{
+       struct clk_pll *pll = to_clk_pll(hw);
+
+       writel_relaxed(1 << 31, pll->base + CLR);
+
+       return 0;
+}
+
+/* Setting the active-low gate bit (31) disables the pll output */
+static void clk_pll_disable(struct clk_hw *hw)
+{
+       struct clk_pll *pll = to_clk_pll(hw);
+
+       writel_relaxed(1 << 31, pll->base + SET);
+}
+
+/* Fixed-rate pll: always report the rate given at registration time */
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
+                                        unsigned long parent_rate)
+{
+       struct clk_pll *pll = to_clk_pll(hw);
+
+       return pll->rate;
+}
+
+static const struct clk_ops clk_pll_ops = {
+       .prepare = clk_pll_prepare,
+       .unprepare = clk_pll_unprepare,
+       .enable = clk_pll_enable,
+       .disable = clk_pll_disable,
+       .recalc_rate = clk_pll_recalc_rate,
+};
+
+/*
+ * mxs_clk_pll - allocate and register an mxs pll clock
+ * @name: clock name
+ * @parent_name: parent clock name (may be NULL for a root clock)
+ * @base: pll register base address
+ * @power: bit position of the power control bit
+ * @rate: fixed output rate of the pll
+ *
+ * Returns the registered clk, or an ERR_PTR on failure.  On registration
+ * failure the allocation is freed before the error is propagated.
+ */
+struct clk *mxs_clk_pll(const char *name, const char *parent_name,
+                       void __iomem *base, u8 power, unsigned long rate)
+{
+       struct clk_pll *pll;
+       struct clk *clk;
+       /* stack-local is fine: clk_register() copies what it needs from init */
+       struct clk_init_data init;
+
+       pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+       if (!pll)
+               return ERR_PTR(-ENOMEM);
+
+       init.name = name;
+       init.ops = &clk_pll_ops;
+       init.flags = 0;
+       init.parent_names = (parent_name ? &parent_name: NULL);
+       init.num_parents = (parent_name ? 1 : 0);
+
+       pll->base = base;
+       pll->rate = rate;
+       pll->power = power;
+       pll->hw.init = &init;
+
+       clk = clk_register(NULL, &pll->hw);
+       if (IS_ERR(clk))
+               kfree(pll);
+
+       return clk;
+}
diff --git a/drivers/clk/mxs/clk-ref.c b/drivers/clk/mxs/clk-ref.c
new file mode 100644 (file)
index 0000000..4adeed6
--- /dev/null
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_ref - mxs reference clock
+ * @hw: clk_hw for the reference clock
+ * @reg: register address
+ * @idx: the index of the reference clock within the same register
+ *
+ * The mxs reference clock sources from pll.  Every 4 reference clocks share
+ * one register space, and @idx is used to identify them.  Each reference
+ * clock has a gate control and a fractional divider.  The rate is calculated
+ * as pll rate  * (18 / FRAC), where FRAC = 18 ~ 35.
+ */
+struct clk_ref {
+       struct clk_hw hw;
+       void __iomem *reg;
+       u8 idx;
+};
+
+#define to_clk_ref(_hw) container_of(_hw, struct clk_ref, hw)
+
+/*
+ * Each ref clock owns one byte of the register; its gate is the top bit
+ * of that byte (bit idx*8+7) and is active-low, so enable clears it.
+ */
+static int clk_ref_enable(struct clk_hw *hw)
+{
+       struct clk_ref *ref = to_clk_ref(hw);
+
+       writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + CLR);
+
+       return 0;
+}
+
+/* Set the active-low gate bit of this ref clock's byte to disable it */
+static void clk_ref_disable(struct clk_hw *hw)
+{
+       struct clk_ref *ref = to_clk_ref(hw);
+
+       writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + SET);
+}
+
+/* rate = parent_rate * 18 / FRAC, FRAC read from this clock's 6-bit field */
+static unsigned long clk_ref_recalc_rate(struct clk_hw *hw,
+                                        unsigned long parent_rate)
+{
+       struct clk_ref *ref = to_clk_ref(hw);
+       u64 tmp = parent_rate;
+       u8 frac = (readl_relaxed(ref->reg) >> (ref->idx * 8)) & 0x3f;
+
+       /* 64-bit intermediate avoids overflow of parent_rate * 18 */
+       tmp *= 18;
+       do_div(tmp, frac);
+
+       return tmp;
+}
+
+/*
+ * Pick FRAC = round(parent_rate * 18 / rate), clamp it to the hardware
+ * range 18..35, and return the rate that FRAC would actually produce.
+ */
+static long clk_ref_round_rate(struct clk_hw *hw, unsigned long rate,
+                              unsigned long *prate)
+{
+       unsigned long parent_rate = *prate;
+       u64 tmp = parent_rate;
+       u8 frac;
+
+       /* + rate/2 rounds to nearest instead of truncating */
+       tmp = tmp * 18 + rate / 2;
+       do_div(tmp, rate);
+       frac = tmp;
+
+       if (frac < 18)
+               frac = 18;
+       else if (frac > 35)
+               frac = 35;
+
+       tmp = parent_rate;
+       tmp *= 18;
+       do_div(tmp, frac);
+
+       return tmp;
+}
+
+/*
+ * Program the 6-bit FRAC field for this ref clock: recompute the clamped
+ * divider exactly as round_rate does, then read-modify-write the shared
+ * register under mxs_lock (four ref clocks share one register).
+ */
+static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate,
+                           unsigned long parent_rate)
+{
+       struct clk_ref *ref = to_clk_ref(hw);
+       unsigned long flags;
+       u64 tmp = parent_rate;
+       u32 val;
+       u8 frac, shift = ref->idx * 8;
+
+       tmp = tmp * 18 + rate / 2;
+       do_div(tmp, rate);
+       frac = tmp;
+
+       if (frac < 18)
+               frac = 18;
+       else if (frac > 35)
+               frac = 35;
+
+       spin_lock_irqsave(&mxs_lock, flags);
+
+       val = readl_relaxed(ref->reg);
+       val &= ~(0x3f << shift);
+       val |= frac << shift;
+       writel_relaxed(val, ref->reg);
+
+       spin_unlock_irqrestore(&mxs_lock, flags);
+
+       return 0;
+}
+
+static const struct clk_ops clk_ref_ops = {
+       .enable         = clk_ref_enable,
+       .disable        = clk_ref_disable,
+       .recalc_rate    = clk_ref_recalc_rate,
+       .round_rate     = clk_ref_round_rate,
+       .set_rate       = clk_ref_set_rate,
+};
+
+/*
+ * mxs_clk_ref - allocate and register an mxs fractional reference clock
+ * @name: clock name
+ * @parent_name: parent (pll) clock name, may be NULL
+ * @reg: shared FRAC register address
+ * @idx: which of the four per-register ref clocks this is (0..3)
+ *
+ * Returns the registered clk, or an ERR_PTR on failure (allocation is
+ * freed if clk_register() fails).
+ */
+struct clk *mxs_clk_ref(const char *name, const char *parent_name,
+                       void __iomem *reg, u8 idx)
+{
+       struct clk_ref *ref;
+       struct clk *clk;
+       /* stack-local is fine: clk_register() copies what it needs from init */
+       struct clk_init_data init;
+
+       ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+       if (!ref)
+               return ERR_PTR(-ENOMEM);
+
+       init.name = name;
+       init.ops = &clk_ref_ops;
+       init.flags = 0;
+       init.parent_names = (parent_name ? &parent_name: NULL);
+       init.num_parents = (parent_name ? 1 : 0);
+
+       ref->reg = reg;
+       ref->idx = idx;
+       ref->hw.init = &init;
+
+       clk = clk_register(NULL, &ref->hw);
+       if (IS_ERR(clk))
+               kfree(ref);
+
+       return clk;
+}
diff --git a/drivers/clk/mxs/clk.c b/drivers/clk/mxs/clk.c
new file mode 100644 (file)
index 0000000..b24d560
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+
+DEFINE_SPINLOCK(mxs_lock);
+
+/*
+ * mxs_clk_wait - busy-wait for a clock's busy bit to clear
+ * @reg: register containing the busy bit
+ * @shift: bit position of the busy bit
+ *
+ * Polls for up to 10ms; returns 0 when the bit clears, -ETIMEDOUT otherwise.
+ */
+int mxs_clk_wait(void __iomem *reg, u8 shift)
+{
+       unsigned long timeout = jiffies + msecs_to_jiffies(10);
+
+       while (readl_relaxed(reg) & (1 << shift))
+               if (time_after(jiffies, timeout))
+                       return -ETIMEDOUT;
+
+       return 0;
+}
diff --git a/drivers/clk/mxs/clk.h b/drivers/clk/mxs/clk.h
new file mode 100644 (file)
index 0000000..81421e2
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef __MXS_CLK_H
+#define __MXS_CLK_H
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#define SET    0x4
+#define CLR    0x8
+
+extern spinlock_t mxs_lock;
+
+int mxs_clk_wait(void __iomem *reg, u8 shift);
+
+struct clk *mxs_clk_pll(const char *name, const char *parent_name,
+                       void __iomem *base, u8 power, unsigned long rate);
+
+struct clk *mxs_clk_ref(const char *name, const char *parent_name,
+                       void __iomem *reg, u8 idx);
+
+struct clk *mxs_clk_div(const char *name, const char *parent_name,
+                       void __iomem *reg, u8 shift, u8 width, u8 busy);
+
+struct clk *mxs_clk_frac(const char *name, const char *parent_name,
+                        void __iomem *reg, u8 shift, u8 width, u8 busy);
+
+/* Register a root (parentless) fixed-rate clock */
+static inline struct clk *mxs_clk_fixed(const char *name, int rate)
+{
+       return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+}
+
+/*
+ * Register an mxs clock gate.  mxs gate bits are active-low
+ * (CLK_GATE_SET_TO_DISABLE) and share the global mxs_lock.
+ */
+static inline struct clk *mxs_clk_gate(const char *name,
+                       const char *parent_name, void __iomem *reg, u8 shift)
+{
+       return clk_register_gate(NULL, name, parent_name, CLK_SET_RATE_PARENT,
+                                reg, shift, CLK_GATE_SET_TO_DISABLE,
+                                &mxs_lock);
+}
+
+/* Register an mxs clock mux protected by the global mxs_lock */
+static inline struct clk *mxs_clk_mux(const char *name, void __iomem *reg,
+               u8 shift, u8 width, const char **parent_names, int num_parents)
+{
+       return clk_register_mux(NULL, name, parent_names, num_parents,
+                               CLK_SET_RATE_PARENT, reg, shift, width,
+                               0, &mxs_lock);
+}
+
+/* Register a fixed-factor clock (rate = parent * mult / div) */
+static inline struct clk *mxs_clk_fixed_factor(const char *name,
+               const char *parent_name, unsigned int mult, unsigned int div)
+{
+       return clk_register_fixed_factor(NULL, name, parent_name,
+                                        CLK_SET_RATE_PARENT, mult, div);
+}
+
+#endif /* __MXS_CLK_H */
diff --git a/drivers/clk/spear/Makefile b/drivers/clk/spear/Makefile
new file mode 100644 (file)
index 0000000..cdb425d
--- /dev/null
@@ -0,0 +1,10 @@
+#
+# SPEAr Clock specific Makefile
+#
+
+obj-y  += clk.o clk-aux-synth.o clk-frac-synth.o clk-gpt-synth.o clk-vco-pll.o
+
+obj-$(CONFIG_ARCH_SPEAR3XX)    += spear3xx_clock.o
+obj-$(CONFIG_ARCH_SPEAR6XX)    += spear6xx_clock.o
+obj-$(CONFIG_MACH_SPEAR1310)   += spear1310_clock.o
+obj-$(CONFIG_MACH_SPEAR1340)   += spear1340_clock.o
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
new file mode 100644 (file)
index 0000000..af34074
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Auxiliary Synthesizer clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-aux-synth: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+/*
+ * DOC: Auxiliary Synthesizer clock
+ *
+ * Aux synth gives rate for different values of eq, x and y
+ *
+ * Fout from synthesizer can be given from two equations:
+ * Fout1 = (Fin * X/Y)/2               EQ1
+ * Fout2 = Fin * X/Y                   EQ2
+ */
+
+#define to_clk_aux(_hw) container_of(_hw, struct clk_aux, hw)
+
+static struct aux_clk_masks default_aux_masks = {
+       .eq_sel_mask = AUX_EQ_SEL_MASK,
+       .eq_sel_shift = AUX_EQ_SEL_SHIFT,
+       .eq1_mask = AUX_EQ1_SEL,
+       .eq2_mask = AUX_EQ2_SEL,
+       .xscale_sel_mask = AUX_XSCALE_MASK,
+       .xscale_sel_shift = AUX_XSCALE_SHIFT,
+       .yscale_sel_mask = AUX_YSCALE_MASK,
+       .yscale_sel_shift = AUX_YSCALE_SHIFT,
+       .enable_bit = AUX_SYNT_ENB,
+};
+
+/*
+ * Rate for rate-table entry @index: Fin * X / Y, halved when eq is set
+ * (EQ1).  Scaling by 10000 keeps the intermediate within unsigned long.
+ */
+static unsigned long aux_calc_rate(struct clk_hw *hw, unsigned long prate,
+               int index)
+{
+       struct clk_aux *aux = to_clk_aux(hw);
+       struct aux_rate_tbl *rtbl = aux->rtbl;
+       u8 eq = rtbl[index].eq ? 1 : 2;
+
+       return (((prate / 10000) * rtbl[index].xscale) /
+                       (rtbl[index].yscale * eq)) * 10000;
+}
+
+/* Find the closest achievable rate from the aux rate table */
+static long clk_aux_round_rate(struct clk_hw *hw, unsigned long drate,
+               unsigned long *prate)
+{
+       struct clk_aux *aux = to_clk_aux(hw);
+       int unused;
+
+       return clk_round_rate_index(hw, drate, *prate, aux_calc_rate,
+                       aux->rtbl_cnt, &unused);
+}
+
+/*
+ * Read back the current synthesizer setting and compute the output rate:
+ * Fout = Fin * X / Y, divided by 2 when equation EQ1 is selected.
+ * Returns 0 if the register holds a zero Y divisor.
+ */
+static unsigned long clk_aux_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct clk_aux *aux = to_clk_aux(hw);
+       unsigned int num = 1, den = 1, val, eqn;
+       unsigned long flags = 0;
+
+       /* lock is optional: only taken when the caller supplied one */
+       if (aux->lock)
+               spin_lock_irqsave(aux->lock, flags);
+
+       val = readl_relaxed(aux->reg);
+
+       if (aux->lock)
+               spin_unlock_irqrestore(aux->lock, flags);
+
+       eqn = (val >> aux->masks->eq_sel_shift) & aux->masks->eq_sel_mask;
+       if (eqn == aux->masks->eq1_mask)
+               den = 2;
+
+       /* calculate numerator */
+       num = (val >> aux->masks->xscale_sel_shift) &
+               aux->masks->xscale_sel_mask;
+
+       /* calculate denominator */
+       den *= (val >> aux->masks->yscale_sel_shift) &
+               aux->masks->yscale_sel_mask;
+
+       if (!den)
+               return 0;
+
+       /* scale by 10000 to avoid overflow of parent_rate * num */
+       return (((parent_rate / 10000) * num) / den) * 10000;
+}
+
+/*
+ * Configures new clock rate of aux: look up the nearest rate-table entry,
+ * then program its eq/xscale/yscale fields in one read-modify-write,
+ * under the optional spinlock.
+ */
+static int clk_aux_set_rate(struct clk_hw *hw, unsigned long drate,
+                               unsigned long prate)
+{
+       struct clk_aux *aux = to_clk_aux(hw);
+       struct aux_rate_tbl *rtbl = aux->rtbl;
+       unsigned long val, flags = 0;
+       int i;
+
+       /* only the index i is needed; the returned rate is ignored */
+       clk_round_rate_index(hw, drate, prate, aux_calc_rate, aux->rtbl_cnt,
+                       &i);
+
+       if (aux->lock)
+               spin_lock_irqsave(aux->lock, flags);
+
+       val = readl_relaxed(aux->reg) &
+               ~(aux->masks->eq_sel_mask << aux->masks->eq_sel_shift);
+       val |= (rtbl[i].eq & aux->masks->eq_sel_mask) <<
+               aux->masks->eq_sel_shift;
+       val &= ~(aux->masks->xscale_sel_mask << aux->masks->xscale_sel_shift);
+       val |= (rtbl[i].xscale & aux->masks->xscale_sel_mask) <<
+               aux->masks->xscale_sel_shift;
+       val &= ~(aux->masks->yscale_sel_mask << aux->masks->yscale_sel_shift);
+       val |= (rtbl[i].yscale & aux->masks->yscale_sel_mask) <<
+               aux->masks->yscale_sel_shift;
+       writel_relaxed(val, aux->reg);
+
+       if (aux->lock)
+               spin_unlock_irqrestore(aux->lock, flags);
+
+       return 0;
+}
+
+static struct clk_ops clk_aux_ops = {
+       .recalc_rate = clk_aux_recalc_rate,
+       .round_rate = clk_aux_round_rate,
+       .set_rate = clk_aux_set_rate,
+};
+
+/*
+ * clk_register_aux - register an auxiliary synthesizer clock
+ * @aux_name: name of the synth clock
+ * @gate_name: optional name for a companion gate clock (NULL to skip)
+ * @parent_name: parent clock name (required)
+ * @flags: framework flags for the synth clock
+ * @reg: synthesizer register
+ * @masks: field layout, or NULL to use default_aux_masks
+ * @rtbl/@rtbl_cnt: rate table (required, non-empty)
+ * @lock: optional register spinlock
+ * @gate_clk: out-param receiving the gate clk when @gate_name is given
+ *
+ * Returns the aux clk on success, ERR_PTR(-EINVAL/-ENOMEM) for bad
+ * arguments or allocation failure.
+ *
+ * NOTE(review): registration failure returns NULL rather than an
+ * ERR_PTR, so callers checking only IS_ERR() would miss it — and a
+ * gate-registration failure frees @aux while the already-registered
+ * aux clk still points at it.  Verify callers handle NULL.
+ */
+struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
+               const char *parent_name, unsigned long flags, void __iomem *reg,
+               struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+               u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk)
+{
+       struct clk_aux *aux;
+       struct clk_init_data init;
+       struct clk *clk;
+
+       if (!aux_name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
+               pr_err("Invalid arguments passed");
+               return ERR_PTR(-EINVAL);
+       }
+
+       aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+       if (!aux) {
+               pr_err("could not allocate aux clk\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* struct clk_aux assignments */
+       if (!masks)
+               aux->masks = &default_aux_masks;
+       else
+               aux->masks = masks;
+
+       aux->reg = reg;
+       aux->rtbl = rtbl;
+       aux->rtbl_cnt = rtbl_cnt;
+       aux->lock = lock;
+       aux->hw.init = &init;
+
+       init.name = aux_name;
+       init.ops = &clk_aux_ops;
+       init.flags = flags;
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+
+       clk = clk_register(NULL, &aux->hw);
+       if (IS_ERR_OR_NULL(clk))
+               goto free_aux;
+
+       if (gate_name) {
+               struct clk *tgate_clk;
+
+               /* companion gate clock, parented on the synth output */
+               tgate_clk = clk_register_gate(NULL, gate_name, aux_name, 0, reg,
+                               aux->masks->enable_bit, 0, lock);
+               if (IS_ERR_OR_NULL(tgate_clk))
+                       goto free_aux;
+
+               if (gate_clk)
+                       *gate_clk = tgate_clk;
+       }
+
+       return clk;
+
+free_aux:
+       kfree(aux);
+       pr_err("clk register failed\n");
+
+       return NULL;
+}
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
new file mode 100644 (file)
index 0000000..4dbdb3f
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Fractional Synthesizer clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-frac-synth: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+#define DIV_FACTOR_MASK                0x1FFFF
+
+/*
+ * DOC: Fractional Synthesizer clock
+ *
+ * Fout from synthesizer can be given from below equation:
+ *
+ * Fout = Fin / (2 * div), where div is the division factor
+ * div is 17 bits:-
+ *     0-13 (fractional part)
+ *     14-16 (integer part)
+ *     div is (16-14 bits).(13-0 bits) (in binary)
+ *
+ *     Fout = Fin/(2 * div)
+ *     Fout = ((Fin / 10000)/(2 * div)) * 10000
+ *     Fout = (2^14 * (Fin / 10000)/(2^14 * (2 * div))) * 10000
+ *     Fout = (((Fin / 10000) << 14)/(2 * (div << 14))) * 10000
+ *
+ * div << 14 simply 17 bit value written at register.
+ * Max error due to scaling down by 10000 is 10 KHz
+ */
+
+#define to_clk_frac(_hw) container_of(_hw, struct clk_frac, hw)
+
+/*
+ * Compute the frac output rate for rate-table entry @index.
+ *
+ * Implements Fout = Fin / (2 * div) using the scaled form from the DOC
+ * block above: rtbl[].div is the raw 17-bit value (real divider << 14),
+ * and the /10000, *10000 scaling keeps the shift within unsigned long.
+ */
+static unsigned long frac_calc_rate(struct clk_hw *hw, unsigned long prate,
+               int index)
+{
+       struct clk_frac *frac = to_clk_frac(hw);
+       struct frac_rate_tbl *rtbl = frac->rtbl;
+
+       /* order matters: integer scale-down, shift up, then divide */
+       prate /= 10000;
+       prate <<= 14;
+       prate /= (2 * rtbl[index].div);
+       prate *= 10000;
+
+       return prate;
+}
+
+/* Round @drate to the nearest achievable frac rate (table index discarded) */
+static long clk_frac_round_rate(struct clk_hw *hw, unsigned long drate,
+               unsigned long *prate)
+{
+       int dummy_index;
+       struct clk_frac *frac = to_clk_frac(hw);
+       long rounded;
+
+       rounded = clk_round_rate_index(hw, drate, *prate, frac_calc_rate,
+                       frac->rtbl_cnt, &dummy_index);
+
+       return rounded;
+}
+
+/*
+ * Read the currently programmed frac rate back from hardware.
+ *
+ * The 17-bit register field is the real divider already shifted left by
+ * 14, which the scaled math below undoes. Returns 0 if the register
+ * holds a zero divider (unprogrammed hardware).
+ */
+static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct clk_frac *frac = to_clk_frac(hw);
+       unsigned long flags = 0;
+       unsigned int div = 1, val;
+
+       /* lock is optional: only taken when the register is shared */
+       if (frac->lock)
+               spin_lock_irqsave(frac->lock, flags);
+
+       val = readl_relaxed(frac->reg);
+
+       if (frac->lock)
+               spin_unlock_irqrestore(frac->lock, flags);
+
+       div = val & DIV_FACTOR_MASK;
+
+       if (!div)
+               return 0;
+
+       parent_rate = parent_rate / 10000;
+
+       parent_rate = (parent_rate << 14) / (2 * div);
+       return parent_rate * 10000;
+}
+
+/* Configures new clock rate of frac */
+static int clk_frac_set_rate(struct clk_hw *hw, unsigned long drate,
+                               unsigned long prate)
+{
+       struct clk_frac *frac = to_clk_frac(hw);
+       struct frac_rate_tbl *rtbl = frac->rtbl;
+       unsigned long flags = 0, val;
+       int i;
+
+       /* pick the rate-table index whose rate best matches @drate */
+       clk_round_rate_index(hw, drate, prate, frac_calc_rate, frac->rtbl_cnt,
+                       &i);
+
+       if (frac->lock)
+               spin_lock_irqsave(frac->lock, flags);
+
+       /* read-modify-write only the 17-bit divider field */
+       val = readl_relaxed(frac->reg) & ~DIV_FACTOR_MASK;
+       val |= rtbl[i].div & DIV_FACTOR_MASK;
+       writel_relaxed(val, frac->reg);
+
+       if (frac->lock)
+               spin_unlock_irqrestore(frac->lock, flags);
+
+       return 0;
+}
+
+/*
+ * File-local like clk_gpt_ops/clk_vco_ops/clk_pll_ops; clk.h declares no
+ * extern for it, so give it internal linkage for consistency.
+ */
+static struct clk_ops clk_frac_ops = {
+       .recalc_rate = clk_frac_recalc_rate,
+       .round_rate = clk_frac_round_rate,
+       .set_rate = clk_frac_set_rate,
+};
+
+/**
+ * clk_register_frac - register a fractional synthesizer clock
+ * @name: clock name
+ * @parent_name: parent clock name
+ * @flags: clk framework flags
+ * @reg: synthesizer configuration register
+ * @rtbl: rate table (raw 17-bit dividers)
+ * @rtbl_cnt: number of entries in @rtbl
+ * @lock: optional spinlock protecting @reg
+ *
+ * Returns the clock, ERR_PTR(-EINVAL/-ENOMEM) on early failure, or NULL
+ * if clk_register() fails (callers must use IS_ERR_OR_NULL()).
+ */
+struct clk *clk_register_frac(const char *name, const char *parent_name,
+               unsigned long flags, void __iomem *reg,
+               struct frac_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock)
+{
+       struct clk_init_data init;
+       struct clk_frac *frac;
+       struct clk *clk;
+
+       if (!name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
+               pr_err("Invalid arguments passed\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       frac = kzalloc(sizeof(*frac), GFP_KERNEL);
+       if (!frac) {
+               pr_err("could not allocate frac clk\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* struct clk_frac assignments */
+       frac->reg = reg;
+       frac->rtbl = rtbl;
+       frac->rtbl_cnt = rtbl_cnt;
+       frac->lock = lock;
+       frac->hw.init = &init;
+
+       init.name = name;
+       init.ops = &clk_frac_ops;
+       init.flags = flags;
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+
+       clk = clk_register(NULL, &frac->hw);
+       if (!IS_ERR_OR_NULL(clk))
+               return clk;
+
+       pr_err("clk register failed\n");
+       kfree(frac);
+
+       return NULL;
+}
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
new file mode 100644 (file)
index 0000000..b471c97
--- /dev/null
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * General Purpose Timer Synthesizer clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-gpt-synth: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+#define GPT_MSCALE_MASK                0xFFF
+#define GPT_NSCALE_SHIFT       12
+#define GPT_NSCALE_MASK                0xF
+
+/*
+ * DOC: General Purpose Timer Synthesizer clock
+ *
+ * Calculates gpt synth clk rate for different values of mscale and nscale
+ *
+ * Fout= Fin/((2 ^ (N+1)) * (M+1))
+ */
+
+#define to_clk_gpt(_hw) container_of(_hw, struct clk_gpt, hw)
+
+static unsigned long gpt_calc_rate(struct clk_hw *hw, unsigned long prate,
+               int index)
+{
+       struct clk_gpt *gpt = to_clk_gpt(hw);
+       struct gpt_rate_tbl *rtbl = gpt->rtbl;
+
+       prate /= ((1 << (rtbl[index].nscale + 1)) * (rtbl[index].mscale + 1));
+
+       return prate;
+}
+
+/* Round @drate to the nearest achievable gpt rate (index discarded) */
+static long clk_gpt_round_rate(struct clk_hw *hw, unsigned long drate,
+               unsigned long *prate)
+{
+       int dummy_index;
+       struct clk_gpt *gpt = to_clk_gpt(hw);
+       long rounded;
+
+       rounded = clk_round_rate_index(hw, drate, *prate, gpt_calc_rate,
+                       gpt->rtbl_cnt, &dummy_index);
+
+       return rounded;
+}
+
+/*
+ * Read the current gpt rate back from hardware.
+ *
+ * The register packs M (bits 0-11) and N (bits 12-15); the effective
+ * divider is (2 ^ (N+1)) * (M+1), matching gpt_calc_rate().
+ */
+static unsigned long clk_gpt_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct clk_gpt *gpt = to_clk_gpt(hw);
+       unsigned long flags = 0;
+       unsigned int div = 1, val;
+
+       /* lock is optional: only taken when the register is shared */
+       if (gpt->lock)
+               spin_lock_irqsave(gpt->lock, flags);
+
+       val = readl_relaxed(gpt->reg);
+
+       if (gpt->lock)
+               spin_unlock_irqrestore(gpt->lock, flags);
+
+       /* div starts at 1, so this yields (M+1) * 2^(N+1) */
+       div += val & GPT_MSCALE_MASK;
+       div *= 1 << (((val >> GPT_NSCALE_SHIFT) & GPT_NSCALE_MASK) + 1);
+
+       if (!div)
+               return 0;
+
+       return parent_rate / div;
+}
+
+/* Configures new clock rate of gpt */
+static int clk_gpt_set_rate(struct clk_hw *hw, unsigned long drate,
+                               unsigned long prate)
+{
+       struct clk_gpt *gpt = to_clk_gpt(hw);
+       struct gpt_rate_tbl *rtbl = gpt->rtbl;
+       unsigned long flags = 0, val;
+       int i;
+
+       /* pick the rate-table entry best matching @drate */
+       clk_round_rate_index(hw, drate, prate, gpt_calc_rate, gpt->rtbl_cnt,
+                       &i);
+
+       if (gpt->lock)
+               spin_lock_irqsave(gpt->lock, flags);
+
+       /* readl_relaxed() for consistency with every other access here */
+       val = readl_relaxed(gpt->reg) & ~GPT_MSCALE_MASK;
+       val &= ~(GPT_NSCALE_MASK << GPT_NSCALE_SHIFT);
+
+       val |= rtbl[i].mscale & GPT_MSCALE_MASK;
+       val |= (rtbl[i].nscale & GPT_NSCALE_MASK) << GPT_NSCALE_SHIFT;
+
+       writel_relaxed(val, gpt->reg);
+
+       if (gpt->lock)
+               spin_unlock_irqrestore(gpt->lock, flags);
+
+       return 0;
+}
+
+/* clk_ops for gpt synthesizer clocks; rates come from gpt->rtbl */
+static struct clk_ops clk_gpt_ops = {
+       .recalc_rate = clk_gpt_recalc_rate,
+       .round_rate = clk_gpt_round_rate,
+       .set_rate = clk_gpt_set_rate,
+};
+
+/**
+ * clk_register_gpt - register a general purpose timer synthesizer clock
+ * @name: clock name
+ * @parent_name: parent clock name
+ * @flags: clk framework flags
+ * @reg: synthesizer configuration register
+ * @rtbl: rate table (mscale/nscale pairs)
+ * @rtbl_cnt: number of entries in @rtbl
+ * @lock: optional spinlock protecting @reg
+ *
+ * Returns the clock, ERR_PTR(-EINVAL/-ENOMEM) on early failure, or NULL
+ * if clk_register() fails (callers must use IS_ERR_OR_NULL()).
+ */
+struct clk *clk_register_gpt(const char *name, const char *parent_name, unsigned
+               long flags, void __iomem *reg, struct gpt_rate_tbl *rtbl, u8
+               rtbl_cnt, spinlock_t *lock)
+{
+       struct clk_init_data init;
+       struct clk_gpt *gpt;
+       struct clk *clk;
+
+       if (!name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
+               pr_err("Invalid arguments passed\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       gpt = kzalloc(sizeof(*gpt), GFP_KERNEL);
+       if (!gpt) {
+               pr_err("could not allocate gpt clk\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* struct clk_gpt assignments */
+       gpt->reg = reg;
+       gpt->rtbl = rtbl;
+       gpt->rtbl_cnt = rtbl_cnt;
+       gpt->lock = lock;
+       gpt->hw.init = &init;
+
+       init.name = name;
+       init.ops = &clk_gpt_ops;
+       init.flags = flags;
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+
+       clk = clk_register(NULL, &gpt->hw);
+       if (!IS_ERR_OR_NULL(clk))
+               return clk;
+
+       pr_err("clk register failed\n");
+       kfree(gpt);
+
+       return NULL;
+}
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
new file mode 100644 (file)
index 0000000..dcd4bdf
--- /dev/null
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * VCO-PLL clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-vco-pll: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+/*
+ * DOC: VCO-PLL clock
+ *
+ * VCO and PLL rate are derived from following equations:
+ *
+ * In normal mode
+ * vco = (2 * M[15:8] * Fin)/N
+ *
+ * In Dithered mode
+ * vco = (2 * M[15:0] * Fin)/(256 * N)
+ *
+ * pll_rate = pll/2^p
+ *
+ * vco and pll are very closely bound to each other, "vco needs to program:
+ * mode, m & n" and "pll needs to program p", both share common enable/disable
+ * logic.
+ *
+ * clk_register_vco_pll() registers instances of both vco & pll.
+ * CLK_SET_RATE_PARENT flag is forced for pll, as it will always pass its
+ * set_rate to vco. A single rate table exists for both the clocks, which
+ * configures m, n and p.
+ */
+
+/* PLL_CTR register masks */
+#define PLL_MODE_NORMAL                0
+#define PLL_MODE_FRACTION      1
+#define PLL_MODE_DITH_DSM      2
+#define PLL_MODE_DITH_SSM      3
+#define PLL_MODE_MASK          3
+#define PLL_MODE_SHIFT         3
+#define PLL_ENABLE             2
+
+#define PLL_LOCK_SHIFT         0
+#define PLL_LOCK_MASK          1
+
+/* PLL FRQ register masks */
+#define PLL_NORM_FDBK_M_MASK   0xFF
+#define PLL_NORM_FDBK_M_SHIFT  24
+#define PLL_DITH_FDBK_M_MASK   0xFFFF
+#define PLL_DITH_FDBK_M_SHIFT  16
+#define PLL_DIV_P_MASK         0x7
+#define PLL_DIV_P_SHIFT                8
+#define PLL_DIV_N_MASK         0xFF
+#define PLL_DIV_N_SHIFT                0
+
+#define to_clk_vco(_hw) container_of(_hw, struct clk_vco, hw)
+#define to_clk_pll(_hw) container_of(_hw, struct clk_pll, hw)
+
+/* Calculates pll clk rate for specific value of mode, m, n and p */
+static unsigned long pll_calc_rate(struct pll_rate_tbl *rtbl,
+               unsigned long prate, int index, unsigned long *pll_rate)
+{
+       unsigned long rate = prate;
+       unsigned int mode;
+
+       /* dithered modes use the full 16-bit m scaled down by 256 */
+       mode = rtbl[index].mode ? 256 : 1;
+       /* /10000 and *10000 keep the intermediate within unsigned long */
+       rate = (((2 * rate / 10000) * rtbl[index].m) / (mode * rtbl[index].n));
+
+       /* optionally also report the post-divided pll rate (vco / 2^p) */
+       if (pll_rate)
+               *pll_rate = (rate / (1 << rtbl[index].p)) * 10000;
+
+       /* return value is the vco rate */
+       return rate * 10000;
+}
+
+/*
+ * Find the rate-table entry whose pll rate best matches @drate.
+ *
+ * On return, *prate holds the corresponding vco (parent) rate and
+ * *index the chosen table entry; the pll rate itself is returned.
+ * The vco's own parent rate is fetched two levels up the clock tree
+ * because the shared table maps the vco input clock to output rates.
+ */
+static long clk_pll_round_rate_index(struct clk_hw *hw, unsigned long drate,
+                               unsigned long *prate, int *index)
+{
+       struct clk_pll *pll = to_clk_pll(hw);
+       unsigned long prev_rate, vco_prev_rate, rate = 0;
+       unsigned long vco_parent_rate =
+               __clk_get_rate(__clk_get_parent(__clk_get_parent(hw->clk)));
+
+       if (!prate) {
+               pr_err("%s: prate is must for pll clk\n", __func__);
+               return -EINVAL;
+       }
+
+       /* table is expected to be sorted in ascending rate order */
+       for (*index = 0; *index < pll->vco->rtbl_cnt; (*index)++) {
+               prev_rate = rate;
+               vco_prev_rate = *prate;
+               *prate = pll_calc_rate(pll->vco->rtbl, vco_parent_rate, *index,
+                               &rate);
+               if (drate < rate) {
+                       /* previous clock was best */
+                       if (*index) {
+                               rate = prev_rate;
+                               *prate = vco_prev_rate;
+                               (*index)--;
+                       }
+                       break;
+               }
+       }
+
+       return rate;
+}
+
+/* Round @drate to the closest pll rate; the table index is discarded */
+static long clk_pll_round_rate(struct clk_hw *hw, unsigned long drate,
+                               unsigned long *prate)
+{
+       int discarded_index;
+       long rounded;
+
+       rounded = clk_pll_round_rate_index(hw, drate, prate, &discarded_index);
+
+       return rounded;
+}
+
+/* Read back the pll rate: parent (vco) rate divided by 2^p from cfg_reg */
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, unsigned long
+               parent_rate)
+{
+       struct clk_pll *pll = to_clk_pll(hw);
+       unsigned long flags = 0;
+       unsigned int p;
+
+       /* lock (shared with the vco) is optional */
+       if (pll->vco->lock)
+               spin_lock_irqsave(pll->vco->lock, flags);
+
+       p = readl_relaxed(pll->vco->cfg_reg);
+
+       if (pll->vco->lock)
+               spin_unlock_irqrestore(pll->vco->lock, flags);
+
+       p = (p >> PLL_DIV_P_SHIFT) & PLL_DIV_P_MASK;
+
+       return parent_rate / (1 << p);
+}
+
+/*
+ * Program only the 'p' post-divider; the vco itself (mode, m, n) is
+ * reprogrammed by its own set_rate via CLK_SET_RATE_PARENT (see the
+ * DOC block and clk_register_vco_pll()).
+ */
+static int clk_pll_set_rate(struct clk_hw *hw, unsigned long drate,
+                               unsigned long prate)
+{
+       struct clk_pll *pll = to_clk_pll(hw);
+       struct pll_rate_tbl *rtbl = pll->vco->rtbl;
+       unsigned long flags = 0, val;
+       int i;
+
+       /* NULL prate: only the table index is needed here */
+       clk_pll_round_rate_index(hw, drate, NULL, &i);
+
+       if (pll->vco->lock)
+               spin_lock_irqsave(pll->vco->lock, flags);
+
+       val = readl_relaxed(pll->vco->cfg_reg);
+       val &= ~(PLL_DIV_P_MASK << PLL_DIV_P_SHIFT);
+       val |= (rtbl[i].p & PLL_DIV_P_MASK) << PLL_DIV_P_SHIFT;
+       writel_relaxed(val, pll->vco->cfg_reg);
+
+       if (pll->vco->lock)
+               spin_unlock_irqrestore(pll->vco->lock, flags);
+
+       return 0;
+}
+
+/* pll leaf clock: only the 'p' post-divider is programmed here */
+static struct clk_ops clk_pll_ops = {
+       .recalc_rate = clk_pll_recalc_rate,
+       .round_rate = clk_pll_round_rate,
+       .set_rate = clk_pll_set_rate,
+};
+
+/* Fetch the vco rate for rate-table entry @index (pll output ignored) */
+static inline unsigned long vco_calc_rate(struct clk_hw *hw,
+               unsigned long prate, int index)
+{
+       struct clk_vco *vco = to_clk_vco(hw);
+       unsigned long vco_rate;
+
+       vco_rate = pll_calc_rate(vco->rtbl, prate, index, NULL);
+
+       return vco_rate;
+}
+
+/* Round @drate to the nearest achievable vco rate (index discarded) */
+static long clk_vco_round_rate(struct clk_hw *hw, unsigned long drate,
+               unsigned long *prate)
+{
+       int dummy_index;
+       struct clk_vco *vco = to_clk_vco(hw);
+       long rounded;
+
+       rounded = clk_round_rate_index(hw, drate, *prate, vco_calc_rate,
+                       vco->rtbl_cnt, &dummy_index);
+
+       return rounded;
+}
+
+/*
+ * Read the current vco rate back from hardware.
+ *
+ * Normal mode:   vco = (2 * M[15:8] * Fin) / N
+ * Dithered mode: vco = (2 * M[15:0] * Fin) / (256 * N)
+ * Fin is scaled by 10000 to keep the multiply within unsigned long.
+ */
+static unsigned long clk_vco_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct clk_vco *vco = to_clk_vco(hw);
+       unsigned long flags = 0;
+       unsigned int num = 2, den = 0, val, mode = 0;
+
+       /* lock is optional: only taken when the registers are shared */
+       if (vco->lock)
+               spin_lock_irqsave(vco->lock, flags);
+
+       mode = (readl_relaxed(vco->mode_reg) >> PLL_MODE_SHIFT) & PLL_MODE_MASK;
+
+       val = readl_relaxed(vco->cfg_reg);
+
+       if (vco->lock)
+               spin_unlock_irqrestore(vco->lock, flags);
+
+       den = (val >> PLL_DIV_N_SHIFT) & PLL_DIV_N_MASK;
+
+       /* calculate numerator & denominator */
+       if (!mode) {
+               /* Normal mode */
+               num *= (val >> PLL_NORM_FDBK_M_SHIFT) & PLL_NORM_FDBK_M_MASK;
+       } else {
+               /* Dithered mode */
+               num *= (val >> PLL_DITH_FDBK_M_SHIFT) & PLL_DITH_FDBK_M_MASK;
+               den *= 256;
+       }
+
+       /* guard against an unprogrammed N field (division by zero) */
+       if (!den) {
+               WARN(1, "%s: denominator can't be zero\n", __func__);
+               return 0;
+       }
+
+       return (((parent_rate / 10000) * num) / den) * 10000;
+}
+
+/* Configures new clock rate of vco */
+static int clk_vco_set_rate(struct clk_hw *hw, unsigned long drate,
+                               unsigned long prate)
+{
+       struct clk_vco *vco = to_clk_vco(hw);
+       struct pll_rate_tbl *rtbl = vco->rtbl;
+       unsigned long flags = 0, val;
+       int i;
+
+       /* choose the rate-table entry best matching @drate */
+       clk_round_rate_index(hw, drate, prate, vco_calc_rate, vco->rtbl_cnt,
+                       &i);
+
+       if (vco->lock)
+               spin_lock_irqsave(vco->lock, flags);
+
+       /* program the dithering mode first */
+       val = readl_relaxed(vco->mode_reg);
+       val &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
+       val |= (rtbl[i].mode & PLL_MODE_MASK) << PLL_MODE_SHIFT;
+       writel_relaxed(val, vco->mode_reg);
+
+       /* then N and M; M's field width/position depends on the mode */
+       val = readl_relaxed(vco->cfg_reg);
+       val &= ~(PLL_DIV_N_MASK << PLL_DIV_N_SHIFT);
+       val |= (rtbl[i].n & PLL_DIV_N_MASK) << PLL_DIV_N_SHIFT;
+
+       /* clearing the 16-bit dithered field also covers the normal field */
+       val &= ~(PLL_DITH_FDBK_M_MASK << PLL_DITH_FDBK_M_SHIFT);
+       if (rtbl[i].mode)
+               val |= (rtbl[i].m & PLL_DITH_FDBK_M_MASK) <<
+                       PLL_DITH_FDBK_M_SHIFT;
+       else
+               val |= (rtbl[i].m & PLL_NORM_FDBK_M_MASK) <<
+                       PLL_NORM_FDBK_M_SHIFT;
+
+       writel_relaxed(val, vco->cfg_reg);
+
+       if (vco->lock)
+               spin_unlock_irqrestore(vco->lock, flags);
+
+       return 0;
+}
+
+/* vco clock: programs mode, m and n; enable/disable lives in the gate */
+static struct clk_ops clk_vco_ops = {
+       .recalc_rate = clk_vco_recalc_rate,
+       .round_rate = clk_vco_round_rate,
+       .set_rate = clk_vco_set_rate,
+};
+
+/**
+ * clk_register_vco_pll - register a vco + pll clock pair
+ * @vco_name: name of the vco clock
+ * @pll_name: name of the pll (post-divided) clock
+ * @vco_gate_name: optional gate between parent and vco (may be NULL)
+ * @parent_name: parent clock of the vco (or of the gate, if present)
+ * @flags: clk framework flags for the vco
+ * @mode_reg: register holding mode and enable bits
+ * @cfg_reg: register holding the m, n and p fields
+ * @rtbl: shared rate table configuring m, n and p
+ * @rtbl_cnt: number of entries in @rtbl
+ * @lock: optional spinlock protecting both registers
+ * @pll_clk: out parameter for the pll clock (may be NULL)
+ * @vco_gate_clk: out parameter for the gate clock (may be NULL)
+ *
+ * The pll is forced to CLK_SET_RATE_PARENT so its set_rate propagates
+ * to the vco. Returns the vco clock on success or an ERR_PTR.
+ */
+struct clk *clk_register_vco_pll(const char *vco_name, const char *pll_name,
+               const char *vco_gate_name, const char *parent_name,
+               unsigned long flags, void __iomem *mode_reg, void __iomem
+               *cfg_reg, struct pll_rate_tbl *rtbl, u8 rtbl_cnt,
+               spinlock_t *lock, struct clk **pll_clk,
+               struct clk **vco_gate_clk)
+{
+       struct clk_vco *vco;
+       struct clk_pll *pll;
+       struct clk *vco_clk, *tpll_clk, *tvco_gate_clk;
+       struct clk_init_data vco_init, pll_init;
+       const char **vco_parent_name;
+
+       if (!vco_name || !pll_name || !parent_name || !mode_reg || !cfg_reg ||
+                       !rtbl || !rtbl_cnt) {
+               pr_err("Invalid arguments passed\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       vco = kzalloc(sizeof(*vco), GFP_KERNEL);
+       if (!vco) {
+               pr_err("could not allocate vco clk\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+       if (!pll) {
+               pr_err("could not allocate pll clk\n");
+               goto free_vco;
+       }
+
+       /* struct clk_vco assignments */
+       vco->mode_reg = mode_reg;
+       vco->cfg_reg = cfg_reg;
+       vco->rtbl = rtbl;
+       vco->rtbl_cnt = rtbl_cnt;
+       vco->lock = lock;
+       vco->hw.init = &vco_init;
+
+       pll->vco = vco;
+       pll->hw.init = &pll_init;
+
+       /* optional gate sits between the parent and the vco */
+       if (vco_gate_name) {
+               tvco_gate_clk = clk_register_gate(NULL, vco_gate_name,
+                               parent_name, 0, mode_reg, PLL_ENABLE, 0, lock);
+               if (IS_ERR_OR_NULL(tvco_gate_clk))
+                       goto free_pll;
+
+               if (vco_gate_clk)
+                       *vco_gate_clk = tvco_gate_clk;
+               vco_parent_name = &vco_gate_name;
+       } else {
+               vco_parent_name = &parent_name;
+       }
+
+       vco_init.name = vco_name;
+       vco_init.ops = &clk_vco_ops;
+       vco_init.flags = flags;
+       vco_init.parent_names = vco_parent_name;
+       vco_init.num_parents = 1;
+
+       /* pll always forwards set_rate to its vco parent */
+       pll_init.name = pll_name;
+       pll_init.ops = &clk_pll_ops;
+       pll_init.flags = CLK_SET_RATE_PARENT;
+       pll_init.parent_names = &vco_name;
+       pll_init.num_parents = 1;
+
+       vco_clk = clk_register(NULL, &vco->hw);
+       if (IS_ERR_OR_NULL(vco_clk))
+               goto free_pll;
+
+       tpll_clk = clk_register(NULL, &pll->hw);
+       if (IS_ERR_OR_NULL(tpll_clk))
+               goto free_pll;
+
+       if (pll_clk)
+               *pll_clk = tpll_clk;
+
+       return vco_clk;
+
+free_pll:
+       kfree(pll);
+free_vco:
+       kfree(vco);
+
+       /*
+        * NOTE(review): clocks already registered on this path (gate, vco)
+        * are not unregistered, and -ENOMEM is reported even when it was
+        * registration (not allocation) that failed -- confirm this is
+        * acceptable for the boot-time-only callers.
+        */
+       pr_err("Failed to register vco pll clock\n");
+
+       return ERR_PTR(-ENOMEM);
+}
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
new file mode 100644 (file)
index 0000000..376d4e5
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * SPEAr clk - Common routines
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/types.h>
+#include "clk.h"
+
+/*
+ * clk_round_rate_index - find the rate-table entry best matching @drate
+ * @hw: clock being rounded
+ * @drate: desired rate
+ * @parent_rate: parent rate handed to @calc_rate
+ * @calc_rate: callback returning the rate for a given table index
+ * @rtbl_cnt: number of entries in the table
+ * @index: out: chosen table index
+ *
+ * The table is assumed sorted in ascending rate order. Returns the
+ * largest table rate not exceeding @drate; if even the first entry
+ * exceeds @drate, the first entry is returned, and if every entry is
+ * below @drate, the last one wins. For an empty table this returns 0
+ * with *index == 0 -- callers guard rtbl_cnt at registration time.
+ */
+long clk_round_rate_index(struct clk_hw *hw, unsigned long drate,
+               unsigned long parent_rate, clk_calc_rate calc_rate, u8 rtbl_cnt,
+               int *index)
+{
+       unsigned long prev_rate, rate = 0;
+
+       for (*index = 0; *index < rtbl_cnt; (*index)++) {
+               prev_rate = rate;
+               rate = calc_rate(hw, parent_rate, *index);
+               if (drate < rate) {
+                       /* previous clock was best */
+                       if (*index) {
+                               rate = prev_rate;
+                               (*index)--;
+                       }
+                       break;
+               }
+       }
+
+       return rate;
+}
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
new file mode 100644 (file)
index 0000000..3321c46
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Clock framework definitions for SPEAr platform
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __SPEAR_CLK_H
+#define __SPEAR_CLK_H
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+/* Auxiliary Synth clk */
+/* Default masks */
+#define AUX_EQ_SEL_SHIFT       30
+#define AUX_EQ_SEL_MASK                1
+#define AUX_EQ1_SEL            0
+#define AUX_EQ2_SEL            1
+#define AUX_XSCALE_SHIFT       16
+#define AUX_XSCALE_MASK                0xFFF
+#define AUX_YSCALE_SHIFT       0
+#define AUX_YSCALE_MASK                0xFFF
+#define AUX_SYNT_ENB           31
+
+/* Register field layout of an aux synthesizer; clk_register_aux() falls
+ * back to the defaults above when no masks are supplied. */
+struct aux_clk_masks {
+       u32 eq_sel_mask;
+       u32 eq_sel_shift;
+       u32 eq1_mask;
+       u32 eq2_mask;
+       u32 xscale_sel_mask;
+       u32 xscale_sel_shift;
+       u32 yscale_sel_mask;
+       u32 yscale_sel_shift;
+       u32 enable_bit;
+};
+
+/* One selectable aux rate: xscale/yscale pair plus equation select */
+struct aux_rate_tbl {
+       u16 xscale;
+       u16 yscale;
+       u8 eq;
+};
+
+/* Runtime state of an aux synthesizer clock */
+struct clk_aux {
+       struct                  clk_hw hw;
+       void __iomem            *reg;
+       struct aux_clk_masks    *masks;
+       struct aux_rate_tbl     *rtbl;
+       u8                      rtbl_cnt;
+       spinlock_t              *lock;  /* optional shared register lock */
+};
+
+/* Fractional Synth clk */
+/* One selectable frac rate: raw 17-bit divider (real divider << 14) */
+struct frac_rate_tbl {
+       u32 div;
+};
+
+/* Runtime state of a fractional synthesizer clock */
+struct clk_frac {
+       struct                  clk_hw hw;
+       void __iomem            *reg;
+       struct frac_rate_tbl    *rtbl;
+       u8                      rtbl_cnt;
+       spinlock_t              *lock;
+};
+
+/* GPT clk */
+/* One selectable gpt rate: Fout = Fin / ((2 ^ (N+1)) * (M+1)) */
+struct gpt_rate_tbl {
+       u16 mscale;
+       u16 nscale;
+};
+
+/* Runtime state of a general purpose timer synthesizer clock */
+struct clk_gpt {
+       struct                  clk_hw hw;
+       void __iomem            *reg;
+       struct gpt_rate_tbl     *rtbl;
+       u8                      rtbl_cnt;
+       spinlock_t              *lock;
+};
+
+/* VCO-PLL clk */
+/* One vco/pll configuration: dithering mode, m, n and post-divider p */
+struct pll_rate_tbl {
+       u8 mode;
+       u16 m;
+       u8 n;
+       u8 p;
+};
+
+/* Runtime state of a vco; registers and table are shared with its pll */
+struct clk_vco {
+       struct                  clk_hw hw;
+       void __iomem            *mode_reg;
+       void __iomem            *cfg_reg;
+       struct pll_rate_tbl     *rtbl;
+       u8                      rtbl_cnt;
+       spinlock_t              *lock;
+};
+
+/* pll reaches its registers and rate table through @vco */
+struct clk_pll {
+       struct                  clk_hw hw;
+       struct clk_vco          *vco;
+       const char              *parent[1];     /* NOTE(review): not set by clk_register_vco_pll() -- verify use */
+       spinlock_t              *lock;
+};
+
+/* Callback computing the rate for rate-table entry @index */
+typedef unsigned long (*clk_calc_rate)(struct clk_hw *hw, unsigned long prate,
+               int index);
+
+/* clk register routines */
+struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
+               const char *parent_name, unsigned long flags, void __iomem *reg,
+               struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+               u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk);
+struct clk *clk_register_frac(const char *name, const char *parent_name,
+               unsigned long flags, void __iomem *reg,
+               struct frac_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock);
+struct clk *clk_register_gpt(const char *name, const char *parent_name, unsigned
+               long flags, void __iomem *reg, struct gpt_rate_tbl *rtbl, u8
+               rtbl_cnt, spinlock_t *lock);
+struct clk *clk_register_vco_pll(const char *vco_name, const char *pll_name,
+               const char *vco_gate_name, const char *parent_name,
+               unsigned long flags, void __iomem *mode_reg, void __iomem
+               *cfg_reg, struct pll_rate_tbl *rtbl, u8 rtbl_cnt,
+               spinlock_t *lock, struct clk **pll_clk,
+               struct clk **vco_gate_clk);
+
+/* shared rate-table rounding helper (drivers/clk/spear/clk.c) */
+long clk_round_rate_index(struct clk_hw *hw, unsigned long drate,
+               unsigned long parent_rate, clk_calc_rate calc_rate, u8 rtbl_cnt,
+               int *index);
+
+#endif /* __SPEAR_CLK_H */
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
new file mode 100644 (file)
index 0000000..42b68df
--- /dev/null
@@ -0,0 +1,1106 @@
+/*
+ * arch/arm/mach-spear13xx/spear1310_clock.c
+ *
+ * SPEAr1310 machine clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <mach/spear.h>
+#include "clk.h"
+
+/* PLL related registers and bit values */
+#define SPEAR1310_PLL_CFG                      (VA_MISC_BASE + 0x210)
+       /* PLL_CFG bit values */
+       #define SPEAR1310_CLCD_SYNT_CLK_MASK            1
+       #define SPEAR1310_CLCD_SYNT_CLK_SHIFT           31
+       #define SPEAR1310_RAS_SYNT2_3_CLK_MASK          2
+       #define SPEAR1310_RAS_SYNT2_3_CLK_SHIFT         29
+       #define SPEAR1310_RAS_SYNT_CLK_MASK             2
+       #define SPEAR1310_RAS_SYNT0_1_CLK_SHIFT         27
+       #define SPEAR1310_PLL_CLK_MASK                  2
+       #define SPEAR1310_PLL3_CLK_SHIFT                24
+       #define SPEAR1310_PLL2_CLK_SHIFT                22
+       #define SPEAR1310_PLL1_CLK_SHIFT                20
+
+#define SPEAR1310_PLL1_CTR                     (VA_MISC_BASE + 0x214)
+#define SPEAR1310_PLL1_FRQ                     (VA_MISC_BASE + 0x218)
+#define SPEAR1310_PLL2_CTR                     (VA_MISC_BASE + 0x220)
+#define SPEAR1310_PLL2_FRQ                     (VA_MISC_BASE + 0x224)
+#define SPEAR1310_PLL3_CTR                     (VA_MISC_BASE + 0x22C)
+#define SPEAR1310_PLL3_FRQ                     (VA_MISC_BASE + 0x230)
+#define SPEAR1310_PLL4_CTR                     (VA_MISC_BASE + 0x238)
+#define SPEAR1310_PLL4_FRQ                     (VA_MISC_BASE + 0x23C)
+#define SPEAR1310_PERIP_CLK_CFG                        (VA_MISC_BASE + 0x244)
+       /* PERIP_CLK_CFG bit values */
+       #define SPEAR1310_GPT_OSC24_VAL                 0
+       #define SPEAR1310_GPT_APB_VAL                   1
+       #define SPEAR1310_GPT_CLK_MASK                  1
+       #define SPEAR1310_GPT3_CLK_SHIFT                11
+       #define SPEAR1310_GPT2_CLK_SHIFT                10
+       #define SPEAR1310_GPT1_CLK_SHIFT                9
+       #define SPEAR1310_GPT0_CLK_SHIFT                8
+       #define SPEAR1310_UART_CLK_PLL5_VAL             0
+       #define SPEAR1310_UART_CLK_OSC24_VAL            1
+       #define SPEAR1310_UART_CLK_SYNT_VAL             2
+       #define SPEAR1310_UART_CLK_MASK                 2
+       #define SPEAR1310_UART_CLK_SHIFT                4
+
+       #define SPEAR1310_AUX_CLK_PLL5_VAL              0
+       #define SPEAR1310_AUX_CLK_SYNT_VAL              1
+       #define SPEAR1310_CLCD_CLK_MASK                 2
+       #define SPEAR1310_CLCD_CLK_SHIFT                2
+       #define SPEAR1310_C3_CLK_MASK                   1
+       #define SPEAR1310_C3_CLK_SHIFT                  1
+
+#define SPEAR1310_GMAC_CLK_CFG                 (VA_MISC_BASE + 0x248)
+       #define SPEAR1310_GMAC_PHY_IF_SEL_MASK          3
+       #define SPEAR1310_GMAC_PHY_IF_SEL_SHIFT         4
+       #define SPEAR1310_GMAC_PHY_CLK_MASK             1
+       #define SPEAR1310_GMAC_PHY_CLK_SHIFT            3
+       #define SPEAR1310_GMAC_PHY_INPUT_CLK_MASK       2
+       #define SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT      1
+
+#define SPEAR1310_I2S_CLK_CFG                  (VA_MISC_BASE + 0x24C)
+       /* I2S_CLK_CFG register mask */
+       #define SPEAR1310_I2S_SCLK_X_MASK               0x1F
+       #define SPEAR1310_I2S_SCLK_X_SHIFT              27
+       #define SPEAR1310_I2S_SCLK_Y_MASK               0x1F
+       #define SPEAR1310_I2S_SCLK_Y_SHIFT              22
+       #define SPEAR1310_I2S_SCLK_EQ_SEL_SHIFT         21
+       #define SPEAR1310_I2S_SCLK_SYNTH_ENB            20
+       #define SPEAR1310_I2S_PRS1_CLK_X_MASK           0xFF
+       #define SPEAR1310_I2S_PRS1_CLK_X_SHIFT          12
+       #define SPEAR1310_I2S_PRS1_CLK_Y_MASK           0xFF
+       #define SPEAR1310_I2S_PRS1_CLK_Y_SHIFT          4
+       #define SPEAR1310_I2S_PRS1_EQ_SEL_SHIFT         3
+       #define SPEAR1310_I2S_REF_SEL_MASK              1
+       #define SPEAR1310_I2S_REF_SHIFT                 2
+       #define SPEAR1310_I2S_SRC_CLK_MASK              2
+       #define SPEAR1310_I2S_SRC_CLK_SHIFT             0
+
+#define SPEAR1310_C3_CLK_SYNT                  (VA_MISC_BASE + 0x250)
+#define SPEAR1310_UART_CLK_SYNT                        (VA_MISC_BASE + 0x254)
+#define SPEAR1310_GMAC_CLK_SYNT                        (VA_MISC_BASE + 0x258)
+#define SPEAR1310_SDHCI_CLK_SYNT               (VA_MISC_BASE + 0x25C)
+#define SPEAR1310_CFXD_CLK_SYNT                        (VA_MISC_BASE + 0x260)
+#define SPEAR1310_ADC_CLK_SYNT                 (VA_MISC_BASE + 0x264)
+#define SPEAR1310_AMBA_CLK_SYNT                        (VA_MISC_BASE + 0x268)
+#define SPEAR1310_CLCD_CLK_SYNT                        (VA_MISC_BASE + 0x270)
+#define SPEAR1310_RAS_CLK_SYNT0                        (VA_MISC_BASE + 0x280)
+#define SPEAR1310_RAS_CLK_SYNT1                        (VA_MISC_BASE + 0x288)
+#define SPEAR1310_RAS_CLK_SYNT2                        (VA_MISC_BASE + 0x290)
+#define SPEAR1310_RAS_CLK_SYNT3                        (VA_MISC_BASE + 0x298)
+       /* Check Fractional synthesizer reg masks */
+
+#define SPEAR1310_PERIP1_CLK_ENB               (VA_MISC_BASE + 0x300)
+       /* PERIP1_CLK_ENB register masks */
+       #define SPEAR1310_RTC_CLK_ENB                   31
+       #define SPEAR1310_ADC_CLK_ENB                   30
+       #define SPEAR1310_C3_CLK_ENB                    29
+       #define SPEAR1310_JPEG_CLK_ENB                  28
+       #define SPEAR1310_CLCD_CLK_ENB                  27
+       #define SPEAR1310_DMA_CLK_ENB                   25
+       #define SPEAR1310_GPIO1_CLK_ENB                 24
+       #define SPEAR1310_GPIO0_CLK_ENB                 23
+       #define SPEAR1310_GPT1_CLK_ENB                  22
+       #define SPEAR1310_GPT0_CLK_ENB                  21
+       #define SPEAR1310_I2S0_CLK_ENB                  20
+       #define SPEAR1310_I2S1_CLK_ENB                  19
+       #define SPEAR1310_I2C0_CLK_ENB                  18
+       #define SPEAR1310_SSP_CLK_ENB                   17
+       #define SPEAR1310_UART_CLK_ENB                  15
+       #define SPEAR1310_PCIE_SATA_2_CLK_ENB           14
+       #define SPEAR1310_PCIE_SATA_1_CLK_ENB           13
+       #define SPEAR1310_PCIE_SATA_0_CLK_ENB           12
+       #define SPEAR1310_UOC_CLK_ENB                   11
+       #define SPEAR1310_UHC1_CLK_ENB                  10
+       #define SPEAR1310_UHC0_CLK_ENB                  9
+       #define SPEAR1310_GMAC_CLK_ENB                  8
+       #define SPEAR1310_CFXD_CLK_ENB                  7
+       #define SPEAR1310_SDHCI_CLK_ENB                 6
+       #define SPEAR1310_SMI_CLK_ENB                   5
+       #define SPEAR1310_FSMC_CLK_ENB                  4
+       #define SPEAR1310_SYSRAM0_CLK_ENB               3
+       #define SPEAR1310_SYSRAM1_CLK_ENB               2
+       #define SPEAR1310_SYSROM_CLK_ENB                1
+       #define SPEAR1310_BUS_CLK_ENB                   0
+
+#define SPEAR1310_PERIP2_CLK_ENB               (VA_MISC_BASE + 0x304)
+       /* PERIP2_CLK_ENB register masks */
+       #define SPEAR1310_THSENS_CLK_ENB                8
+       #define SPEAR1310_I2S_REF_PAD_CLK_ENB           7
+       #define SPEAR1310_ACP_CLK_ENB                   6
+       #define SPEAR1310_GPT3_CLK_ENB                  5
+       #define SPEAR1310_GPT2_CLK_ENB                  4
+       #define SPEAR1310_KBD_CLK_ENB                   3
+       #define SPEAR1310_CPU_DBG_CLK_ENB               2
+       #define SPEAR1310_DDR_CORE_CLK_ENB              1
+       #define SPEAR1310_DDR_CTRL_CLK_ENB              0
+
+#define SPEAR1310_RAS_CLK_ENB                  (VA_MISC_BASE + 0x310)
+       /* RAS_CLK_ENB register masks */
+       #define SPEAR1310_SYNT3_CLK_ENB                 17
+       #define SPEAR1310_SYNT2_CLK_ENB                 16
+       #define SPEAR1310_SYNT1_CLK_ENB                 15
+       #define SPEAR1310_SYNT0_CLK_ENB                 14
+       #define SPEAR1310_PCLK3_CLK_ENB                 13
+       #define SPEAR1310_PCLK2_CLK_ENB                 12
+       #define SPEAR1310_PCLK1_CLK_ENB                 11
+       #define SPEAR1310_PCLK0_CLK_ENB                 10
+       #define SPEAR1310_PLL3_CLK_ENB                  9
+       #define SPEAR1310_PLL2_CLK_ENB                  8
+       #define SPEAR1310_C125M_PAD_CLK_ENB             7
+       #define SPEAR1310_C30M_CLK_ENB                  6
+       #define SPEAR1310_C48M_CLK_ENB                  5
+       #define SPEAR1310_OSC_25M_CLK_ENB               4
+       #define SPEAR1310_OSC_32K_CLK_ENB               3
+       #define SPEAR1310_OSC_24M_CLK_ENB               2
+       #define SPEAR1310_PCLK_CLK_ENB                  1
+       #define SPEAR1310_ACLK_CLK_ENB                  0
+
+/* RAS Area Control Register */
+#define SPEAR1310_RAS_CTRL_REG0                        (VA_SPEAR1310_RAS_BASE + 0x000)
+       #define SPEAR1310_SSP1_CLK_MASK                 3
+       #define SPEAR1310_SSP1_CLK_SHIFT                26
+       #define SPEAR1310_TDM_CLK_MASK                  1
+       #define SPEAR1310_TDM2_CLK_SHIFT                24
+       #define SPEAR1310_TDM1_CLK_SHIFT                23
+       #define SPEAR1310_I2C_CLK_MASK                  1
+       #define SPEAR1310_I2C7_CLK_SHIFT                22
+       #define SPEAR1310_I2C6_CLK_SHIFT                21
+       #define SPEAR1310_I2C5_CLK_SHIFT                20
+       #define SPEAR1310_I2C4_CLK_SHIFT                19
+       #define SPEAR1310_I2C3_CLK_SHIFT                18
+       #define SPEAR1310_I2C2_CLK_SHIFT                17
+       #define SPEAR1310_I2C1_CLK_SHIFT                16
+       #define SPEAR1310_GPT64_CLK_MASK                1
+       #define SPEAR1310_GPT64_CLK_SHIFT               15
+       #define SPEAR1310_RAS_UART_CLK_MASK             1
+       #define SPEAR1310_UART5_CLK_SHIFT               14
+       #define SPEAR1310_UART4_CLK_SHIFT               13
+       #define SPEAR1310_UART3_CLK_SHIFT               12
+       #define SPEAR1310_UART2_CLK_SHIFT               11
+       #define SPEAR1310_UART1_CLK_SHIFT               10
+       #define SPEAR1310_PCI_CLK_MASK                  1
+       #define SPEAR1310_PCI_CLK_SHIFT                 0
+
+#define SPEAR1310_RAS_CTRL_REG1                        (VA_SPEAR1310_RAS_BASE + 0x004)
+       #define SPEAR1310_PHY_CLK_MASK                  0x3
+       #define SPEAR1310_RMII_PHY_CLK_SHIFT            0
+       #define SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT      2
+
+#define SPEAR1310_RAS_SW_CLK_CTRL              (VA_SPEAR1310_RAS_BASE + 0x0148)
+       #define SPEAR1310_CAN1_CLK_ENB                  25
+       #define SPEAR1310_CAN0_CLK_ENB                  24
+       #define SPEAR1310_GPT64_CLK_ENB                 23
+       #define SPEAR1310_SSP1_CLK_ENB                  22
+       #define SPEAR1310_I2C7_CLK_ENB                  21
+       #define SPEAR1310_I2C6_CLK_ENB                  20
+       #define SPEAR1310_I2C5_CLK_ENB                  19
+       #define SPEAR1310_I2C4_CLK_ENB                  18
+       #define SPEAR1310_I2C3_CLK_ENB                  17
+       #define SPEAR1310_I2C2_CLK_ENB                  16
+       #define SPEAR1310_I2C1_CLK_ENB                  15
+       #define SPEAR1310_UART5_CLK_ENB                 14
+       #define SPEAR1310_UART4_CLK_ENB                 13
+       #define SPEAR1310_UART3_CLK_ENB                 12
+       #define SPEAR1310_UART2_CLK_ENB                 11
+       #define SPEAR1310_UART1_CLK_ENB                 10
+       #define SPEAR1310_RS485_1_CLK_ENB               9
+       #define SPEAR1310_RS485_0_CLK_ENB               8
+       #define SPEAR1310_TDM2_CLK_ENB                  7
+       #define SPEAR1310_TDM1_CLK_ENB                  6
+       #define SPEAR1310_PCI_CLK_ENB                   5
+       #define SPEAR1310_GMII_CLK_ENB                  4
+       #define SPEAR1310_MII2_CLK_ENB                  3
+       #define SPEAR1310_MII1_CLK_ENB                  2
+       #define SPEAR1310_MII0_CLK_ENB                  1
+       #define SPEAR1310_ESRAM_CLK_ENB                 0
+
+/* Single lock shared by every clk registration in this file. */
+static DEFINE_SPINLOCK(_lock);
+
+/* pll rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+       /* PCLK 24MHz */
+       {.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */
+       {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */
+       {.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */
+       {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */
+       {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */
+       {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */
+       {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1 GHz */
+};
+
+/* vco-pll4 rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll4_rtbl[] = {
+       {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */
+       {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */
+       {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */
+       {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1 GHz */
+};
+
+/* aux rate configuration table, in ascending order of rates */
+/* xscale/yscale form the synthesizer's fraction; eq selects the equation. */
+static struct aux_rate_tbl aux_rtbl[] = {
+       /* For VCO1div2 = 500 MHz */
+       {.xscale = 10, .yscale = 204, .eq = 0}, /* 12.29 MHz */
+       {.xscale = 4, .yscale = 21, .eq = 0}, /* 48 MHz */
+       {.xscale = 2, .yscale = 6, .eq = 0}, /* 83 MHz */
+       {.xscale = 2, .yscale = 4, .eq = 0}, /* 125 MHz */
+       {.xscale = 1, .yscale = 3, .eq = 1}, /* 166 MHz */
+       {.xscale = 1, .yscale = 2, .eq = 1}, /* 250 MHz */
+};
+
+/* gmac rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl gmac_rtbl[] = {
+       /* For gmac phy input clk; divisors are relative to the parent rate */
+       {.xscale = 2, .yscale = 6, .eq = 0}, /* divided by 6 */
+       {.xscale = 2, .yscale = 4, .eq = 0}, /* divided by 4 */
+       {.xscale = 1, .yscale = 3, .eq = 1}, /* divided by 3 */
+       {.xscale = 1, .yscale = 2, .eq = 1}, /* divided by 2 */
+};
+
+/* clcd rate configuration table, in ascending order of rates */
+/* The same pixel rate appears for several possible vco1div4 parent rates. */
+static struct frac_rate_tbl clcd_rtbl[] = {
+       {.div = 0x14000}, /* 25 MHz, for vco1div4 = 250 MHz */
+       {.div = 0x1284B}, /* 27 MHz, for vco1div4 = 250 MHz */
+       {.div = 0x0D8D3}, /* 58 MHz, for vco1div4 = 393 MHz */
+       {.div = 0x0B72C}, /* 58 MHz, for vco1div4 = 332 MHz */
+       {.div = 0x089EE}, /* 58 MHz, for vco1div4 = 250 MHz */
+       {.div = 0x06f1C}, /* 72 MHz, for vco1div4 = 250 MHz */
+       {.div = 0x06E58}, /* 58 MHz, for vco1div4 = 200 MHz */
+       {.div = 0x06c1B}, /* 74 MHz, for vco1div4 = 250 MHz */
+       {.div = 0x04A12}, /* 108 MHz, for vco1div4 = 250 MHz */
+       {.div = 0x0378E}, /* 144 MHz, for vco1div4 = 250 MHz */
+};
+
+/* i2s prescaler1 masks */
+/* NOTE(review): no .enable_bit here, unlike i2s_sclk_masks — presumably the
+ * prescaler has no gate bit in I2S_CLK_CFG; confirm against the datasheet. */
+static struct aux_clk_masks i2s_prs1_masks = {
+       .eq_sel_mask = AUX_EQ_SEL_MASK,
+       .eq_sel_shift = SPEAR1310_I2S_PRS1_EQ_SEL_SHIFT,
+       .eq1_mask = AUX_EQ1_SEL,
+       .eq2_mask = AUX_EQ2_SEL,
+       .xscale_sel_mask = SPEAR1310_I2S_PRS1_CLK_X_MASK,
+       .xscale_sel_shift = SPEAR1310_I2S_PRS1_CLK_X_SHIFT,
+       .yscale_sel_mask = SPEAR1310_I2S_PRS1_CLK_Y_MASK,
+       .yscale_sel_shift = SPEAR1310_I2S_PRS1_CLK_Y_SHIFT,
+};
+
+/* i2s sclk (bit clock) synthesizer masks */
+static struct aux_clk_masks i2s_sclk_masks = {
+       .eq_sel_mask = AUX_EQ_SEL_MASK,
+       .eq_sel_shift = SPEAR1310_I2S_SCLK_EQ_SEL_SHIFT,
+       .eq1_mask = AUX_EQ1_SEL,
+       .eq2_mask = AUX_EQ2_SEL,
+       .xscale_sel_mask = SPEAR1310_I2S_SCLK_X_MASK,
+       .xscale_sel_shift = SPEAR1310_I2S_SCLK_X_SHIFT,
+       .yscale_sel_mask = SPEAR1310_I2S_SCLK_Y_MASK,
+       .yscale_sel_shift = SPEAR1310_I2S_SCLK_Y_SHIFT,
+       .enable_bit = SPEAR1310_I2S_SCLK_SYNTH_ENB,
+};
+
+/* i2s prs1 aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_prs1_rtbl[] = {
+       /* For parent clk = 49.152 MHz */
+       {.xscale = 1, .yscale = 2, .eq = 0}, /* 12.288 MHz */
+};
+
+/* i2s sclk aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_sclk_rtbl[] = {
+       /* For i2s_ref_clk = 12.288MHz */
+       {.xscale = 1, .yscale = 4, .eq = 0}, /* 1.53 MHz */
+       {.xscale = 1, .yscale = 2, .eq = 0}, /* 3.07 MHz */
+};
+
+/* adc rate configuration table, in ascending order of rates */
+/* possible adc range is 2.5 MHz to 20 MHz. */
+static struct aux_rate_tbl adc_rtbl[] = {
+       /* For ahb = 166.67 MHz */
+       {.xscale = 1, .yscale = 31, .eq = 0}, /* 2.68 MHz */
+       {.xscale = 2, .yscale = 21, .eq = 0}, /* 7.94 MHz */
+       {.xscale = 4, .yscale = 21, .eq = 0}, /* 15.87 MHz */
+       {.xscale = 10, .yscale = 42, .eq = 0}, /* 19.84 MHz */
+};
+
+/* General synth rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl gen_rtbl[] = {
+       /* For vco1div4 = 250 MHz */
+       {.div = 0x14000}, /* 25 MHz */
+       {.div = 0x0A000}, /* 50 MHz */
+       {.div = 0x05000}, /* 100 MHz */
+       {.div = 0x02000}, /* 250 MHz */
+};
+
+/* clock parents */
+/*
+ * Parent-name arrays handed to clk_register_mux(); array order maps to the
+ * mux select value, so "none" reserves an encoding with no usable source.
+ */
+static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
+static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
+static const char *uart0_parents[] = { "pll5_clk", "uart_synth_gate_clk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
+static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+       "osc_25m_clk", };
+static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
+       "gmac_phy_synth_gate_clk", };
+static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *i2s_src_parents[] = { "vco1div2_clk", "none", "pll3_clk",
+       "i2s_src_pad_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
+static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
+       "pll3_clk", };
+static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
+       "pll2_clk", };
+static const char *rmii_phy_parents[] = { "ras_tx50_clk", "none",
+       "ras_pll2_clk", "ras_synth0_clk", };
+static const char *smii_rgmii_phy_parents[] = { "none", "ras_tx125_clk",
+       "ras_pll2_clk", "ras_synth0_clk", };
+static const char *uart_parents[] = { "ras_apb_clk", "gen_synth3_clk", };
+static const char *i2c_parents[] = { "ras_apb_clk", "gen_synth1_clk", };
+static const char *ssp1_parents[] = { "ras_apb_clk", "gen_synth1_clk",
+       "ras_plclk0_clk", };
+static const char *pci_parents[] = { "ras_pll3_clk", "gen_synth2_clk", };
+static const char *tdm_parents[] = { "ras_pll3_clk", "gen_synth1_clk", };
+
+void __init spear1310_clk_init(void)
+{
+       struct clk *clk, *clk1;
+
+       clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+       clk_register_clkdev(clk, "apb_pclk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+                       32000);
+       clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
+                       24000000);
+       clk_register_clkdev(clk, "osc_24m_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
+                       25000000);
+       clk_register_clkdev(clk, "osc_25m_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
+                       CLK_IS_ROOT, 125000000);
+       clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
+                       CLK_IS_ROOT, 12288000);
+       clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);
+
+       /* clock derived from 32 KHz osc clk */
+       clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_RTC_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "fc900000.rtc");
+
+       /* clock derived from 24 or 25 MHz osc clk */
+       /* vco-pll */
+       clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+                       ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
+                       SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "vco1_mux_clk", NULL);
+       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
+                       0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl,
+                       ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+       clk_register_clkdev(clk, "vco1_clk", NULL);
+       clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+       clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+                       ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
+                       SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "vco2_mux_clk", NULL);
+       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
+                       0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl,
+                       ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+       clk_register_clkdev(clk, "vco2_clk", NULL);
+       clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+       clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+                       ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
+                       SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "vco3_mux_clk", NULL);
+       clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
+                       0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl,
+                       ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+       clk_register_clkdev(clk, "vco3_clk", NULL);
+       clk_register_clkdev(clk1, "pll3_clk", NULL);
+
+       clk = clk_register_vco_pll("vco4_clk", "pll4_clk", NULL, "osc_24m_clk",
+                       0, SPEAR1310_PLL4_CTR, SPEAR1310_PLL4_FRQ, pll4_rtbl,
+                       ARRAY_SIZE(pll4_rtbl), &_lock, &clk1, NULL);
+       clk_register_clkdev(clk, "vco4_clk", NULL);
+       clk_register_clkdev(clk1, "pll4_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "pll5_clk", "osc_24m_clk", 0,
+                       48000000);
+       clk_register_clkdev(clk, "pll5_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "pll6_clk", "osc_25m_clk", 0,
+                       25000000);
+       clk_register_clkdev(clk, "pll6_clk", NULL);
+
+       /* vco div n clocks */
+       clk = clk_register_fixed_factor(NULL, "vco1div2_clk", "vco1_clk", 0, 1,
+                       2);
+       clk_register_clkdev(clk, "vco1div2_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "vco1div4_clk", "vco1_clk", 0, 1,
+                       4);
+       clk_register_clkdev(clk, "vco1div4_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "vco2div2_clk", "vco2_clk", 0, 1,
+                       2);
+       clk_register_clkdev(clk, "vco2div2_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "vco3div2_clk", "vco3_clk", 0, 1,
+                       2);
+       clk_register_clkdev(clk, "vco3div2_clk", NULL);
+
+       /* peripherals */
+       clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
+                       128);
+       clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+                       SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_THSENS_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "spear_thermal");
+
+       /* clock derived from pll4 clk */
+       clk = clk_register_fixed_factor(NULL, "ddr_clk", "pll4_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, "ddr_clk", NULL);
+
+       /* clock derived from pll1 clk */
+       clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 2);
+       clk_register_clkdev(clk, "cpu_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "wdt_clk", "cpu_clk", 0, 1,
+                       2);
+       clk_register_clkdev(clk, NULL, "ec800620.wdt");
+
+       clk = clk_register_fixed_factor(NULL, "ahb_clk", "pll1_clk", 0, 1,
+                       6);
+       clk_register_clkdev(clk, "ahb_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "apb_clk", "pll1_clk", 0, 1,
+                       12);
+       clk_register_clkdev(clk, "apb_clk", NULL);
+
+       /* gpt clocks */
+       clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+                       ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+                       SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
+       clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "gpt0");
+
+       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+                       ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+                       SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "gpt1");
+
+       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+                       ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+                       SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+                       SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "gpt2");
+
+       clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+                       ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+                       SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+                       SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "gpt3");
+
+       /* others */
+       clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
+                       "vco1div2_clk", 0, SPEAR1310_UART_CLK_SYNT, NULL,
+                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "uart_synth_clk", NULL);
+       clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+
+       clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+                       ARRAY_SIZE(uart0_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+                       SPEAR1310_UART_CLK_SHIFT, SPEAR1310_UART_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UART_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e0000000.serial");
+
+       clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+                       "vco1div2_clk", 0, SPEAR1310_SDHCI_CLK_SYNT, NULL,
+                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
+       clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+
+       clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SDHCI_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "b3000000.sdhci");
+
+       clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
+                       "vco1div2_clk", 0, SPEAR1310_CFXD_CLK_SYNT, NULL,
+                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
+       clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+
+       clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CFXD_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "b2800000.cf");
+       clk_register_clkdev(clk, NULL, "arasan_xd");
+
+       clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
+                       "vco1div2_clk", 0, SPEAR1310_C3_CLK_SYNT, NULL,
+                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "c3_synth_clk", NULL);
+       clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+
+       clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+                       ARRAY_SIZE(c3_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+                       SPEAR1310_C3_CLK_SHIFT, SPEAR1310_C3_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "c3_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_C3_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "c3");
+
+       /* gmac */
+       clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
+                       gmac_phy_input_parents,
+                       ARRAY_SIZE(gmac_phy_input_parents), 0,
+                       SPEAR1310_GMAC_CLK_CFG,
+                       SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT,
+                       SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+
+       clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
+                       "gmac_phy_input_mux_clk", 0, SPEAR1310_GMAC_CLK_SYNT,
+                       NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
+       clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+
+       clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+                       ARRAY_SIZE(gmac_phy_parents), 0,
+                       SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT,
+                       SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "stmmacphy.0");
+
+       /* clcd */
+       clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+                       ARRAY_SIZE(clcd_synth_parents), 0,
+                       SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT,
+                       SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+
+       clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+                       SPEAR1310_CLCD_CLK_SYNT, clcd_rtbl,
+                       ARRAY_SIZE(clcd_rtbl), &_lock);
+       clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+
+       clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+                       ARRAY_SIZE(clcd_pixel_parents), 0,
+                       SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT,
+                       SPEAR1310_CLCD_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
+
+       clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CLCD_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "clcd_clk", NULL);
+
+       /* i2s */
+       clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+                       ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG,
+                       SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK,
+                       0, &_lock);
+       clk_register_clkdev(clk, "i2s_src_clk", NULL);
+
+       clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+                       SPEAR1310_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
+                       ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
+       clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
+
+       clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+                       ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1310_I2S_CLK_CFG,
+                       SPEAR1310_I2S_REF_SHIFT, SPEAR1310_I2S_REF_SEL_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "i2s_ref_clk", NULL);
+
+       clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+                       SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_I2S_REF_PAD_CLK_ENB,
+                       0, &_lock);
+       clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
+
+       clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
+                       "i2s_ref_pad_clk", 0, SPEAR1310_I2S_CLK_CFG,
+                       &i2s_sclk_masks, i2s_sclk_rtbl,
+                       ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
+       clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+
+       /* clock derived from ahb clk */
+       clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2C0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e0280000.i2c");
+
+       clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_DMA_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "ea800000.dma");
+       clk_register_clkdev(clk, NULL, "eb000000.dma");
+
+       clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_JPEG_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "b2000000.jpeg");
+
+       clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GMAC_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e2000000.eth");
+
+       clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_FSMC_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "b0000000.flash");
+
+       clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SMI_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "ea000000.flash");
+
+       clk = clk_register_gate(NULL, "usbh0_clk", "ahb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UHC0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "usbh.0_clk", NULL);
+
+       clk = clk_register_gate(NULL, "usbh1_clk", "ahb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UHC1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "usbh.1_clk", NULL);
+
+       clk = clk_register_gate(NULL, "uoc_clk", "ahb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UOC_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "uoc");
+
+       clk = clk_register_gate(NULL, "pcie_sata_0_clk", "ahb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_0_CLK_ENB,
+                       0, &_lock);
+       clk_register_clkdev(clk, NULL, "dw_pcie.0");
+       clk_register_clkdev(clk, NULL, "ahci.0");
+
+       clk = clk_register_gate(NULL, "pcie_sata_1_clk", "ahb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_1_CLK_ENB,
+                       0, &_lock);
+       clk_register_clkdev(clk, NULL, "dw_pcie.1");
+       clk_register_clkdev(clk, NULL, "ahci.1");
+
+       clk = clk_register_gate(NULL, "pcie_sata_2_clk", "ahb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_2_CLK_ENB,
+                       0, &_lock);
+       clk_register_clkdev(clk, NULL, "dw_pcie.2");
+       clk_register_clkdev(clk, NULL, "ahci.2");
+
+       clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SYSRAM0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "sysram0_clk", NULL);
+
+       clk = clk_register_gate(NULL, "sysram1_clk", "ahb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SYSRAM1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "sysram1_clk", NULL);
+
+       clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+                       0, SPEAR1310_ADC_CLK_SYNT, NULL, adc_rtbl,
+                       ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "adc_synth_clk", NULL);
+       clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+
+       clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_ADC_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "adc_clk");
+
+       /* clock derived from apb clk */
+       clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SSP_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e0100000.spi");
+
+       clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPIO0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e0600000.gpio");
+
+       clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPIO1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e0680000.gpio");
+
+       clk = clk_register_gate(NULL, "i2s0_clk", "apb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2S0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e0180000.i2s");
+
+       clk = clk_register_gate(NULL, "i2s1_clk", "apb_clk", 0,
+                       SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2S1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e0200000.i2s");
+
+       clk = clk_register_gate(NULL, "kbd_clk", "apb_clk", 0,
+                       SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_KBD_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e0300000.kbd");
+
+       /* RAS clks */
+       clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
+                       gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
+                       0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
+                       SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+
+       clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
+                       gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
+                       0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
+                       SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+
+       clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+                       SPEAR1310_RAS_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+                       &_lock);
+       clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+
+       clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+                       SPEAR1310_RAS_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+                       &_lock);
+       clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+
+       clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+                       SPEAR1310_RAS_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+                       &_lock);
+       clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+
+       clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+                       SPEAR1310_RAS_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+                       &_lock);
+       clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_osc_24m_clk", "osc_24m_clk", 0,
+                       SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_24M_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "ras_osc_24m_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_osc_25m_clk", "osc_25m_clk", 0,
+                       SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_25M_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "ras_osc_25m_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_osc_32k_clk", "osc_32k_clk", 0,
+                       SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_32K_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "ras_osc_32k_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_pll2_clk", "pll2_clk", 0,
+                       SPEAR1310_RAS_CLK_ENB, SPEAR1310_PLL2_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "ras_pll2_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0,
+                       SPEAR1310_RAS_CLK_ENB, SPEAR1310_PLL3_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "ras_pll3_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_125m_pad_clk", 0,
+                       SPEAR1310_RAS_CLK_ENB, SPEAR1310_C125M_PAD_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "ras_tx125_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "ras_30m_fixed_clk", "pll5_clk", 0,
+                       30000000);
+       clk = clk_register_gate(NULL, "ras_30m_clk", "ras_30m_fixed_clk", 0,
+                       SPEAR1310_RAS_CLK_ENB, SPEAR1310_C30M_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "ras_30m_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "ras_48m_fixed_clk", "pll5_clk", 0,
+                       48000000);
+       clk = clk_register_gate(NULL, "ras_48m_clk", "ras_48m_fixed_clk", 0,
+                       SPEAR1310_RAS_CLK_ENB, SPEAR1310_C48M_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "ras_48m_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_ahb_clk", "ahb_clk", 0,
+                       SPEAR1310_RAS_CLK_ENB, SPEAR1310_ACLK_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "ras_ahb_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0,
+                       SPEAR1310_RAS_CLK_ENB, SPEAR1310_PCLK_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "ras_apb_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "ras_plclk0_clk", NULL, CLK_IS_ROOT,
+                       50000000);
+
+       clk = clk_register_fixed_rate(NULL, "ras_tx50_clk", NULL, CLK_IS_ROOT,
+                       50000000);
+
+       clk = clk_register_gate(NULL, "can0_clk", "apb_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_CAN0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "c_can_platform.0");
+
+       clk = clk_register_gate(NULL, "can1_clk", "apb_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_CAN1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "c_can_platform.1");
+
+       clk = clk_register_gate(NULL, "ras_smii0_clk", "ras_ahb_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5c400000.eth");
+
+       clk = clk_register_gate(NULL, "ras_smii1_clk", "ras_ahb_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5c500000.eth");
+
+       clk = clk_register_gate(NULL, "ras_smii2_clk", "ras_ahb_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII2_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5c600000.eth");
+
+       clk = clk_register_gate(NULL, "ras_rgmii_clk", "ras_ahb_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_GMII_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5c700000.eth");
+
+       clk = clk_register_mux(NULL, "smii_rgmii_phy_mux_clk",
+                       smii_rgmii_phy_parents,
+                       ARRAY_SIZE(smii_rgmii_phy_parents), 0,
+                       SPEAR1310_RAS_CTRL_REG1,
+                       SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT,
+                       SPEAR1310_PHY_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "stmmacphy.1");
+       clk_register_clkdev(clk, NULL, "stmmacphy.2");
+       clk_register_clkdev(clk, NULL, "stmmacphy.4");
+
+       clk = clk_register_mux(NULL, "rmii_phy_mux_clk", rmii_phy_parents,
+                       ARRAY_SIZE(rmii_phy_parents), 0,
+                       SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT,
+                       SPEAR1310_PHY_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "stmmacphy.3");
+
+       clk = clk_register_mux(NULL, "uart1_mux_clk", uart_parents,
+                       ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+                       0, &_lock);
+       clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5c800000.serial");
+
+       clk = clk_register_mux(NULL, "uart2_mux_clk", uart_parents,
+                       ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+                       0, &_lock);
+       clk_register_clkdev(clk, "uart2_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "uart2_clk", "uart2_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART2_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5c900000.serial");
+
+       clk = clk_register_mux(NULL, "uart3_mux_clk", uart_parents,
+                       ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+                       0, &_lock);
+       clk_register_clkdev(clk, "uart3_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "uart3_clk", "uart3_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART3_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5ca00000.serial");
+
+       clk = clk_register_mux(NULL, "uart4_mux_clk", uart_parents,
+                       ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+                       0, &_lock);
+       clk_register_clkdev(clk, "uart4_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "uart4_clk", "uart4_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART4_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5cb00000.serial");
+
+       clk = clk_register_mux(NULL, "uart5_mux_clk", uart_parents,
+                       ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+                       0, &_lock);
+       clk_register_clkdev(clk, "uart5_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "uart5_clk", "uart5_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART5_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5cc00000.serial");
+
+       clk = clk_register_mux(NULL, "i2c1_mux_clk", i2c_parents,
+                       ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "i2c1_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5cd00000.i2c");
+
+       clk = clk_register_mux(NULL, "i2c2_mux_clk", i2c_parents,
+                       ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "i2c2_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C2_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5ce00000.i2c");
+
+       clk = clk_register_mux(NULL, "i2c3_mux_clk", i2c_parents,
+                       ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "i2c3_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C3_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5cf00000.i2c");
+
+       clk = clk_register_mux(NULL, "i2c4_mux_clk", i2c_parents,
+                       ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "i2c4_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C4_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5d000000.i2c");
+
+       clk = clk_register_mux(NULL, "i2c5_mux_clk", i2c_parents,
+                       ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "i2c5_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C5_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5d100000.i2c");
+
+       clk = clk_register_mux(NULL, "i2c6_mux_clk", i2c_parents,
+                       ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "i2c6_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C6_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5d200000.i2c");
+
+       clk = clk_register_mux(NULL, "i2c7_mux_clk", i2c_parents,
+                       ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "i2c7_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C7_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5d300000.i2c");
+
+       clk = clk_register_mux(NULL, "ssp1_mux_clk", ssp1_parents,
+                       ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "ssp1_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_SSP1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "5d400000.spi");
+
+       clk = clk_register_mux(NULL, "pci_mux_clk", pci_parents,
+                       ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "pci_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "pci_clk", "pci_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_PCI_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "pci");
+
+       clk = clk_register_mux(NULL, "tdm1_mux_clk", tdm_parents,
+                       ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "tdm1_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "tdm_hdlc.0");
+
+       clk = clk_register_mux(NULL, "tdm2_mux_clk", tdm_parents,
+                       ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+                       SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "tdm2_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mux_clk", 0,
+                       SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM2_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "tdm_hdlc.1");
+}
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
new file mode 100644 (file)
index 0000000..f130919
--- /dev/null
@@ -0,0 +1,964 @@
+/*
+ * arch/arm/mach-spear13xx/spear1340_clock.c
+ *
+ * SPEAr1340 machine clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <mach/spear.h>
+#include "clk.h"
+
+/* Clock Configuration Registers */
+#define SPEAR1340_SYS_CLK_CTRL                 (VA_MISC_BASE + 0x200)
+       #define SPEAR1340_HCLK_SRC_SEL_SHIFT    27
+       #define SPEAR1340_HCLK_SRC_SEL_MASK     1
+       #define SPEAR1340_SCLK_SRC_SEL_SHIFT    23
+       #define SPEAR1340_SCLK_SRC_SEL_MASK     3
+
+/* PLL related registers and bit values */
+#define SPEAR1340_PLL_CFG                      (VA_MISC_BASE + 0x210)
+       /* PLL_CFG bit values */
+       #define SPEAR1340_CLCD_SYNT_CLK_MASK            1
+       #define SPEAR1340_CLCD_SYNT_CLK_SHIFT           31
+       #define SPEAR1340_GEN_SYNT2_3_CLK_SHIFT         29
+       #define SPEAR1340_GEN_SYNT_CLK_MASK             2
+       #define SPEAR1340_GEN_SYNT0_1_CLK_SHIFT         27
+       #define SPEAR1340_PLL_CLK_MASK                  2
+       #define SPEAR1340_PLL3_CLK_SHIFT                24
+       #define SPEAR1340_PLL2_CLK_SHIFT                22
+       #define SPEAR1340_PLL1_CLK_SHIFT                20
+
+#define SPEAR1340_PLL1_CTR                     (VA_MISC_BASE + 0x214)
+#define SPEAR1340_PLL1_FRQ                     (VA_MISC_BASE + 0x218)
+#define SPEAR1340_PLL2_CTR                     (VA_MISC_BASE + 0x220)
+#define SPEAR1340_PLL2_FRQ                     (VA_MISC_BASE + 0x224)
+#define SPEAR1340_PLL3_CTR                     (VA_MISC_BASE + 0x22C)
+#define SPEAR1340_PLL3_FRQ                     (VA_MISC_BASE + 0x230)
+#define SPEAR1340_PLL4_CTR                     (VA_MISC_BASE + 0x238)
+#define SPEAR1340_PLL4_FRQ                     (VA_MISC_BASE + 0x23C)
+#define SPEAR1340_PERIP_CLK_CFG                        (VA_MISC_BASE + 0x244)
+       /* PERIP_CLK_CFG bit values */
+       #define SPEAR1340_SPDIF_CLK_MASK                1
+       #define SPEAR1340_SPDIF_OUT_CLK_SHIFT           15
+       #define SPEAR1340_SPDIF_IN_CLK_SHIFT            14
+       #define SPEAR1340_GPT3_CLK_SHIFT                13
+       #define SPEAR1340_GPT2_CLK_SHIFT                12
+       #define SPEAR1340_GPT_CLK_MASK                  1
+       #define SPEAR1340_GPT1_CLK_SHIFT                9
+       #define SPEAR1340_GPT0_CLK_SHIFT                8
+       #define SPEAR1340_UART_CLK_MASK                 2
+       #define SPEAR1340_UART1_CLK_SHIFT               6
+       #define SPEAR1340_UART0_CLK_SHIFT               4
+       #define SPEAR1340_CLCD_CLK_MASK                 2
+       #define SPEAR1340_CLCD_CLK_SHIFT                2
+       #define SPEAR1340_C3_CLK_MASK                   1
+       #define SPEAR1340_C3_CLK_SHIFT                  1
+
+#define SPEAR1340_GMAC_CLK_CFG                 (VA_MISC_BASE + 0x248)
+       #define SPEAR1340_GMAC_PHY_CLK_MASK             1
+       #define SPEAR1340_GMAC_PHY_CLK_SHIFT            2
+       #define SPEAR1340_GMAC_PHY_INPUT_CLK_MASK       2
+       #define SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT      0
+
+#define SPEAR1340_I2S_CLK_CFG                  (VA_MISC_BASE + 0x24C)
+       /* I2S_CLK_CFG register mask */
+       #define SPEAR1340_I2S_SCLK_X_MASK               0x1F
+       #define SPEAR1340_I2S_SCLK_X_SHIFT              27
+       #define SPEAR1340_I2S_SCLK_Y_MASK               0x1F
+       #define SPEAR1340_I2S_SCLK_Y_SHIFT              22
+       #define SPEAR1340_I2S_SCLK_EQ_SEL_SHIFT         21
+       #define SPEAR1340_I2S_SCLK_SYNTH_ENB            20
+       #define SPEAR1340_I2S_PRS1_CLK_X_MASK           0xFF
+       #define SPEAR1340_I2S_PRS1_CLK_X_SHIFT          12
+       #define SPEAR1340_I2S_PRS1_CLK_Y_MASK           0xFF
+       #define SPEAR1340_I2S_PRS1_CLK_Y_SHIFT          4
+       #define SPEAR1340_I2S_PRS1_EQ_SEL_SHIFT         3
+       #define SPEAR1340_I2S_REF_SEL_MASK              1
+       #define SPEAR1340_I2S_REF_SHIFT                 2
+       #define SPEAR1340_I2S_SRC_CLK_MASK              2
+       #define SPEAR1340_I2S_SRC_CLK_SHIFT             0
+
+#define SPEAR1340_C3_CLK_SYNT                  (VA_MISC_BASE + 0x250)
+#define SPEAR1340_UART0_CLK_SYNT               (VA_MISC_BASE + 0x254)
+#define SPEAR1340_UART1_CLK_SYNT               (VA_MISC_BASE + 0x258)
+#define SPEAR1340_GMAC_CLK_SYNT                        (VA_MISC_BASE + 0x25C)
+#define SPEAR1340_SDHCI_CLK_SYNT               (VA_MISC_BASE + 0x260)
+#define SPEAR1340_CFXD_CLK_SYNT                        (VA_MISC_BASE + 0x264)
+#define SPEAR1340_ADC_CLK_SYNT                 (VA_MISC_BASE + 0x270)
+#define SPEAR1340_AMBA_CLK_SYNT                        (VA_MISC_BASE + 0x274)
+#define SPEAR1340_CLCD_CLK_SYNT                        (VA_MISC_BASE + 0x27C)
+#define SPEAR1340_SYS_CLK_SYNT                 (VA_MISC_BASE + 0x284)
+#define SPEAR1340_GEN_CLK_SYNT0                        (VA_MISC_BASE + 0x28C)
+#define SPEAR1340_GEN_CLK_SYNT1                        (VA_MISC_BASE + 0x294)
+#define SPEAR1340_GEN_CLK_SYNT2                        (VA_MISC_BASE + 0x29C)
+#define SPEAR1340_GEN_CLK_SYNT3                        (VA_MISC_BASE + 0x304)
+#define SPEAR1340_PERIP1_CLK_ENB               (VA_MISC_BASE + 0x30C)
+       #define SPEAR1340_RTC_CLK_ENB                   31
+       #define SPEAR1340_ADC_CLK_ENB                   30
+       #define SPEAR1340_C3_CLK_ENB                    29
+       #define SPEAR1340_CLCD_CLK_ENB                  27
+       #define SPEAR1340_DMA_CLK_ENB                   25
+       #define SPEAR1340_GPIO1_CLK_ENB                 24
+       #define SPEAR1340_GPIO0_CLK_ENB                 23
+       #define SPEAR1340_GPT1_CLK_ENB                  22
+       #define SPEAR1340_GPT0_CLK_ENB                  21
+       #define SPEAR1340_I2S_PLAY_CLK_ENB              20
+       #define SPEAR1340_I2S_REC_CLK_ENB               19
+       #define SPEAR1340_I2C0_CLK_ENB                  18
+       #define SPEAR1340_SSP_CLK_ENB                   17
+       #define SPEAR1340_UART0_CLK_ENB                 15
+       #define SPEAR1340_PCIE_SATA_CLK_ENB             12
+       #define SPEAR1340_UOC_CLK_ENB                   11
+       #define SPEAR1340_UHC1_CLK_ENB                  10
+       #define SPEAR1340_UHC0_CLK_ENB                  9
+       #define SPEAR1340_GMAC_CLK_ENB                  8
+       #define SPEAR1340_CFXD_CLK_ENB                  7
+       #define SPEAR1340_SDHCI_CLK_ENB                 6
+       #define SPEAR1340_SMI_CLK_ENB                   5
+       #define SPEAR1340_FSMC_CLK_ENB                  4
+       #define SPEAR1340_SYSRAM0_CLK_ENB               3
+       #define SPEAR1340_SYSRAM1_CLK_ENB               2
+       #define SPEAR1340_SYSROM_CLK_ENB                1
+       #define SPEAR1340_BUS_CLK_ENB                   0
+
+#define SPEAR1340_PERIP2_CLK_ENB               (VA_MISC_BASE + 0x310)
+       #define SPEAR1340_THSENS_CLK_ENB                8
+       #define SPEAR1340_I2S_REF_PAD_CLK_ENB           7
+       #define SPEAR1340_ACP_CLK_ENB                   6
+       #define SPEAR1340_GPT3_CLK_ENB                  5
+       #define SPEAR1340_GPT2_CLK_ENB                  4
+       #define SPEAR1340_KBD_CLK_ENB                   3
+       #define SPEAR1340_CPU_DBG_CLK_ENB               2
+       #define SPEAR1340_DDR_CORE_CLK_ENB              1
+       #define SPEAR1340_DDR_CTRL_CLK_ENB              0
+
+#define SPEAR1340_PERIP3_CLK_ENB               (VA_MISC_BASE + 0x314)
+       #define SPEAR1340_PLGPIO_CLK_ENB                18
+       #define SPEAR1340_VIDEO_DEC_CLK_ENB             16
+       #define SPEAR1340_VIDEO_ENC_CLK_ENB             15
+       #define SPEAR1340_SPDIF_OUT_CLK_ENB             13
+       #define SPEAR1340_SPDIF_IN_CLK_ENB              12
+       #define SPEAR1340_VIDEO_IN_CLK_ENB              11
+       #define SPEAR1340_CAM0_CLK_ENB                  10
+       #define SPEAR1340_CAM1_CLK_ENB                  9
+       #define SPEAR1340_CAM2_CLK_ENB                  8
+       #define SPEAR1340_CAM3_CLK_ENB                  7
+       #define SPEAR1340_MALI_CLK_ENB                  6
+       #define SPEAR1340_CEC0_CLK_ENB                  5
+       #define SPEAR1340_CEC1_CLK_ENB                  4
+       #define SPEAR1340_PWM_CLK_ENB                   3
+       #define SPEAR1340_I2C1_CLK_ENB                  2
+       #define SPEAR1340_UART1_CLK_ENB                 1
+
+static DEFINE_SPINLOCK(_lock);
+
+/* pll rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+       /* PCLK 24MHz */
+       {.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */
+       {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */
+       {.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */
+       {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */
+       {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */
+       {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */
+       {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1 GHz */
+       {.mode = 0, .m = 0x96, .n = 0x06, .p = 0x0}, /* vco 1200, pll 1200 MHz */
+};
+
+/* vco-pll4 rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll4_rtbl[] = {
+       {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */
+       {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */
+       {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */
+       {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1 GHz */
+};
+
+/*
+ * All below entries generate 166 MHz for
+ * different values of vco1div2
+ */
+static struct frac_rate_tbl amba_synth_rtbl[] = {
+       {.div = 0x06062}, /* for vco1div2 = 500 MHz */
+       {.div = 0x04D1B}, /* for vco1div2 = 400 MHz */
+       {.div = 0x04000}, /* for vco1div2 = 332 MHz */
+       {.div = 0x03031}, /* for vco1div2 = 250 MHz */
+       {.div = 0x0268D}, /* for vco1div2 = 200 MHz */
+};
+
+/*
+ * Synthesizer Clock derived from vco1div2. This clock is one of the
+ * possible clocks to feed cpu directly.
+ * We can program this synthesizer to make cpu run on different clock
+ * frequencies.
+ * Following table provides configuration values to let cpu run on 200,
+ * 250, 332, 400 or 500 MHz considering different possibilities of input
+ * (vco1div2) clock.
+ *
+ * --------------------------------------------------------------------
+ * vco1div2(Mhz)       fout(Mhz)       cpuclk = fout/2         div
+ * --------------------------------------------------------------------
+ * 400                 200             100                     0x04000
+ * 400                 250             125                     0x03333
+ * 400                 332             166                     0x0268D
+ * 400                 400             200                     0x02000
+ * --------------------------------------------------------------------
+ * 500                 200             100                     0x05000
+ * 500                 250             125                     0x04000
+ * 500                 332             166                     0x03031
+ * 500                 400             200                     0x02800
+ * 500                 500             250                     0x02000
+ * --------------------------------------------------------------------
+ * 664                 200             100                     0x06a38
+ * 664                 250             125                     0x054FD
+ * 664                 332             166                     0x04000
+ * 664                 400             200                     0x0351E
+ * 664                 500             250                     0x02A7E
+ * --------------------------------------------------------------------
+ * 800                 200             100                     0x08000
+ * 800                 250             125                     0x06666
+ * 800                 332             166                     0x04D18
+ * 800                 400             200                     0x04000
+ * 800                 500             250                     0x03333
+ * --------------------------------------------------------------------
+ * sys rate configuration table is in descending order of divisor.
+ */
+static struct frac_rate_tbl sys_synth_rtbl[] = {
+       {.div = 0x08000},
+       {.div = 0x06a38},
+       {.div = 0x06666},
+       {.div = 0x054FD},
+       {.div = 0x05000},
+       {.div = 0x04D18},
+       {.div = 0x04000},
+       {.div = 0x0351E},
+       {.div = 0x03333},
+       {.div = 0x03031},
+       {.div = 0x02A7E},
+       {.div = 0x02800},
+       {.div = 0x0268D},
+       {.div = 0x02000},
+};
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+       /* For VCO1div2 = 500 MHz */
+       {.xscale = 10, .yscale = 204, .eq = 0}, /* 12.29 MHz */
+       {.xscale = 4, .yscale = 21, .eq = 0}, /* 48 MHz */
+       {.xscale = 2, .yscale = 6, .eq = 0}, /* 83 MHz */
+       {.xscale = 2, .yscale = 4, .eq = 0}, /* 125 MHz */
+       {.xscale = 1, .yscale = 3, .eq = 1}, /* 166 MHz */
+       {.xscale = 1, .yscale = 2, .eq = 1}, /* 250 MHz */
+};
+
+/* gmac rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl gmac_rtbl[] = {
+       /* For gmac phy input clk */
+       {.xscale = 2, .yscale = 6, .eq = 0}, /* divided by 6 */
+       {.xscale = 2, .yscale = 4, .eq = 0}, /* divided by 4 */
+       {.xscale = 1, .yscale = 3, .eq = 1}, /* divided by 3 */
+       {.xscale = 1, .yscale = 2, .eq = 1}, /* divided by 2 */
+};
+
+/* clcd rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl clcd_rtbl[] = {
+       {.div = 0x14000}, /* 25 Mhz , for vco1div4 = 250 MHz*/
+       {.div = 0x1284B}, /* 27 Mhz , for vco1div4 = 250 MHz*/
+       {.div = 0x0D8D3}, /* 58 Mhz , for vco1div4 = 393 MHz */
+       {.div = 0x0B72C}, /* 58 Mhz , for vco1div4 = 332 MHz */
+       {.div = 0x089EE}, /* 58 Mhz , for vco1div4 = 250 MHz*/
+       {.div = 0x07BA0}, /* 65 Mhz , for vco1div4 = 250 MHz*/
+       {.div = 0x06f1C}, /* 72 Mhz , for vco1div4 = 250 MHz*/
+       {.div = 0x06E58}, /* 58 Mhz , for vco1div4 = 200 MHz */
+       {.div = 0x06c1B}, /* 74 Mhz , for vco1div4 = 250 MHz*/
+       {.div = 0x04A12}, /* 108 Mhz , for vco1div4 = 250 MHz*/
+       {.div = 0x0378E}, /* 144 Mhz , for vco1div4 = 250 MHz*/
+       {.div = 0x0360D}, /* 148 Mhz , for vco1div4 = 250 MHz*/
+       {.div = 0x035E0}, /* 148.5 MHz, for vco1div4 = 250 MHz*/
+};
+
+/* i2s prescaler1 masks */
+static struct aux_clk_masks i2s_prs1_masks = {
+       .eq_sel_mask = AUX_EQ_SEL_MASK,
+       .eq_sel_shift = SPEAR1340_I2S_PRS1_EQ_SEL_SHIFT,
+       .eq1_mask = AUX_EQ1_SEL,
+       .eq2_mask = AUX_EQ2_SEL,
+       .xscale_sel_mask = SPEAR1340_I2S_PRS1_CLK_X_MASK,
+       .xscale_sel_shift = SPEAR1340_I2S_PRS1_CLK_X_SHIFT,
+       .yscale_sel_mask = SPEAR1340_I2S_PRS1_CLK_Y_MASK,
+       .yscale_sel_shift = SPEAR1340_I2S_PRS1_CLK_Y_SHIFT,
+};
+
+/* i2s sclk (bit clock) synthesizer masks */
+static struct aux_clk_masks i2s_sclk_masks = {
+       .eq_sel_mask = AUX_EQ_SEL_MASK,
+       .eq_sel_shift = SPEAR1340_I2S_SCLK_EQ_SEL_SHIFT,
+       .eq1_mask = AUX_EQ1_SEL,
+       .eq2_mask = AUX_EQ2_SEL,
+       .xscale_sel_mask = SPEAR1340_I2S_SCLK_X_MASK,
+       .xscale_sel_shift = SPEAR1340_I2S_SCLK_X_SHIFT,
+       .yscale_sel_mask = SPEAR1340_I2S_SCLK_Y_MASK,
+       .yscale_sel_shift = SPEAR1340_I2S_SCLK_Y_SHIFT,
+       .enable_bit = SPEAR1340_I2S_SCLK_SYNTH_ENB,
+};
+
+/* i2s prs1 aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_prs1_rtbl[] = {
+       /* For parent clk = 49.152 MHz */
+       {.xscale = 1, .yscale = 12, .eq = 0}, /* 2.048 MHz, smp freq = 8Khz */
+       {.xscale = 11, .yscale = 96, .eq = 0}, /* 2.816 MHz, smp freq = 11Khz */
+       {.xscale = 1, .yscale = 6, .eq = 0}, /* 4.096 MHz, smp freq = 16Khz */
+       {.xscale = 11, .yscale = 48, .eq = 0}, /* 5.632 MHz, smp freq = 22Khz */
+
+       /*
+        * with parent clk = 49.152, freq gen is 8.192 MHz, smp freq = 32Khz
+        * with parent clk = 12.288, freq gen is 2.048 MHz, smp freq = 8Khz
+        */
+       {.xscale = 1, .yscale = 3, .eq = 0},
+
+       /* For parent clk = 49.152 MHz */
+       {.xscale = 17, .yscale = 37, .eq = 0}, /* 11.289 MHz, smp freq = 44Khz*/
+       {.xscale = 1, .yscale = 2, .eq = 0}, /* 12.288 MHz, smp freq = 48Khz*/
+};
+
+/* i2s sclk aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_sclk_rtbl[] = {
+       /* For sclk = ref_clk * x/2/y */
+       {.xscale = 1, .yscale = 4, .eq = 0},
+       {.xscale = 1, .yscale = 2, .eq = 0},
+};
+
+/* adc rate configuration table, in ascending order of rates */
+/* possible adc range is 2.5 MHz to 20 MHz. */
+static struct aux_rate_tbl adc_rtbl[] = {
+       /* For ahb = 166.67 MHz */
+       {.xscale = 1, .yscale = 31, .eq = 0}, /* 2.68 MHz */
+       {.xscale = 2, .yscale = 21, .eq = 0}, /* 7.94 MHz */
+       {.xscale = 4, .yscale = 21, .eq = 0}, /* 15.87 MHz */
+       {.xscale = 10, .yscale = 42, .eq = 0}, /* 19.84 MHz */
+};
+
+/* General synth rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl gen_rtbl[] = {
+       /* For vco1div4 = 250 MHz */
+       {.div = 0x1624E}, /* 22.5792 MHz */
+       {.div = 0x14585}, /* 24.576 MHz */
+       {.div = 0x14000}, /* 25 MHz */
+       {.div = 0x0B127}, /* 45.1584 MHz */
+       {.div = 0x0A000}, /* 50 MHz */
+       {.div = 0x061A8}, /* 81.92 MHz */
+       {.div = 0x05000}, /* 100 MHz */
+       {.div = 0x02800}, /* 200 MHz */
+       {.div = 0x02620}, /* 210 MHz */
+       {.div = 0x02460}, /* 220 MHz */
+       {.div = 0x022C0}, /* 230 MHz */
+       {.div = 0x02160}, /* 240 MHz */
+       {.div = 0x02000}, /* 250 MHz */
+};
+
+/* clock parents */
+static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
+static const char *sys_parents[] = { "none", "pll1_clk", "none", "none",
+       "sys_synth_clk", "none", "pll2_clk", "pll3_clk", };
+static const char *ahb_parents[] = { "cpu_div3_clk", "amba_synth_clk", };
+static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
+static const char *uart0_parents[] = { "pll5_clk", "osc_24m_clk",
+       "uart0_synth_gate_clk", };
+static const char *uart1_parents[] = { "pll5_clk", "osc_24m_clk",
+       "uart1_synth_gate_clk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
+static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+       "osc_25m_clk", };
+static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
+       "gmac_phy_synth_gate_clk", };
+static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *i2s_src_parents[] = { "vco1div2_clk", "pll2_clk", "pll3_clk",
+       "i2s_src_pad_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
+static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_synth2_clk",
+};
+static const char *spdif_in_parents[] = { "pll2_clk", "gen_synth3_clk", };
+
+static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
+       "pll3_clk", };
+static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
+       "pll2_clk", };
+
+void __init spear1340_clk_init(void)
+{
+       struct clk *clk, *clk1;
+
+       clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+       clk_register_clkdev(clk, "apb_pclk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+                       32000);
+       clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
+                       24000000);
+       clk_register_clkdev(clk, "osc_24m_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
+                       25000000);
+       clk_register_clkdev(clk, "osc_25m_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
+                       CLK_IS_ROOT, 125000000);
+       clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
+                       CLK_IS_ROOT, 12288000);
+       clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);
+
+       /* clock derived from 32 KHz osc clk */
+       clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_RTC_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "fc900000.rtc");
+
+       /* clock derived from 24 or 25 MHz osc clk */
+       /* vco-pll */
+       clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+                       ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
+                       SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "vco1_mux_clk", NULL);
+       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
+                       0, SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
+                       ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+       clk_register_clkdev(clk, "vco1_clk", NULL);
+       clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+       clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+                       ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
+                       SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "vco2_mux_clk", NULL);
+       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
+                       0, SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
+                       ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+       clk_register_clkdev(clk, "vco2_clk", NULL);
+       clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+       clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+                       ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
+                       SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "vco3_mux_clk", NULL);
+       clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
+                       0, SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
+                       ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+       clk_register_clkdev(clk, "vco3_clk", NULL);
+       clk_register_clkdev(clk1, "pll3_clk", NULL);
+
+       clk = clk_register_vco_pll("vco4_clk", "pll4_clk", NULL, "osc_24m_clk",
+                       0, SPEAR1340_PLL4_CTR, SPEAR1340_PLL4_FRQ, pll4_rtbl,
+                       ARRAY_SIZE(pll4_rtbl), &_lock, &clk1, NULL);
+       clk_register_clkdev(clk, "vco4_clk", NULL);
+       clk_register_clkdev(clk1, "pll4_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "pll5_clk", "osc_24m_clk", 0,
+                       48000000);
+       clk_register_clkdev(clk, "pll5_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "pll6_clk", "osc_25m_clk", 0,
+                       25000000);
+       clk_register_clkdev(clk, "pll6_clk", NULL);
+
+       /* vco div n clocks */
+       clk = clk_register_fixed_factor(NULL, "vco1div2_clk", "vco1_clk", 0, 1,
+                       2);
+       clk_register_clkdev(clk, "vco1div2_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "vco1div4_clk", "vco1_clk", 0, 1,
+                       4);
+       clk_register_clkdev(clk, "vco1div4_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "vco2div2_clk", "vco2_clk", 0, 1,
+                       2);
+       clk_register_clkdev(clk, "vco2div2_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "vco3div2_clk", "vco3_clk", 0, 1,
+                       2);
+       clk_register_clkdev(clk, "vco3div2_clk", NULL);
+
+       /* peripherals */
+       clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
+                       128);
+       clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+                       SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_THSENS_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "spear_thermal");
+
+       /* clock derived from pll4 clk */
+       clk = clk_register_fixed_factor(NULL, "ddr_clk", "pll4_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, "ddr_clk", NULL);
+
+       /* clock derived from pll1 clk */
+       clk = clk_register_frac("sys_synth_clk", "vco1div2_clk", 0,
+                       SPEAR1340_SYS_CLK_SYNT, sys_synth_rtbl,
+                       ARRAY_SIZE(sys_synth_rtbl), &_lock);
+       clk_register_clkdev(clk, "sys_synth_clk", NULL);
+
+       clk = clk_register_frac("amba_synth_clk", "vco1div2_clk", 0,
+                       SPEAR1340_AMBA_CLK_SYNT, amba_synth_rtbl,
+                       ARRAY_SIZE(amba_synth_rtbl), &_lock);
+       clk_register_clkdev(clk, "amba_synth_clk", NULL);
+
+       clk = clk_register_mux(NULL, "sys_mux_clk", sys_parents,
+                       ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL,
+                       SPEAR1340_SCLK_SRC_SEL_SHIFT,
+                       SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "sys_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mux_clk", 0, 1,
+                       2);
+       clk_register_clkdev(clk, "cpu_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "cpu_div3_clk", "cpu_clk", 0, 1,
+                       3);
+       clk_register_clkdev(clk, "cpu_div3_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "wdt_clk", "cpu_clk", 0, 1,
+                       2);
+       clk_register_clkdev(clk, NULL, "ec800620.wdt");
+
+       clk = clk_register_mux(NULL, "ahb_clk", ahb_parents,
+                       ARRAY_SIZE(ahb_parents), 0, SPEAR1340_SYS_CLK_CTRL,
+                       SPEAR1340_HCLK_SRC_SEL_SHIFT,
+                       SPEAR1340_HCLK_SRC_SEL_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "ahb_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1,
+                       2);
+       clk_register_clkdev(clk, "apb_clk", NULL);
+
+       /* gpt clocks */
+       clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+                       ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+                       SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
+       clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "gpt0");
+
+       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+                       ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+                       SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "gpt1");
+
+       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+                       ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+                       SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+                       SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "gpt2");
+
+       clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+                       ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+                       SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+                       SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "gpt3");
+
+       /* others */
+       clk = clk_register_aux("uart0_synth_clk", "uart0_synth_gate_clk",
+                       "vco1div2_clk", 0, SPEAR1340_UART0_CLK_SYNT, NULL,
+                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "uart0_synth_clk", NULL);
+       clk_register_clkdev(clk1, "uart0_synth_gate_clk", NULL);
+
+       clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+                       ARRAY_SIZE(uart0_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+                       SPEAR1340_UART0_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e0000000.serial");
+
+       clk = clk_register_aux("uart1_synth_clk", "uart1_synth_gate_clk",
+                       "vco1div2_clk", 0, SPEAR1340_UART1_CLK_SYNT, NULL,
+                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "uart1_synth_clk", NULL);
+       clk_register_clkdev(clk1, "uart1_synth_gate_clk", NULL);
+
+       clk = clk_register_mux(NULL, "uart1_mux_clk", uart1_parents,
+                       ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+                       SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "b4100000.serial");
+
+       clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+                       "vco1div2_clk", 0, SPEAR1340_SDHCI_CLK_SYNT, NULL,
+                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
+       clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+
+       clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SDHCI_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "b3000000.sdhci");
+
+       clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
+                       "vco1div2_clk", 0, SPEAR1340_CFXD_CLK_SYNT, NULL,
+                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
+       clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+
+       clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CFXD_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "b2800000.cf");
+       clk_register_clkdev(clk, NULL, "arasan_xd");
+
+       clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
+                       "vco1div2_clk", 0, SPEAR1340_C3_CLK_SYNT, NULL,
+                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "c3_synth_clk", NULL);
+       clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+
+       clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+                       ARRAY_SIZE(c3_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+                       SPEAR1340_C3_CLK_SHIFT, SPEAR1340_C3_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "c3_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_C3_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "c3");
+
+       /* gmac */
+       clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
+                       gmac_phy_input_parents,
+                       ARRAY_SIZE(gmac_phy_input_parents), 0,
+                       SPEAR1340_GMAC_CLK_CFG,
+                       SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT,
+                       SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+
+       clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
+                       "gmac_phy_input_mux_clk", 0, SPEAR1340_GMAC_CLK_SYNT,
+                       NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
+       clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+
+       clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+                       ARRAY_SIZE(gmac_phy_parents), 0,
+                       SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT,
+                       SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "stmmacphy.0");
+
+       /* clcd */
+       clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+                       ARRAY_SIZE(clcd_synth_parents), 0,
+                       SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT,
+                       SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+
+       clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+                       SPEAR1340_CLCD_CLK_SYNT, clcd_rtbl,
+                       ARRAY_SIZE(clcd_rtbl), &_lock);
+       clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+
+       clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+                       ARRAY_SIZE(clcd_pixel_parents), 0,
+                       SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT,
+                       SPEAR1340_CLCD_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
+
+       clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CLCD_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "clcd_clk", NULL);
+
+       /* i2s */
+       clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+                       ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG,
+                       SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK,
+                       0, &_lock);
+       clk_register_clkdev(clk, "i2s_src_clk", NULL);
+
+       clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+                       SPEAR1340_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
+                       ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
+       clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
+
+       clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+                       ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1340_I2S_CLK_CFG,
+                       SPEAR1340_I2S_REF_SHIFT, SPEAR1340_I2S_REF_SEL_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "i2s_ref_clk", NULL);
+
+       clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+                       SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_I2S_REF_PAD_CLK_ENB,
+                       0, &_lock);
+       clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
+
+       clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
+                       "i2s_ref_mux_clk", 0, SPEAR1340_I2S_CLK_CFG,
+                       &i2s_sclk_masks, i2s_sclk_rtbl,
+                       ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
+       clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+
+       /* clock derived from ahb clk */
+       clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e0280000.i2c");
+
+       clk = clk_register_gate(NULL, "i2c1_clk", "ahb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "b4000000.i2c");
+
+       clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_DMA_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "ea800000.dma");
+       clk_register_clkdev(clk, NULL, "eb000000.dma");
+
+       clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GMAC_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e2000000.eth");
+
+       clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_FSMC_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "b0000000.flash");
+
+       clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SMI_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "ea000000.flash");
+
+       clk = clk_register_gate(NULL, "usbh0_clk", "ahb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UHC0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "usbh.0_clk", NULL);
+
+       clk = clk_register_gate(NULL, "usbh1_clk", "ahb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UHC1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "usbh.1_clk", NULL);
+
+       clk = clk_register_gate(NULL, "uoc_clk", "ahb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UOC_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "uoc");
+
+       clk = clk_register_gate(NULL, "pcie_sata_clk", "ahb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_PCIE_SATA_CLK_ENB,
+                       0, &_lock);
+       clk_register_clkdev(clk, NULL, "dw_pcie");
+       clk_register_clkdev(clk, NULL, "ahci");
+
+       clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SYSRAM0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "sysram0_clk", NULL);
+
+       clk = clk_register_gate(NULL, "sysram1_clk", "ahb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SYSRAM1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "sysram1_clk", NULL);
+
+       clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+                       0, SPEAR1340_ADC_CLK_SYNT, NULL, adc_rtbl,
+                       ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "adc_synth_clk", NULL);
+       clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+
+       clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_ADC_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "adc_clk");
+
+       /* clock derived from apb clk */
+       clk = clk_register_gate(NULL, "ssp_clk", "apb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SSP_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e0100000.spi");
+
+       clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPIO0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e0600000.gpio");
+
+       clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPIO1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e0680000.gpio");
+
+       clk = clk_register_gate(NULL, "i2s_play_clk", "apb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2S_PLAY_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "b2400000.i2s");
+
+       clk = clk_register_gate(NULL, "i2s_rec_clk", "apb_clk", 0,
+                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2S_REC_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "b2000000.i2s");
+
+       clk = clk_register_gate(NULL, "kbd_clk", "apb_clk", 0,
+                       SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_KBD_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "e0300000.kbd");
+
+       /* RAS clks */
+       clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
+                       gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
+                       0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
+                       SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+
+       clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
+                       gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
+                       0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
+                       SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+
+       clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+                       SPEAR1340_GEN_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+                       &_lock);
+       clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+
+       clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+                       SPEAR1340_GEN_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+                       &_lock);
+       clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+
+       clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+                       SPEAR1340_GEN_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+                       &_lock);
+       clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+
+       clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+                       SPEAR1340_GEN_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+                       &_lock);
+       clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+
+       clk = clk_register_gate(NULL, "mali_clk", "gen_synth3_clk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_MALI_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "mali");
+
+       clk = clk_register_gate(NULL, "cec0_clk", "ahb_clk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CEC0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "spear_cec.0");
+
+       clk = clk_register_gate(NULL, "cec1_clk", "ahb_clk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CEC1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "spear_cec.1");
+
+       clk = clk_register_mux(NULL, "spdif_out_mux_clk", spdif_out_parents,
+                       ARRAY_SIZE(spdif_out_parents), 0,
+                       SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT,
+                       SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "spdif_out_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mux_clk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_OUT_CLK_ENB,
+                       0, &_lock);
+       clk_register_clkdev(clk, NULL, "spdif-out");
+
+       clk = clk_register_mux(NULL, "spdif_in_mux_clk", spdif_in_parents,
+                       ARRAY_SIZE(spdif_in_parents), 0,
+                       SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT,
+                       SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "spdif_in_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mux_clk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_IN_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "spdif-in");
+
+       clk = clk_register_gate(NULL, "acp_clk", "acp_mux_clk", 0,
+                       SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "acp_clk");
+
+       clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mux_clk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "plgpio");
+
+       clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mux_clk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB,
+                       0, &_lock);
+       clk_register_clkdev(clk, NULL, "video_dec");
+
+       clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mux_clk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB,
+                       0, &_lock);
+       clk_register_clkdev(clk, NULL, "video_enc");
+
+       clk = clk_register_gate(NULL, "video_in_clk", "video_in_mux_clk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "spear_vip");
+
+       clk = clk_register_gate(NULL, "cam0_clk", "cam0_mux_clk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "spear_camif.0");
+
+       clk = clk_register_gate(NULL, "cam1_clk", "cam1_mux_clk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "spear_camif.1");
+
+       clk = clk_register_gate(NULL, "cam2_clk", "cam2_mux_clk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "spear_camif.2");
+
+       clk = clk_register_gate(NULL, "cam3_clk", "cam3_mux_clk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "spear_camif.3");
+
+       clk = clk_register_gate(NULL, "pwm_clk", "pwm_mux_clk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PWM_CLK_ENB, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "pwm");
+}
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
new file mode 100644 (file)
index 0000000..440bb3e
--- /dev/null
@@ -0,0 +1,612 @@
+/*
+ * SPEAr3xx machines clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <mach/misc_regs.h>
+#include "clk.h"
+
+static DEFINE_SPINLOCK(_lock);
+
+/*
+ * SPEAr3xx misc (system configuration) register offsets, all relative to
+ * MISC_BASE from <mach/misc_regs.h>, together with the shift/mask/bit
+ * definitions for the clock-related fields inside them.
+ */
+#define PLL1_CTR                       (MISC_BASE + 0x008)
+#define PLL1_FRQ                       (MISC_BASE + 0x00C)
+#define PLL2_CTR                       (MISC_BASE + 0x014)
+#define PLL2_FRQ                       (MISC_BASE + 0x018)
+#define PLL_CLK_CFG                    (MISC_BASE + 0x020)
+       /* PLL_CLK_CFG register masks */
+       #define MCTR_CLK_SHIFT          28
+       #define MCTR_CLK_MASK           3
+
+#define CORE_CLK_CFG                   (MISC_BASE + 0x024)
+       /* CORE CLK CFG register masks */
+       /*
+        * NOTE(review): the *_MASK values below look like field widths in
+        * bits (they are passed where clk_register_mux()/clk_register_divider()
+        * take a width argument, see spear3xx_clk_init()), not ready-made
+        * bit masks — confirm against the spear clk.h helpers.
+        */
+       #define GEN_SYNTH2_3_CLK_SHIFT  18
+       #define GEN_SYNTH2_3_CLK_MASK   1
+
+       #define HCLK_RATIO_SHIFT        10
+       #define HCLK_RATIO_MASK         2
+       #define PCLK_RATIO_SHIFT        8
+       #define PCLK_RATIO_MASK         2
+
+#define PERIP_CLK_CFG                  (MISC_BASE + 0x028)
+       /* PERIP_CLK_CFG register masks */
+       #define UART_CLK_SHIFT          4
+       #define UART_CLK_MASK           1
+       #define FIRDA_CLK_SHIFT         5
+       #define FIRDA_CLK_MASK          2
+       #define GPT0_CLK_SHIFT          8
+       #define GPT1_CLK_SHIFT          11
+       #define GPT2_CLK_SHIFT          12
+       #define GPT_CLK_MASK            1
+
+#define PERIP1_CLK_ENB                 (MISC_BASE + 0x02C)
+       /* PERIP1_CLK_ENB register masks */
+       /*
+        * The *_ENB values are bit numbers within the enable register (they
+        * are passed as the bit-index argument of clk_register_gate() below).
+        */
+       #define UART_CLK_ENB            3
+       #define SSP_CLK_ENB             5
+       #define I2C_CLK_ENB             7
+       #define JPEG_CLK_ENB            8
+       #define FIRDA_CLK_ENB           10
+       #define GPT1_CLK_ENB            11
+       #define GPT2_CLK_ENB            12
+       #define ADC_CLK_ENB             15
+       #define RTC_CLK_ENB             17
+       #define GPIO_CLK_ENB            18
+       #define DMA_CLK_ENB             19
+       #define SMI_CLK_ENB             21
+       #define GMAC_CLK_ENB            23
+       #define USBD_CLK_ENB            24
+       #define USBH_CLK_ENB            25
+       #define C3_CLK_ENB              31
+
+/* RAS (board/peripheral RAS block) clock enable bits */
+#define RAS_CLK_ENB                    (MISC_BASE + 0x034)
+       #define RAS_AHB_CLK_ENB         0
+       #define RAS_PLL1_CLK_ENB        1
+       #define RAS_APB_CLK_ENB         2
+       #define RAS_32K_CLK_ENB         3
+       #define RAS_24M_CLK_ENB         4
+       #define RAS_48M_CLK_ENB         5
+       #define RAS_PLL2_CLK_ENB        7
+       #define RAS_SYNT0_CLK_ENB       8
+       #define RAS_SYNT1_CLK_ENB       9
+       #define RAS_SYNT2_CLK_ENB       10
+       #define RAS_SYNT3_CLK_ENB       11
+
+/* peripheral prescaler and synthesizer configuration registers */
+#define PRSC0_CLK_CFG                  (MISC_BASE + 0x044)
+#define PRSC1_CLK_CFG                  (MISC_BASE + 0x048)
+#define PRSC2_CLK_CFG                  (MISC_BASE + 0x04C)
+#define AMEM_CLK_CFG                   (MISC_BASE + 0x050)
+       #define AMEM_CLK_ENB            0
+
+#define CLCD_CLK_SYNT                  (MISC_BASE + 0x05C)
+#define FIRDA_CLK_SYNT                 (MISC_BASE + 0x060)
+#define UART_CLK_SYNT                  (MISC_BASE + 0x064)
+#define GMAC_CLK_SYNT                  (MISC_BASE + 0x068)
+#define GEN0_CLK_SYNT                  (MISC_BASE + 0x06C)
+#define GEN1_CLK_SYNT                  (MISC_BASE + 0x070)
+#define GEN2_CLK_SYNT                  (MISC_BASE + 0x074)
+#define GEN3_CLK_SYNT                  (MISC_BASE + 0x078)
+
+/* pll rate configuration table, in ascending order of rates */
+/*
+ * NOTE(review): the vco/pll MHz figures in the comments assume the
+ * 24 MHz oscillator parent that spear3xx_clk_init() feeds the PLLs —
+ * confirm if a different input oscillator is ever used.
+ */
+static struct pll_rate_tbl pll_rtbl[] = {
+       {.mode = 0, .m = 0x53, .n = 0x0C, .p = 0x1}, /* vco 332 & pll 166 MHz */
+       {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* vco 532 & pll 266 MHz */
+       {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* vco 664 & pll 332 MHz */
+};
+
+/* aux rate configuration table, in ascending order of rates */
+/* Shared by the uart/firda auxiliary synthesizers (see spear3xx_clk_init). */
+static struct aux_rate_tbl aux_rtbl[] = {
+       /* For PLL1 = 332 MHz */
+       {.xscale = 2, .yscale = 27, .eq = 0}, /* 12.296 MHz */
+       {.xscale = 2, .yscale = 8, .eq = 0}, /* 41.5 MHz */
+       {.xscale = 2, .yscale = 4, .eq = 0}, /* 83 MHz */
+       {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
+};
+
+/* gpt rate configuration table, in ascending order of rates */
+static struct gpt_rate_tbl gpt_rtbl[] = {
+       /* For pll1 = 332 MHz */
+       {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
+       {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
+       {.mscale = 1, .nscale = 0}, /* 83 MHz */
+};
+
+/* clock parents */
+/*
+ * Parent-name tables for the mux clocks registered in spear3xx_clk_init().
+ * NOTE(review): array index order presumably corresponds to the hardware
+ * mux selector value — confirm against the misc register documentation.
+ * "none" in ddr_parents looks like a placeholder for a reserved/unused
+ * selector value.
+ */
+static const char *uart0_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
+static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
+};
+static const char *gpt0_parents[] = { "pll3_48m_clk", "gpt0_synth_clk", };
+static const char *gpt1_parents[] = { "pll3_48m_clk", "gpt1_synth_clk", };
+static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
+static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", };
+static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
+       "pll2_clk", };
+
+#ifdef CONFIG_MACH_SPEAR300
+/*
+ * spear300_clk_init - register SPEAr300 board-specific clocks.
+ *
+ * Every clock here is a 1:1 fixed-factor child of one of the shared ras_*
+ * clocks; they are registered only so the SPEAr300 peripherals (clcd,
+ * fsmc, sdhci, gpio1, kbd) get clkdev lookup entries by device name.
+ */
+static void __init spear300_clk_init(void)
+{
+       struct clk *clk;
+
+       clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+                       1, 1);
+       clk_register_clkdev(clk, NULL, "60000000.clcd");
+
+       clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "94000000.flash");
+
+       clk = clk_register_fixed_factor(NULL, "sdhci_clk", "ras_ahb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "70000000.sdhci");
+
+       clk = clk_register_fixed_factor(NULL, "gpio1_clk", "ras_apb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "a9000000.gpio");
+
+       clk = clk_register_fixed_factor(NULL, "kbd_clk", "ras_apb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "a0000000.kbd");
+}
+#endif
+
+/* array of all spear 310 clock lookups */
+#ifdef CONFIG_MACH_SPEAR310
+/*
+ * spear310_clk_init - register SPEAr310 board-specific clocks.
+ *
+ * As on SPEAr300, all clocks are 1:1 fixed factors of the shared ras_*
+ * clocks, registered to provide clkdev lookups for the extra SPEAr310
+ * peripherals (emi, fsmc, tdm, uart1-uart5).
+ */
+static void __init spear310_clk_init(void)
+{
+       struct clk *clk;
+
+       clk = clk_register_fixed_factor(NULL, "emi_clk", "ras_ahb_clk", 0, 1,
+                       1);
+       /* emi is looked up by con_id rather than device name */
+       clk_register_clkdev(clk, "emi", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "44000000.flash");
+
+       clk = clk_register_fixed_factor(NULL, "tdm_clk", "ras_ahb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "tdm");
+
+       clk = clk_register_fixed_factor(NULL, "uart1_clk", "ras_apb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "b2000000.serial");
+
+       clk = clk_register_fixed_factor(NULL, "uart2_clk", "ras_apb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "b2080000.serial");
+
+       clk = clk_register_fixed_factor(NULL, "uart3_clk", "ras_apb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "b2100000.serial");
+
+       clk = clk_register_fixed_factor(NULL, "uart4_clk", "ras_apb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "b2180000.serial");
+
+       clk = clk_register_fixed_factor(NULL, "uart5_clk", "ras_apb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "b2200000.serial");
+}
+#endif
+
+/* array of all spear 320 clock lookups */
+#ifdef CONFIG_MACH_SPEAR320
+       /*
+        * SPEAr320 SoC-configuration register fields used by the muxes
+        * below.  NOTE(review): as elsewhere in this file the *_MASK values
+        * appear to be field widths in bits (clk_register_mux() width
+        * argument) — confirm against the spear clk helpers.
+        */
+       #define SMII_PCLK_SHIFT                         18
+       #define SMII_PCLK_MASK                          2
+       #define SMII_PCLK_VAL_PAD                       0x0
+       #define SMII_PCLK_VAL_PLL2                      0x1
+       #define SMII_PCLK_VAL_SYNTH0                    0x2
+       #define SDHCI_PCLK_SHIFT                        15
+       #define SDHCI_PCLK_MASK                         1
+       #define SDHCI_PCLK_VAL_48M                      0x0
+       #define SDHCI_PCLK_VAL_SYNTH3                   0x1
+       #define I2S_REF_PCLK_SHIFT                      8
+       #define I2S_REF_PCLK_MASK                       1
+       #define I2S_REF_PCLK_SYNTH_VAL                  0x1
+       #define I2S_REF_PCLK_PLL2_VAL                   0x0
+       #define UART1_PCLK_SHIFT                        6
+       #define UART1_PCLK_MASK                         1
+       #define SPEAR320_UARTX_PCLK_VAL_SYNTH1          0x0
+       #define SPEAR320_UARTX_PCLK_VAL_APB             0x1
+
+/*
+ * Parent-name tables for the SPEAr320 muxes; the index order matches the
+ * *_PCLK_VAL_* selector values defined above.
+ *
+ * NOTE(review): smii0_parents[0] is "smii_125m_pad", but the clock
+ * registered in spear320_clk_init() is named "smii_125m_pad_clk" (only
+ * its clkdev con_id is "smii_125m_pad").  If mux parent matching is by
+ * clock name, selector 0 may never resolve — confirm against the clk
+ * framework's parent lookup semantics.
+ */
+static const char *i2s_ref_parents[] = { "ras_pll2_clk",
+       "ras_gen2_synth_gate_clk", };
+static const char *sdhci_parents[] = { "ras_pll3_48m_clk",
+       "ras_gen3_synth_gate_clk",
+};
+static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
+       "ras_gen0_synth_gate_clk", };
+static const char *uartx_parents[] = { "ras_gen1_synth_gate_clk", "ras_apb_clk",
+};
+
+/*
+ * spear320_clk_init - register SPEAr320 board-specific clocks.
+ *
+ * Mixes three kinds of registrations: one fixed-rate root (the 125 MHz
+ * SMII pad clock), 1:1 fixed factors of the shared ras_* clocks for
+ * simple peripherals, and muxes selecting between synthesizer and
+ * pll/apb parents for i2s, rs485, sdhci, smii and the uarts.
+ */
+static void __init spear320_clk_init(void)
+{
+       struct clk *clk;
+
+       /* 125 MHz clock coming in on the SMII pad; root of the smii mux */
+       clk = clk_register_fixed_rate(NULL, "smii_125m_pad_clk", NULL,
+                       CLK_IS_ROOT, 125000000);
+       clk_register_clkdev(clk, "smii_125m_pad", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+                       1, 1);
+       clk_register_clkdev(clk, NULL, "90000000.clcd");
+
+       clk = clk_register_fixed_factor(NULL, "emi_clk", "ras_ahb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, "emi", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "4c000000.flash");
+
+       clk = clk_register_fixed_factor(NULL, "i2c1_clk", "ras_ahb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "a7000000.i2c");
+
+       clk = clk_register_fixed_factor(NULL, "pwm_clk", "ras_ahb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, "pwm", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "ssp1_clk", "ras_ahb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "a5000000.spi");
+
+       clk = clk_register_fixed_factor(NULL, "ssp2_clk", "ras_ahb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "a6000000.spi");
+
+       clk = clk_register_fixed_factor(NULL, "can0_clk", "ras_apb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "c_can_platform.0");
+
+       clk = clk_register_fixed_factor(NULL, "can1_clk", "ras_apb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "c_can_platform.1");
+
+       clk = clk_register_fixed_factor(NULL, "i2s_clk", "ras_apb_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "i2s");
+
+       /* i2s reference clock: ras_pll2 vs gen2 synthesizer */
+       clk = clk_register_mux(NULL, "i2s_ref_clk", i2s_ref_parents,
+                       ARRAY_SIZE(i2s_ref_parents), 0, SPEAR320_CONTROL_REG,
+                       I2S_REF_PCLK_SHIFT, I2S_REF_PCLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "i2s_ref_clk", NULL);
+
+       /* serial bit clock is a fixed /4 of the reference clock */
+       clk = clk_register_fixed_factor(NULL, "i2s_sclk", "i2s_ref_clk", 0, 1,
+                       4);
+       clk_register_clkdev(clk, "i2s_sclk", NULL);
+
+       clk = clk_register_mux(NULL, "rs485_clk", uartx_parents,
+                       ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+                       SPEAR320_RS485_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "a9300000.serial");
+
+       clk = clk_register_mux(NULL, "sdhci_clk", sdhci_parents,
+                       ARRAY_SIZE(sdhci_parents), 0, SPEAR320_CONTROL_REG,
+                       SDHCI_PCLK_SHIFT, SDHCI_PCLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "70000000.sdhci");
+
+       /*
+        * NOTE(review): smii0_parents[0] ("smii_125m_pad") does not match
+        * the clock name "smii_125m_pad_clk" registered above — see the
+        * note at the parent table; confirm parent lookup works.
+        */
+       clk = clk_register_mux(NULL, "smii_pclk", smii0_parents,
+                       ARRAY_SIZE(smii0_parents), 0, SPEAR320_CONTROL_REG,
+                       SMII_PCLK_SHIFT, SMII_PCLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "smii_pclk");
+
+       clk = clk_register_fixed_factor(NULL, "smii_clk", "smii_pclk", 0, 1, 1);
+       clk_register_clkdev(clk, NULL, "smii");
+
+       /* uart1 selector lives in CONTROL_REG; uart2-6 in EXT_CTRL_REG */
+       clk = clk_register_mux(NULL, "uart1_clk", uartx_parents,
+                       ARRAY_SIZE(uartx_parents), 0, SPEAR320_CONTROL_REG,
+                       UART1_PCLK_SHIFT, UART1_PCLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "a3000000.serial");
+
+       clk = clk_register_mux(NULL, "uart2_clk", uartx_parents,
+                       ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+                       SPEAR320_UART2_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "a4000000.serial");
+
+       clk = clk_register_mux(NULL, "uart3_clk", uartx_parents,
+                       ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+                       SPEAR320_UART3_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "a9100000.serial");
+
+       clk = clk_register_mux(NULL, "uart4_clk", uartx_parents,
+                       ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+                       SPEAR320_UART4_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "a9200000.serial");
+
+       clk = clk_register_mux(NULL, "uart5_clk", uartx_parents,
+                       ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+                       SPEAR320_UART5_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "60000000.serial");
+
+       clk = clk_register_mux(NULL, "uart6_clk", uartx_parents,
+                       ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+                       SPEAR320_UART6_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, NULL, "60100000.serial");
+}
+#endif
+
+/*
+ * spear3xx_clk_init - register the clock tree common to all SPEAr3xx SoCs.
+ *
+ * Builds the fixed-rate oscillators, VCO/PLLs, rate synthesizers, muxes,
+ * dividers and peripheral gate clocks, installing a clkdev lookup for each
+ * so drivers can clk_get() them (device ids are DT-style "<addr>.<unit>"
+ * strings).  Finally dispatches to the machine-specific init for the
+ * detected SPEAr300/310/320 variant.
+ */
+void __init spear3xx_clk_init(void)
+{
+       /* clk1 receives the secondary output of the dual-output helpers
+        * (clk_register_vco_pll / clk_register_aux). */
+       struct clk *clk, *clk1;
+
+       /* rate 0: apb_pclk only satisfies AMBA "apb_pclk" bus lookups */
+       clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+       clk_register_clkdev(clk, "apb_pclk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+                       32000);
+       clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
+                       24000000);
+       clk_register_clkdev(clk, "osc_24m_clk", NULL);
+
+       /* clock derived from 32 KHz osc clk */
+       clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
+                       PERIP1_CLK_ENB, RTC_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "fc900000.rtc");
+
+       /* clock derived from 24 MHz osc clk */
+       clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+                       48000000);
+       clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "fc880000.wdt");
+
+       /* vco1/pll1: primary CPU/bus PLL; clk1 is the pll output */
+       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL,
+                       "osc_24m_clk", 0, PLL1_CTR, PLL1_FRQ, pll_rtbl,
+                       ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+       clk_register_clkdev(clk, "vco1_clk", NULL);
+       clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
+                       "osc_24m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
+                       ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+       clk_register_clkdev(clk, "vco2_clk", NULL);
+       clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+       /* clock derived from pll1 clk */
+       clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 1);
+       clk_register_clkdev(clk, "cpu_clk", NULL);
+
+       clk = clk_register_divider(NULL, "ahb_clk", "pll1_clk",
+                       CLK_SET_RATE_PARENT, CORE_CLK_CFG, HCLK_RATIO_SHIFT,
+                       HCLK_RATIO_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "ahb_clk", NULL);
+
+       /* aux synthesizers: clk1 is the synth's built-in gate output */
+       clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
+                       "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "uart_synth_clk", NULL);
+       clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+
+       clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+                       ARRAY_SIZE(uart0_parents), 0, PERIP_CLK_CFG,
+                       UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "uart0", "uart0_mux_clk", 0,
+                       PERIP1_CLK_ENB, UART_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "d0000000.serial");
+
+       clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
+                       "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "firda_synth_clk", NULL);
+       clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+
+       clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+                       ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
+                       FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "firda_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+                       PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "firda");
+
+       /* gpt clocks */
+       /* NOTE(review): gpt0 has a mux but no PERIP1 gate registration here,
+        * unlike gpt1/gpt2 -- presumably gpt0 is always-on; confirm vs TRM. */
+       clk_register_gpt("gpt0_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
+                       gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+       clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents,
+                       ARRAY_SIZE(gpt0_parents), 0, PERIP_CLK_CFG,
+                       GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "gpt0");
+
+       clk_register_gpt("gpt1_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
+                       gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt1_parents,
+                       ARRAY_SIZE(gpt1_parents), 0, PERIP_CLK_CFG,
+                       GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+                       PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "gpt1");
+
+       clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
+                       gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+                       ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
+                       GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+                       PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "gpt2");
+
+       /* general synths clocks */
+       clk = clk_register_aux("gen0_synth_clk", "gen0_synth_gate_clk",
+                       "pll1_clk", 0, GEN0_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "gen0_synth_clk", NULL);
+       clk_register_clkdev(clk1, "gen0_synth_gate_clk", NULL);
+
+       clk = clk_register_aux("gen1_synth_clk", "gen1_synth_gate_clk",
+                       "pll1_clk", 0, GEN1_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "gen1_synth_clk", NULL);
+       clk_register_clkdev(clk1, "gen1_synth_gate_clk", NULL);
+
+       /* gen2/gen3 share one selectable parent (see gen2_3_parents) */
+       clk = clk_register_mux(NULL, "gen2_3_parent_clk", gen2_3_parents,
+                       ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG,
+                       GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "gen2_3_parent_clk", NULL);
+
+       clk = clk_register_aux("gen2_synth_clk", "gen2_synth_gate_clk",
+                       "gen2_3_parent_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "gen2_synth_clk", NULL);
+       clk_register_clkdev(clk1, "gen2_synth_gate_clk", NULL);
+
+       clk = clk_register_aux("gen3_synth_clk", "gen3_synth_gate_clk",
+                       "gen2_3_parent_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "gen3_synth_clk", NULL);
+       clk_register_clkdev(clk1, "gen3_synth_gate_clk", NULL);
+
+       /* clock derived from pll3 clk */
+       clk = clk_register_gate(NULL, "usbh_clk", "pll3_48m_clk", 0,
+                       PERIP1_CLK_ENB, USBH_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "usbh_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, "usbh.0_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "usbh.1_clk", "usbh_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, "usbh.1_clk", NULL);
+
+       clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
+                       PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "designware_udc");
+
+       /* clock derived from ahb clk */
+       clk = clk_register_fixed_factor(NULL, "ahbmult2_clk", "ahb_clk", 0, 2,
+                       1);
+       clk_register_clkdev(clk, "ahbmult2_clk", NULL);
+
+       clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
+                       ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
+                       MCTR_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "ddr_clk", NULL);
+
+       clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
+                       CLK_SET_RATE_PARENT, CORE_CLK_CFG, PCLK_RATIO_SHIFT,
+                       PCLK_RATIO_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "apb_clk", NULL);
+
+       clk = clk_register_gate(NULL, "amem_clk", "ahb_clk", 0, AMEM_CLK_CFG,
+                       AMEM_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "amem_clk", NULL);
+
+       clk = clk_register_gate(NULL, "c3_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+                       C3_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "c3_clk");
+
+       clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+                       DMA_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "fc400000.dma");
+
+       clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+                       GMAC_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "e0800000.eth");
+
+       clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+                       I2C_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "d0180000.i2c");
+
+       clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+                       JPEG_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "jpeg");
+
+       clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+                       SMI_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "fc000000.flash");
+
+       /* clock derived from apb clk */
+       clk = clk_register_gate(NULL, "adc_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+                       ADC_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "adc");
+
+       clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+                       GPIO_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "fc980000.gpio");
+
+       clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+                       SSP_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "d0100000.spi");
+
+       /* RAS clk enable */
+       /* gates feeding the SoC's RAS (machine-specific peripheral) block;
+        * each mirrors one of the core clocks registered above */
+       clk = clk_register_gate(NULL, "ras_ahb_clk", "ahb_clk", 0, RAS_CLK_ENB,
+                       RAS_AHB_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_ahb_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB,
+                       RAS_APB_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_apb_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0,
+                       RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_32k_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_24m_clk", "osc_24m_clk", 0,
+                       RAS_CLK_ENB, RAS_24M_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_24m_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_pll1_clk", "pll1_clk", 0,
+                       RAS_CLK_ENB, RAS_PLL1_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_pll1_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_pll2_clk", "pll2_clk", 0,
+                       RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_pll2_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_pll3_48m_clk", "pll3_48m_clk", 0,
+                       RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_pll3_48m_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_gen0_synth_gate_clk",
+                       "gen0_synth_gate_clk", 0, RAS_CLK_ENB,
+                       RAS_SYNT0_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_gen0_synth_gate_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_gen1_synth_gate_clk",
+                       "gen1_synth_gate_clk", 0, RAS_CLK_ENB,
+                       RAS_SYNT1_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_gen1_synth_gate_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_gen2_synth_gate_clk",
+                       "gen2_synth_gate_clk", 0, RAS_CLK_ENB,
+                       RAS_SYNT2_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_gen2_synth_gate_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_gen3_synth_gate_clk",
+                       "gen3_synth_gate_clk", 0, RAS_CLK_ENB,
+                       RAS_SYNT3_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_gen3_synth_gate_clk", NULL);
+
+       /* finally, the machine-specific clocks for the detected variant */
+       if (of_machine_is_compatible("st,spear300"))
+               spear300_clk_init();
+       else if (of_machine_is_compatible("st,spear310"))
+               spear310_clk_init();
+       else if (of_machine_is_compatible("st,spear320"))
+               spear320_clk_init();
+}
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
new file mode 100644 (file)
index 0000000..f9a20b3
--- /dev/null
@@ -0,0 +1,342 @@
+/*
+ * SPEAr6xx machines clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <mach/misc_regs.h>
+#include "clk.h"
+
+/* single lock serializing all read-modify-write accesses to MISC registers */
+static DEFINE_SPINLOCK(_lock);
+
+/* SPEAr6xx misc-register offsets and the shift/mask/bit layout used below */
+#define PLL1_CTR                       (MISC_BASE + 0x008)
+#define PLL1_FRQ                       (MISC_BASE + 0x00C)
+#define PLL2_CTR                       (MISC_BASE + 0x014)
+#define PLL2_FRQ                       (MISC_BASE + 0x018)
+#define PLL_CLK_CFG                    (MISC_BASE + 0x020)
+       /* PLL_CLK_CFG register masks */
+       #define MCTR_CLK_SHIFT          28
+       #define MCTR_CLK_MASK           3
+
+#define CORE_CLK_CFG                   (MISC_BASE + 0x024)
+       /* CORE CLK CFG register masks */
+       #define HCLK_RATIO_SHIFT        10
+       #define HCLK_RATIO_MASK         2
+       #define PCLK_RATIO_SHIFT        8
+       #define PCLK_RATIO_MASK         2
+
+#define PERIP_CLK_CFG                  (MISC_BASE + 0x028)
+       /* PERIP_CLK_CFG register masks */
+       #define CLCD_CLK_SHIFT          2
+       #define CLCD_CLK_MASK           2
+       #define UART_CLK_SHIFT          4
+       #define UART_CLK_MASK           1
+       #define FIRDA_CLK_SHIFT         5
+       #define FIRDA_CLK_MASK          2
+       #define GPT0_CLK_SHIFT          8
+       #define GPT1_CLK_SHIFT          10
+       #define GPT2_CLK_SHIFT          11
+       #define GPT3_CLK_SHIFT          12
+       #define GPT_CLK_MASK            1
+
+/* bit numbers within PERIP1_CLK_ENB (one enable bit per peripheral) */
+#define PERIP1_CLK_ENB                 (MISC_BASE + 0x02C)
+       /* PERIP1_CLK_ENB register masks */
+       #define UART0_CLK_ENB           3
+       #define UART1_CLK_ENB           4
+       #define SSP0_CLK_ENB            5
+       #define SSP1_CLK_ENB            6
+       #define I2C_CLK_ENB             7
+       #define JPEG_CLK_ENB            8
+       #define FSMC_CLK_ENB            9
+       #define FIRDA_CLK_ENB           10
+       #define GPT2_CLK_ENB            11
+       #define GPT3_CLK_ENB            12
+       #define GPIO2_CLK_ENB           13
+       #define SSP2_CLK_ENB            14
+       #define ADC_CLK_ENB             15
+       /* NOTE(review): GPT1_CLK_ENB duplicates GPT2_CLK_ENB's bit 11; the
+        * surrounding sequence suggests bit 16 was intended -- verify against
+        * the SPEAr600 TRM before relying on gpt1 gating. */
+       #define GPT1_CLK_ENB            11
+       #define RTC_CLK_ENB             17
+       #define GPIO1_CLK_ENB           18
+       #define DMA_CLK_ENB             19
+       #define SMI_CLK_ENB             21
+       #define CLCD_CLK_ENB            22
+       #define GMAC_CLK_ENB            23
+       #define USBD_CLK_ENB            24
+       #define USBH0_CLK_ENB           25
+       #define USBH1_CLK_ENB           26
+
+/* per-prescaler (gpt synthesizer) configuration registers */
+#define PRSC0_CLK_CFG                  (MISC_BASE + 0x044)
+#define PRSC1_CLK_CFG                  (MISC_BASE + 0x048)
+#define PRSC2_CLK_CFG                  (MISC_BASE + 0x04C)
+
+/* auxiliary fractional synthesizer registers */
+#define CLCD_CLK_SYNT                  (MISC_BASE + 0x05C)
+#define FIRDA_CLK_SYNT                 (MISC_BASE + 0x060)
+#define UART_CLK_SYNT                  (MISC_BASE + 0x064)
+
+/* vco rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+       {.mode = 0, .m = 0x53, .n = 0x0F, .p = 0x1}, /* vco 332 & pll 166 MHz */
+       {.mode = 0, .m = 0x85, .n = 0x0F, .p = 0x1}, /* vco 532 & pll 266 MHz */
+       {.mode = 0, .m = 0xA6, .n = 0x0F, .p = 0x1}, /* vco 664 & pll 332 MHz */
+};
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+       /* For PLL1 = 332 MHz */
+       {.xscale = 2, .yscale = 8, .eq = 0}, /* 41.5 MHz */
+       {.xscale = 2, .yscale = 4, .eq = 0}, /* 83 MHz */
+       {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
+};
+
+/*
+ * Mux parent tables: array order must match the hardware encoding of the
+ * corresponding select field in PERIP_CLK_CFG / PLL_CLK_CFG (index == field
+ * value).  The "none" entry in ddr_parents is a hole in the encoding, not a
+ * real clock.
+ */
+static const char *clcd_parents[] = { "pll3_48m_clk", "clcd_synth_gate_clk", };
+static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
+};
+static const char *uart_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
+static const char *gpt0_1_parents[] = { "pll3_48m_clk", "gpt0_1_synth_clk", };
+static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
+static const char *gpt3_parents[] = { "pll3_48m_clk", "gpt3_synth_clk", };
+static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
+       "pll2_clk", };
+
+/* gpt rate configuration table, in ascending order of rates */
+static struct gpt_rate_tbl gpt_rtbl[] = {
+       /* For pll1 = 332 MHz */
+       {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
+       {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
+       {.mscale = 1, .nscale = 0}, /* 83 MHz */
+};
+
+/*
+ * spear6xx_clk_init - register the SPEAr6xx clock tree.
+ *
+ * Builds the fixed-rate oscillators, VCO/PLLs, rate synthesizers, muxes,
+ * dividers and peripheral gate clocks, installing a clkdev lookup for each
+ * so drivers can clk_get() them (device ids are DT-style "<addr>.<unit>"
+ * strings or legacy platform-device names).
+ */
+void __init spear6xx_clk_init(void)
+{
+       /* clk1 receives the secondary output (pll/gate) of dual-output helpers */
+       struct clk *clk, *clk1;
+
+       /* rate 0: apb_pclk only satisfies AMBA "apb_pclk" bus lookups */
+       clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+       clk_register_clkdev(clk, "apb_pclk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+                       32000);
+       clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+       clk = clk_register_fixed_rate(NULL, "osc_30m_clk", NULL, CLK_IS_ROOT,
+                       30000000);
+       clk_register_clkdev(clk, "osc_30m_clk", NULL);
+
+       /* clock derived from 32 KHz osc clk */
+       clk = clk_register_gate(NULL, "rtc_spear", "osc_32k_clk", 0,
+                       PERIP1_CLK_ENB, RTC_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "rtc-spear");
+
+       /* clock derived from 30 MHz osc clk */
+       /*
+        * pll3 must be parented to the 30 MHz oscillator registered above:
+        * SPEAr6xx has no "osc_24m_clk" (that clock only exists on SPEAr3xx),
+        * so the previous parent name could never resolve.
+        */
+       clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_30m_clk", 0,
+                       48000000);
+       clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+
+       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_30m_clk",
+                       0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
+                       &_lock, &clk1, NULL);
+       clk_register_clkdev(clk, "vco1_clk", NULL);
+       clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
+                       "osc_30m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
+                       ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+       clk_register_clkdev(clk, "vco2_clk", NULL);
+       clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+       clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_30m_clk", 0, 1,
+                       1);
+       clk_register_clkdev(clk, NULL, "wdt");
+
+       /* clock derived from pll1 clk */
+       clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 1);
+       clk_register_clkdev(clk, "cpu_clk", NULL);
+
+       clk = clk_register_divider(NULL, "ahb_clk", "pll1_clk",
+                       CLK_SET_RATE_PARENT, CORE_CLK_CFG, HCLK_RATIO_SHIFT,
+                       HCLK_RATIO_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "ahb_clk", NULL);
+
+       /* aux synthesizers: clk1 is the synth's built-in gate output */
+       clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
+                       "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "uart_synth_clk", NULL);
+       clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+
+       clk = clk_register_mux(NULL, "uart_mux_clk", uart_parents,
+                       ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG,
+                       UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "uart_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "uart0", "uart_mux_clk", 0,
+                       PERIP1_CLK_ENB, UART0_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "d0000000.serial");
+
+       clk = clk_register_gate(NULL, "uart1", "uart_mux_clk", 0,
+                       PERIP1_CLK_ENB, UART1_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "d0080000.serial");
+
+       clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
+                       "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "firda_synth_clk", NULL);
+       clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+
+       clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+                       ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
+                       FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "firda_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+                       PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "firda");
+
+       clk = clk_register_aux("clcd_synth_clk", "clcd_synth_gate_clk",
+                       "pll1_clk", 0, CLCD_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+       clk_register_clkdev(clk1, "clcd_synth_gate_clk", NULL);
+
+       clk = clk_register_mux(NULL, "clcd_mux_clk", clcd_parents,
+                       ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG,
+                       CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "clcd_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "clcd_clk", "clcd_mux_clk", 0,
+                       PERIP1_CLK_ENB, CLCD_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "clcd");
+
+       /* gpt clocks */
+       /* gpt0 and gpt1 share one synthesizer (PRSC0) */
+       clk = clk_register_gpt("gpt0_1_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
+                       gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+       clk_register_clkdev(clk, "gpt0_1_synth_clk", NULL);
+
+       clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt0_1_parents,
+                       ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
+                       GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "gpt0");
+
+       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt0_1_parents,
+                       ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
+                       GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+                       PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "gpt1");
+
+       clk = clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
+                       gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+       clk_register_clkdev(clk, "gpt2_synth_clk", NULL);
+
+       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+                       ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
+                       GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+                       PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "gpt2");
+
+       clk = clk_register_gpt("gpt3_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
+                       gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+       clk_register_clkdev(clk, "gpt3_synth_clk", NULL);
+
+       clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt3_parents,
+                       ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG,
+                       GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+
+       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+                       PERIP1_CLK_ENB, GPT3_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "gpt3");
+
+       /* clock derived from pll3 clk */
+       clk = clk_register_gate(NULL, "usbh0_clk", "pll3_48m_clk", 0,
+                       PERIP1_CLK_ENB, USBH0_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "usbh.0_clk");
+
+       clk = clk_register_gate(NULL, "usbh1_clk", "pll3_48m_clk", 0,
+                       PERIP1_CLK_ENB, USBH1_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "usbh.1_clk");
+
+       clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
+                       PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "designware_udc");
+
+       /* clock derived from ahb clk */
+       clk = clk_register_fixed_factor(NULL, "ahbmult2_clk", "ahb_clk", 0, 2,
+                       1);
+       clk_register_clkdev(clk, "ahbmult2_clk", NULL);
+
+       clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
+                       ARRAY_SIZE(ddr_parents),
+                       0, PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0,
+                       &_lock);
+       clk_register_clkdev(clk, "ddr_clk", NULL);
+
+       clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
+                       CLK_SET_RATE_PARENT, CORE_CLK_CFG, PCLK_RATIO_SHIFT,
+                       PCLK_RATIO_MASK, 0, &_lock);
+       clk_register_clkdev(clk, "apb_clk", NULL);
+
+       clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+                       DMA_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "fc400000.dma");
+
+       clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+                       FSMC_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "d1800000.flash");
+
+       clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+                       GMAC_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "gmac");
+
+       clk = clk_register_gate(NULL, "i2c_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+                       I2C_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "d0200000.i2c");
+
+       clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+                       JPEG_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "jpeg");
+
+       clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+                       SMI_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "fc000000.flash");
+
+       /* clock derived from apb clk */
+       clk = clk_register_gate(NULL, "adc_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+                       ADC_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "adc");
+
+       /* gpio0 has no gate bit in PERIP1_CLK_ENB: model it as a 1:1 child */
+       clk = clk_register_fixed_factor(NULL, "gpio0_clk", "apb_clk", 0, 1, 1);
+       clk_register_clkdev(clk, NULL, "f0100000.gpio");
+
+       clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+                       GPIO1_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "fc980000.gpio");
+
+       clk = clk_register_gate(NULL, "gpio2_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+                       GPIO2_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "d8100000.gpio");
+
+       clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+                       SSP0_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "ssp-pl022.0");
+
+       clk = clk_register_gate(NULL, "ssp1_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+                       SSP1_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "ssp-pl022.1");
+
+       clk = clk_register_gate(NULL, "ssp2_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+                       SSP2_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, NULL, "ssp-pl022.2");
+}
index e6ecc5f2394387efcdfa3b112edd83ac88591509..1cc6b3f3e262ac72ad958ea8e34a59c00663ed71 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/clk.h>
 #include <crypto/internal/hash.h>
 #include <crypto/sha.h>
 
@@ -79,6 +80,7 @@ struct crypto_priv {
        void __iomem *reg;
        void __iomem *sram;
        int irq;
+       struct clk *clk;
        struct task_struct *queue_th;
 
        /* the lock protects queue and eng_st */
@@ -1053,6 +1055,12 @@ static int mv_probe(struct platform_device *pdev)
        if (ret)
                goto err_thread;
 
+       /* Not all platforms can gate the clock, so it is not
+          an error if the clock does not exists. */
+       cp->clk = clk_get(&pdev->dev, NULL);
+       if (!IS_ERR(cp->clk))
+               clk_prepare_enable(cp->clk);
+
        writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
        writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
        writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
@@ -1118,6 +1126,12 @@ static int mv_remove(struct platform_device *pdev)
        memset(cp->sram, 0, cp->sram_size);
        iounmap(cp->sram);
        iounmap(cp->reg);
+
+       if (!IS_ERR(cp->clk)) {
+               clk_disable_unprepare(cp->clk);
+               clk_put(cp->clk);
+       }
+
        kfree(cp);
        cpg = NULL;
        return 0;
index ef378b5b17e49079075f1fdf3f5834cca90f05a7..aadeb5be9dba25249d0215e3881b484fcfb813ac 100644 (file)
@@ -238,6 +238,7 @@ config IMX_DMA
 config MXS_DMA
        bool "MXS DMA support"
        depends on SOC_IMX23 || SOC_IMX28
+       select STMP_DEVICE
        select DMA_ENGINE
        help
          Support the MXS DMA engine. This engine including APBH-DMA
index 3d704abd7912b1ce9a2bd236e84188fdd81aace3..49ecbbb8932df2ecbc10aa601edd274acc21cf19 100644 (file)
@@ -95,10 +95,14 @@ static struct amba_driver pl08x_amba_driver;
  * struct vendor_data - vendor-specific config parameters for PL08x derivatives
  * @channels: the number of channels available in this variant
  * @dualmaster: whether this version supports dual AHB masters or not.
+ * @nomadik: whether the channels have Nomadik security extension bits
+ *     that need to be checked for permission before use and some registers are
+ *     missing
  */
 struct vendor_data {
        u8 channels;
        bool dualmaster;
+       bool nomadik;
 };
 
 /*
@@ -385,7 +389,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 
                spin_lock_irqsave(&ch->lock, flags);
 
-               if (!ch->serving) {
+               if (!ch->locked && !ch->serving) {
                        ch->serving = virt_chan;
                        ch->signal = -1;
                        spin_unlock_irqrestore(&ch->lock, flags);
@@ -1324,7 +1328,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
        int ret, tmp;
 
        dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
-                       __func__, sgl->length, plchan->name);
+                       __func__, sg_dma_len(sgl), plchan->name);
 
        txd = pl08x_get_txd(plchan, flags);
        if (!txd) {
@@ -1378,11 +1382,11 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 
                dsg->len = sg_dma_len(sg);
                if (direction == DMA_MEM_TO_DEV) {
-                       dsg->src_addr = sg_phys(sg);
+                       dsg->src_addr = sg_dma_address(sg);
                        dsg->dst_addr = slave_addr;
                } else {
                        dsg->src_addr = slave_addr;
-                       dsg->dst_addr = sg_phys(sg);
+                       dsg->dst_addr = sg_dma_address(sg);
                }
        }
 
@@ -1484,6 +1488,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
  */
 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 {
+       /* The Nomadik variant does not have the config register */
+       if (pl08x->vd->nomadik)
+               return;
        writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
 }
 
@@ -1616,7 +1623,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
                        __func__, err);
                writel(err, pl08x->base + PL080_ERR_CLEAR);
        }
-       tc = readl(pl08x->base + PL080_INT_STATUS);
+       tc = readl(pl08x->base + PL080_TC_STATUS);
        if (tc)
                writel(tc, pl08x->base + PL080_TC_CLEAR);
 
@@ -1773,8 +1780,10 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
                spin_lock_irqsave(&ch->lock, flags);
                virt_chan = ch->serving;
 
-               seq_printf(s, "%d\t\t%s\n",
-                          ch->id, virt_chan ? virt_chan->name : "(none)");
+               seq_printf(s, "%d\t\t%s%s\n",
+                          ch->id,
+                          virt_chan ? virt_chan->name : "(none)",
+                          ch->locked ? " LOCKED" : "");
 
                spin_unlock_irqrestore(&ch->lock, flags);
        }
@@ -1918,7 +1927,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
        }
 
        /* Initialize physical channels */
-       pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
+       pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
                        GFP_KERNEL);
        if (!pl08x->phy_chans) {
                dev_err(&adev->dev, "%s failed to allocate "
@@ -1933,8 +1942,23 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
                ch->id = i;
                ch->base = pl08x->base + PL080_Cx_BASE(i);
                spin_lock_init(&ch->lock);
-               ch->serving = NULL;
                ch->signal = -1;
+
+               /*
+                * Nomadik variants can have channels that are locked
+                * down for the secure world only. Lock up these channels
+                * by perpetually serving a dummy virtual channel.
+                */
+               if (vd->nomadik) {
+                       u32 val;
+
+                       val = readl(ch->base + PL080_CH_CONFIG);
+                       if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
+                               dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
+                               ch->locked = true;
+                       }
+               }
+
                dev_dbg(&adev->dev, "physical channel %d is %s\n",
                        i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
        }
@@ -2017,6 +2041,12 @@ static struct vendor_data vendor_pl080 = {
        .dualmaster = true,
 };
 
+static struct vendor_data vendor_nomadik = {
+       .channels = 8,
+       .dualmaster = true,
+       .nomadik = true,
+};
+
 static struct vendor_data vendor_pl081 = {
        .channels = 2,
        .dualmaster = false,
@@ -2037,9 +2067,9 @@ static struct amba_id pl08x_ids[] = {
        },
        /* Nomadik 8815 PL080 variant */
        {
-               .id     = 0x00280880,
+               .id     = 0x00280080,
                .mask   = 0x00ffffff,
-               .data   = &vendor_pl080,
+               .data   = &vendor_nomadik,
        },
        { 0, 0 },
 };
index bf0d7e4e345bd09babde30dfed59b4874f27b0fc..7292aa87b2dd3fb54a4797f7c676d74a6a047713 100644 (file)
@@ -39,7 +39,6 @@
  */
 
 #define        ATC_DEFAULT_CFG         (ATC_FIFOCFG_HALFFIFO)
-#define        ATC_DEFAULT_CTRLA       (0)
 #define        ATC_DEFAULT_CTRLB       (ATC_SIF(AT_DMA_MEM_IF) \
                                |ATC_DIF(AT_DMA_MEM_IF))
 
@@ -574,7 +573,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                return NULL;
        }
 
-       ctrla =   ATC_DEFAULT_CTRLA;
        ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
                | ATC_SRC_ADDR_MODE_INCR
                | ATC_DST_ADDR_MODE_INCR
@@ -585,13 +583,13 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
         * of the most common optimization.
         */
        if (!((src | dest  | len) & 3)) {
-               ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
+               ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
                src_width = dst_width = 2;
        } else if (!((src | dest | len) & 1)) {
-               ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
+               ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
                src_width = dst_width = 1;
        } else {
-               ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
+               ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
                src_width = dst_width = 0;
        }
 
@@ -668,7 +666,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                return NULL;
        }
 
-       ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
+       ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
+               | ATC_DCSIZE(sconfig->dst_maxburst);
        ctrlb = ATC_IEN;
 
        switch (direction) {
@@ -796,12 +795,12 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
                enum dma_transfer_direction direction)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
-       struct at_dma_slave     *atslave = chan->private;
        struct dma_slave_config *sconfig = &atchan->dma_sconfig;
        u32                     ctrla;
 
        /* prepare common CRTLA value */
-       ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
+       ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
+               | ATC_DCSIZE(sconfig->dst_maxburst)
                | ATC_DST_WIDTH(reg_width)
                | ATC_SRC_WIDTH(reg_width)
                | period_len >> reg_width;
index 897a8bcaec90d288003b3ee89d631b620d197935..8a6c8e8b2940885f32daba86c961453ec839b387 100644 (file)
 /* Bitfields in CTRLA */
 #define        ATC_BTSIZE_MAX          0xFFFFUL        /* Maximum Buffer Transfer Size */
 #define        ATC_BTSIZE(x)           (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */
-/* Chunck Tranfer size definitions are in at_hdmac.h */
+#define        ATC_SCSIZE_MASK         (0x7 << 16)     /* Source Chunk Transfer Size */
+#define                ATC_SCSIZE(x)           (ATC_SCSIZE_MASK & ((x) << 16))
+#define                ATC_SCSIZE_1            (0x0 << 16)
+#define                ATC_SCSIZE_4            (0x1 << 16)
+#define                ATC_SCSIZE_8            (0x2 << 16)
+#define                ATC_SCSIZE_16           (0x3 << 16)
+#define                ATC_SCSIZE_32           (0x4 << 16)
+#define                ATC_SCSIZE_64           (0x5 << 16)
+#define                ATC_SCSIZE_128          (0x6 << 16)
+#define                ATC_SCSIZE_256          (0x7 << 16)
+#define        ATC_DCSIZE_MASK         (0x7 << 20)     /* Destination Chunk Transfer Size */
+#define                ATC_DCSIZE(x)           (ATC_DCSIZE_MASK & ((x) << 20))
+#define                ATC_DCSIZE_1            (0x0 << 20)
+#define                ATC_DCSIZE_4            (0x1 << 20)
+#define                ATC_DCSIZE_8            (0x2 << 20)
+#define                ATC_DCSIZE_16           (0x3 << 20)
+#define                ATC_DCSIZE_32           (0x4 << 20)
+#define                ATC_DCSIZE_64           (0x5 << 20)
+#define                ATC_DCSIZE_128          (0x6 << 20)
+#define                ATC_DCSIZE_256          (0x7 << 20)
 #define        ATC_SRC_WIDTH_MASK      (0x3 << 24)     /* Source Single Transfer Size */
 #define                ATC_SRC_WIDTH(x)        ((x) << 24)
 #define                ATC_SRC_WIDTH_BYTE      (0x0 << 24)
index 750925f9638bab656b39ccff65bed7dbdbee405c..e67b4e06a918350137e8119e940618318f2c7867 100644 (file)
@@ -1033,7 +1033,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
        if (!sgl)
                goto out;
-       if (sgl->length == 0)
+       if (sg_dma_len(sgl) == 0)
                goto out;
 
        spin_lock_irqsave(&cohc->lock, flg);
index 6c0e2d4c66827c7a179a55c8bc52375b932dfaa1..780e0429b38cd30236001e4a121cefc6fa347aea 100644 (file)
@@ -270,10 +270,10 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 
                if (dir == DMA_MEM_TO_DEV)
                        /* increment source address */
-                       src = sg_phys(sg);
+                       src = sg_dma_address(sg);
                else
                        /* increment destination address */
-                       dst =  sg_phys(sg);
+                       dst = sg_dma_address(sg);
 
                bytes_to_transfer = sg_dma_len(sg);
 
index 7439079f5eed9c58bc9d39e0fc92795e29adc7dd..e23dc82d43acbb726c0825e3f259c53803cd3c58 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/of.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -742,7 +743,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        struct dw_desc  *desc;
                        u32             len, dlen, mem;
 
-                       mem = sg_phys(sg);
+                       mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);
 
                        if (!((mem | len) & 7))
@@ -809,7 +810,7 @@ slave_sg_todev_fill_desc:
                        struct dw_desc  *desc;
                        u32             len, dlen, mem;
 
-                       mem = sg_phys(sg);
+                       mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);
 
                        if (!((mem | len) & 7))
@@ -1429,7 +1430,7 @@ static int __init dw_probe(struct platform_device *pdev)
                err = PTR_ERR(dw->clk);
                goto err_clk;
        }
-       clk_enable(dw->clk);
+       clk_prepare_enable(dw->clk);
 
        /* force dma off, just in case */
        dw_dma_off(dw);
@@ -1510,7 +1511,7 @@ static int __init dw_probe(struct platform_device *pdev)
        return 0;
 
 err_irq:
-       clk_disable(dw->clk);
+       clk_disable_unprepare(dw->clk);
        clk_put(dw->clk);
 err_clk:
        iounmap(dw->regs);
@@ -1540,7 +1541,7 @@ static int __exit dw_remove(struct platform_device *pdev)
                channel_clear_bit(dw, CH_EN, dwc->mask);
        }
 
-       clk_disable(dw->clk);
+       clk_disable_unprepare(dw->clk);
        clk_put(dw->clk);
 
        iounmap(dw->regs);
@@ -1559,7 +1560,7 @@ static void dw_shutdown(struct platform_device *pdev)
        struct dw_dma   *dw = platform_get_drvdata(pdev);
 
        dw_dma_off(platform_get_drvdata(pdev));
-       clk_disable(dw->clk);
+       clk_disable_unprepare(dw->clk);
 }
 
 static int dw_suspend_noirq(struct device *dev)
@@ -1568,7 +1569,7 @@ static int dw_suspend_noirq(struct device *dev)
        struct dw_dma   *dw = platform_get_drvdata(pdev);
 
        dw_dma_off(platform_get_drvdata(pdev));
-       clk_disable(dw->clk);
+       clk_disable_unprepare(dw->clk);
 
        return 0;
 }
@@ -1578,7 +1579,7 @@ static int dw_resume_noirq(struct device *dev)
        struct platform_device *pdev = to_platform_device(dev);
        struct dw_dma   *dw = platform_get_drvdata(pdev);
 
-       clk_enable(dw->clk);
+       clk_prepare_enable(dw->clk);
        dma_writel(dw, CFG, DW_CFG_DMA_EN);
        return 0;
 }
@@ -1592,12 +1593,21 @@ static const struct dev_pm_ops dw_dev_pm_ops = {
        .poweroff_noirq = dw_suspend_noirq,
 };
 
+#ifdef CONFIG_OF
+static const struct of_device_id dw_dma_id_table[] = {
+       { .compatible = "snps,dma-spear1340" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, dw_dma_id_table);
+#endif
+
 static struct platform_driver dw_driver = {
        .remove         = __exit_p(dw_remove),
        .shutdown       = dw_shutdown,
        .driver = {
                .name   = "dw_dmac",
                .pm     = &dw_dev_pm_ops,
+               .of_match_table = of_match_ptr(dw_dma_id_table),
        },
 };
 
index f6e9b572b998919ee117cf154e560788da439de3..c64917ec313dc25d501ad41312fa8e898df8e003 100644 (file)
@@ -71,6 +71,7 @@
 #define M2M_CONTROL_TM_SHIFT           13
 #define M2M_CONTROL_TM_TX              (1 << M2M_CONTROL_TM_SHIFT)
 #define M2M_CONTROL_TM_RX              (2 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_NFBINT             BIT(21)
 #define M2M_CONTROL_RSS_SHIFT          22
 #define M2M_CONTROL_RSS_SSPRX          (1 << M2M_CONTROL_RSS_SHIFT)
 #define M2M_CONTROL_RSS_SSPTX          (2 << M2M_CONTROL_RSS_SHIFT)
 #define M2M_CONTROL_PWSC_SHIFT         25
 
 #define M2M_INTERRUPT                  0x0004
-#define M2M_INTERRUPT_DONEINT          BIT(1)
+#define M2M_INTERRUPT_MASK             6
+
+#define M2M_STATUS                     0x000c
+#define M2M_STATUS_CTL_SHIFT           1
+#define M2M_STATUS_CTL_IDLE            (0 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_STALL           (1 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMRD           (2 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMWR           (3 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_BWCWAIT         (4 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MASK            (7 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_BUF_SHIFT           4
+#define M2M_STATUS_BUF_NO              (0 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_ON              (1 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_NEXT            (2 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_MASK            (3 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_DONE                        BIT(6)
 
 #define M2M_BCR0                       0x0010
 #define M2M_BCR1                       0x0014
@@ -426,15 +442,6 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
 
 /*
  * M2M DMA implementation
- *
- * For the M2M transfers we don't use NFB at all. This is because it simply
- * doesn't work well with memcpy transfers. When you submit both buffers it is
- * extremely unlikely that you get an NFB interrupt, but it instead reports
- * DONE interrupt and both buffers are already transferred which means that we
- * weren't able to update the next buffer.
- *
- * So for now we "simulate" NFB by just submitting buffer after buffer
- * without double buffering.
  */
 
 static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
@@ -543,6 +550,11 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
        m2m_fill_desc(edmac);
        control |= M2M_CONTROL_DONEINT;
 
+       if (ep93xx_dma_advance_active(edmac)) {
+               m2m_fill_desc(edmac);
+               control |= M2M_CONTROL_NFBINT;
+       }
+
        /*
         * Now we can finally enable the channel. For M2M channel this must be
         * done _after_ the BCRx registers are programmed.
@@ -560,32 +572,89 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
        }
 }
 
+/*
+ * According to EP93xx User's Guide, we should receive DONE interrupt when all
+ * M2M DMA controller transactions complete normally. This is not always the
+ * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
+ * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
+ * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
+ * In effect, disabling the channel when only DONE bit is set could stop
+ * currently running DMA transfer. To avoid this, we use Buffer FSM and
+ * Control FSM to check current state of DMA channel.
+ */
 static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
 {
+       u32 status = readl(edmac->regs + M2M_STATUS);
+       u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
+       u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
+       bool done = status & M2M_STATUS_DONE;
+       bool last_done;
        u32 control;
+       struct ep93xx_dma_desc *desc;
 
-       if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
+       /* Accept only DONE and NFB interrupts */
+       if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
                return INTERRUPT_UNKNOWN;
 
-       /* Clear the DONE bit */
-       writel(0, edmac->regs + M2M_INTERRUPT);
+       if (done) {
+               /* Clear the DONE bit */
+               writel(0, edmac->regs + M2M_INTERRUPT);
+       }
 
-       /* Disable interrupts and the channel */
-       control = readl(edmac->regs + M2M_CONTROL);
-       control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
-       writel(control, edmac->regs + M2M_CONTROL);
+       /*
+        * Check whether we are done with descriptors or not. This, together
+        * with DMA channel state, determines action to take in interrupt.
+        */
+       desc = ep93xx_dma_get_active(edmac);
+       last_done = !desc || desc->txd.cookie;
 
        /*
-        * Since we only get DONE interrupt we have to find out ourselves
-        * whether there still is something to process. So we try to advance
-        * the chain an see whether it succeeds.
+        * Use M2M DMA Buffer FSM and Control FSM to check current state of
+        * DMA channel. Using DONE and NFB bits from channel status register
+        * or bits from channel interrupt register is not reliable.
         */
-       if (ep93xx_dma_advance_active(edmac)) {
-               edmac->edma->hw_submit(edmac);
-               return INTERRUPT_NEXT_BUFFER;
+       if (!last_done &&
+           (buf_fsm == M2M_STATUS_BUF_NO ||
+            buf_fsm == M2M_STATUS_BUF_ON)) {
+               /*
+                * Two buffers are ready for update when Buffer FSM is in
+                * DMA_NO_BUF state. Only one buffer can be prepared without
+                * disabling the channel or polling the DONE bit.
+                * To simplify things, always prepare only one buffer.
+                */
+               if (ep93xx_dma_advance_active(edmac)) {
+                       m2m_fill_desc(edmac);
+                       if (done && !edmac->chan.private) {
+                               /* Software trigger for memcpy channel */
+                               control = readl(edmac->regs + M2M_CONTROL);
+                               control |= M2M_CONTROL_START;
+                               writel(control, edmac->regs + M2M_CONTROL);
+                       }
+                       return INTERRUPT_NEXT_BUFFER;
+               } else {
+                       last_done = true;
+               }
+       }
+
+       /*
+        * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
+        * and Control FSM is in DMA_STALL state.
+        */
+       if (last_done &&
+           buf_fsm == M2M_STATUS_BUF_NO &&
+           ctl_fsm == M2M_STATUS_CTL_STALL) {
+               /* Disable interrupts and the channel */
+               control = readl(edmac->regs + M2M_CONTROL);
+               control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
+                           | M2M_CONTROL_ENABLE);
+               writel(control, edmac->regs + M2M_CONTROL);
+               return INTERRUPT_DONE;
        }
 
-       return INTERRUPT_DONE;
+       /*
+        * Nothing to do this time.
+        */
+       return INTERRUPT_NEXT_BUFFER;
 }
 
 /*
index bb787d8e15296ed17eef8032be17f4b52647d173..fcfeb3cd8d3170aff7c30d6d151a0e7e15ecd968 100644 (file)
@@ -227,7 +227,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
        struct scatterlist *sg = d->sg;
        unsigned long now;
 
-       now = min(d->len, sg->length);
+       now = min(d->len, sg_dma_len(sg));
        if (d->len != IMX_DMA_LENGTH_LOOP)
                d->len -= now;
 
@@ -763,16 +763,16 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
        desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
 
        for_each_sg(sgl, sg, sg_len, i) {
-               dma_length += sg->length;
+               dma_length += sg_dma_len(sg);
        }
 
        switch (imxdmac->word_size) {
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
-               if (sgl->length & 3 || sgl->dma_address & 3)
+               if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
                        return NULL;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
-               if (sgl->length & 1 || sgl->dma_address & 1)
+               if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
                        return NULL;
                break;
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -831,13 +831,13 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
                imxdmac->sg_list[i].page_link = 0;
                imxdmac->sg_list[i].offset = 0;
                imxdmac->sg_list[i].dma_address = dma_addr;
-               imxdmac->sg_list[i].length = period_len;
+               sg_dma_len(&imxdmac->sg_list[i]) = period_len;
                dma_addr += period_len;
        }
 
        /* close the loop */
        imxdmac->sg_list[periods].offset = 0;
-       imxdmac->sg_list[periods].length = 0;
+       sg_dma_len(&imxdmac->sg_list[periods]) = 0;
        imxdmac->sg_list[periods].page_link =
                ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
 
index d3e38e28bb6b19a480eb1b05b732a0dc2f40eb7f..fb4f4990f5ebf9f6c8e1f97c9704f8b03642bd18 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
-#include <linux/wait.h>
+#include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/spinlock.h>
@@ -271,6 +271,7 @@ struct sdma_channel {
        enum dma_status                 status;
        unsigned int                    chn_count;
        unsigned int                    chn_real_count;
+       struct tasklet_struct           tasklet;
 };
 
 #define IMX_DMA_SG_LOOP                BIT(0)
@@ -322,8 +323,9 @@ struct sdma_engine {
        struct sdma_context_data        *context;
        dma_addr_t                      context_phys;
        struct dma_device               dma_device;
-       struct clk                      *clk;
-       struct mutex                    channel_0_lock;
+       struct clk                      *clk_ipg;
+       struct clk                      *clk_ahb;
+       spinlock_t                      channel_0_lock;
        struct sdma_script_start_addrs  *script_addrs;
 };
 
@@ -401,19 +403,27 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 }
 
 /*
- * sdma_run_channel - run a channel and wait till it's done
+ * sdma_run_channel0 - run a channel and wait till it's done
  */
-static int sdma_run_channel(struct sdma_channel *sdmac)
+static int sdma_run_channel0(struct sdma_engine *sdma)
 {
-       struct sdma_engine *sdma = sdmac->sdma;
-       int channel = sdmac->channel;
        int ret;
+       unsigned long timeout = 500;
 
-       init_completion(&sdmac->done);
+       sdma_enable_channel(sdma, 0);
 
-       sdma_enable_channel(sdma, channel);
+       while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
+               if (timeout-- <= 0)
+                       break;
+               udelay(1);
+       }
 
-       ret = wait_for_completion_timeout(&sdmac->done, HZ);
+       if (ret) {
+               /* Clear the interrupt status */
+               writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
+       } else {
+               dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
+       }
 
        return ret ? 0 : -ETIMEDOUT;
 }
@@ -425,17 +435,17 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
        void *buf_virt;
        dma_addr_t buf_phys;
        int ret;
-
-       mutex_lock(&sdma->channel_0_lock);
+       unsigned long flags;
 
        buf_virt = dma_alloc_coherent(NULL,
                        size,
                        &buf_phys, GFP_KERNEL);
        if (!buf_virt) {
-               ret = -ENOMEM;
-               goto err_out;
+               return -ENOMEM;
        }
 
+       spin_lock_irqsave(&sdma->channel_0_lock, flags);
+
        bd0->mode.command = C0_SETPM;
        bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
        bd0->mode.count = size / 2;
@@ -444,12 +454,11 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 
        memcpy(buf_virt, buf, size);
 
-       ret = sdma_run_channel(&sdma->channel[0]);
+       ret = sdma_run_channel0(sdma);
 
-       dma_free_coherent(NULL, size, buf_virt, buf_phys);
+       spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
-err_out:
-       mutex_unlock(&sdma->channel_0_lock);
+       dma_free_coherent(NULL, size, buf_virt, buf_phys);
 
        return ret;
 }
@@ -534,13 +543,11 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
                sdmac->desc.callback(sdmac->desc.callback_param);
 }
 
-static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
+static void sdma_tasklet(unsigned long data)
 {
-       complete(&sdmac->done);
+       struct sdma_channel *sdmac = (struct sdma_channel *) data;
 
-       /* not interested in channel 0 interrupts */
-       if (sdmac->channel == 0)
-               return;
+       complete(&sdmac->done);
 
        if (sdmac->flags & IMX_DMA_SG_LOOP)
                sdma_handle_channel_loop(sdmac);
@@ -554,13 +561,15 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
        unsigned long stat;
 
        stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+       /* not interested in channel 0 interrupts */
+       stat &= ~1;
        writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
 
        while (stat) {
                int channel = fls(stat) - 1;
                struct sdma_channel *sdmac = &sdma->channel[channel];
 
-               mxc_sdma_handle_channel(sdmac);
+               tasklet_schedule(&sdmac->tasklet);
 
                __clear_bit(channel, &stat);
        }
@@ -659,6 +668,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
        struct sdma_context_data *context = sdma->context;
        struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
        int ret;
+       unsigned long flags;
 
        if (sdmac->direction == DMA_DEV_TO_MEM) {
                load_address = sdmac->pc_from_device;
@@ -676,7 +686,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
        dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
        dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
 
-       mutex_lock(&sdma->channel_0_lock);
+       spin_lock_irqsave(&sdma->channel_0_lock, flags);
 
        memset(context, 0, sizeof(*context));
        context->channel_state.pc = load_address;
@@ -695,10 +705,9 @@ static int sdma_load_context(struct sdma_channel *sdmac)
        bd0->mode.count = sizeof(*context) / 4;
        bd0->buffer_addr = sdma->context_phys;
        bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
+       ret = sdma_run_channel0(sdma);
 
-       ret = sdma_run_channel(&sdma->channel[0]);
-
-       mutex_unlock(&sdma->channel_0_lock);
+       spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
        return ret;
 }
@@ -859,7 +868,8 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
        sdmac->peripheral_type = data->peripheral_type;
        sdmac->event_id0 = data->dma_request;
 
-       clk_enable(sdmac->sdma->clk);
+       clk_enable(sdmac->sdma->clk_ipg);
+       clk_enable(sdmac->sdma->clk_ahb);
 
        ret = sdma_request_channel(sdmac);
        if (ret)
@@ -896,7 +906,8 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
        dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
 
-       clk_disable(sdma->clk);
+       clk_disable(sdma->clk_ipg);
+       clk_disable(sdma->clk_ahb);
 }
 
 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
@@ -938,7 +949,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
                bd->buffer_addr = sg->dma_address;
 
-               count = sg->length;
+               count = sg_dma_len(sg);
 
                if (count > 0xffff) {
                        dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
@@ -1169,12 +1180,14 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
        addr = (void *)header + header->script_addrs_start;
        ram_code = (void *)header + header->ram_code_start;
 
-       clk_enable(sdma->clk);
+       clk_enable(sdma->clk_ipg);
+       clk_enable(sdma->clk_ahb);
        /* download the RAM image for SDMA */
        sdma_load_script(sdma, ram_code,
                        header->ram_code_size,
                        addr->ram_code_start_addr);
-       clk_disable(sdma->clk);
+       clk_disable(sdma->clk_ipg);
+       clk_disable(sdma->clk_ahb);
 
        sdma_add_scripts(sdma, addr);
 
@@ -1216,7 +1229,8 @@ static int __init sdma_init(struct sdma_engine *sdma)
                return -ENODEV;
        }
 
-       clk_enable(sdma->clk);
+       clk_enable(sdma->clk_ipg);
+       clk_enable(sdma->clk_ahb);
 
        /* Be sure SDMA has not started yet */
        writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
@@ -1269,12 +1283,14 @@ static int __init sdma_init(struct sdma_engine *sdma)
        /* Initializes channel's priorities */
        sdma_set_channel_priority(&sdma->channel[0], 7);
 
-       clk_disable(sdma->clk);
+       clk_disable(sdma->clk_ipg);
+       clk_disable(sdma->clk_ahb);
 
        return 0;
 
 err_dma_alloc:
-       clk_disable(sdma->clk);
+       clk_disable(sdma->clk_ipg);
+       clk_disable(sdma->clk_ahb);
        dev_err(sdma->dev, "initialisation failed with %d\n", ret);
        return ret;
 }
@@ -1297,7 +1313,7 @@ static int __init sdma_probe(struct platform_device *pdev)
        if (!sdma)
                return -ENOMEM;
 
-       mutex_init(&sdma->channel_0_lock);
+       spin_lock_init(&sdma->channel_0_lock);
 
        sdma->dev = &pdev->dev;
 
@@ -1313,12 +1329,21 @@ static int __init sdma_probe(struct platform_device *pdev)
                goto err_request_region;
        }
 
-       sdma->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(sdma->clk)) {
-               ret = PTR_ERR(sdma->clk);
+       sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+       if (IS_ERR(sdma->clk_ipg)) {
+               ret = PTR_ERR(sdma->clk_ipg);
                goto err_clk;
        }
 
+       sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+       if (IS_ERR(sdma->clk_ahb)) {
+               ret = PTR_ERR(sdma->clk_ahb);
+               goto err_clk;
+       }
+
+       clk_prepare(sdma->clk_ipg);
+       clk_prepare(sdma->clk_ahb);
+
        sdma->regs = ioremap(iores->start, resource_size(iores));
        if (!sdma->regs) {
                ret = -ENOMEM;
@@ -1359,6 +1384,8 @@ static int __init sdma_probe(struct platform_device *pdev)
                dma_cookie_init(&sdmac->chan);
                sdmac->channel = i;
 
+               tasklet_init(&sdmac->tasklet, sdma_tasklet,
+                            (unsigned long) sdmac);
                /*
                 * Add the channel to the DMAC list. Do not add channel 0 though
                 * because we need it internally in the SDMA driver. This also means
@@ -1426,7 +1453,6 @@ err_alloc:
 err_request_irq:
        iounmap(sdma->regs);
 err_ioremap:
-       clk_put(sdma->clk);
 err_clk:
        release_mem_region(iores->start, resource_size(iores));
 err_request_region:
index c900ca7aaec4b16d2ad5292c9997ff51d9616492..222e907bfaaa4b50d501f804d27669bd8091476a 100644 (file)
@@ -394,11 +394,11 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
                        }
                }
                /*Populate CTL_HI values*/
-               ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
+               ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
                                                        desc->width,
                                                        midc->dma->block_size);
                /*Populate SAR and DAR values*/
-               sg_phy_addr = sg_phys(sg);
+               sg_phy_addr = sg_dma_address(sg);
                if (desc->dirn ==  DMA_MEM_TO_DEV) {
                        lli_bloc_desc->sar  = sg_phy_addr;
                        lli_bloc_desc->dar  = mids->dma_slave.dst_addr;
@@ -747,7 +747,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
                        txd = intel_mid_dma_prep_memcpy(chan,
                                                mids->dma_slave.dst_addr,
                                                mids->dma_slave.src_addr,
-                                               sgl->length,
+                                               sg_dma_len(sgl),
                                                flags);
                        return txd;
                } else {
@@ -759,7 +759,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
        pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
                        sg_len, direction, flags);
 
-       txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
+       txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
        if (NULL == txd) {
                pr_err("MDMA: Prep memcpy failed\n");
                return NULL;
index 62e3f8ec2461f5584fabe343426f7654d678922f..5ec72044ea4c13a5849eef9675c10335c4307416 100644 (file)
@@ -1715,7 +1715,7 @@ static int __init ipu_probe(struct platform_device *pdev)
        }
 
        /* Make sure IPU HSP clock is running */
-       clk_enable(ipu_data.ipu_clk);
+       clk_prepare_enable(ipu_data.ipu_clk);
 
        /* Disable all interrupts */
        idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1);
@@ -1747,7 +1747,7 @@ static int __init ipu_probe(struct platform_device *pdev)
 err_idmac_init:
 err_attach_irq:
        ipu_irq_detach_irq(&ipu_data, pdev);
-       clk_disable(ipu_data.ipu_clk);
+       clk_disable_unprepare(ipu_data.ipu_clk);
        clk_put(ipu_data.ipu_clk);
 err_clk_get:
        iounmap(ipu_data.reg_ic);
@@ -1765,7 +1765,7 @@ static int __exit ipu_remove(struct platform_device *pdev)
 
        ipu_idmac_exit(ipu);
        ipu_irq_detach_irq(ipu, pdev);
-       clk_disable(ipu->ipu_clk);
+       clk_disable_unprepare(ipu->ipu_clk);
        clk_put(ipu->ipu_clk);
        iounmap(ipu->reg_ic);
        iounmap(ipu->reg_ipu);
index fa5d55fea46cc4829ba55b84a53cb42baafe81c9..0b12e68bf79ca72a238b59e9639a9a3f02f6f3d5 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/memory.h>
+#include <linux/clk.h>
 #include <plat/mv_xor.h>
 
 #include "dmaengine.h"
@@ -1307,11 +1308,25 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
        if (dram)
                mv_xor_conf_mbus_windows(msp, dram);
 
+       /* Not all platforms can gate the clock, so it is not
+        * an error if the clock does not exists.
+        */
+       msp->clk = clk_get(&pdev->dev, NULL);
+       if (!IS_ERR(msp->clk))
+               clk_prepare_enable(msp->clk);
+
        return 0;
 }
 
 static int mv_xor_shared_remove(struct platform_device *pdev)
 {
+       struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
+
+       if (!IS_ERR(msp->clk)) {
+               clk_disable_unprepare(msp->clk);
+               clk_put(msp->clk);
+       }
+
        return 0;
 }
 
index 654876b7ba1deae84a06f89cdf617b153d4315ef..a5b422f5a8abf4e3f4485440da8eeb6e01ebfb1b 100644 (file)
@@ -55,6 +55,7 @@
 struct mv_xor_shared_private {
        void __iomem    *xor_base;
        void __iomem    *xor_high_base;
+       struct clk      *clk;
 };
 
 
index 655d4ce6ed0d94fcae71ed687641f707e08a8ed8..c96ab15319f245c363bf1ed693c5a61e0669fcef 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
+#include <linux/module.h>
 #include <linux/fsl/mxs-dma.h>
+#include <linux/stmp_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include <asm/irq.h>
 #include <mach/mxs.h>
-#include <mach/common.h>
 
 #include "dmaengine.h"
 
  * dma can program the controller registers of peripheral devices.
  */
 
-#define MXS_DMA_APBH           0
-#define MXS_DMA_APBX           1
-#define dma_is_apbh()          (mxs_dma->dev_id == MXS_DMA_APBH)
-
-#define APBH_VERSION_LATEST    3
-#define apbh_is_old()          (mxs_dma->version < APBH_VERSION_LATEST)
+#define dma_is_apbh(mxs_dma)   ((mxs_dma)->type == MXS_DMA_APBH)
+#define apbh_is_old(mxs_dma)   ((mxs_dma)->dev_id == IMX23_DMA)
 
 #define HW_APBHX_CTRL0                         0x000
 #define BM_APBH_CTRL0_APB_BURST8_EN            (1 << 29)
 #define HW_APBHX_CTRL2                         0x020
 #define HW_APBHX_CHANNEL_CTRL                  0x030
 #define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL    16
-#define HW_APBH_VERSION                                (cpu_is_mx23() ? 0x3f0 : 0x800)
-#define HW_APBX_VERSION                                0x800
-#define BP_APBHX_VERSION_MAJOR                 24
-#define HW_APBHX_CHn_NXTCMDAR(n) \
-       (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70)
-#define HW_APBHX_CHn_SEMA(n) \
-       (((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70)
+/*
+ * The offset of NXTCMDAR register is different per both dma type and version,
+ * while stride for each channel is all the same 0x70.
+ */
+#define HW_APBHX_CHn_NXTCMDAR(d, n) \
+       (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
+#define HW_APBHX_CHn_SEMA(d, n) \
+       (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
 
 /*
  * ccw bits definitions
@@ -121,9 +121,19 @@ struct mxs_dma_chan {
 #define MXS_DMA_CHANNELS               16
 #define MXS_DMA_CHANNELS_MASK          0xffff
 
+enum mxs_dma_devtype {
+       MXS_DMA_APBH,
+       MXS_DMA_APBX,
+};
+
+enum mxs_dma_id {
+       IMX23_DMA,
+       IMX28_DMA,
+};
+
 struct mxs_dma_engine {
-       int                             dev_id;
-       unsigned int                    version;
+       enum mxs_dma_id                 dev_id;
+       enum mxs_dma_devtype            type;
        void __iomem                    *base;
        struct clk                      *clk;
        struct dma_device               dma_device;
@@ -131,17 +141,86 @@ struct mxs_dma_engine {
        struct mxs_dma_chan             mxs_chans[MXS_DMA_CHANNELS];
 };
 
+struct mxs_dma_type {
+       enum mxs_dma_id id;
+       enum mxs_dma_devtype type;
+};
+
+static struct mxs_dma_type mxs_dma_types[] = {
+       {
+               .id = IMX23_DMA,
+               .type = MXS_DMA_APBH,
+       }, {
+               .id = IMX23_DMA,
+               .type = MXS_DMA_APBX,
+       }, {
+               .id = IMX28_DMA,
+               .type = MXS_DMA_APBH,
+       }, {
+               .id = IMX28_DMA,
+               .type = MXS_DMA_APBX,
+       }
+};
+
+static struct platform_device_id mxs_dma_ids[] = {
+       {
+               .name = "imx23-dma-apbh",
+               .driver_data = (kernel_ulong_t) &mxs_dma_types[0],
+       }, {
+               .name = "imx23-dma-apbx",
+               .driver_data = (kernel_ulong_t) &mxs_dma_types[1],
+       }, {
+               .name = "imx28-dma-apbh",
+               .driver_data = (kernel_ulong_t) &mxs_dma_types[2],
+       }, {
+               .name = "imx28-dma-apbx",
+               .driver_data = (kernel_ulong_t) &mxs_dma_types[3],
+       }, {
+               /* end of list */
+       }
+};
+
+static const struct of_device_id mxs_dma_dt_ids[] = {
+       { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
+       { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
+       { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
+       { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);
+
+static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
+{
+       return container_of(chan, struct mxs_dma_chan, chan);
+}
+
+int mxs_dma_is_apbh(struct dma_chan *chan)
+{
+       struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+       struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+
+       return dma_is_apbh(mxs_dma);
+}
+
+int mxs_dma_is_apbx(struct dma_chan *chan)
+{
+       struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+       struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+
+       return !dma_is_apbh(mxs_dma);
+}
+
 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
 {
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
        int chan_id = mxs_chan->chan.chan_id;
 
-       if (dma_is_apbh() && apbh_is_old())
+       if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
                writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
-                       mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+                       mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
        else
                writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
-                       mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+                       mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
 }
 
 static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
@@ -151,10 +230,10 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
 
        /* set cmd_addr up */
        writel(mxs_chan->ccw_phys,
-               mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
+               mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));
 
        /* write 1 to SEMA to kick off the channel */
-       writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
+       writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
 }
 
 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
@@ -168,12 +247,12 @@ static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
        int chan_id = mxs_chan->chan.chan_id;
 
        /* freeze the channel */
-       if (dma_is_apbh() && apbh_is_old())
+       if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
                writel(1 << chan_id,
-                       mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+                       mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
        else
                writel(1 << chan_id,
-                       mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+                       mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
 
        mxs_chan->status = DMA_PAUSED;
 }
@@ -184,21 +263,16 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
        int chan_id = mxs_chan->chan.chan_id;
 
        /* unfreeze the channel */
-       if (dma_is_apbh() && apbh_is_old())
+       if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
                writel(1 << chan_id,
-                       mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
+                       mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
        else
                writel(1 << chan_id,
-                       mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR);
+                       mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);
 
        mxs_chan->status = DMA_IN_PROGRESS;
 }
 
-static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
-{
-       return container_of(chan, struct mxs_dma_chan, chan);
-}
-
 static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
        return dma_cookie_assign(tx);
@@ -220,11 +294,11 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
        /* completion status */
        stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
        stat1 &= MXS_DMA_CHANNELS_MASK;
-       writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR);
+       writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
 
        /* error status */
        stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
-       writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR);
+       writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
 
        /*
         * When both completion and error of termination bits set at the
@@ -415,9 +489,9 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
                ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
        } else {
                for_each_sg(sgl, sg, sg_len, i) {
-                       if (sg->length > MAX_XFER_BYTES) {
+                       if (sg_dma_len(sg) > MAX_XFER_BYTES) {
                                dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
-                                               sg->length, MAX_XFER_BYTES);
+                                               sg_dma_len(sg), MAX_XFER_BYTES);
                                goto err_out;
                        }
 
@@ -425,7 +499,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 
                        ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
                        ccw->bufaddr = sg->dma_address;
-                       ccw->xfer_bytes = sg->length;
+                       ccw->xfer_bytes = sg_dma_len(sg);
 
                        ccw->bits = 0;
                        ccw->bits |= CCW_CHAIN;
@@ -567,27 +641,21 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
        if (ret)
                return ret;
 
-       ret = mxs_reset_block(mxs_dma->base);
+       ret = stmp_reset_block(mxs_dma->base);
        if (ret)
                goto err_out;
 
-       /* only major version matters */
-       mxs_dma->version = readl(mxs_dma->base +
-                               ((mxs_dma->dev_id == MXS_DMA_APBX) ?
-                               HW_APBX_VERSION : HW_APBH_VERSION)) >>
-                               BP_APBHX_VERSION_MAJOR;
-
        /* enable apbh burst */
-       if (dma_is_apbh()) {
+       if (dma_is_apbh(mxs_dma)) {
                writel(BM_APBH_CTRL0_APB_BURST_EN,
-                       mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+                       mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
                writel(BM_APBH_CTRL0_APB_BURST8_EN,
-                       mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+                       mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
        }
 
        /* enable irq for all the channels */
        writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
-               mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
+               mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);
 
 err_out:
        clk_disable_unprepare(mxs_dma->clk);
@@ -596,8 +664,9 @@ err_out:
 
 static int __init mxs_dma_probe(struct platform_device *pdev)
 {
-       const struct platform_device_id *id_entry =
-                               platform_get_device_id(pdev);
+       const struct platform_device_id *id_entry;
+       const struct of_device_id *of_id;
+       const struct mxs_dma_type *dma_type;
        struct mxs_dma_engine *mxs_dma;
        struct resource *iores;
        int ret, i;
@@ -606,7 +675,15 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
        if (!mxs_dma)
                return -ENOMEM;
 
-       mxs_dma->dev_id = id_entry->driver_data;
+       of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
+       if (of_id)
+               id_entry = of_id->data;
+       else
+               id_entry = platform_get_device_id(pdev);
+
+       dma_type = (struct mxs_dma_type *)id_entry->driver_data;
+       mxs_dma->type = dma_type->type;
+       mxs_dma->dev_id = dma_type->id;
 
        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
@@ -689,23 +766,12 @@ err_request_region:
        return ret;
 }
 
-static struct platform_device_id mxs_dma_type[] = {
-       {
-               .name = "mxs-dma-apbh",
-               .driver_data = MXS_DMA_APBH,
-       }, {
-               .name = "mxs-dma-apbx",
-               .driver_data = MXS_DMA_APBX,
-       }, {
-               /* end of list */
-       }
-};
-
 static struct platform_driver mxs_dma_driver = {
        .driver         = {
                .name   = "mxs-dma",
+               .of_match_table = mxs_dma_dt_ids,
        },
-       .id_table       = mxs_dma_type,
+       .id_table       = mxs_dma_ids,
 };
 
 static int __init mxs_dma_module_init(void)
index 65c0495a6d40b74a1f0d7dc659f4ff72ec7c4c63..987ab5cd2617a1cc15fa807aba82ae411af5f77b 100644 (file)
@@ -621,7 +621,7 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
                        goto err_desc_get;
 
                desc->regs.dev_addr = reg;
-               desc->regs.mem_addr = sg_phys(sg);
+               desc->regs.mem_addr = sg_dma_address(sg);
                desc->regs.size = sg_dma_len(sg);
                desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;
 
index fa3fb21e60bed6bb112dadf869ac6adc6e19dec9..cbcc28e79be6331570af5ccd3132e12efe67aebc 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
-#include <linux/interrupt.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl330.h>
 #include <linux/pm_runtime.h>
index 2ed1ac3513f3d4de118d7937f40fadc202748a93..000d309602b2d76fd825d895be5ab5b91837d19f 100644 (file)
@@ -2362,7 +2362,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
        }
 
        sg[periods].offset = 0;
-       sg[periods].length = 0;
+       sg_dma_len(&sg[periods]) = 0;
        sg[periods].page_link =
                ((unsigned long)sg | 0x01) & ~0x02;
 
index 7ef73c919c5da24a0e260a600f97c775e7fb8117..7be9b7288e90eaaf5fab79f34dcac2ceafbc3a2b 100644 (file)
@@ -715,25 +715,6 @@ static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
                                     input_addr_to_dram_addr(mci, input_addr));
 }
 
-/*
- * Find the minimum and maximum InputAddr values that map to the given @csrow.
- * Pass back these values in *input_addr_min and *input_addr_max.
- */
-static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
-                             u64 *input_addr_min, u64 *input_addr_max)
-{
-       struct amd64_pvt *pvt;
-       u64 base, mask;
-
-       pvt = mci->pvt_info;
-       BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
-
-       get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
-
-       *input_addr_min = base & ~mask;
-       *input_addr_max = base | mask;
-}
-
 /* Map the Error address to a PAGE and PAGE OFFSET. */
 static inline void error_address_to_page_and_offset(u64 error_address,
                                                    u32 *page, u32 *offset)
@@ -1058,6 +1039,37 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
        int channel, csrow;
        u32 page, offset;
 
+       error_address_to_page_and_offset(sys_addr, &page, &offset);
+
+       /*
+        * Find out which node the error address belongs to. This may be
+        * different from the node that detected the error.
+        */
+       src_mci = find_mc_by_sys_addr(mci, sys_addr);
+       if (!src_mci) {
+               amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
+                            (unsigned long)sys_addr);
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                    page, offset, syndrome,
+                                    -1, -1, -1,
+                                    EDAC_MOD_STR,
+                                    "failed to map error addr to a node",
+                                    NULL);
+               return;
+       }
+
+       /* Now map the sys_addr to a CSROW */
+       csrow = sys_addr_to_csrow(src_mci, sys_addr);
+       if (csrow < 0) {
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                    page, offset, syndrome,
+                                    -1, -1, -1,
+                                    EDAC_MOD_STR,
+                                    "failed to map error addr to a csrow",
+                                    NULL);
+               return;
+       }
+
        /* CHIPKILL enabled */
        if (pvt->nbcfg & NBCFG_CHIPKILL) {
                channel = get_channel_from_ecc_syndrome(mci, syndrome);
@@ -1067,9 +1079,15 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
                         * 2 DIMMs is in error. So we need to ID 'both' of them
                         * as suspect.
                         */
-                       amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
-                                          "error reporting race\n", syndrome);
-                       edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+                       amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
+                                     "possible error reporting race\n",
+                                     syndrome);
+                       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                            page, offset, syndrome,
+                                            csrow, -1, -1,
+                                            EDAC_MOD_STR,
+                                            "unknown syndrome - possible error reporting race",
+                                            NULL);
                        return;
                }
        } else {
@@ -1084,28 +1102,10 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
                channel = ((sys_addr & BIT(3)) != 0);
        }
 
-       /*
-        * Find out which node the error address belongs to. This may be
-        * different from the node that detected the error.
-        */
-       src_mci = find_mc_by_sys_addr(mci, sys_addr);
-       if (!src_mci) {
-               amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
-                            (unsigned long)sys_addr);
-               edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
-               return;
-       }
-
-       /* Now map the sys_addr to a CSROW */
-       csrow = sys_addr_to_csrow(src_mci, sys_addr);
-       if (csrow < 0) {
-               edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
-       } else {
-               error_address_to_page_and_offset(sys_addr, &page, &offset);
-
-               edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
-                                 channel, EDAC_MOD_STR);
-       }
+       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci,
+                            page, offset, syndrome,
+                            csrow, channel, -1,
+                            EDAC_MOD_STR, "", NULL);
 }
 
 static int ddr2_cs_size(unsigned i, bool dct_width)
@@ -1611,15 +1611,20 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
        u32 page, offset;
        int nid, csrow, chan = 0;
 
+       error_address_to_page_and_offset(sys_addr, &page, &offset);
+
        csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
 
        if (csrow < 0) {
-               edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                    page, offset, syndrome,
+                                    -1, -1, -1,
+                                    EDAC_MOD_STR,
+                                    "failed to map error addr to a csrow",
+                                    NULL);
                return;
        }
 
-       error_address_to_page_and_offset(sys_addr, &page, &offset);
-
        /*
         * We need the syndromes for channel detection only when we're
         * ganged. Otherwise @chan should already contain the channel at
@@ -1628,16 +1633,10 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
        if (dct_ganging_enabled(pvt))
                chan = get_channel_from_ecc_syndrome(mci, syndrome);
 
-       if (chan >= 0)
-               edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
-                                 EDAC_MOD_STR);
-       else
-               /*
-                * Channel unknown, report all channels on this CSROW as failed.
-                */
-               for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
-                       edac_mc_handle_ce(mci, page, offset, syndrome,
-                                         csrow, chan, EDAC_MOD_STR);
+       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                            page, offset, syndrome,
+                            csrow, chan, -1,
+                            EDAC_MOD_STR, "", NULL);
 }
 
 /*
@@ -1918,7 +1917,12 @@ static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
        /* Ensure that the Error Address is VALID */
        if (!(m->status & MCI_STATUS_ADDRV)) {
                amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
-               edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                    0, 0, 0,
+                                    -1, -1, -1,
+                                    EDAC_MOD_STR,
+                                    "HW has no ERROR_ADDRESS available",
+                                    NULL);
                return;
        }
 
@@ -1942,11 +1946,17 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
 
        if (!(m->status & MCI_STATUS_ADDRV)) {
                amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
-               edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                    0, 0, 0,
+                                    -1, -1, -1,
+                                    EDAC_MOD_STR,
+                                    "HW has no ERROR_ADDRESS available",
+                                    NULL);
                return;
        }
 
        sys_addr = get_error_address(m);
+       error_address_to_page_and_offset(sys_addr, &page, &offset);
 
        /*
         * Find out which node the error address belongs to. This may be
@@ -1956,7 +1966,11 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
        if (!src_mci) {
                amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
                                  (unsigned long)sys_addr);
-               edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                    page, offset, 0,
+                                    -1, -1, -1,
+                                    EDAC_MOD_STR,
+                                    "ERROR ADDRESS NOT mapped to a MC", NULL);
                return;
        }
 
@@ -1966,10 +1980,17 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
        if (csrow < 0) {
                amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
                                  (unsigned long)sys_addr);
-               edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                    page, offset, 0,
+                                    -1, -1, -1,
+                                    EDAC_MOD_STR,
+                                    "ERROR ADDRESS NOT mapped to CS",
+                                    NULL);
        } else {
-               error_address_to_page_and_offset(sys_addr, &page, &offset);
-               edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                    page, offset, 0,
+                                    csrow, -1, -1,
+                                    EDAC_MOD_STR, "", NULL);
        }
 }
 
@@ -2171,7 +2192,7 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
        nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
 
        debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
-       debugf0("    nr_pages= %u  channel-count = %d\n",
+       debugf0("    nr_pages/channel= %u  channel-count = %d\n",
                nr_pages, pvt->channel_count);
 
        return nr_pages;
@@ -2185,9 +2206,12 @@ static int init_csrows(struct mem_ctl_info *mci)
 {
        struct csrow_info *csrow;
        struct amd64_pvt *pvt = mci->pvt_info;
-       u64 input_addr_min, input_addr_max, sys_addr, base, mask;
+       u64 base, mask;
        u32 val;
-       int i, empty = 1;
+       int i, j, empty = 1;
+       enum mem_type mtype;
+       enum edac_type edac_mode;
+       int nr_pages = 0;
 
        amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
 
@@ -2211,41 +2235,32 @@ static int init_csrows(struct mem_ctl_info *mci)
 
                empty = 0;
                if (csrow_enabled(i, 0, pvt))
-                       csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
+                       nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
                if (csrow_enabled(i, 1, pvt))
-                       csrow->nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
-               find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
-               sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
-               csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
-               sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
-               csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
+                       nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
 
                get_cs_base_and_mask(pvt, i, 0, &base, &mask);
-               csrow->page_mask = ~mask;
                /* 8 bytes of resolution */
 
-               csrow->mtype = amd64_determine_memory_type(pvt, i);
+               mtype = amd64_determine_memory_type(pvt, i);
 
                debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
-               debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
-                       (unsigned long)input_addr_min,
-                       (unsigned long)input_addr_max);
-               debugf1("    sys_addr: 0x%lx  page_mask: 0x%lx\n",
-                       (unsigned long)sys_addr, csrow->page_mask);
-               debugf1("    nr_pages: %u  first_page: 0x%lx "
-                       "last_page: 0x%lx\n",
-                       (unsigned)csrow->nr_pages,
-                       csrow->first_page, csrow->last_page);
+               debugf1("    nr_pages: %u\n", nr_pages * pvt->channel_count);
 
                /*
                 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
                 */
                if (pvt->nbcfg & NBCFG_ECC_ENABLE)
-                       csrow->edac_mode =
-                           (pvt->nbcfg & NBCFG_CHIPKILL) ?
-                           EDAC_S4ECD4ED : EDAC_SECDED;
+                       edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
+                                   EDAC_S4ECD4ED : EDAC_SECDED;
                else
-                       csrow->edac_mode = EDAC_NONE;
+                       edac_mode = EDAC_NONE;
+
+               for (j = 0; j < pvt->channel_count; j++) {
+                       csrow->channels[j].dimm->mtype = mtype;
+                       csrow->channels[j].dimm->edac_mode = edac_mode;
+                       csrow->channels[j].dimm->nr_pages = nr_pages;
+               }
        }
 
        return empty;
@@ -2540,6 +2555,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
        struct amd64_pvt *pvt = NULL;
        struct amd64_family_type *fam_type = NULL;
        struct mem_ctl_info *mci = NULL;
+       struct edac_mc_layer layers[2];
        int err = 0, ret;
        u8 nid = get_node_id(F2);
 
@@ -2574,7 +2590,13 @@ static int amd64_init_one_instance(struct pci_dev *F2)
                goto err_siblings;
 
        ret = -ENOMEM;
-       mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = pvt->csels[0].b_cnt;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = pvt->channel_count;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
        if (!mci)
                goto err_siblings;
 
index f8fd3c807bde02c93de9d5e912cfc94bbd9b1916..9774d443fa57616a9f7e64ba06a2fae3e3bc2f92 100644 (file)
@@ -29,7 +29,6 @@
        edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
 
 #define AMD76X_NR_CSROWS 8
-#define AMD76X_NR_CHANS  1
 #define AMD76X_NR_DIMMS  4
 
 /* AMD 76x register addresses - device 0 function 0 - PCI bridge */
@@ -146,8 +145,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
 
                if (handle_errors) {
                        row = (info->ecc_mode_status >> 4) & 0xf;
-                       edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
-                                       row, mci->ctl_name);
+                       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                            mci->csrows[row].first_page, 0, 0,
+                                            row, 0, -1,
+                                            mci->ctl_name, "", NULL);
                }
        }
 
@@ -159,8 +160,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
 
                if (handle_errors) {
                        row = info->ecc_mode_status & 0xf;
-                       edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
-                                       0, row, 0, mci->ctl_name);
+                       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                            mci->csrows[row].first_page, 0, 0,
+                                            row, 0, -1,
+                                            mci->ctl_name, "", NULL);
                }
        }
 
@@ -186,11 +189,13 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
                        enum edac_type edac_mode)
 {
        struct csrow_info *csrow;
+       struct dimm_info *dimm;
        u32 mba, mba_base, mba_mask, dms;
        int index;
 
        for (index = 0; index < mci->nr_csrows; index++) {
                csrow = &mci->csrows[index];
+               dimm = csrow->channels[0].dimm;
 
                /* find the DRAM Chip Select Base address and mask */
                pci_read_config_dword(pdev,
@@ -203,13 +208,13 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
                mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
                pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
                csrow->first_page = mba_base >> PAGE_SHIFT;
-               csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
-               csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+               dimm->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
+               csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
                csrow->page_mask = mba_mask >> PAGE_SHIFT;
-               csrow->grain = csrow->nr_pages << PAGE_SHIFT;
-               csrow->mtype = MEM_RDDR;
-               csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
-               csrow->edac_mode = edac_mode;
+               dimm->grain = dimm->nr_pages << PAGE_SHIFT;
+               dimm->mtype = MEM_RDDR;
+               dimm->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
+               dimm->edac_mode = edac_mode;
        }
 }
 
@@ -230,7 +235,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
                EDAC_SECDED,
                EDAC_SECDED
        };
-       struct mem_ctl_info *mci = NULL;
+       struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[2];
        u32 ems;
        u32 ems_mode;
        struct amd76x_error_info discard;
@@ -238,11 +244,17 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
        debugf0("%s()\n", __func__);
        pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
        ems_mode = (ems >> 10) & 0x3;
-       mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS, 0);
 
-       if (mci == NULL) {
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = AMD76X_NR_CSROWS;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = 1;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
+
+       if (mci == NULL)
                return -ENOMEM;
-       }
 
        debugf0("%s(): mci = %p\n", __func__, mci);
        mci->dev = &pdev->dev;
index 9a6a274e6925f2c03edf2a92fda72b28f1d65aa6..69ee6aab5c716fefd0b919a5da4b46ef5c7c4a64 100644 (file)
@@ -48,8 +48,9 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
        syndrome = (ar & 0x000000001fe00000ul) >> 21;
 
        /* TODO: Decoding of the error address */
-       edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
-                         syndrome, 0, chan, "");
+       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                            csrow->first_page + pfn, offset, syndrome,
+                            0, chan, -1, "", "", NULL);
 }
 
 static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
@@ -69,7 +70,9 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
        offset = address & ~PAGE_MASK;
 
        /* TODO: Decoding of the error address */
-       edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
+       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                            csrow->first_page + pfn, offset, 0,
+                            0, chan, -1, "", "", NULL);
 }
 
 static void cell_edac_check(struct mem_ctl_info *mci)
@@ -124,8 +127,11 @@ static void cell_edac_check(struct mem_ctl_info *mci)
 static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
 {
        struct csrow_info               *csrow = &mci->csrows[0];
+       struct dimm_info                *dimm;
        struct cell_edac_priv           *priv = mci->pvt_info;
        struct device_node              *np;
+       int                             j;
+       u32                             nr_pages;
 
        for (np = NULL;
             (np = of_find_node_by_name(np, "memory")) != NULL;) {
@@ -140,15 +146,20 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
                if (of_node_to_nid(np) != priv->node)
                        continue;
                csrow->first_page = r.start >> PAGE_SHIFT;
-               csrow->nr_pages = resource_size(&r) >> PAGE_SHIFT;
-               csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
-               csrow->mtype = MEM_XDR;
-               csrow->edac_mode = EDAC_SECDED;
+               nr_pages = resource_size(&r) >> PAGE_SHIFT;
+               csrow->last_page = csrow->first_page + nr_pages - 1;
+
+               for (j = 0; j < csrow->nr_channels; j++) {
+                       dimm = csrow->channels[j].dimm;
+                       dimm->mtype = MEM_XDR;
+                       dimm->edac_mode = EDAC_SECDED;
+                       dimm->nr_pages = nr_pages / csrow->nr_channels;
+               }
                dev_dbg(mci->dev,
                        "Initialized on node %d, chanmask=0x%x,"
                        " first_page=0x%lx, nr_pages=0x%x\n",
                        priv->node, priv->chanmask,
-                       csrow->first_page, csrow->nr_pages);
+                       csrow->first_page, nr_pages);
                break;
        }
 }
@@ -157,9 +168,10 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
 {
        struct cbe_mic_tm_regs __iomem  *regs;
        struct mem_ctl_info             *mci;
+       struct edac_mc_layer            layers[2];
        struct cell_edac_priv           *priv;
        u64                             reg;
-       int                             rc, chanmask;
+       int                             rc, chanmask, num_chans;
 
        regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
        if (regs == NULL)
@@ -184,8 +196,16 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
                in_be64(&regs->mic_fir));
 
        /* Allocate & init EDAC MC data structure */
-       mci = edac_mc_alloc(sizeof(struct cell_edac_priv), 1,
-                           chanmask == 3 ? 2 : 1, pdev->id);
+       num_chans = chanmask == 3 ? 2 : 1;
+
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = 1;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = num_chans;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
+                           sizeof(struct cell_edac_priv));
        if (mci == NULL)
                return -ENOMEM;
        priv = mci->pvt_info;
index a774c0ddaf5b06f05e19ddacccf0423a824f1423..e22030a9de66fbc0c38fd2d0b274b1464dda7a2f 100644 (file)
@@ -329,9 +329,10 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
 {
        struct cpc925_mc_pdata *pdata = mci->pvt_info;
        struct csrow_info *csrow;
-       int index;
+       struct dimm_info *dimm;
+       int index, j;
        u32 mbmr, mbbar, bba;
-       unsigned long row_size, last_nr_pages = 0;
+       unsigned long row_size, nr_pages, last_nr_pages = 0;
 
        get_total_mem(pdata);
 
@@ -350,36 +351,41 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
 
                row_size = bba * (1UL << 28);   /* 256M */
                csrow->first_page = last_nr_pages;
-               csrow->nr_pages = row_size >> PAGE_SHIFT;
-               csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+               nr_pages = row_size >> PAGE_SHIFT;
+               csrow->last_page = csrow->first_page + nr_pages - 1;
                last_nr_pages = csrow->last_page + 1;
 
-               csrow->mtype = MEM_RDDR;
-               csrow->edac_mode = EDAC_SECDED;
-
-               switch (csrow->nr_channels) {
-               case 1: /* Single channel */
-                       csrow->grain = 32; /* four-beat burst of 32 bytes */
-                       break;
-               case 2: /* Dual channel */
-               default:
-                       csrow->grain = 64; /* four-beat burst of 64 bytes */
-                       break;
-               }
-
-               switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
-               case 6: /* 0110, no way to differentiate X8 VS X16 */
-               case 5: /* 0101 */
-               case 8: /* 1000 */
-                       csrow->dtype = DEV_X16;
-                       break;
-               case 7: /* 0111 */
-               case 9: /* 1001 */
-                       csrow->dtype = DEV_X8;
-                       break;
-               default:
-                       csrow->dtype = DEV_UNKNOWN;
-                       break;
+               for (j = 0; j < csrow->nr_channels; j++) {
+                       dimm = csrow->channels[j].dimm;
+
+                       dimm->nr_pages = nr_pages / csrow->nr_channels;
+                       dimm->mtype = MEM_RDDR;
+                       dimm->edac_mode = EDAC_SECDED;
+
+                       switch (csrow->nr_channels) {
+                       case 1: /* Single channel */
+                               dimm->grain = 32; /* four-beat burst of 32 bytes */
+                               break;
+                       case 2: /* Dual channel */
+                       default:
+                               dimm->grain = 64; /* four-beat burst of 64 bytes */
+                               break;
+                       }
+
+                       switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
+                       case 6: /* 0110, no way to differentiate X8 VS X16 */
+                       case 5: /* 0101 */
+                       case 8: /* 1000 */
+                               dimm->dtype = DEV_X16;
+                               break;
+                       case 7: /* 0111 */
+                       case 9: /* 1001 */
+                               dimm->dtype = DEV_X8;
+                               break;
+                       default:
+                               dimm->dtype = DEV_UNKNOWN;
+                               break;
+                       }
                }
        }
 }
@@ -549,13 +555,18 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
        if (apiexcp & CECC_EXCP_DETECTED) {
                cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
                channel = cpc925_mc_find_channel(mci, syndrome);
-               edac_mc_handle_ce(mci, pfn, offset, syndrome,
-                                 csrow, channel, mci->ctl_name);
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                    pfn, offset, syndrome,
+                                    csrow, channel, -1,
+                                    mci->ctl_name, "", NULL);
        }
 
        if (apiexcp & UECC_EXCP_DETECTED) {
                cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
-               edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name);
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                    pfn, offset, 0,
+                                    csrow, -1, -1,
+                                    mci->ctl_name, "", NULL);
        }
 
        cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
@@ -927,6 +938,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
 {
        static int edac_mc_idx;
        struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[2];
        void __iomem *vbase;
        struct cpc925_mc_pdata *pdata;
        struct resource *r;
@@ -962,9 +974,16 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
                goto err2;
        }
 
-       nr_channels = cpc925_mc_get_channels(vbase);
-       mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata),
-                       CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx);
+       nr_channels = cpc925_mc_get_channels(vbase) + 1;
+
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = CPC925_NR_CSROWS;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = nr_channels;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+                           sizeof(struct cpc925_mc_pdata));
        if (!mci) {
                cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
                res = -ENOMEM;
index 41223261ede9bb858cb5424431f9aeb04a70e36f..3186512c97393f80e1da0748cf496746ba91e52e 100644 (file)
@@ -4,7 +4,11 @@
  * This file may be distributed under the terms of the
  * GNU General Public License.
  *
- * See "enum e752x_chips" below for supported chipsets
+ * Implement support for the e7520, E7525, e7320 and i3100 memory controllers.
+ *
+ * Datasheets:
+ *     http://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
+ *     ftp://download.intel.com/design/intarch/datashts/31345803.pdf
  *
  * Written by Tom Zimmerman
  *
@@ -13,8 +17,6 @@
  *     Wang Zhenyu at intel.com
  *     Dave Jiang at mvista.com
  *
- * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
- *
  */
 
 #include <linux/module.h>
@@ -187,6 +189,25 @@ enum e752x_chips {
        I3100 = 3
 };
 
+/*
+ * Those chips Support single-rank and dual-rank memories only.
+ *
+ * On e752x chips, the odd rows are present only on dual-rank memories.
+ * Dividing the rank by two will provide the dimm#
+ *
+ * i3100 MC has a different mapping: it supports only 4 ranks.
+ *
+ * The mapping is (from 1 to n):
+ *     slot       single-ranked        double-ranked
+ *     dimm #1 -> rank #4              NA
+ *     dimm #2 -> rank #3              NA
+ *     dimm #3 -> rank #2              Ranks 2 and 3
+ *     dimm #4 -> rank #1              Ranks 1 and 4
+ *
+ * FIXME: The current mapping for i3100 considers that it supports up to 8
+ *       ranks/channel, but datasheet says that the MC supports only 4 ranks.
+ */
+
 struct e752x_pvt {
        struct pci_dev *bridge_ck;
        struct pci_dev *dev_d0f0;
@@ -350,8 +371,10 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
        channel = !(error_one & 1);
 
        /* e752x mc reads 34:6 of the DRAM linear address */
-       edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
-                       sec1_syndrome, row, channel, "e752x CE");
+       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                            page, offset_in_page(sec1_add << 4), sec1_syndrome,
+                            row, channel, -1,
+                            "e752x CE", "", NULL);
 }
 
 static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
@@ -385,9 +408,12 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
                        edac_mc_find_csrow_by_page(mci, block_page);
 
                /* e752x mc reads 34:6 of the DRAM linear address */
-               edac_mc_handle_ue(mci, block_page,
-                               offset_in_page(error_2b << 4),
-                               row, "e752x UE from Read");
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                       block_page,
+                                       offset_in_page(error_2b << 4), 0,
+                                        row, -1, -1,
+                                       "e752x UE from Read", "", NULL);
+
        }
        if (error_one & 0x0404) {
                error_2b = scrb_add;
@@ -401,9 +427,11 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
                        edac_mc_find_csrow_by_page(mci, block_page);
 
                /* e752x mc reads 34:6 of the DRAM linear address */
-               edac_mc_handle_ue(mci, block_page,
-                               offset_in_page(error_2b << 4),
-                               row, "e752x UE from Scruber");
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                       block_page,
+                                       offset_in_page(error_2b << 4), 0,
+                                       row, -1, -1,
+                                       "e752x UE from Scruber", "", NULL);
        }
 }
 
@@ -426,7 +454,9 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
                return;
 
        debugf3("%s()\n", __func__);
-       edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
+       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+                            -1, -1, -1,
+                            "e752x UE log memory write", "", NULL);
 }
 
 static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
@@ -1044,7 +1074,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
        int drc_drbg;           /* DRB granularity 0=64mb, 1=128mb */
        int drc_ddim;           /* DRAM Data Integrity Mode 0=none, 2=edac */
        u8 value;
-       u32 dra, drc, cumul_size;
+       u32 dra, drc, cumul_size, i, nr_pages;
 
        dra = 0;
        for (index = 0; index < 4; index++) {
@@ -1053,7 +1083,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
                dra |= dra_reg << (index * 8);
        }
        pci_read_config_dword(pdev, E752X_DRC, &drc);
-       drc_chan = dual_channel_active(ddrcsr);
+       drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
        drc_drbg = drc_chan + 1;        /* 128 in dual mode, 64 in single */
        drc_ddim = (drc >> 20) & 0x3;
 
@@ -1078,26 +1108,33 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
 
                csrow->first_page = last_cumul_size;
                csrow->last_page = cumul_size - 1;
-               csrow->nr_pages = cumul_size - last_cumul_size;
+               nr_pages = cumul_size - last_cumul_size;
                last_cumul_size = cumul_size;
-               csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
-               csrow->mtype = MEM_RDDR;        /* only one type supported */
-               csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
-
-               /*
-                * if single channel or x8 devices then SECDED
-                * if dual channel and x4 then S4ECD4ED
-                */
-               if (drc_ddim) {
-                       if (drc_chan && mem_dev) {
-                               csrow->edac_mode = EDAC_S4ECD4ED;
-                               mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
-                       } else {
-                               csrow->edac_mode = EDAC_SECDED;
-                               mci->edac_cap |= EDAC_FLAG_SECDED;
-                       }
-               } else
-                       csrow->edac_mode = EDAC_NONE;
+
+               for (i = 0; i < csrow->nr_channels; i++) {
+                       struct dimm_info *dimm = csrow->channels[i].dimm;
+
+                       debugf3("Initializing rank at (%i,%i)\n", index, i);
+                       dimm->nr_pages = nr_pages / csrow->nr_channels;
+                       dimm->grain = 1 << 12;  /* 4KiB - resolution of CELOG */
+                       dimm->mtype = MEM_RDDR; /* only one type supported */
+                       dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+                       /*
+                       * if single channel or x8 devices then SECDED
+                       * if dual channel and x4 then S4ECD4ED
+                       */
+                       if (drc_ddim) {
+                               if (drc_chan && mem_dev) {
+                                       dimm->edac_mode = EDAC_S4ECD4ED;
+                                       mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+                               } else {
+                                       dimm->edac_mode = EDAC_SECDED;
+                                       mci->edac_cap |= EDAC_FLAG_SECDED;
+                               }
+                       } else
+                               dimm->edac_mode = EDAC_NONE;
+               }
        }
 }
 
@@ -1226,6 +1263,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
        u16 pci_data;
        u8 stat8;
        struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[2];
        struct e752x_pvt *pvt;
        u16 ddrcsr;
        int drc_chan;           /* Number of channels 0=1chan,1=2chan */
@@ -1252,11 +1290,15 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
        /* Dual channel = 1, Single channel = 0 */
        drc_chan = dual_channel_active(ddrcsr);
 
-       mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
-
-       if (mci == NULL) {
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = E752X_NR_CSROWS;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = drc_chan + 1;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
+       if (mci == NULL)
                return -ENOMEM;
-       }
 
        debugf3("%s(): init mci\n", __func__);
        mci->mtype_cap = MEM_FLAG_RDDR;
index 68dea87b72e639448b2eb75ed0f219be3e6b9d39..9a9c1a5467977ca6bb69042651310b2830275757 100644 (file)
@@ -10,6 +10,9 @@
  * Based on work by Dan Hollis <goemon at anime dot net> and others.
  *     http://www.anime.net/~goemon/linux-ecc/
  *
+ * Datasheet:
+ *     http://www.intel.com/content/www/us/en/chipsets/e7501-chipset-memory-controller-hub-datasheet.html
+ *
  * Contributors:
  *     Eric Biederman (Linux Networx)
  *     Tom Zimmerman (Linux Networx)
@@ -71,7 +74,7 @@
 #endif                         /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
 
 #define E7XXX_NR_CSROWS                8       /* number of csrows */
-#define E7XXX_NR_DIMMS         8       /* FIXME - is this correct? */
+#define E7XXX_NR_DIMMS         8       /* 2 channels, 4 dimms/channel */
 
 /* E7XXX register addresses - device 0 function 0 */
 #define E7XXX_DRB              0x60    /* DRAM row boundary register (8b) */
@@ -216,13 +219,15 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
        row = edac_mc_find_csrow_by_page(mci, page);
        /* convert syndrome to channel */
        channel = e7xxx_find_channel(syndrome);
-       edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE");
+       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, page, 0, syndrome,
+                            row, channel, -1, "e7xxx CE", "", NULL);
 }
 
 static void process_ce_no_info(struct mem_ctl_info *mci)
 {
        debugf3("%s()\n", __func__);
-       edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
+       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
+                            "e7xxx CE log register overflow", "", NULL);
 }
 
 static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
@@ -236,13 +241,17 @@ static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
        /* FIXME - should use PAGE_SHIFT */
        block_page = error_2b >> 6;     /* convert to 4k address */
        row = edac_mc_find_csrow_by_page(mci, block_page);
-       edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
+
+       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, block_page, 0, 0,
+                            row, -1, -1, "e7xxx UE", "", NULL);
 }
 
 static void process_ue_no_info(struct mem_ctl_info *mci)
 {
        debugf3("%s()\n", __func__);
-       edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
+
+       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
+                            "e7xxx UE log register overflow", "", NULL);
 }
 
 static void e7xxx_get_error_info(struct mem_ctl_info *mci,
@@ -347,11 +356,12 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
                        int dev_idx, u32 drc)
 {
        unsigned long last_cumul_size;
-       int index;
+       int index, j;
        u8 value;
-       u32 dra, cumul_size;
+       u32 dra, cumul_size, nr_pages;
        int drc_chan, drc_drbg, drc_ddim, mem_dev;
        struct csrow_info *csrow;
+       struct dimm_info *dimm;
 
        pci_read_config_dword(pdev, E7XXX_DRA, &dra);
        drc_chan = dual_channel_active(drc, dev_idx);
@@ -379,26 +389,32 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
 
                csrow->first_page = last_cumul_size;
                csrow->last_page = cumul_size - 1;
-               csrow->nr_pages = cumul_size - last_cumul_size;
+               nr_pages = cumul_size - last_cumul_size;
                last_cumul_size = cumul_size;
-               csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
-               csrow->mtype = MEM_RDDR;        /* only one type supported */
-               csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
-
-               /*
-                * if single channel or x8 devices then SECDED
-                * if dual channel and x4 then S4ECD4ED
-                */
-               if (drc_ddim) {
-                       if (drc_chan && mem_dev) {
-                               csrow->edac_mode = EDAC_S4ECD4ED;
-                               mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
-                       } else {
-                               csrow->edac_mode = EDAC_SECDED;
-                               mci->edac_cap |= EDAC_FLAG_SECDED;
-                       }
-               } else
-                       csrow->edac_mode = EDAC_NONE;
+
+               for (j = 0; j < drc_chan + 1; j++) {
+                       dimm = csrow->channels[j].dimm;
+
+                       dimm->nr_pages = nr_pages / (drc_chan + 1);
+                       dimm->grain = 1 << 12;  /* 4KiB - resolution of CELOG */
+                       dimm->mtype = MEM_RDDR; /* only one type supported */
+                       dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+                       /*
+                       * if single channel or x8 devices then SECDED
+                       * if dual channel and x4 then S4ECD4ED
+                       */
+                       if (drc_ddim) {
+                               if (drc_chan && mem_dev) {
+                                       dimm->edac_mode = EDAC_S4ECD4ED;
+                                       mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+                               } else {
+                                       dimm->edac_mode = EDAC_SECDED;
+                                       mci->edac_cap |= EDAC_FLAG_SECDED;
+                               }
+                       } else
+                               dimm->edac_mode = EDAC_NONE;
+               }
        }
 }
 
@@ -406,6 +422,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
 {
        u16 pci_data;
        struct mem_ctl_info *mci = NULL;
+       struct edac_mc_layer layers[2];
        struct e7xxx_pvt *pvt = NULL;
        u32 drc;
        int drc_chan;
@@ -416,8 +433,21 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
        pci_read_config_dword(pdev, E7XXX_DRC, &drc);
 
        drc_chan = dual_channel_active(drc, dev_idx);
-       mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0);
-
+       /*
+        * According with the datasheet, this device has a maximum of
+        * 4 DIMMS per channel, either single-rank or dual-rank. So, the
+        * total amount of dimms is 8 (E7XXX_NR_DIMMS).
+        * That means that the DIMM is mapped as CSROWs, and the channel
+        * will map the rank. So, an error to either channel should be
+        * attributed to the same dimm.
+        */
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = E7XXX_NR_CSROWS;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = drc_chan + 1;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
        if (mci == NULL)
                return -ENOMEM;
 
index 5b739411d62f69b026bbd54f16006e8f0b957b42..117490d4f8359d0fbe9ab50729270e1e0424fe09 100644 (file)
@@ -447,8 +447,10 @@ static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
 
 #endif                         /* CONFIG_PCI */
 
-extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
-                                         unsigned nr_chans, int edac_index);
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+                                  unsigned n_layers,
+                                  struct edac_mc_layer *layers,
+                                  unsigned sz_pvt);
 extern int edac_mc_add_mc(struct mem_ctl_info *mci);
 extern void edac_mc_free(struct mem_ctl_info *mci);
 extern struct mem_ctl_info *edac_mc_find(int idx);
@@ -456,35 +458,17 @@ extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
 extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
 extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
                                      unsigned long page);
-
-/*
- * The no info errors are used when error overflows are reported.
- * There are a limited number of error logging registers that can
- * be exausted.  When all registers are exhausted and an additional
- * error occurs then an error overflow register records that an
- * error occurred and the type of error, but doesn't have any
- * further information.  The ce/ue versions make for cleaner
- * reporting logic and function interface - reduces conditional
- * statement clutter and extra function arguments.
- */
-extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
-                             unsigned long page_frame_number,
-                             unsigned long offset_in_page,
-                             unsigned long syndrome, int row, int channel,
-                             const char *msg);
-extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
-                                     const char *msg);
-extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
-                             unsigned long page_frame_number,
-                             unsigned long offset_in_page, int row,
-                             const char *msg);
-extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
-                                     const char *msg);
-extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow,
-                                 unsigned int channel0, unsigned int channel1,
-                                 char *msg);
-extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow,
-                                 unsigned int channel, char *msg);
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+                         struct mem_ctl_info *mci,
+                         const unsigned long page_frame_number,
+                         const unsigned long offset_in_page,
+                         const unsigned long syndrome,
+                         const int layer0,
+                         const int layer1,
+                         const int layer2,
+                         const char *msg,
+                         const char *other_detail,
+                         const void *mcelog);
 
 /*
  * edac_device APIs
@@ -496,6 +480,7 @@ extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
 extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
                                int inst_nr, int block_nr, const char *msg);
 extern int edac_device_alloc_index(void);
+extern const char *edac_layer_name[];
 
 /*
  * edac_pci APIs
index 45b8f4bdd773ca324c0e5f04f1c3c4a883bd03be..ee3f1f810c1e094c27dbd012d51c4cdfa9b4ee55 100644 (file)
@@ -79,7 +79,7 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
        unsigned total_size;
        unsigned count;
        unsigned instance, block, attr;
-       void *pvt;
+       void *pvt, *p;
        int err;
 
        debugf4("%s() instances=%d blocks=%d\n",
@@ -92,35 +92,30 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
         * to be at least as stringent as what the compiler would
         * provide if we could simply hardcode everything into a single struct.
         */
-       dev_ctl = (struct edac_device_ctl_info *)NULL;
+       p = NULL;
+       dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1);
 
        /* Calc the 'end' offset past end of ONE ctl_info structure
         * which will become the start of the 'instance' array
         */
-       dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));
+       dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances);
 
        /* Calc the 'end' offset past the instance array within the ctl_info
         * which will become the start of the block array
         */
-       dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));
+       count = nr_instances * nr_blocks;
+       dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count);
 
        /* Calc the 'end' offset past the dev_blk array
         * which will become the start of the attrib array, if any.
         */
-       count = nr_instances * nr_blocks;
-       dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));
-
-       /* Check for case of when an attribute array is specified */
-       if (nr_attrib > 0) {
-               /* calc how many nr_attrib we need */
+       /* calc how many nr_attrib we need */
+       if (nr_attrib > 0)
                count *= nr_attrib;
+       dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count);
 
-               /* Calc the 'end' offset past the attributes array */
-               pvt = edac_align_ptr(&dev_attrib[count], sz_private);
-       } else {
-               /* no attribute array specified */
-               pvt = edac_align_ptr(dev_attrib, sz_private);
-       }
+       /* Calc the 'end' offset past the attributes array */
+       pvt = edac_align_ptr(&p, sz_private, 1);
 
        /* 'pvt' now points to where the private data area is.
         * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
index feef7733fae7702733feb642bfcf0ac7c431f484..10f375032e9686f7c719c857ceca5dd3f9ef81ed 100644 (file)
@@ -43,9 +43,26 @@ static void edac_mc_dump_channel(struct rank_info *chan)
 {
        debugf4("\tchannel = %p\n", chan);
        debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
-       debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
-       debugf4("\tchannel->label = '%s'\n", chan->label);
        debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
+       debugf4("\tchannel->dimm = %p\n", chan->dimm);
+}
+
+static void edac_mc_dump_dimm(struct dimm_info *dimm)
+{
+       int i;
+
+       debugf4("\tdimm = %p\n", dimm);
+       debugf4("\tdimm->label = '%s'\n", dimm->label);
+       debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
+       debugf4("\tdimm location ");
+       for (i = 0; i < dimm->mci->n_layers; i++) {
+               printk(KERN_CONT "%d", dimm->location[i]);
+               if (i < dimm->mci->n_layers - 1)
+                       printk(KERN_CONT ".");
+       }
+       printk(KERN_CONT "\n");
+       debugf4("\tdimm->grain = %d\n", dimm->grain);
+       debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
 }
 
 static void edac_mc_dump_csrow(struct csrow_info *csrow)
@@ -55,7 +72,6 @@ static void edac_mc_dump_csrow(struct csrow_info *csrow)
        debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
        debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
        debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
-       debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
        debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
        debugf4("\tcsrow->channels = %p\n", csrow->channels);
        debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
@@ -70,6 +86,8 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci)
        debugf4("\tmci->edac_check = %p\n", mci->edac_check);
        debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
                mci->nr_csrows, mci->csrows);
+       debugf3("\tmci->nr_dimms = %d, dimms = %p\n",
+               mci->tot_dimms, mci->dimms);
        debugf3("\tdev = %p\n", mci->dev);
        debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
        debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
@@ -101,18 +119,37 @@ const char *edac_mem_types[] = {
 };
 EXPORT_SYMBOL_GPL(edac_mem_types);
 
-/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
- * Adjust 'ptr' so that its alignment is at least as stringent as what the
- * compiler would provide for X and return the aligned result.
+/**
+ * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
+ * @p:         pointer to a pointer with the memory offset to be used. At
+ *             return, this will be incremented to point to the next offset
+ * @size:      Size of the data structure to be reserved
+ * @n_elems:   Number of elements that should be reserved
  *
  * If 'size' is a constant, the compiler will optimize this whole function
- * down to either a no-op or the addition of a constant to the value of 'ptr'.
+ * down to either a no-op or the addition of a constant to the value of '*p'.
+ *
+ * The 'p' pointer is absolutely needed to keep the proper advancing
+ * further in memory to the proper offsets when allocating the struct along
+ * with its embedded structs, as edac_device_alloc_ctl_info() does it
+ * above, for example.
+ *
+ * At return, the pointer 'p' will be incremented to be used on a next call
+ * to this function.
  */
-void *edac_align_ptr(void *ptr, unsigned size)
+void *edac_align_ptr(void **p, unsigned size, int n_elems)
 {
        unsigned align, r;
+       void *ptr = *p;
+
+       *p += size * n_elems;
 
-       /* Here we assume that the alignment of a "long long" is the most
+       /*
+        * 'p' can possibly be an unaligned item X such that sizeof(X) is
+        * 'size'.  Adjust 'p' so that its alignment is at least as
+        * stringent as what the compiler would provide for X and return
+        * the aligned result.
+        * Here we assume that the alignment of a "long long" is the most
         * stringent alignment that the compiler will ever provide by default.
         * As far as I know, this is a reasonable assumption.
         */
@@ -132,14 +169,18 @@ void *edac_align_ptr(void *ptr, unsigned size)
        if (r == 0)
                return (char *)ptr;
 
+       *p += align - r;
+
        return (void *)(((unsigned long)ptr) + align - r);
 }
 
 /**
- * edac_mc_alloc: Allocate a struct mem_ctl_info structure
- * @size_pvt:  size of private storage needed
- * @nr_csrows: Number of CWROWS needed for this MC
- * @nr_chans:  Number of channels for the MC
+ * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
+ * @mc_num:            Memory controller number
+ * @n_layers:          Number of MC hierarchy layers
+ * @layers:            Describes each layer as seen by the Memory Controller
+ * @sz_pvt:            size of private storage needed
+ *
  *
  * Everything is kmalloc'ed as one big chunk - more efficient.
  * Only can be used if all structures have the same lifetime - otherwise
@@ -147,32 +188,77 @@ void *edac_align_ptr(void *ptr, unsigned size)
  *
  * Use edac_mc_free() to free mc structures allocated by this function.
  *
+ * NOTE: drivers handle multi-rank memories in different ways: in some
+ * drivers, one multi-rank memory stick is mapped as one entry, while, in
+ * others, a single multi-rank memory stick would be mapped into several
+ * entries. Currently, this function will allocate multiple struct dimm_info
+ * in such scenarios, as grouping the multiple ranks requires driver changes.
+ *
  * Returns:
- *     NULL allocation failed
- *     struct mem_ctl_info pointer
+ *     On failure: NULL
+ *     On success: struct mem_ctl_info pointer
  */
-struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
-                               unsigned nr_chans, int edac_index)
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+                                  unsigned n_layers,
+                                  struct edac_mc_layer *layers,
+                                  unsigned sz_pvt)
 {
        struct mem_ctl_info *mci;
-       struct csrow_info *csi, *csrow;
+       struct edac_mc_layer *layer;
+       struct csrow_info *csi, *csr;
        struct rank_info *chi, *chp, *chan;
-       void *pvt;
-       unsigned size;
-       int row, chn;
-       int err;
+       struct dimm_info *dimm;
+       u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
+       unsigned pos[EDAC_MAX_LAYERS];
+       unsigned size, tot_dimms = 1, count = 1;
+       unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
+       void *pvt, *p, *ptr = NULL;
+       int i, j, err, row, chn, n, len;
+       bool per_rank = false;
+
+       BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
+       /*
+        * Calculate the total amount of dimms and csrows/cschannels while
+        * in the old API emulation mode
+        */
+       for (i = 0; i < n_layers; i++) {
+               tot_dimms *= layers[i].size;
+               if (layers[i].is_virt_csrow)
+                       tot_csrows *= layers[i].size;
+               else
+                       tot_channels *= layers[i].size;
+
+               if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
+                       per_rank = true;
+       }
 
        /* Figure out the offsets of the various items from the start of an mc
         * structure.  We want the alignment of each item to be at least as
         * stringent as what the compiler would provide if we could simply
         * hardcode everything into a single struct.
         */
-       mci = (struct mem_ctl_info *)0;
-       csi = edac_align_ptr(&mci[1], sizeof(*csi));
-       chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
-       pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
+       mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
+       layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
+       csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows);
+       chi = edac_align_ptr(&ptr, sizeof(*chi), tot_csrows * tot_channels);
+       dimm = edac_align_ptr(&ptr, sizeof(*dimm), tot_dimms);
+       for (i = 0; i < n_layers; i++) {
+               count *= layers[i].size;
+               debugf4("%s: errcount layer %d size %d\n", __func__, i, count);
+               ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
+               ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
+               tot_errcount += 2 * count;
+       }
+
+       debugf4("%s: allocating %d error counters\n", __func__, tot_errcount);
+       pvt = edac_align_ptr(&ptr, sz_pvt, 1);
        size = ((unsigned long)pvt) + sz_pvt;
 
+       debugf1("%s(): allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
+               __func__, size,
+               tot_dimms,
+               per_rank ? "ranks" : "dimms",
+               tot_csrows * tot_channels);
        mci = kzalloc(size, GFP_KERNEL);
        if (mci == NULL)
                return NULL;
@@ -180,28 +266,103 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
        /* Adjust pointers so they point within the memory we just allocated
         * rather than an imaginary chunk of memory located at address 0.
         */
+       layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
        csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
        chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi));
+       dimm = (struct dimm_info *)(((char *)mci) + ((unsigned long)dimm));
+       for (i = 0; i < n_layers; i++) {
+               mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
+               mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
+       }
        pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
 
        /* setup index and various internal pointers */
-       mci->mc_idx = edac_index;
+       mci->mc_idx = mc_num;
        mci->csrows = csi;
+       mci->dimms  = dimm;
+       mci->tot_dimms = tot_dimms;
        mci->pvt_info = pvt;
-       mci->nr_csrows = nr_csrows;
-
-       for (row = 0; row < nr_csrows; row++) {
-               csrow = &csi[row];
-               csrow->csrow_idx = row;
-               csrow->mci = mci;
-               csrow->nr_channels = nr_chans;
-               chp = &chi[row * nr_chans];
-               csrow->channels = chp;
+       mci->n_layers = n_layers;
+       mci->layers = layer;
+       memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
+       mci->nr_csrows = tot_csrows;
+       mci->num_cschannel = tot_channels;
+       mci->mem_is_per_rank = per_rank;
 
-               for (chn = 0; chn < nr_chans; chn++) {
+       /*
+        * Fill the csrow struct
+        */
+       for (row = 0; row < tot_csrows; row++) {
+               csr = &csi[row];
+               csr->csrow_idx = row;
+               csr->mci = mci;
+               csr->nr_channels = tot_channels;
+               chp = &chi[row * tot_channels];
+               csr->channels = chp;
+
+               for (chn = 0; chn < tot_channels; chn++) {
                        chan = &chp[chn];
                        chan->chan_idx = chn;
-                       chan->csrow = csrow;
+                       chan->csrow = csr;
+               }
+       }
+
+       /*
+        * Fill the dimm struct
+        */
+       memset(&pos, 0, sizeof(pos));
+       row = 0;
+       chn = 0;
+       debugf4("%s: initializing %d %s\n", __func__, tot_dimms,
+               per_rank ? "ranks" : "dimms");
+       for (i = 0; i < tot_dimms; i++) {
+               chan = &csi[row].channels[chn];
+               dimm = EDAC_DIMM_PTR(layer, mci->dimms, n_layers,
+                              pos[0], pos[1], pos[2]);
+               dimm->mci = mci;
+
+               debugf2("%s: %d: %s%zd (%d:%d:%d): row %d, chan %d\n", __func__,
+                       i, per_rank ? "rank" : "dimm", (dimm - mci->dimms),
+                       pos[0], pos[1], pos[2], row, chn);
+
+               /*
+                * Copy DIMM location and initialize it.
+                */
+               len = sizeof(dimm->label);
+               p = dimm->label;
+               n = snprintf(p, len, "mc#%u", mc_num);
+               p += n;
+               len -= n;
+               for (j = 0; j < n_layers; j++) {
+                       n = snprintf(p, len, "%s#%u",
+                                    edac_layer_name[layers[j].type],
+                                    pos[j]);
+                       p += n;
+                       len -= n;
+                       dimm->location[j] = pos[j];
+
+                       if (len <= 0)
+                               break;
+               }
+
+               /* Link it to the csrows old API data */
+               chan->dimm = dimm;
+               dimm->csrow = row;
+               dimm->cschannel = chn;
+
+               /* Increment csrow location */
+               row++;
+               if (row == tot_csrows) {
+                       row = 0;
+                       chn++;
+               }
+
+               /* Increment dimm location */
+               for (j = n_layers - 1; j >= 0; j--) {
+                       pos[j]++;
+                       if (pos[j] < layers[j].size)
+                               break;
+                       pos[j] = 0;
                }
        }
 
@@ -490,7 +651,6 @@ EXPORT_SYMBOL(edac_mc_find);
  * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
  *                 create sysfs entries associated with mci structure
  * @mci: pointer to the mci structure to be added to the list
- * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure.
  *
  * Return:
  *     0       Success
@@ -517,6 +677,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
                                edac_mc_dump_channel(&mci->csrows[i].
                                                channels[j]);
                }
+               for (i = 0; i < mci->tot_dimms; i++)
+                       edac_mc_dump_dimm(&mci->dimms[i]);
        }
 #endif
        mutex_lock(&mem_ctls_mutex);
@@ -636,15 +798,19 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
 int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
 {
        struct csrow_info *csrows = mci->csrows;
-       int row, i;
+       int row, i, j, n;
 
        debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
        row = -1;
 
        for (i = 0; i < mci->nr_csrows; i++) {
                struct csrow_info *csrow = &csrows[i];
-
-               if (csrow->nr_pages == 0)
+               n = 0;
+               for (j = 0; j < csrow->nr_channels; j++) {
+                       struct dimm_info *dimm = csrow->channels[j].dimm;
+                       n += dimm->nr_pages;
+               }
+               if (n == 0)
                        continue;
 
                debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
@@ -670,249 +836,307 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
 }
 EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
 
-/* FIXME - setable log (warning/emerg) levels */
-/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
-void edac_mc_handle_ce(struct mem_ctl_info *mci,
-               unsigned long page_frame_number,
-               unsigned long offset_in_page, unsigned long syndrome,
-               int row, int channel, const char *msg)
-{
-       unsigned long remapped_page;
+const char *edac_layer_name[] = {
+       [EDAC_MC_LAYER_BRANCH] = "branch",
+       [EDAC_MC_LAYER_CHANNEL] = "channel",
+       [EDAC_MC_LAYER_SLOT] = "slot",
+       [EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
+};
+EXPORT_SYMBOL_GPL(edac_layer_name);
 
-       debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
+static void edac_inc_ce_error(struct mem_ctl_info *mci,
+                                   bool enable_per_layer_report,
+                                   const int pos[EDAC_MAX_LAYERS])
+{
+       int i, index = 0;
 
-       /* FIXME - maybe make panic on INTERNAL ERROR an option */
-       if (row >= mci->nr_csrows || row < 0) {
-               /* something is wrong */
-               edac_mc_printk(mci, KERN_ERR,
-                       "INTERNAL ERROR: row out of range "
-                       "(%d >= %d)\n", row, mci->nr_csrows);
-               edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
-               return;
-       }
+       mci->ce_mc++;
 
-       if (channel >= mci->csrows[row].nr_channels || channel < 0) {
-               /* something is wrong */
-               edac_mc_printk(mci, KERN_ERR,
-                       "INTERNAL ERROR: channel out of range "
-                       "(%d >= %d)\n", channel,
-                       mci->csrows[row].nr_channels);
-               edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+       if (!enable_per_layer_report) {
+               mci->ce_noinfo_count++;
                return;
        }
 
-       if (edac_mc_get_log_ce())
-               /* FIXME - put in DIMM location */
-               edac_mc_printk(mci, KERN_WARNING,
-                       "CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
-                       "0x%lx, row %d, channel %d, label \"%s\": %s\n",
-                       page_frame_number, offset_in_page,
-                       mci->csrows[row].grain, syndrome, row, channel,
-                       mci->csrows[row].channels[channel].label, msg);
-
-       mci->ce_count++;
-       mci->csrows[row].ce_count++;
-       mci->csrows[row].channels[channel].ce_count++;
-
-       if (mci->scrub_mode & SCRUB_SW_SRC) {
-               /*
-                * Some MC's can remap memory so that it is still available
-                * at a different address when PCI devices map into memory.
-                * MC's that can't do this lose the memory where PCI devices
-                * are mapped.  This mapping is MC dependent and so we call
-                * back into the MC driver for it to map the MC page to
-                * a physical (CPU) page which can then be mapped to a virtual
-                * page - which can then be scrubbed.
-                */
-               remapped_page = mci->ctl_page_to_phys ?
-                       mci->ctl_page_to_phys(mci, page_frame_number) :
-                       page_frame_number;
+       for (i = 0; i < mci->n_layers; i++) {
+               if (pos[i] < 0)
+                       break;
+               index += pos[i];
+               mci->ce_per_layer[i][index]++;
 
-               edac_mc_scrub_block(remapped_page, offset_in_page,
-                               mci->csrows[row].grain);
+               if (i < mci->n_layers - 1)
+                       index *= mci->layers[i + 1].size;
        }
 }
-EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
 
-void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
+static void edac_inc_ue_error(struct mem_ctl_info *mci,
+                                   bool enable_per_layer_report,
+                                   const int pos[EDAC_MAX_LAYERS])
 {
-       if (edac_mc_get_log_ce())
-               edac_mc_printk(mci, KERN_WARNING,
-                       "CE - no information available: %s\n", msg);
+       int i, index = 0;
 
-       mci->ce_noinfo_count++;
-       mci->ce_count++;
-}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
+       mci->ue_mc++;
 
-void edac_mc_handle_ue(struct mem_ctl_info *mci,
-               unsigned long page_frame_number,
-               unsigned long offset_in_page, int row, const char *msg)
-{
-       int len = EDAC_MC_LABEL_LEN * 4;
-       char labels[len + 1];
-       char *pos = labels;
-       int chan;
-       int chars;
-
-       debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
-
-       /* FIXME - maybe make panic on INTERNAL ERROR an option */
-       if (row >= mci->nr_csrows || row < 0) {
-               /* something is wrong */
-               edac_mc_printk(mci, KERN_ERR,
-                       "INTERNAL ERROR: row out of range "
-                       "(%d >= %d)\n", row, mci->nr_csrows);
-               edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+       if (!enable_per_layer_report) {
+               mci->ce_noinfo_count++;
                return;
        }
 
-       chars = snprintf(pos, len + 1, "%s",
-                        mci->csrows[row].channels[0].label);
-       len -= chars;
-       pos += chars;
+       for (i = 0; i < mci->n_layers; i++) {
+               if (pos[i] < 0)
+                       break;
+               index += pos[i];
+               mci->ue_per_layer[i][index]++;
 
-       for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
-               chan++) {
-               chars = snprintf(pos, len + 1, ":%s",
-                                mci->csrows[row].channels[chan].label);
-               len -= chars;
-               pos += chars;
+               if (i < mci->n_layers - 1)
+                       index *= mci->layers[i + 1].size;
        }
+}
 
-       if (edac_mc_get_log_ue())
-               edac_mc_printk(mci, KERN_EMERG,
-                       "UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
-                       "labels \"%s\": %s\n", page_frame_number,
-                       offset_in_page, mci->csrows[row].grain, row,
-                       labels, msg);
+static void edac_ce_error(struct mem_ctl_info *mci,
+                         const int pos[EDAC_MAX_LAYERS],
+                         const char *msg,
+                         const char *location,
+                         const char *label,
+                         const char *detail,
+                         const char *other_detail,
+                         const bool enable_per_layer_report,
+                         const unsigned long page_frame_number,
+                         const unsigned long offset_in_page,
+                         u32 grain)
+{
+       unsigned long remapped_page;
 
-       if (edac_mc_get_panic_on_ue())
-               panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
-                       "row %d, labels \"%s\": %s\n", mci->mc_idx,
-                       page_frame_number, offset_in_page,
-                       mci->csrows[row].grain, row, labels, msg);
+       if (edac_mc_get_log_ce()) {
+               if (other_detail && *other_detail)
+                       edac_mc_printk(mci, KERN_WARNING,
+                                      "CE %s on %s (%s%s - %s)\n",
+                                      msg, label, location,
+                                      detail, other_detail);
+               else
+                       edac_mc_printk(mci, KERN_WARNING,
+                                      "CE %s on %s (%s%s)\n",
+                                      msg, label, location,
+                                      detail);
+       }
+       edac_inc_ce_error(mci, enable_per_layer_report, pos);
 
-       mci->ue_count++;
-       mci->csrows[row].ue_count++;
+       if (mci->scrub_mode & SCRUB_SW_SRC) {
+               /*
+                       * Some memory controllers (called MCs below) can remap
+                       * memory so that it is still available at a different
+                       * address when PCI devices map into memory.
+                       * MC's that can't do this, lose the memory where PCI
+                       * devices are mapped. This mapping is MC-dependent
+                       * and so we call back into the MC driver for it to
+                       * map the MC page to a physical (CPU) page which can
+                       * then be mapped to a virtual page - which can then
+                       * be scrubbed.
+                       */
+               remapped_page = mci->ctl_page_to_phys ?
+                       mci->ctl_page_to_phys(mci, page_frame_number) :
+                       page_frame_number;
+
+               edac_mc_scrub_block(remapped_page,
+                                       offset_in_page, grain);
+       }
 }
-EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
 
-void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
+static void edac_ue_error(struct mem_ctl_info *mci,
+                         const int pos[EDAC_MAX_LAYERS],
+                         const char *msg,
+                         const char *location,
+                         const char *label,
+                         const char *detail,
+                         const char *other_detail,
+                         const bool enable_per_layer_report)
 {
-       if (edac_mc_get_panic_on_ue())
-               panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
+       if (edac_mc_get_log_ue()) {
+               if (other_detail && *other_detail)
+                       edac_mc_printk(mci, KERN_WARNING,
+                                      "UE %s on %s (%s%s - %s)\n",
+                                      msg, label, location, detail,
+                                      other_detail);
+               else
+                       edac_mc_printk(mci, KERN_WARNING,
+                                      "UE %s on %s (%s%s)\n",
+                                      msg, label, location, detail);
+       }
 
-       if (edac_mc_get_log_ue())
-               edac_mc_printk(mci, KERN_WARNING,
-                       "UE - no information available: %s\n", msg);
-       mci->ue_noinfo_count++;
-       mci->ue_count++;
+       if (edac_mc_get_panic_on_ue()) {
+               if (other_detail && *other_detail)
+                       panic("UE %s on %s (%s%s - %s)\n",
+                             msg, label, location, detail, other_detail);
+               else
+                       panic("UE %s on %s (%s%s)\n",
+                             msg, label, location, detail);
+       }
+
+       edac_inc_ue_error(mci, enable_per_layer_report, pos);
 }
-EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
 
-/*************************************************************
- * On Fully Buffered DIMM modules, this help function is
- * called to process UE events
- */
-void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
-                       unsigned int csrow,
-                       unsigned int channela,
-                       unsigned int channelb, char *msg)
+#define OTHER_LABEL " or "
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+                         struct mem_ctl_info *mci,
+                         const unsigned long page_frame_number,
+                         const unsigned long offset_in_page,
+                         const unsigned long syndrome,
+                         const int layer0,
+                         const int layer1,
+                         const int layer2,
+                         const char *msg,
+                         const char *other_detail,
+                         const void *mcelog)
 {
-       int len = EDAC_MC_LABEL_LEN * 4;
-       char labels[len + 1];
-       char *pos = labels;
-       int chars;
+       /* FIXME: too much for stack: move it to some pre-allocated area */
+       char detail[80], location[80];
+       char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
+       char *p;
+       int row = -1, chan = -1;
+       int pos[EDAC_MAX_LAYERS] = { layer0, layer1, layer2 };
+       int i;
+       u32 grain;
+       bool enable_per_layer_report = false;
 
-       if (csrow >= mci->nr_csrows) {
-               /* something is wrong */
-               edac_mc_printk(mci, KERN_ERR,
-                       "INTERNAL ERROR: row out of range (%d >= %d)\n",
-                       csrow, mci->nr_csrows);
-               edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
-               return;
-       }
+       debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
 
-       if (channela >= mci->csrows[csrow].nr_channels) {
-               /* something is wrong */
-               edac_mc_printk(mci, KERN_ERR,
-                       "INTERNAL ERROR: channel-a out of range "
-                       "(%d >= %d)\n",
-                       channela, mci->csrows[csrow].nr_channels);
-               edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
-               return;
+       /*
+        * Check if the event report is consistent and if the memory
+        * location is known. If it is known, enable_per_layer_report will be
+        * true, the DIMM(s) label info will be filled and the per-layer
+        * error counters will be incremented.
+        */
+       for (i = 0; i < mci->n_layers; i++) {
+               if (pos[i] >= (int)mci->layers[i].size) {
+                       if (type == HW_EVENT_ERR_CORRECTED)
+                               p = "CE";
+                       else
+                               p = "UE";
+
+                       edac_mc_printk(mci, KERN_ERR,
+                                      "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
+                                      edac_layer_name[mci->layers[i].type],
+                                      pos[i], mci->layers[i].size);
+                       /*
+                        * Instead of just returning it, let's use what's
+                        * known about the error. The increment routines and
+                        * the DIMM filter logic will do the right thing by
+                        * pointing to the likely damaged DIMMs.
+                        */
+                       pos[i] = -1;
+               }
+               if (pos[i] >= 0)
+                       enable_per_layer_report = true;
        }
 
-       if (channelb >= mci->csrows[csrow].nr_channels) {
-               /* something is wrong */
-               edac_mc_printk(mci, KERN_ERR,
-                       "INTERNAL ERROR: channel-b out of range "
-                       "(%d >= %d)\n",
-                       channelb, mci->csrows[csrow].nr_channels);
-               edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
-               return;
-       }
+       /*
+        * Get the dimm label/grain that applies to the match criteria.
+        * As the error algorithm may not be able to point to just one memory
+        * stick, the logic here will get all possible labels that could
+        * potentially be affected by the error.
+        * On FB-DIMM memory controllers, for uncorrected errors, it is common
+        * to have only the MC channel and the MC dimm (also called "branch")
+        * but the channel is not known, as the memory is arranged in pairs,
+        * where each memory belongs to a separate channel within the same
+        * branch.
+        */
+       grain = 0;
+       p = label;
+       *p = '\0';
+       for (i = 0; i < mci->tot_dimms; i++) {
+               struct dimm_info *dimm = &mci->dimms[i];
 
-       mci->ue_count++;
-       mci->csrows[csrow].ue_count++;
+               if (layer0 >= 0 && layer0 != dimm->location[0])
+                       continue;
+               if (layer1 >= 0 && layer1 != dimm->location[1])
+                       continue;
+               if (layer2 >= 0 && layer2 != dimm->location[2])
+                       continue;
 
-       /* Generate the DIMM labels from the specified channels */
-       chars = snprintf(pos, len + 1, "%s",
-                        mci->csrows[csrow].channels[channela].label);
-       len -= chars;
-       pos += chars;
-       chars = snprintf(pos, len + 1, "-%s",
-                        mci->csrows[csrow].channels[channelb].label);
+               /* get the max grain, over the error match range */
+               if (dimm->grain > grain)
+                       grain = dimm->grain;
 
-       if (edac_mc_get_log_ue())
-               edac_mc_printk(mci, KERN_EMERG,
-                       "UE row %d, channel-a= %d channel-b= %d "
-                       "labels \"%s\": %s\n", csrow, channela, channelb,
-                       labels, msg);
+               /*
+                * If the error is memory-controller wide, there's no need to
+                * seek for the affected DIMMs because the whole
+                * channel/memory controller/...  may be affected.
+                * Also, don't show errors for empty DIMM slots.
+                */
+               if (enable_per_layer_report && dimm->nr_pages) {
+                       if (p != label) {
+                               strcpy(p, OTHER_LABEL);
+                               p += strlen(OTHER_LABEL);
+                       }
+                       strcpy(p, dimm->label);
+                       p += strlen(p);
+                       *p = '\0';
+
+                       /*
+                        * get csrow/channel of the DIMM, in order to allow
+                        * incrementing the compat API counters
+                        */
+                       debugf4("%s: %s csrows map: (%d,%d)\n",
+                               __func__,
+                               mci->mem_is_per_rank ? "rank" : "dimm",
+                               dimm->csrow, dimm->cschannel);
+
+                       if (row == -1)
+                               row = dimm->csrow;
+                       else if (row >= 0 && row != dimm->csrow)
+                               row = -2;
+
+                       if (chan == -1)
+                               chan = dimm->cschannel;
+                       else if (chan >= 0 && chan != dimm->cschannel)
+                               chan = -2;
+               }
+       }
 
-       if (edac_mc_get_panic_on_ue())
-               panic("UE row %d, channel-a= %d channel-b= %d "
-                       "labels \"%s\": %s\n", csrow, channela,
-                       channelb, labels, msg);
-}
-EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
+       if (!enable_per_layer_report) {
+               strcpy(label, "any memory");
+       } else {
+               debugf4("%s: csrow/channel to increment: (%d,%d)\n",
+                       __func__, row, chan);
+               if (p == label)
+                       strcpy(label, "unknown memory");
+               if (type == HW_EVENT_ERR_CORRECTED) {
+                       if (row >= 0) {
+                               mci->csrows[row].ce_count++;
+                               if (chan >= 0)
+                                       mci->csrows[row].channels[chan].ce_count++;
+                       }
+               } else
+                       if (row >= 0)
+                               mci->csrows[row].ue_count++;
+       }
 
-/*************************************************************
- * On Fully Buffered DIMM modules, this help function is
- * called to process CE events
- */
-void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
-                       unsigned int csrow, unsigned int channel, char *msg)
-{
+       /* Fill the RAM location data */
+       p = location;
+       for (i = 0; i < mci->n_layers; i++) {
+               if (pos[i] < 0)
+                       continue;
 
-       /* Ensure boundary values */
-       if (csrow >= mci->nr_csrows) {
-               /* something is wrong */
-               edac_mc_printk(mci, KERN_ERR,
-                       "INTERNAL ERROR: row out of range (%d >= %d)\n",
-                       csrow, mci->nr_csrows);
-               edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
-               return;
-       }
-       if (channel >= mci->csrows[csrow].nr_channels) {
-               /* something is wrong */
-               edac_mc_printk(mci, KERN_ERR,
-                       "INTERNAL ERROR: channel out of range (%d >= %d)\n",
-                       channel, mci->csrows[csrow].nr_channels);
-               edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
-               return;
+               p += sprintf(p, "%s:%d ",
+                            edac_layer_name[mci->layers[i].type],
+                            pos[i]);
        }
 
-       if (edac_mc_get_log_ce())
-               /* FIXME - put in DIMM location */
-               edac_mc_printk(mci, KERN_WARNING,
-                       "CE row %d, channel %d, label \"%s\": %s\n",
-                       csrow, channel,
-                       mci->csrows[csrow].channels[channel].label, msg);
+       /* Memory type dependent details about the error */
+       if (type == HW_EVENT_ERR_CORRECTED) {
+               snprintf(detail, sizeof(detail),
+                       "page:0x%lx offset:0x%lx grain:%d syndrome:0x%lx",
+                       page_frame_number, offset_in_page,
+                       grain, syndrome);
+               edac_ce_error(mci, pos, msg, location, label, detail,
+                             other_detail, enable_per_layer_report,
+                             page_frame_number, offset_in_page, grain);
+       } else {
+               snprintf(detail, sizeof(detail),
+                       "page:0x%lx offset:0x%lx grain:%d",
+                       page_frame_number, offset_in_page, grain);
 
-       mci->ce_count++;
-       mci->csrows[csrow].ce_count++;
-       mci->csrows[csrow].channels[channel].ce_count++;
+               edac_ue_error(mci, pos, msg, location, label, detail,
+                             other_detail, enable_per_layer_report);
+       }
 }
-EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
+EXPORT_SYMBOL_GPL(edac_mc_handle_error);
index e9a28f576d144dee247d70cb43e4d06952acfa46..f6a29b0eedc8535bb33769ca81cf8f88abe602e8 100644 (file)
@@ -144,25 +144,31 @@ static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
 static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
                                int private)
 {
-       return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
+       int i;
+       u32 nr_pages = 0;
+
+       for (i = 0; i < csrow->nr_channels; i++)
+               nr_pages += csrow->channels[i].dimm->nr_pages;
+
+       return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
 }
 
 static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
                                int private)
 {
-       return sprintf(data, "%s\n", mem_types[csrow->mtype]);
+       return sprintf(data, "%s\n", mem_types[csrow->channels[0].dimm->mtype]);
 }
 
 static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
                                int private)
 {
-       return sprintf(data, "%s\n", dev_types[csrow->dtype]);
+       return sprintf(data, "%s\n", dev_types[csrow->channels[0].dimm->dtype]);
 }
 
 static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
                                int private)
 {
-       return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]);
+       return sprintf(data, "%s\n", edac_caps[csrow->channels[0].dimm->edac_mode]);
 }
 
 /* show/store functions for DIMM Label attributes */
@@ -170,11 +176,11 @@ static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
                                char *data, int channel)
 {
        /* if field has not been initialized, there is nothing to send */
-       if (!csrow->channels[channel].label[0])
+       if (!csrow->channels[channel].dimm->label[0])
                return 0;
 
        return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
-                       csrow->channels[channel].label);
+                       csrow->channels[channel].dimm->label);
 }
 
 static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
@@ -184,8 +190,8 @@ static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
        ssize_t max_size = 0;
 
        max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
-       strncpy(csrow->channels[channel].label, data, max_size);
-       csrow->channels[channel].label[max_size] = '\0';
+       strncpy(csrow->channels[channel].dimm->label, data, max_size);
+       csrow->channels[channel].dimm->label[max_size] = '\0';
 
        return max_size;
 }
@@ -419,8 +425,8 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
 
        mci->ue_noinfo_count = 0;
        mci->ce_noinfo_count = 0;
-       mci->ue_count = 0;
-       mci->ce_count = 0;
+       mci->ue_mc = 0;
+       mci->ce_mc = 0;
 
        for (row = 0; row < mci->nr_csrows; row++) {
                struct csrow_info *ri = &mci->csrows[row];
@@ -489,12 +495,12 @@ static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
 /* default attribute files for the MCI object */
 static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
 {
-       return sprintf(data, "%d\n", mci->ue_count);
+       return sprintf(data, "%d\n", mci->ue_mc);
 }
 
 static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
 {
-       return sprintf(data, "%d\n", mci->ce_count);
+       return sprintf(data, "%d\n", mci->ce_mc);
 }
 
 static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
@@ -519,16 +525,16 @@ static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
 
 static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
 {
-       int total_pages, csrow_idx;
+       int total_pages = 0, csrow_idx, j;
 
-       for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
-               csrow_idx++) {
+       for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
                struct csrow_info *csrow = &mci->csrows[csrow_idx];
 
-               if (!csrow->nr_pages)
-                       continue;
+               for (j = 0; j < csrow->nr_channels; j++) {
+                       struct dimm_info *dimm = csrow->channels[j].dimm;
 
-               total_pages += csrow->nr_pages;
+                       total_pages += dimm->nr_pages;
+               }
        }
 
        return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
@@ -900,7 +906,7 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
  */
 int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
 {
-       int i;
+       int i, j;
        int err;
        struct csrow_info *csrow;
        struct kobject *kobj_mci = &mci->edac_mci_kobj;
@@ -934,10 +940,13 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
        /* Make directories for each CSROW object under the mc<id> kobject
         */
        for (i = 0; i < mci->nr_csrows; i++) {
+               int nr_pages = 0;
+
                csrow = &mci->csrows[i];
+               for (j = 0; j < csrow->nr_channels; j++)
+                       nr_pages += csrow->channels[j].dimm->nr_pages;
 
-               /* Only expose populated CSROWs */
-               if (csrow->nr_pages > 0) {
+               if (nr_pages > 0) {
                        err = edac_create_csrow_object(mci, csrow, i);
                        if (err) {
                                debugf1("%s() failure: create csrow %d obj\n",
@@ -949,12 +958,15 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
 
        return 0;
 
-       /* CSROW error: backout what has already been registered,  */
 fail1:
        for (i--; i >= 0; i--) {
-               if (csrow->nr_pages > 0) {
+               int nr_pages = 0;
+
+               csrow = &mci->csrows[i];
+               for (j = 0; j < csrow->nr_channels; j++)
+                       nr_pages += csrow->channels[j].dimm->nr_pages;
+               if (nr_pages > 0)
                        kobject_put(&mci->csrows[i].kobj);
-               }
        }
 
        /* remove the mci instance's attributes, if any */
@@ -973,14 +985,20 @@ fail0:
  */
 void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
 {
-       int i;
+       struct csrow_info *csrow;
+       int i, j;
 
        debugf0("%s()\n", __func__);
 
        /* remove all csrow kobjects */
        debugf4("%s()  unregister this mci kobj\n", __func__);
        for (i = 0; i < mci->nr_csrows; i++) {
-               if (mci->csrows[i].nr_pages > 0) {
+               int nr_pages = 0;
+
+               csrow = &mci->csrows[i];
+               for (j = 0; j < csrow->nr_channels; j++)
+                       nr_pages += csrow->channels[j].dimm->nr_pages;
+               if (nr_pages > 0) {
                        debugf0("%s()  unreg csrow-%d\n", __func__, i);
                        kobject_put(&mci->csrows[i].kobj);
                }
index 00f81b47a51ffd886e58c4aeaa70d8c0b896246f..0ea7d14cb930748e75aadbb18e48e4616fdc315e 100644 (file)
@@ -50,7 +50,7 @@ extern void edac_device_reset_delay_period(struct edac_device_ctl_info
                                           *edac_dev, unsigned long value);
 extern void edac_mc_reset_delay_period(int value);
 
-extern void *edac_align_ptr(void *ptr, unsigned size);
+extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
 
 /*
  * EDAC PCI functions
index 63af1c5673d1bbc790d5a98228dd050fc9de36b5..f1ac866498864dfbfc8e73ad091860d790142a68 100644 (file)
@@ -42,13 +42,13 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
                                                const char *edac_pci_name)
 {
        struct edac_pci_ctl_info *pci;
-       void *pvt;
+       void *p = NULL, *pvt;
        unsigned int size;
 
        debugf1("%s()\n", __func__);
 
-       pci = (struct edac_pci_ctl_info *)0;
-       pvt = edac_align_ptr(&pci[1], sz_pvt);
+       pci = edac_align_ptr(&p, sizeof(*pci), 1);
+       pvt = edac_align_ptr(&p, 1, sz_pvt);
        size = ((unsigned long)pvt) + sz_pvt;
 
        /* Alloc the needed control struct memory */
index 277689a688413147b5271211dc1182a9fe44dffa..8ad1744faacd9559f6f4ab66eb1bf1b960fd4ee0 100644 (file)
@@ -245,7 +245,9 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
                return 1;
 
        if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
-               edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+                                    -1, -1, -1,
+                                    "UE overwrote CE", "", NULL);
                info->errsts = info->errsts2;
        }
 
@@ -256,10 +258,15 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
        row = edac_mc_find_csrow_by_page(mci, pfn);
 
        if (info->errsts & I3000_ERRSTS_UE)
-               edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE");
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                    pfn, offset, 0,
+                                    row, -1, -1,
+                                    "i3000 UE", "", NULL);
        else
-               edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row,
-                               multi_chan ? channel : 0, "i3000 CE");
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                    pfn, offset, info->derrsyn,
+                                    row, multi_chan ? channel : 0, -1,
+                                    "i3000 CE", "", NULL);
 
        return 1;
 }
@@ -304,9 +311,10 @@ static int i3000_is_interleaved(const unsigned char *c0dra,
 static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
 {
        int rc;
-       int i;
+       int i, j;
        struct mem_ctl_info *mci = NULL;
-       unsigned long last_cumul_size;
+       struct edac_mc_layer layers[2];
+       unsigned long last_cumul_size, nr_pages;
        int interleaved, nr_channels;
        unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
        unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
@@ -347,7 +355,14 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
         */
        interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
        nr_channels = interleaved ? 2 : 1;
-       mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0);
+
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = I3000_RANKS / nr_channels;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = nr_channels;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
        if (!mci)
                return -ENOMEM;
 
@@ -386,19 +401,23 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
                        cumul_size <<= 1;
                debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
                        __func__, i, cumul_size);
-               if (cumul_size == last_cumul_size) {
-                       csrow->mtype = MEM_EMPTY;
+               if (cumul_size == last_cumul_size)
                        continue;
-               }
 
                csrow->first_page = last_cumul_size;
                csrow->last_page = cumul_size - 1;
-               csrow->nr_pages = cumul_size - last_cumul_size;
+               nr_pages = cumul_size - last_cumul_size;
                last_cumul_size = cumul_size;
-               csrow->grain = I3000_DEAP_GRAIN;
-               csrow->mtype = MEM_DDR2;
-               csrow->dtype = DEV_UNKNOWN;
-               csrow->edac_mode = EDAC_UNKNOWN;
+
+               for (j = 0; j < nr_channels; j++) {
+                       struct dimm_info *dimm = csrow->channels[j].dimm;
+
+                       dimm->nr_pages = nr_pages / nr_channels;
+                       dimm->grain = I3000_DEAP_GRAIN;
+                       dimm->mtype = MEM_DDR2;
+                       dimm->dtype = DEV_UNKNOWN;
+                       dimm->edac_mode = EDAC_UNKNOWN;
+               }
        }
 
        /*
index 046808c6357df00d39a120caa2a0089de2b59aca..bbe43ef718238c72d159affd4d2f80e22141f88f 100644 (file)
@@ -23,6 +23,7 @@
 
 #define PCI_DEVICE_ID_INTEL_3200_HB    0x29f0
 
+#define I3200_DIMMS            4
 #define I3200_RANKS            8
 #define I3200_RANKS_PER_CHANNEL        4
 #define I3200_CHANNELS         2
@@ -217,21 +218,25 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
                return;
 
        if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
-               edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+                                    -1, -1, -1, "UE overwrote CE", "", NULL);
                info->errsts = info->errsts2;
        }
 
        for (channel = 0; channel < nr_channels; channel++) {
                log = info->eccerrlog[channel];
                if (log & I3200_ECCERRLOG_UE) {
-                       edac_mc_handle_ue(mci, 0, 0,
-                               eccerrlog_row(channel, log),
-                               "i3200 UE");
+                       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                            0, 0, 0,
+                                            eccerrlog_row(channel, log),
+                                            -1, -1,
+                                            "i3000 UE", "", NULL);
                } else if (log & I3200_ECCERRLOG_CE) {
-                       edac_mc_handle_ce(mci, 0, 0,
-                               eccerrlog_syndrome(log),
-                               eccerrlog_row(channel, log), 0,
-                               "i3200 CE");
+                       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                            0, 0, eccerrlog_syndrome(log),
+                                            eccerrlog_row(channel, log),
+                                            -1, -1,
+                                            "i3000 UE", "", NULL);
                }
        }
 }
@@ -319,9 +324,9 @@ static unsigned long drb_to_nr_pages(
 static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
 {
        int rc;
-       int i;
+       int i, j;
        struct mem_ctl_info *mci = NULL;
-       unsigned long last_page;
+       struct edac_mc_layer layers[2];
        u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
        bool stacked;
        void __iomem *window;
@@ -336,8 +341,14 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
        i3200_get_drbs(window, drbs);
        nr_channels = how_many_channels(pdev);
 
-       mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS,
-               nr_channels, 0);
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = I3200_DIMMS;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = nr_channels;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+                           sizeof(struct i3200_priv));
        if (!mci)
                return -ENOMEM;
 
@@ -366,7 +377,6 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
         * cumulative; the last one will contain the total memory
         * contained in all ranks.
         */
-       last_page = -1UL;
        for (i = 0; i < mci->nr_csrows; i++) {
                unsigned long nr_pages;
                struct csrow_info *csrow = &mci->csrows[i];
@@ -375,20 +385,18 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
                        i / I3200_RANKS_PER_CHANNEL,
                        i % I3200_RANKS_PER_CHANNEL);
 
-               if (nr_pages == 0) {
-                       csrow->mtype = MEM_EMPTY;
+               if (nr_pages == 0)
                        continue;
-               }
 
-               csrow->first_page = last_page + 1;
-               last_page += nr_pages;
-               csrow->last_page = last_page;
-               csrow->nr_pages = nr_pages;
+               for (j = 0; j < nr_channels; j++) {
+                       struct dimm_info *dimm = csrow->channels[j].dimm;
 
-               csrow->grain = nr_pages << PAGE_SHIFT;
-               csrow->mtype = MEM_DDR2;
-               csrow->dtype = DEV_UNKNOWN;
-               csrow->edac_mode = EDAC_UNKNOWN;
+                       dimm->nr_pages = nr_pages / nr_channels;
+                       dimm->grain = nr_pages << PAGE_SHIFT;
+                       dimm->mtype = MEM_DDR2;
+                       dimm->dtype = DEV_UNKNOWN;
+                       dimm->edac_mode = EDAC_UNKNOWN;
+               }
        }
 
        i3200_clear_error_info(mci);
index a2680d8e744b1f8de5239e50a1332535308ca26d..11ea835f155a840dc2ae3048ca67b904e1385801 100644 (file)
 #define MTR3           0x8C
 
 #define NUM_MTRS               4
-#define CHANNELS_PER_BRANCH    (2)
+#define CHANNELS_PER_BRANCH    2
+#define MAX_BRANCHES           2
 
 /* Defines to extract the vaious fields from the
  *     MTRx - Memory Technology Registers
@@ -473,7 +474,6 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
        char msg[EDAC_MC_LABEL_LEN + 1 + 160];
        char *specific = NULL;
        u32 allErrors;
-       int branch;
        int channel;
        int bank;
        int rank;
@@ -485,8 +485,7 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
        if (!allErrors)
                return;         /* if no error, return now */
 
-       branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
-       channel = branch;
+       channel = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
 
        /* Use the NON-Recoverable macros to extract data */
        bank = NREC_BANK(info->nrecmema);
@@ -495,9 +494,9 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
        ras = NREC_RAS(info->nrecmemb);
        cas = NREC_CAS(info->nrecmemb);
 
-       debugf0("\t\tCSROW= %d  Channels= %d,%d  (Branch= %d "
-               "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
-               rank, channel, channel + 1, branch >> 1, bank,
+       debugf0("\t\tCSROW= %d  Channel= %d "
+               "(DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+               rank, channel, bank,
                rdwr ? "Write" : "Read", ras, cas);
 
        /* Only 1 bit will be on */
@@ -533,13 +532,14 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
 
        /* Form out message */
        snprintf(msg, sizeof(msg),
-                "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d "
-                "FATAL Err=0x%x (%s))",
-                branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
-                allErrors, specific);
+                "Bank=%d RAS=%d CAS=%d FATAL Err=0x%x (%s)",
+                bank, ras, cas, allErrors, specific);
 
        /* Call the helper to output message */
-       edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+       edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
+                            channel >> 1, channel & 1, rank,
+                            rdwr ? "Write error" : "Read error",
+                            msg, NULL);
 }
 
 /*
@@ -633,13 +633,14 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
 
                /* Form out message */
                snprintf(msg, sizeof(msg),
-                        "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
-                        "CAS=%d, UE Err=0x%x (%s))",
-                        branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
-                        ue_errors, specific);
+                        "Rank=%d Bank=%d RAS=%d CAS=%d, UE Err=0x%x (%s)",
+                        rank, bank, ras, cas, ue_errors, specific);
 
                /* Call the helper to output message */
-               edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+                               channel >> 1, -1, rank,
+                               rdwr ? "Write error" : "Read error",
+                               msg, NULL);
        }
 
        /* Check correctable errors */
@@ -685,13 +686,16 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
 
                /* Form out message */
                snprintf(msg, sizeof(msg),
-                        "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
+                        "Rank=%d Bank=%d RDWR=%s RAS=%d "
                         "CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank,
                         rdwr ? "Write" : "Read", ras, cas, ce_errors,
                         specific);
 
                /* Call the helper to output message */
-               edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+                               channel >> 1, channel % 2, rank,
+                               rdwr ? "Write error" : "Read error",
+                               msg, NULL);
        }
 
        if (!misc_messages)
@@ -731,11 +735,12 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
 
                /* Form out message */
                snprintf(msg, sizeof(msg),
-                        "(Branch=%d Err=%#x (%s))", branch >> 1,
-                        misc_errors, specific);
+                        "Err=%#x (%s)", misc_errors, specific);
 
                /* Call the helper to output message */
-               edac_mc_handle_fbd_ce(mci, 0, 0, msg);
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+                               branch >> 1, -1, -1,
+                               "Misc error", msg, NULL);
        }
 }
 
@@ -956,14 +961,14 @@ static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel)
  *
  *     return the proper MTR register as determine by the csrow and channel desired
  */
-static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel)
+static int determine_mtr(struct i5000_pvt *pvt, int slot, int channel)
 {
        int mtr;
 
        if (channel < CHANNELS_PER_BRANCH)
-               mtr = pvt->b0_mtr[csrow >> 1];
+               mtr = pvt->b0_mtr[slot];
        else
-               mtr = pvt->b1_mtr[csrow >> 1];
+               mtr = pvt->b1_mtr[slot];
 
        return mtr;
 }
@@ -988,37 +993,34 @@ static void decode_mtr(int slot_row, u16 mtr)
        debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
 }
 
-static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
+static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
                        struct i5000_dimm_info *dinfo)
 {
        int mtr;
        int amb_present_reg;
        int addrBits;
 
-       mtr = determine_mtr(pvt, csrow, channel);
+       mtr = determine_mtr(pvt, slot, channel);
        if (MTR_DIMMS_PRESENT(mtr)) {
                amb_present_reg = determine_amb_present_reg(pvt, channel);
 
-               /* Determine if there is  a  DIMM present in this DIMM slot */
-               if (amb_present_reg & (1 << (csrow >> 1))) {
+               /* Determine if there is a DIMM present in this DIMM slot */
+               if (amb_present_reg) {
                        dinfo->dual_rank = MTR_DIMM_RANK(mtr);
 
-                       if (!((dinfo->dual_rank == 0) &&
-                               ((csrow & 0x1) == 0x1))) {
-                               /* Start with the number of bits for a Bank
-                                * on the DRAM */
-                               addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
-                               /* Add thenumber of ROW bits */
-                               addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
-                               /* add the number of COLUMN bits */
-                               addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
-
-                               addrBits += 6;  /* add 64 bits per DIMM */
-                               addrBits -= 20; /* divide by 2^^20 */
-                               addrBits -= 3;  /* 8 bits per bytes */
-
-                               dinfo->megabytes = 1 << addrBits;
-                       }
+                       /* Start with the number of bits for a Bank
+                        * on the DRAM */
+                       addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
+                       /* Add the number of ROW bits */
+                       addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
+                       /* add the number of COLUMN bits */
+                       addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
+
+                       addrBits += 6;  /* add 64 bits per DIMM */
+                       addrBits -= 20; /* divide by 2^^20 */
+                       addrBits -= 3;  /* 8 bits per bytes */
+
+                       dinfo->megabytes = 1 << addrBits;
                }
        }
 }
@@ -1032,10 +1034,9 @@ static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
 static void calculate_dimm_size(struct i5000_pvt *pvt)
 {
        struct i5000_dimm_info *dinfo;
-       int csrow, max_csrows;
+       int slot, channel, branch;
        char *p, *mem_buffer;
        int space, n;
-       int channel;
 
        /* ================= Generate some debug output ================= */
        space = PAGE_SIZE;
@@ -1046,22 +1047,17 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
                return;
        }
 
-       n = snprintf(p, space, "\n");
-       p += n;
-       space -= n;
-
-       /* Scan all the actual CSROWS (which is # of DIMMS * 2)
+       /* Scan all the actual slots
         * and calculate the information for each DIMM
-        * Start with the highest csrow first, to display it first
-        * and work toward the 0th csrow
+        * Start with the highest slot first, to display it first
+        * and work toward the 0th slot
         */
-       max_csrows = pvt->maxdimmperch * 2;
-       for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+       for (slot = pvt->maxdimmperch - 1; slot >= 0; slot--) {
 
-               /* on an odd csrow, first output a 'boundary' marker,
+               /* on an odd slot, first output a 'boundary' marker,
                 * then reset the message buffer  */
-               if (csrow & 0x1) {
-                       n = snprintf(p, space, "---------------------------"
+               if (slot & 0x1) {
+                       n = snprintf(p, space, "--------------------------"
                                "--------------------------------");
                        p += n;
                        space -= n;
@@ -1069,30 +1065,39 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
                        p = mem_buffer;
                        space = PAGE_SIZE;
                }
-               n = snprintf(p, space, "csrow %2d    ", csrow);
+               n = snprintf(p, space, "slot %2d    ", slot);
                p += n;
                space -= n;
 
                for (channel = 0; channel < pvt->maxch; channel++) {
-                       dinfo = &pvt->dimm_info[csrow][channel];
-                       handle_channel(pvt, csrow, channel, dinfo);
-                       n = snprintf(p, space, "%4d MB   | ", dinfo->megabytes);
+                       dinfo = &pvt->dimm_info[slot][channel];
+                       handle_channel(pvt, slot, channel, dinfo);
+                       if (dinfo->megabytes)
+                               n = snprintf(p, space, "%4d MB %dR| ",
+                                            dinfo->megabytes, dinfo->dual_rank + 1);
+                       else
+                               n = snprintf(p, space, "%4d MB   | ", 0);
                        p += n;
                        space -= n;
                }
-               n = snprintf(p, space, "\n");
                p += n;
                space -= n;
+               debugf2("%s\n", mem_buffer);
+               p = mem_buffer;
+               space = PAGE_SIZE;
        }
 
        /* Output the last bottom 'boundary' marker */
-       n = snprintf(p, space, "---------------------------"
-               "--------------------------------\n");
+       n = snprintf(p, space, "--------------------------"
+               "--------------------------------");
        p += n;
        space -= n;
+       debugf2("%s\n", mem_buffer);
+       p = mem_buffer;
+       space = PAGE_SIZE;
 
        /* now output the 'channel' labels */
-       n = snprintf(p, space, "            ");
+       n = snprintf(p, space, "           ");
        p += n;
        space -= n;
        for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1100,9 +1105,17 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
                p += n;
                space -= n;
        }
-       n = snprintf(p, space, "\n");
+       debugf2("%s\n", mem_buffer);
+       p = mem_buffer;
+       space = PAGE_SIZE;
+
+       n = snprintf(p, space, "           ");
        p += n;
-       space -= n;
+       for (branch = 0; branch < MAX_BRANCHES; branch++) {
+               n = snprintf(p, space, "       branch %d       | ", branch);
+               p += n;
+               space -= n;
+       }
 
        /* output the last message and free buffer */
        debugf2("%s\n", mem_buffer);
@@ -1235,13 +1248,13 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
 static int i5000_init_csrows(struct mem_ctl_info *mci)
 {
        struct i5000_pvt *pvt;
-       struct csrow_info *p_csrow;
+       struct dimm_info *dimm;
        int empty, channel_count;
        int max_csrows;
-       int mtr, mtr1;
+       int mtr;
        int csrow_megs;
        int channel;
-       int csrow;
+       int slot;
 
        pvt = mci->pvt_info;
 
@@ -1250,43 +1263,40 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
 
        empty = 1;              /* Assume NO memory */
 
-       for (csrow = 0; csrow < max_csrows; csrow++) {
-               p_csrow = &mci->csrows[csrow];
-
-               p_csrow->csrow_idx = csrow;
-
-               /* use branch 0 for the basis */
-               mtr = pvt->b0_mtr[csrow >> 1];
-               mtr1 = pvt->b1_mtr[csrow >> 1];
-
-               /* if no DIMMS on this row, continue */
-               if (!MTR_DIMMS_PRESENT(mtr) && !MTR_DIMMS_PRESENT(mtr1))
-                       continue;
+       /*
+        * FIXME: The memory layout used to map slot/channel into the
+        * real memory architecture is weird: branch+slot are "csrows"
+        * and channel is channel. That required an extra array (dimm_info)
+        * to map the dimms. A good cleanup would be to remove this array,
+        * and do a loop here with branch, channel, slot
+        */
+       for (slot = 0; slot < max_csrows; slot++) {
+               for (channel = 0; channel < pvt->maxch; channel++) {
 
-               /* FAKE OUT VALUES, FIXME */
-               p_csrow->first_page = 0 + csrow * 20;
-               p_csrow->last_page = 9 + csrow * 20;
-               p_csrow->page_mask = 0xFFF;
+                       mtr = determine_mtr(pvt, slot, channel);
 
-               p_csrow->grain = 8;
+                       if (!MTR_DIMMS_PRESENT(mtr))
+                               continue;
 
-               csrow_megs = 0;
-               for (channel = 0; channel < pvt->maxch; channel++) {
-                       csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
-               }
+                       dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+                                      channel / MAX_BRANCHES,
+                                      channel % MAX_BRANCHES, slot);
 
-               p_csrow->nr_pages = csrow_megs << 8;
+                       csrow_megs = pvt->dimm_info[slot][channel].megabytes;
+                       dimm->grain = 8;
 
-               /* Assume DDR2 for now */
-               p_csrow->mtype = MEM_FB_DDR2;
+                       /* Assume DDR2 for now */
+                       dimm->mtype = MEM_FB_DDR2;
 
-               /* ask what device type on this row */
-               if (MTR_DRAM_WIDTH(mtr))
-                       p_csrow->dtype = DEV_X8;
-               else
-                       p_csrow->dtype = DEV_X4;
+                       /* ask what device type on this row */
+                       if (MTR_DRAM_WIDTH(mtr))
+                               dimm->dtype = DEV_X8;
+                       else
+                               dimm->dtype = DEV_X4;
 
-               p_csrow->edac_mode = EDAC_S8ECD8ED;
+                       dimm->edac_mode = EDAC_S8ECD8ED;
+                       dimm->nr_pages = csrow_megs << 8;
+               }
 
                empty = 0;
        }
@@ -1317,7 +1327,7 @@ static void i5000_enable_error_reporting(struct mem_ctl_info *mci)
 }
 
 /*
- * i5000_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels)
+ * i5000_get_dimm_and_channel_counts(pdev, &nr_csrows, &num_channels)
  *
  *     ask the device how many channels are present and how many CSROWS
  *      as well
@@ -1332,7 +1342,7 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
         * supported on this memory controller
         */
        pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
-       *num_dimms_per_channel = (int)value *2;
+       *num_dimms_per_channel = (int)value;
 
        pci_read_config_byte(pdev, MAXCH, &value);
        *num_channels = (int)value;
@@ -1348,10 +1358,10 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
 static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
 {
        struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[3];
        struct i5000_pvt *pvt;
        int num_channels;
        int num_dimms_per_channel;
-       int num_csrows;
 
        debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
                __FILE__, __func__,
@@ -1377,14 +1387,22 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
         */
        i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
                                        &num_channels);
-       num_csrows = num_dimms_per_channel * 2;
 
-       debugf0("MC: %s(): Number of - Channels= %d  DIMMS= %d  CSROWS= %d\n",
-               __func__, num_channels, num_dimms_per_channel, num_csrows);
+       debugf0("MC: %s(): Number of Branches=2 Channels= %d  DIMMS= %d\n",
+               __func__, num_channels, num_dimms_per_channel);
 
        /* allocate a new MC control structure */
-       mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
 
+       layers[0].type = EDAC_MC_LAYER_BRANCH;
+       layers[0].size = MAX_BRANCHES;
+       layers[0].is_virt_csrow = false;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = num_channels / MAX_BRANCHES;
+       layers[1].is_virt_csrow = false;
+       layers[2].type = EDAC_MC_LAYER_SLOT;
+       layers[2].size = num_dimms_per_channel;
+       layers[2].is_virt_csrow = true;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
        if (mci == NULL)
                return -ENOMEM;
 
index d500749464ea6038e147f25404586c1289d28b1d..e9e7c2a29dc389d9462f50e5b1d17367b9a7d194 100644 (file)
  * rows for each respective channel are laid out one after another,
  * the first half belonging to channel 0, the second half belonging
  * to channel 1.
+ *
+ * This driver is for DDR2 DIMMs, and it uses chip select to select among the
+ * several ranks. However, instead of showing memories as ranks, it outputs
+ * them as DIMMs. An internal table creates the association between ranks
+ * and DIMMs.
  */
 #include <linux/module.h>
 #include <linux/init.h>
@@ -410,14 +415,6 @@ static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
        return csrow / priv->ranksperchan;
 }
 
-static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
-                                   int chan, int rank)
-{
-       const struct i5100_priv *priv = mci->pvt_info;
-
-       return chan * priv->ranksperchan + rank;
-}
-
 static void i5100_handle_ce(struct mem_ctl_info *mci,
                            int chan,
                            unsigned bank,
@@ -427,17 +424,17 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
                            unsigned ras,
                            const char *msg)
 {
-       const int csrow = i5100_rank_to_csrow(mci, chan, rank);
+       char detail[80];
 
-       printk(KERN_ERR
-               "CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
-               "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
-               chan, bank, rank, syndrome, cas, ras,
-               csrow, mci->csrows[csrow].channels[0].label, msg);
+       /* Form out message */
+       snprintf(detail, sizeof(detail),
+                "bank %u, cas %u, ras %u\n",
+                bank, cas, ras);
 
-       mci->ce_count++;
-       mci->csrows[csrow].ce_count++;
-       mci->csrows[csrow].channels[0].ce_count++;
+       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                            0, 0, syndrome,
+                            chan, rank, -1,
+                            msg, detail, NULL);
 }
 
 static void i5100_handle_ue(struct mem_ctl_info *mci,
@@ -449,16 +446,17 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
                            unsigned ras,
                            const char *msg)
 {
-       const int csrow = i5100_rank_to_csrow(mci, chan, rank);
+       char detail[80];
 
-       printk(KERN_ERR
-               "UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
-               "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
-               chan, bank, rank, syndrome, cas, ras,
-               csrow, mci->csrows[csrow].channels[0].label, msg);
+       /* Form out message */
+       snprintf(detail, sizeof(detail),
+                "bank %u, cas %u, ras %u\n",
+                bank, cas, ras);
 
-       mci->ue_count++;
-       mci->csrows[csrow].ue_count++;
+       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                            0, 0, syndrome,
+                            chan, rank, -1,
+                            msg, detail, NULL);
 }
 
 static void i5100_read_log(struct mem_ctl_info *mci, int chan,
@@ -835,10 +833,10 @@ static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
 static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
 {
        int i;
-       unsigned long total_pages = 0UL;
        struct i5100_priv *priv = mci->pvt_info;
 
-       for (i = 0; i < mci->nr_csrows; i++) {
+       for (i = 0; i < mci->tot_dimms; i++) {
+               struct dimm_info *dimm;
                const unsigned long npages = i5100_npages(mci, i);
                const unsigned chan = i5100_csrow_to_chan(mci, i);
                const unsigned rank = i5100_csrow_to_rank(mci, i);
@@ -846,33 +844,23 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
                if (!npages)
                        continue;
 
-               /*
-                * FIXME: these two are totally bogus -- I don't see how to
-                * map them correctly to this structure...
-                */
-               mci->csrows[i].first_page = total_pages;
-               mci->csrows[i].last_page = total_pages + npages - 1;
-               mci->csrows[i].page_mask = 0UL;
-
-               mci->csrows[i].nr_pages = npages;
-               mci->csrows[i].grain = 32;
-               mci->csrows[i].csrow_idx = i;
-               mci->csrows[i].dtype =
-                       (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
-               mci->csrows[i].ue_count = 0;
-               mci->csrows[i].ce_count = 0;
-               mci->csrows[i].mtype = MEM_RDDR2;
-               mci->csrows[i].edac_mode = EDAC_SECDED;
-               mci->csrows[i].mci = mci;
-               mci->csrows[i].nr_channels = 1;
-               mci->csrows[i].channels[0].chan_idx = 0;
-               mci->csrows[i].channels[0].ce_count = 0;
-               mci->csrows[i].channels[0].csrow = mci->csrows + i;
-               snprintf(mci->csrows[i].channels[0].label,
-                        sizeof(mci->csrows[i].channels[0].label),
-                        "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
-
-               total_pages += npages;
+               dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+                              chan, rank, 0);
+
+               dimm->nr_pages = npages;
+               if (npages) {
+                       dimm->grain = 32;
+                       dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
+                                       DEV_X4 : DEV_X8;
+                       dimm->mtype = MEM_RDDR2;
+                       dimm->edac_mode = EDAC_SECDED;
+                       snprintf(dimm->label, sizeof(dimm->label),
+                               "DIMM%u",
+                               i5100_rank_to_slot(mci, chan, rank));
+               }
+
+               debugf2("dimm channel %d, rank %d, size %ld\n",
+                       chan, rank, (long)PAGES_TO_MiB(npages));
        }
 }
 
@@ -881,6 +869,7 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
 {
        int rc;
        struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[2];
        struct i5100_priv *priv;
        struct pci_dev *ch0mm, *ch1mm;
        int ret = 0;
@@ -941,7 +930,14 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
                goto bail_ch1;
        }
 
-       mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
+       layers[0].type = EDAC_MC_LAYER_CHANNEL;
+       layers[0].size = 2;
+       layers[0].is_virt_csrow = false;
+       layers[1].type = EDAC_MC_LAYER_SLOT;
+       layers[1].size = ranksperch;
+       layers[1].is_virt_csrow = true;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+                           sizeof(*priv));
        if (!mci) {
                ret = -ENOMEM;
                goto bail_disable_ch1;
index 1869a1018fb5215b51e80ce53c274ae5c1190566..6640c29e1885a814d8f7e69318541d1afd666037 100644 (file)
  * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet
  *     http://developer.intel.com/design/chipsets/datashts/313070.htm
  *
+ * This Memory Controller manages DDR2 FB-DIMMs. It has 2 branches, each with
+ * 2 channels operating in lockstep no-mirror mode. Each channel can have up to
+ * 4 DIMMs, each with up to 8 GB.
+ *
  */
 
 #include <linux/module.h>
        edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)
 
 /* Limits for i5400 */
-#define NUM_MTRS_PER_BRANCH    4
+#define MAX_BRANCHES           2
 #define CHANNELS_PER_BRANCH    2
-#define MAX_DIMMS_PER_CHANNEL  NUM_MTRS_PER_BRANCH
-#define        MAX_CHANNELS            4
-/* max possible csrows per channel */
-#define MAX_CSROWS             (MAX_DIMMS_PER_CHANNEL)
+#define DIMMS_PER_CHANNEL      4
+#define        MAX_CHANNELS            (MAX_BRANCHES * CHANNELS_PER_BRANCH)
 
 /* Device 16,
  * Function 0: System Address
@@ -347,16 +349,16 @@ struct i5400_pvt {
 
        u16 mir0, mir1;
 
-       u16 b0_mtr[NUM_MTRS_PER_BRANCH];        /* Memory Technlogy Reg */
+       u16 b0_mtr[DIMMS_PER_CHANNEL];  /* Memory Technology Reg */
        u16 b0_ambpresent0;                     /* Branch 0, Channel 0 */
        u16 b0_ambpresent1;                     /* Brnach 0, Channel 1 */
 
-       u16 b1_mtr[NUM_MTRS_PER_BRANCH];        /* Memory Technlogy Reg */
+       u16 b1_mtr[DIMMS_PER_CHANNEL];  /* Memory Technology Reg */
        u16 b1_ambpresent0;                     /* Branch 1, Channel 8 */
        u16 b1_ambpresent1;                     /* Branch 1, Channel 1 */
 
        /* DIMM information matrix, allocating architecture maximums */
-       struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
+       struct i5400_dimm_info dimm_info[DIMMS_PER_CHANNEL][MAX_CHANNELS];
 
        /* Actual values for this controller */
        int maxch;                              /* Max channels */
@@ -532,13 +534,15 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
        int ras, cas;
        int errnum;
        char *type = NULL;
+       enum hw_event_mc_err_type tp_event = HW_EVENT_ERR_UNCORRECTED;
 
        if (!allErrors)
                return;         /* if no error, return now */
 
-       if (allErrors &  ERROR_FAT_MASK)
+       if (allErrors &  ERROR_FAT_MASK) {
                type = "FATAL";
-       else if (allErrors & FERR_NF_UNCORRECTABLE)
+               tp_event = HW_EVENT_ERR_FATAL;
+       } else if (allErrors & FERR_NF_UNCORRECTABLE)
                type = "NON-FATAL uncorrected";
        else
                type = "NON-FATAL recoverable";
@@ -556,7 +560,7 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
        ras = nrec_ras(info);
        cas = nrec_cas(info);
 
-       debugf0("\t\tCSROW= %d  Channels= %d,%d  (Branch= %d "
+       debugf0("\t\tDIMM= %d  Channels= %d,%d  (Branch= %d "
                "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
                rank, channel, channel + 1, branch >> 1, bank,
                buf_id, rdwr_str(rdwr), ras, cas);
@@ -566,13 +570,13 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
 
        /* Form out message */
        snprintf(msg, sizeof(msg),
-                "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s "
-                "RAS=%d CAS=%d %s Err=0x%lx (%s))",
-                type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas,
-                type, allErrors, error_name[errnum]);
+                "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)",
+                bank, buf_id, ras, cas, allErrors, error_name[errnum]);
 
-       /* Call the helper to output message */
-       edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+       edac_mc_handle_error(tp_event, mci, 0, 0, 0,
+                            branch >> 1, -1, rank,
+                            rdwr ? "Write error" : "Read error",
+                            msg, NULL);
 }
 
 /*
@@ -630,7 +634,7 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
                /* Only 1 bit will be on */
                errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
 
-               debugf0("\t\tCSROW= %d Channel= %d  (Branch %d "
+               debugf0("\t\tDIMM= %d Channel= %d  (Branch %d "
                        "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
                        rank, channel, branch >> 1, bank,
                        rdwr_str(rdwr), ras, cas);
@@ -642,8 +646,10 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
                         branch >> 1, bank, rdwr_str(rdwr), ras, cas,
                         allErrors, error_name[errnum]);
 
-               /* Call the helper to output message */
-               edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+                                    branch >> 1, channel % 2, rank,
+                                    rdwr ? "Write error" : "Read error",
+                                    msg, NULL);
 
                return;
        }
@@ -831,8 +837,8 @@ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
 /*
  *     determine_amb_present
  *
- *             the information is contained in NUM_MTRS_PER_BRANCH different
- *             registers determining which of the NUM_MTRS_PER_BRANCH requires
+ *             the information is contained in DIMMS_PER_CHANNEL different
+ *             registers determining which of the DIMMS_PER_CHANNEL requires
  *              knowing which channel is in question
  *
  *     2 branches, each with 2 channels
@@ -861,11 +867,11 @@ static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel)
 }
 
 /*
- * determine_mtr(pvt, csrow, channel)
+ * determine_mtr(pvt, dimm, channel)
  *
- * return the proper MTR register as determine by the csrow and desired channel
+ * return the proper MTR register as determine by the dimm and desired channel
  */
-static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
+static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel)
 {
        int mtr;
        int n;
@@ -873,11 +879,11 @@ static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
        /* There is one MTR for each slot pair of FB-DIMMs,
           Each slot pair may be at branch 0 or branch 1.
         */
-       n = csrow;
+       n = dimm;
 
-       if (n >= NUM_MTRS_PER_BRANCH) {
-               debugf0("ERROR: trying to access an invalid csrow: %d\n",
-                       csrow);
+       if (n >= DIMMS_PER_CHANNEL) {
+               debugf0("ERROR: trying to access an invalid dimm: %d\n",
+                       dimm);
                return 0;
        }
 
@@ -913,19 +919,19 @@ static void decode_mtr(int slot_row, u16 mtr)
        debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
 }
 
-static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
+static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel,
                        struct i5400_dimm_info *dinfo)
 {
        int mtr;
        int amb_present_reg;
        int addrBits;
 
-       mtr = determine_mtr(pvt, csrow, channel);
+       mtr = determine_mtr(pvt, dimm, channel);
        if (MTR_DIMMS_PRESENT(mtr)) {
                amb_present_reg = determine_amb_present_reg(pvt, channel);
 
                /* Determine if there is a DIMM present in this DIMM slot */
-               if (amb_present_reg & (1 << csrow)) {
+               if (amb_present_reg & (1 << dimm)) {
                        /* Start with the number of bits for a Bank
                         * on the DRAM */
                        addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
@@ -954,10 +960,10 @@ static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
 static void calculate_dimm_size(struct i5400_pvt *pvt)
 {
        struct i5400_dimm_info *dinfo;
-       int csrow, max_csrows;
+       int dimm, max_dimms;
        char *p, *mem_buffer;
        int space, n;
-       int channel;
+       int channel, branch;
 
        /* ================= Generate some debug output ================= */
        space = PAGE_SIZE;
@@ -968,32 +974,32 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
                return;
        }
 
-       /* Scan all the actual CSROWS
+       /* Scan all the actual DIMMS
         * and calculate the information for each DIMM
-        * Start with the highest csrow first, to display it first
-        * and work toward the 0th csrow
+        * Start with the highest dimm first, to display it first
+        * and work toward the 0th dimm
         */
-       max_csrows = pvt->maxdimmperch;
-       for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+       max_dimms = pvt->maxdimmperch;
+       for (dimm = max_dimms - 1; dimm >= 0; dimm--) {
 
-               /* on an odd csrow, first output a 'boundary' marker,
+               /* on an odd dimm, first output a 'boundary' marker,
                 * then reset the message buffer  */
-               if (csrow & 0x1) {
+               if (dimm & 0x1) {
                        n = snprintf(p, space, "---------------------------"
-                                       "--------------------------------");
+                                       "-------------------------------");
                        p += n;
                        space -= n;
                        debugf2("%s\n", mem_buffer);
                        p = mem_buffer;
                        space = PAGE_SIZE;
                }
-               n = snprintf(p, space, "csrow %2d    ", csrow);
+               n = snprintf(p, space, "dimm %2d    ", dimm);
                p += n;
                space -= n;
 
                for (channel = 0; channel < pvt->maxch; channel++) {
-                       dinfo = &pvt->dimm_info[csrow][channel];
-                       handle_channel(pvt, csrow, channel, dinfo);
+                       dinfo = &pvt->dimm_info[dimm][channel];
+                       handle_channel(pvt, dimm, channel, dinfo);
                        n = snprintf(p, space, "%4d MB   | ", dinfo->megabytes);
                        p += n;
                        space -= n;
@@ -1005,7 +1011,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
 
        /* Output the last bottom 'boundary' marker */
        n = snprintf(p, space, "---------------------------"
-                       "--------------------------------");
+                       "-------------------------------");
        p += n;
        space -= n;
        debugf2("%s\n", mem_buffer);
@@ -1013,7 +1019,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
        space = PAGE_SIZE;
 
        /* now output the 'channel' labels */
-       n = snprintf(p, space, "            ");
+       n = snprintf(p, space, "           ");
        p += n;
        space -= n;
        for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1022,6 +1028,19 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
                space -= n;
        }
 
+       space -= n;
+       debugf2("%s\n", mem_buffer);
+       p = mem_buffer;
+       space = PAGE_SIZE;
+
+       n = snprintf(p, space, "           ");
+       p += n;
+       for (branch = 0; branch < MAX_BRANCHES; branch++) {
+               n = snprintf(p, space, "       branch %d       | ", branch);
+               p += n;
+               space -= n;
+       }
+
        /* output the last message and free buffer */
        debugf2("%s\n", mem_buffer);
        kfree(mem_buffer);
@@ -1080,7 +1099,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
        debugf2("MIR1: limit= 0x%x  WAY1= %u  WAY0= %x\n", limit, way1, way0);
 
        /* Get the set of MTR[0-3] regs by each branch */
-       for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) {
+       for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) {
                int where = MTR0 + (slot_row * sizeof(u16));
 
                /* Branch 0 set of MTR registers */
@@ -1105,7 +1124,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
        /* Read and dump branch 0's MTRs */
        debugf2("\nMemory Technology Registers:\n");
        debugf2("   Branch 0:\n");
-       for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
+       for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
                decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
 
        pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
@@ -1122,7 +1141,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
        } else {
                /* Read and dump  branch 1's MTRs */
                debugf2("   Branch 1:\n");
-               for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
+               for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
                        decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
 
                pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
@@ -1141,7 +1160,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
 }
 
 /*
- *     i5400_init_csrows       Initialize the 'csrows' table within
+ *     i5400_init_dimms        Initialize the 'dimms' table within
  *                             the mci control structure with the
  *                             addressing of memory.
  *
@@ -1149,64 +1168,68 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
  *             0       success
  *             1       no actual memory found on this MC
  */
-static int i5400_init_csrows(struct mem_ctl_info *mci)
+static int i5400_init_dimms(struct mem_ctl_info *mci)
 {
        struct i5400_pvt *pvt;
-       struct csrow_info *p_csrow;
-       int empty, channel_count;
-       int max_csrows;
+       struct dimm_info *dimm;
+       int ndimms, channel_count;
+       int max_dimms;
        int mtr;
-       int csrow_megs;
-       int channel;
-       int csrow;
+       int size_mb;
+       int  channel, slot;
 
        pvt = mci->pvt_info;
 
        channel_count = pvt->maxch;
-       max_csrows = pvt->maxdimmperch;
+       max_dimms = pvt->maxdimmperch;
 
-       empty = 1;              /* Assume NO memory */
+       ndimms = 0;
 
-       for (csrow = 0; csrow < max_csrows; csrow++) {
-               p_csrow = &mci->csrows[csrow];
-
-               p_csrow->csrow_idx = csrow;
-
-               /* use branch 0 for the basis */
-               mtr = determine_mtr(pvt, csrow, 0);
-
-               /* if no DIMMS on this row, continue */
-               if (!MTR_DIMMS_PRESENT(mtr))
-                       continue;
-
-               /* FAKE OUT VALUES, FIXME */
-               p_csrow->first_page = 0 + csrow * 20;
-               p_csrow->last_page = 9 + csrow * 20;
-               p_csrow->page_mask = 0xFFF;
-
-               p_csrow->grain = 8;
-
-               csrow_megs = 0;
-               for (channel = 0; channel < pvt->maxch; channel++)
-                       csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
-
-               p_csrow->nr_pages = csrow_megs << 8;
-
-               /* Assume DDR2 for now */
-               p_csrow->mtype = MEM_FB_DDR2;
-
-               /* ask what device type on this row */
-               if (MTR_DRAM_WIDTH(mtr))
-                       p_csrow->dtype = DEV_X8;
-               else
-                       p_csrow->dtype = DEV_X4;
-
-               p_csrow->edac_mode = EDAC_S8ECD8ED;
-
-               empty = 0;
+       /*
+        * FIXME: remove  pvt->dimm_info[slot][channel] and use the 3
+        * layers here.
+        */
+       for (channel = 0; channel < mci->layers[0].size * mci->layers[1].size;
+            channel++) {
+               for (slot = 0; slot < mci->layers[2].size; slot++) {
+                       mtr = determine_mtr(pvt, slot, channel);
+
+                       /* if no DIMMS on this slot, continue */
+                       if (!MTR_DIMMS_PRESENT(mtr))
+                               continue;
+
+                       dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+                                      channel / 2, channel % 2, slot);
+
+                       size_mb =  pvt->dimm_info[slot][channel].megabytes;
+
+                       debugf2("%s: dimm%zd (branch %d channel %d slot %d): %d.%03d GB\n",
+                               __func__, dimm - mci->dimms,
+                               channel / 2, channel % 2, slot,
+                               size_mb / 1000, size_mb % 1000);
+
+                       dimm->nr_pages = size_mb << 8;
+                       dimm->grain = 8;
+                       dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
+                       dimm->mtype = MEM_FB_DDR2;
+                       /*
+                        * The eccc mechanism is SDDC (aka SECC), with
+                        * is similar to Chipkill.
+                        */
+                       dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
+                                         EDAC_S8ECD8ED : EDAC_S4ECD4ED;
+                       ndimms++;
+               }
        }
 
-       return empty;
+       /*
+        * When just one memory is provided, it should be at location (0,0,0).
+        * With such single-DIMM mode, the SDCC algorithm degrades to SECDEC+.
+        */
+       if (ndimms == 1)
+               mci->dimms[0].edac_mode = EDAC_SECDED;
+
+       return (ndimms == 0);
 }
 
 /*
@@ -1242,9 +1265,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
 {
        struct mem_ctl_info *mci;
        struct i5400_pvt *pvt;
-       int num_channels;
-       int num_dimms_per_channel;
-       int num_csrows;
+       struct edac_mc_layer layers[3];
 
        if (dev_idx >= ARRAY_SIZE(i5400_devs))
                return -EINVAL;
@@ -1258,23 +1279,21 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
        if (PCI_FUNC(pdev->devfn) != 0)
                return -ENODEV;
 
-       /* As we don't have a motherboard identification routine to determine
-        * actual number of slots/dimms per channel, we thus utilize the
-        * resource as specified by the chipset. Thus, we might have
-        * have more DIMMs per channel than actually on the mobo, but this
-        * allows the driver to support up to the chipset max, without
-        * some fancy mobo determination.
+       /*
+        * allocate a new MC control structure
+        *
+        * This drivers uses the DIMM slot as "csrow" and the rest as "channel".
         */
-       num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL;
-       num_channels = MAX_CHANNELS;
-       num_csrows = num_dimms_per_channel;
-
-       debugf0("MC: %s(): Number of - Channels= %d  DIMMS= %d  CSROWS= %d\n",
-               __func__, num_channels, num_dimms_per_channel, num_csrows);
-
-       /* allocate a new MC control structure */
-       mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
-
+       layers[0].type = EDAC_MC_LAYER_BRANCH;
+       layers[0].size = MAX_BRANCHES;
+       layers[0].is_virt_csrow = false;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = CHANNELS_PER_BRANCH;
+       layers[1].is_virt_csrow = false;
+       layers[2].type = EDAC_MC_LAYER_SLOT;
+       layers[2].size = DIMMS_PER_CHANNEL;
+       layers[2].is_virt_csrow = true;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
        if (mci == NULL)
                return -ENOMEM;
 
@@ -1284,8 +1303,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
 
        pvt = mci->pvt_info;
        pvt->system_address = pdev;     /* Record this device in our private */
-       pvt->maxch = num_channels;
-       pvt->maxdimmperch = num_dimms_per_channel;
+       pvt->maxch = MAX_CHANNELS;
+       pvt->maxdimmperch = DIMMS_PER_CHANNEL;
 
        /* 'get' the pci devices we want to reserve for our use */
        if (i5400_get_devices(mci, dev_idx))
@@ -1307,13 +1326,13 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
        /* Set the function pointer to an actual operation function */
        mci->edac_check = i5400_check_error;
 
-       /* initialize the MC control structure 'csrows' table
+       /* initialize the MC control structure 'dimms' table
         * with the mapping and control information */
-       if (i5400_init_csrows(mci)) {
+       if (i5400_init_dimms(mci)) {
                debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
-                       "    because i5400_init_csrows() returned nonzero "
+                       "    because i5400_init_dimms() returned nonzero "
                        "value\n");
-               mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
+               mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */
        } else {
                debugf1("MC: Enable error reporting now\n");
                i5400_enable_error_reporting(mci);
index 3bafa3bca14873d0a3d22b08650c92ec3fd5e572..97c22fd650eec1953dfba7b3c773c1ddf082d700 100644 (file)
@@ -464,17 +464,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
                                FERR_FAT_FBD, error_reg);
 
                snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
-                       "FATAL (Branch=%d DRAM-Bank=%d %s "
-                       "RAS=%d CAS=%d Err=0x%lx (%s))",
-                       branch, bank,
-                       is_wr ? "RDWR" : "RD",
-                       ras, cas,
-                       errors, specific);
-
-               /* Call the helper to output message */
-               edac_mc_handle_fbd_ue(mci, rank, branch << 1,
-                                     (branch << 1) + 1,
-                                     pvt->tmp_prt_buffer);
+                        "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
+                        bank, ras, cas, errors, specific);
+
+               edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
+                                    branch, -1, rank,
+                                    is_wr ? "Write error" : "Read error",
+                                    pvt->tmp_prt_buffer, NULL);
+
        }
 
        /* read in the 1st NON-FATAL error register */
@@ -513,23 +510,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
 
                /* Form out message */
                snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
-                       "Corrected error (Branch=%d, Channel %d), "
-                       " DRAM-Bank=%d %s "
-                       "RAS=%d CAS=%d, CE Err=0x%lx, Syndrome=0x%08x(%s))",
-                       branch, channel,
-                       bank,
-                       is_wr ? "RDWR" : "RD",
-                       ras, cas,
-                       errors, syndrome, specific);
-
-               /*
-                * Call the helper to output message
-                * NOTE: Errors are reported per-branch, and not per-channel
-                *       Currently, we don't know how to identify the right
-                *       channel.
-                */
-               edac_mc_handle_fbd_ce(mci, rank, channel,
-                                     pvt->tmp_prt_buffer);
+                        "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
+                        bank, ras, cas, errors, specific);
+
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0,
+                                    syndrome,
+                                    branch >> 1, channel % 2, rank,
+                                    is_wr ? "Write error" : "Read error",
+                                    pvt->tmp_prt_buffer, NULL);
        }
        return;
 }
@@ -617,8 +605,7 @@ static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
 static int decode_mtr(struct i7300_pvt *pvt,
                      int slot, int ch, int branch,
                      struct i7300_dimm_info *dinfo,
-                     struct csrow_info *p_csrow,
-                     u32 *nr_pages)
+                     struct dimm_info *dimm)
 {
        int mtr, ans, addrBits, channel;
 
@@ -650,7 +637,6 @@ static int decode_mtr(struct i7300_pvt *pvt,
        addrBits -= 3;  /* 8 bits per bytes */
 
        dinfo->megabytes = 1 << addrBits;
-       *nr_pages = dinfo->megabytes << 8;
 
        debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
 
@@ -663,11 +649,6 @@ static int decode_mtr(struct i7300_pvt *pvt,
        debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
        debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes);
 
-       p_csrow->grain = 8;
-       p_csrow->mtype = MEM_FB_DDR2;
-       p_csrow->csrow_idx = slot;
-       p_csrow->page_mask = 0;
-
        /*
         * The type of error detection actually depends of the
         * mode of operation. When it is just one single memory chip, at
@@ -677,15 +658,18 @@ static int decode_mtr(struct i7300_pvt *pvt,
         * See datasheet Sections 7.3.6 to 7.3.8
         */
 
+       dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
+       dimm->grain = 8;
+       dimm->mtype = MEM_FB_DDR2;
        if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
-               p_csrow->edac_mode = EDAC_SECDED;
+               dimm->edac_mode = EDAC_SECDED;
                debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
        } else {
                debugf2("\t\tECC code is on Lockstep mode\n");
                if (MTR_DRAM_WIDTH(mtr) == 8)
-                       p_csrow->edac_mode = EDAC_S8ECD8ED;
+                       dimm->edac_mode = EDAC_S8ECD8ED;
                else
-                       p_csrow->edac_mode = EDAC_S4ECD4ED;
+                       dimm->edac_mode = EDAC_S4ECD4ED;
        }
 
        /* ask what device type on this row */
@@ -694,9 +678,9 @@ static int decode_mtr(struct i7300_pvt *pvt,
                        IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
                                            "enhanced" : "normal");
 
-               p_csrow->dtype = DEV_X8;
+               dimm->dtype = DEV_X8;
        } else
-               p_csrow->dtype = DEV_X4;
+               dimm->dtype = DEV_X4;
 
        return mtr;
 }
@@ -774,11 +758,10 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
 {
        struct i7300_pvt *pvt;
        struct i7300_dimm_info *dinfo;
-       struct csrow_info *p_csrow;
        int rc = -ENODEV;
        int mtr;
        int ch, branch, slot, channel;
-       u32 last_page = 0, nr_pages;
+       struct dimm_info *dimm;
 
        pvt = mci->pvt_info;
 
@@ -809,25 +792,23 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
                        pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
                                        where,
                                        &pvt->mtr[slot][branch]);
-                       for (ch = 0; ch < MAX_BRANCHES; ch++) {
+                       for (ch = 0; ch < MAX_CH_PER_BRANCH; ch++) {
                                int channel = to_channel(ch, branch);
 
+                               dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+                                              mci->n_layers, branch, ch, slot);
+
                                dinfo = &pvt->dimm_info[slot][channel];
-                               p_csrow = &mci->csrows[slot];
 
                                mtr = decode_mtr(pvt, slot, ch, branch,
-                                                dinfo, p_csrow, &nr_pages);
+                                                dinfo, dimm);
+
                                /* if no DIMMS on this row, continue */
                                if (!MTR_DIMMS_PRESENT(mtr))
                                        continue;
 
-                               /* Update per_csrow memory count */
-                               p_csrow->nr_pages += nr_pages;
-                               p_csrow->first_page = last_page;
-                               last_page += nr_pages;
-                               p_csrow->last_page = last_page;
-
                                rc = 0;
+
                        }
                }
        }
@@ -1042,10 +1023,8 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *id)
 {
        struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[3];
        struct i7300_pvt *pvt;
-       int num_channels;
-       int num_dimms_per_channel;
-       int num_csrows;
        int rc;
 
        /* wake up device */
@@ -1062,23 +1041,17 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
        if (PCI_FUNC(pdev->devfn) != 0)
                return -ENODEV;
 
-       /* As we don't have a motherboard identification routine to determine
-        * actual number of slots/dimms per channel, we thus utilize the
-        * resource as specified by the chipset. Thus, we might have
-        * have more DIMMs per channel than actually on the mobo, but this
-        * allows the driver to support up to the chipset max, without
-        * some fancy mobo determination.
-        */
-       num_dimms_per_channel = MAX_SLOTS;
-       num_channels = MAX_CHANNELS;
-       num_csrows = MAX_SLOTS * MAX_CHANNELS;
-
-       debugf0("MC: %s(): Number of - Channels= %d  DIMMS= %d  CSROWS= %d\n",
-               __func__, num_channels, num_dimms_per_channel, num_csrows);
-
        /* allocate a new MC control structure */
-       mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
-
+       layers[0].type = EDAC_MC_LAYER_BRANCH;
+       layers[0].size = MAX_BRANCHES;
+       layers[0].is_virt_csrow = false;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = MAX_CH_PER_BRANCH;
+       layers[1].is_virt_csrow = true;
+       layers[2].type = EDAC_MC_LAYER_SLOT;
+       layers[2].size = MAX_SLOTS;
+       layers[2].is_virt_csrow = true;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
        if (mci == NULL)
                return -ENOMEM;
 
index 7f1dfcc4e597f8e75971a6785b95ab73d56cefed..d27778f65a5dc3d8f5731cc4e6c1cff7cc6f4a1c 100644 (file)
@@ -221,7 +221,9 @@ struct i7core_inject {
 };
 
 struct i7core_channel {
-       u32             ranks;
+       bool            is_3dimms_present;
+       bool            is_single_4rank;
+       bool            has_4rank;
        u32             dimms;
 };
 
@@ -257,7 +259,6 @@ struct i7core_pvt {
        struct i7core_channel   channel[NUM_CHANS];
 
        int             ce_count_available;
-       int             csrow_map[NUM_CHANS][MAX_DIMMS];
 
                        /* ECC corrected errors counts per udimm */
        unsigned long   udimm_ce_count[MAX_DIMMS];
@@ -492,116 +493,15 @@ static void free_i7core_dev(struct i7core_dev *i7core_dev)
 /****************************************************************************
                        Memory check routines
  ****************************************************************************/
-static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
-                                         unsigned func)
-{
-       struct i7core_dev *i7core_dev = get_i7core_dev(socket);
-       int i;
-
-       if (!i7core_dev)
-               return NULL;
-
-       for (i = 0; i < i7core_dev->n_devs; i++) {
-               if (!i7core_dev->pdev[i])
-                       continue;
-
-               if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
-                   PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
-                       return i7core_dev->pdev[i];
-               }
-       }
-
-       return NULL;
-}
-
-/**
- * i7core_get_active_channels() - gets the number of channels and csrows
- * @socket:    Quick Path Interconnect socket
- * @channels:  Number of channels that will be returned
- * @csrows:    Number of csrows found
- *
- * Since EDAC core needs to know in advance the number of available channels
- * and csrows, in order to allocate memory for csrows/channels, it is needed
- * to run two similar steps. At the first step, implemented on this function,
- * it checks the number of csrows/channels present at one socket.
- * this is used in order to properly allocate the size of mci components.
- *
- * It should be noticed that none of the current available datasheets explain
- * or even mention how csrows are seen by the memory controller. So, we need
- * to add a fake description for csrows.
- * So, this driver is attributing one DIMM memory for one csrow.
- */
-static int i7core_get_active_channels(const u8 socket, unsigned *channels,
-                                     unsigned *csrows)
-{
-       struct pci_dev *pdev = NULL;
-       int i, j;
-       u32 status, control;
-
-       *channels = 0;
-       *csrows = 0;
-
-       pdev = get_pdev_slot_func(socket, 3, 0);
-       if (!pdev) {
-               i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
-                             socket);
-               return -ENODEV;
-       }
-
-       /* Device 3 function 0 reads */
-       pci_read_config_dword(pdev, MC_STATUS, &status);
-       pci_read_config_dword(pdev, MC_CONTROL, &control);
-
-       for (i = 0; i < NUM_CHANS; i++) {
-               u32 dimm_dod[3];
-               /* Check if the channel is active */
-               if (!(control & (1 << (8 + i))))
-                       continue;
-
-               /* Check if the channel is disabled */
-               if (status & (1 << i))
-                       continue;
-
-               pdev = get_pdev_slot_func(socket, i + 4, 1);
-               if (!pdev) {
-                       i7core_printk(KERN_ERR, "Couldn't find socket %d "
-                                               "fn %d.%d!!!\n",
-                                               socket, i + 4, 1);
-                       return -ENODEV;
-               }
-               /* Devices 4-6 function 1 */
-               pci_read_config_dword(pdev,
-                               MC_DOD_CH_DIMM0, &dimm_dod[0]);
-               pci_read_config_dword(pdev,
-                               MC_DOD_CH_DIMM1, &dimm_dod[1]);
-               pci_read_config_dword(pdev,
-                               MC_DOD_CH_DIMM2, &dimm_dod[2]);
 
-               (*channels)++;
-
-               for (j = 0; j < 3; j++) {
-                       if (!DIMM_PRESENT(dimm_dod[j]))
-                               continue;
-                       (*csrows)++;
-               }
-       }
-
-       debugf0("Number of active channels on socket %d: %d\n",
-               socket, *channels);
-
-       return 0;
-}
-
-static int get_dimm_config(const struct mem_ctl_info *mci)
+static int get_dimm_config(struct mem_ctl_info *mci)
 {
        struct i7core_pvt *pvt = mci->pvt_info;
-       struct csrow_info *csr;
        struct pci_dev *pdev;
        int i, j;
-       int csrow = 0;
-       unsigned long last_page = 0;
        enum edac_type mode;
        enum mem_type mtype;
+       struct dimm_info *dimm;
 
        /* Get data from the MC register, function 0 */
        pdev = pvt->pci_mcr[0];
@@ -657,21 +557,20 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
                pci_read_config_dword(pvt->pci_ch[i][0],
                                MC_CHANNEL_DIMM_INIT_PARAMS, &data);
 
-               pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
-                                               4 : 2;
+
+               if (data & THREE_DIMMS_PRESENT)
+                       pvt->channel[i].is_3dimms_present = true;
+
+               if (data & SINGLE_QUAD_RANK_PRESENT)
+                       pvt->channel[i].is_single_4rank = true;
+
+               if (data & QUAD_RANK_PRESENT)
+                       pvt->channel[i].has_4rank = true;
 
                if (data & REGISTERED_DIMM)
                        mtype = MEM_RDDR3;
                else
                        mtype = MEM_DDR3;
-#if 0
-               if (data & THREE_DIMMS_PRESENT)
-                       pvt->channel[i].dimms = 3;
-               else if (data & SINGLE_QUAD_RANK_PRESENT)
-                       pvt->channel[i].dimms = 1;
-               else
-                       pvt->channel[i].dimms = 2;
-#endif
 
                /* Devices 4-6 function 1 */
                pci_read_config_dword(pvt->pci_ch[i][1],
@@ -682,11 +581,13 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
                                MC_DOD_CH_DIMM2, &dimm_dod[2]);
 
                debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
-                       "%d ranks, %cDIMMs\n",
+                       "%s%s%s%cDIMMs\n",
                        i,
                        RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
                        data,
-                       pvt->channel[i].ranks,
+                       pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
+                       pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "",
+                       pvt->channel[i].has_4rank ? "HAS_4R " : "",
                        (data & REGISTERED_DIMM) ? 'R' : 'U');
 
                for (j = 0; j < 3; j++) {
@@ -696,6 +597,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
                        if (!DIMM_PRESENT(dimm_dod[j]))
                                continue;
 
+                       dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+                                      i, j, 0);
                        banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
                        ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
                        rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
@@ -704,8 +607,6 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
                        /* DDR3 has 8 I/O banks */
                        size = (rows * cols * banks * ranks) >> (20 - 3);
 
-                       pvt->channel[i].dimms++;
-
                        debugf0("\tdimm %d %d Mb offset: %x, "
                                "bank: %d, rank: %d, row: %#x, col: %#x\n",
                                j, size,
@@ -714,44 +615,28 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
 
                        npages = MiB_TO_PAGES(size);
 
-                       csr = &mci->csrows[csrow];
-                       csr->first_page = last_page + 1;
-                       last_page += npages;
-                       csr->last_page = last_page;
-                       csr->nr_pages = npages;
-
-                       csr->page_mask = 0;
-                       csr->grain = 8;
-                       csr->csrow_idx = csrow;
-                       csr->nr_channels = 1;
-
-                       csr->channels[0].chan_idx = i;
-                       csr->channels[0].ce_count = 0;
-
-                       pvt->csrow_map[i][j] = csrow;
+                       dimm->nr_pages = npages;
 
                        switch (banks) {
                        case 4:
-                               csr->dtype = DEV_X4;
+                               dimm->dtype = DEV_X4;
                                break;
                        case 8:
-                               csr->dtype = DEV_X8;
+                               dimm->dtype = DEV_X8;
                                break;
                        case 16:
-                               csr->dtype = DEV_X16;
+                               dimm->dtype = DEV_X16;
                                break;
                        default:
-                               csr->dtype = DEV_UNKNOWN;
+                               dimm->dtype = DEV_UNKNOWN;
                        }
 
-                       csr->edac_mode = mode;
-                       csr->mtype = mtype;
-                       snprintf(csr->channels[0].label,
-                                       sizeof(csr->channels[0].label),
-                                       "CPU#%uChannel#%u_DIMM#%u",
-                                       pvt->i7core_dev->socket, i, j);
-
-                       csrow++;
+                       snprintf(dimm->label, sizeof(dimm->label),
+                                "CPU#%uChannel#%u_DIMM#%u",
+                                pvt->i7core_dev->socket, i, j);
+                       dimm->grain = 8;
+                       dimm->edac_mode = mode;
+                       dimm->mtype = mtype;
                }
 
                pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
@@ -1567,22 +1452,16 @@ error:
 /****************************************************************************
                        Error check routines
  ****************************************************************************/
-static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
+static void i7core_rdimm_update_errcount(struct mem_ctl_info *mci,
                                      const int chan,
                                      const int dimm,
                                      const int add)
 {
-       char *msg;
-       struct i7core_pvt *pvt = mci->pvt_info;
-       int row = pvt->csrow_map[chan][dimm], i;
+       int i;
 
        for (i = 0; i < add; i++) {
-               msg = kasprintf(GFP_KERNEL, "Corrected error "
-                               "(Socket=%d channel=%d dimm=%d)",
-                               pvt->i7core_dev->socket, chan, dimm);
-
-               edac_mc_handle_fbd_ce(mci, row, 0, msg);
-               kfree (msg);
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+                                    chan, dimm, -1, "error", "", NULL);
        }
 }
 
@@ -1623,11 +1502,11 @@ static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
 
        /*updated the edac core */
        if (add0 != 0)
-               i7core_rdimm_update_csrow(mci, chan, 0, add0);
+               i7core_rdimm_update_errcount(mci, chan, 0, add0);
        if (add1 != 0)
-               i7core_rdimm_update_csrow(mci, chan, 1, add1);
+               i7core_rdimm_update_errcount(mci, chan, 1, add1);
        if (add2 != 0)
-               i7core_rdimm_update_csrow(mci, chan, 2, add2);
+               i7core_rdimm_update_errcount(mci, chan, 2, add2);
 
 }
 
@@ -1747,20 +1626,30 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
                                    const struct mce *m)
 {
        struct i7core_pvt *pvt = mci->pvt_info;
-       char *type, *optype, *err, *msg;
+       char *type, *optype, *err, msg[80];
+       enum hw_event_mc_err_type tp_event;
        unsigned long error = m->status & 0x1ff0000l;
+       bool uncorrected_error = m->mcgstatus & 1ll << 61;
+       bool ripv = m->mcgstatus & 1;
        u32 optypenum = (m->status >> 4) & 0x07;
        u32 core_err_cnt = (m->status >> 38) & 0x7fff;
        u32 dimm = (m->misc >> 16) & 0x3;
        u32 channel = (m->misc >> 18) & 0x3;
        u32 syndrome = m->misc >> 32;
        u32 errnum = find_first_bit(&error, 32);
-       int csrow;
 
-       if (m->mcgstatus & 1)
-               type = "FATAL";
-       else
-               type = "NON_FATAL";
+       if (uncorrected_error) {
+               if (ripv) {
+                       type = "FATAL";
+                       tp_event = HW_EVENT_ERR_FATAL;
+               } else {
+                       type = "NON_FATAL";
+                       tp_event = HW_EVENT_ERR_UNCORRECTED;
+               }
+       } else {
+               type = "CORRECTED";
+               tp_event = HW_EVENT_ERR_CORRECTED;
+       }
 
        switch (optypenum) {
        case 0:
@@ -1815,27 +1704,20 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
                err = "unknown";
        }
 
-       /* FIXME: should convert addr into bank and rank information */
-       msg = kasprintf(GFP_ATOMIC,
-               "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
-               "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
-               type, (long long) m->addr, m->cpu, dimm, channel,
-               syndrome, core_err_cnt, (long long)m->status,
-               (long long)m->misc, optype, err);
-
-       debugf0("%s", msg);
-
-       csrow = pvt->csrow_map[channel][dimm];
+       snprintf(msg, sizeof(msg), "count=%d %s", core_err_cnt, optype);
 
-       /* Call the helper to output message */
-       if (m->mcgstatus & 1)
-               edac_mc_handle_fbd_ue(mci, csrow, 0,
-                               0 /* FIXME: should be channel here */, msg);
-       else if (!pvt->is_registered)
-               edac_mc_handle_fbd_ce(mci, csrow,
-                               0 /* FIXME: should be channel here */, msg);
-
-       kfree(msg);
+       /*
+        * Call the helper to output message
+        * FIXME: what to do if core_err_cnt > 1? Currently, it generates
+        * only one event
+        */
+       if (uncorrected_error || !pvt->is_registered)
+               edac_mc_handle_error(tp_event, mci,
+                                    m->addr >> PAGE_SHIFT,
+                                    m->addr & ~PAGE_MASK,
+                                    syndrome,
+                                    channel, dimm, -1,
+                                    err, msg, m);
 }
 
 /*
@@ -2252,15 +2134,19 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
 {
        struct mem_ctl_info *mci;
        struct i7core_pvt *pvt;
-       int rc, channels, csrows;
-
-       /* Check the number of active and not disabled channels */
-       rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
-       if (unlikely(rc < 0))
-               return rc;
+       int rc;
+       struct edac_mc_layer layers[2];
 
        /* allocate a new MC control structure */
-       mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
+
+       layers[0].type = EDAC_MC_LAYER_CHANNEL;
+       layers[0].size = NUM_CHANS;
+       layers[0].is_virt_csrow = false;
+       layers[1].type = EDAC_MC_LAYER_SLOT;
+       layers[1].size = MAX_DIMMS;
+       layers[1].is_virt_csrow = true;
+       mci = edac_mc_alloc(i7core_dev->socket, ARRAY_SIZE(layers), layers,
+                           sizeof(*pvt));
        if (unlikely(!mci))
                return -ENOMEM;
 
index 3bf2b2f490e7d98ad16374acf4c5e209b32de6cb..52072c28a8a652466f31ed8be165b591098143ca 100644 (file)
@@ -12,7 +12,7 @@
  * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>.
  *
  * Written with reference to 82443BX Host Bridge Datasheet:
- * http://download.intel.com/design/chipsets/datashts/29063301.pdf 
+ * http://download.intel.com/design/chipsets/datashts/29063301.pdf
  * references to this document given in [].
  *
  * This module doesn't support the 440LX, but it may be possible to
@@ -156,19 +156,19 @@ static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
        if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
                error_found = 1;
                if (handle_errors)
-                       edac_mc_handle_ce(mci, page, pageoffset,
-                               /* 440BX/GX don't make syndrome information
-                                * available */
-                               0, edac_mc_find_csrow_by_page(mci, page), 0,
-                               mci->ctl_name);
+                       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                            page, pageoffset, 0,
+                                            edac_mc_find_csrow_by_page(mci, page),
+                                            0, -1, mci->ctl_name, "", NULL);
        }
 
        if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
                error_found = 1;
                if (handle_errors)
-                       edac_mc_handle_ue(mci, page, pageoffset,
-                                       edac_mc_find_csrow_by_page(mci, page),
-                                       mci->ctl_name);
+                       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                            page, pageoffset, 0,
+                                            edac_mc_find_csrow_by_page(mci, page),
+                                            0, -1, mci->ctl_name, "", NULL);
        }
 
        return error_found;
@@ -189,6 +189,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
                                enum mem_type mtype)
 {
        struct csrow_info *csrow;
+       struct dimm_info *dimm;
        int index;
        u8 drbar, dramc;
        u32 row_base, row_high_limit, row_high_limit_last;
@@ -197,6 +198,8 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
        row_high_limit_last = 0;
        for (index = 0; index < mci->nr_csrows; index++) {
                csrow = &mci->csrows[index];
+               dimm = csrow->channels[0].dimm;
+
                pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
                debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
                        mci->mc_idx, __FILE__, __func__, index, drbar);
@@ -217,14 +220,14 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
                row_base = row_high_limit_last;
                csrow->first_page = row_base >> PAGE_SHIFT;
                csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
-               csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+               dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
                /* EAP reports in 4kilobyte granularity [61] */
-               csrow->grain = 1 << 12;
-               csrow->mtype = mtype;
+               dimm->grain = 1 << 12;
+               dimm->mtype = mtype;
                /* I don't think 440BX can tell you device type? FIXME? */
-               csrow->dtype = DEV_UNKNOWN;
+               dimm->dtype = DEV_UNKNOWN;
                /* Mode is global to all rows on 440BX */
-               csrow->edac_mode = edac_mode;
+               dimm->edac_mode = edac_mode;
                row_high_limit_last = row_high_limit;
        }
 }
@@ -232,6 +235,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
 static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
 {
        struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[2];
        u8 dramc;
        u32 nbxcfg, ecc_mode;
        enum mem_type mtype;
@@ -245,8 +249,13 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
        if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
                return -EIO;
 
-       mci = edac_mc_alloc(0, I82443BXGX_NR_CSROWS, I82443BXGX_NR_CHANS, 0);
-
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = I82443BXGX_NR_CSROWS;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = I82443BXGX_NR_CHANS;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
        if (mci == NULL)
                return -ENOMEM;
 
index c779092d18d1cf823350faf64890c2e0c94d9736..08045059d10bcc64a06318c9fcdd7826e7ea83fc 100644 (file)
@@ -99,6 +99,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
                                struct i82860_error_info *info,
                                int handle_errors)
 {
+       struct dimm_info *dimm;
        int row;
 
        if (!(info->errsts2 & 0x0003))
@@ -108,18 +109,25 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
                return 1;
 
        if ((info->errsts ^ info->errsts2) & 0x0003) {
-               edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+                                    -1, -1, -1, "UE overwrote CE", "", NULL);
                info->errsts = info->errsts2;
        }
 
        info->eap >>= PAGE_SHIFT;
        row = edac_mc_find_csrow_by_page(mci, info->eap);
+       dimm = mci->csrows[row].channels[0].dimm;
 
        if (info->errsts & 0x0002)
-               edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                    info->eap, 0, 0,
+                                    dimm->location[0], dimm->location[1], -1,
+                                    "i82860 UE", "", NULL);
        else
-               edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0,
-                               "i82860 UE");
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                    info->eap, 0, info->derrsyn,
+                                    dimm->location[0], dimm->location[1], -1,
+                                    "i82860 CE", "", NULL);
 
        return 1;
 }
@@ -140,6 +148,7 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
        u16 value;
        u32 cumul_size;
        struct csrow_info *csrow;
+       struct dimm_info *dimm;
        int index;
 
        pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
@@ -153,6 +162,8 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
         */
        for (index = 0; index < mci->nr_csrows; index++) {
                csrow = &mci->csrows[index];
+               dimm = csrow->channels[0].dimm;
+
                pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
                cumul_size = (value & I82860_GBA_MASK) <<
                        (I82860_GBA_SHIFT - PAGE_SHIFT);
@@ -164,30 +175,38 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
 
                csrow->first_page = last_cumul_size;
                csrow->last_page = cumul_size - 1;
-               csrow->nr_pages = cumul_size - last_cumul_size;
+               dimm->nr_pages = cumul_size - last_cumul_size;
                last_cumul_size = cumul_size;
-               csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */
-               csrow->mtype = MEM_RMBS;
-               csrow->dtype = DEV_UNKNOWN;
-               csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
+               dimm->grain = 1 << 12;  /* I82860_EAP has 4KiB resolution */
+               dimm->mtype = MEM_RMBS;
+               dimm->dtype = DEV_UNKNOWN;
+               dimm->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
        }
 }
 
 static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
 {
        struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[2];
        struct i82860_error_info discard;
 
-       /* RDRAM has channels but these don't map onto the abstractions that
-          edac uses.
-          The device groups from the GRA registers seem to map reasonably
-          well onto the notion of a chip select row.
-          There are 16 GRA registers and since the name is associated with
-          the channel and the GRA registers map to physical devices so we are
-          going to make 1 channel for group.
+       /*
+        * RDRAM has channels but these don't map onto the csrow abstraction.
+        * According with the datasheet, there are 2 Rambus channels, supporting
+        * up to 16 direct RDRAM devices.
+        * The device groups from the GRA registers seem to map reasonably
+        * well onto the notion of a chip select row.
+        * There are 16 GRA registers and since the name is associated with
+        * the channel and the GRA registers map to physical devices so we are
+        * going to make 1 channel for group.
         */
-       mci = edac_mc_alloc(0, 16, 1, 0);
-
+       layers[0].type = EDAC_MC_LAYER_CHANNEL;
+       layers[0].size = 2;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_SLOT;
+       layers[1].size = 8;
+       layers[1].is_virt_csrow = true;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
        if (!mci)
                return -ENOMEM;
 
index 10f15d85fb5eee63659c8ba5667826d25a5e2a66..b613e31c16e5de18f47a8a335690cfc4c487a752 100644 (file)
@@ -38,7 +38,8 @@
 #endif                         /* PCI_DEVICE_ID_INTEL_82875_6 */
 
 /* four csrows in dual channel, eight in single channel */
-#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
+#define I82875P_NR_DIMMS               8
+#define I82875P_NR_CSROWS(nr_chans)    (I82875P_NR_DIMMS / (nr_chans))
 
 /* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
 #define I82875P_EAP            0x58    /* Error Address Pointer (32b)
@@ -235,7 +236,9 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
                return 1;
 
        if ((info->errsts ^ info->errsts2) & 0x0081) {
-               edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+                                    -1, -1, -1,
+                                    "UE overwrote CE", "", NULL);
                info->errsts = info->errsts2;
        }
 
@@ -243,11 +246,15 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
        row = edac_mc_find_csrow_by_page(mci, info->eap);
 
        if (info->errsts & 0x0080)
-               edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                    info->eap, 0, 0,
+                                    row, -1, -1,
+                                    "i82875p UE", "", NULL);
        else
-               edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
-                               multi_chan ? (info->des & 0x1) : 0,
-                               "i82875p CE");
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                    info->eap, 0, info->derrsyn,
+                                    row, multi_chan ? (info->des & 0x1) : 0,
+                                    -1, "i82875p CE", "", NULL);
 
        return 1;
 }
@@ -342,11 +349,13 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
                                void __iomem * ovrfl_window, u32 drc)
 {
        struct csrow_info *csrow;
+       struct dimm_info *dimm;
+       unsigned nr_chans = dual_channel_active(drc) + 1;
        unsigned long last_cumul_size;
        u8 value;
        u32 drc_ddim;           /* DRAM Data Integrity Mode 0=none,2=edac */
-       u32 cumul_size;
-       int index;
+       u32 cumul_size, nr_pages;
+       int index, j;
 
        drc_ddim = (drc >> 18) & 0x1;
        last_cumul_size = 0;
@@ -369,12 +378,18 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
 
                csrow->first_page = last_cumul_size;
                csrow->last_page = cumul_size - 1;
-               csrow->nr_pages = cumul_size - last_cumul_size;
+               nr_pages = cumul_size - last_cumul_size;
                last_cumul_size = cumul_size;
-               csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */
-               csrow->mtype = MEM_DDR;
-               csrow->dtype = DEV_UNKNOWN;
-               csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+
+               for (j = 0; j < nr_chans; j++) {
+                       dimm = csrow->channels[j].dimm;
+
+                       dimm->nr_pages = nr_pages / nr_chans;
+                       dimm->grain = 1 << 12;  /* I82875P_EAP has 4KiB resolution */
+                       dimm->mtype = MEM_DDR;
+                       dimm->dtype = DEV_UNKNOWN;
+                       dimm->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+               }
        }
 }
 
@@ -382,6 +397,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
 {
        int rc = -ENODEV;
        struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[2];
        struct i82875p_pvt *pvt;
        struct pci_dev *ovrfl_pdev;
        void __iomem *ovrfl_window;
@@ -397,9 +413,14 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
                return -ENODEV;
        drc = readl(ovrfl_window + I82875P_DRC);
        nr_chans = dual_channel_active(drc) + 1;
-       mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
-                       nr_chans, 0);
 
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = I82875P_NR_CSROWS(nr_chans);
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = nr_chans;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
        if (!mci) {
                rc = -ENOMEM;
                goto fail0;
index 0cd8368f88f8c3e634da1e09f2d2df43b67ef1f1..433332c7cdbabe3bf54fbce0e9622403af29869c 100644 (file)
@@ -29,7 +29,8 @@
 #define PCI_DEVICE_ID_INTEL_82975_0    0x277c
 #endif                         /* PCI_DEVICE_ID_INTEL_82975_0 */
 
-#define I82975X_NR_CSROWS(nr_chans)            (8/(nr_chans))
+#define I82975X_NR_DIMMS               8
+#define I82975X_NR_CSROWS(nr_chans)    (I82975X_NR_DIMMS / (nr_chans))
 
 /* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */
 #define I82975X_EAP            0x58    /* Dram Error Address Pointer (32b)
@@ -287,7 +288,8 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
                return 1;
 
        if ((info->errsts ^ info->errsts2) & 0x0003) {
-               edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+                                    -1, -1, -1, "UE overwrote CE", "", NULL);
                info->errsts = info->errsts2;
        }
 
@@ -309,13 +311,18 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
        chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1;
        offst = info->eap
                        & ((1 << PAGE_SHIFT) -
-                               (1 << mci->csrows[row].grain));
+                          (1 << mci->csrows[row].channels[chan].dimm->grain));
 
        if (info->errsts & 0x0002)
-               edac_mc_handle_ue(mci, page, offst , row, "i82975x UE");
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                    page, offst, 0,
+                                    row, -1, -1,
+                                    "i82975x UE", "", NULL);
        else
-               edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
-                               chan, "i82975x CE");
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                    page, offst, info->derrsyn,
+                                    row, chan ? chan : 0, -1,
+                                    "i82975x CE", "", NULL);
 
        return 1;
 }
@@ -370,8 +377,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
        struct csrow_info *csrow;
        unsigned long last_cumul_size;
        u8 value;
-       u32 cumul_size;
+       u32 cumul_size, nr_pages;
        int index, chan;
+       struct dimm_info *dimm;
+       enum dev_type dtype;
 
        last_cumul_size = 0;
 
@@ -400,28 +409,33 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
                debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
                        cumul_size);
 
+               nr_pages = cumul_size - last_cumul_size;
+               if (!nr_pages)
+                       continue;
+
                /*
                 * Initialise dram labels
                 * index values:
                 *   [0-7] for single-channel; i.e. csrow->nr_channels = 1
                 *   [0-3] for dual-channel; i.e. csrow->nr_channels = 2
                 */
-               for (chan = 0; chan < csrow->nr_channels; chan++)
-                       strncpy(csrow->channels[chan].label,
+               dtype = i82975x_dram_type(mch_window, index);
+               for (chan = 0; chan < csrow->nr_channels; chan++) {
+                       dimm = mci->csrows[index].channels[chan].dimm;
+
+                       dimm->nr_pages = nr_pages / csrow->nr_channels;
+                       strncpy(csrow->channels[chan].dimm->label,
                                        labels[(index >> 1) + (chan * 2)],
                                        EDAC_MC_LABEL_LEN);
-
-               if (cumul_size == last_cumul_size)
-                       continue;       /* not populated */
+                       dimm->grain = 1 << 7;   /* 128Byte cache-line resolution */
+                       dimm->dtype = i82975x_dram_type(mch_window, index);
+                       dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
+                       dimm->edac_mode = EDAC_SECDED; /* only supported */
+               }
 
                csrow->first_page = last_cumul_size;
                csrow->last_page = cumul_size - 1;
-               csrow->nr_pages = cumul_size - last_cumul_size;
                last_cumul_size = cumul_size;
-               csrow->grain = 1 << 7;  /* 128Byte cache-line resolution */
-               csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
-               csrow->dtype = i82975x_dram_type(mch_window, index);
-               csrow->edac_mode = EDAC_SECDED; /* only supported */
        }
 }
 
@@ -463,6 +477,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
 {
        int rc = -ENODEV;
        struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[2];
        struct i82975x_pvt *pvt;
        void __iomem *mch_window;
        u32 mchbar;
@@ -531,8 +546,13 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
        chans = dual_channel_active(mch_window) + 1;
 
        /* assuming only one controller, index thus is 0 */
-       mci = edac_mc_alloc(sizeof(*pvt), I82975X_NR_CSROWS(chans),
-                                       chans, 0);
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = I82975X_NR_DIMMS;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = I82975X_NR_CSROWS(chans);
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
        if (!mci) {
                rc = -ENOMEM;
                goto fail1;
index c6074c5cd1ef49bd492d2e94793686a2710d694f..8c87a5e870577c06b0082ca7d3b19eb7a81a252b 100644 (file)
@@ -5,8 +5,6 @@
 
 #include <asm/mce.h>
 
-#define BIT_64(n)                      (U64_C(1) << (n))
-
 #define EC(x)                          ((x) & 0xffff)
 #define XEC(x, mask)                   (((x) >> 16) & mask)
 
index 73464a62adf74ae1483a16ad71847c03fc03169f..4c402353ba98d9aeceb5bbd8062eeec916f39d9d 100644 (file)
@@ -854,12 +854,16 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
                mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
 
        if (err_detect & DDR_EDE_SBE)
-               edac_mc_handle_ce(mci, pfn, err_addr & ~PAGE_MASK,
-                                 syndrome, row_index, 0, mci->ctl_name);
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                    pfn, err_addr & ~PAGE_MASK, syndrome,
+                                    row_index, 0, -1,
+                                    mci->ctl_name, "", NULL);
 
        if (err_detect & DDR_EDE_MBE)
-               edac_mc_handle_ue(mci, pfn, err_addr & ~PAGE_MASK,
-                                 row_index, mci->ctl_name);
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                    pfn, err_addr & ~PAGE_MASK, syndrome,
+                                    row_index, 0, -1,
+                                    mci->ctl_name, "", NULL);
 
        out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
 }
@@ -883,6 +887,7 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
 {
        struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
        struct csrow_info *csrow;
+       struct dimm_info *dimm;
        u32 sdram_ctl;
        u32 sdtype;
        enum mem_type mtype;
@@ -929,6 +934,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
                u32 end;
 
                csrow = &mci->csrows[index];
+               dimm = csrow->channels[0].dimm;
+
                cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
                                  (index * MPC85XX_MC_CS_BNDS_OFS));
 
@@ -944,19 +951,21 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
 
                csrow->first_page = start;
                csrow->last_page = end;
-               csrow->nr_pages = end + 1 - start;
-               csrow->grain = 8;
-               csrow->mtype = mtype;
-               csrow->dtype = DEV_UNKNOWN;
+
+               dimm->nr_pages = end + 1 - start;
+               dimm->grain = 8;
+               dimm->mtype = mtype;
+               dimm->dtype = DEV_UNKNOWN;
                if (sdram_ctl & DSC_X32_EN)
-                       csrow->dtype = DEV_X32;
-               csrow->edac_mode = EDAC_SECDED;
+                       dimm->dtype = DEV_X32;
+               dimm->edac_mode = EDAC_SECDED;
        }
 }
 
 static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
 {
        struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[2];
        struct mpc85xx_mc_pdata *pdata;
        struct resource r;
        u32 sdram_ctl;
@@ -965,7 +974,13 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
        if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
                return -ENOMEM;
 
-       mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx);
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = 4;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = 1;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+                           sizeof(*pdata));
        if (!mci) {
                devres_release_group(&op->dev, mpc85xx_mc_err_probe);
                return -ENOMEM;
index 7e5ff367705c66c25f8ad914772729a04cf585f4..b0bb5a3d2527698c1f4659997fc6950526e5e215 100644 (file)
@@ -611,12 +611,17 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci)
 
        /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
        if (!(reg & 0x1))
-               edac_mc_handle_ce(mci, err_addr >> PAGE_SHIFT,
-                                 err_addr & PAGE_MASK, syndrome, 0, 0,
-                                 mci->ctl_name);
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                    err_addr >> PAGE_SHIFT,
+                                    err_addr & PAGE_MASK, syndrome,
+                                    0, 0, -1,
+                                    mci->ctl_name, "", NULL);
        else    /* 2 bit error, UE */
-               edac_mc_handle_ue(mci, err_addr >> PAGE_SHIFT,
-                                 err_addr & PAGE_MASK, 0, mci->ctl_name);
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                    err_addr >> PAGE_SHIFT,
+                                    err_addr & PAGE_MASK, 0,
+                                    0, 0, -1,
+                                    mci->ctl_name, "", NULL);
 
        /* clear the error */
        out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
@@ -656,6 +661,8 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
                                struct mv64x60_mc_pdata *pdata)
 {
        struct csrow_info *csrow;
+       struct dimm_info *dimm;
+
        u32 devtype;
        u32 ctl;
 
@@ -664,35 +671,36 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
        ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
 
        csrow = &mci->csrows[0];
-       csrow->first_page = 0;
-       csrow->nr_pages = pdata->total_mem >> PAGE_SHIFT;
-       csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
-       csrow->grain = 8;
+       dimm = csrow->channels[0].dimm;
+
+       dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
+       dimm->grain = 8;
 
-       csrow->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
+       dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
 
        devtype = (ctl >> 20) & 0x3;
        switch (devtype) {
        case 0x0:
-               csrow->dtype = DEV_X32;
+               dimm->dtype = DEV_X32;
                break;
        case 0x2:               /* could be X8 too, but no way to tell */
-               csrow->dtype = DEV_X16;
+               dimm->dtype = DEV_X16;
                break;
        case 0x3:
-               csrow->dtype = DEV_X4;
+               dimm->dtype = DEV_X4;
                break;
        default:
-               csrow->dtype = DEV_UNKNOWN;
+               dimm->dtype = DEV_UNKNOWN;
                break;
        }
 
-       csrow->edac_mode = EDAC_SECDED;
+       dimm->edac_mode = EDAC_SECDED;
 }
 
 static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
 {
        struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[2];
        struct mv64x60_mc_pdata *pdata;
        struct resource *r;
        u32 ctl;
@@ -701,7 +709,14 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
        if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
                return -ENOMEM;
 
-       mci = edac_mc_alloc(sizeof(struct mv64x60_mc_pdata), 1, 1, edac_mc_idx);
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = 1;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = 1;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+                           sizeof(struct mv64x60_mc_pdata));
        if (!mci) {
                printk(KERN_ERR "%s: No memory for CPU err\n", __func__);
                devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
index 7f71ee43674486fe3051013729b7b287225f4619..b095a906a994bc7b092362f21ed9f31eecf8d288 100644 (file)
@@ -110,15 +110,16 @@ static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
        /* uncorrectable/multi-bit errors */
        if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
                      MCDEBUG_ERRSTA_RFL_STATUS)) {
-               edac_mc_handle_ue(mci, mci->csrows[cs].first_page, 0,
-                                 cs, mci->ctl_name);
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                    mci->csrows[cs].first_page, 0, 0,
+                                    cs, 0, -1, mci->ctl_name, "", NULL);
        }
 
        /* correctable/single-bit errors */
-       if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) {
-               edac_mc_handle_ce(mci, mci->csrows[cs].first_page, 0,
-                                 0, cs, 0, mci->ctl_name);
-       }
+       if (errsta & MCDEBUG_ERRSTA_SBE_STATUS)
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                    mci->csrows[cs].first_page, 0, 0,
+                                    cs, 0, -1, mci->ctl_name, "", NULL);
 }
 
 static void pasemi_edac_check(struct mem_ctl_info *mci)
@@ -135,11 +136,13 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
                                   enum edac_type edac_mode)
 {
        struct csrow_info *csrow;
+       struct dimm_info *dimm;
        u32 rankcfg;
        int index;
 
        for (index = 0; index < mci->nr_csrows; index++) {
                csrow = &mci->csrows[index];
+               dimm = csrow->channels[0].dimm;
 
                pci_read_config_dword(pdev,
                                      MCDRAM_RANKCFG + (index * 12),
@@ -151,20 +154,20 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
                switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >>
                        MCDRAM_RANKCFG_TYPE_SIZE_S) {
                case 0:
-                       csrow->nr_pages = 128 << (20 - PAGE_SHIFT);
+                       dimm->nr_pages = 128 << (20 - PAGE_SHIFT);
                        break;
                case 1:
-                       csrow->nr_pages = 256 << (20 - PAGE_SHIFT);
+                       dimm->nr_pages = 256 << (20 - PAGE_SHIFT);
                        break;
                case 2:
                case 3:
-                       csrow->nr_pages = 512 << (20 - PAGE_SHIFT);
+                       dimm->nr_pages = 512 << (20 - PAGE_SHIFT);
                        break;
                case 4:
-                       csrow->nr_pages = 1024 << (20 - PAGE_SHIFT);
+                       dimm->nr_pages = 1024 << (20 - PAGE_SHIFT);
                        break;
                case 5:
-                       csrow->nr_pages = 2048 << (20 - PAGE_SHIFT);
+                       dimm->nr_pages = 2048 << (20 - PAGE_SHIFT);
                        break;
                default:
                        edac_mc_printk(mci, KERN_ERR,
@@ -174,13 +177,13 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
                }
 
                csrow->first_page = last_page_in_mmc;
-               csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
-               last_page_in_mmc += csrow->nr_pages;
+               csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
+               last_page_in_mmc += dimm->nr_pages;
                csrow->page_mask = 0;
-               csrow->grain = PASEMI_EDAC_ERROR_GRAIN;
-               csrow->mtype = MEM_DDR;
-               csrow->dtype = DEV_UNKNOWN;
-               csrow->edac_mode = edac_mode;
+               dimm->grain = PASEMI_EDAC_ERROR_GRAIN;
+               dimm->mtype = MEM_DDR;
+               dimm->dtype = DEV_UNKNOWN;
+               dimm->edac_mode = edac_mode;
        }
        return 0;
 }
@@ -189,6 +192,7 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
                const struct pci_device_id *ent)
 {
        struct mem_ctl_info *mci = NULL;
+       struct edac_mc_layer layers[2];
        u32 errctl1, errcor, scrub, mcen;
 
        pci_read_config_dword(pdev, MCCFG_MCEN, &mcen);
@@ -205,9 +209,14 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
                MCDEBUG_ERRCTL1_RFL_LOG_EN;
        pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1);
 
-       mci = edac_mc_alloc(0, PASEMI_EDAC_NR_CSROWS, PASEMI_EDAC_NR_CHANS,
-                               system_mmc_id++);
-
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = PASEMI_EDAC_NR_CSROWS;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = PASEMI_EDAC_NR_CHANS;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers,
+                           0);
        if (mci == NULL)
                return -ENOMEM;
 
index d427c69bb8b1ebf811b4aeadcf06ac532b3be4b1..f3f9fed06ad7d34ec8e3607f8141da6256e88e49 100644 (file)
@@ -727,7 +727,10 @@ ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
 
        for (row = 0; row < mci->nr_csrows; row++)
                if (ppc4xx_edac_check_bank_error(status, row))
-                       edac_mc_handle_ce_no_info(mci, message);
+                       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                            0, 0, 0,
+                                            row, 0, -1,
+                                            message, "", NULL);
 }
 
 /**
@@ -755,7 +758,10 @@ ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
 
        for (row = 0; row < mci->nr_csrows; row++)
                if (ppc4xx_edac_check_bank_error(status, row))
-                       edac_mc_handle_ue(mci, page, offset, row, message);
+                       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                            page, offset, 0,
+                                            row, 0, -1,
+                                            message, "", NULL);
 }
 
 /**
@@ -895,9 +901,8 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
        enum mem_type mtype;
        enum dev_type dtype;
        enum edac_type edac_mode;
-       int row;
-       u32 mbxcf, size;
-       static u32 ppc4xx_last_page;
+       int row, j;
+       u32 mbxcf, size, nr_pages;
 
        /* Establish the memory type and width */
 
@@ -948,7 +953,7 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
                case SDRAM_MBCF_SZ_2GB:
                case SDRAM_MBCF_SZ_4GB:
                case SDRAM_MBCF_SZ_8GB:
-                       csi->nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
+                       nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
                        break;
                default:
                        ppc4xx_edac_mc_printk(KERN_ERR, mci,
@@ -959,10 +964,6 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
                        goto done;
                }
 
-               csi->first_page = ppc4xx_last_page;
-               csi->last_page  = csi->first_page + csi->nr_pages - 1;
-               csi->page_mask  = 0;
-
                /*
                 * It's unclear exactly what grain should be set to
                 * here. The SDRAM_ECCES register allows resolution of
@@ -975,15 +976,17 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
                 * possible values would be the PLB width (16), the
                 * page size (PAGE_SIZE) or the memory width (2 or 4).
                 */
+               for (j = 0; j < csi->nr_channels; j++) {
+                       struct dimm_info *dimm = csi->channels[j].dimm;
 
-               csi->grain      = 1;
-
-               csi->mtype      = mtype;
-               csi->dtype      = dtype;
+                       dimm->nr_pages  = nr_pages / csi->nr_channels;
+                       dimm->grain     = 1;
 
-               csi->edac_mode  = edac_mode;
+                       dimm->mtype     = mtype;
+                       dimm->dtype     = dtype;
 
-               ppc4xx_last_page += csi->nr_pages;
+                       dimm->edac_mode = edac_mode;
+               }
        }
 
  done:
@@ -1236,6 +1239,7 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
        dcr_host_t dcr_host;
        const struct device_node *np = op->dev.of_node;
        struct mem_ctl_info *mci = NULL;
+       struct edac_mc_layer layers[2];
        static int ppc4xx_edac_instance;
 
        /*
@@ -1281,12 +1285,14 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
         * controller instance and perform the appropriate
         * initialization.
         */
-
-       mci = edac_mc_alloc(sizeof(struct ppc4xx_edac_pdata),
-                           ppc4xx_edac_nr_csrows,
-                           ppc4xx_edac_nr_chans,
-                           ppc4xx_edac_instance);
-
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = ppc4xx_edac_nr_csrows;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = ppc4xx_edac_nr_chans;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(ppc4xx_edac_instance, ARRAY_SIZE(layers), layers,
+                           sizeof(struct ppc4xx_edac_pdata));
        if (mci == NULL) {
                ppc4xx_edac_printk(KERN_ERR, "%s: "
                                   "Failed to allocate EDAC MC instance!\n",
index 6d908ad72d6458c2dce43983672a92f174e69c5c..e1cacd164f316d3821e750f106a82540e536d794 100644 (file)
@@ -179,10 +179,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
                error_found = 1;
 
                if (handle_errors)
-                       edac_mc_handle_ce(mci, page, 0, /* not avail */
-                                       syndrome,
-                                       edac_mc_find_csrow_by_page(mci, page),
-                                       0, mci->ctl_name);
+                       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                            page, 0, syndrome,
+                                            edac_mc_find_csrow_by_page(mci, page),
+                                            0, -1,
+                                            mci->ctl_name, "", NULL);
        }
 
        if (info->eapr & BIT(1)) {      /* UE? */
@@ -190,9 +191,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
 
                if (handle_errors)
                        /* 82600 doesn't give enough info */
-                       edac_mc_handle_ue(mci, page, 0,
-                                       edac_mc_find_csrow_by_page(mci, page),
-                                       mci->ctl_name);
+                       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                            page, 0, 0,
+                                            edac_mc_find_csrow_by_page(mci, page),
+                                            0, -1,
+                                            mci->ctl_name, "", NULL);
        }
 
        return error_found;
@@ -216,6 +219,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
                        u8 dramcr)
 {
        struct csrow_info *csrow;
+       struct dimm_info *dimm;
        int index;
        u8 drbar;               /* SDRAM Row Boundary Address Register */
        u32 row_high_limit, row_high_limit_last;
@@ -227,6 +231,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
 
        for (index = 0; index < mci->nr_csrows; index++) {
                csrow = &mci->csrows[index];
+               dimm = csrow->channels[0].dimm;
 
                /* find the DRAM Chip Select Base address and mask */
                pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
@@ -247,16 +252,17 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
 
                csrow->first_page = row_base >> PAGE_SHIFT;
                csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
-               csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+
+               dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
                /* Error address is top 19 bits - so granularity is      *
                 * 14 bits                                               */
-               csrow->grain = 1 << 14;
-               csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
+               dimm->grain = 1 << 14;
+               dimm->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
                /* FIXME - check that this is unknowable with this chipset */
-               csrow->dtype = DEV_UNKNOWN;
+               dimm->dtype = DEV_UNKNOWN;
 
                /* Mode is global on 82600 */
-               csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
+               dimm->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
                row_high_limit_last = row_high_limit;
        }
 }
@@ -264,6 +270,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
 static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
 {
        struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[2];
        u8 dramcr;
        u32 eapr;
        u32 scrub_disabled;
@@ -278,8 +285,13 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
        debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
                sdram_refresh_rate);
        debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
-       mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0);
-
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = R82600_NR_CSROWS;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = R82600_NR_CHANS;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
        if (mci == NULL)
                return -ENOMEM;
 
index 123204f8e23b947a9f7035403f6961f9d988ec22..4adaf4b7da993c3d6b1f25d430dbdf1aa3e7a875 100644 (file)
@@ -314,8 +314,6 @@ struct sbridge_pvt {
        struct sbridge_info     info;
        struct sbridge_channel  channel[NUM_CHANNELS];
 
-       int                     csrow_map[NUM_CHANNELS][MAX_DIMMS];
-
        /* Memory type detection */
        bool                    is_mirrored, is_lockstep, is_close_pg;
 
@@ -487,29 +485,14 @@ static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot,
 }
 
 /**
- * sbridge_get_active_channels() - gets the number of channels and csrows
+ * check_if_ecc_is_active() - Checks if ECC is active
  * bus:                Device bus
- * @channels:  Number of channels that will be returned
- * @csrows:    Number of csrows found
- *
- * Since EDAC core needs to know in advance the number of available channels
- * and csrows, in order to allocate memory for csrows/channels, it is needed
- * to run two similar steps. At the first step, implemented on this function,
- * it checks the number of csrows/channels present at one socket, identified
- * by the associated PCI bus.
- * this is used in order to properly allocate the size of mci components.
- * Note: one csrow is one dimm.
  */
-static int sbridge_get_active_channels(const u8 bus, unsigned *channels,
-                                     unsigned *csrows)
+static int check_if_ecc_is_active(const u8 bus)
 {
        struct pci_dev *pdev = NULL;
-       int i, j;
        u32 mcmtr;
 
-       *channels = 0;
-       *csrows = 0;
-
        pdev = get_pdev_slot_func(bus, 15, 0);
        if (!pdev) {
                sbridge_printk(KERN_ERR, "Couldn't find PCI device "
@@ -523,41 +506,14 @@ static int sbridge_get_active_channels(const u8 bus, unsigned *channels,
                sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
                return -ENODEV;
        }
-
-       for (i = 0; i < NUM_CHANNELS; i++) {
-               u32 mtr;
-
-               /* Device 15 functions 2 - 5  */
-               pdev = get_pdev_slot_func(bus, 15, 2 + i);
-               if (!pdev) {
-                       sbridge_printk(KERN_ERR, "Couldn't find PCI device "
-                                                "%2x.%02d.%d!!!\n",
-                                                bus, 15, 2 + i);
-                       return -ENODEV;
-               }
-               (*channels)++;
-
-               for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
-                       pci_read_config_dword(pdev, mtr_regs[j], &mtr);
-                       debugf1("Bus#%02x channel #%d  MTR%d = %x\n", bus, i, j, mtr);
-                       if (IS_DIMM_PRESENT(mtr))
-                               (*csrows)++;
-               }
-       }
-
-       debugf0("Number of active channels: %d, number of active dimms: %d\n",
-               *channels, *csrows);
-
        return 0;
 }
 
-static int get_dimm_config(const struct mem_ctl_info *mci)
+static int get_dimm_config(struct mem_ctl_info *mci)
 {
        struct sbridge_pvt *pvt = mci->pvt_info;
-       struct csrow_info *csr;
+       struct dimm_info *dimm;
        int i, j, banks, ranks, rows, cols, size, npages;
-       int csrow = 0;
-       unsigned long last_page = 0;
        u32 reg;
        enum edac_type mode;
        enum mem_type mtype;
@@ -616,6 +572,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
                u32 mtr;
 
                for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
+                       dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+                                      i, j, 0);
                        pci_read_config_dword(pvt->pci_tad[i],
                                              mtr_regs[j], &mtr);
                        debugf4("Channel #%d  MTR%d = %x\n", i, j, mtr);
@@ -634,29 +592,15 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
                                        pvt->sbridge_dev->mc, i, j,
                                        size, npages,
                                        banks, ranks, rows, cols);
-                               csr = &mci->csrows[csrow];
-
-                               csr->first_page = last_page;
-                               csr->last_page = last_page + npages - 1;
-                               csr->page_mask = 0UL;   /* Unused */
-                               csr->nr_pages = npages;
-                               csr->grain = 32;
-                               csr->csrow_idx = csrow;
-                               csr->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
-                               csr->ce_count = 0;
-                               csr->ue_count = 0;
-                               csr->mtype = mtype;
-                               csr->edac_mode = mode;
-                               csr->nr_channels = 1;
-                               csr->channels[0].chan_idx = i;
-                               csr->channels[0].ce_count = 0;
-                               pvt->csrow_map[i][j] = csrow;
-                               snprintf(csr->channels[0].label,
-                                        sizeof(csr->channels[0].label),
+
+                               dimm->nr_pages = npages;
+                               dimm->grain = 32;
+                               dimm->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
+                               dimm->mtype = mtype;
+                               dimm->edac_mode = mode;
+                               snprintf(dimm->label, sizeof(dimm->label),
                                         "CPU_SrcID#%u_Channel#%u_DIMM#%u",
                                         pvt->sbridge_dev->source_id, i, j);
-                               last_page += npages;
-                               csrow++;
                        }
                }
        }
@@ -844,11 +788,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
                                 u8 *socket,
                                 long *channel_mask,
                                 u8 *rank,
-                                char *area_type)
+                                char **area_type, char *msg)
 {
        struct mem_ctl_info     *new_mci;
        struct sbridge_pvt *pvt = mci->pvt_info;
-       char                    msg[256];
        int                     n_rir, n_sads, n_tads, sad_way, sck_xch;
        int                     sad_interl, idx, base_ch;
        int                     interleave_mode;
@@ -870,12 +813,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
         */
        if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
                sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
-               edac_mc_handle_ce_no_info(mci, msg);
                return -EINVAL;
        }
        if (addr >= (u64)pvt->tohm) {
                sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
-               edac_mc_handle_ce_no_info(mci, msg);
                return -EINVAL;
        }
 
@@ -892,7 +833,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
                limit = SAD_LIMIT(reg);
                if (limit <= prv) {
                        sprintf(msg, "Can't discover the memory socket");
-                       edac_mc_handle_ce_no_info(mci, msg);
                        return -EINVAL;
                }
                if  (addr <= limit)
@@ -901,10 +841,9 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
        }
        if (n_sads == MAX_SAD) {
                sprintf(msg, "Can't discover the memory socket");
-               edac_mc_handle_ce_no_info(mci, msg);
                return -EINVAL;
        }
-       area_type = get_dram_attr(reg);
+       *area_type = get_dram_attr(reg);
        interleave_mode = INTERLEAVE_MODE(reg);
 
        pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
@@ -942,7 +881,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
                break;
        default:
                sprintf(msg, "Can't discover socket interleave");
-               edac_mc_handle_ce_no_info(mci, msg);
                return -EINVAL;
        }
        *socket = sad_interleave[idx];
@@ -957,7 +895,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
        if (!new_mci) {
                sprintf(msg, "Struct for socket #%u wasn't initialized",
                        *socket);
-               edac_mc_handle_ce_no_info(mci, msg);
                return -EINVAL;
        }
        mci = new_mci;
@@ -973,7 +910,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
                limit = TAD_LIMIT(reg);
                if (limit <= prv) {
                        sprintf(msg, "Can't discover the memory channel");
-                       edac_mc_handle_ce_no_info(mci, msg);
                        return -EINVAL;
                }
                if  (addr <= limit)
@@ -1013,7 +949,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
                break;
        default:
                sprintf(msg, "Can't discover the TAD target");
-               edac_mc_handle_ce_no_info(mci, msg);
                return -EINVAL;
        }
        *channel_mask = 1 << base_ch;
@@ -1027,7 +962,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
                        break;
                default:
                        sprintf(msg, "Invalid mirror set. Can't decode addr");
-                       edac_mc_handle_ce_no_info(mci, msg);
                        return -EINVAL;
                }
        } else
@@ -1055,7 +989,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
        if (offset > addr) {
                sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
                        offset, addr);
-               edac_mc_handle_ce_no_info(mci, msg);
                return -EINVAL;
        }
        addr -= offset;
@@ -1095,7 +1028,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
        if (n_rir == MAX_RIR_RANGES) {
                sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
                        ch_addr);
-               edac_mc_handle_ce_no_info(mci, msg);
                return -EINVAL;
        }
        rir_way = RIR_WAY(reg);
@@ -1409,7 +1341,8 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
 {
        struct mem_ctl_info *new_mci;
        struct sbridge_pvt *pvt = mci->pvt_info;
-       char *type, *optype, *msg, *recoverable_msg;
+       enum hw_event_mc_err_type tp_event;
+       char *type, *optype, msg[256];
        bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
        bool overflow = GET_BITFIELD(m->status, 62, 62);
        bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
@@ -1421,13 +1354,21 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
        u32 optypenum = GET_BITFIELD(m->status, 4, 6);
        long channel_mask, first_channel;
        u8  rank, socket;
-       int csrow, rc, dimm;
-       char *area_type = "Unknown";
-
-       if (ripv)
-               type = "NON_FATAL";
-       else
-               type = "FATAL";
+       int rc, dimm;
+       char *area_type = NULL;
+
+       if (uncorrected_error) {
+               if (ripv) {
+                       type = "FATAL";
+                       tp_event = HW_EVENT_ERR_FATAL;
+               } else {
+                       type = "NON_FATAL";
+                       tp_event = HW_EVENT_ERR_UNCORRECTED;
+               }
+       } else {
+               type = "CORRECTED";
+               tp_event = HW_EVENT_ERR_CORRECTED;
+       }
 
        /*
         * According with Table 15-9 of the Intel Architecture spec vol 3A,
@@ -1445,19 +1386,19 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
        } else {
                switch (optypenum) {
                case 0:
-                       optype = "generic undef request";
+                       optype = "generic undef request error";
                        break;
                case 1:
-                       optype = "memory read";
+                       optype = "memory read error";
                        break;
                case 2:
-                       optype = "memory write";
+                       optype = "memory write error";
                        break;
                case 3:
-                       optype = "addr/cmd";
+                       optype = "addr/cmd error";
                        break;
                case 4:
-                       optype = "memory scrubbing";
+                       optype = "memory scrubbing error";
                        break;
                default:
                        optype = "reserved";
@@ -1466,13 +1407,13 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
        }
 
        rc = get_memory_error_data(mci, m->addr, &socket,
-                                  &channel_mask, &rank, area_type);
+                                  &channel_mask, &rank, &area_type, msg);
        if (rc < 0)
-               return;
+               goto err_parsing;
        new_mci = get_mci_for_node_id(socket);
        if (!new_mci) {
-               edac_mc_handle_ce_no_info(mci, "Error: socket got corrupted!");
-               return;
+               strcpy(msg, "Error: socket got corrupted!");
+               goto err_parsing;
        }
        mci = new_mci;
        pvt = mci->pvt_info;
@@ -1486,45 +1427,39 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
        else
                dimm = 2;
 
-       csrow = pvt->csrow_map[first_channel][dimm];
-
-       if (uncorrected_error && recoverable)
-               recoverable_msg = " recoverable";
-       else
-               recoverable_msg = "";
 
        /*
-        * FIXME: What should we do with "channel" information on mcelog?
-        * Probably, we can just discard it, as the channel information
-        * comes from the get_memory_error_data() address decoding
+        * FIXME: On some memory configurations (mirror, lockstep), the
+        * Memory Controller can't point the error to a single DIMM. The
+        * EDAC core should be handling the channel mask, in order to point
+        * to the group of dimm's where the error may be happening.
         */
-       msg = kasprintf(GFP_ATOMIC,
-                       "%d %s error(s): %s on %s area %s%s: cpu=%d Err=%04x:%04x (ch=%d), "
-                       "addr = 0x%08llx => socket=%d, Channel=%ld(mask=%ld), rank=%d\n",
-                       core_err_cnt,
-                       area_type,
-                       optype,
-                       type,
-                       recoverable_msg,
-                       overflow ? "OVERFLOW" : "",
-                       m->cpu,
-                       mscod, errcode,
-                       channel,                /* 1111b means not specified */
-                       (long long) m->addr,
-                       socket,
-                       first_channel,          /* This is the real channel on SB */
-                       channel_mask,
-                       rank);
+       snprintf(msg, sizeof(msg),
+                "count:%d%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
+                core_err_cnt,
+                overflow ? " OVERFLOW" : "",
+                (uncorrected_error && recoverable) ? " recoverable" : "",
+                area_type,
+                mscod, errcode,
+                socket,
+                channel_mask,
+                rank);
 
        debugf0("%s", msg);
 
+       /* FIXME: need support for channel mask */
+
        /* Call the helper to output message */
-       if (uncorrected_error)
-               edac_mc_handle_fbd_ue(mci, csrow, 0, 0, msg);
-       else
-               edac_mc_handle_fbd_ce(mci, csrow, 0, msg);
+       edac_mc_handle_error(tp_event, mci,
+                            m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
+                            channel, dimm, -1,
+                            optype, msg, m);
+       return;
+err_parsing:
+       edac_mc_handle_error(tp_event, mci, 0, 0, 0,
+                            -1, -1, -1,
+                            msg, "", m);
 
-       kfree(msg);
 }
 
 /*
@@ -1683,16 +1618,25 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
 static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
 {
        struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[2];
        struct sbridge_pvt *pvt;
-       int rc, channels, csrows;
+       int rc;
 
        /* Check the number of active and not disabled channels */
-       rc = sbridge_get_active_channels(sbridge_dev->bus, &channels, &csrows);
+       rc = check_if_ecc_is_active(sbridge_dev->bus);
        if (unlikely(rc < 0))
                return rc;
 
        /* allocate a new MC control structure */
-       mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, sbridge_dev->mc);
+       layers[0].type = EDAC_MC_LAYER_CHANNEL;
+       layers[0].size = NUM_CHANNELS;
+       layers[0].is_virt_csrow = false;
+       layers[1].type = EDAC_MC_LAYER_SLOT;
+       layers[1].size = MAX_DIMMS;
+       layers[1].is_virt_csrow = true;
+       mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
+                           sizeof(*pvt));
+
        if (unlikely(!mci))
                return -ENOMEM;
 
index e99d00976189344193e974cc9b972512a8f7359e..7bb4614730db846445909d460a5b5a0953d8ab50 100644 (file)
@@ -71,7 +71,10 @@ static void tile_edac_check(struct mem_ctl_info *mci)
        if (mem_error.sbe_count != priv->ce_count) {
                dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node);
                priv->ce_count = mem_error.sbe_count;
-               edac_mc_handle_ce(mci, 0, 0, 0, 0, 0, mci->ctl_name);
+               edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                    0, 0, 0,
+                                    0, 0, -1,
+                                    mci->ctl_name, "", NULL);
        }
 }
 
@@ -84,6 +87,7 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
        struct csrow_info       *csrow = &mci->csrows[0];
        struct tile_edac_priv   *priv = mci->pvt_info;
        struct mshim_mem_info   mem_info;
+       struct dimm_info *dimm = csrow->channels[0].dimm;
 
        if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
                sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
@@ -93,27 +97,25 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
        }
 
        if (mem_info.mem_ecc)
-               csrow->edac_mode = EDAC_SECDED;
+               dimm->edac_mode = EDAC_SECDED;
        else
-               csrow->edac_mode = EDAC_NONE;
+               dimm->edac_mode = EDAC_NONE;
        switch (mem_info.mem_type) {
        case DDR2:
-               csrow->mtype = MEM_DDR2;
+               dimm->mtype = MEM_DDR2;
                break;
 
        case DDR3:
-               csrow->mtype = MEM_DDR3;
+               dimm->mtype = MEM_DDR3;
                break;
 
        default:
                return -1;
        }
 
-       csrow->first_page = 0;
-       csrow->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
-       csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
-       csrow->grain = TILE_EDAC_ERROR_GRAIN;
-       csrow->dtype = DEV_UNKNOWN;
+       dimm->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
+       dimm->grain = TILE_EDAC_ERROR_GRAIN;
+       dimm->dtype = DEV_UNKNOWN;
 
        return 0;
 }
@@ -123,6 +125,7 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
        char                    hv_file[32];
        int                     hv_devhdl;
        struct mem_ctl_info     *mci;
+       struct edac_mc_layer    layers[2];
        struct tile_edac_priv   *priv;
        int                     rc;
 
@@ -132,8 +135,14 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
                return -EINVAL;
 
        /* A TILE MC has a single channel and one chip-select row. */
-       mci = edac_mc_alloc(sizeof(struct tile_edac_priv),
-               TILE_EDAC_NR_CSROWS, TILE_EDAC_NR_CHANS, pdev->id);
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = TILE_EDAC_NR_CSROWS;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = TILE_EDAC_NR_CHANS;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
+                           sizeof(struct tile_edac_priv));
        if (mci == NULL)
                return -ENOMEM;
        priv = mci->pvt_info;
index a438297389e5d2919476e3a41a61751e8dd38dc3..1ac7962d63eadcd5ba8ddd17ae58b96b9a062e2e 100644 (file)
@@ -215,19 +215,26 @@ static void x38_process_error_info(struct mem_ctl_info *mci,
                return;
 
        if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
-               edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+                                    -1, -1, -1,
+                                    "UE overwrote CE", "", NULL);
                info->errsts = info->errsts2;
        }
 
        for (channel = 0; channel < x38_channel_num; channel++) {
                log = info->eccerrlog[channel];
                if (log & X38_ECCERRLOG_UE) {
-                       edac_mc_handle_ue(mci, 0, 0,
-                               eccerrlog_row(channel, log), "x38 UE");
+                       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+                                            0, 0, 0,
+                                            eccerrlog_row(channel, log),
+                                            -1, -1,
+                                            "x38 UE", "", NULL);
                } else if (log & X38_ECCERRLOG_CE) {
-                       edac_mc_handle_ce(mci, 0, 0,
-                               eccerrlog_syndrome(log),
-                               eccerrlog_row(channel, log), 0, "x38 CE");
+                       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+                                            0, 0, eccerrlog_syndrome(log),
+                                            eccerrlog_row(channel, log),
+                                            -1, -1,
+                                            "x38 CE", "", NULL);
                }
        }
 }
@@ -317,9 +324,9 @@ static unsigned long drb_to_nr_pages(
 static int x38_probe1(struct pci_dev *pdev, int dev_idx)
 {
        int rc;
-       int i;
+       int i, j;
        struct mem_ctl_info *mci = NULL;
-       unsigned long last_page;
+       struct edac_mc_layer layers[2];
        u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
        bool stacked;
        void __iomem *window;
@@ -335,7 +342,13 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
        how_many_channel(pdev);
 
        /* FIXME: unconventional pvt_info usage */
-       mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0);
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = X38_RANKS;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = x38_channel_num;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
        if (!mci)
                return -ENOMEM;
 
@@ -363,7 +376,6 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
         * cumulative; the last one will contain the total memory
         * contained in all ranks.
         */
-       last_page = -1UL;
        for (i = 0; i < mci->nr_csrows; i++) {
                unsigned long nr_pages;
                struct csrow_info *csrow = &mci->csrows[i];
@@ -372,20 +384,18 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
                        i / X38_RANKS_PER_CHANNEL,
                        i % X38_RANKS_PER_CHANNEL);
 
-               if (nr_pages == 0) {
-                       csrow->mtype = MEM_EMPTY;
+               if (nr_pages == 0)
                        continue;
-               }
 
-               csrow->first_page = last_page + 1;
-               last_page += nr_pages;
-               csrow->last_page = last_page;
-               csrow->nr_pages = nr_pages;
+               for (j = 0; j < x38_channel_num; j++) {
+                       struct dimm_info *dimm = csrow->channels[j].dimm;
 
-               csrow->grain = nr_pages << PAGE_SHIFT;
-               csrow->mtype = MEM_DDR2;
-               csrow->dtype = DEV_UNKNOWN;
-               csrow->edac_mode = EDAC_UNKNOWN;
+                       dimm->nr_pages = nr_pages / x38_channel_num;
+                       dimm->grain = nr_pages << PAGE_SHIFT;
+                       dimm->mtype = MEM_DDR2;
+                       dimm->dtype = DEV_UNKNOWN;
+                       dimm->edac_mode = EDAC_UNKNOWN;
+               }
        }
 
        x38_clear_error_info(mci);
index aa3642cb820989e77d89799c11551463a36416a6..c4067d0141f7c083ea9de58c72dc0ea3122ca300 100644 (file)
@@ -114,6 +114,14 @@ config GPIO_EP93XX
        depends on ARCH_EP93XX
        select GPIO_GENERIC
 
+config GPIO_MM_LANTIQ
+       bool "Lantiq Memory mapped GPIOs"
+       depends on LANTIQ && SOC_XWAY
+       help
+         This enables support for memory mapped GPIOs on the External Bus Unit
+         (EBU) found on Lantiq SoCs. The gpios are output only as they are
+         created by attaching a 16bit latch to the bus.
+
 config GPIO_MPC5200
        def_bool y
        depends on PPC_MPC52xx
@@ -167,6 +175,14 @@ config GPIO_PXA
        help
          Say yes here to support the PXA GPIO device
 
+config GPIO_STA2X11
+       bool "STA2x11/ConneXt GPIO support"
+       depends on MFD_STA2X11
+       select GENERIC_IRQ_CHIP
+       help
+         Say yes here to support the STA2x11/ConneXt GPIO device.
+         The GPIO module has 128 GPIO pins with alternate functions.
+
 config GPIO_XILINX
        bool "Xilinx GPIO support"
        depends on PPC_OF || MICROBLAZE
@@ -180,13 +196,13 @@ config GPIO_VR41XX
          Say yes here to support the NEC VR4100 series General-purpose I/O Uint
 
 config GPIO_SCH
-       tristate "Intel SCH/TunnelCreek GPIO"
+       tristate "Intel SCH/TunnelCreek/Centerton GPIO"
        depends on PCI && X86
        select MFD_CORE
        select LPC_SCH
        help
-         Say yes here to support GPIO interface on Intel Poulsbo SCH
-         or Intel Tunnel Creek processor.
+         Say yes here to support GPIO interface on Intel Poulsbo SCH,
+         Intel Tunnel Creek processor or Intel Centerton processor.
          The Intel SCH contains a total of 14 GPIO pins. Ten GPIOs are
          powered by the core power rail and are turned off during sleep
          modes (S3 and higher). The remaining four GPIOs are powered by
@@ -195,6 +211,22 @@ config GPIO_SCH
          system from the Suspend-to-RAM state.
          The Intel Tunnel Creek processor has 5 GPIOs powered by the
          core power rail and 9 from suspend power supply.
+         The Intel Centerton processor has a total of 30 GPIO pins.
+         Twenty-one are powered by the core power rail and 9 from the
+         suspend power supply.
+
+config GPIO_ICH
+       tristate "Intel ICH GPIO"
+       depends on PCI && X86
+       select MFD_CORE
+       select LPC_ICH
+       help
+         Say yes here to support the GPIO functionality of a number of Intel
+         ICH-based chipsets.  Currently supported devices: ICH6, ICH7, ICH8
+         ICH9, ICH10, Series 5/3400 (eg Ibex Peak), Series 6/C200 (eg
+         Cougar Point), NM10 (Tiger Point), and 3100 (Whitmore Lake).
+
+         If unsure, say N.
 
 config GPIO_VX855
        tristate "VIA VX855/VX875 GPIO"
@@ -334,6 +366,16 @@ config GPIO_STMPE
          This enables support for the GPIOs found on the STMPE I/O
          Expanders.
 
+config GPIO_STP_XWAY
+       bool "XWAY STP GPIOs"
+       depends on SOC_XWAY
+       help
+         This enables support for the Serial To Parallel (STP) unit found on
+         XWAY SoC. The STP allows the SoC to drive a shift registers cascade,
+         that can be up to 24 bit. This peripheral is aimed at driving leds.
+         Some of the gpios/leds can be auto updated by the soc with dsl and
+         phy status.
+
 config GPIO_TC3589X
        bool "TC3589X GPIOs"
        depends on MFD_TC3589X
index 07a79e245407ea23d9210aecdb2af24481cfd8b5..0f55662002c357c4d61c673b81655e4c74844742 100644 (file)
@@ -19,6 +19,7 @@ obj-$(CONFIG_ARCH_DAVINCI)    += gpio-davinci.o
 obj-$(CONFIG_GPIO_EM)          += gpio-em.o
 obj-$(CONFIG_GPIO_EP93XX)      += gpio-ep93xx.o
 obj-$(CONFIG_GPIO_GE_FPGA)     += gpio-ge.o
+obj-$(CONFIG_GPIO_ICH)         += gpio-ich.o
 obj-$(CONFIG_GPIO_IT8761E)     += gpio-it8761e.o
 obj-$(CONFIG_GPIO_JANZ_TTL)    += gpio-janz-ttl.o
 obj-$(CONFIG_ARCH_KS8695)      += gpio-ks8695.o
@@ -32,6 +33,7 @@ obj-$(CONFIG_GPIO_MC33880)    += gpio-mc33880.o
 obj-$(CONFIG_GPIO_MC9S08DZ60)  += gpio-mc9s08dz60.o
 obj-$(CONFIG_GPIO_MCP23S08)    += gpio-mcp23s08.o
 obj-$(CONFIG_GPIO_ML_IOH)      += gpio-ml-ioh.o
+obj-$(CONFIG_GPIO_MM_LANTIQ)   += gpio-mm-lantiq.o
 obj-$(CONFIG_GPIO_MPC5200)     += gpio-mpc5200.o
 obj-$(CONFIG_GPIO_MPC8XXX)     += gpio-mpc8xxx.o
 obj-$(CONFIG_GPIO_MSIC)                += gpio-msic.o
@@ -51,7 +53,9 @@ obj-$(CONFIG_PLAT_SAMSUNG)    += gpio-samsung.o
 obj-$(CONFIG_ARCH_SA1100)      += gpio-sa1100.o
 obj-$(CONFIG_GPIO_SCH)         += gpio-sch.o
 obj-$(CONFIG_GPIO_SODAVILLE)   += gpio-sodaville.o
+obj-$(CONFIG_GPIO_STA2X11)     += gpio-sta2x11.o
 obj-$(CONFIG_GPIO_STMPE)       += gpio-stmpe.o
+obj-$(CONFIG_GPIO_STP_XWAY)    += gpio-stp-xway.o
 obj-$(CONFIG_GPIO_SX150X)      += gpio-sx150x.o
 obj-$(CONFIG_GPIO_TC3589X)     += gpio-tc3589x.o
 obj-$(CONFIG_ARCH_TEGRA)       += gpio-tegra.o
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
new file mode 100644 (file)
index 0000000..b7c0651
--- /dev/null
@@ -0,0 +1,419 @@
+/*
+ * Intel ICH6-10, Series 5 and 6 GPIO driver
+ *
+ * Copyright (C) 2010 Extreme Engineering Solutions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/lpc_ich.h>
+
+#define DRV_NAME "gpio_ich"
+
+/*
+ * GPIO register offsets in GPIO I/O space.
+ * Each chunk of 32 GPIOs is manipulated via its own USE_SELx, IO_SELx, and
+ * LVLx registers.  Logic in the read/write functions takes a register and
+ * an absolute bit number and determines the proper register offset and bit
+ * number in that register.  For example, to read the value of GPIO bit 50
+ * the code would access offset ichx_regs[2(=GPIO_LVL)][1(=50/32)],
+ * bit 18 (50%32).
+ */
+enum GPIO_REG {
+       GPIO_USE_SEL = 0,
+       GPIO_IO_SEL,
+       GPIO_LVL,
+};
+
+static const u8 ichx_regs[3][3] = {
+       {0x00, 0x30, 0x40},     /* USE_SEL[1-3] offsets */
+       {0x04, 0x34, 0x44},     /* IO_SEL[1-3] offsets */
+       {0x0c, 0x38, 0x48},     /* LVL[1-3] offsets */
+};
+
+#define ICHX_WRITE(val, reg, base_res) outl(val, (reg) + (base_res)->start)
+#define ICHX_READ(reg, base_res)       inl((reg) + (base_res)->start)
+
+struct ichx_desc {
+       /* Max GPIO pins the chipset can have */
+       uint ngpio;
+
+       /* Whether the chipset has GPIO in GPE0_STS in the PM IO region */
+       bool uses_gpe0;
+
+       /* USE_SEL is bogus on some chipsets, eg 3100 */
+       u32 use_sel_ignore[3];
+
+       /* Some chipsets have quirks, let these use their own request/get */
+       int (*request)(struct gpio_chip *chip, unsigned offset);
+       int (*get)(struct gpio_chip *chip, unsigned offset);
+};
+
+static struct {
+       spinlock_t lock;
+       struct platform_device *dev;
+       struct gpio_chip chip;
+       struct resource *gpio_base;     /* GPIO IO base */
+       struct resource *pm_base;       /* Power Management IO base */
+       struct ichx_desc *desc; /* Pointer to chipset-specific description */
+       u32 orig_gpio_ctrl;     /* Orig CTRL value, used to restore on exit */
+} ichx_priv;
+
+static int modparam_gpiobase = -1;     /* dynamic */
+module_param_named(gpiobase, modparam_gpiobase, int, 0444);
+MODULE_PARM_DESC(gpiobase, "The GPIO number base. -1 means dynamic, "
+                          "which is the default.");
+
+static int ichx_write_bit(int reg, unsigned nr, int val, int verify)
+{
+       unsigned long flags;
+       u32 data, tmp;
+       int reg_nr = nr / 32;
+       int bit = nr & 0x1f;
+       int ret = 0;
+
+       spin_lock_irqsave(&ichx_priv.lock, flags);
+
+       data = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+       if (val)
+               data |= 1 << bit;
+       else
+               data &= ~(1 << bit);
+       ICHX_WRITE(data, ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+       tmp = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+       if (verify && data != tmp)
+               ret = -EPERM;
+
+       spin_unlock_irqrestore(&ichx_priv.lock, flags);
+
+       return ret;
+}
+
+static int ichx_read_bit(int reg, unsigned nr)
+{
+       unsigned long flags;
+       u32 data;
+       int reg_nr = nr / 32;
+       int bit = nr & 0x1f;
+
+       spin_lock_irqsave(&ichx_priv.lock, flags);
+
+       data = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+
+       spin_unlock_irqrestore(&ichx_priv.lock, flags);
+
+       return data & (1 << bit) ? 1 : 0;
+}
+
+static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+       /*
+        * Try setting pin as an input and verify it worked since many pins
+        * are output-only.
+        */
+       if (ichx_write_bit(GPIO_IO_SEL, nr, 1, 1))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int ichx_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+                                       int val)
+{
+       /* Set GPIO output value. */
+       ichx_write_bit(GPIO_LVL, nr, val, 0);
+
+       /*
+        * Try setting pin as an output and verify it worked since many pins
+        * are input-only.
+        */
+       if (ichx_write_bit(GPIO_IO_SEL, nr, 0, 1))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int ichx_gpio_get(struct gpio_chip *chip, unsigned nr)
+{
+       return ichx_read_bit(GPIO_LVL, nr);
+}
+
+static int ich6_gpio_get(struct gpio_chip *chip, unsigned nr)
+{
+       unsigned long flags;
+       u32 data;
+
+       /*
+        * GPI 0 - 15 need to be read from the power management registers on
+        * a ICH6/3100 bridge.
+        */
+       if (nr < 16) {
+               if (!ichx_priv.pm_base)
+                       return -ENXIO;
+
+               spin_lock_irqsave(&ichx_priv.lock, flags);
+
+               /* GPI 0 - 15 are latched, write 1 to clear */
+               ICHX_WRITE(1 << (16 + nr), 0, ichx_priv.pm_base);
+               data = ICHX_READ(0, ichx_priv.pm_base);
+
+               spin_unlock_irqrestore(&ichx_priv.lock, flags);
+
+               return (data >> 16) & (1 << nr) ? 1 : 0;
+       } else {
+               return ichx_gpio_get(chip, nr);
+       }
+}
+
+static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr)
+{
+       /*
+        * Note we assume the BIOS properly set a bridge's USE value.  Some
+        * chips (eg Intel 3100) have bogus USE values though, so first see if
+        * the chipset's USE value can be trusted for this specific bit.
+        * If it can't be trusted, assume that the pin can be used as a GPIO.
+        */
+       if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f)))
+               return 1;
+
+       return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV;
+}
+
+static int ich6_gpio_request(struct gpio_chip *chip, unsigned nr)
+{
+       /*
+        * Fixups for bits 16 and 17 are necessary on the Intel ICH6/3100
+        * bridge as they are controlled by USE register bits 0 and 1.  See
+        * "Table 704 GPIO_USE_SEL1 register" in the i3100 datasheet for
+        * additional info.
+        */
+       if (nr == 16 || nr == 17)
+               nr -= 16;
+
+       return ichx_gpio_request(chip, nr);
+}
+
+static void ichx_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
+{
+       ichx_write_bit(GPIO_LVL, nr, val, 0);
+}
+
+static void __devinit ichx_gpiolib_setup(struct gpio_chip *chip)
+{
+       chip->owner = THIS_MODULE;
+       chip->label = DRV_NAME;
+       chip->dev = &ichx_priv.dev->dev;
+
+       /* Allow chip-specific overrides of request()/get() */
+       chip->request = ichx_priv.desc->request ?
+               ichx_priv.desc->request : ichx_gpio_request;
+       chip->get = ichx_priv.desc->get ?
+               ichx_priv.desc->get : ichx_gpio_get;
+
+       chip->set = ichx_gpio_set;
+       chip->direction_input = ichx_gpio_direction_input;
+       chip->direction_output = ichx_gpio_direction_output;
+       chip->base = modparam_gpiobase;
+       chip->ngpio = ichx_priv.desc->ngpio;
+       chip->can_sleep = 0;
+       chip->dbg_show = NULL;
+}
+
+/* ICH6-based, 631xesb-based */
+static struct ichx_desc ich6_desc = {
+       /* Bridges using the ICH6 controller need fixups for GPIO 0 - 17 */
+       .request = ich6_gpio_request,
+       .get = ich6_gpio_get,
+
+       /* GPIO 0-15 are read in the GPE0_STS PM register */
+       .uses_gpe0 = true,
+
+       .ngpio = 50,
+};
+
+/* Intel 3100 */
+static struct ichx_desc i3100_desc = {
+       /*
+        * Bits 16,17, 20 of USE_SEL and bit 16 of USE_SEL2 always read 0 on
+        * the Intel 3100.  See "Table 712. GPIO Summary Table" of 3100
+        * Datasheet for more info.
+        */
+       .use_sel_ignore = {0x00130000, 0x00010000, 0x0},
+
+       /* The 3100 needs fixups for GPIO 0 - 17 */
+       .request = ich6_gpio_request,
+       .get = ich6_gpio_get,
+
+       /* GPIO 0-15 are read in the GPE0_STS PM register */
+       .uses_gpe0 = true,
+
+       .ngpio = 50,
+};
+
+/* ICH7 and ICH8-based */
+static struct ichx_desc ich7_desc = {
+       .ngpio = 50,
+};
+
+/* ICH9-based */
+static struct ichx_desc ich9_desc = {
+       .ngpio = 61,
+};
+
+/* ICH10-based - Consumer/corporate versions have different amount of GPIO */
+static struct ichx_desc ich10_cons_desc = {
+       .ngpio = 61,
+};
+static struct ichx_desc ich10_corp_desc = {
+       .ngpio = 72,
+};
+
+/* Intel 5 series, 6 series, 3400 series, and C200 series */
+static struct ichx_desc intel5_desc = {
+       .ngpio = 76,
+};
+
+static int __devinit ichx_gpio_probe(struct platform_device *pdev)
+{
+       struct resource *res_base, *res_pm;
+       int err;
+       struct lpc_ich_info *ich_info = pdev->dev.platform_data;
+
+       if (!ich_info)
+               return -ENODEV;
+
+       ichx_priv.dev = pdev;
+
+       switch (ich_info->gpio_version) {
+       case ICH_I3100_GPIO:
+               ichx_priv.desc = &i3100_desc;
+               break;
+       case ICH_V5_GPIO:
+               ichx_priv.desc = &intel5_desc;
+               break;
+       case ICH_V6_GPIO:
+               ichx_priv.desc = &ich6_desc;
+               break;
+       case ICH_V7_GPIO:
+               ichx_priv.desc = &ich7_desc;
+               break;
+       case ICH_V9_GPIO:
+               ichx_priv.desc = &ich9_desc;
+               break;
+       case ICH_V10CORP_GPIO:
+               ichx_priv.desc = &ich10_corp_desc;
+               break;
+       case ICH_V10CONS_GPIO:
+               ichx_priv.desc = &ich10_cons_desc;
+               break;
+       default:
+               return -ENODEV;
+       }
+
+       res_base = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPIO);
+       if (!res_base || !res_base->start || !res_base->end)
+               return -ENODEV;
+
+       if (!request_region(res_base->start, resource_size(res_base),
+                               pdev->name))
+               return -EBUSY;
+
+       ichx_priv.gpio_base = res_base;
+
+       /*
+        * If necessary, determine the I/O address of ACPI/power management
+        * registers which are needed to read the GPE0 register for GPI pins
+        * 0 - 15 on some chipsets.
+        */
+       if (!ichx_priv.desc->uses_gpe0)
+               goto init;
+
+       res_pm = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPE0);
+       if (!res_pm) {
+               pr_warn("ACPI BAR is unavailable, GPI 0 - 15 unavailable\n");
+               goto init;
+       }
+
+       if (!request_region(res_pm->start, resource_size(res_pm),
+                       pdev->name)) {
+               pr_warn("ACPI BAR is busy, GPI 0 - 15 unavailable\n");
+               goto init;
+       }
+
+       ichx_priv.pm_base = res_pm;
+
+init:
+       ichx_gpiolib_setup(&ichx_priv.chip);
+       err = gpiochip_add(&ichx_priv.chip);
+       if (err) {
+               pr_err("Failed to register GPIOs\n");
+               goto add_err;
+       }
+
+       pr_info("GPIO from %d to %d on %s\n", ichx_priv.chip.base,
+              ichx_priv.chip.base + ichx_priv.chip.ngpio - 1, DRV_NAME);
+
+       return 0;
+
+add_err:
+       release_region(ichx_priv.gpio_base->start,
+                       resource_size(ichx_priv.gpio_base));
+       if (ichx_priv.pm_base)
+               release_region(ichx_priv.pm_base->start,
+                               resource_size(ichx_priv.pm_base));
+       return err;
+}
+
+static int __devexit ichx_gpio_remove(struct platform_device *pdev)
+{
+       int err;
+
+       err = gpiochip_remove(&ichx_priv.chip);
+       if (err) {
+               dev_err(&pdev->dev, "%s failed, %d\n",
+                               "gpiochip_remove()", err);
+               return err;
+       }
+
+       release_region(ichx_priv.gpio_base->start,
+                               resource_size(ichx_priv.gpio_base));
+       if (ichx_priv.pm_base)
+               release_region(ichx_priv.pm_base->start,
+                               resource_size(ichx_priv.pm_base));
+
+       return 0;
+}
+
+static struct platform_driver ichx_gpio_driver = {
+       .driver         = {
+               .owner  = THIS_MODULE,
+               .name   = DRV_NAME,
+       },
+       .probe          = ichx_gpio_probe,
+       .remove         = __devexit_p(ichx_gpio_remove),
+};
+
+module_platform_driver(ichx_gpio_driver);
+
+MODULE_AUTHOR("Peter Tyser <ptyser@xes-inc.com>");
+MODULE_DESCRIPTION("GPIO interface for Intel ICH series");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:"DRV_NAME);
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
new file mode 100644 (file)
index 0000000..2983dfb
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * By attaching hardware latches to the EBU it is possible to create output
+ * only gpios. This driver configures a special memory address, which when
+ * written to outputs 16 bit to the latches.
+ */
+
+#define LTQ_EBU_BUSCON 0x1e7ff         /* 16 bit access, slowest timing */
+#define LTQ_EBU_WP     0x80000000      /* write protect bit */
+
+struct ltq_mm {
+       struct of_mm_gpio_chip mmchip;
+       u16 shadow;     /* shadow the latches state */
+};
+
+/**
+ * ltq_mm_apply() - write the shadow value to the ebu address.
+ * @chip:     Pointer to our private data structure.
+ *
+ * Write the shadow value to the EBU to set the gpios. We need to set the
+ * global EBU lock to make sure that PCI/MTD don't break.
+ */
+static void ltq_mm_apply(struct ltq_mm *chip)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ebu_lock, flags);
+       ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
+       __raw_writew(chip->shadow, chip->mmchip.regs);
+       ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
+       spin_unlock_irqrestore(&ebu_lock, flags);
+}
+
+/**
+ * ltq_mm_set() - gpio_chip->set - set gpios.
+ * @gc:     Pointer to gpio_chip device structure.
+ * @gpio:   GPIO signal number.
+ * @val:    Value to be written to specified signal.
+ *
+ * Set the shadow value and call ltq_mm_apply.
+ */
+static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct ltq_mm *chip =
+               container_of(mm_gc, struct ltq_mm, mmchip);
+
+       if (value)
+               chip->shadow |= (1 << offset);
+       else
+               chip->shadow &= ~(1 << offset);
+       ltq_mm_apply(chip);
+}
+
+/**
+ * ltq_mm_dir_out() - gpio_chip->dir_out - set gpio direction.
+ * @gc:     Pointer to gpio_chip device structure.
+ * @gpio:   GPIO signal number.
+ * @val:    Value to be written to specified signal.
+ *
+ * Same as ltq_mm_set, always returns 0.
+ */
+static int ltq_mm_dir_out(struct gpio_chip *gc, unsigned offset, int value)
+{
+       ltq_mm_set(gc, offset, value);
+
+       return 0;
+}
+
+/**
+ * ltq_mm_save_regs() - Set initial values of GPIO pins
+ * @mm_gc: pointer to memory mapped GPIO chip structure
+ */
+static void ltq_mm_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+       struct ltq_mm *chip =
+               container_of(mm_gc, struct ltq_mm, mmchip);
+
+       /* tell the ebu controller which memory address we will be using */
+       ltq_ebu_w32(CPHYSADDR(chip->mmchip.regs) | 0x1, LTQ_EBU_ADDRSEL1);
+
+       ltq_mm_apply(chip);
+}
+
+static int ltq_mm_probe(struct platform_device *pdev)
+{
+       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       struct ltq_mm *chip;
+       const __be32 *shadow;
+       int ret = 0;
+
+       if (!res) {
+               dev_err(&pdev->dev, "failed to get memory resource\n");
+               return -ENOENT;
+       }
+
+       chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       chip->mmchip.gc.ngpio = 16;
+       chip->mmchip.gc.label = "gpio-mm-ltq";
+       chip->mmchip.gc.direction_output = ltq_mm_dir_out;
+       chip->mmchip.gc.set = ltq_mm_set;
+       chip->mmchip.save_regs = ltq_mm_save_regs;
+
+       /* store the shadow value if one was passed by the devicetree */
+       shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
+       if (shadow)
+               chip->shadow = be32_to_cpu(*shadow);
+
+       ret = of_mm_gpiochip_add(pdev->dev.of_node, &chip->mmchip);
+       if (ret)
+               kfree(chip);
+       return ret;
+}
+
+static const struct of_device_id ltq_mm_match[] = {
+       { .compatible = "lantiq,gpio-mm" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, ltq_mm_match);
+
+static struct platform_driver ltq_mm_driver = {
+       .probe = ltq_mm_probe,
+       .driver = {
+               .name = "gpio-mm-ltq",
+               .owner = THIS_MODULE,
+               .of_match_table = ltq_mm_match,
+       },
+};
+
+static int __init ltq_mm_init(void)
+{
+       return platform_driver_register(&ltq_mm_driver);
+}
+
+subsys_initcall(ltq_mm_init);
index b4136501abd82e43d6752deb2265767bf97ee60a..39e495669961bd9284f6a169afebd1c4845e502e 100644 (file)
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/basic_mmio_gpio.h>
 #include <linux/module.h>
-#include <mach/mxs.h>
 
 #define MXS_SET                0x4
 #define MXS_CLR                0x8
 
-#define PINCTRL_DOUT(n)                ((cpu_is_mx23() ? 0x0500 : 0x0700) + (n) * 0x10)
-#define PINCTRL_DIN(n)         ((cpu_is_mx23() ? 0x0600 : 0x0900) + (n) * 0x10)
-#define PINCTRL_DOE(n)         ((cpu_is_mx23() ? 0x0700 : 0x0b00) + (n) * 0x10)
-#define PINCTRL_PIN2IRQ(n)     ((cpu_is_mx23() ? 0x0800 : 0x1000) + (n) * 0x10)
-#define PINCTRL_IRQEN(n)       ((cpu_is_mx23() ? 0x0900 : 0x1100) + (n) * 0x10)
-#define PINCTRL_IRQLEV(n)      ((cpu_is_mx23() ? 0x0a00 : 0x1200) + (n) * 0x10)
-#define PINCTRL_IRQPOL(n)      ((cpu_is_mx23() ? 0x0b00 : 0x1300) + (n) * 0x10)
-#define PINCTRL_IRQSTAT(n)     ((cpu_is_mx23() ? 0x0c00 : 0x1400) + (n) * 0x10)
+#define PINCTRL_DOUT(p)                ((is_imx23_gpio(p) ? 0x0500 : 0x0700) + (p->id) * 0x10)
+#define PINCTRL_DIN(p)         ((is_imx23_gpio(p) ? 0x0600 : 0x0900) + (p->id) * 0x10)
+#define PINCTRL_DOE(p)         ((is_imx23_gpio(p) ? 0x0700 : 0x0b00) + (p->id) * 0x10)
+#define PINCTRL_PIN2IRQ(p)     ((is_imx23_gpio(p) ? 0x0800 : 0x1000) + (p->id) * 0x10)
+#define PINCTRL_IRQEN(p)       ((is_imx23_gpio(p) ? 0x0900 : 0x1100) + (p->id) * 0x10)
+#define PINCTRL_IRQLEV(p)      ((is_imx23_gpio(p) ? 0x0a00 : 0x1200) + (p->id) * 0x10)
+#define PINCTRL_IRQPOL(p)      ((is_imx23_gpio(p) ? 0x0b00 : 0x1300) + (p->id) * 0x10)
+#define PINCTRL_IRQSTAT(p)     ((is_imx23_gpio(p) ? 0x0c00 : 0x1400) + (p->id) * 0x10)
 
 #define GPIO_INT_FALL_EDGE     0x0
 #define GPIO_INT_LOW_LEV       0x1
 
 #define irq_to_gpio(irq)       ((irq) - MXS_GPIO_IRQ_START)
 
+enum mxs_gpio_id {
+       IMX23_GPIO,
+       IMX28_GPIO,
+};
+
 struct mxs_gpio_port {
        void __iomem *base;
        int id;
        int irq;
        int virtual_irq_start;
        struct bgpio_chip bgc;
+       enum mxs_gpio_id devid;
 };
 
+static inline int is_imx23_gpio(struct mxs_gpio_port *port)
+{
+       return port->devid == IMX23_GPIO;
+}
+
+static inline int is_imx28_gpio(struct mxs_gpio_port *port)
+{
+       return port->devid == IMX28_GPIO;
+}
+
 /* Note: This driver assumes 32 GPIOs are handled in one register */
 
 static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
@@ -89,21 +107,21 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
        }
 
        /* set level or edge */
-       pin_addr = port->base + PINCTRL_IRQLEV(port->id);
+       pin_addr = port->base + PINCTRL_IRQLEV(port);
        if (edge & GPIO_INT_LEV_MASK)
                writel(pin_mask, pin_addr + MXS_SET);
        else
                writel(pin_mask, pin_addr + MXS_CLR);
 
        /* set polarity */
-       pin_addr = port->base + PINCTRL_IRQPOL(port->id);
+       pin_addr = port->base + PINCTRL_IRQPOL(port);
        if (edge & GPIO_INT_POL_MASK)
                writel(pin_mask, pin_addr + MXS_SET);
        else
                writel(pin_mask, pin_addr + MXS_CLR);
 
        writel(1 << (gpio & 0x1f),
-              port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);
+              port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
 
        return 0;
 }
@@ -117,8 +135,8 @@ static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
 
        desc->irq_data.chip->irq_ack(&desc->irq_data);
 
-       irq_stat = readl(port->base + PINCTRL_IRQSTAT(port->id)) &
-                       readl(port->base + PINCTRL_IRQEN(port->id));
+       irq_stat = readl(port->base + PINCTRL_IRQSTAT(port)) &
+                       readl(port->base + PINCTRL_IRQEN(port));
 
        while (irq_stat != 0) {
                int irqoffset = fls(irq_stat) - 1;
@@ -164,8 +182,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port)
        ct->chip.irq_unmask = irq_gc_mask_set_bit;
        ct->chip.irq_set_type = mxs_gpio_set_irq_type;
        ct->chip.irq_set_wake = mxs_gpio_set_wake_irq;
-       ct->regs.ack = PINCTRL_IRQSTAT(port->id) + MXS_CLR;
-       ct->regs.mask = PINCTRL_IRQEN(port->id);
+       ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR;
+       ct->regs.mask = PINCTRL_IRQEN(port);
 
        irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
 }
@@ -179,60 +197,83 @@ static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
        return port->virtual_irq_start + offset;
 }
 
+static struct platform_device_id mxs_gpio_ids[] = {
+       {
+               .name = "imx23-gpio",
+               .driver_data = IMX23_GPIO,
+       }, {
+               .name = "imx28-gpio",
+               .driver_data = IMX28_GPIO,
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(platform, mxs_gpio_ids);
+
+static const struct of_device_id mxs_gpio_dt_ids[] = {
+       { .compatible = "fsl,imx23-gpio", .data = (void *) IMX23_GPIO, },
+       { .compatible = "fsl,imx28-gpio", .data = (void *) IMX28_GPIO, },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_gpio_dt_ids);
+
 static int __devinit mxs_gpio_probe(struct platform_device *pdev)
 {
+       const struct of_device_id *of_id =
+                       of_match_device(mxs_gpio_dt_ids, &pdev->dev);
+       struct device_node *np = pdev->dev.of_node;
+       struct device_node *parent;
        static void __iomem *base;
        struct mxs_gpio_port *port;
        struct resource *iores = NULL;
        int err;
 
-       port = kzalloc(sizeof(struct mxs_gpio_port), GFP_KERNEL);
+       port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
        if (!port)
                return -ENOMEM;
 
-       port->id = pdev->id;
+       if (np) {
+               port->id = of_alias_get_id(np, "gpio");
+               if (port->id < 0)
+                       return port->id;
+               port->devid = (enum mxs_gpio_id) of_id->data;
+       } else {
+               port->id = pdev->id;
+               port->devid = pdev->id_entry->driver_data;
+       }
        port->virtual_irq_start = MXS_GPIO_IRQ_START + port->id * 32;
 
+       port->irq = platform_get_irq(pdev, 0);
+       if (port->irq < 0)
+               return port->irq;
+
        /*
         * map memory region only once, as all the gpio ports
         * share the same one
         */
        if (!base) {
-               iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-               if (!iores) {
-                       err = -ENODEV;
-                       goto out_kfree;
-               }
-
-               if (!request_mem_region(iores->start, resource_size(iores),
-                                       pdev->name)) {
-                       err = -EBUSY;
-                       goto out_kfree;
-               }
-
-               base = ioremap(iores->start, resource_size(iores));
-               if (!base) {
-                       err = -ENOMEM;
-                       goto out_release_mem;
+               if (np) {
+                       parent = of_get_parent(np);
+                       base = of_iomap(parent, 0);
+                       of_node_put(parent);
+               } else {
+                       iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+                       base = devm_request_and_ioremap(&pdev->dev, iores);
                }
+               if (!base)
+                       return -EADDRNOTAVAIL;
        }
        port->base = base;
 
-       port->irq = platform_get_irq(pdev, 0);
-       if (port->irq < 0) {
-               err = -EINVAL;
-               goto out_iounmap;
-       }
-
        /*
         * select the pin interrupt functionality but initially
         * disable the interrupts
         */
-       writel(~0U, port->base + PINCTRL_PIN2IRQ(port->id));
-       writel(0, port->base + PINCTRL_IRQEN(port->id));
+       writel(~0U, port->base + PINCTRL_PIN2IRQ(port));
+       writel(0, port->base + PINCTRL_IRQEN(port));
 
        /* clear address has to be used to clear IRQSTAT bits */
-       writel(~0U, port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);
+       writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
 
        /* gpio-mxs can be a generic irq chip */
        mxs_gpio_init_gc(port);
@@ -242,41 +283,32 @@ static int __devinit mxs_gpio_probe(struct platform_device *pdev)
        irq_set_handler_data(port->irq, port);
 
        err = bgpio_init(&port->bgc, &pdev->dev, 4,
-                        port->base + PINCTRL_DIN(port->id),
-                        port->base + PINCTRL_DOUT(port->id), NULL,
-                        port->base + PINCTRL_DOE(port->id), NULL, 0);
+                        port->base + PINCTRL_DIN(port),
+                        port->base + PINCTRL_DOUT(port), NULL,
+                        port->base + PINCTRL_DOE(port), NULL, 0);
        if (err)
-               goto out_iounmap;
+               return err;
 
        port->bgc.gc.to_irq = mxs_gpio_to_irq;
        port->bgc.gc.base = port->id * 32;
 
        err = gpiochip_add(&port->bgc.gc);
-       if (err)
-               goto out_bgpio_remove;
+       if (err) {
+               bgpio_remove(&port->bgc);
+               return err;
+       }
 
        return 0;
-
-out_bgpio_remove:
-       bgpio_remove(&port->bgc);
-out_iounmap:
-       if (iores)
-               iounmap(port->base);
-out_release_mem:
-       if (iores)
-               release_mem_region(iores->start, resource_size(iores));
-out_kfree:
-       kfree(port);
-       dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err);
-       return err;
 }
 
 static struct platform_driver mxs_gpio_driver = {
        .driver         = {
                .name   = "gpio-mxs",
                .owner  = THIS_MODULE,
+               .of_match_table = mxs_gpio_dt_ids,
        },
        .probe          = mxs_gpio_probe,
+       .id_table       = mxs_gpio_ids,
 };
 
 static int __init mxs_gpio_init(void)
index 421f6af0f99582022f395da0f2e0d9f2d6523b83..b6453d0e44add0846373be7aed37f1f41d66b06f 100644 (file)
@@ -2452,6 +2452,12 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
                        .ngpio  = EXYNOS5_GPIO_C3_NR,
                        .label  = "GPC3",
                },
+       }, {
+               .chip   = {
+                       .base   = EXYNOS5_GPC4(0),
+                       .ngpio  = EXYNOS5_GPIO_C4_NR,
+                       .label  = "GPC4",
+               },
        }, {
                .chip   = {
                        .base   = EXYNOS5_GPD0(0),
@@ -2826,8 +2832,11 @@ static __init void exynos5_gpiolib_init(void)
                goto err_ioremap1;
        }
 
+       /* need to set base address for gpc4 */
+       exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
+
        /* need to set base address for gpx */
-       chip = &exynos5_gpios_1[20];
+       chip = &exynos5_gpios_1[21];
        gpx_base = gpio_base1 + 0xC00;
        for (i = 0; i < 4; i++, chip++, gpx_base += 0x20)
                chip->base = gpx_base;
index 8cadf4d683a822e8e21c26cfe47871690fb62a75..424dce8e3f30107ce2e9b5eb9b5d2bb394164cf9 100644 (file)
@@ -232,6 +232,14 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev)
                        sch_gpio_resume.ngpio = 9;
                        break;
 
+               case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
+                       sch_gpio_core.base = 0;
+                       sch_gpio_core.ngpio = 21;
+
+                       sch_gpio_resume.base = 21;
+                       sch_gpio_resume.ngpio = 9;
+                       break;
+
                default:
                        return -ENODEV;
        }
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
new file mode 100644 (file)
index 0000000..38416be
--- /dev/null
@@ -0,0 +1,435 @@
+/*
+ * STMicroelectronics ConneXt (STA2X11) GPIO driver
+ *
+ * Copyright 2012 ST Microelectronics (Alessandro Rubini)
+ * Based on gpio-ml-ioh.c, Copyright 2010 OKI Semiconductors Ltd.
+ * Also based on previous sta2x11 work, Copyright 2011 Wind River Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/sta2x11-mfd.h>
+
+struct gsta_regs {
+       u32 dat;                /* 0x00 */
+       u32 dats;
+       u32 datc;
+       u32 pdis;
+       u32 dir;                /* 0x10 */
+       u32 dirs;
+       u32 dirc;
+       u32 unused_1c;
+       u32 afsela;             /* 0x20 */
+       u32 unused_24[7];
+       u32 rimsc;              /* 0x40 */
+       u32 fimsc;
+       u32 is;
+       u32 ic;
+};
+
+struct gsta_gpio {
+       spinlock_t                      lock;
+       struct device                   *dev;
+       void __iomem                    *reg_base;
+       struct gsta_regs __iomem        *regs[GSTA_NR_BLOCKS];
+       struct gpio_chip                gpio;
+       int                             irq_base;
+       /* FIXME: save the whole config here (AF, ...) */
+       unsigned                        irq_type[GSTA_NR_GPIO];
+};
+
+static inline struct gsta_regs __iomem *__regs(struct gsta_gpio *chip, int nr)
+{
+       return chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+}
+
+static inline u32 __bit(int nr)
+{
+       return 1U << (nr % GSTA_GPIO_PER_BLOCK);
+}
+
+/*
+ * gpio methods
+ */
+
+static void gsta_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+{
+       struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+       struct gsta_regs __iomem *regs = __regs(chip, nr);
+       u32 bit = __bit(nr);
+
+       if (val)
+               writel(bit, &regs->dats);
+       else
+               writel(bit, &regs->datc);
+}
+
+static int gsta_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+       struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+       struct gsta_regs __iomem *regs = __regs(chip, nr);
+       u32 bit = __bit(nr);
+
+       return readl(&regs->dat) & bit;
+}
+
+static int gsta_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+                                     int val)
+{
+       struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+       struct gsta_regs __iomem *regs = __regs(chip, nr);
+       u32 bit = __bit(nr);
+
+       writel(bit, &regs->dirs);
+       /* Data register after direction, otherwise pullup/down is selected */
+       if (val)
+               writel(bit, &regs->dats);
+       else
+               writel(bit, &regs->datc);
+       return 0;
+}
+
+static int gsta_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+       struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+       struct gsta_regs __iomem *regs = __regs(chip, nr);
+       u32 bit = __bit(nr);
+
+       writel(bit, &regs->dirc);
+       return 0;
+}
+
+static int gsta_gpio_to_irq(struct gpio_chip *gpio, unsigned offset)
+{
+       struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+       return chip->irq_base + offset;
+}
+
+static void gsta_gpio_setup(struct gsta_gpio *chip) /* called from probe */
+{
+       struct gpio_chip *gpio = &chip->gpio;
+
+       /*
+        * ARCH_NR_GPIOS is currently 256 and dynamic allocation starts
+        * from the end. However, for compatibility, we need the first
+        * ConneXt device to start from gpio 0: it's the main chipset
+        * on most boards so documents and drivers assume gpio0..gpio127
+        */
+       static int gpio_base;
+
+       gpio->label = dev_name(chip->dev);
+       gpio->owner = THIS_MODULE;
+       gpio->direction_input = gsta_gpio_direction_input;
+       gpio->get = gsta_gpio_get;
+       gpio->direction_output = gsta_gpio_direction_output;
+       gpio->set = gsta_gpio_set;
+       gpio->dbg_show = NULL;
+       gpio->base = gpio_base;
+       gpio->ngpio = GSTA_NR_GPIO;
+       gpio->can_sleep = 0;
+       gpio->to_irq = gsta_gpio_to_irq;
+
+       /*
+        * After the first device, turn to dynamic gpio numbers.
+        * For example, with ARCH_NR_GPIOS = 256 we can fit two cards
+        */
+       if (!gpio_base)
+               gpio_base = -1;
+}
+
+/*
+ * Special method: alternate functions and pullup/pulldown. This is only
+ * invoked on startup to configure gpio's according to platform data.
+ * FIXME : this functionality shall be managed (and exported to other drivers)
+ * via the pin control subsystem.
+ */
+static void gsta_set_config(struct gsta_gpio *chip, int nr, unsigned cfg)
+{
+       struct gsta_regs __iomem *regs = __regs(chip, nr);
+       unsigned long flags;
+       u32 bit = __bit(nr);
+       u32 val;
+       int err = 0;
+
+       pr_info("%s: %p %i %i\n", __func__, chip, nr, cfg);
+
+       if (cfg == PINMUX_TYPE_NONE)
+               return;
+
+       /* Alternate function or not? */
+       spin_lock_irqsave(&chip->lock, flags);
+       val = readl(&regs->afsela);
+       if (cfg == PINMUX_TYPE_FUNCTION)
+               val |= bit;
+       else
+               val &= ~bit;
+       writel(val | bit, &regs->afsela);
+       if (cfg == PINMUX_TYPE_FUNCTION) {
+               spin_unlock_irqrestore(&chip->lock, flags);
+               return;
+       }
+
+       /* not alternate function: set details */
+       switch (cfg) {
+       case PINMUX_TYPE_OUTPUT_LOW:
+               writel(bit, &regs->dirs);
+               writel(bit, &regs->datc);
+               break;
+       case PINMUX_TYPE_OUTPUT_HIGH:
+               writel(bit, &regs->dirs);
+               writel(bit, &regs->dats);
+               break;
+       case PINMUX_TYPE_INPUT:
+               writel(bit, &regs->dirc);
+               val = readl(&regs->pdis) | bit;
+               writel(val, &regs->pdis);
+               break;
+       case PINMUX_TYPE_INPUT_PULLUP:
+               writel(bit, &regs->dirc);
+               val = readl(&regs->pdis) & ~bit;
+               writel(val, &regs->pdis);
+               writel(bit, &regs->dats);
+               break;
+       case PINMUX_TYPE_INPUT_PULLDOWN:
+               writel(bit, &regs->dirc);
+               val = readl(&regs->pdis) & ~bit;
+               writel(val, &regs->pdis);
+               writel(bit, &regs->datc);
+               break;
+       default:
+               err = 1;
+       }
+       spin_unlock_irqrestore(&chip->lock, flags);
+       if (err)
+               pr_err("%s: chip %p, pin %i, cfg %i is invalid\n",
+                      __func__, chip, nr, cfg);
+}
+
+/*
+ * Irq methods
+ */
+
+static void gsta_irq_disable(struct irq_data *data)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+       struct gsta_gpio *chip = gc->private;
+       int nr = data->irq - chip->irq_base;
+       struct gsta_regs __iomem *regs = __regs(chip, nr);
+       u32 bit = __bit(nr);
+       u32 val;
+       unsigned long flags;
+
+       spin_lock_irqsave(&chip->lock, flags);
+       if (chip->irq_type[nr] & IRQ_TYPE_EDGE_RISING) {
+               val = readl(&regs->rimsc) & ~bit;
+               writel(val, &regs->rimsc);
+       }
+       if (chip->irq_type[nr] & IRQ_TYPE_EDGE_FALLING) {
+               val = readl(&regs->fimsc) & ~bit;
+               writel(val, &regs->fimsc);
+       }
+       spin_unlock_irqrestore(&chip->lock, flags);
+       return;
+}
+
+static void gsta_irq_enable(struct irq_data *data)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+       struct gsta_gpio *chip = gc->private;
+       int nr = data->irq - chip->irq_base;
+       struct gsta_regs __iomem *regs = __regs(chip, nr);
+       u32 bit = __bit(nr);
+       u32 val;
+       int type;
+       unsigned long flags;
+
+       type = chip->irq_type[nr];
+
+       spin_lock_irqsave(&chip->lock, flags);
+       val = readl(&regs->rimsc);
+       if (type & IRQ_TYPE_EDGE_RISING)
+               writel(val | bit, &regs->rimsc);
+       else
+               writel(val & ~bit, &regs->rimsc);
+       val = readl(&regs->rimsc);
+       if (type & IRQ_TYPE_EDGE_FALLING)
+               writel(val | bit, &regs->fimsc);
+       else
+               writel(val & ~bit, &regs->fimsc);
+       spin_unlock_irqrestore(&chip->lock, flags);
+       return;
+}
+
+static int gsta_irq_type(struct irq_data *d, unsigned int type)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct gsta_gpio *chip = gc->private;
+       int nr = d->irq - chip->irq_base;
+
+       /* We only support edge interrupts */
+       if (!(type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))) {
+               pr_debug("%s: unsupported type 0x%x\n", __func__, type);
+               return -EINVAL;
+       }
+
+       chip->irq_type[nr] = type; /* used for enable/disable */
+
+       gsta_irq_enable(d);
+       return 0;
+}
+
+static irqreturn_t gsta_gpio_handler(int irq, void *dev_id)
+{
+       struct gsta_gpio *chip = dev_id;
+       struct gsta_regs __iomem *regs;
+       u32 is;
+       int i, nr, base;
+       irqreturn_t ret = IRQ_NONE;
+
+       for (i = 0; i < GSTA_NR_BLOCKS; i++) {
+               regs = chip->regs[i];
+               base = chip->irq_base + i * GSTA_GPIO_PER_BLOCK;
+               while ((is = readl(&regs->is))) {
+                       nr = __ffs(is);
+                       irq = base + nr;
+                       generic_handle_irq(irq);
+                       writel(1 << nr, &regs->ic);
+                       ret = IRQ_HANDLED;
+               }
+       }
+       return ret;
+}
+
+static __devinit void gsta_alloc_irq_chip(struct gsta_gpio *chip)
+{
+       struct irq_chip_generic *gc;
+       struct irq_chip_type *ct;
+
+       gc = irq_alloc_generic_chip(KBUILD_MODNAME, 1, chip->irq_base,
+                                    chip->reg_base, handle_simple_irq);
+       gc->private = chip;
+       ct = gc->chip_types;
+
+       ct->chip.irq_set_type = gsta_irq_type;
+       ct->chip.irq_disable = gsta_irq_disable;
+       ct->chip.irq_enable = gsta_irq_enable;
+
+       /* FIXME: this makes at most 32 interrupts. Request 0 by now */
+       irq_setup_generic_chip(gc, 0 /* IRQ_MSK(GSTA_GPIO_PER_BLOCK) */, 0,
+                              IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+
+       /* Set up all 128 interrupts: code from setup_generic_chip */
+       {
+               struct irq_chip_type *ct = gc->chip_types;
+               int i, j;
+               for (j = 0; j < GSTA_NR_GPIO; j++) {
+                       i = chip->irq_base + j;
+                       irq_set_chip_and_handler(i, &ct->chip, ct->handler);
+                       irq_set_chip_data(i, gc);
+                       irq_modify_status(i, IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+               }
+               gc->irq_cnt = i - gc->irq_base;
+       }
+}
+
+/* The platform device used here is instantiated by the MFD device */
+static int __devinit gsta_probe(struct platform_device *dev)
+{
+       int i, err;
+       struct pci_dev *pdev;
+       struct sta2x11_gpio_pdata *gpio_pdata;
+       struct gsta_gpio *chip;
+       struct resource *res;
+
+       pdev = *(struct pci_dev **)(dev->dev.platform_data);
+       gpio_pdata = dev_get_platdata(&pdev->dev);
+
+       if (gpio_pdata == NULL)
+               dev_err(&dev->dev, "no gpio config\n");
+       pr_debug("gpio config: %p\n", gpio_pdata);
+
+       res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+
+       chip = devm_kzalloc(&dev->dev, sizeof(*chip), GFP_KERNEL);
+       chip->dev = &dev->dev;
+       chip->reg_base = devm_request_and_ioremap(&dev->dev, res);
+
+       for (i = 0; i < GSTA_NR_BLOCKS; i++) {
+               chip->regs[i] = chip->reg_base + i * 4096;
+               /* disable all irqs */
+               writel(0, &chip->regs[i]->rimsc);
+               writel(0, &chip->regs[i]->fimsc);
+               writel(~0, &chip->regs[i]->ic);
+       }
+       spin_lock_init(&chip->lock);
+       gsta_gpio_setup(chip);
+       for (i = 0; i < GSTA_NR_GPIO; i++)
+               gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
+
+       /* 384 was used in previous code: be compatible for other drivers */
+       err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
+       if (err < 0) {
+               dev_warn(&dev->dev, "sta2x11 gpio: Can't get irq base (%i)\n",
+                        -err);
+               return err;
+       }
+       chip->irq_base = err;
+       gsta_alloc_irq_chip(chip);
+
+       err = request_irq(pdev->irq, gsta_gpio_handler,
+                            IRQF_SHARED, KBUILD_MODNAME, chip);
+       if (err < 0) {
+               dev_err(&dev->dev, "sta2x11 gpio: Can't request irq (%i)\n",
+                       -err);
+               goto err_free_descs;
+       }
+
+       err = gpiochip_add(&chip->gpio);
+       if (err < 0) {
+               dev_err(&dev->dev, "sta2x11 gpio: Can't register (%i)\n",
+                       -err);
+               goto err_free_irq;
+       }
+
+       platform_set_drvdata(dev, chip);
+       return 0;
+
+err_free_irq:
+       free_irq(pdev->irq, chip);
+err_free_descs:
+       irq_free_descs(chip->irq_base, GSTA_NR_GPIO);
+       return err;
+}
+
+static struct platform_driver sta2x11_gpio_platform_driver = {
+       .driver = {
+               .name   = "sta2x11-gpio",
+               .owner  = THIS_MODULE,
+       },
+       .probe = gsta_probe,
+};
+
+module_platform_driver(sta2x11_gpio_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("sta2x11_gpio GPIO driver");
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
new file mode 100644 (file)
index 0000000..e35096b
--- /dev/null
@@ -0,0 +1,301 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/of_platform.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * The Serial To Parallel (STP) is found on MIPS based Lantiq socs. It is a
+ * peripheral controller used to drive external shift register cascades. At most
+ * 3 groups of 8 bits can be driven. The hardware is able to allow the DSL modem
+ * to drive the 2 LSBs of the cascade automatically.
+ */
+
+/* control register 0 */
+#define XWAY_STP_CON0          0x00
+/* control register 1 */
+#define XWAY_STP_CON1          0x04
+/* data register 0 */
+#define XWAY_STP_CPU0          0x08
+/* data register 1 */
+#define XWAY_STP_CPU1          0x0C
+/* access register */
+#define XWAY_STP_AR            0x10
+
+/* software or hardware update select bit */
+#define XWAY_STP_CON_SWU       BIT(31)
+
+/* automatic update rates */
+#define XWAY_STP_2HZ           0
+#define XWAY_STP_4HZ           BIT(23)
+#define XWAY_STP_8HZ           BIT(24)
+#define XWAY_STP_10HZ          (BIT(24) | BIT(23))
+#define XWAY_STP_SPEED_MASK    (0xf << 23)
+
+/* clock source for automatic update */
+#define XWAY_STP_UPD_FPI       BIT(31)
+#define XWAY_STP_UPD_MASK      (BIT(31) | BIT(30))
+
+/* let the adsl core drive the 2 LSBs */
+#define XWAY_STP_ADSL_SHIFT    24
+#define XWAY_STP_ADSL_MASK     0x3
+
+/* 2 groups of 3 bits can be driven by the phys */
+#define XWAY_STP_PHY_MASK      0x3
+#define XWAY_STP_PHY1_SHIFT    27
+#define XWAY_STP_PHY2_SHIFT    15
+
+/* STP has 3 groups of 8 bits */
+#define XWAY_STP_GROUP0                BIT(0)
+#define XWAY_STP_GROUP1                BIT(1)
+#define XWAY_STP_GROUP2                BIT(2)
+#define XWAY_STP_GROUP_MASK    (0x7)
+
+/* Edge configuration bits */
+#define XWAY_STP_FALLING       BIT(26)
+#define XWAY_STP_EDGE_MASK     BIT(26)
+
+#define xway_stp_r32(m, reg)           __raw_readl(m + reg)
+#define xway_stp_w32(m, val, reg)      __raw_writel(val, m + reg)
+#define xway_stp_w32_mask(m, clear, set, reg) \
+               ltq_w32((ltq_r32(m + reg) & ~(clear)) | (set), \
+               m + reg)
+
+struct xway_stp {
+       struct gpio_chip gc;
+       void __iomem *virt;
+       u32 edge;       /* rising or falling edge triggered shift register */
+       u16 shadow;     /* shadow the shift registers state */
+       u8 groups;      /* we can drive 1-3 groups of 8bit each */
+       u8 dsl;         /* the 2 LSBs can be driven by the dsl core */
+       u8 phy1;        /* 3 bits can be driven by phy1 */
+       u8 phy2;        /* 3 bits can be driven by phy2 */
+       u8 reserved;    /* mask out the hw driven bits in gpio_request */
+};
+
+/**
+ * xway_stp_set() - gpio_chip->set - set gpios.
+ * @gc:     Pointer to gpio_chip device structure.
+ * @gpio:   GPIO signal number.
+ * @val:    Value to be written to specified signal.
+ *
+ * Set the shadow value and call ltq_ebu_apply.
+ */
+static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
+{
+       struct xway_stp *chip =
+               container_of(gc, struct xway_stp, gc);
+
+       if (val)
+               chip->shadow |= BIT(gpio);
+       else
+               chip->shadow &= ~BIT(gpio);
+       xway_stp_w32(chip->virt, chip->shadow, XWAY_STP_CPU0);
+       xway_stp_w32_mask(chip->virt, 0, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+}
+
+/**
+ * xway_stp_dir_out() - gpio_chip->dir_out - set gpio direction.
+ * @gc:     Pointer to gpio_chip device structure.
+ * @gpio:   GPIO signal number.
+ * @val:    Value to be written to specified signal.
+ *
+ * Same as xway_stp_set, always returns 0.
+ */
+static int xway_stp_dir_out(struct gpio_chip *gc, unsigned gpio, int val)
+{
+       xway_stp_set(gc, gpio, val);
+
+       return 0;
+}
+
+/**
+ * xway_stp_request() - gpio_chip->request
+ * @gc:     Pointer to gpio_chip device structure.
+ * @gpio:   GPIO signal number.
+ *
+ * We mask out the HW driven pins
+ */
+static int xway_stp_request(struct gpio_chip *gc, unsigned gpio)
+{
+       struct xway_stp *chip =
+               container_of(gc, struct xway_stp, gc);
+
+       if ((gpio < 8) && (chip->reserved & BIT(gpio))) {
+               dev_err(gc->dev, "GPIO %d is driven by hardware\n", gpio);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+/**
+ * xway_stp_hw_init() - Configure the STP unit and enable the clock gate
+ * @chip: Pointer to the xway_stp chip structure (holds the remapped registers)
+ */
+static int xway_stp_hw_init(struct xway_stp *chip)
+{
+       /* sane defaults */
+       xway_stp_w32(chip->virt, 0, XWAY_STP_AR);
+       xway_stp_w32(chip->virt, 0, XWAY_STP_CPU0);
+       xway_stp_w32(chip->virt, 0, XWAY_STP_CPU1);
+       xway_stp_w32(chip->virt, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+       xway_stp_w32(chip->virt, 0, XWAY_STP_CON1);
+
+       /* apply edge trigger settings for the shift register */
+       xway_stp_w32_mask(chip->virt, XWAY_STP_EDGE_MASK,
+                               chip->edge, XWAY_STP_CON0);
+
+       /* apply led group settings */
+       xway_stp_w32_mask(chip->virt, XWAY_STP_GROUP_MASK,
+                               chip->groups, XWAY_STP_CON1);
+
+       /* tell the hardware which pins are controlled by the dsl modem */
+       xway_stp_w32_mask(chip->virt,
+                       XWAY_STP_ADSL_MASK << XWAY_STP_ADSL_SHIFT,
+                       chip->dsl << XWAY_STP_ADSL_SHIFT,
+                       XWAY_STP_CON0);
+
+       /* tell the hardware which pins are controlled by the phys */
+       xway_stp_w32_mask(chip->virt,
+                       XWAY_STP_PHY_MASK << XWAY_STP_PHY1_SHIFT,
+                       chip->phy1 << XWAY_STP_PHY1_SHIFT,
+                       XWAY_STP_CON0);
+       xway_stp_w32_mask(chip->virt,
+                       XWAY_STP_PHY_MASK << XWAY_STP_PHY2_SHIFT,
+                       chip->phy2 << XWAY_STP_PHY2_SHIFT,
+                       XWAY_STP_CON1);
+
+       /* mask out the hw driven bits in gpio_request */
+       chip->reserved = (chip->phy2 << 5) | (chip->phy1 << 2) | chip->dsl;
+
+       /*
+        * if we have pins that are driven by hw, we need to tell the stp what
+        * clock to use as a timer.
+        */
+       if (chip->reserved)
+               xway_stp_w32_mask(chip->virt, XWAY_STP_UPD_MASK,
+                       XWAY_STP_UPD_FPI, XWAY_STP_CON1);
+
+       return 0;
+}
+
+static int __devinit xway_stp_probe(struct platform_device *pdev)
+{
+       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       const __be32 *shadow, *groups, *dsl, *phy;
+       struct xway_stp *chip;
+       struct clk *clk;
+       int ret = 0;
+
+       if (!res) {
+               dev_err(&pdev->dev, "failed to request STP resource\n");
+               return -ENOENT;
+       }
+
+       chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       chip->virt = devm_request_and_ioremap(&pdev->dev, res);
+       if (!chip->virt) {
+               dev_err(&pdev->dev, "failed to remap STP memory\n");
+               return -ENOMEM;
+       }
+       chip->gc.dev = &pdev->dev;
+       chip->gc.label = "stp-xway";
+       chip->gc.direction_output = xway_stp_dir_out;
+       chip->gc.set = xway_stp_set;
+       chip->gc.request = xway_stp_request;
+       chip->gc.base = -1;
+       chip->gc.owner = THIS_MODULE;
+
+       /* store the shadow value if one was passed by the devicetree */
+       shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
+       if (shadow)
+               chip->shadow = be32_to_cpu(*shadow);
+
+       /* find out which gpio groups should be enabled */
+       groups = of_get_property(pdev->dev.of_node, "lantiq,groups", NULL);
+       if (groups)
+               chip->groups = be32_to_cpu(*groups) & XWAY_STP_GROUP_MASK;
+       else
+               chip->groups = XWAY_STP_GROUP0;
+       chip->gc.ngpio = fls(chip->groups) * 8;
+
+       /* find out which gpios are controlled by the dsl core */
+       dsl = of_get_property(pdev->dev.of_node, "lantiq,dsl", NULL);
+       if (dsl)
+               chip->dsl = be32_to_cpu(*dsl) & XWAY_STP_ADSL_MASK;
+
+       /* find out which gpios are controlled by the phys */
+       if (of_machine_is_compatible("lantiq,ar9") ||
+                       of_machine_is_compatible("lantiq,gr9") ||
+                       of_machine_is_compatible("lantiq,vr9")) {
+               phy = of_get_property(pdev->dev.of_node, "lantiq,phy1", NULL);
+               if (phy)
+                       chip->phy1 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK;
+               phy = of_get_property(pdev->dev.of_node, "lantiq,phy2", NULL);
+               if (phy)
+                       chip->phy2 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK;
+       }
+
+       /* check which edge trigger we should use, default to a falling edge */
+       if (!of_find_property(pdev->dev.of_node, "lantiq,rising", NULL))
+               chip->edge = XWAY_STP_FALLING;
+
+       clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(clk)) {
+               dev_err(&pdev->dev, "Failed to get clock\n");
+               return PTR_ERR(clk);
+       }
+       clk_enable(clk);
+
+       ret = xway_stp_hw_init(chip);
+       if (!ret)
+               ret = gpiochip_add(&chip->gc);
+
+       if (!ret)
+               dev_info(&pdev->dev, "Init done\n");
+
+       return ret;
+}
+
+static const struct of_device_id xway_stp_match[] = {
+       { .compatible = "lantiq,gpio-stp-xway" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, xway_stp_match);
+
+static struct platform_driver xway_stp_driver = {
+       .probe = xway_stp_probe,
+       .driver = {
+               .name = "gpio-stp-xway",
+               .owner = THIS_MODULE,
+               .of_match_table = xway_stp_match,
+       },
+};
+
+int __init xway_stp_init(void)
+{
+       return platform_driver_register(&xway_stp_driver);
+}
+
+subsys_initcall(xway_stp_init);
index 7eef648a3351a2b89e1de03f95f168b8889f8634..c1ad2884f2edb0b79f3894a9a16662e5776bda51 100644 (file)
 #include <linux/errno.h>
 #include <linux/gpio.h>
 #include <linux/i2c.h>
+#include <linux/platform_device.h>
 #include <linux/mfd/tps65910.h>
+#include <linux/of_device.h>
+
+struct tps65910_gpio {
+       struct gpio_chip gpio_chip;
+       struct tps65910 *tps65910;
+};
+
+static inline struct tps65910_gpio *to_tps65910_gpio(struct gpio_chip *chip)
+{
+       return container_of(chip, struct tps65910_gpio, gpio_chip);
+}
 
 static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
 {
-       struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
-       uint8_t val;
+       struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+       struct tps65910 *tps65910 = tps65910_gpio->tps65910;
+       unsigned int val;
 
-       tps65910->read(tps65910, TPS65910_GPIO0 + offset, 1, &val);
+       tps65910_reg_read(tps65910, TPS65910_GPIO0 + offset, &val);
 
        if (val & GPIO_STS_MASK)
                return 1;
@@ -36,83 +49,170 @@ static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
 static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
                              int value)
 {
-       struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+       struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+       struct tps65910 *tps65910 = tps65910_gpio->tps65910;
 
        if (value)
-               tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+               tps65910_reg_set_bits(tps65910, TPS65910_GPIO0 + offset,
                                                GPIO_SET_MASK);
        else
-               tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+               tps65910_reg_clear_bits(tps65910, TPS65910_GPIO0 + offset,
                                                GPIO_SET_MASK);
 }
 
 static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
                                int value)
 {
-       struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+       struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+       struct tps65910 *tps65910 = tps65910_gpio->tps65910;
 
        /* Set the initial value */
        tps65910_gpio_set(gc, offset, value);
 
-       return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+       return tps65910_reg_set_bits(tps65910, TPS65910_GPIO0 + offset,
                                                GPIO_CFG_MASK);
 }
 
 static int tps65910_gpio_input(struct gpio_chip *gc, unsigned offset)
 {
-       struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+       struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+       struct tps65910 *tps65910 = tps65910_gpio->tps65910;
 
-       return tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+       return tps65910_reg_clear_bits(tps65910, TPS65910_GPIO0 + offset,
                                                GPIO_CFG_MASK);
 }
 
-void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base)
+#ifdef CONFIG_OF
+static struct tps65910_board *tps65910_parse_dt_for_gpio(struct device *dev,
+               struct tps65910 *tps65910, int chip_ngpio)
 {
+       struct tps65910_board *tps65910_board = tps65910->of_plat_data;
+       unsigned int prop_array[TPS6591X_MAX_NUM_GPIO];
+       int ngpio = min(chip_ngpio, TPS6591X_MAX_NUM_GPIO);
        int ret;
-       struct tps65910_board *board_data;
+       int idx;
+
+       tps65910_board->gpio_base = -1;
+       ret = of_property_read_u32_array(tps65910->dev->of_node,
+                       "ti,en-gpio-sleep", prop_array, ngpio);
+       if (ret < 0) {
+               dev_dbg(dev, "ti,en-gpio-sleep not specified\n");
+               return tps65910_board;
+       }
 
-       if (!gpio_base)
-               return;
+       for (idx = 0; idx < ngpio; idx++)
+               tps65910_board->en_gpio_sleep[idx] = (prop_array[idx] != 0);
 
-       tps65910->gpio.owner            = THIS_MODULE;
-       tps65910->gpio.label            = tps65910->i2c_client->name;
-       tps65910->gpio.dev              = tps65910->dev;
-       tps65910->gpio.base             = gpio_base;
+       return tps65910_board;
+}
+#else
+static struct tps65910_board *tps65910_parse_dt_for_gpio(struct device *dev,
+               struct tps65910 *tps65910, int chip_ngpio)
+{
+       return NULL;
+}
+#endif
+
+static int __devinit tps65910_gpio_probe(struct platform_device *pdev)
+{
+       struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
+       struct tps65910_board *pdata = dev_get_platdata(tps65910->dev);
+       struct tps65910_gpio *tps65910_gpio;
+       int ret;
+       int i;
+
+       tps65910_gpio = devm_kzalloc(&pdev->dev,
+                               sizeof(*tps65910_gpio), GFP_KERNEL);
+       if (!tps65910_gpio) {
+               dev_err(&pdev->dev, "Could not allocate tps65910_gpio\n");
+               return -ENOMEM;
+       }
+
+       tps65910_gpio->tps65910 = tps65910;
+
+       tps65910_gpio->gpio_chip.owner = THIS_MODULE;
+       tps65910_gpio->gpio_chip.label = tps65910->i2c_client->name;
 
        switch(tps65910_chip_id(tps65910)) {
        case TPS65910:
-               tps65910->gpio.ngpio    = TPS65910_NUM_GPIO;
+               tps65910_gpio->gpio_chip.ngpio = TPS65910_NUM_GPIO;
                break;
        case TPS65911:
-               tps65910->gpio.ngpio    = TPS65911_NUM_GPIO;
+               tps65910_gpio->gpio_chip.ngpio = TPS65911_NUM_GPIO;
                break;
        default:
-               return;
+               return -EINVAL;
+       }
+       tps65910_gpio->gpio_chip.can_sleep = 1;
+       tps65910_gpio->gpio_chip.direction_input = tps65910_gpio_input;
+       tps65910_gpio->gpio_chip.direction_output = tps65910_gpio_output;
+       tps65910_gpio->gpio_chip.set    = tps65910_gpio_set;
+       tps65910_gpio->gpio_chip.get    = tps65910_gpio_get;
+       tps65910_gpio->gpio_chip.dev = &pdev->dev;
+       if (pdata && pdata->gpio_base)
+               tps65910_gpio->gpio_chip.base = pdata->gpio_base;
+       else
+               tps65910_gpio->gpio_chip.base = -1;
+
+       if (!pdata && tps65910->dev->of_node)
+               pdata = tps65910_parse_dt_for_gpio(&pdev->dev, tps65910,
+                       tps65910_gpio->gpio_chip.ngpio);
+
+       if (!pdata)
+               goto skip_init;
+
+       /* Configure sleep control for gpios if provided */
+       for (i = 0; i < tps65910_gpio->gpio_chip.ngpio; ++i) {
+               if (!pdata->en_gpio_sleep[i])
+                       continue;
+
+               ret = tps65910_reg_set_bits(tps65910,
+                       TPS65910_GPIO0 + i, GPIO_SLEEP_MASK);
+               if (ret < 0)
+                       dev_warn(tps65910->dev,
+                               "GPIO Sleep setting failed with err %d\n", ret);
        }
-       tps65910->gpio.can_sleep        = 1;
-
-       tps65910->gpio.direction_input  = tps65910_gpio_input;
-       tps65910->gpio.direction_output = tps65910_gpio_output;
-       tps65910->gpio.set              = tps65910_gpio_set;
-       tps65910->gpio.get              = tps65910_gpio_get;
-
-       /* Configure sleep control for gpios */
-       board_data = dev_get_platdata(tps65910->dev);
-       if (board_data) {
-               int i;
-               for (i = 0; i < tps65910->gpio.ngpio; ++i) {
-                       if (board_data->en_gpio_sleep[i]) {
-                               ret = tps65910_set_bits(tps65910,
-                                       TPS65910_GPIO0 + i, GPIO_SLEEP_MASK);
-                               if (ret < 0)
-                                       dev_warn(tps65910->dev,
-                                               "GPIO Sleep setting failed\n");
-                       }
-               }
+
+skip_init:
+       ret = gpiochip_add(&tps65910_gpio->gpio_chip);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+               return ret;
        }
 
-       ret = gpiochip_add(&tps65910->gpio);
+       platform_set_drvdata(pdev, tps65910_gpio);
+
+       return ret;
+}
+
+static int __devexit tps65910_gpio_remove(struct platform_device *pdev)
+{
+       struct tps65910_gpio *tps65910_gpio = platform_get_drvdata(pdev);
 
-       if (ret)
-               dev_warn(tps65910->dev, "GPIO registration failed: %d\n", ret);
+       return gpiochip_remove(&tps65910_gpio->gpio_chip);
 }
+
+static struct platform_driver tps65910_gpio_driver = {
+       .driver.name    = "tps65910-gpio",
+       .driver.owner   = THIS_MODULE,
+       .probe          = tps65910_gpio_probe,
+       .remove         = __devexit_p(tps65910_gpio_remove),
+};
+
+static int __init tps65910_gpio_init(void)
+{
+       return platform_driver_register(&tps65910_gpio_driver);
+}
+subsys_initcall(tps65910_gpio_init);
+
+static void __exit tps65910_gpio_exit(void)
+{
+       platform_driver_unregister(&tps65910_gpio_driver);
+}
+module_exit(tps65910_gpio_exit);
+
+MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>");
+MODULE_DESCRIPTION("GPIO interface for TPS65910/TPS65911 PMICs");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65910-gpio");
index deb949e75ec1dee2f6ad20c2541148beac431d1a..e56a2165641c845137b0b37afd4c30d93f570a84 100644 (file)
@@ -102,10 +102,8 @@ static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
        struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
        struct wm831x *wm831x = wm831x_gpio->wm831x;
 
-       if (!wm831x->irq_base)
-               return -EINVAL;
-
-       return wm831x->irq_base + WM831X_IRQ_GPIO_1 + offset;
+       return irq_create_mapping(wm831x->irq_domain,
+                                 WM831X_IRQ_GPIO_1 + offset);
 }
 
 static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
index d7038230b71e7f113e8ae453bce0327528de575a..7053140c65969758f9f22ded7cea130fe0ab7bd5 100644 (file)
@@ -35,9 +35,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
        {0,}
 };
 
+
+static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+       struct apertures_struct *ap;
+       bool primary = false;
+
+       ap = alloc_apertures(1);
+       ap->ranges[0].base = pci_resource_start(pdev, 0);
+       ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+       primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+       remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
+       kfree(ap);
+}
+
 static int __devinit
 cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+       cirrus_kick_out_firmware_fb(pdev);
+
        return drm_get_pci_dev(pdev, ent, &driver);
 }
 
index 21bdfa8836f79e1da44efb3e09e3674b31aac661..64ea597cb6d390219abd198a086e131b08b6d54d 100644 (file)
@@ -145,7 +145,7 @@ struct cirrus_device {
                struct ttm_bo_device bdev;
                atomic_t validate_sequence;
        } ttm;
-
+       bool mm_inited;
 };
 
 
index 2ebcd11a5023089a3a7a8d890c08e3a673184860..50e170f879dece492968752212068ebec4ad14d3 100644 (file)
@@ -275,12 +275,17 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
                                    pci_resource_len(dev->pdev, 0),
                                    DRM_MTRR_WC);
 
+       cirrus->mm_inited = true;
        return 0;
 }
 
 void cirrus_mm_fini(struct cirrus_device *cirrus)
 {
        struct drm_device *dev = cirrus->dev;
+
+       if (!cirrus->mm_inited)
+               return;
+
        ttm_bo_device_release(&cirrus->ttm.bdev);
 
        cirrus_ttm_global_release(cirrus);
index 92cea9d77ec913e8bc21417dd07ff97e8d27cb9c..08a7aa722d6b8f0d798b7a59ccd5b8146183f497 100644 (file)
@@ -2116,7 +2116,7 @@ out:
        return ret;
 }
 
-static int format_check(struct drm_mode_fb_cmd2 *r)
+static int format_check(const struct drm_mode_fb_cmd2 *r)
 {
        uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
 
@@ -2185,7 +2185,7 @@ static int format_check(struct drm_mode_fb_cmd2 *r)
        }
 }
 
-static int framebuffer_check(struct drm_mode_fb_cmd2 *r)
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
 {
        int ret, hsub, vsub, num_planes, i;
 
@@ -3126,7 +3126,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
 EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
 
 static bool drm_property_change_is_valid(struct drm_property *property,
-                                        __u64 value)
+                                        uint64_t value)
 {
        if (property->flags & DRM_MODE_PROP_IMMUTABLE)
                return false;
@@ -3136,7 +3136,7 @@ static bool drm_property_change_is_valid(struct drm_property *property,
                return true;
        } else if (property->flags & DRM_MODE_PROP_BITMASK) {
                int i;
-               __u64 valid_mask = 0;
+               uint64_t valid_mask = 0;
                for (i = 0; i < property->num_values; i++)
                        valid_mask |= (1ULL << property->values[i]);
                return !(value & ~valid_mask);
index 608bddfc7e35ad93ebe7c522c2b2fb77cd8a44b6..eb92fe257a3937441a35ac01ec8dd414ee4d57d3 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
-#include <linux/export.h>
+#include <linux/module.h>
 #include "drmP.h"
 #include "drm_edid.h"
 #include "drm_edid_modes.h"
@@ -66,6 +66,8 @@
 #define EDID_QUIRK_FIRST_DETAILED_PREFERRED    (1 << 5)
 /* use +hsync +vsync for detailed mode */
 #define EDID_QUIRK_DETAILED_SYNC_PP            (1 << 6)
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING      (1 << 7)
 
 struct detailed_mode_closure {
        struct drm_connector *connector;
@@ -120,6 +122,9 @@ static struct edid_quirk {
        /* Samsung SyncMaster 22[5-6]BW */
        { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
        { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
+       /* ViewSonic VA2026w */
+       { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
 };
 
 /*** DDC fetch and block validation ***/
@@ -144,6 +149,10 @@ int drm_edid_header_is_valid(const u8 *raw_edid)
 }
 EXPORT_SYMBOL(drm_edid_header_is_valid);
 
+static int edid_fixup __read_mostly = 6;
+module_param_named(edid_fixup, edid_fixup, int, 0400);
+MODULE_PARM_DESC(edid_fixup,
+                "Minimum number of valid EDID header bytes (0-8, default 6)");
 
 /*
  * Sanity check the EDID block (base or extension).  Return 0 if the block
@@ -155,10 +164,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
        u8 csum = 0;
        struct edid *edid = (struct edid *)raw_edid;
 
+       if (edid_fixup > 8 || edid_fixup < 0)
+               edid_fixup = 6;
+
        if (block == 0) {
                int score = drm_edid_header_is_valid(raw_edid);
                if (score == 8) ;
-               else if (score >= 6) {
+               else if (score >= edid_fixup) {
                        DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
                        memcpy(raw_edid, edid_header, sizeof(edid_header));
                } else {
@@ -885,12 +897,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
                                "Wrong Hsync/Vsync pulse width\n");
                return NULL;
        }
+
+       if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+               mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+               if (!mode)
+                       return NULL;
+
+               goto set_size;
+       }
+
        mode = drm_mode_create(dev);
        if (!mode)
                return NULL;
 
-       mode->type = DRM_MODE_TYPE_DRIVER;
-
        if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
                timing->pixel_clock = cpu_to_le16(1088);
 
@@ -914,8 +933,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
        drm_mode_do_interlace_quirk(mode, pt);
 
-       drm_mode_set_name(mode);
-
        if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
                pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
        }
@@ -925,6 +942,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
        mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
                DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
 
+set_size:
        mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
        mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
 
@@ -938,6 +956,9 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
                mode->height_mm = edid->height_cm * 10;
        }
 
+       mode->type = DRM_MODE_TYPE_DRIVER;
+       drm_mode_set_name(mode);
+
        return mode;
 }
 
index f920fb5e42b63846e3d8b7b782b492e547e18eef..fa9439159ebd6bc85cdf4e27a307d9cde12dcbd6 100644 (file)
@@ -130,11 +130,10 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
                return -EINVAL;
 
        /* This is all entirely broken */
-       down_write(&current->mm->mmap_sem);
        old_fops = file_priv->filp->f_op;
        file_priv->filp->f_op = &i810_buffer_fops;
        dev_priv->mmap_buffer = buf;
-       buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
+       buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
                                            PROT_READ | PROT_WRITE,
                                            MAP_SHARED, buf->bus_address);
        dev_priv->mmap_buffer = NULL;
@@ -145,7 +144,6 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
                retcode = PTR_ERR(buf_priv->virtual);
                buf_priv->virtual = NULL;
        }
-       up_write(&current->mm->mmap_sem);
 
        return retcode;
 }
index eb2b3c25b9e12b19c2113444d5d2f8e175756ba6..5363e9c66c27e4fe1a4be0fecf26dd4263920be6 100644 (file)
@@ -2032,6 +2032,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
                                 1, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
                                 1, minor);
+       drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
+                                1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */
index 377c21f531e49ba93bdcb31aaaff290b15292bc2..c9cfc67c2cf58acdf7871a6e81fda66d3c45dedb 100644 (file)
@@ -942,6 +942,9 @@ struct drm_i915_gem_object {
 
        /* prime dma-buf support */
        struct sg_table *sg_table;
+       void *dma_buf_vmapping;
+       int vmapping_count;
+
        /**
         * Used for performing relocations during execbuffer insertion.
         */
index c1e5c66553dfcff66892876f854f3e708d92ac93..288d7b8f49ae48858a30c6ad1f9f7ce1d6d6e600 100644 (file)
@@ -2063,10 +2063,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        if (obj->gtt_space == NULL)
                return 0;
 
-       if (obj->pin_count != 0) {
-               DRM_ERROR("Attempting to unbind pinned buffer\n");
-               return -EINVAL;
-       }
+       if (obj->pin_count)
+               return -EBUSY;
 
        ret = i915_gem_object_finish_gpu(obj);
        if (ret)
@@ -3293,6 +3291,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct address_space *mapping;
+       u32 mask;
 
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (obj == NULL)
@@ -3303,8 +3302,15 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                return NULL;
        }
 
+       mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+       if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+               /* 965gm cannot relocate objects above 4GiB. */
+               mask &= ~__GFP_HIGHMEM;
+               mask |= __GFP_DMA32;
+       }
+
        mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-       mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+       mapping_set_gfp_mask(mapping, mask);
 
        i915_gem_info_add_obj(dev_priv, size);
 
index 8e269178d6a5a65c2f5ab8068ac86d283711fab6..aa308e1337db7c8bacbf73d00dc80badecb617a2 100644 (file)
@@ -74,6 +74,59 @@ static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
        }
 }
 
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+       struct drm_i915_gem_object *obj = dma_buf->priv;
+       struct drm_device *dev = obj->base.dev;
+       int ret;
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ERR_PTR(ret);
+
+       if (obj->dma_buf_vmapping) {
+               obj->vmapping_count++;
+               goto out_unlock;
+       }
+
+       if (!obj->pages) {
+               ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+               if (ret) {
+                       mutex_unlock(&dev->struct_mutex);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+       if (!obj->dma_buf_vmapping) {
+               DRM_ERROR("failed to vmap object\n");
+               goto out_unlock;
+       }
+
+       obj->vmapping_count = 1;
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return obj->dma_buf_vmapping;
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+       struct drm_i915_gem_object *obj = dma_buf->priv;
+       struct drm_device *dev = obj->base.dev;
+       int ret;
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return;
+
+       --obj->vmapping_count;
+       if (obj->vmapping_count == 0) {
+               vunmap(obj->dma_buf_vmapping);
+               obj->dma_buf_vmapping = NULL;
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
+
 static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
 {
        return NULL;
@@ -93,6 +146,11 @@ static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_n
 
 }
 
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+       return -EINVAL;
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops =  {
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -101,6 +159,9 @@ static const struct dma_buf_ops i915_dmabuf_ops =  {
        .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
        .kunmap = i915_gem_dmabuf_kunmap,
        .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+       .mmap = i915_gem_dmabuf_mmap,
+       .vmap = i915_gem_dmabuf_vmap,
+       .vunmap = i915_gem_dmabuf_vunmap,
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
index cc4a633076110bce0f844d1c24e8c739c9b6917c..1417660a93ec00a0a8a24cc797acc7b7754db063 100644 (file)
@@ -350,8 +350,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
 {
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps_work);
-       u8 new_delay = dev_priv->cur_delay;
        u32 pm_iir, pm_imr;
+       u8 new_delay;
 
        spin_lock_irq(&dev_priv->rps_lock);
        pm_iir = dev_priv->pm_iir;
@@ -360,41 +360,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
        I915_WRITE(GEN6_PMIMR, 0);
        spin_unlock_irq(&dev_priv->rps_lock);
 
-       if (!pm_iir)
+       if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
                return;
 
        mutex_lock(&dev_priv->dev->struct_mutex);
-       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-               if (dev_priv->cur_delay != dev_priv->max_delay)
-                       new_delay = dev_priv->cur_delay + 1;
-               if (new_delay > dev_priv->max_delay)
-                       new_delay = dev_priv->max_delay;
-       } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
-               gen6_gt_force_wake_get(dev_priv);
-               if (dev_priv->cur_delay != dev_priv->min_delay)
-                       new_delay = dev_priv->cur_delay - 1;
-               if (new_delay < dev_priv->min_delay) {
-                       new_delay = dev_priv->min_delay;
-                       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                                  I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
-                                  ((new_delay << 16) & 0x3f0000));
-               } else {
-                       /* Make sure we continue to get down interrupts
-                        * until we hit the minimum frequency */
-                       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                                  I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
-               }
-               gen6_gt_force_wake_put(dev_priv);
-       }
+
+       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+               new_delay = dev_priv->cur_delay + 1;
+       else
+               new_delay = dev_priv->cur_delay - 1;
 
        gen6_set_rps(dev_priv->dev, new_delay);
-       dev_priv->cur_delay = new_delay;
 
-       /*
-        * rps_lock not held here because clearing is non-destructive. There is
-        * an *extremely* unlikely race with gen6_rps_enable() that is prevented
-        * by holding struct_mutex for the duration of the write.
-        */
        mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
index ee61ad1e642b06848f537fb6000b3d83c3bf1be1..9147894209061dd69c12ddc9bde72f904ebae3ac 100644 (file)
@@ -910,9 +910,10 @@ static void assert_pll(struct drm_i915_private *dev_priv,
 
 /* For ILK+ */
 static void assert_pch_pll(struct drm_i915_private *dev_priv,
-                          struct intel_crtc *intel_crtc, bool state)
+                          struct intel_pch_pll *pll,
+                          struct intel_crtc *crtc,
+                          bool state)
 {
-       int reg;
        u32 val;
        bool cur_state;
 
@@ -921,30 +922,37 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv,
                return;
        }
 
-       if (!intel_crtc->pch_pll) {
-               WARN(1, "asserting PCH PLL enabled with no PLL\n");
+       if (WARN (!pll,
+                 "asserting PCH PLL %s with no PLL\n", state_string(state)))
                return;
-       }
 
-       if (HAS_PCH_CPT(dev_priv->dev)) {
+       val = I915_READ(pll->pll_reg);
+       cur_state = !!(val & DPLL_VCO_ENABLE);
+       WARN(cur_state != state,
+            "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
+            pll->pll_reg, state_string(state), state_string(cur_state), val);
+
+       /* Make sure the selected PLL is correctly attached to the transcoder */
+       if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
                u32 pch_dpll;
 
                pch_dpll = I915_READ(PCH_DPLL_SEL);
-
-               /* Make sure the selected PLL is enabled to the transcoder */
-               WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
-                    "transcoder %d PLL not enabled\n", intel_crtc->pipe);
+               cur_state = pll->pll_reg == _PCH_DPLL_B;
+               if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
+                         "PLL[%d] not attached to this transcoder %d: %08x\n",
+                         cur_state, crtc->pipe, pch_dpll)) {
+                       cur_state = !!(val >> (4*crtc->pipe + 3));
+                       WARN(cur_state != state,
+                            "PLL[%d] not %s on this transcoder %d: %08x\n",
+                            pll->pll_reg == _PCH_DPLL_B,
+                            state_string(state),
+                            crtc->pipe,
+                            val);
+               }
        }
-
-       reg = intel_crtc->pch_pll->pll_reg;
-       val = I915_READ(reg);
-       cur_state = !!(val & DPLL_VCO_ENABLE);
-       WARN(cur_state != state,
-            "PCH PLL state assertion failure (expected %s, current %s)\n",
-            state_string(state), state_string(cur_state));
 }
-#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
-#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
+#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
 
 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
@@ -1424,7 +1432,7 @@ static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
        assert_pch_refclk_enabled(dev_priv);
 
        if (pll->active++ && pll->on) {
-               assert_pch_pll_enabled(dev_priv, intel_crtc);
+               assert_pch_pll_enabled(dev_priv, pll, NULL);
                return;
        }
 
@@ -1460,12 +1468,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
                      intel_crtc->base.base.id);
 
        if (WARN_ON(pll->active == 0)) {
-               assert_pch_pll_disabled(dev_priv, intel_crtc);
+               assert_pch_pll_disabled(dev_priv, pll, NULL);
                return;
        }
 
        if (--pll->active) {
-               assert_pch_pll_enabled(dev_priv, intel_crtc);
+               assert_pch_pll_enabled(dev_priv, pll, NULL);
                return;
        }
 
@@ -1495,7 +1503,9 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
        BUG_ON(dev_priv->info->gen < 5);
 
        /* Make sure PCH DPLL is enabled */
-       assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
+       assert_pch_pll_enabled(dev_priv,
+                              to_intel_crtc(crtc)->pch_pll,
+                              to_intel_crtc(crtc));
 
        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, pipe);
index 71c7096e386950f5fc8f2fbad253dc80cd764e16..296cfc201a81ea9a0017abfb0e97eee7eaf8dc18 100644 (file)
@@ -266,6 +266,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+               return MODE_H_ILLEGAL;
+
        return MODE_OK;
 }
 
@@ -702,6 +705,9 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
                mode->clock = intel_dp->panel_fixed_mode->clock;
        }
 
+       if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+               return false;
+
        DRM_DEBUG_KMS("DP link computation with max lane count %i "
                      "max bw %02x pixel clock %iKHz\n",
                      max_lane_count, bws[max_clock], mode->clock);
@@ -1154,11 +1160,10 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 
        DRM_DEBUG_KMS("Turn eDP power off\n");
 
-       WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
-       ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
+       WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
        pp = ironlake_get_pp_control(dev_priv);
-       pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+       pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);
 
@@ -1266,18 +1271,16 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+
+       /* Make sure the panel is off before trying to change the mode. But also
+        * ensure that we have vdd while we switch off the panel. */
+       ironlake_edp_panel_vdd_on(intel_dp);
        ironlake_edp_backlight_off(intel_dp);
        ironlake_edp_panel_off(intel_dp);
 
-       /* Wake up the sink first */
-       ironlake_edp_panel_vdd_on(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_link_down(intel_dp);
        ironlake_edp_panel_vdd_off(intel_dp, false);
-
-       /* Make sure the panel is off before trying to
-        * change the mode
-        */
 }
 
 static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1309,10 +1312,11 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
        if (mode != DRM_MODE_DPMS_ON) {
+               /* Switching the panel off requires vdd. */
+               ironlake_edp_panel_vdd_on(intel_dp);
                ironlake_edp_backlight_off(intel_dp);
                ironlake_edp_panel_off(intel_dp);
 
-               ironlake_edp_panel_vdd_on(intel_dp);
                intel_dp_sink_dpms(intel_dp, mode);
                intel_dp_link_down(intel_dp);
                ironlake_edp_panel_vdd_off(intel_dp, false);
index 4a9707dd0f9c1885644b9847cfecd062a8bd8872..1991a4408cf9e10896bd5295ba4b3b4b1d74856b 100644 (file)
@@ -396,11 +396,22 @@ clear_err:
         * Wait for bus to IDLE before clearing NAK.
         * If we clear the NAK while bus is still active, then it will stay
         * active and the next transaction may fail.
+        *
+        * If no ACK is received during the address phase of a transaction, the
+        * adapter must report -ENXIO. It is not clear what to return if no ACK
+        * is received at other times. But we have to be careful to not return
+        * spurious -ENXIO because that will prevent i2c and drm edid functions
+        * from retrying. So return -ENXIO only when gmbus properly quiescents -
+        * timing out seems to happen when there _is_ a ddc chip present, but
+        * it's slow responding and only answers on the 2nd retry.
         */
+       ret = -ENXIO;
        if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
-                    10))
+                    10)) {
                DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
                              adapter->name);
+               ret = -ETIMEDOUT;
+       }
 
        /* Toggle the Software Clear Interrupt bit. This has the effect
         * of resetting the GMBUS controller and so clearing the
@@ -414,14 +425,6 @@ clear_err:
                         adapter->name, msgs[i].addr,
                         (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
 
-       /*
-        * If no ACK is received during the address phase of a transaction,
-        * the adapter must report -ENXIO.
-        * It is not clear what to return if no ACK is received at other times.
-        * So, we always return -ENXIO in all NAK cases, to ensure we send
-        * it at least during the one case that is specified.
-        */
-       ret = -ENXIO;
        goto out;
 
 timeout:
index 9dee82350defb0590ecc16326a4cdd712d15b0e4..08eb04c787e834e52e7850b7b864f1901d684f31 100644 (file)
@@ -745,6 +745,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
                },
        },
+       {
+               .callback = intel_no_lvds_dmi_callback,
+               .ident = "Hewlett-Packard HP t5740e Thin Client",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+               },
+       },
        {
                .callback = intel_no_lvds_dmi_callback,
                .ident = "Hewlett-Packard t5745",
index 8e79ff67ec98931e6a7680d8b9e28375d8db1978..d0ce2a5b1d3f09ffa65340026388f5b5920e320a 100644 (file)
@@ -2270,10 +2270,33 @@ void ironlake_disable_drps(struct drm_device *dev)
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 swreq;
+       u32 limits;
 
-       swreq = (val & 0x3ff) << 25;
-       I915_WRITE(GEN6_RPNSWREQ, swreq);
+       limits = 0;
+       if (val >= dev_priv->max_delay)
+               val = dev_priv->max_delay;
+       else
+               limits |= dev_priv->max_delay << 24;
+
+       if (val <= dev_priv->min_delay)
+               val = dev_priv->min_delay;
+       else
+               limits |= dev_priv->min_delay << 16;
+
+       if (val == dev_priv->cur_delay)
+               return;
+
+       I915_WRITE(GEN6_RPNSWREQ,
+                  GEN6_FREQUENCY(val) |
+                  GEN6_OFFSET(0) |
+                  GEN6_AGGRESSIVE_TURBO);
+
+       /* Make sure we continue to get interrupts
+        * until we hit the minimum or maximum frequencies.
+        */
+       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+       dev_priv->cur_delay = val;
 }
 
 void gen6_disable_rps(struct drm_device *dev)
@@ -2327,11 +2350,10 @@ int intel_enable_rc6(const struct drm_device *dev)
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
        struct intel_ring_buffer *ring;
-       u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-       u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+       u32 rp_state_cap;
+       u32 gt_perf_status;
        u32 pcu_mbox, rc6_mask = 0;
        u32 gtfifodbg;
-       int cur_freq, min_freq, max_freq;
        int rc6_mode;
        int i;
 
@@ -2352,6 +2374,14 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 
        gen6_gt_force_wake_get(dev_priv);
 
+       rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+       gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+       /* In units of 100MHz */
+       dev_priv->max_delay = rp_state_cap & 0xff;
+       dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
+       dev_priv->cur_delay = 0;
+
        /* disable the counters and set deterministic thresholds */
        I915_WRITE(GEN6_RC_CONTROL, 0);
 
@@ -2399,8 +2429,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 
        I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                  18 << 24 |
-                  6 << 16);
+                  dev_priv->max_delay << 24 |
+                  dev_priv->min_delay << 16);
        I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
        I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
        I915_WRITE(GEN6_RP_UP_EI, 100000);
@@ -2408,7 +2438,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
        I915_WRITE(GEN6_RP_CONTROL,
                   GEN6_RP_MEDIA_TURBO |
-                  GEN6_RP_MEDIA_HW_MODE |
+                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
                   GEN6_RP_MEDIA_IS_GFX |
                   GEN6_RP_ENABLE |
                   GEN6_RP_UP_BUSY_AVG |
@@ -2426,10 +2456,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
                     500))
                DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 
-       min_freq = (rp_state_cap & 0xff0000) >> 16;
-       max_freq = rp_state_cap & 0xff;
-       cur_freq = (gt_perf_status & 0xff00) >> 8;
-
        /* Check for overclock support */
        if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
                     500))
@@ -2440,14 +2466,11 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
                     500))
                DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
        if (pcu_mbox & (1<<31)) { /* OC supported */
-               max_freq = pcu_mbox & 0xff;
+               dev_priv->max_delay = pcu_mbox & 0xff;
                DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
        }
 
-       /* In units of 100MHz */
-       dev_priv->max_delay = max_freq;
-       dev_priv->min_delay = min_freq;
-       dev_priv->cur_delay = cur_freq;
+       gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
        /* requires MSI enabled */
        I915_WRITE(GEN6_PMIER,
@@ -3580,8 +3603,9 @@ static void gen6_sanitize_pm(struct drm_device *dev)
                limits |= (dev_priv->min_delay & 0x3f) << 16;
 
        if (old != limits) {
-               DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
-                         limits, old);
+               /* Note that the known failure case is to read back 0. */
+               DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
+                                "expected %08x, was %08x\n", limits, old);
                I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
        }
 
index a949b73880c8302db5f3b255429cf24ab12fea8d..b6a9d45fc3c69d4b5be7e8c6f93490636b6049c2 100644 (file)
@@ -783,10 +783,12 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
                ((v_sync_len & 0x30) >> 4);
 
        dtd->part2.dtd_flags = 0x18;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
        if (mode->flags & DRM_MODE_FLAG_PHSYNC)
-               dtd->part2.dtd_flags |= 0x2;
+               dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
        if (mode->flags & DRM_MODE_FLAG_PVSYNC)
-               dtd->part2.dtd_flags |= 0x4;
+               dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
 
        dtd->part2.sdvo_flags = 0;
        dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
@@ -820,9 +822,11 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
        mode->clock = dtd->part1.clock * 10;
 
        mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
-       if (dtd->part2.dtd_flags & 0x2)
+       if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+       if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
                mode->flags |= DRM_MODE_FLAG_PHSYNC;
-       if (dtd->part2.dtd_flags & 0x4)
+       if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
                mode->flags |= DRM_MODE_FLAG_PVSYNC;
 }
 
index 6b7b22f4d63ec77c05ebf70ee35110d9678b841e..9d030142ee43476c89cf2769f0d8c75cf1697daa 100644 (file)
@@ -61,6 +61,11 @@ struct intel_sdvo_caps {
        u16 output_flags;
 } __attribute__((packed));
 
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE     (1 << 7)
+
 /** This matches the EDID DTD structure, more or less */
 struct intel_sdvo_dtd {
        struct {
index 3346612d2953eff35c3e9025b044d33e75e28206..a233a51fd7e60c9f48ea89fd9b425140f4bf0ceb 100644 (file)
@@ -673,6 +673,54 @@ static const struct tv_mode tv_modes[] = {
 
                .filter_table = filter_table,
        },
+       {
+               .name       = "480p",
+               .clock          = 107520,
+               .refresh        = 59940,
+               .oversample     = TV_OVERSAMPLE_4X,
+               .component_only = 1,
+
+               .hsync_end      = 64,               .hblank_end         = 122,
+               .hblank_start   = 842,              .htotal             = 857,
+
+               .progressive    = true,             .trilevel_sync = false,
+
+               .vsync_start_f1 = 12,               .vsync_start_f2     = 12,
+               .vsync_len      = 12,
+
+               .veq_ena        = false,
+
+               .vi_end_f1      = 44,               .vi_end_f2          = 44,
+               .nbr_end        = 479,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+       },
+       {
+               .name       = "576p",
+               .clock          = 107520,
+               .refresh        = 50000,
+               .oversample     = TV_OVERSAMPLE_4X,
+               .component_only = 1,
+
+               .hsync_end      = 64,               .hblank_end         = 139,
+               .hblank_start   = 859,              .htotal             = 863,
+
+               .progressive    = true,             .trilevel_sync = false,
+
+               .vsync_start_f1 = 10,               .vsync_start_f2     = 10,
+               .vsync_len      = 10,
+
+               .veq_ena        = false,
+
+               .vi_end_f1      = 48,               .vi_end_f2          = 48,
+               .nbr_end        = 575,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+       },
        {
                .name       = "720p@60Hz",
                .clock          = 148800,
@@ -1194,6 +1242,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 
        I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
        I915_WRITE(TV_CTL, save_tv_ctl);
+       POSTING_READ(TV_CTL);
+
+       /* For unknown reasons the hw barfs if we don't do this vblank wait. */
+       intel_wait_for_vblank(intel_tv->base.base.dev,
+                             to_intel_crtc(intel_tv->base.base.crtc)->pipe);
 
        /* Restore interrupt config */
        if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
index 3c8e04f54713b7964c5c915f3b06ca1a70f54578..93e832d6c3286346eff00f179fcc49e5caeba473 100644 (file)
@@ -41,9 +41,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
 
 MODULE_DEVICE_TABLE(pci, pciidlist);
 
+static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+       struct apertures_struct *ap;
+       bool primary = false;
+
+       ap = alloc_apertures(1);
+       ap->ranges[0].base = pci_resource_start(pdev, 0);
+       ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+       primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+       remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
+       kfree(ap);
+}
+
+
 static int __devinit
 mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+       mgag200_kick_out_firmware_fb(pdev);
+
        return drm_get_pci_dev(pdev, ent, &driver);
 }
 
index 634d222c93dea4b310183a1174c474cd2f80a2b8..8613cb23808c585ef35175f4d4305f1c739d1add 100644 (file)
@@ -123,6 +123,9 @@ struct nouveau_bo {
 
        struct drm_gem_object *gem;
        int pin_refcnt;
+
+       struct ttm_bo_kmap_obj dma_buf_vmap;
+       int vmapping_count;
 };
 
 #define nouveau_bo_tile_layout(nvbo)                           \
index c58aab7370c575949a41113014120b252548169c..a89240e5fb2962334e601d7f4ff6fa1363f7f061 100644 (file)
@@ -61,6 +61,48 @@ static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
 
 }
 
+static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+       return -EINVAL;
+}
+
+static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+       struct nouveau_bo *nvbo = dma_buf->priv;
+       struct drm_device *dev = nvbo->gem->dev;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       if (nvbo->vmapping_count) {
+               nvbo->vmapping_count++;
+               goto out_unlock;
+       }
+
+       ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
+                         &nvbo->dma_buf_vmap);
+       if (ret) {
+               mutex_unlock(&dev->struct_mutex);
+               return ERR_PTR(ret);
+       }
+       nvbo->vmapping_count = 1;
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return nvbo->dma_buf_vmap.virtual;
+}
+
+static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+       struct nouveau_bo *nvbo = dma_buf->priv;
+       struct drm_device *dev = nvbo->gem->dev;
+
+       mutex_lock(&dev->struct_mutex);
+       nvbo->vmapping_count--;
+       if (nvbo->vmapping_count == 0) {
+               ttm_bo_kunmap(&nvbo->dma_buf_vmap);
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
+
 static const struct dma_buf_ops nouveau_dmabuf_ops =  {
        .map_dma_buf = nouveau_gem_map_dma_buf,
        .unmap_dma_buf = nouveau_gem_unmap_dma_buf,
@@ -69,6 +111,9 @@ static const struct dma_buf_ops nouveau_dmabuf_ops =  {
        .kmap_atomic = nouveau_gem_kmap_atomic,
        .kunmap = nouveau_gem_kunmap,
        .kunmap_atomic = nouveau_gem_kunmap_atomic,
+       .mmap = nouveau_gem_prime_mmap,
+       .vmap = nouveau_gem_prime_vmap,
+       .vunmap = nouveau_gem_prime_vunmap,
 };
 
 static int
index 58991af90502dea8b98d9aac157d182a5feca21e..01550d05e2738d12d6e29fb191740ba8a5dc6637 100644 (file)
@@ -1029,6 +1029,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
                WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
                WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
                WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+               if ((rdev->family == CHIP_JUNIPER) ||
+                   (rdev->family == CHIP_CYPRESS) ||
+                   (rdev->family == CHIP_HEMLOCK) ||
+                   (rdev->family == CHIP_BARTS))
+                       WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
        }
        WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
@@ -1553,163 +1558,10 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 /*
  * Core functions
  */
-static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
-                                                 u32 num_tile_pipes,
-                                                 u32 num_backends,
-                                                 u32 backend_disable_mask)
-{
-       u32 backend_map = 0;
-       u32 enabled_backends_mask = 0;
-       u32 enabled_backends_count = 0;
-       u32 cur_pipe;
-       u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
-       u32 cur_backend = 0;
-       u32 i;
-       bool force_no_swizzle;
-
-       if (num_tile_pipes > EVERGREEN_MAX_PIPES)
-               num_tile_pipes = EVERGREEN_MAX_PIPES;
-       if (num_tile_pipes < 1)
-               num_tile_pipes = 1;
-       if (num_backends > EVERGREEN_MAX_BACKENDS)
-               num_backends = EVERGREEN_MAX_BACKENDS;
-       if (num_backends < 1)
-               num_backends = 1;
-
-       for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
-               if (((backend_disable_mask >> i) & 1) == 0) {
-                       enabled_backends_mask |= (1 << i);
-                       ++enabled_backends_count;
-               }
-               if (enabled_backends_count == num_backends)
-                       break;
-       }
-
-       if (enabled_backends_count == 0) {
-               enabled_backends_mask = 1;
-               enabled_backends_count = 1;
-       }
-
-       if (enabled_backends_count != num_backends)
-               num_backends = enabled_backends_count;
-
-       memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
-       switch (rdev->family) {
-       case CHIP_CEDAR:
-       case CHIP_REDWOOD:
-       case CHIP_PALM:
-       case CHIP_SUMO:
-       case CHIP_SUMO2:
-       case CHIP_TURKS:
-       case CHIP_CAICOS:
-               force_no_swizzle = false;
-               break;
-       case CHIP_CYPRESS:
-       case CHIP_HEMLOCK:
-       case CHIP_JUNIPER:
-       case CHIP_BARTS:
-       default:
-               force_no_swizzle = true;
-               break;
-       }
-       if (force_no_swizzle) {
-               bool last_backend_enabled = false;
-
-               force_no_swizzle = false;
-               for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
-                       if (((enabled_backends_mask >> i) & 1) == 1) {
-                               if (last_backend_enabled)
-                                       force_no_swizzle = true;
-                               last_backend_enabled = true;
-                       } else
-                               last_backend_enabled = false;
-               }
-       }
-
-       switch (num_tile_pipes) {
-       case 1:
-       case 3:
-       case 5:
-       case 7:
-               DRM_ERROR("odd number of pipes!\n");
-               break;
-       case 2:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               break;
-       case 4:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 1;
-                       swizzle_pipe[3] = 3;
-               }
-               break;
-       case 6:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 1;
-                       swizzle_pipe[4] = 3;
-                       swizzle_pipe[5] = 5;
-               }
-               break;
-       case 8:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-                       swizzle_pipe[6] = 6;
-                       swizzle_pipe[7] = 7;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 6;
-                       swizzle_pipe[4] = 1;
-                       swizzle_pipe[5] = 3;
-                       swizzle_pipe[6] = 5;
-                       swizzle_pipe[7] = 7;
-               }
-               break;
-       }
-
-       for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-               while (((1 << cur_backend) & enabled_backends_mask) == 0)
-                       cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
-
-               backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
-               cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
-       }
-
-       return backend_map;
-}
-
 static void evergreen_gpu_init(struct radeon_device *rdev)
 {
-       u32 cc_rb_backend_disable = 0;
-       u32 cc_gc_shader_pipe_config;
-       u32 gb_addr_config = 0;
+       u32 gb_addr_config;
        u32 mc_shared_chmap, mc_arb_ramcfg;
-       u32 gb_backend_map;
-       u32 grbm_gfx_index;
        u32 sx_debug_1;
        u32 smx_dc_ctl0;
        u32 sq_config;
@@ -1724,6 +1576,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        u32 sq_stack_resource_mgmt_3;
        u32 vgt_cache_invalidation;
        u32 hdp_host_path_cntl, tmp;
+       u32 disabled_rb_mask;
        int i, j, num_shader_engines, ps_thread_count;
 
        switch (rdev->family) {
@@ -1748,6 +1601,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x100;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_JUNIPER:
                rdev->config.evergreen.num_ses = 1;
@@ -1769,6 +1623,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x100;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_REDWOOD:
                rdev->config.evergreen.num_ses = 1;
@@ -1790,6 +1645,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x100;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_CEDAR:
        default:
@@ -1812,6 +1668,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_PALM:
                rdev->config.evergreen.num_ses = 1;
@@ -1833,6 +1690,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_SUMO:
                rdev->config.evergreen.num_ses = 1;
@@ -1860,6 +1718,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_SUMO2:
                rdev->config.evergreen.num_ses = 1;
@@ -1881,6 +1740,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_BARTS:
                rdev->config.evergreen.num_ses = 2;
@@ -1902,6 +1762,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x100;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_TURKS:
                rdev->config.evergreen.num_ses = 1;
@@ -1923,6 +1784,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x100;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_CAICOS:
                rdev->config.evergreen.num_ses = 1;
@@ -1944,6 +1806,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
                break;
        }
 
@@ -1960,20 +1823,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 
        evergreen_fix_pci_max_read_req_size(rdev);
 
-       cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
-
-       cc_gc_shader_pipe_config |=
-               INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
-                                 & EVERGREEN_MAX_PIPES_MASK);
-       cc_gc_shader_pipe_config |=
-               INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
-                              & EVERGREEN_MAX_SIMDS_MASK);
-
-       cc_rb_backend_disable =
-               BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
-                               & EVERGREEN_MAX_BACKENDS_MASK);
-
-
        mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
        if ((rdev->family == CHIP_PALM) ||
            (rdev->family == CHIP_SUMO) ||
@@ -1982,134 +1831,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        else
                mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
-       switch (rdev->config.evergreen.max_tile_pipes) {
-       case 1:
-       default:
-               gb_addr_config |= NUM_PIPES(0);
-               break;
-       case 2:
-               gb_addr_config |= NUM_PIPES(1);
-               break;
-       case 4:
-               gb_addr_config |= NUM_PIPES(2);
-               break;
-       case 8:
-               gb_addr_config |= NUM_PIPES(3);
-               break;
-       }
-
-       gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
-       gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
-       gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
-       gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
-       gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
-       gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
-
-       if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
-               gb_addr_config |= ROW_SIZE(2);
-       else
-               gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
-
-       if (rdev->ddev->pdev->device == 0x689e) {
-               u32 efuse_straps_4;
-               u32 efuse_straps_3;
-               u8 efuse_box_bit_131_124;
-
-               WREG32(RCU_IND_INDEX, 0x204);
-               efuse_straps_4 = RREG32(RCU_IND_DATA);
-               WREG32(RCU_IND_INDEX, 0x203);
-               efuse_straps_3 = RREG32(RCU_IND_DATA);
-               efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
-
-               switch(efuse_box_bit_131_124) {
-               case 0x00:
-                       gb_backend_map = 0x76543210;
-                       break;
-               case 0x55:
-                       gb_backend_map = 0x77553311;
-                       break;
-               case 0x56:
-                       gb_backend_map = 0x77553300;
-                       break;
-               case 0x59:
-                       gb_backend_map = 0x77552211;
-                       break;
-               case 0x66:
-                       gb_backend_map = 0x77443300;
-                       break;
-               case 0x99:
-                       gb_backend_map = 0x66552211;
-                       break;
-               case 0x5a:
-                       gb_backend_map = 0x77552200;
-                       break;
-               case 0xaa:
-                       gb_backend_map = 0x66442200;
-                       break;
-               case 0x95:
-                       gb_backend_map = 0x66553311;
-                       break;
-               default:
-                       DRM_ERROR("bad backend map, using default\n");
-                       gb_backend_map =
-                               evergreen_get_tile_pipe_to_backend_map(rdev,
-                                                                      rdev->config.evergreen.max_tile_pipes,
-                                                                      rdev->config.evergreen.max_backends,
-                                                                      ((EVERGREEN_MAX_BACKENDS_MASK <<
-                                                                  rdev->config.evergreen.max_backends) &
-                                                                       EVERGREEN_MAX_BACKENDS_MASK));
-                       break;
-               }
-       } else if (rdev->ddev->pdev->device == 0x68b9) {
-               u32 efuse_straps_3;
-               u8 efuse_box_bit_127_124;
-
-               WREG32(RCU_IND_INDEX, 0x203);
-               efuse_straps_3 = RREG32(RCU_IND_DATA);
-               efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
-
-               switch(efuse_box_bit_127_124) {
-               case 0x0:
-                       gb_backend_map = 0x00003210;
-                       break;
-               case 0x5:
-               case 0x6:
-               case 0x9:
-               case 0xa:
-                       gb_backend_map = 0x00003311;
-                       break;
-               default:
-                       DRM_ERROR("bad backend map, using default\n");
-                       gb_backend_map =
-                               evergreen_get_tile_pipe_to_backend_map(rdev,
-                                                                      rdev->config.evergreen.max_tile_pipes,
-                                                                      rdev->config.evergreen.max_backends,
-                                                                      ((EVERGREEN_MAX_BACKENDS_MASK <<
-                                                                  rdev->config.evergreen.max_backends) &
-                                                                       EVERGREEN_MAX_BACKENDS_MASK));
-                       break;
-               }
-       } else {
-               switch (rdev->family) {
-               case CHIP_CYPRESS:
-               case CHIP_HEMLOCK:
-               case CHIP_BARTS:
-                       gb_backend_map = 0x66442200;
-                       break;
-               case CHIP_JUNIPER:
-                       gb_backend_map = 0x00002200;
-                       break;
-               default:
-                       gb_backend_map =
-                               evergreen_get_tile_pipe_to_backend_map(rdev,
-                                                                      rdev->config.evergreen.max_tile_pipes,
-                                                                      rdev->config.evergreen.max_backends,
-                                                                      ((EVERGREEN_MAX_BACKENDS_MASK <<
-                                                                        rdev->config.evergreen.max_backends) &
-                                                                       EVERGREEN_MAX_BACKENDS_MASK));
-               }
-       }
-
        /* setup tiling info dword.  gb_addr_config is not adequate since it does
         * not have bank info, so create a custom tiling dword.
         * bits 3:0   num_pipes
@@ -2136,45 +1857,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
        if (rdev->flags & RADEON_IS_IGP)
                rdev->config.evergreen.tile_config |= 1 << 4;
-       else
-               rdev->config.evergreen.tile_config |=
-                       ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
-       rdev->config.evergreen.tile_config |=
-               ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
+       else {
+               if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+                       rdev->config.evergreen.tile_config |= 1 << 4;
+               else
+                       rdev->config.evergreen.tile_config |= 0 << 4;
+       }
+       rdev->config.evergreen.tile_config |= 0 << 8;
        rdev->config.evergreen.tile_config |=
                ((gb_addr_config & 0x30000000) >> 28) << 12;
 
-       rdev->config.evergreen.backend_map = gb_backend_map;
-       WREG32(GB_BACKEND_MAP, gb_backend_map);
-       WREG32(GB_ADDR_CONFIG, gb_addr_config);
-       WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
-       WREG32(HDP_ADDR_CONFIG, gb_addr_config);
-
-       num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
-       grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
+       num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1;
 
-       for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
-               u32 rb = cc_rb_backend_disable | (0xf0 << 16);
-               u32 sp = cc_gc_shader_pipe_config;
-               u32 gfx = grbm_gfx_index | SE_INDEX(i);
+       if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
+               u32 efuse_straps_4;
+               u32 efuse_straps_3;
 
-               if (i == num_shader_engines) {
-                       rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
-                       sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
+               WREG32(RCU_IND_INDEX, 0x204);
+               efuse_straps_4 = RREG32(RCU_IND_DATA);
+               WREG32(RCU_IND_INDEX, 0x203);
+               efuse_straps_3 = RREG32(RCU_IND_DATA);
+               tmp = (((efuse_straps_4 & 0xf) << 4) |
+                     ((efuse_straps_3 & 0xf0000000) >> 28));
+       } else {
+               tmp = 0;
+               for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
+                       u32 rb_disable_bitmap;
+
+                       WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+                       WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+                       rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+                       tmp <<= 4;
+                       tmp |= rb_disable_bitmap;
                }
+       }
+       /* enabled rb are just the one not disabled :) */
+       disabled_rb_mask = tmp;
 
-               WREG32(GRBM_GFX_INDEX, gfx);
-               WREG32(RLC_GFX_INDEX, gfx);
+       WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+       WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 
-               WREG32(CC_RB_BACKEND_DISABLE, rb);
-               WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
-               WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
-               WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
-        }
+       WREG32(GB_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+       WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-       grbm_gfx_index |= SE_BROADCAST_WRITES;
-       WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
-       WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+       tmp = gb_addr_config & NUM_PIPES_MASK;
+       tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+                                       EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+       WREG32(GB_BACKEND_MAP, tmp);
 
        WREG32(CGTS_SYS_TCC_DISABLE, 0);
        WREG32(CGTS_TCC_DISABLE, 0);
index 79130bfd1d6f058750b16a4c8f09eaf85ec992bf..2773039b49027917b6568dff37ca1e95ec2b9a95 100644 (file)
 #define EVERGREEN_MAX_PIPES_MASK        0xFF
 #define EVERGREEN_MAX_LDS_NUM           0xFFFF
 
+#define CYPRESS_GB_ADDR_CONFIG_GOLDEN        0x02011003
+#define BARTS_GB_ADDR_CONFIG_GOLDEN          0x02011003
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN         0x02011003
+#define JUNIPER_GB_ADDR_CONFIG_GOLDEN        0x02010002
+#define REDWOOD_GB_ADDR_CONFIG_GOLDEN        0x02010002
+#define TURKS_GB_ADDR_CONFIG_GOLDEN          0x02010002
+#define CEDAR_GB_ADDR_CONFIG_GOLDEN          0x02010001
+#define CAICOS_GB_ADDR_CONFIG_GOLDEN         0x02010001
+
 /* Registers */
 
 #define RCU_IND_INDEX                                  0x100
@@ -54,6 +63,7 @@
 #define                BACKEND_DISABLE(x)                      ((x) << 16)
 #define GB_ADDR_CONFIG                                 0x98F8
 #define                NUM_PIPES(x)                            ((x) << 0)
+#define                NUM_PIPES_MASK                          0x0000000f
 #define                PIPE_INTERLEAVE_SIZE(x)                 ((x) << 4)
 #define                BANK_INTERLEAVE_SIZE(x)                 ((x) << 8)
 #define                NUM_SHADER_ENGINES(x)                   ((x) << 12)
 #define        MC_VM_MD_L1_TLB0_CNTL                           0x2654
 #define        MC_VM_MD_L1_TLB1_CNTL                           0x2658
 #define        MC_VM_MD_L1_TLB2_CNTL                           0x265C
+#define        MC_VM_MD_L1_TLB3_CNTL                           0x2698
 
 #define        FUS_MC_VM_MD_L1_TLB0_CNTL                       0x265C
 #define        FUS_MC_VM_MD_L1_TLB1_CNTL                       0x2660
index b01c2dd627b0e9a345e2dc8964d415ba37deffff..3df4efa1194206b516efeacddabefae16744b064 100644 (file)
@@ -417,215 +417,17 @@ out:
 /*
  * Core functions
  */
-static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
-                                              u32 num_tile_pipes,
-                                              u32 num_backends_per_asic,
-                                              u32 *backend_disable_mask_per_asic,
-                                              u32 num_shader_engines)
-{
-       u32 backend_map = 0;
-       u32 enabled_backends_mask = 0;
-       u32 enabled_backends_count = 0;
-       u32 num_backends_per_se;
-       u32 cur_pipe;
-       u32 swizzle_pipe[CAYMAN_MAX_PIPES];
-       u32 cur_backend = 0;
-       u32 i;
-       bool force_no_swizzle;
-
-       /* force legal values */
-       if (num_tile_pipes < 1)
-               num_tile_pipes = 1;
-       if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
-               num_tile_pipes = rdev->config.cayman.max_tile_pipes;
-       if (num_shader_engines < 1)
-               num_shader_engines = 1;
-       if (num_shader_engines > rdev->config.cayman.max_shader_engines)
-               num_shader_engines = rdev->config.cayman.max_shader_engines;
-       if (num_backends_per_asic < num_shader_engines)
-               num_backends_per_asic = num_shader_engines;
-       if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
-               num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
-
-       /* make sure we have the same number of backends per se */
-       num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
-       /* set up the number of backends per se */
-       num_backends_per_se = num_backends_per_asic / num_shader_engines;
-       if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
-               num_backends_per_se = rdev->config.cayman.max_backends_per_se;
-               num_backends_per_asic = num_backends_per_se * num_shader_engines;
-       }
-
-       /* create enable mask and count for enabled backends */
-       for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
-               if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
-                       enabled_backends_mask |= (1 << i);
-                       ++enabled_backends_count;
-               }
-               if (enabled_backends_count == num_backends_per_asic)
-                       break;
-       }
-
-       /* force the backends mask to match the current number of backends */
-       if (enabled_backends_count != num_backends_per_asic) {
-               u32 this_backend_enabled;
-               u32 shader_engine;
-               u32 backend_per_se;
-
-               enabled_backends_mask = 0;
-               enabled_backends_count = 0;
-               *backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
-               for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
-                       /* calc the current se */
-                       shader_engine = i / rdev->config.cayman.max_backends_per_se;
-                       /* calc the backend per se */
-                       backend_per_se = i % rdev->config.cayman.max_backends_per_se;
-                       /* default to not enabled */
-                       this_backend_enabled = 0;
-                       if ((shader_engine < num_shader_engines) &&
-                           (backend_per_se < num_backends_per_se))
-                               this_backend_enabled = 1;
-                       if (this_backend_enabled) {
-                               enabled_backends_mask |= (1 << i);
-                               *backend_disable_mask_per_asic &= ~(1 << i);
-                               ++enabled_backends_count;
-                       }
-               }
-       }
-
-
-       memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
-       switch (rdev->family) {
-       case CHIP_CAYMAN:
-       case CHIP_ARUBA:
-               force_no_swizzle = true;
-               break;
-       default:
-               force_no_swizzle = false;
-               break;
-       }
-       if (force_no_swizzle) {
-               bool last_backend_enabled = false;
-
-               force_no_swizzle = false;
-               for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
-                       if (((enabled_backends_mask >> i) & 1) == 1) {
-                               if (last_backend_enabled)
-                                       force_no_swizzle = true;
-                               last_backend_enabled = true;
-                       } else
-                               last_backend_enabled = false;
-               }
-       }
-
-       switch (num_tile_pipes) {
-       case 1:
-       case 3:
-       case 5:
-       case 7:
-               DRM_ERROR("odd number of pipes!\n");
-               break;
-       case 2:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               break;
-       case 4:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 1;
-                       swizzle_pipe[3] = 3;
-               }
-               break;
-       case 6:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 1;
-                       swizzle_pipe[4] = 3;
-                       swizzle_pipe[5] = 5;
-               }
-               break;
-       case 8:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-                       swizzle_pipe[6] = 6;
-                       swizzle_pipe[7] = 7;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 6;
-                       swizzle_pipe[4] = 1;
-                       swizzle_pipe[5] = 3;
-                       swizzle_pipe[6] = 5;
-                       swizzle_pipe[7] = 7;
-               }
-               break;
-       }
-
-       for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-               while (((1 << cur_backend) & enabled_backends_mask) == 0)
-                       cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
-
-               backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
-               cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
-       }
-
-       return backend_map;
-}
-
-static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
-                                           u32 disable_mask_per_se,
-                                           u32 max_disable_mask_per_se,
-                                           u32 num_shader_engines)
-{
-       u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
-       u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
-
-       if (num_shader_engines == 1)
-               return disable_mask_per_asic;
-       else if (num_shader_engines == 2)
-               return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
-       else
-               return 0xffffffff;
-}
-
 static void cayman_gpu_init(struct radeon_device *rdev)
 {
-       u32 cc_rb_backend_disable = 0;
-       u32 cc_gc_shader_pipe_config;
        u32 gb_addr_config = 0;
        u32 mc_shared_chmap, mc_arb_ramcfg;
-       u32 gb_backend_map;
        u32 cgts_tcc_disable;
        u32 sx_debug_1;
        u32 smx_dc_ctl0;
-       u32 gc_user_shader_pipe_config;
-       u32 gc_user_rb_backend_disable;
-       u32 cgts_user_tcc_disable;
        u32 cgts_sm_ctrl_reg;
        u32 hdp_host_path_cntl;
        u32 tmp;
+       u32 disabled_rb_mask;
        int i, j;
 
        switch (rdev->family) {
@@ -650,6 +452,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                rdev->config.cayman.sc_prim_fifo_size = 0x100;
                rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_ARUBA:
        default:
@@ -687,6 +490,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                rdev->config.cayman.sc_prim_fifo_size = 0x40;
                rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
                break;
        }
 
@@ -706,39 +510,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
        mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
        mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
-       cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
-       cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
-       cgts_tcc_disable = 0xffff0000;
-       for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
-               cgts_tcc_disable &= ~(1 << (16 + i));
-       gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
-       gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
-       cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
-
-       rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
-       tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
-       rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
-       rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
-       tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
-       rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
-       tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-       rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
-       tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-       rdev->config.cayman.backend_disable_mask_per_asic =
-               cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
-                                                rdev->config.cayman.num_shader_engines);
-       rdev->config.cayman.backend_map =
-               cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
-                                                   rdev->config.cayman.num_backends_per_se *
-                                                   rdev->config.cayman.num_shader_engines,
-                                                   &rdev->config.cayman.backend_disable_mask_per_asic,
-                                                   rdev->config.cayman.num_shader_engines);
-       tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
-       rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
-       tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
-       rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
-       if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
-               rdev->config.cayman.mem_max_burst_length_bytes = 512;
        tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
        rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
        if (rdev->config.cayman.mem_row_size_in_kb > 4)
@@ -748,73 +519,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
        rdev->config.cayman.num_gpus = 1;
        rdev->config.cayman.multi_gpu_tile_size = 64;
 
-       //gb_addr_config = 0x02011003
-#if 0
-       gb_addr_config = RREG32(GB_ADDR_CONFIG);
-#else
-       gb_addr_config = 0;
-       switch (rdev->config.cayman.num_tile_pipes) {
-       case 1:
-       default:
-               gb_addr_config |= NUM_PIPES(0);
-               break;
-       case 2:
-               gb_addr_config |= NUM_PIPES(1);
-               break;
-       case 4:
-               gb_addr_config |= NUM_PIPES(2);
-               break;
-       case 8:
-               gb_addr_config |= NUM_PIPES(3);
-               break;
-       }
-
-       tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
-       gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
-       gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
-       tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
-       gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
-       switch (rdev->config.cayman.num_gpus) {
-       case 1:
-       default:
-               gb_addr_config |= NUM_GPUS(0);
-               break;
-       case 2:
-               gb_addr_config |= NUM_GPUS(1);
-               break;
-       case 4:
-               gb_addr_config |= NUM_GPUS(2);
-               break;
-       }
-       switch (rdev->config.cayman.multi_gpu_tile_size) {
-       case 16:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
-               break;
-       case 32:
-       default:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
-               break;
-       case 64:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
-               break;
-       case 128:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
-               break;
-       }
-       switch (rdev->config.cayman.mem_row_size_in_kb) {
-       case 1:
-       default:
-               gb_addr_config |= ROW_SIZE(0);
-               break;
-       case 2:
-               gb_addr_config |= ROW_SIZE(1);
-               break;
-       case 4:
-               gb_addr_config |= ROW_SIZE(2);
-               break;
-       }
-#endif
-
        tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
        rdev->config.cayman.num_tile_pipes = (1 << tmp);
        tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
@@ -828,17 +532,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
        tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
        rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
 
-       //gb_backend_map = 0x76541032;
-#if 0
-       gb_backend_map = RREG32(GB_BACKEND_MAP);
-#else
-       gb_backend_map =
-               cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
-                                                   rdev->config.cayman.num_backends_per_se *
-                                                   rdev->config.cayman.num_shader_engines,
-                                                   &rdev->config.cayman.backend_disable_mask_per_asic,
-                                                   rdev->config.cayman.num_shader_engines);
-#endif
+
        /* setup tiling info dword.  gb_addr_config is not adequate since it does
         * not have bank info, so create a custom tiling dword.
         * bits 3:0   num_pipes
@@ -865,34 +559,50 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
        /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
        if (rdev->flags & RADEON_IS_IGP)
-               rdev->config.evergreen.tile_config |= 1 << 4;
-       else
-               rdev->config.cayman.tile_config |=
-                       ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+               rdev->config.cayman.tile_config |= 1 << 4;
+       else {
+               if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+                       rdev->config.cayman.tile_config |= 1 << 4;
+               else
+                       rdev->config.cayman.tile_config |= 0 << 4;
+       }
        rdev->config.cayman.tile_config |=
                ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
        rdev->config.cayman.tile_config |=
                ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
 
-       rdev->config.cayman.backend_map = gb_backend_map;
-       WREG32(GB_BACKEND_MAP, gb_backend_map);
+       tmp = 0;
+       for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
+               u32 rb_disable_bitmap;
+
+               WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+               WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+               rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+               tmp <<= 4;
+               tmp |= rb_disable_bitmap;
+       }
+       /* enabled rb are just the one not disabled :) */
+       disabled_rb_mask = tmp;
+
+       WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+       WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+
        WREG32(GB_ADDR_CONFIG, gb_addr_config);
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-       /* primary versions */
-       WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+       tmp = gb_addr_config & NUM_PIPES_MASK;
+       tmp = r6xx_remap_render_backend(rdev, tmp,
+                                       rdev->config.cayman.max_backends_per_se *
+                                       rdev->config.cayman.max_shader_engines,
+                                       CAYMAN_MAX_BACKENDS, disabled_rb_mask);
+       WREG32(GB_BACKEND_MAP, tmp);
 
+       cgts_tcc_disable = 0xffff0000;
+       for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
+               cgts_tcc_disable &= ~(1 << (16 + i));
        WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
        WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
-
-       /* user versions */
-       WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-
        WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
        WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
 
index 2aa7046ada56af451726cccbb78a59b1a7bfaaa0..a0b98066e20796a180f429fce4ef2283e285667e 100644 (file)
@@ -41,6 +41,9 @@
 #define CAYMAN_MAX_TCC               16
 #define CAYMAN_MAX_TCC_MASK          0xFF
 
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN       0x02011003
+#define ARUBA_GB_ADDR_CONFIG_GOLDEN        0x12010001
+
 #define DMIF_ADDR_CONFIG                               0xBD4
 #define        SRBM_GFX_CNTL                                   0x0E44
 #define                RINGID(x)                                       (((x) & 0x3) << 0)
 #define        CGTS_SYS_TCC_DISABLE                            0x3F90
 #define        CGTS_USER_SYS_TCC_DISABLE                       0x3F94
 
+#define RLC_GFX_INDEX                                  0x3FC4
+
 #define        CONFIG_MEMSIZE                                  0x5428
 
 #define HDP_MEM_COHERENCY_FLUSH_CNTL                   0x5480
 #define                SOFT_RESET_VGT                                  (1 << 14)
 #define                SOFT_RESET_IA                                   (1 << 15)
 
+#define GRBM_GFX_INDEX                                 0x802C
+#define                INSTANCE_INDEX(x)                       ((x) << 0)
+#define                SE_INDEX(x)                             ((x) << 16)
+#define                INSTANCE_BROADCAST_WRITES               (1 << 30)
+#define                SE_BROADCAST_WRITES                     (1 << 31)
+
 #define        SCRATCH_REG0                                    0x8500
 #define        SCRATCH_REG1                                    0x8504
 #define        SCRATCH_REG2                                    0x8508
index f388a1d73b635f6e385e9defbc34d0b022538c4d..45cfcea635076f17f35619d7e916aea10546bffb 100644 (file)
@@ -1376,113 +1376,51 @@ int r600_asic_reset(struct radeon_device *rdev)
        return r600_gpu_soft_reset(rdev);
 }
 
-static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
-                                            u32 num_backends,
-                                            u32 backend_disable_mask)
-{
-       u32 backend_map = 0;
-       u32 enabled_backends_mask;
-       u32 enabled_backends_count;
-       u32 cur_pipe;
-       u32 swizzle_pipe[R6XX_MAX_PIPES];
-       u32 cur_backend;
-       u32 i;
-
-       if (num_tile_pipes > R6XX_MAX_PIPES)
-               num_tile_pipes = R6XX_MAX_PIPES;
-       if (num_tile_pipes < 1)
-               num_tile_pipes = 1;
-       if (num_backends > R6XX_MAX_BACKENDS)
-               num_backends = R6XX_MAX_BACKENDS;
-       if (num_backends < 1)
-               num_backends = 1;
-
-       enabled_backends_mask = 0;
-       enabled_backends_count = 0;
-       for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
-               if (((backend_disable_mask >> i) & 1) == 0) {
-                       enabled_backends_mask |= (1 << i);
-                       ++enabled_backends_count;
-               }
-               if (enabled_backends_count == num_backends)
-                       break;
-       }
-
-       if (enabled_backends_count == 0) {
-               enabled_backends_mask = 1;
-               enabled_backends_count = 1;
-       }
-
-       if (enabled_backends_count != num_backends)
-               num_backends = enabled_backends_count;
-
-       memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
-       switch (num_tile_pipes) {
-       case 1:
-               swizzle_pipe[0] = 0;
-               break;
-       case 2:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               break;
-       case 3:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               swizzle_pipe[2] = 2;
-               break;
-       case 4:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               swizzle_pipe[2] = 2;
-               swizzle_pipe[3] = 3;
-               break;
-       case 5:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               swizzle_pipe[2] = 2;
-               swizzle_pipe[3] = 3;
-               swizzle_pipe[4] = 4;
-               break;
-       case 6:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 2;
-               swizzle_pipe[2] = 4;
-               swizzle_pipe[3] = 5;
-               swizzle_pipe[4] = 1;
-               swizzle_pipe[5] = 3;
-               break;
-       case 7:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 2;
-               swizzle_pipe[2] = 4;
-               swizzle_pipe[3] = 6;
-               swizzle_pipe[4] = 1;
-               swizzle_pipe[5] = 3;
-               swizzle_pipe[6] = 5;
-               break;
-       case 8:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 2;
-               swizzle_pipe[2] = 4;
-               swizzle_pipe[3] = 6;
-               swizzle_pipe[4] = 1;
-               swizzle_pipe[5] = 3;
-               swizzle_pipe[6] = 5;
-               swizzle_pipe[7] = 7;
-               break;
+u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+                             u32 tiling_pipe_num,
+                             u32 max_rb_num,
+                             u32 total_max_rb_num,
+                             u32 disabled_rb_mask)
+{
+       u32 rendering_pipe_num, rb_num_width, req_rb_num;
+       u32 pipe_rb_ratio, pipe_rb_remain;
+       u32 data = 0, mask = 1 << (max_rb_num - 1);
+       unsigned i, j;
+
+       /* mask out the RBs that don't exist on that asic */
+       disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
+
+       rendering_pipe_num = 1 << tiling_pipe_num;
+       req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
+       BUG_ON(rendering_pipe_num < req_rb_num);
+
+       pipe_rb_ratio = rendering_pipe_num / req_rb_num;
+       pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
+
+       if (rdev->family <= CHIP_RV740) {
+               /* r6xx/r7xx */
+               rb_num_width = 2;
+       } else {
+               /* eg+ */
+               rb_num_width = 4;
        }
 
-       cur_backend = 0;
-       for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-               while (((1 << cur_backend) & enabled_backends_mask) == 0)
-                       cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
-
-               backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
-
-               cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
+       for (i = 0; i < max_rb_num; i++) {
+               if (!(mask & disabled_rb_mask)) {
+                       for (j = 0; j < pipe_rb_ratio; j++) {
+                               data <<= rb_num_width;
+                               data |= max_rb_num - i - 1;
+                       }
+                       if (pipe_rb_remain) {
+                               data <<= rb_num_width;
+                               data |= max_rb_num - i - 1;
+                               pipe_rb_remain--;
+                       }
+               }
+               mask >>= 1;
        }
 
-       return backend_map;
+       return data;
 }
 
 int r600_count_pipe_bits(uint32_t val)
@@ -1500,7 +1438,6 @@ void r600_gpu_init(struct radeon_device *rdev)
 {
        u32 tiling_config;
        u32 ramcfg;
-       u32 backend_map;
        u32 cc_rb_backend_disable;
        u32 cc_gc_shader_pipe_config;
        u32 tmp;
@@ -1511,8 +1448,9 @@ void r600_gpu_init(struct radeon_device *rdev)
        u32 sq_thread_resource_mgmt = 0;
        u32 sq_stack_resource_mgmt_1 = 0;
        u32 sq_stack_resource_mgmt_2 = 0;
+       u32 disabled_rb_mask;
 
-       /* FIXME: implement */
+       rdev->config.r600.tiling_group_size = 256;
        switch (rdev->family) {
        case CHIP_R600:
                rdev->config.r600.max_pipes = 4;
@@ -1616,10 +1554,7 @@ void r600_gpu_init(struct radeon_device *rdev)
        rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
        tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
        tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
-       if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
-               rdev->config.r600.tiling_group_size = 512;
-       else
-               rdev->config.r600.tiling_group_size = 256;
+
        tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
        if (tmp > 3) {
                tiling_config |= ROW_TILING(3);
@@ -1631,32 +1566,36 @@ void r600_gpu_init(struct radeon_device *rdev)
        tiling_config |= BANK_SWAPS(1);
 
        cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-       cc_rb_backend_disable |=
-               BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
-
-       cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
-       cc_gc_shader_pipe_config |=
-               INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
-       cc_gc_shader_pipe_config |=
-               INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
-
-       backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
-                                                       (R6XX_MAX_BACKENDS -
-                                                        r600_count_pipe_bits((cc_rb_backend_disable &
-                                                                              R6XX_MAX_BACKENDS_MASK) >> 16)),
-                                                       (cc_rb_backend_disable >> 16));
+       tmp = R6XX_MAX_BACKENDS -
+               r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
+       if (tmp < rdev->config.r600.max_backends) {
+               rdev->config.r600.max_backends = tmp;
+       }
+
+       cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
+       tmp = R6XX_MAX_PIPES -
+               r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
+       if (tmp < rdev->config.r600.max_pipes) {
+               rdev->config.r600.max_pipes = tmp;
+       }
+       tmp = R6XX_MAX_SIMDS -
+               r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
+       if (tmp < rdev->config.r600.max_simds) {
+               rdev->config.r600.max_simds = tmp;
+       }
+
+       disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+       tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+       tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
+                                       R6XX_MAX_BACKENDS, disabled_rb_mask);
+       tiling_config |= tmp << 16;
+       rdev->config.r600.backend_map = tmp;
+
        rdev->config.r600.tile_config = tiling_config;
-       rdev->config.r600.backend_map = backend_map;
-       tiling_config |= BACKEND_MAP(backend_map);
        WREG32(GB_TILING_CONFIG, tiling_config);
        WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
        WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
 
-       /* Setup pipes */
-       WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-       WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-
        tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
        WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
        WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
index 15bd3b216243c2207777d4ab20cb2e0fe091a232..a0dbf1fe6a40815b335a06d9a7c14202aad625bf 100644 (file)
 #define                BACKEND_MAP(x)                                  ((x) << 16)
 
 #define GB_TILING_CONFIG                               0x98F0
+#define     PIPE_TILING__SHIFT              1
+#define     PIPE_TILING__MASK               0x0000000e
 
 #define        GC_USER_SHADER_PIPE_CONFIG                      0x8954
 #define                INACTIVE_QD_PIPES(x)                            ((x) << 8)
index 1dc3a4aba0205f5afccaea25d71a785868e3506b..85dac33e3cce3c0eeca90ae3d57d066b4c771a24 100644 (file)
@@ -346,6 +346,9 @@ struct radeon_bo {
        /* Constant after initialization */
        struct radeon_device            *rdev;
        struct drm_gem_object           gem_base;
+
+       struct ttm_bo_kmap_obj dma_buf_vmap;
+       int vmapping_count;
 };
 #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
@@ -848,7 +851,6 @@ struct radeon_cs_parser {
        s32                     priority;
 };
 
-extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
 extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
 extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
 
@@ -1846,6 +1848,11 @@ extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
 extern void r600_hdmi_enable(struct drm_encoder *encoder);
 extern void r600_hdmi_disable(struct drm_encoder *encoder);
 extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+                                    u32 tiling_pipe_num,
+                                    u32 max_rb_num,
+                                    u32 total_max_rb_num,
+                                    u32 enabled_rb_mask);
 
 /*
  * evergreen functions used by radeon_encoder.c
index f6e69b8c06c6110e3aecf80b41eb3b1ca6eb6968..b1e3820df36397e9c2ad7a7798be5412c0853df2 100644 (file)
@@ -444,7 +444,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
         */
        if ((dev->pdev->device == 0x9498) &&
            (dev->pdev->subsystem_vendor == 0x1682) &&
-           (dev->pdev->subsystem_device == 0x2452)) {
+           (dev->pdev->subsystem_device == 0x2452) &&
+           (i2c_bus->valid == false) &&
+           !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
                struct radeon_device *rdev = dev->dev_private;
                *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
        }
index c7d64a7390339e7b6fe29e27de06265843a865ff..142f89462aa4ddab99030f7b285a8f6b2e883925 100644 (file)
@@ -147,6 +147,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
                                           sync_to_ring, p->ring);
 }
 
+/* XXX: note that this is called from the legacy UMS CS ioctl as well */
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 {
        struct drm_radeon_cs *cs = data;
@@ -245,22 +246,24 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                }
        }
 
-       if ((p->cs_flags & RADEON_CS_USE_VM) &&
-           !p->rdev->vm_manager.enabled) {
-               DRM_ERROR("VM not active on asic!\n");
-               return -EINVAL;
-       }
-
-       /* we only support VM on SI+ */
-       if ((p->rdev->family >= CHIP_TAHITI) &&
-           ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
-               DRM_ERROR("VM required on SI+!\n");
-               return -EINVAL;
-       }
+       /* these are KMS only */
+       if (p->rdev) {
+               if ((p->cs_flags & RADEON_CS_USE_VM) &&
+                   !p->rdev->vm_manager.enabled) {
+                       DRM_ERROR("VM not active on asic!\n");
+                       return -EINVAL;
+               }
 
-       if (radeon_cs_get_ring(p, ring, priority))
-               return -EINVAL;
+               /* we only support VM on SI+ */
+               if ((p->rdev->family >= CHIP_TAHITI) &&
+                   ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
+                       DRM_ERROR("VM required on SI+!\n");
+                       return -EINVAL;
+               }
 
+               if (radeon_cs_get_ring(p, ring, priority))
+                       return -EINVAL;
+       }
 
        /* deal with non-vm */
        if ((p->chunk_ib_idx != -1) &&
@@ -580,7 +583,7 @@ int radeon_cs_finish_pages(struct radeon_cs_parser *p)
        return 0;
 }
 
-int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
+static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
 {
        int new_page;
        struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
@@ -623,3 +626,28 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
 
        return new_page;
 }
+
+u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+       struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+       u32 pg_idx, pg_offset;
+       u32 idx_value = 0;
+       int new_page;
+
+       pg_idx = (idx * 4) / PAGE_SIZE;
+       pg_offset = (idx * 4) % PAGE_SIZE;
+
+       if (ibc->kpage_idx[0] == pg_idx)
+               return ibc->kpage[0][pg_offset/4];
+       if (ibc->kpage_idx[1] == pg_idx)
+               return ibc->kpage[1][pg_offset/4];
+
+       new_page = radeon_cs_update_pages(p, pg_idx);
+       if (new_page < 0) {
+               p->parser_error = new_page;
+               return 0;
+       }
+
+       idx_value = ibc->kpage[new_page][pg_offset/4];
+       return idx_value;
+}
index b8f835d8ecb4127a2c4b8771e1b74859f0561c5d..8ddab4c76710f1b00e3917e0415a6aad91965f9a 100644 (file)
@@ -85,6 +85,47 @@ static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, v
 
 }
 
+static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+       return -EINVAL;
+}
+
+static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+       struct radeon_bo *bo = dma_buf->priv;
+       struct drm_device *dev = bo->rdev->ddev;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       if (bo->vmapping_count) {
+               bo->vmapping_count++;
+               goto out_unlock;
+       }
+
+       ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+                         &bo->dma_buf_vmap);
+       if (ret) {
+               mutex_unlock(&dev->struct_mutex);
+               return ERR_PTR(ret);
+       }
+       bo->vmapping_count = 1;
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return bo->dma_buf_vmap.virtual;
+}
+
+static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+       struct radeon_bo *bo = dma_buf->priv;
+       struct drm_device *dev = bo->rdev->ddev;
+
+       mutex_lock(&dev->struct_mutex);
+       bo->vmapping_count--;
+       if (bo->vmapping_count == 0) {
+               ttm_bo_kunmap(&bo->dma_buf_vmap);
+       }
+       mutex_unlock(&dev->struct_mutex);
+}
 const static struct dma_buf_ops radeon_dmabuf_ops =  {
        .map_dma_buf = radeon_gem_map_dma_buf,
        .unmap_dma_buf = radeon_gem_unmap_dma_buf,
@@ -93,6 +134,9 @@ const static struct dma_buf_ops radeon_dmabuf_ops =  {
        .kmap_atomic = radeon_gem_kmap_atomic,
        .kunmap = radeon_gem_kunmap,
        .kunmap_atomic = radeon_gem_kunmap_atomic,
+       .mmap = radeon_gem_prime_mmap,
+       .vmap = radeon_gem_prime_vmap,
+       .vunmap = radeon_gem_prime_vunmap,
 };
 
 static int radeon_prime_create(struct drm_device *dev,
index 493a7be753065afb8b2aba0d080efd75927c4569..983658c91358939e0123a4545519d2d6c3b24f05 100644 (file)
  */
 int radeon_debugfs_sa_init(struct radeon_device *rdev);
 
-u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
-{
-       struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
-       u32 pg_idx, pg_offset;
-       u32 idx_value = 0;
-       int new_page;
-
-       pg_idx = (idx * 4) / PAGE_SIZE;
-       pg_offset = (idx * 4) % PAGE_SIZE;
-
-       if (ibc->kpage_idx[0] == pg_idx)
-               return ibc->kpage[0][pg_offset/4];
-       if (ibc->kpage_idx[1] == pg_idx)
-               return ibc->kpage[1][pg_offset/4];
-
-       new_page = radeon_cs_update_pages(p, pg_idx);
-       if (new_page < 0) {
-               p->parser_error = new_page;
-               return 0;
-       }
-
-       idx_value = ibc->kpage[new_page][pg_offset/4];
-       return idx_value;
-}
-
 int radeon_ib_get(struct radeon_device *rdev, int ring,
                  struct radeon_ib *ib, unsigned size)
 {
index c2f473bc13b85bf189b44d823d0ef193b94fd3c2..04ddc365a908c8110537b80dc998a95ee337d0e1 100644 (file)
@@ -151,6 +151,8 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
        WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+       if (rdev->family == CHIP_RV740)
+               WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
@@ -363,180 +365,6 @@ void r700_cp_fini(struct radeon_device *rdev)
 /*
  * Core functions
  */
-static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
-                                            u32 num_tile_pipes,
-                                            u32 num_backends,
-                                            u32 backend_disable_mask)
-{
-       u32 backend_map = 0;
-       u32 enabled_backends_mask;
-       u32 enabled_backends_count;
-       u32 cur_pipe;
-       u32 swizzle_pipe[R7XX_MAX_PIPES];
-       u32 cur_backend;
-       u32 i;
-       bool force_no_swizzle;
-
-       if (num_tile_pipes > R7XX_MAX_PIPES)
-               num_tile_pipes = R7XX_MAX_PIPES;
-       if (num_tile_pipes < 1)
-               num_tile_pipes = 1;
-       if (num_backends > R7XX_MAX_BACKENDS)
-               num_backends = R7XX_MAX_BACKENDS;
-       if (num_backends < 1)
-               num_backends = 1;
-
-       enabled_backends_mask = 0;
-       enabled_backends_count = 0;
-       for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
-               if (((backend_disable_mask >> i) & 1) == 0) {
-                       enabled_backends_mask |= (1 << i);
-                       ++enabled_backends_count;
-               }
-               if (enabled_backends_count == num_backends)
-                       break;
-       }
-
-       if (enabled_backends_count == 0) {
-               enabled_backends_mask = 1;
-               enabled_backends_count = 1;
-       }
-
-       if (enabled_backends_count != num_backends)
-               num_backends = enabled_backends_count;
-
-       switch (rdev->family) {
-       case CHIP_RV770:
-       case CHIP_RV730:
-               force_no_swizzle = false;
-               break;
-       case CHIP_RV710:
-       case CHIP_RV740:
-       default:
-               force_no_swizzle = true;
-               break;
-       }
-
-       memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
-       switch (num_tile_pipes) {
-       case 1:
-               swizzle_pipe[0] = 0;
-               break;
-       case 2:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               break;
-       case 3:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 1;
-               }
-               break;
-       case 4:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 3;
-                       swizzle_pipe[3] = 1;
-               }
-               break;
-       case 5:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 1;
-                       swizzle_pipe[4] = 3;
-               }
-               break;
-       case 6:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 5;
-                       swizzle_pipe[4] = 3;
-                       swizzle_pipe[5] = 1;
-               }
-               break;
-       case 7:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-                       swizzle_pipe[6] = 6;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 6;
-                       swizzle_pipe[4] = 3;
-                       swizzle_pipe[5] = 1;
-                       swizzle_pipe[6] = 5;
-               }
-               break;
-       case 8:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-                       swizzle_pipe[6] = 6;
-                       swizzle_pipe[7] = 7;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 6;
-                       swizzle_pipe[4] = 3;
-                       swizzle_pipe[5] = 1;
-                       swizzle_pipe[6] = 7;
-                       swizzle_pipe[7] = 5;
-               }
-               break;
-       }
-
-       cur_backend = 0;
-       for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-               while (((1 << cur_backend) & enabled_backends_mask) == 0)
-                       cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
-
-               backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
-
-               cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
-       }
-
-       return backend_map;
-}
-
 static void rv770_gpu_init(struct radeon_device *rdev)
 {
        int i, j, num_qd_pipes;
@@ -552,14 +380,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
        u32 sq_thread_resource_mgmt;
        u32 hdp_host_path_cntl;
        u32 sq_dyn_gpr_size_simd_ab_0;
-       u32 backend_map;
        u32 gb_tiling_config = 0;
        u32 cc_rb_backend_disable = 0;
        u32 cc_gc_shader_pipe_config = 0;
        u32 mc_arb_ramcfg;
-       u32 db_debug4;
+       u32 db_debug4, tmp;
+       u32 inactive_pipes, shader_pipe_config;
+       u32 disabled_rb_mask;
+       unsigned active_number;
 
        /* setup chip specs */
+       rdev->config.rv770.tiling_group_size = 256;
        switch (rdev->family) {
        case CHIP_RV770:
                rdev->config.rv770.max_pipes = 4;
@@ -670,33 +501,70 @@ static void rv770_gpu_init(struct radeon_device *rdev)
        /* setup tiling, simd, pipe config */
        mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
+       shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
+       inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
+       for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
+               if (!(inactive_pipes & tmp)) {
+                       active_number++;
+               }
+               tmp <<= 1;
+       }
+       if (active_number == 1) {
+               WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
+       } else {
+               WREG32(SPI_CONFIG_CNTL, 0);
+       }
+
+       cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+       tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
+       if (tmp < rdev->config.rv770.max_backends) {
+               rdev->config.rv770.max_backends = tmp;
+       }
+
+       cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+       tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
+       if (tmp < rdev->config.rv770.max_pipes) {
+               rdev->config.rv770.max_pipes = tmp;
+       }
+       tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
+       if (tmp < rdev->config.rv770.max_simds) {
+               rdev->config.rv770.max_simds = tmp;
+       }
+
        switch (rdev->config.rv770.max_tile_pipes) {
        case 1:
        default:
-               gb_tiling_config |= PIPE_TILING(0);
+               gb_tiling_config = PIPE_TILING(0);
                break;
        case 2:
-               gb_tiling_config |= PIPE_TILING(1);
+               gb_tiling_config = PIPE_TILING(1);
                break;
        case 4:
-               gb_tiling_config |= PIPE_TILING(2);
+               gb_tiling_config = PIPE_TILING(2);
                break;
        case 8:
-               gb_tiling_config |= PIPE_TILING(3);
+               gb_tiling_config = PIPE_TILING(3);
                break;
        }
        rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
 
+       disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
+       tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+       tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
+                                       R7XX_MAX_BACKENDS, disabled_rb_mask);
+       gb_tiling_config |= tmp << 16;
+       rdev->config.rv770.backend_map = tmp;
+
        if (rdev->family == CHIP_RV770)
                gb_tiling_config |= BANK_TILING(1);
-       else
-               gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+       else {
+               if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+                       gb_tiling_config |= BANK_TILING(1);
+               else
+                       gb_tiling_config |= BANK_TILING(0);
+       }
        rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
        gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
-       if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
-               rdev->config.rv770.tiling_group_size = 512;
-       else
-               rdev->config.rv770.tiling_group_size = 256;
        if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
                gb_tiling_config |= ROW_TILING(3);
                gb_tiling_config |= SAMPLE_SPLIT(3);
@@ -708,47 +576,19 @@ static void rv770_gpu_init(struct radeon_device *rdev)
        }
 
        gb_tiling_config |= BANK_SWAPS(1);
-
-       cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-       cc_rb_backend_disable |=
-               BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
-
-       cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
-       cc_gc_shader_pipe_config |=
-               INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
-       cc_gc_shader_pipe_config |=
-               INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
-
-       if (rdev->family == CHIP_RV740)
-               backend_map = 0x28;
-       else
-               backend_map = r700_get_tile_pipe_to_backend_map(rdev,
-                                                               rdev->config.rv770.max_tile_pipes,
-                                                               (R7XX_MAX_BACKENDS -
-                                                                r600_count_pipe_bits((cc_rb_backend_disable &
-                                                                                      R7XX_MAX_BACKENDS_MASK) >> 16)),
-                                                               (cc_rb_backend_disable >> 16));
-
        rdev->config.rv770.tile_config = gb_tiling_config;
-       rdev->config.rv770.backend_map = backend_map;
-       gb_tiling_config |= BACKEND_MAP(backend_map);
 
        WREG32(GB_TILING_CONFIG, gb_tiling_config);
        WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
        WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 
-       WREG32(CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
-       WREG32(CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
-       WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-       WREG32(CC_SYS_RB_BACKEND_DISABLE,  cc_rb_backend_disable);
-
        WREG32(CGTS_SYS_TCC_DISABLE, 0);
        WREG32(CGTS_TCC_DISABLE, 0);
        WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
        WREG32(CGTS_USER_TCC_DISABLE, 0);
 
-       num_qd_pipes =
-               R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
+
+       num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
        WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
        WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
 
@@ -809,8 +649,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 
        WREG32(VGT_NUM_INSTANCES, 1);
 
-       WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
-
        WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
 
        WREG32(CP_PERFMON_CNTL, 0);
index 9c549f702f2f35e995db4b88ffb31ad5b4d21e9b..fdc0898960119d8c6f38104c32e1960a7e0d16f9 100644 (file)
 #define                BACKEND_MAP(x)                                  ((x) << 16)
 
 #define GB_TILING_CONFIG                               0x98F0
+#define     PIPE_TILING__SHIFT              1
+#define     PIPE_TILING__MASK               0x0000000e
 
 #define        GC_USER_SHADER_PIPE_CONFIG                      0x8954
 #define                INACTIVE_QD_PIPES(x)                            ((x) << 8)
 #define                INACTIVE_QD_PIPES_MASK                          0x0000FF00
+#define                INACTIVE_QD_PIPES_SHIFT                     8
 #define                INACTIVE_SIMDS(x)                               ((x) << 16)
 #define                INACTIVE_SIMDS_MASK                             0x00FF0000
 
 #define        MC_VM_MD_L1_TLB0_CNTL                           0x2654
 #define        MC_VM_MD_L1_TLB1_CNTL                           0x2658
 #define        MC_VM_MD_L1_TLB2_CNTL                           0x265C
+#define        MC_VM_MD_L1_TLB3_CNTL                           0x2698
 #define        MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR              0x203C
 #define        MC_VM_SYSTEM_APERTURE_HIGH_ADDR                 0x2038
 #define        MC_VM_SYSTEM_APERTURE_LOW_ADDR                  0x2034
index 36792bd4da77598e69dad490b3cf4b4ac39828f1..b67cfcaa661f87bffbb84d3f0ed9786efb591e5a 100644 (file)
@@ -1834,6 +1834,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
                        spin_unlock(&glob->lru_lock);
                        (void) ttm_bo_cleanup_refs(bo, false, false, false);
                        kref_put(&bo->list_kref, ttm_bo_release_list);
+                       spin_lock(&glob->lru_lock);
                        continue;
                }
 
index a029ee39b0c526d0e0fe63150acd25ea992f73c0..ce9a61179925cd540f4d4956b991513ca53b27e2 100644 (file)
@@ -156,8 +156,17 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
        if (!fb->active_16)
                return 0;
 
-       if (!fb->obj->vmapping)
-               udl_gem_vmap(fb->obj);
+       if (!fb->obj->vmapping) {
+               ret = udl_gem_vmap(fb->obj);
+               if (ret == -ENOMEM) {
+                       DRM_ERROR("failed to vmap fb\n");
+                       return 0;
+               }
+               if (!fb->obj->vmapping) {
+                       DRM_ERROR("failed to vmapping\n");
+                       return 0;
+               }
+       }
 
        start_cycles = get_cycles();
 
index 40efd32f7dce85f0d45e8fce92cc64ea1e11990e..7bd65bdd15a8092e955d959c08fd6dd04781d490 100644 (file)
@@ -180,6 +180,18 @@ int udl_gem_vmap(struct udl_gem_object *obj)
        int page_count = obj->base.size / PAGE_SIZE;
        int ret;
 
+       if (obj->base.import_attach) {
+               ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
+                                              0, obj->base.size, DMA_BIDIRECTIONAL);
+               if (ret)
+                       return -EINVAL;
+
+               obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
+               if (!obj->vmapping)
+                       return -ENOMEM;
+               return 0;
+       }
+               
        ret = udl_gem_get_pages(obj, GFP_KERNEL);
        if (ret)
                return ret;
@@ -192,6 +204,13 @@ int udl_gem_vmap(struct udl_gem_object *obj)
 
 void udl_gem_vunmap(struct udl_gem_object *obj)
 {
+       if (obj->base.import_attach) {
+               dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
+               dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
+                                      obj->base.size, DMA_BIDIRECTIONAL);
+               return;
+       }
+
        if (obj->vmapping)
                vunmap(obj->vmapping);
 
@@ -202,12 +221,12 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
 {
        struct udl_gem_object *obj = to_udl_bo(gem_obj);
 
-       if (gem_obj->import_attach)
-               drm_prime_gem_destroy(gem_obj, obj->sg);
-
        if (obj->vmapping)
                udl_gem_vunmap(obj);
 
+       if (gem_obj->import_attach)
+               drm_prime_gem_destroy(gem_obj, obj->sg);
+
        if (obj->pages)
                udl_gem_put_pages(obj);
 
@@ -234,7 +253,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 
        ret = udl_gem_get_pages(gobj, GFP_KERNEL);
        if (ret)
-               return ret;
+               goto out;
        if (!gobj->base.map_list.map) {
                ret = drm_gem_create_mmap_offset(obj);
                if (ret)
@@ -257,8 +276,6 @@ static int udl_prime_create(struct drm_device *dev,
 {
        struct udl_gem_object *obj;
        int npages;
-       int i;
-       struct scatterlist *iter;
 
        npages = size / PAGE_SIZE;
 
index 51c9ba5cd2fbff85f4411ec6629f112ad8373a70..21ee7822656041c2e99c7f8f549038034f56c6ce 100644 (file)
@@ -66,7 +66,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
        cmd += sizeof(remap_cmd) / sizeof(uint32);
 
        for (i = 0; i < num_pages; ++i) {
-               if (VMW_PPN_SIZE > 4)
+               if (VMW_PPN_SIZE <= 4)
                        *cmd = page_to_pfn(*pages++);
                else
                        *((uint64_t *)cmd) = page_to_pfn(*pages++);
index 7cd9bf42108b7368803322d7116c3a8951d30a1b..6f1d167cb1ea9c2174c403a8a0f0b6b0f1d33bc7 100644 (file)
@@ -1036,8 +1036,9 @@ config SENSORS_SCH56XX_COMMON
 
 config SENSORS_SCH5627
        tristate "SMSC SCH5627"
-       depends on !PPC
+       depends on !PPC && WATCHDOG
        select SENSORS_SCH56XX_COMMON
+       select WATCHDOG_CORE
        help
          If you say yes here you get support for the hardware monitoring
          features of the SMSC SCH5627 Super-I/O chip including support for
@@ -1048,8 +1049,9 @@ config SENSORS_SCH5627
 
 config SENSORS_SCH5636
        tristate "SMSC SCH5636"
-       depends on !PPC
+       depends on !PPC && WATCHDOG
        select SENSORS_SCH56XX_COMMON
+       select WATCHDOG_CORE
        help
          SMSC SCH5636 Super I/O chips include an embedded microcontroller for
          hardware monitoring solutions, allowing motherboard manufacturers to
index 8ec6dfbccb640f8e3f89d22c701cc6151ffcebfe..8342275378b85759f57b9af583f34d281e58cd25 100644 (file)
@@ -579,7 +579,7 @@ static int __devinit sch5627_probe(struct platform_device *pdev)
        }
 
        /* Note failing to register the watchdog is not a fatal error */
-       data->watchdog = sch56xx_watchdog_register(data->addr,
+       data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
                        (build_code << 24) | (build_id << 8) | hwmon_rev,
                        &data->update_lock, 1);
 
index 906d4ed32d81abd2b6c7d59f1fcd77c035fad8bc..96a7e68718cadb8348ea4d7680750ade1d6d143b 100644 (file)
@@ -510,7 +510,7 @@ static int __devinit sch5636_probe(struct platform_device *pdev)
        }
 
        /* Note failing to register the watchdog is not a fatal error */
-       data->watchdog = sch56xx_watchdog_register(data->addr,
+       data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
                                        (revision[0] << 8) | revision[1],
                                        &data->update_lock, 0);
 
index ce52fc57d41d70cf8bb119e9e3a2d3756e294615..4380f5d07be2b8b5398d43e2e393307f099341e6 100644 (file)
@@ -66,15 +66,10 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
 
 struct sch56xx_watchdog_data {
        u16 addr;
-       u32 revision;
        struct mutex *io_lock;
-       struct mutex watchdog_lock;
-       struct list_head list; /* member of the watchdog_data_list */
        struct kref kref;
-       struct miscdevice watchdog_miscdev;
-       unsigned long watchdog_is_open;
-       char watchdog_name[10]; /* must be unique to avoid sysfs conflict */
-       char watchdog_expect_close;
+       struct watchdog_info wdinfo;
+       struct watchdog_device wddev;
        u8 watchdog_preset;
        u8 watchdog_control;
        u8 watchdog_output_enable;
@@ -82,15 +77,6 @@ struct sch56xx_watchdog_data {
 
 static struct platform_device *sch56xx_pdev;
 
-/*
- * Somewhat ugly :( global data pointer list with all sch56xx devices, so that
- * we can find our device data as when using misc_register there is no other
- * method to get to ones device data from the open fop.
- */
-static LIST_HEAD(watchdog_data_list);
-/* Note this lock not only protect list access, but also data.kref access */
-static DEFINE_MUTEX(watchdog_data_mutex);
-
 /* Super I/O functions */
 static inline int superio_inb(int base, int reg)
 {
@@ -272,22 +258,22 @@ EXPORT_SYMBOL(sch56xx_read_virtual_reg12);
  * Watchdog routines
  */
 
-/*
- * Release our data struct when the platform device has been released *and*
- * all references to our watchdog device are released.
- */
-static void sch56xx_watchdog_release_resources(struct kref *r)
+/* Release our data struct when we're unregistered *and*
+   all references to our watchdog device are released */
+static void watchdog_release_resources(struct kref *r)
 {
        struct sch56xx_watchdog_data *data =
                container_of(r, struct sch56xx_watchdog_data, kref);
        kfree(data);
 }
 
-static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
-                               int timeout)
+static int watchdog_set_timeout(struct watchdog_device *wddev,
+                               unsigned int timeout)
 {
-       int ret, resolution;
+       struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
+       unsigned int resolution;
        u8 control;
+       int ret;
 
        /* 1 second or 60 second resolution? */
        if (timeout <= 255)
@@ -298,12 +284,6 @@ static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
        if (timeout < resolution || timeout > (resolution * 255))
                return -EINVAL;
 
-       mutex_lock(&data->watchdog_lock);
-       if (!data->addr) {
-               ret = -ENODEV;
-               goto leave;
-       }
-
        if (resolution == 1)
                control = data->watchdog_control | SCH56XX_WDOG_TIME_BASE_SEC;
        else
@@ -316,7 +296,7 @@ static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
                                                control);
                mutex_unlock(data->io_lock);
                if (ret)
-                       goto leave;
+                       return ret;
 
                data->watchdog_control = control;
        }
@@ -326,38 +306,17 @@ static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
         * the watchdog countdown.
         */
        data->watchdog_preset = DIV_ROUND_UP(timeout, resolution);
+       wddev->timeout = data->watchdog_preset * resolution;
 
-       ret = data->watchdog_preset * resolution;
-leave:
-       mutex_unlock(&data->watchdog_lock);
-       return ret;
-}
-
-static int watchdog_get_timeout(struct sch56xx_watchdog_data *data)
-{
-       int timeout;
-
-       mutex_lock(&data->watchdog_lock);
-       if (data->watchdog_control & SCH56XX_WDOG_TIME_BASE_SEC)
-               timeout = data->watchdog_preset;
-       else
-               timeout = data->watchdog_preset * 60;
-       mutex_unlock(&data->watchdog_lock);
-
-       return timeout;
+       return 0;
 }
 
-static int watchdog_start(struct sch56xx_watchdog_data *data)
+static int watchdog_start(struct watchdog_device *wddev)
 {
+       struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
        int ret;
        u8 val;
 
-       mutex_lock(&data->watchdog_lock);
-       if (!data->addr) {
-               ret = -ENODEV;
-               goto leave_unlock_watchdog;
-       }
-
        /*
         * The sch56xx's watchdog cannot really be started / stopped
         * it is always running, but we can avoid the timer expiring
@@ -385,18 +344,14 @@ static int watchdog_start(struct sch56xx_watchdog_data *data)
        if (ret)
                goto leave;
 
-       /* 2. Enable output (if not already enabled) */
-       if (!(data->watchdog_output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)) {
-               val = data->watchdog_output_enable |
-                     SCH56XX_WDOG_OUTPUT_ENABLE;
-               ret = sch56xx_write_virtual_reg(data->addr,
-                                               SCH56XX_REG_WDOG_OUTPUT_ENABLE,
-                                               val);
-               if (ret)
-                       goto leave;
+       /* 2. Enable output */
+       val = data->watchdog_output_enable | SCH56XX_WDOG_OUTPUT_ENABLE;
+       ret = sch56xx_write_virtual_reg(data->addr,
+                                       SCH56XX_REG_WDOG_OUTPUT_ENABLE, val);
+       if (ret)
+               goto leave;
 
-               data->watchdog_output_enable = val;
-       }
+       data->watchdog_output_enable = val;
 
        /* 3. Clear the watchdog event bit if set */
        val = inb(data->addr + 9);
@@ -405,234 +360,70 @@ static int watchdog_start(struct sch56xx_watchdog_data *data)
 
 leave:
        mutex_unlock(data->io_lock);
-leave_unlock_watchdog:
-       mutex_unlock(&data->watchdog_lock);
        return ret;
 }
 
-static int watchdog_trigger(struct sch56xx_watchdog_data *data)
+static int watchdog_trigger(struct watchdog_device *wddev)
 {
+       struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
        int ret;
 
-       mutex_lock(&data->watchdog_lock);
-       if (!data->addr) {
-               ret = -ENODEV;
-               goto leave;
-       }
-
        /* Reset the watchdog countdown counter */
        mutex_lock(data->io_lock);
        ret = sch56xx_write_virtual_reg(data->addr, SCH56XX_REG_WDOG_PRESET,
                                        data->watchdog_preset);
        mutex_unlock(data->io_lock);
-leave:
-       mutex_unlock(&data->watchdog_lock);
+
        return ret;
 }
 
-static int watchdog_stop_unlocked(struct sch56xx_watchdog_data *data)
+static int watchdog_stop(struct watchdog_device *wddev)
 {
+       struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
        int ret = 0;
        u8 val;
 
-       if (!data->addr)
-               return -ENODEV;
-
-       if (data->watchdog_output_enable & SCH56XX_WDOG_OUTPUT_ENABLE) {
-               val = data->watchdog_output_enable &
-                     ~SCH56XX_WDOG_OUTPUT_ENABLE;
-               mutex_lock(data->io_lock);
-               ret = sch56xx_write_virtual_reg(data->addr,
-                                               SCH56XX_REG_WDOG_OUTPUT_ENABLE,
-                                               val);
-               mutex_unlock(data->io_lock);
-               if (ret)
-                       return ret;
-
-               data->watchdog_output_enable = val;
-       }
-
-       return ret;
-}
-
-static int watchdog_stop(struct sch56xx_watchdog_data *data)
-{
-       int ret;
-
-       mutex_lock(&data->watchdog_lock);
-       ret = watchdog_stop_unlocked(data);
-       mutex_unlock(&data->watchdog_lock);
-
-       return ret;
-}
-
-static int watchdog_release(struct inode *inode, struct file *filp)
-{
-       struct sch56xx_watchdog_data *data = filp->private_data;
-
-       if (data->watchdog_expect_close) {
-               watchdog_stop(data);
-               data->watchdog_expect_close = 0;
-       } else {
-               watchdog_trigger(data);
-               pr_crit("unexpected close, not stopping watchdog!\n");
-       }
-
-       clear_bit(0, &data->watchdog_is_open);
-
-       mutex_lock(&watchdog_data_mutex);
-       kref_put(&data->kref, sch56xx_watchdog_release_resources);
-       mutex_unlock(&watchdog_data_mutex);
+       val = data->watchdog_output_enable & ~SCH56XX_WDOG_OUTPUT_ENABLE;
+       mutex_lock(data->io_lock);
+       ret = sch56xx_write_virtual_reg(data->addr,
+                                       SCH56XX_REG_WDOG_OUTPUT_ENABLE, val);
+       mutex_unlock(data->io_lock);
+       if (ret)
+               return ret;
 
+       data->watchdog_output_enable = val;
        return 0;
 }
 
-static int watchdog_open(struct inode *inode, struct file *filp)
+static void watchdog_ref(struct watchdog_device *wddev)
 {
-       struct sch56xx_watchdog_data *pos, *data = NULL;
-       int ret, watchdog_is_open;
-
-       /*
-        * We get called from drivers/char/misc.c with misc_mtx hold, and we
-        * call misc_register() from sch56xx_watchdog_probe() with
-        * watchdog_data_mutex hold, as misc_register() takes the misc_mtx
-        * lock, this is a possible deadlock, so we use mutex_trylock here.
-        */
-       if (!mutex_trylock(&watchdog_data_mutex))
-               return -ERESTARTSYS;
-       list_for_each_entry(pos, &watchdog_data_list, list) {
-               if (pos->watchdog_miscdev.minor == iminor(inode)) {
-                       data = pos;
-                       break;
-               }
-       }
-       /* Note we can never not have found data, so we don't check for this */
-       watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
-       if (!watchdog_is_open)
-               kref_get(&data->kref);
-       mutex_unlock(&watchdog_data_mutex);
-
-       if (watchdog_is_open)
-               return -EBUSY;
-
-       filp->private_data = data;
-
-       /* Start the watchdog */
-       ret = watchdog_start(data);
-       if (ret) {
-               watchdog_release(inode, filp);
-               return ret;
-       }
+       struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
 
-       return nonseekable_open(inode, filp);
+       kref_get(&data->kref);
 }
 
-static ssize_t watchdog_write(struct file *filp, const char __user *buf,
-       size_t count, loff_t *offset)
+static void watchdog_unref(struct watchdog_device *wddev)
 {
-       int ret;
-       struct sch56xx_watchdog_data *data = filp->private_data;
-
-       if (count) {
-               if (!nowayout) {
-                       size_t i;
-
-                       /* Clear it in case it was set with a previous write */
-                       data->watchdog_expect_close = 0;
-
-                       for (i = 0; i != count; i++) {
-                               char c;
-                               if (get_user(c, buf + i))
-                                       return -EFAULT;
-                               if (c == 'V')
-                                       data->watchdog_expect_close = 1;
-                       }
-               }
-               ret = watchdog_trigger(data);
-               if (ret)
-                       return ret;
-       }
-       return count;
-}
-
-static long watchdog_ioctl(struct file *filp, unsigned int cmd,
-                          unsigned long arg)
-{
-       struct watchdog_info ident = {
-               .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
-               .identity = "sch56xx watchdog"
-       };
-       int i, ret = 0;
-       struct sch56xx_watchdog_data *data = filp->private_data;
-
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               ident.firmware_version = data->revision;
-               if (!nowayout)
-                       ident.options |= WDIOF_MAGICCLOSE;
-               if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
-                       ret = -EFAULT;
-               break;
-
-       case WDIOC_GETSTATUS:
-       case WDIOC_GETBOOTSTATUS:
-               ret = put_user(0, (int __user *)arg);
-               break;
-
-       case WDIOC_KEEPALIVE:
-               ret = watchdog_trigger(data);
-               break;
+       struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
 
-       case WDIOC_GETTIMEOUT:
-               i = watchdog_get_timeout(data);
-               ret = put_user(i, (int __user *)arg);
-               break;
-
-       case WDIOC_SETTIMEOUT:
-               if (get_user(i, (int __user *)arg)) {
-                       ret = -EFAULT;
-                       break;
-               }
-               ret = watchdog_set_timeout(data, i);
-               if (ret >= 0)
-                       ret = put_user(ret, (int __user *)arg);
-               break;
-
-       case WDIOC_SETOPTIONS:
-               if (get_user(i, (int __user *)arg)) {
-                       ret = -EFAULT;
-                       break;
-               }
-
-               if (i & WDIOS_DISABLECARD)
-                       ret = watchdog_stop(data);
-               else if (i & WDIOS_ENABLECARD)
-                       ret = watchdog_trigger(data);
-               else
-                       ret = -EINVAL;
-               break;
-
-       default:
-               ret = -ENOTTY;
-       }
-       return ret;
+       kref_put(&data->kref, watchdog_release_resources);
 }
 
-static const struct file_operations watchdog_fops = {
-       .owner = THIS_MODULE,
-       .llseek = no_llseek,
-       .open = watchdog_open,
-       .release = watchdog_release,
-       .write = watchdog_write,
-       .unlocked_ioctl = watchdog_ioctl,
+static const struct watchdog_ops watchdog_ops = {
+       .owner          = THIS_MODULE,
+       .start          = watchdog_start,
+       .stop           = watchdog_stop,
+       .ping           = watchdog_trigger,
+       .set_timeout    = watchdog_set_timeout,
+       .ref            = watchdog_ref,
+       .unref          = watchdog_unref,
 };
 
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(
+struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
        u16 addr, u32 revision, struct mutex *io_lock, int check_enabled)
 {
        struct sch56xx_watchdog_data *data;
-       int i, err, control, output_enable;
-       const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
+       int err, control, output_enable;
 
        /* Cache the watchdog registers */
        mutex_lock(io_lock);
@@ -656,82 +447,55 @@ struct sch56xx_watchdog_data *sch56xx_watchdog_register(
                return NULL;
 
        data->addr = addr;
-       data->revision = revision;
        data->io_lock = io_lock;
-       data->watchdog_control = control;
-       data->watchdog_output_enable = output_enable;
-       mutex_init(&data->watchdog_lock);
-       INIT_LIST_HEAD(&data->list);
        kref_init(&data->kref);
 
-       err = watchdog_set_timeout(data, 60);
-       if (err < 0)
-               goto error;
-
-       /*
-        * We take the data_mutex lock early so that watchdog_open() cannot
-        * run when misc_register() has completed, but we've not yet added
-        * our data to the watchdog_data_list.
-        */
-       mutex_lock(&watchdog_data_mutex);
-       for (i = 0; i < ARRAY_SIZE(watchdog_minors); i++) {
-               /* Register our watchdog part */
-               snprintf(data->watchdog_name, sizeof(data->watchdog_name),
-                       "watchdog%c", (i == 0) ? '\0' : ('0' + i));
-               data->watchdog_miscdev.name = data->watchdog_name;
-               data->watchdog_miscdev.fops = &watchdog_fops;
-               data->watchdog_miscdev.minor = watchdog_minors[i];
-               err = misc_register(&data->watchdog_miscdev);
-               if (err == -EBUSY)
-                       continue;
-               if (err)
-                       break;
+       strlcpy(data->wdinfo.identity, "sch56xx watchdog",
+               sizeof(data->wdinfo.identity));
+       data->wdinfo.firmware_version = revision;
+       data->wdinfo.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT;
+       if (!nowayout)
+               data->wdinfo.options |= WDIOF_MAGICCLOSE;
+
+       data->wddev.info = &data->wdinfo;
+       data->wddev.ops = &watchdog_ops;
+       data->wddev.parent = parent;
+       data->wddev.timeout = 60;
+       data->wddev.min_timeout = 1;
+       data->wddev.max_timeout = 255 * 60;
+       if (nowayout)
+               set_bit(WDOG_NO_WAY_OUT, &data->wddev.status);
+       if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)
+               set_bit(WDOG_ACTIVE, &data->wddev.status);
+
+       /* Since the watchdog uses a downcounter there is no register to read
+          the BIOS set timeout from (if any was set at all) ->
+          Choose a preset which will give us a 1 minute timeout */
+       if (control & SCH56XX_WDOG_TIME_BASE_SEC)
+               data->watchdog_preset = 60; /* seconds */
+       else
+               data->watchdog_preset = 1; /* minute */
 
-               list_add(&data->list, &watchdog_data_list);
-               pr_info("Registered /dev/%s chardev major 10, minor: %d\n",
-                       data->watchdog_name, watchdog_minors[i]);
-               break;
-       }
-       mutex_unlock(&watchdog_data_mutex);
+       data->watchdog_control = control;
+       data->watchdog_output_enable = output_enable;
 
+       watchdog_set_drvdata(&data->wddev, data);
+       err = watchdog_register_device(&data->wddev);
        if (err) {
                pr_err("Registering watchdog chardev: %d\n", err);
-               goto error;
-       }
-       if (i == ARRAY_SIZE(watchdog_minors)) {
-               pr_warn("Couldn't register watchdog (no free minor)\n");
-               goto error;
+               kfree(data);
+               return NULL;
        }
 
        return data;
-
-error:
-       kfree(data);
-       return NULL;
 }
 EXPORT_SYMBOL(sch56xx_watchdog_register);
 
 void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data)
 {
-       mutex_lock(&watchdog_data_mutex);
-       misc_deregister(&data->watchdog_miscdev);
-       list_del(&data->list);
-       mutex_unlock(&watchdog_data_mutex);
-
-       mutex_lock(&data->watchdog_lock);
-       if (data->watchdog_is_open) {
-               pr_warn("platform device unregistered with watchdog "
-                       "open! Stopping watchdog.\n");
-               watchdog_stop_unlocked(data);
-       }
-       /* Tell the wdog start/stop/trigger functions our dev is gone */
-       data->addr = 0;
-       data->io_lock = NULL;
-       mutex_unlock(&data->watchdog_lock);
-
-       mutex_lock(&watchdog_data_mutex);
-       kref_put(&data->kref, sch56xx_watchdog_release_resources);
-       mutex_unlock(&watchdog_data_mutex);
+       watchdog_unregister_device(&data->wddev);
+       kref_put(&data->kref, watchdog_release_resources);
+       /* Don't touch data after this it may have been free-ed! */
 }
 EXPORT_SYMBOL(sch56xx_watchdog_unregister);
 
index 7475086eb978e148f2e5c8039f7bedad2052b0ea..704ea2c6d28a772695d73000f1480032dcd649e8 100644 (file)
@@ -27,6 +27,6 @@ int sch56xx_read_virtual_reg16(u16 addr, u16 reg);
 int sch56xx_read_virtual_reg12(u16 addr, u16 msb_reg, u16 lsn_reg,
                               int high_nibble);
 
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(
+struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
        u16 addr, u32 revision, struct mutex *io_lock, int check_enabled);
 void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data);
index 5f13c62e64b4c4053c8d8cf8ebc60a1d7f144726..5a3bb3d738d853c4f016180de15181564d5b0bc8 100644 (file)
@@ -49,7 +49,6 @@ config I2C_CHARDEV
 
 config I2C_MUX
        tristate "I2C bus multiplexing support"
-       depends on EXPERIMENTAL
        help
          Say Y here if you want the I2C core to support the ability to
          handle multiplexed I2C bus topologies, by presenting each
index 7f0b83219744e52aa7cd68a7ac5ea6edc49537d1..fad22b0bb5b06fff58eb7f5fd49533fd841a7464 100644 (file)
@@ -608,7 +608,7 @@ bailout:
 
 static u32 bit_func(struct i2c_adapter *adap)
 {
-       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+       return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL |
               I2C_FUNC_SMBUS_READ_BLOCK_DATA |
               I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
               I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
index 94468a64ce3ac5c56ddf0a04bc8441ca2d9df0a3..7244c8be606360dd10455b8984e344479f0d8bb1 100644 (file)
@@ -445,20 +445,6 @@ config I2C_IOP3XX
          This driver can also be built as a module.  If so, the module
          will be called i2c-iop3xx.
 
-config I2C_IXP2000
-       tristate "IXP2000 GPIO-Based I2C Interface (DEPRECATED)"
-       depends on ARCH_IXP2000
-       select I2C_ALGOBIT
-       help
-         Say Y here if you have an Intel IXP2000 (2400, 2800, 2850) based
-         system and are using GPIO lines for an I2C bus.
-
-         This support is also available as a module. If so, the module
-         will be called i2c-ixp2000.
-
-         This driver is deprecated and will be dropped soon. Use i2c-gpio
-         instead.
-
 config I2C_MPC
        tristate "MPC107/824x/85xx/512x/52xx/83xx/86xx"
        depends on PPC
@@ -483,6 +469,7 @@ config I2C_MV64XXX
 config I2C_MXS
        tristate "Freescale i.MX28 I2C interface"
        depends on SOC_IMX28
+       select STMP_DEVICE
        help
          Say Y here if you want to use the I2C bus controller on
          the Freescale i.MX28 processors.
index 569567b0d02704653a8884757fa47aaba0fb453e..ce3c2be7fb40a6cb453a92a9cc1eb89da510383f 100644 (file)
@@ -44,7 +44,6 @@ obj-$(CONFIG_I2C_IBM_IIC)     += i2c-ibm_iic.o
 obj-$(CONFIG_I2C_IMX)          += i2c-imx.o
 obj-$(CONFIG_I2C_INTEL_MID)    += i2c-intel-mid.o
 obj-$(CONFIG_I2C_IOP3XX)       += i2c-iop3xx.o
-obj-$(CONFIG_I2C_IXP2000)      += i2c-ixp2000.o
 obj-$(CONFIG_I2C_MPC)          += i2c-mpc.o
 obj-$(CONFIG_I2C_MV64XXX)      += i2c-mv64xxx.o
 obj-$(CONFIG_I2C_MXS)          += i2c-mxs.o
index a76d85fa3ad781d15a90f73875a1c857703d14a8..79b4bcb3b85cea6d79d1c447200748811d74d6fc 100644 (file)
@@ -755,7 +755,7 @@ static int davinci_i2c_remove(struct platform_device *pdev)
        dev->clk = NULL;
 
        davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
-       free_irq(IRQ_I2C, dev);
+       free_irq(dev->irq, dev);
        iounmap(dev->base);
        kfree(dev);
 
index df8799241009e85ca045fb149f3e108986a5a09c..1e48bec80edfb08a0628cc816004c1955075fc42 100644 (file)
@@ -164,9 +164,15 @@ static char *abort_sources[] = {
 
 u32 dw_readl(struct dw_i2c_dev *dev, int offset)
 {
-       u32 value = readl(dev->base + offset);
+       u32 value;
 
-       if (dev->swab)
+       if (dev->accessor_flags & ACCESS_16BIT)
+               value = readw(dev->base + offset) |
+                       (readw(dev->base + offset + 2) << 16);
+       else
+               value = readl(dev->base + offset);
+
+       if (dev->accessor_flags & ACCESS_SWAP)
                return swab32(value);
        else
                return value;
@@ -174,10 +180,15 @@ u32 dw_readl(struct dw_i2c_dev *dev, int offset)
 
 void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
 {
-       if (dev->swab)
+       if (dev->accessor_flags & ACCESS_SWAP)
                b = swab32(b);
 
-       writel(b, dev->base + offset);
+       if (dev->accessor_flags & ACCESS_16BIT) {
+               writew((u16)b, dev->base + offset);
+               writew((u16)(b >> 16), dev->base + offset + 2);
+       } else {
+               writel(b, dev->base + offset);
+       }
 }
 
 static u32
@@ -251,14 +262,14 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
 
        input_clock_khz = dev->get_clk_rate_khz(dev);
 
-       /* Configure register endianess access */
        reg = dw_readl(dev, DW_IC_COMP_TYPE);
        if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
-               dev->swab = 1;
-               reg = DW_IC_COMP_TYPE_VALUE;
-       }
-
-       if (reg != DW_IC_COMP_TYPE_VALUE) {
+               /* Configure register endianess access */
+               dev->accessor_flags |= ACCESS_SWAP;
+       } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
+               /* Configure register access mode 16bit */
+               dev->accessor_flags |= ACCESS_16BIT;
+       } else if (reg != DW_IC_COMP_TYPE_VALUE) {
                dev_err(dev->dev, "Unknown Synopsys component type: "
                        "0x%08x\n", reg);
                return -ENODEV;
index 02d1a2ddd853bed03ecbffa6b13b839c46f72038..9c1840ee09c7a4fde94a468a5ff339bdb42ae2ac 100644 (file)
@@ -82,7 +82,7 @@ struct dw_i2c_dev {
        unsigned int            status;
        u32                     abort_source;
        int                     irq;
-       int                     swab;
+       u32                     accessor_flags;
        struct i2c_adapter      adapter;
        u32                     functionality;
        u32                     master_cfg;
@@ -90,6 +90,9 @@ struct dw_i2c_dev {
        unsigned int            rx_fifo_depth;
 };
 
+#define ACCESS_SWAP            0x00000001
+#define ACCESS_16BIT           0x00000002
+
 extern u32 dw_readl(struct dw_i2c_dev *dev, int offset);
 extern void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset);
 extern int i2c_dw_init(struct dw_i2c_dev *dev);
index 4ba589ab8614050d65ec1f45d918899a36e3ac0b..0506fef8dc001ed8a8bb026ed9cae3674309d5ce 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/interrupt.h>
 #include <linux/of_i2c.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 #include <linux/io.h>
 #include <linux/slab.h>
 #include "i2c-designware-core.h"
@@ -95,7 +96,7 @@ static int __devinit dw_i2c_probe(struct platform_device *pdev)
                r = -ENODEV;
                goto err_free_mem;
        }
-       clk_enable(dev->clk);
+       clk_prepare_enable(dev->clk);
 
        dev->functionality =
                I2C_FUNC_I2C |
@@ -155,7 +156,7 @@ err_free_irq:
 err_iounmap:
        iounmap(dev->base);
 err_unuse_clocks:
-       clk_disable(dev->clk);
+       clk_disable_unprepare(dev->clk);
        clk_put(dev->clk);
        dev->clk = NULL;
 err_free_mem:
@@ -177,7 +178,7 @@ static int __devexit dw_i2c_remove(struct platform_device *pdev)
        i2c_del_adapter(&dev->adapter);
        put_device(&pdev->dev);
 
-       clk_disable(dev->clk);
+       clk_disable_unprepare(dev->clk);
        clk_put(dev->clk);
        dev->clk = NULL;
 
@@ -198,6 +199,31 @@ static const struct of_device_id dw_i2c_of_match[] = {
 MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
 #endif
 
+#ifdef CONFIG_PM
+static int dw_i2c_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
+
+       clk_disable_unprepare(i_dev->clk);
+
+       return 0;
+}
+
+static int dw_i2c_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
+
+       clk_prepare_enable(i_dev->clk);
+       i2c_dw_init(i_dev);
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
+
 /* work with hotplug and coldplug */
 MODULE_ALIAS("platform:i2c_designware");
 
@@ -207,6 +233,7 @@ static struct platform_driver dw_i2c_driver = {
                .name   = "i2c_designware",
                .owner  = THIS_MODULE,
                .of_match_table = of_match_ptr(dw_i2c_of_match),
+               .pm     = &dw_i2c_dev_pm_ops,
        },
 };
 
index c811289b61e21628f28d79b71f27651c39e3e024..2f74ae872e1e5512239a9e045b19e31e797507b1 100644 (file)
@@ -263,11 +263,6 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
        init_waitqueue_head(&pch_event);
 }
 
-static inline bool ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
-{
-       return cmp1.tv64 < cmp2.tv64;
-}
-
 /**
  * pch_i2c_wait_for_bus_idle() - check the status of bus.
  * @adap:      Pointer to struct i2c_algo_pch_data.
@@ -316,33 +311,6 @@ static void pch_i2c_start(struct i2c_algo_pch_data *adap)
        pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_START);
 }
 
-/**
- * pch_i2c_wait_for_xfer_complete() - initiates a wait for the tx complete event
- * @adap:      Pointer to struct i2c_algo_pch_data.
- */
-static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap)
-{
-       long ret;
-       ret = wait_event_timeout(pch_event,
-                       (adap->pch_event_flag != 0), msecs_to_jiffies(1000));
-
-       if (ret == 0) {
-               pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
-               adap->pch_event_flag = 0;
-               return -ETIMEDOUT;
-       }
-
-       if (adap->pch_event_flag & I2C_ERROR_MASK) {
-               pch_err(adap, "error bits set: %x\n", adap->pch_event_flag);
-               adap->pch_event_flag = 0;
-               return -EIO;
-       }
-
-       adap->pch_event_flag = 0;
-
-       return 0;
-}
-
 /**
  * pch_i2c_getack() - to confirm ACK/NACK
  * @adap:      Pointer to struct i2c_algo_pch_data.
@@ -373,6 +341,40 @@ static void pch_i2c_stop(struct i2c_algo_pch_data *adap)
        pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_START);
 }
 
+static int pch_i2c_wait_for_check_xfer(struct i2c_algo_pch_data *adap)
+{
+       long ret;
+
+       ret = wait_event_timeout(pch_event,
+                       (adap->pch_event_flag != 0), msecs_to_jiffies(1000));
+       if (!ret) {
+               pch_err(adap, "%s:wait-event timeout\n", __func__);
+               adap->pch_event_flag = 0;
+               pch_i2c_stop(adap);
+               pch_i2c_init(adap);
+               return -ETIMEDOUT;
+       }
+
+       if (adap->pch_event_flag & I2C_ERROR_MASK) {
+               pch_err(adap, "Lost Arbitration\n");
+               adap->pch_event_flag = 0;
+               pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
+               pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
+               pch_i2c_init(adap);
+               return -EAGAIN;
+       }
+
+       adap->pch_event_flag = 0;
+
+       if (pch_i2c_getack(adap)) {
+               pch_dbg(adap, "Receive NACK for slave address"
+                       "setting\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
 /**
  * pch_i2c_repstart() - generate repeated start condition in normal mode
  * @adap:      Pointer to struct i2c_algo_pch_data.
@@ -427,27 +429,12 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
                if (first)
                        pch_i2c_start(adap);
 
-               rtn = pch_i2c_wait_for_xfer_complete(adap);
-               if (rtn == 0) {
-                       if (pch_i2c_getack(adap)) {
-                               pch_dbg(adap, "Receive NACK for slave address"
-                                       "setting\n");
-                               return -EIO;
-                       }
-                       addr_8_lsb = (addr & I2C_ADDR_MSK);
-                       iowrite32(addr_8_lsb, p + PCH_I2CDR);
-               } else if (rtn == -EIO) { /* Arbitration Lost */
-                       pch_err(adap, "Lost Arbitration\n");
-                       pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-                                  I2CMAL_BIT);
-                       pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-                                  I2CMIF_BIT);
-                       pch_i2c_init(adap);
-                       return -EAGAIN;
-               } else { /* wait-event timeout */
-                       pch_i2c_stop(adap);
-                       return -ETIME;
-               }
+               rtn = pch_i2c_wait_for_check_xfer(adap);
+               if (rtn)
+                       return rtn;
+
+               addr_8_lsb = (addr & I2C_ADDR_MSK);
+               iowrite32(addr_8_lsb, p + PCH_I2CDR);
        } else {
                /* set 7 bit slave address and R/W bit as 0 */
                iowrite32(addr << 1, p + PCH_I2CDR);
@@ -455,44 +442,21 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
                        pch_i2c_start(adap);
        }
 
-       rtn = pch_i2c_wait_for_xfer_complete(adap);
-       if (rtn == 0) {
-               if (pch_i2c_getack(adap)) {
-                       pch_dbg(adap, "Receive NACK for slave address"
-                               "setting\n");
-                       return -EIO;
-               }
-       } else if (rtn == -EIO) { /* Arbitration Lost */
-               pch_err(adap, "Lost Arbitration\n");
-               pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
-               pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
-               pch_i2c_init(adap);
-               return -EAGAIN;
-       } else { /* wait-event timeout */
-               pch_i2c_stop(adap);
-               return -ETIME;
-       }
+       rtn = pch_i2c_wait_for_check_xfer(adap);
+       if (rtn)
+               return rtn;
 
        for (wrcount = 0; wrcount < length; ++wrcount) {
                /* write buffer value to I2C data register */
                iowrite32(buf[wrcount], p + PCH_I2CDR);
                pch_dbg(adap, "writing %x to Data register\n", buf[wrcount]);
 
-               rtn = pch_i2c_wait_for_xfer_complete(adap);
-               if (rtn == 0) {
-                       if (pch_i2c_getack(adap)) {
-                               pch_dbg(adap, "Receive NACK for slave address"
-                                       "setting\n");
-                               return -EIO;
-                       }
-                       pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-                                  I2CMCF_BIT);
-                       pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-                                  I2CMIF_BIT);
-               } else { /* wait-event timeout */
-                       pch_i2c_stop(adap);
-                       return -ETIME;
-               }
+               rtn = pch_i2c_wait_for_check_xfer(adap);
+               if (rtn)
+                       return rtn;
+
+               pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMCF_BIT);
+               pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
        }
 
        /* check if this is the last message */
@@ -580,50 +544,21 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
                if (first)
                        pch_i2c_start(adap);
 
-               rtn = pch_i2c_wait_for_xfer_complete(adap);
-               if (rtn == 0) {
-                       if (pch_i2c_getack(adap)) {
-                               pch_dbg(adap, "Receive NACK for slave address"
-                                       "setting\n");
-                               return -EIO;
-                       }
-                       addr_8_lsb = (addr & I2C_ADDR_MSK);
-                       iowrite32(addr_8_lsb, p + PCH_I2CDR);
-               } else if (rtn == -EIO) { /* Arbitration Lost */
-                       pch_err(adap, "Lost Arbitration\n");
-                       pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-                                  I2CMAL_BIT);
-                       pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-                                  I2CMIF_BIT);
-                       pch_i2c_init(adap);
-                       return -EAGAIN;
-               } else { /* wait-event timeout */
-                       pch_i2c_stop(adap);
-                       return -ETIME;
-               }
+               rtn = pch_i2c_wait_for_check_xfer(adap);
+               if (rtn)
+                       return rtn;
+
+               addr_8_lsb = (addr & I2C_ADDR_MSK);
+               iowrite32(addr_8_lsb, p + PCH_I2CDR);
+
                pch_i2c_restart(adap);
-               rtn = pch_i2c_wait_for_xfer_complete(adap);
-               if (rtn == 0) {
-                       if (pch_i2c_getack(adap)) {
-                               pch_dbg(adap, "Receive NACK for slave address"
-                                       "setting\n");
-                               return -EIO;
-                       }
-                       addr_2_msb |= I2C_RD;
-                       iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK,
-                                 p + PCH_I2CDR);
-               } else if (rtn == -EIO) { /* Arbitration Lost */
-                       pch_err(adap, "Lost Arbitration\n");
-                       pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-                                  I2CMAL_BIT);
-                       pch_clrbit(adap->pch_base_address, PCH_I2CSR,
-                                  I2CMIF_BIT);
-                       pch_i2c_init(adap);
-                       return -EAGAIN;
-               } else { /* wait-event timeout */
-                       pch_i2c_stop(adap);
-                       return -ETIME;
-               }
+
+               rtn = pch_i2c_wait_for_check_xfer(adap);
+               if (rtn)
+                       return rtn;
+
+               addr_2_msb |= I2C_RD;
+               iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
        } else {
                /* 7 address bits + R/W bit */
                addr = (((addr) << 1) | (I2C_RD));
@@ -634,23 +569,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
        if (first)
                pch_i2c_start(adap);
 
-       rtn = pch_i2c_wait_for_xfer_complete(adap);
-       if (rtn == 0) {
-               if (pch_i2c_getack(adap)) {
-                       pch_dbg(adap, "Receive NACK for slave address"
-                               "setting\n");
-                       return -EIO;
-               }
-       } else if (rtn == -EIO) { /* Arbitration Lost */
-               pch_err(adap, "Lost Arbitration\n");
-               pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
-               pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
-               pch_i2c_init(adap);
-               return -EAGAIN;
-       } else { /* wait-event timeout */
-               pch_i2c_stop(adap);
-               return -ETIME;
-       }
+       rtn = pch_i2c_wait_for_check_xfer(adap);
+       if (rtn)
+               return rtn;
 
        if (length == 0) {
                pch_i2c_stop(adap);
@@ -669,18 +590,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
                        if (loop != 1)
                                read_index++;
 
-                       rtn = pch_i2c_wait_for_xfer_complete(adap);
-                       if (rtn == 0) {
-                               if (pch_i2c_getack(adap)) {
-                                       pch_dbg(adap, "Receive NACK for slave"
-                                               "address setting\n");
-                                       return -EIO;
-                               }
-                       } else { /* wait-event timeout */
-                               pch_i2c_stop(adap);
-                               return -ETIME;
-                       }
-
+                       rtn = pch_i2c_wait_for_check_xfer(adap);
+                       if (rtn)
+                               return rtn;
                }       /* end for */
 
                pch_i2c_sendnack(adap);
@@ -690,17 +602,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
                if (length != 1)
                        read_index++;
 
-               rtn = pch_i2c_wait_for_xfer_complete(adap);
-               if (rtn == 0) {
-                       if (pch_i2c_getack(adap)) {
-                               pch_dbg(adap, "Receive NACK for slave"
-                                       "address setting\n");
-                               return -EIO;
-                       }
-               } else { /* wait-event timeout */
-                       pch_i2c_stop(adap);
-                       return -ETIME;
-               }
+               rtn = pch_i2c_wait_for_check_xfer(adap);
+               if (rtn)
+                       return rtn;
 
                if (last)
                        pch_i2c_stop(adap);
@@ -790,7 +694,7 @@ static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap,
 
        ret = mutex_lock_interruptible(&pch_mutex);
        if (ret)
-               return -ERESTARTSYS;
+               return ret;
 
        if (adap->p_adapter_info->pch_i2c_suspended) {
                mutex_unlock(&pch_mutex);
@@ -909,7 +813,7 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
 
                pch_adap->owner = THIS_MODULE;
                pch_adap->class = I2C_CLASS_HWMON;
-               strcpy(pch_adap->name, KBUILD_MODNAME);
+               strlcpy(pch_adap->name, KBUILD_MODNAME, sizeof(pch_adap->name));
                pch_adap->algo = &pch_algorithm;
                pch_adap->algo_data = &adap_info->pch_data[i];
 
@@ -963,7 +867,7 @@ static void __devexit pch_i2c_remove(struct pci_dev *pdev)
                pci_iounmap(pdev, adap_info->pch_data[0].pch_base_address);
 
        for (i = 0; i < adap_info->ch_num; i++)
-               adap_info->pch_data[i].pch_base_address = 0;
+               adap_info->pch_data[i].pch_base_address = NULL;
 
        pci_set_drvdata(pdev, NULL);
 
index c0330a41db039d15ddfece5a81a58abf50d6c1b9..e62d2d938628fecbcb507cb2ef23a93186b88a09 100644 (file)
@@ -190,12 +190,7 @@ static int __devinit i2c_gpio_probe(struct platform_device *pdev)
        adap->dev.parent = &pdev->dev;
        adap->dev.of_node = pdev->dev.of_node;
 
-       /*
-        * If "dev->id" is negative we consider it as zero.
-        * The reason to do so is to avoid sysfs names that only make
-        * sense when there are multiple adapters.
-        */
-       adap->nr = (pdev->id != -1) ? pdev->id : 0;
+       adap->nr = pdev->id;
        ret = i2c_bit_add_numbered_bus(adap);
        if (ret)
                goto err_add_bus;
index 56bce9a8bcbb3e5d4d973467f6b75a4416ebe522..8d6b504d65c44e72800514db4ac3027ce3673636 100644 (file)
@@ -512,7 +512,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
        }
 
        /* Setup i2c_imx driver structure */
-       strcpy(i2c_imx->adapter.name, pdev->name);
+       strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
        i2c_imx->adapter.owner          = THIS_MODULE;
        i2c_imx->adapter.algo           = &i2c_imx_algo;
        i2c_imx->adapter.dev.parent     = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
deleted file mode 100644 (file)
index 5d263f9..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * drivers/i2c/busses/i2c-ixp2000.c
- *
- * I2C adapter for IXP2000 systems using GPIOs for I2C bus
- *
- * Author: Deepak Saxena <dsaxena@plexity.net>
- * Based on IXDP2400 code by: Naeem M. Afzal <naeem.m.afzal@intel.com>
- * Made generic by: Jeff Daly <jeffrey.daly@intel.com>
- *
- * Copyright (c) 2003-2004 MontaVista Software Inc.
- *
- * This file is licensed under  the terms of the GNU General Public 
- * License version 2. This program is licensed "as is" without any 
- * warranty of any kind, whether express or implied.
- *
- * From Jeff Daly:
- *
- * I2C adapter driver for Intel IXDP2xxx platforms. This should work for any
- * IXP2000 platform if it uses the HW GPIO in the same manner.  Basically, 
- * SDA and SCL GPIOs have external pullups.  Setting the respective GPIO to 
- * an input will make the signal a '1' via the pullup.  Setting them to 
- * outputs will pull them down. 
- *
- * The GPIOs are open drain signals and are used as configuration strap inputs
- * during power-up so there's generally a buffer on the board that needs to be 
- * 'enabled' to drive the GPIOs.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <linux/slab.h>
-
-#include <mach/hardware.h>     /* Pick up IXP2000-specific bits */
-#include <mach/gpio-ixp2000.h>
-
-static inline int ixp2000_scl_pin(void *data)
-{
-       return ((struct ixp2000_i2c_pins*)data)->scl_pin;
-}
-
-static inline int ixp2000_sda_pin(void *data)
-{
-       return ((struct ixp2000_i2c_pins*)data)->sda_pin;
-}
-
-
-static void ixp2000_bit_setscl(void *data, int val)
-{
-       int i = 5000;
-
-       if (val) {
-               gpio_line_config(ixp2000_scl_pin(data), GPIO_IN);
-               while(!gpio_line_get(ixp2000_scl_pin(data)) && i--);
-       } else {
-               gpio_line_config(ixp2000_scl_pin(data), GPIO_OUT);
-       }
-}
-
-static void ixp2000_bit_setsda(void *data, int val)
-{
-       if (val) {
-               gpio_line_config(ixp2000_sda_pin(data), GPIO_IN);
-       } else {
-               gpio_line_config(ixp2000_sda_pin(data), GPIO_OUT);
-       }
-}
-
-static int ixp2000_bit_getscl(void *data)
-{
-       return gpio_line_get(ixp2000_scl_pin(data));
-}
-
-static int ixp2000_bit_getsda(void *data)
-{
-       return gpio_line_get(ixp2000_sda_pin(data));
-}
-
-struct ixp2000_i2c_data {
-       struct ixp2000_i2c_pins *gpio_pins;
-       struct i2c_adapter adapter;
-       struct i2c_algo_bit_data algo_data;
-};
-
-static int ixp2000_i2c_remove(struct platform_device *plat_dev)
-{
-       struct ixp2000_i2c_data *drv_data = platform_get_drvdata(plat_dev);
-
-       platform_set_drvdata(plat_dev, NULL);
-
-       i2c_del_adapter(&drv_data->adapter);
-
-       kfree(drv_data);
-
-       return 0;
-}
-
-static int ixp2000_i2c_probe(struct platform_device *plat_dev)
-{
-       int err;
-       struct ixp2000_i2c_pins *gpio = plat_dev->dev.platform_data;
-       struct ixp2000_i2c_data *drv_data = 
-               kzalloc(sizeof(struct ixp2000_i2c_data), GFP_KERNEL);
-
-       if (!drv_data)
-               return -ENOMEM;
-       drv_data->gpio_pins = gpio;
-
-       drv_data->algo_data.data = gpio;
-       drv_data->algo_data.setsda = ixp2000_bit_setsda;
-       drv_data->algo_data.setscl = ixp2000_bit_setscl;
-       drv_data->algo_data.getsda = ixp2000_bit_getsda;
-       drv_data->algo_data.getscl = ixp2000_bit_getscl;
-       drv_data->algo_data.udelay = 6;
-       drv_data->algo_data.timeout = HZ;
-
-       strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name,
-               sizeof(drv_data->adapter.name));
-       drv_data->adapter.algo_data = &drv_data->algo_data,
-
-       drv_data->adapter.dev.parent = &plat_dev->dev;
-
-       gpio_line_config(gpio->sda_pin, GPIO_IN);
-       gpio_line_config(gpio->scl_pin, GPIO_IN);
-       gpio_line_set(gpio->scl_pin, 0);
-       gpio_line_set(gpio->sda_pin, 0);
-
-       if ((err = i2c_bit_add_bus(&drv_data->adapter)) != 0) {
-               dev_err(&plat_dev->dev, "Could not install, error %d\n", err);
-               kfree(drv_data);
-               return err;
-       } 
-
-       platform_set_drvdata(plat_dev, drv_data);
-
-       return 0;
-}
-
-static struct platform_driver ixp2000_i2c_driver = {
-       .probe          = ixp2000_i2c_probe,
-       .remove         = ixp2000_i2c_remove,
-       .driver         = {
-               .name   = "IXP2000-I2C",
-               .owner  = THIS_MODULE,
-       },
-};
-
-module_platform_driver(ixp2000_i2c_driver);
-
-MODULE_AUTHOR ("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_DESCRIPTION("IXP2000 GPIO-based I2C bus driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:IXP2000-I2C");
-
index 206caacd30d7fb52d65f59207bc82a4f7fc3912b..b76731edbf106cfbe505173a677da38df02c146b 100644 (file)
@@ -64,6 +64,9 @@ struct mpc_i2c {
        struct i2c_adapter adap;
        int irq;
        u32 real_clk;
+#ifdef CONFIG_PM
+       u8 fdr, dfsrr;
+#endif
 };
 
 struct mpc_i2c_divider {
@@ -703,6 +706,30 @@ static int __devexit fsl_i2c_remove(struct platform_device *op)
        return 0;
 };
 
+#ifdef CONFIG_PM
+static int mpc_i2c_suspend(struct device *dev)
+{
+       struct mpc_i2c *i2c = dev_get_drvdata(dev);
+
+       i2c->fdr = readb(i2c->base + MPC_I2C_FDR);
+       i2c->dfsrr = readb(i2c->base + MPC_I2C_DFSRR);
+
+       return 0;
+}
+
+static int mpc_i2c_resume(struct device *dev)
+{
+       struct mpc_i2c *i2c = dev_get_drvdata(dev);
+
+       writeb(i2c->fdr, i2c->base + MPC_I2C_FDR);
+       writeb(i2c->dfsrr, i2c->base + MPC_I2C_DFSRR);
+
+       return 0;
+}
+
+SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
+#endif
+
 static struct mpc_i2c_data mpc_i2c_data_512x __devinitdata = {
        .setup = mpc_i2c_setup_512x,
 };
@@ -747,6 +774,9 @@ static struct platform_driver mpc_i2c_driver = {
                .owner = THIS_MODULE,
                .name = DRV_NAME,
                .of_match_table = mpc_i2c_of_match,
+#ifdef CONFIG_PM
+               .pm = &mpc_i2c_pm_ops,
+#endif
        },
 };
 
index 7fa73eed84a7b695ed4fd45fda8b2cb9ee303182..04eb441b6ce1945b8375b81cb7c7cd89cf172f7c 100644 (file)
 #include <linux/jiffies.h>
 #include <linux/io.h>
 #include <linux/pinctrl/consumer.h>
-
-#include <mach/common.h>
+#include <linux/stmp_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_i2c.h>
 
 #define DRIVER_NAME "mxs-i2c"
 
@@ -112,13 +114,9 @@ struct mxs_i2c_dev {
        struct i2c_adapter adapter;
 };
 
-/*
- * TODO: check if calls to here are really needed. If not, we could get rid of
- * mxs_reset_block and the mach-dependency. Needs an I2C analyzer, probably.
- */
 static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
 {
-       mxs_reset_block(i2c->regs);
+       stmp_reset_block(i2c->regs);
        writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
        writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
                        i2c->regs + MXS_I2C_QUEUECTRL_SET);
@@ -371,6 +369,7 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
        adap->algo = &mxs_i2c_algo;
        adap->dev.parent = dev;
        adap->nr = pdev->id;
+       adap->dev.of_node = pdev->dev.of_node;
        i2c_set_adapdata(adap, i2c);
        err = i2c_add_numbered_adapter(adap);
        if (err) {
@@ -380,6 +379,8 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
                return err;
        }
 
+       of_i2c_register_devices(adap);
+
        return 0;
 }
 
@@ -399,10 +400,17 @@ static int __devexit mxs_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id mxs_i2c_dt_ids[] = {
+       { .compatible = "fsl,imx28-i2c", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids);
+
 static struct platform_driver mxs_i2c_driver = {
        .driver = {
                   .name = DRIVER_NAME,
                   .owner = THIS_MODULE,
+                  .of_match_table = mxs_i2c_dt_ids,
                   },
        .remove = __devexit_p(mxs_i2c_remove),
 };
index 03b61577888748a4d9a61cfe46b413eaaa219737..a26dfb8cd58690ce3df06c1c1685118899259a11 100644 (file)
@@ -502,7 +502,8 @@ static int nuc900_i2c_xfer(struct i2c_adapter *adap,
 /* declare our i2c functionality */
 static u32 nuc900_i2c_func(struct i2c_adapter *adap)
 {
-       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+               I2C_FUNC_PROTOCOL_MANGLING;
 }
 
 /* i2c bus registration info */
index 18068dee48f1aa11543413894485e4916bfc5811..75194c579b6d78f419f8886de4d028a2447e08a4 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/i2c-ocores.h>
 #include <linux/slab.h>
 #include <linux/io.h>
+#include <linux/of_i2c.h>
 
 struct ocores_i2c {
        void __iomem *base;
@@ -343,6 +344,8 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
        if (pdata) {
                for (i = 0; i < pdata->num_devices; i++)
                        i2c_new_device(&i2c->adap, pdata->devices + i);
+       } else {
+               of_i2c_register_devices(&i2c->adap);
        }
 
        return 0;
index 2adbf1a8fdea0019d8ca3acae29790ca18722552..675878f49f76a2ad1c1235068c775abfc583ef0e 100644 (file)
@@ -171,7 +171,7 @@ static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
        i2c->io_size = resource_size(res);
        i2c->irq = irq;
 
-       i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0;
+       i2c->adap.nr = pdev->id;
        i2c->adap.owner = THIS_MODULE;
        snprintf(i2c->adap.name, sizeof(i2c->adap.name),
                 "PCA9564/PCA9665 at 0x%08lx",
index f6733267fa9cba0baf67ec3bf1ef862a22ccf02f..a997c7d3f95dec538c68dfc8948906b87395df2a 100644 (file)
@@ -1131,11 +1131,6 @@ static int i2c_pxa_probe(struct platform_device *dev)
        spin_lock_init(&i2c->lock);
        init_waitqueue_head(&i2c->wait);
 
-       /*
-        * If "dev->id" is negative we consider it as zero.
-        * The reason to do so is to avoid sysfs names that only make
-        * sense when there are multiple adapters.
-        */
        i2c->adap.nr = dev->id;
        snprintf(i2c->adap.name, sizeof(i2c->adap.name), "pxa_i2c-i2c.%u",
                 i2c->adap.nr);
index 737f7218a32ce5b136af3ecb4156f807f4fd28b8..01959154572d88f0eb954327759fc66bcf9da5c8 100644 (file)
 #include <plat/regs-iic.h>
 #include <plat/iic.h>
 
-/* i2c controller state */
+/* Treat S3C2410 as baseline hardware, anything else is supported via quirks */
+#define QUIRK_S3C2440          (1 << 0)
+#define QUIRK_HDMIPHY          (1 << 1)
+#define QUIRK_NO_GPIO          (1 << 2)
 
+/* i2c controller state */
 enum s3c24xx_i2c_state {
        STATE_IDLE,
        STATE_START,
@@ -54,14 +58,10 @@ enum s3c24xx_i2c_state {
        STATE_STOP
 };
 
-enum s3c24xx_i2c_type {
-       TYPE_S3C2410,
-       TYPE_S3C2440,
-};
-
 struct s3c24xx_i2c {
        spinlock_t              lock;
        wait_queue_head_t       wait;
+       unsigned int            quirks;
        unsigned int            suspended:1;
 
        struct i2c_msg          *msg;
@@ -88,26 +88,45 @@ struct s3c24xx_i2c {
 #endif
 };
 
-/* default platform data removed, dev should always carry data. */
+static struct platform_device_id s3c24xx_driver_ids[] = {
+       {
+               .name           = "s3c2410-i2c",
+               .driver_data    = 0,
+       }, {
+               .name           = "s3c2440-i2c",
+               .driver_data    = QUIRK_S3C2440,
+       }, {
+               .name           = "s3c2440-hdmiphy-i2c",
+               .driver_data    = QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO,
+       }, { },
+};
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id s3c24xx_i2c_match[] = {
+       { .compatible = "samsung,s3c2410-i2c", .data = (void *)0 },
+       { .compatible = "samsung,s3c2440-i2c", .data = (void *)QUIRK_S3C2440 },
+       { .compatible = "samsung,s3c2440-hdmiphy-i2c",
+         .data = (void *)(QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO) },
+       {},
+};
+MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
+#endif
 
-/* s3c24xx_i2c_is2440()
+/* s3c24xx_get_device_quirks
  *
- * return true is this is an s3c2440
+ * Get controller type either from device tree or platform device variant.
 */
 
-static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c)
+static inline unsigned int s3c24xx_get_device_quirks(struct platform_device *pdev)
 {
-       struct platform_device *pdev = to_platform_device(i2c->dev);
-       enum s3c24xx_i2c_type type;
-
-#ifdef CONFIG_OF
-       if (i2c->dev->of_node)
-               return of_device_is_compatible(i2c->dev->of_node,
-                               "samsung,s3c2440-i2c");
-#endif
+       if (pdev->dev.of_node) {
+               const struct of_device_id *match;
+               match = of_match_node(&s3c24xx_i2c_match, pdev->dev.of_node);
+               return (unsigned int)match->data;
+       }
 
-       type = platform_get_device_id(pdev)->driver_data;
-       return type == TYPE_S3C2440;
+       return platform_get_device_id(pdev)->driver_data;
 }
 
 /* s3c24xx_i2c_master_complete
@@ -471,6 +490,13 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
        unsigned long iicstat;
        int timeout = 400;
 
+       /* the timeout for HDMIPHY is reduced to 10 ms because
+        * the hangup is expected to happen, so waiting 400 ms
+        * causes only unnecessary system hangup
+        */
+       if (i2c->quirks & QUIRK_HDMIPHY)
+               timeout = 10;
+
        while (timeout-- > 0) {
                iicstat = readl(i2c->regs + S3C2410_IICSTAT);
 
@@ -480,6 +506,15 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
                msleep(1);
        }
 
+       /* hang-up of bus dedicated for HDMIPHY occurred, resetting */
+       if (i2c->quirks & QUIRK_HDMIPHY) {
+               writel(0, i2c->regs + S3C2410_IICCON);
+               writel(0, i2c->regs + S3C2410_IICSTAT);
+               writel(0, i2c->regs + S3C2410_IICDS);
+
+               return 0;
+       }
+
        return -ETIMEDOUT;
 }
 
@@ -591,7 +626,8 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
 /* declare our i2c functionality */
 static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
 {
-       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+               I2C_FUNC_PROTOCOL_MANGLING;
 }
 
 /* i2c bus registration info */
@@ -676,7 +712,7 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
 
        writel(iiccon, i2c->regs + S3C2410_IICCON);
 
-       if (s3c24xx_i2c_is2440(i2c)) {
+       if (i2c->quirks & QUIRK_S3C2440) {
                unsigned long sda_delay;
 
                if (pdata->sda_delay) {
@@ -761,6 +797,9 @@ static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
 {
        int idx, gpio, ret;
 
+       if (i2c->quirks & QUIRK_NO_GPIO)
+               return 0;
+
        for (idx = 0; idx < 2; idx++) {
                gpio = of_get_gpio(i2c->dev->of_node, idx);
                if (!gpio_is_valid(gpio)) {
@@ -785,6 +824,10 @@ free_gpio:
 static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
 {
        unsigned int idx;
+
+       if (i2c->quirks & QUIRK_NO_GPIO)
+               return;
+
        for (idx = 0; idx < 2; idx++)
                gpio_free(i2c->gpios[idx]);
 }
@@ -906,6 +949,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
                goto err_noclk;
        }
 
+       i2c->quirks = s3c24xx_get_device_quirks(pdev);
        if (pdata)
                memcpy(i2c->pdata, pdata, sizeof(*pdata));
        else
@@ -1110,28 +1154,6 @@ static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
 
 /* device driver for platform bus bits */
 
-static struct platform_device_id s3c24xx_driver_ids[] = {
-       {
-               .name           = "s3c2410-i2c",
-               .driver_data    = TYPE_S3C2410,
-       }, {
-               .name           = "s3c2440-i2c",
-               .driver_data    = TYPE_S3C2440,
-       }, { },
-};
-MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
-
-#ifdef CONFIG_OF
-static const struct of_device_id s3c24xx_i2c_match[] = {
-       { .compatible = "samsung,s3c2410-i2c" },
-       { .compatible = "samsung,s3c2440-i2c" },
-       {},
-};
-MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
-#else
-#define s3c24xx_i2c_match NULL
-#endif
-
 static struct platform_driver s3c24xx_i2c_driver = {
        .probe          = s3c24xx_i2c_probe,
        .remove         = s3c24xx_i2c_remove,
@@ -1140,7 +1162,7 @@ static struct platform_driver s3c24xx_i2c_driver = {
                .owner  = THIS_MODULE,
                .name   = "s3c-i2c",
                .pm     = S3C24XX_DEV_PM_OPS,
-               .of_match_table = s3c24xx_i2c_match,
+               .of_match_table = of_match_ptr(s3c24xx_i2c_match),
        },
 };
 
index 675c9692d14860553bbdb03154e5fd88a5e7526f..8110ca45f3420f56bb3549bfcd8a2393fb0879d7 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
+#include <linux/of_i2c.h>
 #include <linux/err.h>
 #include <linux/pm_runtime.h>
 #include <linux/clk.h>
@@ -653,6 +654,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
        adap->dev.parent = &dev->dev;
        adap->retries = 5;
        adap->nr = dev->id;
+       adap->dev.of_node = dev->dev.of_node;
 
        strlcpy(adap->name, dev->name, sizeof(adap->name));
 
@@ -667,6 +669,8 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
 
        dev_info(&dev->dev, "I2C adapter %d with bus speed %lu Hz\n",
                 adap->nr, pd->bus_speed);
+
+       of_i2c_register_devices(adap);
        return 0;
 
  err_all:
@@ -710,11 +714,18 @@ static const struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = {
        .runtime_resume = sh_mobile_i2c_runtime_nop,
 };
 
+static const struct of_device_id sh_mobile_i2c_dt_ids[] __devinitconst = {
+       { .compatible = "renesas,rmobile-iic", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids);
+
 static struct platform_driver sh_mobile_i2c_driver = {
        .driver         = {
                .name           = "i2c-sh_mobile",
                .owner          = THIS_MODULE,
                .pm             = &sh_mobile_i2c_dev_pm_ops,
+               .of_match_table = sh_mobile_i2c_dt_ids,
        },
        .probe          = sh_mobile_i2c_probe,
        .remove         = sh_mobile_i2c_remove,
index 55e5ea62ccee3b69148c13ce337a1ff8531bc467..8b2e555a9563204476be25184d442d0422a49920 100644 (file)
@@ -401,8 +401,6 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
                        disable_irq_nosync(i2c_dev->irq);
                        i2c_dev->irq_disabled = 1;
                }
-
-               complete(&i2c_dev->msg_complete);
                goto err;
        }
 
@@ -411,7 +409,6 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
                        i2c_dev->msg_err |= I2C_ERR_NO_ACK;
                if (status & I2C_INT_ARBITRATION_LOST)
                        i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST;
-               complete(&i2c_dev->msg_complete);
                goto err;
        }
 
@@ -429,14 +426,14 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
                        tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
        }
 
+       i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+       if (i2c_dev->is_dvc)
+               dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+
        if (status & I2C_INT_PACKET_XFER_COMPLETE) {
                BUG_ON(i2c_dev->msg_buf_remaining);
                complete(&i2c_dev->msg_complete);
        }
-
-       i2c_writel(i2c_dev, status, I2C_INT_STATUS);
-       if (i2c_dev->is_dvc)
-               dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
        return IRQ_HANDLED;
 err:
        /* An error occurred, mask all interrupts */
@@ -446,6 +443,8 @@ err:
        i2c_writel(i2c_dev, status, I2C_INT_STATUS);
        if (i2c_dev->is_dvc)
                dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+
+       complete(&i2c_dev->msg_complete);
        return IRQ_HANDLED;
 }
 
@@ -476,12 +475,15 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
        packet_header = msg->len - 1;
        i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
 
-       packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
-       packet_header |= I2C_HEADER_IE_ENABLE;
+       packet_header = I2C_HEADER_IE_ENABLE;
        if (!stop)
                packet_header |= I2C_HEADER_REPEAT_START;
-       if (msg->flags & I2C_M_TEN)
+       if (msg->flags & I2C_M_TEN) {
+               packet_header |= msg->addr;
                packet_header |= I2C_HEADER_10BIT_ADDR;
+       } else {
+               packet_header |= msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
+       }
        if (msg->flags & I2C_M_IGNORE_NAK)
                packet_header |= I2C_HEADER_CONT_ON_NAK;
        if (msg->flags & I2C_M_RD)
@@ -557,7 +559,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 
 static u32 tegra_i2c_func(struct i2c_adapter *adap)
 {
-       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR;
 }
 
 static const struct i2c_algorithm tegra_i2c_algo = {
index f585aead50cc32eb045010406ab8e7e7e0b61d97..eec20db6246fac995f3bde2ae1e45177b37d4fe3 100644 (file)
@@ -104,13 +104,8 @@ static int i2c_versatile_probe(struct platform_device *dev)
        i2c->algo = i2c_versatile_algo;
        i2c->algo.data = i2c;
 
-       if (dev->id >= 0) {
-               /* static bus numbering */
-               i2c->adap.nr = dev->id;
-               ret = i2c_bit_add_numbered_bus(&i2c->adap);
-       } else
-               /* dynamic bus numbering */
-               ret = i2c_bit_add_bus(&i2c->adap);
+       i2c->adap.nr = dev->id;
+       ret = i2c_bit_add_numbered_bus(&i2c->adap);
        if (ret >= 0) {
                platform_set_drvdata(dev, i2c);
                of_i2c_register_devices(&i2c->adap);
index 2bded7647ef25b1a98f2211d1723de10568e3e7f..641d0e5e33036a3643027bbd7a61b3b0c1204401 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/i2c-xiic.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/of_i2c.h>
 
 #define DRIVER_NAME "xiic-i2c"
 
@@ -705,8 +706,6 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
                goto resource_missing;
 
        pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
-       if (!pdata)
-               return -EINVAL;
 
        i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
        if (!i2c)
@@ -730,6 +729,7 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
        i2c->adap = xiic_adapter;
        i2c_set_adapdata(&i2c->adap, i2c);
        i2c->adap.dev.parent = &pdev->dev;
+       i2c->adap.dev.of_node = pdev->dev.of_node;
 
        xiic_reinit(i2c);
 
@@ -748,9 +748,13 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
                goto add_adapter_failed;
        }
 
-       /* add in known devices to the bus */
-       for (i = 0; i < pdata->num_devices; i++)
-               i2c_new_device(&i2c->adap, pdata->devices + i);
+       if (pdata) {
+               /* add in known devices to the bus */
+               for (i = 0; i < pdata->num_devices; i++)
+                       i2c_new_device(&i2c->adap, pdata->devices + i);
+       }
+
+       of_i2c_register_devices(&i2c->adap);
 
        return 0;
 
@@ -795,12 +799,21 @@ static int __devexit xiic_i2c_remove(struct platform_device* pdev)
        return 0;
 }
 
+#if defined(CONFIG_OF)
+static const struct of_device_id xiic_of_match[] __devinitconst = {
+       { .compatible = "xlnx,xps-iic-2.00.a", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, xiic_of_match);
+#endif
+
 static struct platform_driver xiic_i2c_driver = {
        .probe   = xiic_i2c_probe,
        .remove  = __devexit_p(xiic_i2c_remove),
        .driver  = {
                .owner = THIS_MODULE,
                .name = DRIVER_NAME,
+               .of_match_table = of_match_ptr(xiic_of_match),
        },
 };
 
index feb7dc359186495ead9dbff484e0ab76507eff0c..a6ad32bc0a96d633629f5a2a4cd85cdf3420c57d 100644 (file)
@@ -772,6 +772,23 @@ struct device_type i2c_adapter_type = {
 };
 EXPORT_SYMBOL_GPL(i2c_adapter_type);
 
+/**
+ * i2c_verify_adapter - return parameter as i2c_adapter or NULL
+ * @dev: device, probably from some driver model iterator
+ *
+ * When traversing the driver model tree, perhaps using driver model
+ * iterators like @device_for_each_child(), you can't assume very much
+ * about the nodes you find.  Use this function to avoid oopses caused
+ * by wrongly treating some non-I2C device as an i2c_adapter.
+ */
+struct i2c_adapter *i2c_verify_adapter(struct device *dev)
+{
+       return (dev->type == &i2c_adapter_type)
+                       ? to_i2c_adapter(dev)
+                       : NULL;
+}
+EXPORT_SYMBOL(i2c_verify_adapter);
+
 #ifdef CONFIG_I2C_COMPAT
 static struct class_compat *i2c_adapter_compat_class;
 #endif
index 45048323b75eab01d5875cb05464207c2eb614b3..5ec2261574ec4fdb7e2b6fd3d8b3879c6ff187f0 100644 (file)
@@ -265,19 +265,41 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
 
        res = 0;
        for (i = 0; i < rdwr_arg.nmsgs; i++) {
-               /* Limit the size of the message to a sane amount;
-                * and don't let length change either. */
-               if ((rdwr_pa[i].len > 8192) ||
-                   (rdwr_pa[i].flags & I2C_M_RECV_LEN)) {
+               /* Limit the size of the message to a sane amount */
+               if (rdwr_pa[i].len > 8192) {
                        res = -EINVAL;
                        break;
                }
+
                data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
                rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
                if (IS_ERR(rdwr_pa[i].buf)) {
                        res = PTR_ERR(rdwr_pa[i].buf);
                        break;
                }
+
+               /*
+                * If the message length is received from the slave (similar
+                * to SMBus block read), we must ensure that the buffer will
+                * be large enough to cope with a message length of
+                * I2C_SMBUS_BLOCK_MAX as this is the maximum underlying bus
+                * drivers allow. The first byte in the buffer must be
+                * pre-filled with the number of extra bytes, which must be
+                * at least one to hold the message length, but can be
+                * greater (for example to account for a checksum byte at
+                * the end of the message.)
+                */
+               if (rdwr_pa[i].flags & I2C_M_RECV_LEN) {
+                       if (!(rdwr_pa[i].flags & I2C_M_RD) ||
+                           rdwr_pa[i].buf[0] < 1 ||
+                           rdwr_pa[i].len < rdwr_pa[i].buf[0] +
+                                            I2C_SMBUS_BLOCK_MAX) {
+                               res = -EINVAL;
+                               break;
+                       }
+
+                       rdwr_pa[i].len = rdwr_pa[i].buf[0];
+               }
        }
        if (res < 0) {
                int j;
index d7a4833be4161d37bd4883dfa474b068224ab466..1038c381aea5bf4542bb574ae308027e607594d9 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/i2c-mux.h>
+#include <linux/of.h>
+#include <linux/of_i2c.h>
 
 /* multiplexer per channel data */
 struct i2c_mux_priv {
@@ -31,11 +33,11 @@ struct i2c_mux_priv {
        struct i2c_algorithm algo;
 
        struct i2c_adapter *parent;
-       void *mux_dev;  /* the mux chip/device */
+       void *mux_priv; /* the mux chip/device */
        u32  chan_id;   /* the channel id */
 
-       int (*select)(struct i2c_adapter *, void *mux_dev, u32 chan_id);
-       int (*deselect)(struct i2c_adapter *, void *mux_dev, u32 chan_id);
+       int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
+       int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
 };
 
 static int i2c_mux_master_xfer(struct i2c_adapter *adap,
@@ -47,11 +49,11 @@ static int i2c_mux_master_xfer(struct i2c_adapter *adap,
 
        /* Switch to the right mux port and perform the transfer. */
 
-       ret = priv->select(parent, priv->mux_dev, priv->chan_id);
+       ret = priv->select(parent, priv->mux_priv, priv->chan_id);
        if (ret >= 0)
                ret = parent->algo->master_xfer(parent, msgs, num);
        if (priv->deselect)
-               priv->deselect(parent, priv->mux_dev, priv->chan_id);
+               priv->deselect(parent, priv->mux_priv, priv->chan_id);
 
        return ret;
 }
@@ -67,12 +69,12 @@ static int i2c_mux_smbus_xfer(struct i2c_adapter *adap,
 
        /* Select the right mux port and perform the transfer. */
 
-       ret = priv->select(parent, priv->mux_dev, priv->chan_id);
+       ret = priv->select(parent, priv->mux_priv, priv->chan_id);
        if (ret >= 0)
                ret = parent->algo->smbus_xfer(parent, addr, flags,
                                        read_write, command, size, data);
        if (priv->deselect)
-               priv->deselect(parent, priv->mux_dev, priv->chan_id);
+               priv->deselect(parent, priv->mux_priv, priv->chan_id);
 
        return ret;
 }
@@ -87,7 +89,8 @@ static u32 i2c_mux_functionality(struct i2c_adapter *adap)
 }
 
 struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
-                               void *mux_dev, u32 force_nr, u32 chan_id,
+                               struct device *mux_dev,
+                               void *mux_priv, u32 force_nr, u32 chan_id,
                                int (*select) (struct i2c_adapter *,
                                               void *, u32),
                                int (*deselect) (struct i2c_adapter *,
@@ -102,7 +105,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
 
        /* Set up private adapter data */
        priv->parent = parent;
-       priv->mux_dev = mux_dev;
+       priv->mux_priv = mux_priv;
        priv->chan_id = chan_id;
        priv->select = select;
        priv->deselect = deselect;
@@ -124,6 +127,25 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
        priv->adap.algo_data = priv;
        priv->adap.dev.parent = &parent->dev;
 
+       /*
+        * Try to populate the mux adapter's of_node, expands to
+        * nothing if !CONFIG_OF.
+        */
+       if (mux_dev->of_node) {
+               struct device_node *child;
+               u32 reg;
+
+               for_each_child_of_node(mux_dev->of_node, child) {
+                       ret = of_property_read_u32(child, "reg", &reg);
+                       if (ret)
+                               continue;
+                       if (chan_id == reg) {
+                               priv->adap.dev.of_node = child;
+                               break;
+                       }
+               }
+       }
+
        if (force_nr) {
                priv->adap.nr = force_nr;
                ret = i2c_add_numbered_adapter(&priv->adap);
@@ -141,6 +163,8 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
        dev_info(&parent->dev, "Added multiplexed i2c bus %d\n",
                 i2c_adapter_id(&priv->adap));
 
+       of_i2c_register_devices(&priv->adap);
+
        return &priv->adap;
 }
 EXPORT_SYMBOL_GPL(i2c_add_mux_adapter);
index 90b7a01638998a5f9da8974429406f1f889e721f..beb2491db274ade737e785931920c2ef2b10771b 100644 (file)
@@ -15,7 +15,7 @@ config I2C_MUX_GPIO
          through GPIO pins.
 
          This driver can also be built as a module.  If so, the module
-         will be called gpio-i2cmux.
+         will be called i2c-mux-gpio.
 
 config I2C_MUX_PCA9541
        tristate "NXP PCA9541 I2C Master Selector"
@@ -25,7 +25,7 @@ config I2C_MUX_PCA9541
          I2C Master Selector.
 
          This driver can also be built as a module.  If so, the module
-         will be called pca9541.
+         will be called i2c-mux-pca9541.
 
 config I2C_MUX_PCA954x
        tristate "Philips PCA954x I2C Mux/switches"
@@ -35,6 +35,6 @@ config I2C_MUX_PCA954x
          I2C mux/switch devices.
 
          This driver can also be built as a module.  If so, the module
-         will be called pca954x.
+         will be called i2c-mux-pca954x.
 
 endmenu
index 4640436ea61f58f5562af3571899d9690eccbeb5..5826249b29ca4664f32a0f1e75e97bc70056b3c7 100644 (file)
@@ -1,8 +1,8 @@
 #
 # Makefile for multiplexer I2C chip drivers.
 
-obj-$(CONFIG_I2C_MUX_GPIO)     += gpio-i2cmux.o
-obj-$(CONFIG_I2C_MUX_PCA9541)  += pca9541.o
-obj-$(CONFIG_I2C_MUX_PCA954x)  += pca954x.o
+obj-$(CONFIG_I2C_MUX_GPIO)     += i2c-mux-gpio.o
+obj-$(CONFIG_I2C_MUX_PCA9541)  += i2c-mux-pca9541.o
+obj-$(CONFIG_I2C_MUX_PCA954x)  += i2c-mux-pca954x.o
 
 ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/muxes/gpio-i2cmux.c b/drivers/i2c/muxes/gpio-i2cmux.c
deleted file mode 100644 (file)
index e5fa695..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * I2C multiplexer using GPIO API
- *
- * Peter Korsgaard <peter.korsgaard@barco.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/i2c.h>
-#include <linux/i2c-mux.h>
-#include <linux/gpio-i2cmux.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-
-struct gpiomux {
-       struct i2c_adapter *parent;
-       struct i2c_adapter **adap; /* child busses */
-       struct gpio_i2cmux_platform_data data;
-};
-
-static void gpiomux_set(const struct gpiomux *mux, unsigned val)
-{
-       int i;
-
-       for (i = 0; i < mux->data.n_gpios; i++)
-               gpio_set_value(mux->data.gpios[i], val & (1 << i));
-}
-
-static int gpiomux_select(struct i2c_adapter *adap, void *data, u32 chan)
-{
-       struct gpiomux *mux = data;
-
-       gpiomux_set(mux, mux->data.values[chan]);
-
-       return 0;
-}
-
-static int gpiomux_deselect(struct i2c_adapter *adap, void *data, u32 chan)
-{
-       struct gpiomux *mux = data;
-
-       gpiomux_set(mux, mux->data.idle);
-
-       return 0;
-}
-
-static int __devinit gpiomux_probe(struct platform_device *pdev)
-{
-       struct gpiomux *mux;
-       struct gpio_i2cmux_platform_data *pdata;
-       struct i2c_adapter *parent;
-       int (*deselect) (struct i2c_adapter *, void *, u32);
-       unsigned initial_state;
-       int i, ret;
-
-       pdata = pdev->dev.platform_data;
-       if (!pdata) {
-               dev_err(&pdev->dev, "Missing platform data\n");
-               return -ENODEV;
-       }
-
-       parent = i2c_get_adapter(pdata->parent);
-       if (!parent) {
-               dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
-                       pdata->parent);
-               return -ENODEV;
-       }
-
-       mux = kzalloc(sizeof(*mux), GFP_KERNEL);
-       if (!mux) {
-               ret = -ENOMEM;
-               goto alloc_failed;
-       }
-
-       mux->parent = parent;
-       mux->data = *pdata;
-       mux->adap = kzalloc(sizeof(struct i2c_adapter *) * pdata->n_values,
-                           GFP_KERNEL);
-       if (!mux->adap) {
-               ret = -ENOMEM;
-               goto alloc_failed2;
-       }
-
-       if (pdata->idle != GPIO_I2CMUX_NO_IDLE) {
-               initial_state = pdata->idle;
-               deselect = gpiomux_deselect;
-       } else {
-               initial_state = pdata->values[0];
-               deselect = NULL;
-       }
-
-       for (i = 0; i < pdata->n_gpios; i++) {
-               ret = gpio_request(pdata->gpios[i], "gpio-i2cmux");
-               if (ret)
-                       goto err_request_gpio;
-               gpio_direction_output(pdata->gpios[i],
-                                     initial_state & (1 << i));
-       }
-
-       for (i = 0; i < pdata->n_values; i++) {
-               u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
-
-               mux->adap[i] = i2c_add_mux_adapter(parent, mux, nr, i,
-                                                  gpiomux_select, deselect);
-               if (!mux->adap[i]) {
-                       ret = -ENODEV;
-                       dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
-                       goto add_adapter_failed;
-               }
-       }
-
-       dev_info(&pdev->dev, "%d port mux on %s adapter\n",
-                pdata->n_values, parent->name);
-
-       platform_set_drvdata(pdev, mux);
-
-       return 0;
-
-add_adapter_failed:
-       for (; i > 0; i--)
-               i2c_del_mux_adapter(mux->adap[i - 1]);
-       i = pdata->n_gpios;
-err_request_gpio:
-       for (; i > 0; i--)
-               gpio_free(pdata->gpios[i - 1]);
-       kfree(mux->adap);
-alloc_failed2:
-       kfree(mux);
-alloc_failed:
-       i2c_put_adapter(parent);
-
-       return ret;
-}
-
-static int __devexit gpiomux_remove(struct platform_device *pdev)
-{
-       struct gpiomux *mux = platform_get_drvdata(pdev);
-       int i;
-
-       for (i = 0; i < mux->data.n_values; i++)
-               i2c_del_mux_adapter(mux->adap[i]);
-
-       for (i = 0; i < mux->data.n_gpios; i++)
-               gpio_free(mux->data.gpios[i]);
-
-       platform_set_drvdata(pdev, NULL);
-       i2c_put_adapter(mux->parent);
-       kfree(mux->adap);
-       kfree(mux);
-
-       return 0;
-}
-
-static struct platform_driver gpiomux_driver = {
-       .probe  = gpiomux_probe,
-       .remove = __devexit_p(gpiomux_remove),
-       .driver = {
-               .owner  = THIS_MODULE,
-               .name   = "gpio-i2cmux",
-       },
-};
-
-module_platform_driver(gpiomux_driver);
-
-MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver");
-MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:gpio-i2cmux");
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
new file mode 100644 (file)
index 0000000..68b1f8e
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+ * I2C multiplexer using GPIO API
+ *
+ * Peter Korsgaard <peter.korsgaard@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/i2c-mux-gpio.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+
+struct gpiomux {
+       struct i2c_adapter *parent;
+       struct i2c_adapter **adap; /* child busses */
+       struct i2c_mux_gpio_platform_data data;
+};
+
+static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
+{
+       int i;
+
+       for (i = 0; i < mux->data.n_gpios; i++)
+               gpio_set_value(mux->data.gpios[i], val & (1 << i));
+}
+
+static int i2c_mux_gpio_select(struct i2c_adapter *adap, void *data, u32 chan)
+{
+       struct gpiomux *mux = data;
+
+       i2c_mux_gpio_set(mux, mux->data.values[chan]);
+
+       return 0;
+}
+
+static int i2c_mux_gpio_deselect(struct i2c_adapter *adap, void *data, u32 chan)
+{
+       struct gpiomux *mux = data;
+
+       i2c_mux_gpio_set(mux, mux->data.idle);
+
+       return 0;
+}
+
+static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
+{
+       struct gpiomux *mux;
+       struct i2c_mux_gpio_platform_data *pdata;
+       struct i2c_adapter *parent;
+       int (*deselect) (struct i2c_adapter *, void *, u32);
+       unsigned initial_state;
+       int i, ret;
+
+       pdata = pdev->dev.platform_data;
+       if (!pdata) {
+               dev_err(&pdev->dev, "Missing platform data\n");
+               return -ENODEV;
+       }
+
+       parent = i2c_get_adapter(pdata->parent);
+       if (!parent) {
+               dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
+                       pdata->parent);
+               return -ENODEV;
+       }
+
+       mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+       if (!mux) {
+               ret = -ENOMEM;
+               goto alloc_failed;
+       }
+
+       mux->parent = parent;
+       mux->data = *pdata;
+       mux->adap = kzalloc(sizeof(struct i2c_adapter *) * pdata->n_values,
+                           GFP_KERNEL);
+       if (!mux->adap) {
+               ret = -ENOMEM;
+               goto alloc_failed2;
+       }
+
+       if (pdata->idle != I2C_MUX_GPIO_NO_IDLE) {
+               initial_state = pdata->idle;
+               deselect = i2c_mux_gpio_deselect;
+       } else {
+               initial_state = pdata->values[0];
+               deselect = NULL;
+       }
+
+       for (i = 0; i < pdata->n_gpios; i++) {
+               ret = gpio_request(pdata->gpios[i], "i2c-mux-gpio");
+               if (ret)
+                       goto err_request_gpio;
+               gpio_direction_output(pdata->gpios[i],
+                                     initial_state & (1 << i));
+       }
+
+       for (i = 0; i < pdata->n_values; i++) {
+               u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
+
+               mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr, i,
+                                                  i2c_mux_gpio_select, deselect);
+               if (!mux->adap[i]) {
+                       ret = -ENODEV;
+                       dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
+                       goto add_adapter_failed;
+               }
+       }
+
+       dev_info(&pdev->dev, "%d port mux on %s adapter\n",
+                pdata->n_values, parent->name);
+
+       platform_set_drvdata(pdev, mux);
+
+       return 0;
+
+add_adapter_failed:
+       for (; i > 0; i--)
+               i2c_del_mux_adapter(mux->adap[i - 1]);
+       i = pdata->n_gpios;
+err_request_gpio:
+       for (; i > 0; i--)
+               gpio_free(pdata->gpios[i - 1]);
+       kfree(mux->adap);
+alloc_failed2:
+       kfree(mux);
+alloc_failed:
+       i2c_put_adapter(parent);
+
+       return ret;
+}
+
+static int __devexit i2c_mux_gpio_remove(struct platform_device *pdev)
+{
+       struct gpiomux *mux = platform_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < mux->data.n_values; i++)
+               i2c_del_mux_adapter(mux->adap[i]);
+
+       for (i = 0; i < mux->data.n_gpios; i++)
+               gpio_free(mux->data.gpios[i]);
+
+       platform_set_drvdata(pdev, NULL);
+       i2c_put_adapter(mux->parent);
+       kfree(mux->adap);
+       kfree(mux);
+
+       return 0;
+}
+
+static struct platform_driver i2c_mux_gpio_driver = {
+       .probe  = i2c_mux_gpio_probe,
+       .remove = __devexit_p(i2c_mux_gpio_remove),
+       .driver = {
+               .owner  = THIS_MODULE,
+               .name   = "i2c-mux-gpio",
+       },
+};
+
+module_platform_driver(i2c_mux_gpio_driver);
+
+MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver");
+MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:i2c-mux-gpio");
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
new file mode 100644 (file)
index 0000000..8aacde1
--- /dev/null
@@ -0,0 +1,401 @@
+/*
+ * I2C multiplexer driver for PCA9541 bus master selector
+ *
+ * Copyright (c) 2010 Ericsson AB.
+ *
+ * Author: Guenter Roeck <guenter.roeck@ericsson.com>
+ *
+ * Derived from:
+ *  pca954x.c
+ *
+ *  Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
+ *  Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+
+#include <linux/i2c/pca954x.h>
+
+/*
+ * The PCA9541 is a bus master selector. It supports two I2C masters connected
+ * to a single slave bus.
+ *
+ * Before each bus transaction, a master has to acquire bus ownership. After the
+ * transaction is complete, bus ownership has to be released. This fits well
+ * into the I2C multiplexer framework, which provides select and release
+ * functions for this purpose. For this reason, this driver is modeled as
+ * single-channel I2C bus multiplexer.
+ *
+ * This driver assumes that the two bus masters are controlled by two different
+ * hosts. If a single host controls both masters, platform code has to ensure
+ * that only one of the masters is instantiated at any given time.
+ */
+
+#define PCA9541_CONTROL                0x01
+#define PCA9541_ISTAT          0x02
+
+#define PCA9541_CTL_MYBUS      (1 << 0)
+#define PCA9541_CTL_NMYBUS     (1 << 1)
+#define PCA9541_CTL_BUSON      (1 << 2)
+#define PCA9541_CTL_NBUSON     (1 << 3)
+#define PCA9541_CTL_BUSINIT    (1 << 4)
+#define PCA9541_CTL_TESTON     (1 << 6)
+#define PCA9541_CTL_NTESTON    (1 << 7)
+
+#define PCA9541_ISTAT_INTIN    (1 << 0)
+#define PCA9541_ISTAT_BUSINIT  (1 << 1)
+#define PCA9541_ISTAT_BUSOK    (1 << 2)
+#define PCA9541_ISTAT_BUSLOST  (1 << 3)
+#define PCA9541_ISTAT_MYTEST   (1 << 6)
+#define PCA9541_ISTAT_NMYTEST  (1 << 7)
+
+#define BUSON          (PCA9541_CTL_BUSON | PCA9541_CTL_NBUSON)
+#define MYBUS          (PCA9541_CTL_MYBUS | PCA9541_CTL_NMYBUS)
+#define mybus(x)       (!((x) & MYBUS) || ((x) & MYBUS) == MYBUS)
+#define busoff(x)      (!((x) & BUSON) || ((x) & BUSON) == BUSON)
+
+/* arbitration timeouts, in jiffies */
+#define ARB_TIMEOUT    (HZ / 8)        /* 125 ms until forcing bus ownership */
+#define ARB2_TIMEOUT   (HZ / 4)        /* 250 ms until acquisition failure */
+
+/* arbitration retry delays, in us */
+#define SELECT_DELAY_SHORT     50
+#define SELECT_DELAY_LONG      1000
+
+struct pca9541 {
+       struct i2c_adapter *mux_adap;
+       unsigned long select_timeout;
+       unsigned long arb_timeout;
+};
+
+static const struct i2c_device_id pca9541_id[] = {
+       {"pca9541", 0},
+       {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pca9541_id);
+
+/*
+ * Write to chip register. Don't use i2c_transfer()/i2c_smbus_xfer()
+ * as they will try to lock the adapter a second time.
+ */
+static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val)
+{
+       struct i2c_adapter *adap = client->adapter;
+       int ret;
+
+       if (adap->algo->master_xfer) {
+               struct i2c_msg msg;
+               char buf[2];
+
+               msg.addr = client->addr;
+               msg.flags = 0;
+               msg.len = 2;
+               buf[0] = command;
+               buf[1] = val;
+               msg.buf = buf;
+               ret = adap->algo->master_xfer(adap, &msg, 1);
+       } else {
+               union i2c_smbus_data data;
+
+               data.byte = val;
+               ret = adap->algo->smbus_xfer(adap, client->addr,
+                                            client->flags,
+                                            I2C_SMBUS_WRITE,
+                                            command,
+                                            I2C_SMBUS_BYTE_DATA, &data);
+       }
+
+       return ret;
+}
+
+/*
+ * Read from chip register. Don't use i2c_transfer()/i2c_smbus_xfer()
+ * as they will try to lock adapter a second time.
+ */
+static int pca9541_reg_read(struct i2c_client *client, u8 command)
+{
+       struct i2c_adapter *adap = client->adapter;
+       int ret;
+       u8 val;
+
+       if (adap->algo->master_xfer) {
+               struct i2c_msg msg[2] = {
+                       {
+                               .addr = client->addr,
+                               .flags = 0,
+                               .len = 1,
+                               .buf = &command
+                       },
+                       {
+                               .addr = client->addr,
+                               .flags = I2C_M_RD,
+                               .len = 1,
+                               .buf = &val
+                       }
+               };
+               ret = adap->algo->master_xfer(adap, msg, 2);
+               if (ret == 2)
+                       ret = val;
+               else if (ret >= 0)
+                       ret = -EIO;
+       } else {
+               union i2c_smbus_data data;
+
+               ret = adap->algo->smbus_xfer(adap, client->addr,
+                                            client->flags,
+                                            I2C_SMBUS_READ,
+                                            command,
+                                            I2C_SMBUS_BYTE_DATA, &data);
+               if (!ret)
+                       ret = data.byte;
+       }
+       return ret;
+}
+
+/*
+ * Arbitration management functions
+ */
+
+/* Release bus. Also reset NTESTON and BUSINIT if it was set. */
+static void pca9541_release_bus(struct i2c_client *client)
+{
+       int reg;
+
+       reg = pca9541_reg_read(client, PCA9541_CONTROL);
+       if (reg >= 0 && !busoff(reg) && mybus(reg))
+               pca9541_reg_write(client, PCA9541_CONTROL,
+                                 (reg & PCA9541_CTL_NBUSON) >> 1);
+}
+
+/*
+ * Arbitration is defined as a two-step process. A bus master can only activate
+ * the slave bus if it owns it; otherwise it has to request ownership first.
+ * This multi-step process ensures that access contention is resolved
+ * gracefully.
+ *
+ * Bus Ownership       Other master    Action
+ * state               requested access
+ * ----------------------------------------------------
+ * off -               yes             wait for arbitration timeout or
+ *                                     for other master to drop request
+ * off no              no              take ownership
+ * off yes             no              turn on bus
+ * on  yes             -               done
+ * on  no              -               wait for arbitration timeout or
+ *                                     for other master to release bus
+ *
+ * The main contention point occurs if the slave bus is off and both masters
+ * request ownership at the same time. In this case, one master will turn on
+ * the slave bus, believing that it owns it. The other master will request
+ * bus ownership. Result is that the bus is turned on, and master which did
+ * _not_ own the slave bus before ends up owning it.
+ */
+
+/* Control commands per PCA9541 datasheet */
+static const u8 pca9541_control[16] = {
+       4, 0, 1, 5, 4, 4, 5, 5, 0, 0, 1, 1, 0, 4, 5, 1
+};
+
+/*
+ * Channel arbitration
+ *
+ * Return values:
+ *  <0: error
+ *  0 : bus not acquired
+ *  1 : bus acquired
+ */
+static int pca9541_arbitrate(struct i2c_client *client)
+{
+       struct pca9541 *data = i2c_get_clientdata(client);
+       int reg;
+
+       reg = pca9541_reg_read(client, PCA9541_CONTROL);
+       if (reg < 0)
+               return reg;
+
+       if (busoff(reg)) {
+               int istat;
+               /*
+                * Bus is off. Request ownership or turn it on unless
+                * other master requested ownership.
+                */
+               istat = pca9541_reg_read(client, PCA9541_ISTAT);
+               if (!(istat & PCA9541_ISTAT_NMYTEST)
+                   || time_is_before_eq_jiffies(data->arb_timeout)) {
+                       /*
+                        * Other master did not request ownership,
+                        * or arbitration timeout expired. Take the bus.
+                        */
+                       pca9541_reg_write(client,
+                                         PCA9541_CONTROL,
+                                         pca9541_control[reg & 0x0f]
+                                         | PCA9541_CTL_NTESTON);
+                       data->select_timeout = SELECT_DELAY_SHORT;
+               } else {
+                       /*
+                        * Other master requested ownership.
+                        * Set extra long timeout to give it time to acquire it.
+                        */
+                       data->select_timeout = SELECT_DELAY_LONG * 2;
+               }
+       } else if (mybus(reg)) {
+               /*
+                * Bus is on, and we own it. We are done with acquisition.
+                * Reset NTESTON and BUSINIT, then return success.
+                */
+               if (reg & (PCA9541_CTL_NTESTON | PCA9541_CTL_BUSINIT))
+                       pca9541_reg_write(client,
+                                         PCA9541_CONTROL,
+                                         reg & ~(PCA9541_CTL_NTESTON
+                                                 | PCA9541_CTL_BUSINIT));
+               return 1;
+       } else {
+               /*
+                * Other master owns the bus.
+                * If arbitration timeout has expired, force ownership.
+                * Otherwise request it.
+                */
+               data->select_timeout = SELECT_DELAY_LONG;
+               if (time_is_before_eq_jiffies(data->arb_timeout)) {
+                       /* Time is up, take the bus and reset it. */
+                       pca9541_reg_write(client,
+                                         PCA9541_CONTROL,
+                                         pca9541_control[reg & 0x0f]
+                                         | PCA9541_CTL_BUSINIT
+                                         | PCA9541_CTL_NTESTON);
+               } else {
+                       /* Request bus ownership if needed */
+                       if (!(reg & PCA9541_CTL_NTESTON))
+                               pca9541_reg_write(client,
+                                                 PCA9541_CONTROL,
+                                                 reg | PCA9541_CTL_NTESTON);
+               }
+       }
+       return 0;
+}
+
+static int pca9541_select_chan(struct i2c_adapter *adap, void *client, u32 chan)
+{
+       struct pca9541 *data = i2c_get_clientdata(client);
+       int ret;
+       unsigned long timeout = jiffies + ARB2_TIMEOUT;
+               /* give up after this time */
+
+       data->arb_timeout = jiffies + ARB_TIMEOUT;
+               /* force bus ownership after this time */
+
+       do {
+               ret = pca9541_arbitrate(client);
+               if (ret)
+                       return ret < 0 ? ret : 0;
+
+               if (data->select_timeout == SELECT_DELAY_SHORT)
+                       udelay(data->select_timeout);
+               else
+                       msleep(data->select_timeout / 1000);
+       } while (time_is_after_eq_jiffies(timeout));
+
+       return -ETIMEDOUT;
+}
+
+static int pca9541_release_chan(struct i2c_adapter *adap,
+                               void *client, u32 chan)
+{
+       pca9541_release_bus(client);
+       return 0;
+}
+
+/*
+ * I2C init/probing/exit functions
+ */
+static int pca9541_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
+{
+       struct i2c_adapter *adap = client->adapter;
+       struct pca954x_platform_data *pdata = client->dev.platform_data;
+       struct pca9541 *data;
+       int force;
+       int ret = -ENODEV;
+
+       if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE_DATA))
+               goto err;
+
+       data = kzalloc(sizeof(struct pca9541), GFP_KERNEL);
+       if (!data) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       i2c_set_clientdata(client, data);
+
+       /*
+        * I2C accesses are unprotected here.
+        * We have to lock the adapter before releasing the bus.
+        */
+       i2c_lock_adapter(adap);
+       pca9541_release_bus(client);
+       i2c_unlock_adapter(adap);
+
+       /* Create mux adapter */
+
+       force = 0;
+       if (pdata)
+               force = pdata->modes[0].adap_id;
+       data->mux_adap = i2c_add_mux_adapter(adap, &client->dev, client,
+                                            force, 0,
+                                            pca9541_select_chan,
+                                            pca9541_release_chan);
+
+       if (data->mux_adap == NULL) {
+               dev_err(&client->dev, "failed to register master selector\n");
+               goto exit_free;
+       }
+
+       dev_info(&client->dev, "registered master selector for I2C %s\n",
+                client->name);
+
+       return 0;
+
+exit_free:
+       kfree(data);
+err:
+       return ret;
+}
+
+static int pca9541_remove(struct i2c_client *client)
+{
+       struct pca9541 *data = i2c_get_clientdata(client);
+
+       i2c_del_mux_adapter(data->mux_adap);
+
+       kfree(data);
+       return 0;
+}
+
+static struct i2c_driver pca9541_driver = {
+       .driver = {
+                  .name = "pca9541",
+                  .owner = THIS_MODULE,
+                  },
+       .probe = pca9541_probe,
+       .remove = pca9541_remove,
+       .id_table = pca9541_id,
+};
+
+module_i2c_driver(pca9541_driver);
+
+MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_DESCRIPTION("PCA9541 I2C master selector driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
new file mode 100644 (file)
index 0000000..f2dfe0d
--- /dev/null
@@ -0,0 +1,291 @@
+/*
+ * I2C multiplexer
+ *
+ * Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
+ *
+ * This module supports the PCA954x series of I2C multiplexer/switch chips
+ * made by Philips Semiconductors.
+ * This includes the:
+ *      PCA9540, PCA9542, PCA9543, PCA9544, PCA9545, PCA9546, PCA9547
+ *      and PCA9548.
+ *
+ * These chips are all controlled via the I2C bus itself, and all have a
+ * single 8-bit register. The upstream "parent" bus fans out to two,
+ * four, or eight downstream busses or channels; which of these
+ * are selected is determined by the chip type and register contents. A
+ * mux can select only one sub-bus at a time; a switch can select any
+ * combination simultaneously.
+ *
+ * Based on:
+ *     pca954x.c from Kumar Gala <galak@kernel.crashing.org>
+ * Copyright (C) 2006
+ *
+ * Based on:
+ *     pca954x.c from Ken Harrenstien
+ * Copyright (C) 2004 Google, Inc. (Ken Harrenstien)
+ *
+ * Based on:
+ *     i2c-virtual_cb.c from Brian Kuschak <bkuschak@yahoo.com>
+ * and
+ *     pca9540.c from Jean Delvare <khali@linux-fr.org>.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+
+#include <linux/i2c/pca954x.h>
+
+#define PCA954X_MAX_NCHANS 8
+
+enum pca_type {
+       pca_9540,
+       pca_9542,
+       pca_9543,
+       pca_9544,
+       pca_9545,
+       pca_9546,
+       pca_9547,
+       pca_9548,
+};
+
+struct pca954x {
+       enum pca_type type;
+       struct i2c_adapter *virt_adaps[PCA954X_MAX_NCHANS];
+
+       u8 last_chan;           /* last register value */
+};
+
+struct chip_desc {
+       u8 nchans;
+       u8 enable;      /* used for muxes only */
+       enum muxtype {
+               pca954x_ismux = 0,
+               pca954x_isswi
+       } muxtype;
+};
+
+/* Provide specs for the PCA954x types we know about */
+static const struct chip_desc chips[] = {
+       [pca_9540] = {
+               .nchans = 2,
+               .enable = 0x4,
+               .muxtype = pca954x_ismux,
+       },
+       [pca_9543] = {
+               .nchans = 2,
+               .muxtype = pca954x_isswi,
+       },
+       [pca_9544] = {
+               .nchans = 4,
+               .enable = 0x4,
+               .muxtype = pca954x_ismux,
+       },
+       [pca_9545] = {
+               .nchans = 4,
+               .muxtype = pca954x_isswi,
+       },
+       [pca_9547] = {
+               .nchans = 8,
+               .enable = 0x8,
+               .muxtype = pca954x_ismux,
+       },
+       [pca_9548] = {
+               .nchans = 8,
+               .muxtype = pca954x_isswi,
+       },
+};
+
+static const struct i2c_device_id pca954x_id[] = {
+       { "pca9540", pca_9540 },
+       { "pca9542", pca_9540 },
+       { "pca9543", pca_9543 },
+       { "pca9544", pca_9544 },
+       { "pca9545", pca_9545 },
+       { "pca9546", pca_9545 },
+       { "pca9547", pca_9547 },
+       { "pca9548", pca_9548 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, pca954x_id);
+
+/* Write to mux register. Don't use i2c_transfer()/i2c_smbus_xfer()
+   for this as they will try to lock adapter a second time */
+static int pca954x_reg_write(struct i2c_adapter *adap,
+                            struct i2c_client *client, u8 val)
+{
+       int ret = -ENODEV;
+
+       if (adap->algo->master_xfer) {
+               struct i2c_msg msg;
+               char buf[1];
+
+               msg.addr = client->addr;
+               msg.flags = 0;
+               msg.len = 1;
+               buf[0] = val;
+               msg.buf = buf;
+               ret = adap->algo->master_xfer(adap, &msg, 1);
+       } else {
+               union i2c_smbus_data data;
+               ret = adap->algo->smbus_xfer(adap, client->addr,
+                                            client->flags,
+                                            I2C_SMBUS_WRITE,
+                                            val, I2C_SMBUS_BYTE, &data);
+       }
+
+       return ret;
+}
+
+static int pca954x_select_chan(struct i2c_adapter *adap,
+                              void *client, u32 chan)
+{
+       struct pca954x *data = i2c_get_clientdata(client);
+       const struct chip_desc *chip = &chips[data->type];
+       u8 regval;
+       int ret = 0;
+
+       /* we make switches look like muxes, not sure how to be smarter */
+       if (chip->muxtype == pca954x_ismux)
+               regval = chan | chip->enable;
+       else
+               regval = 1 << chan;
+
+       /* Only select the channel if it's different from the last channel */
+       if (data->last_chan != regval) {
+               ret = pca954x_reg_write(adap, client, regval);
+               data->last_chan = regval;
+       }
+
+       return ret;
+}
+
+static int pca954x_deselect_mux(struct i2c_adapter *adap,
+                               void *client, u32 chan)
+{
+       struct pca954x *data = i2c_get_clientdata(client);
+
+       /* Deselect active channel */
+       data->last_chan = 0;
+       return pca954x_reg_write(adap, client, data->last_chan);
+}
+
+/*
+ * I2C init/probing/exit functions
+ */
+static int pca954x_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
+{
+       struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
+       struct pca954x_platform_data *pdata = client->dev.platform_data;
+       int num, force;
+       struct pca954x *data;
+       int ret = -ENODEV;
+
+       if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE))
+               goto err;
+
+       data = kzalloc(sizeof(struct pca954x), GFP_KERNEL);
+       if (!data) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       i2c_set_clientdata(client, data);
+
+       /* Write the mux register at addr to verify
+        * that the mux is in fact present. This also
+        * initializes the mux to disconnected state.
+        */
+       if (i2c_smbus_write_byte(client, 0) < 0) {
+               dev_warn(&client->dev, "probe failed\n");
+               goto exit_free;
+       }
+
+       data->type = id->driver_data;
+       data->last_chan = 0;               /* force the first selection */
+
+       /* Now create an adapter for each channel */
+       for (num = 0; num < chips[data->type].nchans; num++) {
+               force = 0;                        /* dynamic adap number */
+               if (pdata) {
+                       if (num < pdata->num_modes)
+                               /* force static number */
+                               force = pdata->modes[num].adap_id;
+                       else
+                               /* discard unconfigured channels */
+                               break;
+               }
+
+               data->virt_adaps[num] =
+                       i2c_add_mux_adapter(adap, &client->dev, client,
+                               force, num, pca954x_select_chan,
+                               (pdata && pdata->modes[num].deselect_on_exit)
+                                       ? pca954x_deselect_mux : NULL);
+
+               if (data->virt_adaps[num] == NULL) {
+                       ret = -ENODEV;
+                       dev_err(&client->dev,
+                               "failed to register multiplexed adapter"
+                               " %d as bus %d\n", num, force);
+                       goto virt_reg_failed;
+               }
+       }
+
+       dev_info(&client->dev,
+                "registered %d multiplexed busses for I2C %s %s\n",
+                num, chips[data->type].muxtype == pca954x_ismux
+                               ? "mux" : "switch", client->name);
+
+       return 0;
+
+virt_reg_failed:
+       for (num--; num >= 0; num--)
+               i2c_del_mux_adapter(data->virt_adaps[num]);
+exit_free:
+       kfree(data);
+err:
+       return ret;
+}
+
+static int pca954x_remove(struct i2c_client *client)
+{
+       struct pca954x *data = i2c_get_clientdata(client);
+       const struct chip_desc *chip = &chips[data->type];
+       int i, err;
+
+       for (i = 0; i < chip->nchans; ++i)
+               if (data->virt_adaps[i]) {
+                       err = i2c_del_mux_adapter(data->virt_adaps[i]);
+                       if (err)
+                               return err;
+                       data->virt_adaps[i] = NULL;
+               }
+
+       kfree(data);
+       return 0;
+}
+
+static struct i2c_driver pca954x_driver = {
+       .driver         = {
+               .name   = "pca954x",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = pca954x_probe,
+       .remove         = pca954x_remove,
+       .id_table       = pca954x_id,
+};
+
+module_i2c_driver(pca954x_driver);
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
+MODULE_DESCRIPTION("PCA954x I2C mux/switch driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/muxes/pca9541.c b/drivers/i2c/muxes/pca9541.c
deleted file mode 100644 (file)
index e0df9b6..0000000
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * I2C multiplexer driver for PCA9541 bus master selector
- *
- * Copyright (c) 2010 Ericsson AB.
- *
- * Author: Guenter Roeck <guenter.roeck@ericsson.com>
- *
- * Derived from:
- *  pca954x.c
- *
- *  Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
- *  Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/i2c.h>
-#include <linux/i2c-mux.h>
-
-#include <linux/i2c/pca954x.h>
-
-/*
- * The PCA9541 is a bus master selector. It supports two I2C masters connected
- * to a single slave bus.
- *
- * Before each bus transaction, a master has to acquire bus ownership. After the
- * transaction is complete, bus ownership has to be released. This fits well
- * into the I2C multiplexer framework, which provides select and release
- * functions for this purpose. For this reason, this driver is modeled as
- * single-channel I2C bus multiplexer.
- *
- * This driver assumes that the two bus masters are controlled by two different
- * hosts. If a single host controls both masters, platform code has to ensure
- * that only one of the masters is instantiated at any given time.
- */
-
-#define PCA9541_CONTROL                0x01
-#define PCA9541_ISTAT          0x02
-
-#define PCA9541_CTL_MYBUS      (1 << 0)
-#define PCA9541_CTL_NMYBUS     (1 << 1)
-#define PCA9541_CTL_BUSON      (1 << 2)
-#define PCA9541_CTL_NBUSON     (1 << 3)
-#define PCA9541_CTL_BUSINIT    (1 << 4)
-#define PCA9541_CTL_TESTON     (1 << 6)
-#define PCA9541_CTL_NTESTON    (1 << 7)
-
-#define PCA9541_ISTAT_INTIN    (1 << 0)
-#define PCA9541_ISTAT_BUSINIT  (1 << 1)
-#define PCA9541_ISTAT_BUSOK    (1 << 2)
-#define PCA9541_ISTAT_BUSLOST  (1 << 3)
-#define PCA9541_ISTAT_MYTEST   (1 << 6)
-#define PCA9541_ISTAT_NMYTEST  (1 << 7)
-
-#define BUSON          (PCA9541_CTL_BUSON | PCA9541_CTL_NBUSON)
-#define MYBUS          (PCA9541_CTL_MYBUS | PCA9541_CTL_NMYBUS)
-#define mybus(x)       (!((x) & MYBUS) || ((x) & MYBUS) == MYBUS)
-#define busoff(x)      (!((x) & BUSON) || ((x) & BUSON) == BUSON)
-
-/* arbitration timeouts, in jiffies */
-#define ARB_TIMEOUT    (HZ / 8)        /* 125 ms until forcing bus ownership */
-#define ARB2_TIMEOUT   (HZ / 4)        /* 250 ms until acquisition failure */
-
-/* arbitration retry delays, in us */
-#define SELECT_DELAY_SHORT     50
-#define SELECT_DELAY_LONG      1000
-
-struct pca9541 {
-       struct i2c_adapter *mux_adap;
-       unsigned long select_timeout;
-       unsigned long arb_timeout;
-};
-
-static const struct i2c_device_id pca9541_id[] = {
-       {"pca9541", 0},
-       {}
-};
-
-MODULE_DEVICE_TABLE(i2c, pca9541_id);
-
-/*
- * Write to chip register. Don't use i2c_transfer()/i2c_smbus_xfer()
- * as they will try to lock the adapter a second time.
- */
-static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val)
-{
-       struct i2c_adapter *adap = client->adapter;
-       int ret;
-
-       if (adap->algo->master_xfer) {
-               struct i2c_msg msg;
-               char buf[2];
-
-               msg.addr = client->addr;
-               msg.flags = 0;
-               msg.len = 2;
-               buf[0] = command;
-               buf[1] = val;
-               msg.buf = buf;
-               ret = adap->algo->master_xfer(adap, &msg, 1);
-       } else {
-               union i2c_smbus_data data;
-
-               data.byte = val;
-               ret = adap->algo->smbus_xfer(adap, client->addr,
-                                            client->flags,
-                                            I2C_SMBUS_WRITE,
-                                            command,
-                                            I2C_SMBUS_BYTE_DATA, &data);
-       }
-
-       return ret;
-}
-
-/*
- * Read from chip register. Don't use i2c_transfer()/i2c_smbus_xfer()
- * as they will try to lock adapter a second time.
- */
-static int pca9541_reg_read(struct i2c_client *client, u8 command)
-{
-       struct i2c_adapter *adap = client->adapter;
-       int ret;
-       u8 val;
-
-       if (adap->algo->master_xfer) {
-               struct i2c_msg msg[2] = {
-                       {
-                               .addr = client->addr,
-                               .flags = 0,
-                               .len = 1,
-                               .buf = &command
-                       },
-                       {
-                               .addr = client->addr,
-                               .flags = I2C_M_RD,
-                               .len = 1,
-                               .buf = &val
-                       }
-               };
-               ret = adap->algo->master_xfer(adap, msg, 2);
-               if (ret == 2)
-                       ret = val;
-               else if (ret >= 0)
-                       ret = -EIO;
-       } else {
-               union i2c_smbus_data data;
-
-               ret = adap->algo->smbus_xfer(adap, client->addr,
-                                            client->flags,
-                                            I2C_SMBUS_READ,
-                                            command,
-                                            I2C_SMBUS_BYTE_DATA, &data);
-               if (!ret)
-                       ret = data.byte;
-       }
-       return ret;
-}
-
-/*
- * Arbitration management functions
- */
-
-/* Release bus. Also reset NTESTON and BUSINIT if it was set. */
-static void pca9541_release_bus(struct i2c_client *client)
-{
-       int reg;
-
-       reg = pca9541_reg_read(client, PCA9541_CONTROL);
-       if (reg >= 0 && !busoff(reg) && mybus(reg))
-               pca9541_reg_write(client, PCA9541_CONTROL,
-                                 (reg & PCA9541_CTL_NBUSON) >> 1);
-}
-
-/*
- * Arbitration is defined as a two-step process. A bus master can only activate
- * the slave bus if it owns it; otherwise it has to request ownership first.
- * This multi-step process ensures that access contention is resolved
- * gracefully.
- *
- * Bus Ownership       Other master    Action
- * state               requested access
- * ----------------------------------------------------
- * off -               yes             wait for arbitration timeout or
- *                                     for other master to drop request
- * off no              no              take ownership
- * off yes             no              turn on bus
- * on  yes             -               done
- * on  no              -               wait for arbitration timeout or
- *                                     for other master to release bus
- *
- * The main contention point occurs if the slave bus is off and both masters
- * request ownership at the same time. In this case, one master will turn on
- * the slave bus, believing that it owns it. The other master will request
- * bus ownership. Result is that the bus is turned on, and master which did
- * _not_ own the slave bus before ends up owning it.
- */
-
-/* Control commands per PCA9541 datasheet */
-static const u8 pca9541_control[16] = {
-       4, 0, 1, 5, 4, 4, 5, 5, 0, 0, 1, 1, 0, 4, 5, 1
-};
-
-/*
- * Channel arbitration
- *
- * Return values:
- *  <0: error
- *  0 : bus not acquired
- *  1 : bus acquired
- */
-static int pca9541_arbitrate(struct i2c_client *client)
-{
-       struct pca9541 *data = i2c_get_clientdata(client);
-       int reg;
-
-       reg = pca9541_reg_read(client, PCA9541_CONTROL);
-       if (reg < 0)
-               return reg;
-
-       if (busoff(reg)) {
-               int istat;
-               /*
-                * Bus is off. Request ownership or turn it on unless
-                * other master requested ownership.
-                */
-               istat = pca9541_reg_read(client, PCA9541_ISTAT);
-               if (!(istat & PCA9541_ISTAT_NMYTEST)
-                   || time_is_before_eq_jiffies(data->arb_timeout)) {
-                       /*
-                        * Other master did not request ownership,
-                        * or arbitration timeout expired. Take the bus.
-                        */
-                       pca9541_reg_write(client,
-                                         PCA9541_CONTROL,
-                                         pca9541_control[reg & 0x0f]
-                                         | PCA9541_CTL_NTESTON);
-                       data->select_timeout = SELECT_DELAY_SHORT;
-               } else {
-                       /*
-                        * Other master requested ownership.
-                        * Set extra long timeout to give it time to acquire it.
-                        */
-                       data->select_timeout = SELECT_DELAY_LONG * 2;
-               }
-       } else if (mybus(reg)) {
-               /*
-                * Bus is on, and we own it. We are done with acquisition.
-                * Reset NTESTON and BUSINIT, then return success.
-                */
-               if (reg & (PCA9541_CTL_NTESTON | PCA9541_CTL_BUSINIT))
-                       pca9541_reg_write(client,
-                                         PCA9541_CONTROL,
-                                         reg & ~(PCA9541_CTL_NTESTON
-                                                 | PCA9541_CTL_BUSINIT));
-               return 1;
-       } else {
-               /*
-                * Other master owns the bus.
-                * If arbitration timeout has expired, force ownership.
-                * Otherwise request it.
-                */
-               data->select_timeout = SELECT_DELAY_LONG;
-               if (time_is_before_eq_jiffies(data->arb_timeout)) {
-                       /* Time is up, take the bus and reset it. */
-                       pca9541_reg_write(client,
-                                         PCA9541_CONTROL,
-                                         pca9541_control[reg & 0x0f]
-                                         | PCA9541_CTL_BUSINIT
-                                         | PCA9541_CTL_NTESTON);
-               } else {
-                       /* Request bus ownership if needed */
-                       if (!(reg & PCA9541_CTL_NTESTON))
-                               pca9541_reg_write(client,
-                                                 PCA9541_CONTROL,
-                                                 reg | PCA9541_CTL_NTESTON);
-               }
-       }
-       return 0;
-}
-
-static int pca9541_select_chan(struct i2c_adapter *adap, void *client, u32 chan)
-{
-       struct pca9541 *data = i2c_get_clientdata(client);
-       int ret;
-       unsigned long timeout = jiffies + ARB2_TIMEOUT;
-               /* give up after this time */
-
-       data->arb_timeout = jiffies + ARB_TIMEOUT;
-               /* force bus ownership after this time */
-
-       do {
-               ret = pca9541_arbitrate(client);
-               if (ret)
-                       return ret < 0 ? ret : 0;
-
-               if (data->select_timeout == SELECT_DELAY_SHORT)
-                       udelay(data->select_timeout);
-               else
-                       msleep(data->select_timeout / 1000);
-       } while (time_is_after_eq_jiffies(timeout));
-
-       return -ETIMEDOUT;
-}
-
-static int pca9541_release_chan(struct i2c_adapter *adap,
-                               void *client, u32 chan)
-{
-       pca9541_release_bus(client);
-       return 0;
-}
-
-/*
- * I2C init/probing/exit functions
- */
-static int pca9541_probe(struct i2c_client *client,
-                        const struct i2c_device_id *id)
-{
-       struct i2c_adapter *adap = client->adapter;
-       struct pca954x_platform_data *pdata = client->dev.platform_data;
-       struct pca9541 *data;
-       int force;
-       int ret = -ENODEV;
-
-       if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE_DATA))
-               goto err;
-
-       data = kzalloc(sizeof(struct pca9541), GFP_KERNEL);
-       if (!data) {
-               ret = -ENOMEM;
-               goto err;
-       }
-
-       i2c_set_clientdata(client, data);
-
-       /*
-        * I2C accesses are unprotected here.
-        * We have to lock the adapter before releasing the bus.
-        */
-       i2c_lock_adapter(adap);
-       pca9541_release_bus(client);
-       i2c_unlock_adapter(adap);
-
-       /* Create mux adapter */
-
-       force = 0;
-       if (pdata)
-               force = pdata->modes[0].adap_id;
-       data->mux_adap = i2c_add_mux_adapter(adap, client, force, 0,
-                                            pca9541_select_chan,
-                                            pca9541_release_chan);
-
-       if (data->mux_adap == NULL) {
-               dev_err(&client->dev, "failed to register master selector\n");
-               goto exit_free;
-       }
-
-       dev_info(&client->dev, "registered master selector for I2C %s\n",
-                client->name);
-
-       return 0;
-
-exit_free:
-       kfree(data);
-err:
-       return ret;
-}
-
-static int pca9541_remove(struct i2c_client *client)
-{
-       struct pca9541 *data = i2c_get_clientdata(client);
-
-       i2c_del_mux_adapter(data->mux_adap);
-
-       kfree(data);
-       return 0;
-}
-
-static struct i2c_driver pca9541_driver = {
-       .driver = {
-                  .name = "pca9541",
-                  .owner = THIS_MODULE,
-                  },
-       .probe = pca9541_probe,
-       .remove = pca9541_remove,
-       .id_table = pca9541_id,
-};
-
-module_i2c_driver(pca9541_driver);
-
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
-MODULE_DESCRIPTION("PCA9541 I2C master selector driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/muxes/pca954x.c b/drivers/i2c/muxes/pca954x.c
deleted file mode 100644 (file)
index 0e37ef2..0000000
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * I2C multiplexer
- *
- * Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
- * Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
- *
- * This module supports the PCA954x series of I2C multiplexer/switch chips
- * made by Philips Semiconductors.
- * This includes the:
- *      PCA9540, PCA9542, PCA9543, PCA9544, PCA9545, PCA9546, PCA9547
- *      and PCA9548.
- *
- * These chips are all controlled via the I2C bus itself, and all have a
- * single 8-bit register. The upstream "parent" bus fans out to two,
- * four, or eight downstream busses or channels; which of these
- * are selected is determined by the chip type and register contents. A
- * mux can select only one sub-bus at a time; a switch can select any
- * combination simultaneously.
- *
- * Based on:
- *     pca954x.c from Kumar Gala <galak@kernel.crashing.org>
- * Copyright (C) 2006
- *
- * Based on:
- *     pca954x.c from Ken Harrenstien
- * Copyright (C) 2004 Google, Inc. (Ken Harrenstien)
- *
- * Based on:
- *     i2c-virtual_cb.c from Brian Kuschak <bkuschak@yahoo.com>
- * and
- *     pca9540.c from Jean Delvare <khali@linux-fr.org>.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/i2c.h>
-#include <linux/i2c-mux.h>
-
-#include <linux/i2c/pca954x.h>
-
-#define PCA954X_MAX_NCHANS 8
-
-enum pca_type {
-       pca_9540,
-       pca_9542,
-       pca_9543,
-       pca_9544,
-       pca_9545,
-       pca_9546,
-       pca_9547,
-       pca_9548,
-};
-
-struct pca954x {
-       enum pca_type type;
-       struct i2c_adapter *virt_adaps[PCA954X_MAX_NCHANS];
-
-       u8 last_chan;           /* last register value */
-};
-
-struct chip_desc {
-       u8 nchans;
-       u8 enable;      /* used for muxes only */
-       enum muxtype {
-               pca954x_ismux = 0,
-               pca954x_isswi
-       } muxtype;
-};
-
-/* Provide specs for the PCA954x types we know about */
-static const struct chip_desc chips[] = {
-       [pca_9540] = {
-               .nchans = 2,
-               .enable = 0x4,
-               .muxtype = pca954x_ismux,
-       },
-       [pca_9543] = {
-               .nchans = 2,
-               .muxtype = pca954x_isswi,
-       },
-       [pca_9544] = {
-               .nchans = 4,
-               .enable = 0x4,
-               .muxtype = pca954x_ismux,
-       },
-       [pca_9545] = {
-               .nchans = 4,
-               .muxtype = pca954x_isswi,
-       },
-       [pca_9547] = {
-               .nchans = 8,
-               .enable = 0x8,
-               .muxtype = pca954x_ismux,
-       },
-       [pca_9548] = {
-               .nchans = 8,
-               .muxtype = pca954x_isswi,
-       },
-};
-
-static const struct i2c_device_id pca954x_id[] = {
-       { "pca9540", pca_9540 },
-       { "pca9542", pca_9540 },
-       { "pca9543", pca_9543 },
-       { "pca9544", pca_9544 },
-       { "pca9545", pca_9545 },
-       { "pca9546", pca_9545 },
-       { "pca9547", pca_9547 },
-       { "pca9548", pca_9548 },
-       { }
-};
-MODULE_DEVICE_TABLE(i2c, pca954x_id);
-
-/* Write to mux register. Don't use i2c_transfer()/i2c_smbus_xfer()
-   for this as they will try to lock adapter a second time */
-static int pca954x_reg_write(struct i2c_adapter *adap,
-                            struct i2c_client *client, u8 val)
-{
-       int ret = -ENODEV;
-
-       if (adap->algo->master_xfer) {
-               struct i2c_msg msg;
-               char buf[1];
-
-               msg.addr = client->addr;
-               msg.flags = 0;
-               msg.len = 1;
-               buf[0] = val;
-               msg.buf = buf;
-               ret = adap->algo->master_xfer(adap, &msg, 1);
-       } else {
-               union i2c_smbus_data data;
-               ret = adap->algo->smbus_xfer(adap, client->addr,
-                                            client->flags,
-                                            I2C_SMBUS_WRITE,
-                                            val, I2C_SMBUS_BYTE, &data);
-       }
-
-       return ret;
-}
-
-static int pca954x_select_chan(struct i2c_adapter *adap,
-                              void *client, u32 chan)
-{
-       struct pca954x *data = i2c_get_clientdata(client);
-       const struct chip_desc *chip = &chips[data->type];
-       u8 regval;
-       int ret = 0;
-
-       /* we make switches look like muxes, not sure how to be smarter */
-       if (chip->muxtype == pca954x_ismux)
-               regval = chan | chip->enable;
-       else
-               regval = 1 << chan;
-
-       /* Only select the channel if its different from the last channel */
-       if (data->last_chan != regval) {
-               ret = pca954x_reg_write(adap, client, regval);
-               data->last_chan = regval;
-       }
-
-       return ret;
-}
-
-static int pca954x_deselect_mux(struct i2c_adapter *adap,
-                               void *client, u32 chan)
-{
-       struct pca954x *data = i2c_get_clientdata(client);
-
-       /* Deselect active channel */
-       data->last_chan = 0;
-       return pca954x_reg_write(adap, client, data->last_chan);
-}
-
-/*
- * I2C init/probing/exit functions
- */
-static int pca954x_probe(struct i2c_client *client,
-                        const struct i2c_device_id *id)
-{
-       struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
-       struct pca954x_platform_data *pdata = client->dev.platform_data;
-       int num, force;
-       struct pca954x *data;
-       int ret = -ENODEV;
-
-       if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE))
-               goto err;
-
-       data = kzalloc(sizeof(struct pca954x), GFP_KERNEL);
-       if (!data) {
-               ret = -ENOMEM;
-               goto err;
-       }
-
-       i2c_set_clientdata(client, data);
-
-       /* Write the mux register at addr to verify
-        * that the mux is in fact present. This also
-        * initializes the mux to disconnected state.
-        */
-       if (i2c_smbus_write_byte(client, 0) < 0) {
-               dev_warn(&client->dev, "probe failed\n");
-               goto exit_free;
-       }
-
-       data->type = id->driver_data;
-       data->last_chan = 0;               /* force the first selection */
-
-       /* Now create an adapter for each channel */
-       for (num = 0; num < chips[data->type].nchans; num++) {
-               force = 0;                        /* dynamic adap number */
-               if (pdata) {
-                       if (num < pdata->num_modes)
-                               /* force static number */
-                               force = pdata->modes[num].adap_id;
-                       else
-                               /* discard unconfigured channels */
-                               break;
-               }
-
-               data->virt_adaps[num] =
-                       i2c_add_mux_adapter(adap, client,
-                               force, num, pca954x_select_chan,
-                               (pdata && pdata->modes[num].deselect_on_exit)
-                                       ? pca954x_deselect_mux : NULL);
-
-               if (data->virt_adaps[num] == NULL) {
-                       ret = -ENODEV;
-                       dev_err(&client->dev,
-                               "failed to register multiplexed adapter"
-                               " %d as bus %d\n", num, force);
-                       goto virt_reg_failed;
-               }
-       }
-
-       dev_info(&client->dev,
-                "registered %d multiplexed busses for I2C %s %s\n",
-                num, chips[data->type].muxtype == pca954x_ismux
-                               ? "mux" : "switch", client->name);
-
-       return 0;
-
-virt_reg_failed:
-       for (num--; num >= 0; num--)
-               i2c_del_mux_adapter(data->virt_adaps[num]);
-exit_free:
-       kfree(data);
-err:
-       return ret;
-}
-
-static int pca954x_remove(struct i2c_client *client)
-{
-       struct pca954x *data = i2c_get_clientdata(client);
-       const struct chip_desc *chip = &chips[data->type];
-       int i, err;
-
-       for (i = 0; i < chip->nchans; ++i)
-               if (data->virt_adaps[i]) {
-                       err = i2c_del_mux_adapter(data->virt_adaps[i]);
-                       if (err)
-                               return err;
-                       data->virt_adaps[i] = NULL;
-               }
-
-       kfree(data);
-       return 0;
-}
-
-static struct i2c_driver pca954x_driver = {
-       .driver         = {
-               .name   = "pca954x",
-               .owner  = THIS_MODULE,
-       },
-       .probe          = pca954x_probe,
-       .remove         = pca954x_remove,
-       .id_table       = pca954x_id,
-};
-
-module_i2c_driver(pca954x_driver);
-
-MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
-MODULE_DESCRIPTION("PCA954x I2C mux/switch driver");
-MODULE_LICENSE("GPL v2");
index 3063464474bf637e1a65d8dd52c144bb95ffba1d..57d19d4e0a2d94a22f68d13c6f0ee44a9ab8807b 100644 (file)
@@ -231,6 +231,7 @@ static int __devinit as5011_probe(struct i2c_client *client,
        }
 
        if (!i2c_check_functionality(client->adapter,
+                                    I2C_FUNC_NOSTART |
                                     I2C_FUNC_PROTOCOL_MANGLING)) {
                dev_err(&client->dev,
                        "need i2c bus that supports protocol mangling\n");
index 29fe1b2be1c1f46541fba50815f4bb8d349e8847..7f7b72464a37e547f7b199a89cab5202270e2163 100644 (file)
@@ -311,7 +311,15 @@ static void pxa27x_keypad_scan_direct(struct pxa27x_keypad *keypad)
        if (pdata->enable_rotary0 || pdata->enable_rotary1)
                pxa27x_keypad_scan_rotary(keypad);
 
-       new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
+       /*
+        * The KPDR_DK only output the key pin level, so it relates to board,
+        * and low level may be active.
+        */
+       if (pdata->direct_key_low_active)
+               new_state = ~KPDK_DK(kpdk) & keypad->direct_key_mask;
+       else
+               new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
+
        bits_changed = keypad->direct_key_state ^ new_state;
 
        if (bits_changed == 0)
@@ -383,7 +391,14 @@ static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
        if (pdata->direct_key_num > direct_key_num)
                direct_key_num = pdata->direct_key_num;
 
-       keypad->direct_key_mask = ((2 << direct_key_num) - 1) & ~mask;
+       /*
+        * Direct keys usage may not start from KP_DKIN0, check the platfrom
+        * mask data to config the specific.
+        */
+       if (pdata->direct_key_mask)
+               keypad->direct_key_mask = pdata->direct_key_mask;
+       else
+               keypad->direct_key_mask = ((1 << direct_key_num) - 1) & ~mask;
 
        /* enable direct key */
        if (direct_key_num)
@@ -399,7 +414,7 @@ static int pxa27x_keypad_open(struct input_dev *dev)
        struct pxa27x_keypad *keypad = input_get_drvdata(dev);
 
        /* Enable unit clock */
-       clk_enable(keypad->clk);
+       clk_prepare_enable(keypad->clk);
        pxa27x_keypad_config(keypad);
 
        return 0;
@@ -410,7 +425,7 @@ static void pxa27x_keypad_close(struct input_dev *dev)
        struct pxa27x_keypad *keypad = input_get_drvdata(dev);
 
        /* Disable clock unit */
-       clk_disable(keypad->clk);
+       clk_disable_unprepare(keypad->clk);
 }
 
 #ifdef CONFIG_PM
@@ -419,10 +434,14 @@ static int pxa27x_keypad_suspend(struct device *dev)
        struct platform_device *pdev = to_platform_device(dev);
        struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
 
-       clk_disable(keypad->clk);
-
+       /*
+        * If the keypad is used a wake up source, clock can not be disabled.
+        * Or it can not detect the key pressing.
+        */
        if (device_may_wakeup(&pdev->dev))
                enable_irq_wake(keypad->irq);
+       else
+               clk_disable_unprepare(keypad->clk);
 
        return 0;
 }
@@ -433,19 +452,24 @@ static int pxa27x_keypad_resume(struct device *dev)
        struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
        struct input_dev *input_dev = keypad->input_dev;
 
-       if (device_may_wakeup(&pdev->dev))
+       /*
+        * If the keypad is used as wake up source, the clock is not turned
+        * off. So do not need configure it again.
+        */
+       if (device_may_wakeup(&pdev->dev)) {
                disable_irq_wake(keypad->irq);
+       } else {
+               mutex_lock(&input_dev->mutex);
 
-       mutex_lock(&input_dev->mutex);
+               if (input_dev->users) {
+                       /* Enable unit clock */
+                       clk_prepare_enable(keypad->clk);
+                       pxa27x_keypad_config(keypad);
+               }
 
-       if (input_dev->users) {
-               /* Enable unit clock */
-               clk_enable(keypad->clk);
-               pxa27x_keypad_config(keypad);
+               mutex_unlock(&input_dev->mutex);
        }
 
-       mutex_unlock(&input_dev->mutex);
-
        return 0;
 }
 
index 47f18d6bce46826160e653821d7be213223c4bb4..6790a812a1db7ecf66b2c990687e162211624537 100644 (file)
@@ -73,7 +73,7 @@ static int __devinit wm831x_on_probe(struct platform_device *pdev)
 {
        struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
        struct wm831x_on *wm831x_on;
-       int irq = platform_get_irq(pdev, 0);
+       int irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
        int ret;
 
        wm831x_on = kzalloc(sizeof(struct wm831x_on), GFP_KERNEL);
index 4bc851a9dc3d6cf821f1c8ec05f7abf37fb4c781..e83410721e38b79f6d9a7a93e6f6a689ef73f1ab 100644 (file)
@@ -260,15 +260,16 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
         * If we have a direct IRQ use it, otherwise use the interrupt
         * from the WM831x IRQ controller.
         */
+       wm831x_ts->data_irq = wm831x_irq(wm831x,
+                                        platform_get_irq_byname(pdev,
+                                                                "TCHDATA"));
        if (pdata && pdata->data_irq)
                wm831x_ts->data_irq = pdata->data_irq;
-       else
-               wm831x_ts->data_irq = platform_get_irq_byname(pdev, "TCHDATA");
 
+       wm831x_ts->pd_irq = wm831x_irq(wm831x,
+                                      platform_get_irq_byname(pdev, "TCHPD"));
        if (pdata && pdata->pd_irq)
                wm831x_ts->pd_irq = pdata->pd_irq;
-       else
-               wm831x_ts->pd_irq = platform_get_irq_byname(pdev, "TCHPD");
 
        if (pdata)
                wm831x_ts->pressure = pdata->pressure;
index c69843742bb041e911a116e0f96558c20de2bb63..34089372753893ab3cf0a247d097fd3f4f67c688 100644 (file)
@@ -162,4 +162,25 @@ config TEGRA_IOMMU_SMMU
          space through the SMMU (System Memory Management Unit)
          hardware included on Tegra SoCs.
 
+config EXYNOS_IOMMU
+       bool "Exynos IOMMU Support"
+       depends on ARCH_EXYNOS && EXYNOS_DEV_SYSMMU
+       select IOMMU_API
+       help
+         Support for the IOMMU(System MMU) of Samsung Exynos application
+         processor family. This enables H/W multimedia accellerators to see
+         non-linear physical memory chunks as a linear memory in their
+         address spaces
+
+         If unsure, say N here.
+
+config EXYNOS_IOMMU_DEBUG
+       bool "Debugging log for Exynos IOMMU"
+       depends on EXYNOS_IOMMU
+       help
+         Select this to see the detailed log message that shows what
+         happens in the IOMMU driver
+
+         Say N unless you need kernel log message for IOMMU debugging
+
 endif # IOMMU_SUPPORT
index 3e5e82ae9f0de957a7c67f004776850cc7ef3536..76e54ef796dec14864f0ca5c05dd44cc4a5f570f 100644 (file)
@@ -10,3 +10,4 @@ obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
 obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
 obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
+obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
index a5bee8e2dfce7904b637f8fd3d9871c75b7824a5..d90a421e9caccbbfc38a3d2eb4dd20ef2a50c77b 100644 (file)
@@ -450,12 +450,27 @@ static void dump_command(unsigned long phys_addr)
 
 static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 {
-       u32 *event = __evt;
-       int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
-       int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
-       int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
-       int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
-       u64 address = (u64)(((u64)event[3]) << 32) | event[2];
+       int type, devid, domid, flags;
+       volatile u32 *event = __evt;
+       int count = 0;
+       u64 address;
+
+retry:
+       type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
+       devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+       domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+       flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+       address = (u64)(((u64)event[3]) << 32) | event[2];
+
+       if (type == 0) {
+               /* Did we hit the erratum? */
+               if (++count == LOOP_TIMEOUT) {
+                       pr_err("AMD-Vi: No event written to event log\n");
+                       return;
+               }
+               udelay(1);
+               goto retry;
+       }
 
        printk(KERN_ERR "AMD-Vi: Event logged [");
 
@@ -508,6 +523,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
        default:
                printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
        }
+
+       memset(__evt, 0, 4 * sizeof(u32));
 }
 
 static void iommu_poll_events(struct amd_iommu *iommu)
@@ -2035,20 +2052,20 @@ out_err:
 }
 
 /* FIXME: Move this to PCI code */
-#define PCI_PRI_TLP_OFF                (1 << 2)
+#define PCI_PRI_TLP_OFF                (1 << 15)
 
 bool pci_pri_tlp_required(struct pci_dev *pdev)
 {
-       u16 control;
+       u16 status;
        int pos;
 
        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
        if (!pos)
                return false;
 
-       pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+       pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
 
-       return (control & PCI_PRI_TLP_OFF) ? true : false;
+       return (status & PCI_PRI_TLP_OFF) ? true : false;
 }
 
 /*
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
new file mode 100644 (file)
index 0000000..9a114b9
--- /dev/null
@@ -0,0 +1,1076 @@
+/* linux/drivers/iommu/exynos_iommu.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/iommu.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/export.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+
+#include <mach/sysmmu.h>
+
+/* We does not consider super section mapping (16MB) */
+#define SECT_ORDER 20
+#define LPAGE_ORDER 16
+#define SPAGE_ORDER 12
+
+#define SECT_SIZE (1 << SECT_ORDER)
+#define LPAGE_SIZE (1 << LPAGE_ORDER)
+#define SPAGE_SIZE (1 << SPAGE_ORDER)
+
+#define SECT_MASK (~(SECT_SIZE - 1))
+#define LPAGE_MASK (~(LPAGE_SIZE - 1))
+#define SPAGE_MASK (~(SPAGE_SIZE - 1))
+
+#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
+#define lv1ent_page(sent) ((*(sent) & 3) == 1)
+#define lv1ent_section(sent) ((*(sent) & 3) == 2)
+
+#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
+#define lv2ent_small(pent) ((*(pent) & 2) == 2)
+#define lv2ent_large(pent) ((*(pent) & 3) == 1)
+
+#define section_phys(sent) (*(sent) & SECT_MASK)
+#define section_offs(iova) ((iova) & 0xFFFFF)
+#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
+#define lpage_offs(iova) ((iova) & 0xFFFF)
+#define spage_phys(pent) (*(pent) & SPAGE_MASK)
+#define spage_offs(iova) ((iova) & 0xFFF)
+
+#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
+#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
+
+#define NUM_LV1ENTRIES 4096
+#define NUM_LV2ENTRIES 256
+
+#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
+
+#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
+
+#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)
+
+#define mk_lv1ent_sect(pa) ((pa) | 2)
+#define mk_lv1ent_page(pa) ((pa) | 1)
+#define mk_lv2ent_lpage(pa) ((pa) | 1)
+#define mk_lv2ent_spage(pa) ((pa) | 2)
+
+#define CTRL_ENABLE    0x5
+#define CTRL_BLOCK     0x7
+#define CTRL_DISABLE   0x0
+
+#define REG_MMU_CTRL           0x000
+#define REG_MMU_CFG            0x004
+#define REG_MMU_STATUS         0x008
+#define REG_MMU_FLUSH          0x00C
+#define REG_MMU_FLUSH_ENTRY    0x010
+#define REG_PT_BASE_ADDR       0x014
+#define REG_INT_STATUS         0x018
+#define REG_INT_CLEAR          0x01C
+
+#define REG_PAGE_FAULT_ADDR    0x024
+#define REG_AW_FAULT_ADDR      0x028
+#define REG_AR_FAULT_ADDR      0x02C
+#define REG_DEFAULT_SLAVE_ADDR 0x030
+
+#define REG_MMU_VERSION                0x034
+
+#define REG_PB0_SADDR          0x04C
+#define REG_PB0_EADDR          0x050
+#define REG_PB1_SADDR          0x054
+#define REG_PB1_EADDR          0x058
+
+static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
+{
+       return pgtable + lv1ent_offset(iova);
+}
+
+static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
+{
+       return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
+}
+
+enum exynos_sysmmu_inttype {
+       SYSMMU_PAGEFAULT,
+       SYSMMU_AR_MULTIHIT,
+       SYSMMU_AW_MULTIHIT,
+       SYSMMU_BUSERROR,
+       SYSMMU_AR_SECURITY,
+       SYSMMU_AR_ACCESS,
+       SYSMMU_AW_SECURITY,
+       SYSMMU_AW_PROTECTION, /* 7 */
+       SYSMMU_FAULT_UNKNOWN,
+       SYSMMU_FAULTS_NUM
+};
+
+/*
+ * @itype: type of fault.
+ * @pgtable_base: the physical address of page table base. This is 0 if @itype
+ *                is SYSMMU_BUSERROR.
+ * @fault_addr: the device (virtual) address that the System MMU tried to
+ *             translated. This is 0 if @itype is SYSMMU_BUSERROR.
+ */
+typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
+                       unsigned long pgtable_base, unsigned long fault_addr);
+
+static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
+       REG_PAGE_FAULT_ADDR,
+       REG_AR_FAULT_ADDR,
+       REG_AW_FAULT_ADDR,
+       REG_DEFAULT_SLAVE_ADDR,
+       REG_AR_FAULT_ADDR,
+       REG_AR_FAULT_ADDR,
+       REG_AW_FAULT_ADDR,
+       REG_AW_FAULT_ADDR
+};
+
+static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
+       "PAGE FAULT",
+       "AR MULTI-HIT FAULT",
+       "AW MULTI-HIT FAULT",
+       "BUS ERROR",
+       "AR SECURITY PROTECTION FAULT",
+       "AR ACCESS PROTECTION FAULT",
+       "AW SECURITY PROTECTION FAULT",
+       "AW ACCESS PROTECTION FAULT",
+       "UNKNOWN FAULT"
+};
+
+struct exynos_iommu_domain {
+       struct list_head clients; /* list of sysmmu_drvdata.node */
+       unsigned long *pgtable; /* lv1 page table, 16KB */
+       short *lv2entcnt; /* free lv2 entry counter for each section */
+       spinlock_t lock; /* lock for this structure */
+       spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
+};
+
+struct sysmmu_drvdata {
+       struct list_head node; /* entry of exynos_iommu_domain.clients */
+       struct device *sysmmu;  /* System MMU's device descriptor */
+       struct device *dev;     /* Owner of system MMU */
+       char *dbgname;
+       int nsfrs;
+       void __iomem **sfrbases;
+       struct clk *clk[2];
+       int activations;
+       rwlock_t lock;
+       struct iommu_domain *domain;
+       sysmmu_fault_handler_t fault_handler;
+       unsigned long pgtable;
+};
+
+static bool set_sysmmu_active(struct sysmmu_drvdata *data)
+{
+       /* return true if the System MMU was not active previously
+          and it needs to be initialized */
+       return ++data->activations == 1;
+}
+
+static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
+{
+       /* return true if the System MMU is needed to be disabled */
+       BUG_ON(data->activations < 1);
+       return --data->activations == 0;
+}
+
+static bool is_sysmmu_active(struct sysmmu_drvdata *data)
+{
+       return data->activations > 0;
+}
+
+static void sysmmu_unblock(void __iomem *sfrbase)
+{
+       __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
+}
+
+static bool sysmmu_block(void __iomem *sfrbase)
+{
+       int i = 120;
+
+       __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
+       while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
+               --i;
+
+       if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
+               sysmmu_unblock(sfrbase);
+               return false;
+       }
+
+       return true;
+}
+
+static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
+{
+       __raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
+}
+
+static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
+                                               unsigned long iova)
+{
+       __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
+}
+
+static void __sysmmu_set_ptbase(void __iomem *sfrbase,
+                                      unsigned long pgd)
+{
+       __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
+       __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
+
+       __sysmmu_tlb_invalidate(sfrbase);
+}
+
+static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
+                                               unsigned long size, int idx)
+{
+       __raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
+       __raw_writel(size - 1 + base,  sfrbase + REG_PB0_EADDR + idx * 8);
+}
+
+void exynos_sysmmu_set_prefbuf(struct device *dev,
+                               unsigned long base0, unsigned long size0,
+                               unsigned long base1, unsigned long size1)
+{
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+       unsigned long flags;
+       int i;
+
+       BUG_ON((base0 + size0) <= base0);
+       BUG_ON((size1 > 0) && ((base1 + size1) <= base1));
+
+       read_lock_irqsave(&data->lock, flags);
+       if (!is_sysmmu_active(data))
+               goto finish;
+
+       for (i = 0; i < data->nsfrs; i++) {
+               if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
+                       if (!sysmmu_block(data->sfrbases[i]))
+                               continue;
+
+                       if (size1 == 0) {
+                               if (size0 <= SZ_128K) {
+                                       base1 = base0;
+                                       size1 = size0;
+                               } else {
+                                       size1 = size0 -
+                                               ALIGN(size0 / 2, SZ_64K);
+                                       size0 = size0 - size1;
+                                       base1 = base0 + size0;
+                               }
+                       }
+
+                       __sysmmu_set_prefbuf(
+                                       data->sfrbases[i], base0, size0, 0);
+                       __sysmmu_set_prefbuf(
+                                       data->sfrbases[i], base1, size1, 1);
+
+                       sysmmu_unblock(data->sfrbases[i]);
+               }
+       }
+finish:
+       read_unlock_irqrestore(&data->lock, flags);
+}
+
+static void __set_fault_handler(struct sysmmu_drvdata *data,
+                                       sysmmu_fault_handler_t handler)
+{
+       unsigned long flags;
+
+       write_lock_irqsave(&data->lock, flags);
+       data->fault_handler = handler;
+       write_unlock_irqrestore(&data->lock, flags);
+}
+
+void exynos_sysmmu_set_fault_handler(struct device *dev,
+                                       sysmmu_fault_handler_t handler)
+{
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+       __set_fault_handler(data, handler);
+}
+
+static int default_fault_handler(enum exynos_sysmmu_inttype itype,
+                    unsigned long pgtable_base, unsigned long fault_addr)
+{
+       unsigned long *ent;
+
+       if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
+               itype = SYSMMU_FAULT_UNKNOWN;
+
+       pr_err("%s occured at 0x%lx(Page table base: 0x%lx)\n",
+                       sysmmu_fault_name[itype], fault_addr, pgtable_base);
+
+       ent = section_entry(__va(pgtable_base), fault_addr);
+       pr_err("\tLv1 entry: 0x%lx\n", *ent);
+
+       if (lv1ent_page(ent)) {
+               ent = page_entry(ent, fault_addr);
+               pr_err("\t Lv2 entry: 0x%lx\n", *ent);
+       }
+
+       pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
+
+       BUG();
+
+       return 0;
+}
+
+static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
+{
+       /* SYSMMU is in blocked when interrupt occurred. */
+       struct sysmmu_drvdata *data = dev_id;
+       struct resource *irqres;
+       struct platform_device *pdev;
+       enum exynos_sysmmu_inttype itype;
+       unsigned long addr = -1;
+
+       int i, ret = -ENOSYS;
+
+       read_lock(&data->lock);
+
+       WARN_ON(!is_sysmmu_active(data));
+
+       pdev = to_platform_device(data->sysmmu);
+       for (i = 0; i < (pdev->num_resources / 2); i++) {
+               irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+               if (irqres && ((int)irqres->start == irq))
+                       break;
+       }
+
+       if (i == pdev->num_resources) {
+               itype = SYSMMU_FAULT_UNKNOWN;
+       } else {
+               itype = (enum exynos_sysmmu_inttype)
+                       __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
+               if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
+                       itype = SYSMMU_FAULT_UNKNOWN;
+               else
+                       addr = __raw_readl(
+                               data->sfrbases[i] + fault_reg_offset[itype]);
+       }
+
+       if (data->domain)
+               ret = report_iommu_fault(data->domain, data->dev,
+                               addr, itype);
+
+       if ((ret == -ENOSYS) && data->fault_handler) {
+               unsigned long base = data->pgtable;
+               if (itype != SYSMMU_FAULT_UNKNOWN)
+                       base = __raw_readl(
+                                       data->sfrbases[i] + REG_PT_BASE_ADDR);
+               ret = data->fault_handler(itype, base, addr);
+       }
+
+       if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
+               __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
+       else
+               dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
+                               data->dbgname, sysmmu_fault_name[itype]);
+
+       if (itype != SYSMMU_FAULT_UNKNOWN)
+               sysmmu_unblock(data->sfrbases[i]);
+
+       read_unlock(&data->lock);
+
+       return IRQ_HANDLED;
+}
+
+static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
+{
+       unsigned long flags;
+       bool disabled = false;
+       int i;
+
+       write_lock_irqsave(&data->lock, flags);
+
+       if (!set_sysmmu_inactive(data))
+               goto finish;
+
+       for (i = 0; i < data->nsfrs; i++)
+               __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);
+
+       if (data->clk[1])
+               clk_disable(data->clk[1]);
+       if (data->clk[0])
+               clk_disable(data->clk[0]);
+
+       disabled = true;
+       data->pgtable = 0;
+       data->domain = NULL;
+finish:
+       write_unlock_irqrestore(&data->lock, flags);
+
+       if (disabled)
+               dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
+       else
+               dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
+                                       data->dbgname, data->activations);
+
+       return disabled;
+}
+
+/* __exynos_sysmmu_enable: Enables System MMU
+ *
+ * returns -error if an error occurred and System MMU is not enabled,
+ * 0 if the System MMU has been just enabled and 1 if System MMU was already
+ * enabled before.
+ */
+static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
+                       unsigned long pgtable, struct iommu_domain *domain)
+{
+       int i, ret = 0;
+       unsigned long flags;
+
+       write_lock_irqsave(&data->lock, flags);
+
+       if (!set_sysmmu_active(data)) {
+               if (WARN_ON(pgtable != data->pgtable)) {
+                       ret = -EBUSY;
+                       set_sysmmu_inactive(data);
+               } else {
+                       ret = 1;
+               }
+
+               dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
+               goto finish;
+       }
+
+       if (data->clk[0])
+               clk_enable(data->clk[0]);
+       if (data->clk[1])
+               clk_enable(data->clk[1]);
+
+       data->pgtable = pgtable;
+
+       for (i = 0; i < data->nsfrs; i++) {
+               __sysmmu_set_ptbase(data->sfrbases[i], pgtable);
+
+               if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
+                       /* System MMU version is 3.x */
+                       __raw_writel((1 << 12) | (2 << 28),
+                                       data->sfrbases[i] + REG_MMU_CFG);
+                       __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
+                       __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
+               }
+
+               __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
+       }
+
+       data->domain = domain;
+
+       dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
+finish:
+       write_unlock_irqrestore(&data->lock, flags);
+
+       return ret;
+}
+
+int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
+{
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+       int ret;
+
+       BUG_ON(!memblock_is_memory(pgtable));
+
+       ret = pm_runtime_get_sync(data->sysmmu);
+       if (ret < 0) {
+               dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
+               return ret;
+       }
+
+       ret = __exynos_sysmmu_enable(data, pgtable, NULL);
+       if (WARN_ON(ret < 0)) {
+               pm_runtime_put(data->sysmmu);
+               dev_err(data->sysmmu,
+                       "(%s) Already enabled with page table %#lx\n",
+                       data->dbgname, data->pgtable);
+       } else {
+               data->dev = dev;
+       }
+
+       return ret;
+}
+
+bool exynos_sysmmu_disable(struct device *dev)
+{
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+       bool disabled;
+
+       disabled = __exynos_sysmmu_disable(data);
+       pm_runtime_put(data->sysmmu);
+
+       return disabled;
+}
+
+static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
+{
+       unsigned long flags;
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+       read_lock_irqsave(&data->lock, flags);
+
+       if (is_sysmmu_active(data)) {
+               int i;
+               for (i = 0; i < data->nsfrs; i++) {
+                       if (sysmmu_block(data->sfrbases[i])) {
+                               __sysmmu_tlb_invalidate_entry(
+                                               data->sfrbases[i], iova);
+                               sysmmu_unblock(data->sfrbases[i]);
+                       }
+               }
+       } else {
+               dev_dbg(data->sysmmu,
+                       "(%s) Disabled. Skipping invalidating TLB.\n",
+                       data->dbgname);
+       }
+
+       read_unlock_irqrestore(&data->lock, flags);
+}
+
+void exynos_sysmmu_tlb_invalidate(struct device *dev)
+{
+       unsigned long flags;
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+       read_lock_irqsave(&data->lock, flags);
+
+       if (is_sysmmu_active(data)) {
+               int i;
+               for (i = 0; i < data->nsfrs; i++) {
+                       if (sysmmu_block(data->sfrbases[i])) {
+                               __sysmmu_tlb_invalidate(data->sfrbases[i]);
+                               sysmmu_unblock(data->sfrbases[i]);
+                       }
+               }
+       } else {
+               dev_dbg(data->sysmmu,
+                       "(%s) Disabled. Skipping invalidating TLB.\n",
+                       data->dbgname);
+       }
+
+       read_unlock_irqrestore(&data->lock, flags);
+}
+
+static int exynos_sysmmu_probe(struct platform_device *pdev)
+{
+       int i, ret;
+       struct device *dev;
+       struct sysmmu_drvdata *data;
+
+       dev = &pdev->dev;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data) {
+               dev_dbg(dev, "Not enough memory\n");
+               ret = -ENOMEM;
+               goto err_alloc;
+       }
+
+       ret = dev_set_drvdata(dev, data);
+       if (ret) {
+               dev_dbg(dev, "Unabled to initialize driver data\n");
+               goto err_init;
+       }
+
+       data->nsfrs = pdev->num_resources / 2;
+       data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
+                                                               GFP_KERNEL);
+       if (data->sfrbases == NULL) {
+               dev_dbg(dev, "Not enough memory\n");
+               ret = -ENOMEM;
+               goto err_init;
+       }
+
+       for (i = 0; i < data->nsfrs; i++) {
+               struct resource *res;
+               res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+               if (!res) {
+                       dev_dbg(dev, "Unable to find IOMEM region\n");
+                       ret = -ENOENT;
+                       goto err_res;
+               }
+
+               data->sfrbases[i] = ioremap(res->start, resource_size(res));
+               if (!data->sfrbases[i]) {
+                       dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
+                                                       res->start);
+                       ret = -ENOENT;
+                       goto err_res;
+               }
+       }
+
+       for (i = 0; i < data->nsfrs; i++) {
+               ret = platform_get_irq(pdev, i);
+               if (ret <= 0) {
+                       dev_dbg(dev, "Unable to find IRQ resource\n");
+                       goto err_irq;
+               }
+
+               ret = request_irq(ret, exynos_sysmmu_irq, 0,
+                                       dev_name(dev), data);
+               if (ret) {
+                       dev_dbg(dev, "Unabled to register interrupt handler\n");
+                       goto err_irq;
+               }
+       }
+
+       if (dev_get_platdata(dev)) {
+               char *deli, *beg;
+               struct sysmmu_platform_data *platdata = dev_get_platdata(dev);
+
+               beg = platdata->clockname;
+
+               for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
+                       /* NOTHING */;
+
+               if (*deli == '\0')
+                       deli = NULL;
+               else
+                       *deli = '\0';
+
+               data->clk[0] = clk_get(dev, beg);
+               if (IS_ERR(data->clk[0])) {
+                       data->clk[0] = NULL;
+                       dev_dbg(dev, "No clock descriptor registered\n");
+               }
+
+               if (data->clk[0] && deli) {
+                       *deli = ',';
+                       data->clk[1] = clk_get(dev, deli + 1);
+                       if (IS_ERR(data->clk[1]))
+                               data->clk[1] = NULL;
+               }
+
+               data->dbgname = platdata->dbgname;
+       }
+
+       data->sysmmu = dev;
+       rwlock_init(&data->lock);
+       INIT_LIST_HEAD(&data->node);
+
+       __set_fault_handler(data, &default_fault_handler);
+
+       if (dev->parent)
+               pm_runtime_enable(dev);
+
+       dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
+       return 0;
+err_irq:
+       while (i-- > 0) {
+               int irq;
+
+               irq = platform_get_irq(pdev, i);
+               free_irq(irq, data);
+       }
+err_res:
+       while (data->nsfrs-- > 0)
+               iounmap(data->sfrbases[data->nsfrs]);
+       kfree(data->sfrbases);
+err_init:
+       kfree(data);
+err_alloc:
+       dev_err(dev, "Failed to initialize\n");
+       return ret;
+}
+
+static struct platform_driver exynos_sysmmu_driver = {
+       .probe          = exynos_sysmmu_probe,
+       .driver         = {
+               .owner          = THIS_MODULE,
+               .name           = "exynos-sysmmu",
+       }
+};
+
+static inline void pgtable_flush(void *vastart, void *vaend)
+{
+       dmac_flush_range(vastart, vaend);
+       outer_flush_range(virt_to_phys(vastart),
+                               virt_to_phys(vaend));
+}
+
+static int exynos_iommu_domain_init(struct iommu_domain *domain)
+{
+       struct exynos_iommu_domain *priv;
+
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->pgtable = (unsigned long *)__get_free_pages(
+                                               GFP_KERNEL | __GFP_ZERO, 2);
+       if (!priv->pgtable)
+               goto err_pgtable;
+
+       priv->lv2entcnt = (short *)__get_free_pages(
+                                               GFP_KERNEL | __GFP_ZERO, 1);
+       if (!priv->lv2entcnt)
+               goto err_counter;
+
+       pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
+
+       spin_lock_init(&priv->lock);
+       spin_lock_init(&priv->pgtablelock);
+       INIT_LIST_HEAD(&priv->clients);
+
+       domain->priv = priv;
+       return 0;
+
+err_counter:
+       free_pages((unsigned long)priv->pgtable, 2);
+err_pgtable:
+       kfree(priv);
+       return -ENOMEM;
+}
+
+static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
+{
+       struct exynos_iommu_domain *priv = domain->priv;
+       struct sysmmu_drvdata *data;
+       unsigned long flags;
+       int i;
+
+       WARN_ON(!list_empty(&priv->clients));
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       list_for_each_entry(data, &priv->clients, node) {
+               while (!exynos_sysmmu_disable(data->dev))
+                       ; /* until System MMU is actually disabled */
+       }
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       for (i = 0; i < NUM_LV1ENTRIES; i++)
+               if (lv1ent_page(priv->pgtable + i))
+                       kfree(__va(lv2table_base(priv->pgtable + i)));
+
+       free_pages((unsigned long)priv->pgtable, 2);
+       free_pages((unsigned long)priv->lv2entcnt, 1);
+       kfree(domain->priv);
+       domain->priv = NULL;
+}
+
+static int exynos_iommu_attach_device(struct iommu_domain *domain,
+                                  struct device *dev)
+{
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+       struct exynos_iommu_domain *priv = domain->priv;
+       unsigned long flags;
+       int ret;
+
+       ret = pm_runtime_get_sync(data->sysmmu);
+       if (ret < 0)
+               return ret;
+
+       ret = 0;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);
+
+       if (ret == 0) {
+               /* 'data->node' must not be appeared in priv->clients */
+               BUG_ON(!list_empty(&data->node));
+               data->dev = dev;
+               list_add_tail(&data->node, &priv->clients);
+       }
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       if (ret < 0) {
+               dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
+                               __func__, __pa(priv->pgtable));
+               pm_runtime_put(data->sysmmu);
+       } else if (ret > 0) {
+               dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
+                                       __func__, __pa(priv->pgtable));
+       } else {
+               dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
+                                       __func__, __pa(priv->pgtable));
+       }
+
+       return ret;
+}
+
+static void exynos_iommu_detach_device(struct iommu_domain *domain,
+                                   struct device *dev)
+{
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+       struct exynos_iommu_domain *priv = domain->priv;
+       struct list_head *pos;
+       unsigned long flags;
+       bool found = false;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       list_for_each(pos, &priv->clients) {
+               if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
+                       found = true;
+                       break;
+               }
+       }
+
+       if (!found)
+               goto finish;
+
+       if (__exynos_sysmmu_disable(data)) {
+               dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
+                                       __func__, __pa(priv->pgtable));
+               list_del(&data->node);
+               INIT_LIST_HEAD(&data->node);
+
+       } else {
+               dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
+                                       __func__, __pa(priv->pgtable));
+       }
+
+finish:
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       if (found)
+               pm_runtime_put(data->sysmmu);
+}
+
+static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
+                                       short *pgcounter)
+{
+       if (lv1ent_fault(sent)) {
+               unsigned long *pent;
+
+               pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
+               BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
+               if (!pent)
+                       return NULL;
+
+               *sent = mk_lv1ent_page(__pa(pent));
+               *pgcounter = NUM_LV2ENTRIES;
+               pgtable_flush(pent, pent + NUM_LV2ENTRIES);
+               pgtable_flush(sent, sent + 1);
+       }
+
+       return page_entry(sent, iova);
+}
+
+static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
+{
+       if (lv1ent_section(sent))
+               return -EADDRINUSE;
+
+       if (lv1ent_page(sent)) {
+               if (*pgcnt != NUM_LV2ENTRIES)
+                       return -EADDRINUSE;
+
+               kfree(page_entry(sent, 0));
+
+               *pgcnt = 0;
+       }
+
+       *sent = mk_lv1ent_sect(paddr);
+
+       pgtable_flush(sent, sent + 1);
+
+       return 0;
+}
+
+static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
+                                                               short *pgcnt)
+{
+       if (size == SPAGE_SIZE) {
+               if (!lv2ent_fault(pent))
+                       return -EADDRINUSE;
+
+               *pent = mk_lv2ent_spage(paddr);
+               pgtable_flush(pent, pent + 1);
+               *pgcnt -= 1;
+       } else { /* size == LPAGE_SIZE */
+               int i;
+               for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
+                       if (!lv2ent_fault(pent)) {
+                               memset(pent, 0, sizeof(*pent) * i);
+                               return -EADDRINUSE;
+                       }
+
+                       *pent = mk_lv2ent_lpage(paddr);
+               }
+               pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
+               *pgcnt -= SPAGES_PER_LPAGE;
+       }
+
+       return 0;
+}
+
+static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
+                        phys_addr_t paddr, size_t size, int prot)
+{
+       struct exynos_iommu_domain *priv = domain->priv;
+       unsigned long *entry;
+       unsigned long flags;
+       int ret = -ENOMEM;
+
+       BUG_ON(priv->pgtable == NULL);
+
+       spin_lock_irqsave(&priv->pgtablelock, flags);
+
+       entry = section_entry(priv->pgtable, iova);
+
+       if (size == SECT_SIZE) {
+               ret = lv1set_section(entry, paddr,
+                                       &priv->lv2entcnt[lv1ent_offset(iova)]);
+       } else {
+               unsigned long *pent;
+
+               pent = alloc_lv2entry(entry, iova,
+                                       &priv->lv2entcnt[lv1ent_offset(iova)]);
+
+               if (!pent)
+                       ret = -ENOMEM;
+               else
+                       ret = lv2set_page(pent, paddr, size,
+                                       &priv->lv2entcnt[lv1ent_offset(iova)]);
+       }
+
+       if (ret) {
+               pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
+                                                       __func__, iova, size);
+       }
+
+       spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+       return ret;
+}
+
+static size_t exynos_iommu_unmap(struct iommu_domain *domain,
+                                              unsigned long iova, size_t size)
+{
+       struct exynos_iommu_domain *priv = domain->priv;
+       struct sysmmu_drvdata *data;
+       unsigned long flags;
+       unsigned long *ent;
+
+       BUG_ON(priv->pgtable == NULL);
+
+       spin_lock_irqsave(&priv->pgtablelock, flags);
+
+       ent = section_entry(priv->pgtable, iova);
+
+       if (lv1ent_section(ent)) {
+               BUG_ON(size < SECT_SIZE);
+
+               *ent = 0;
+               pgtable_flush(ent, ent + 1);
+               size = SECT_SIZE;
+               goto done;
+       }
+
+       if (unlikely(lv1ent_fault(ent))) {
+               if (size > SECT_SIZE)
+                       size = SECT_SIZE;
+               goto done;
+       }
+
+       /* lv1ent_page(sent) == true here */
+
+       ent = page_entry(ent, iova);
+
+       if (unlikely(lv2ent_fault(ent))) {
+               size = SPAGE_SIZE;
+               goto done;
+       }
+
+       if (lv2ent_small(ent)) {
+               *ent = 0;
+               size = SPAGE_SIZE;
+               priv->lv2entcnt[lv1ent_offset(iova)] += 1;
+               goto done;
+       }
+
+       /* lv1ent_large(ent) == true here */
+       BUG_ON(size < LPAGE_SIZE);
+
+       memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
+
+       size = LPAGE_SIZE;
+       priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
+done:
+       spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+       spin_lock_irqsave(&priv->lock, flags);
+       list_for_each_entry(data, &priv->clients, node)
+               sysmmu_tlb_invalidate_entry(data->dev, iova);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+
+       return size;
+}
+
+static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
+                                         unsigned long iova)
+{
+       struct exynos_iommu_domain *priv = domain->priv;
+       unsigned long *entry;
+       unsigned long flags;
+       phys_addr_t phys = 0;
+
+       spin_lock_irqsave(&priv->pgtablelock, flags);
+
+       entry = section_entry(priv->pgtable, iova);
+
+       if (lv1ent_section(entry)) {
+               phys = section_phys(entry) + section_offs(iova);
+       } else if (lv1ent_page(entry)) {
+               entry = page_entry(entry, iova);
+
+               if (lv2ent_large(entry))
+                       phys = lpage_phys(entry) + lpage_offs(iova);
+               else if (lv2ent_small(entry))
+                       phys = spage_phys(entry) + spage_offs(iova);
+       }
+
+       spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+       return phys;
+}
+
+static struct iommu_ops exynos_iommu_ops = {
+       .domain_init = &exynos_iommu_domain_init,
+       .domain_destroy = &exynos_iommu_domain_destroy,
+       .attach_dev = &exynos_iommu_attach_device,
+       .detach_dev = &exynos_iommu_detach_device,
+       .map = &exynos_iommu_map,
+       .unmap = &exynos_iommu_unmap,
+       .iova_to_phys = &exynos_iommu_iova_to_phys,
+       .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
+};
+
+static int __init exynos_iommu_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&exynos_sysmmu_driver);
+
+       if (ret == 0)
+               bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
+
+       return ret;
+}
+subsys_initcall(exynos_iommu_init);
index bf2fbaad5e2295a2417c26ef1224ea8a51e8238e..b12af2ff8c5407aa699eca003d4e680334e9e970 100644 (file)
@@ -1907,6 +1907,15 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
 }
 
+static inline void unlink_domain_info(struct device_domain_info *info)
+{
+       assert_spin_locked(&device_domain_lock);
+       list_del(&info->link);
+       list_del(&info->global);
+       if (info->dev)
+               info->dev->dev.archdata.iommu = NULL;
+}
+
 static void domain_remove_dev_info(struct dmar_domain *domain)
 {
        struct device_domain_info *info;
@@ -1917,10 +1926,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
        while (!list_empty(&domain->devices)) {
                info = list_entry(domain->devices.next,
                        struct device_domain_info, link);
-               list_del(&info->link);
-               list_del(&info->global);
-               if (info->dev)
-                       info->dev->dev.archdata.iommu = NULL;
+               unlink_domain_info(info);
                spin_unlock_irqrestore(&device_domain_lock, flags);
 
                iommu_disable_dev_iotlb(info);
@@ -2287,12 +2293,6 @@ static int domain_add_dev_info(struct dmar_domain *domain,
        if (!info)
                return -ENOMEM;
 
-       ret = domain_context_mapping(domain, pdev, translation);
-       if (ret) {
-               free_devinfo_mem(info);
-               return ret;
-       }
-
        info->segment = pci_domain_nr(pdev->bus);
        info->bus = pdev->bus->number;
        info->devfn = pdev->devfn;
@@ -2305,6 +2305,15 @@ static int domain_add_dev_info(struct dmar_domain *domain,
        pdev->dev.archdata.iommu = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);
 
+       ret = domain_context_mapping(domain, pdev, translation);
+       if (ret) {
+               spin_lock_irqsave(&device_domain_lock, flags);
+               unlink_domain_info(info);
+               spin_unlock_irqrestore(&device_domain_lock, flags);
+               free_devinfo_mem(info);
+               return ret;
+       }
+
        return 0;
 }
 
@@ -3728,10 +3737,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
                if (info->segment == pci_domain_nr(pdev->bus) &&
                    info->bus == pdev->bus->number &&
                    info->devfn == pdev->devfn) {
-                       list_del(&info->link);
-                       list_del(&info->global);
-                       if (info->dev)
-                               info->dev->dev.archdata.iommu = NULL;
+                       unlink_domain_info(info);
                        spin_unlock_irqrestore(&device_domain_lock, flags);
 
                        iommu_disable_dev_iotlb(info);
@@ -3786,11 +3792,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
        while (!list_empty(&domain->devices)) {
                info = list_entry(domain->devices.next,
                        struct device_domain_info, link);
-               list_del(&info->link);
-               list_del(&info->global);
-               if (info->dev)
-                       info->dev->dev.archdata.iommu = NULL;
-
+               unlink_domain_info(info);
                spin_unlock_irqrestore(&device_domain_lock, flags1);
 
                iommu_disable_dev_iotlb(info);
index 2198b2dbbcd3ad964b03a13dd6fd8dd336f27bed..8b9ded88e6f5322c18c02ec6e685b05d8decfd29 100644 (file)
@@ -119,6 +119,7 @@ EXPORT_SYMBOL_GPL(iommu_present);
  * iommu_set_fault_handler() - set a fault handler for an iommu domain
  * @domain: iommu domain
  * @handler: fault handler
+ * @token: user data, will be passed back to the fault handler
  *
  * This function should be used by IOMMU users which want to be notified
  * whenever an IOMMU fault happens.
@@ -127,11 +128,13 @@ EXPORT_SYMBOL_GPL(iommu_present);
  * error code otherwise.
  */
 void iommu_set_fault_handler(struct iommu_domain *domain,
-                                       iommu_fault_handler_t handler)
+                                       iommu_fault_handler_t handler,
+                                       void *token)
 {
        BUG_ON(!domain);
 
        domain->handler = handler;
+       domain->handler_token = token;
 }
 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
 
index 6899dcd02dfa0e35df014c42651ba4c5b4f6bb67..e70ee2b59df95b9427c4c1594384776d97103015 100644 (file)
  * @pgtable:   the page table
  * @iommu_dev: an omap iommu device attached to this domain. only a single
  *             iommu device can be attached for now.
+ * @dev:       Device using this domain.
  * @lock:      domain lock, should be taken when attaching/detaching
  */
 struct omap_iommu_domain {
        u32 *pgtable;
        struct omap_iommu *iommu_dev;
+       struct device *dev;
        spinlock_t lock;
 };
 
@@ -1081,6 +1083,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
        }
 
        omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
+       omap_domain->dev = dev;
        oiommu->domain = domain;
 
 out:
@@ -1088,19 +1091,16 @@ out:
        return ret;
 }
 
-static void omap_iommu_detach_dev(struct iommu_domain *domain,
-                                struct device *dev)
+static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
+                       struct device *dev)
 {
-       struct omap_iommu_domain *omap_domain = domain->priv;
-       struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
        struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
-
-       spin_lock(&omap_domain->lock);
+       struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
 
        /* only a single device is supported per domain for now */
        if (omap_domain->iommu_dev != oiommu) {
                dev_err(dev, "invalid iommu device\n");
-               goto out;
+               return;
        }
 
        iopgtable_clear_entry_all(oiommu);
@@ -1108,8 +1108,16 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
        omap_iommu_detach(oiommu);
 
        omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
+       omap_domain->dev = NULL;
+}
 
-out:
+static void omap_iommu_detach_dev(struct iommu_domain *domain,
+                                struct device *dev)
+{
+       struct omap_iommu_domain *omap_domain = domain->priv;
+
+       spin_lock(&omap_domain->lock);
+       _omap_iommu_detach_dev(omap_domain, dev);
        spin_unlock(&omap_domain->lock);
 }
 
@@ -1148,13 +1156,19 @@ out:
        return -ENOMEM;
 }
 
-/* assume device was already detached */
 static void omap_iommu_domain_destroy(struct iommu_domain *domain)
 {
        struct omap_iommu_domain *omap_domain = domain->priv;
 
        domain->priv = NULL;
 
+       /*
+        * An iommu device is still attached
+        * (currently, only one device can be attached) ?
+        */
+       if (omap_domain->iommu_dev)
+               _omap_iommu_detach_dev(omap_domain, omap_domain->dev);
+
        kfree(omap_domain->pgtable);
        kfree(omap_domain);
 }
index 779306ee7b160f1608e13179a27f244b3726771f..0c0a37792218452fb9d48f7b84364604564669b0 100644 (file)
 #include <linux/device.h>
 #include <linux/io.h>
 #include <linux/iommu.h>
+#include <linux/of.h>
 
 #include <asm/cacheflush.h>
 
 /* bitmap of the page sizes currently supported */
 #define GART_IOMMU_PGSIZES     (SZ_4K)
 
-#define GART_CONFIG            0x24
-#define GART_ENTRY_ADDR                0x28
-#define GART_ENTRY_DATA                0x2c
+#define GART_REG_BASE          0x24
+#define GART_CONFIG            (0x24 - GART_REG_BASE)
+#define GART_ENTRY_ADDR                (0x28 - GART_REG_BASE)
+#define GART_ENTRY_DATA                (0x2c - GART_REG_BASE)
 #define GART_ENTRY_PHYS_ADDR_VALID     (1 << 31)
 
 #define GART_PAGE_SHIFT                12
@@ -158,7 +160,7 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
        struct gart_client *client, *c;
        int err = 0;
 
-       gart = dev_get_drvdata(dev->parent);
+       gart = gart_handle;
        if (!gart)
                return -EINVAL;
        domain->priv = gart;
@@ -422,6 +424,14 @@ const struct dev_pm_ops tegra_gart_pm_ops = {
        .resume         = tegra_gart_resume,
 };
 
+#ifdef CONFIG_OF
+static struct of_device_id tegra_gart_of_match[] __devinitdata = {
+       { .compatible = "nvidia,tegra20-gart", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, tegra_gart_of_match);
+#endif
+
 static struct platform_driver tegra_gart_driver = {
        .probe          = tegra_gart_probe,
        .remove         = tegra_gart_remove,
@@ -429,6 +439,7 @@ static struct platform_driver tegra_gart_driver = {
                .owner  = THIS_MODULE,
                .name   = "tegra-gart",
                .pm     = &tegra_gart_pm_ops,
+               .of_match_table = of_match_ptr(tegra_gart_of_match),
        },
 };
 
@@ -448,4 +459,5 @@ module_exit(tegra_gart_exit);
 
 MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
 MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
+MODULE_ALIAS("platform:tegra-gart");
 MODULE_LICENSE("GPL v2");
index eb93c821f592d8ad87fa129b8b430d3ccac17976..ecd679043d7740e6883aae9cbee68da6321fedc1 100644 (file)
@@ -733,7 +733,7 @@ static int smmu_iommu_attach_dev(struct iommu_domain *domain,
                pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
        }
 
-       dev_dbg(smmu->dev, "%s is attached\n", dev_name(c->dev));
+       dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
        return 0;
 
 err_client:
index ff4b8cfda585b6461309824f277e52d263e0dcb6..04cb8c88d74b7678d12389898ba0702cf0c17b55 100644 (file)
@@ -50,6 +50,19 @@ config LEDS_LM3530
          controlled manually or using PWM input or using ambient
          light automatically.
 
+config LEDS_LM3533
+       tristate "LED support for LM3533"
+       depends on LEDS_CLASS
+       depends on MFD_LM3533
+       help
+         This option enables support for the LEDs on National Semiconductor /
+         TI LM3533 Lighting Power chips.
+
+         The LEDs can be controlled directly, through PWM input, or by the
+         ambient-light-sensor interface. The chip supports
+         hardware-accelerated blinking with maximum on and off periods of 9.8
+         and 77 seconds respectively.
+
 config LEDS_LOCOMO
        tristate "LED Support for Locomo device"
        depends on LEDS_CLASS
@@ -259,6 +272,14 @@ config LEDS_DA903X
          This option enables support for on-chip LED drivers found
          on Dialog Semiconductor DA9030/DA9034 PMICs.
 
+config LEDS_DA9052
+       tristate "Dialog DA9052/DA9053 LEDS"
+       depends on LEDS_CLASS
+       depends on PMIC_DA9052
+       help
+         This option enables support for on-chip LED drivers found
+         on Dialog Semiconductor DA9052-BC and DA9053-AA/Bx PMICs.
+
 config LEDS_DAC124S085
        tristate "LED Support for DAC124S085 SPI DAC"
        depends on LEDS_CLASS
@@ -471,4 +492,12 @@ config LEDS_TRIGGER_DEFAULT_ON
 comment "iptables trigger is under Netfilter config (LED target)"
        depends on LEDS_TRIGGERS
 
+config LEDS_TRIGGER_TRANSIENT
+       tristate "LED Transient Trigger"
+       depends on LEDS_TRIGGERS
+       help
+         This allows one time activation of a transient state on
+         GPIO/PWM based hardware.
+         If unsure, say Y.
+
 endif # NEW_LEDS
index 890481cb09f6b23aa762a242b4a9b4e98998fb91..f8958cd6cf6e82813483aaf52a5ff3cde1e684a5 100644 (file)
@@ -10,6 +10,7 @@ obj-$(CONFIG_LEDS_ATMEL_PWM)          += leds-atmel-pwm.o
 obj-$(CONFIG_LEDS_BD2802)              += leds-bd2802.o
 obj-$(CONFIG_LEDS_LOCOMO)              += leds-locomo.o
 obj-$(CONFIG_LEDS_LM3530)              += leds-lm3530.o
+obj-$(CONFIG_LEDS_LM3533)              += leds-lm3533.o
 obj-$(CONFIG_LEDS_MIKROTIK_RB532)      += leds-rb532.o
 obj-$(CONFIG_LEDS_S3C24XX)             += leds-s3c24xx.o
 obj-$(CONFIG_LEDS_NET48XX)             += leds-net48xx.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_LEDS_FSG)                        += leds-fsg.o
 obj-$(CONFIG_LEDS_PCA955X)             += leds-pca955x.o
 obj-$(CONFIG_LEDS_PCA9633)             += leds-pca9633.o
 obj-$(CONFIG_LEDS_DA903X)              += leds-da903x.o
+obj-$(CONFIG_LEDS_DA9052)              += leds-da9052.o
 obj-$(CONFIG_LEDS_WM831X_STATUS)       += leds-wm831x-status.o
 obj-$(CONFIG_LEDS_WM8350)              += leds-wm8350.o
 obj-$(CONFIG_LEDS_PWM)                 += leds-pwm.o
@@ -56,3 +58,4 @@ obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT)  += ledtrig-heartbeat.o
 obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT)   += ledtrig-backlight.o
 obj-$(CONFIG_LEDS_TRIGGER_GPIO)                += ledtrig-gpio.o
 obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON)  += ledtrig-default-on.o
+obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT)   += ledtrig-transient.o
index 5bff8439dc68a7e8c6424f0cbed3746cd9820efb..8ee92c81aec2c1577c3fb1cfd173d8835c2c7725 100644 (file)
@@ -44,23 +44,18 @@ static ssize_t led_brightness_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t size)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       unsigned long state;
        ssize_t ret = -EINVAL;
-       char *after;
-       unsigned long state = simple_strtoul(buf, &after, 10);
-       size_t count = after - buf;
 
-       if (isspace(*after))
-               count++;
+       ret = kstrtoul(buf, 10, &state);
+       if (ret)
+               return ret;
 
-       if (count == size) {
-               ret = count;
+       if (state == LED_OFF)
+               led_trigger_remove(led_cdev);
+       led_set_brightness(led_cdev, state);
 
-               if (state == LED_OFF)
-                       led_trigger_remove(led_cdev);
-               led_set_brightness(led_cdev, state);
-       }
-
-       return ret;
+       return size;
 }
 
 static ssize_t led_max_brightness_show(struct device *dev,
diff --git a/drivers/leds/leds-da9052.c b/drivers/leds/leds-da9052.c
new file mode 100644 (file)
index 0000000..58a5244
--- /dev/null
@@ -0,0 +1,214 @@
+/*
+ * LED Driver for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/pdata.h>
+
+#define DA9052_OPENDRAIN_OUTPUT        2
+#define DA9052_SET_HIGH_LVL_OUTPUT     (1 << 3)
+#define DA9052_MASK_UPPER_NIBBLE       0xF0
+#define DA9052_MASK_LOWER_NIBBLE       0x0F
+#define DA9052_NIBBLE_SHIFT            4
+#define DA9052_MAX_BRIGHTNESS          0x5f
+
+struct da9052_led {
+       struct led_classdev cdev;
+       struct work_struct work;
+       struct da9052 *da9052;
+       unsigned char led_index;
+       unsigned char id;
+       int brightness;
+};
+
+static unsigned char led_reg[] = {
+       DA9052_LED_CONT_4_REG,
+       DA9052_LED_CONT_5_REG,
+};
+
+static int da9052_set_led_brightness(struct da9052_led *led)
+{
+       u8 val;
+       int error;
+
+       val = (led->brightness & 0x7f) | DA9052_LED_CONT_DIM;
+
+       error = da9052_reg_write(led->da9052, led_reg[led->led_index], val);
+       if (error < 0)
+               dev_err(led->da9052->dev, "Failed to set led brightness, %d\n",
+                       error);
+       return error;
+}
+
+static void da9052_led_work(struct work_struct *work)
+{
+       struct da9052_led *led = container_of(work, struct da9052_led, work);
+
+       da9052_set_led_brightness(led);
+}
+
+static void da9052_led_set(struct led_classdev *led_cdev,
+                          enum led_brightness value)
+{
+       struct da9052_led *led;
+
+       led = container_of(led_cdev, struct da9052_led, cdev);
+       led->brightness = value;
+       schedule_work(&led->work);
+}
+
+static int da9052_configure_leds(struct da9052 *da9052)
+{
+       int error;
+       unsigned char register_value = DA9052_OPENDRAIN_OUTPUT
+                                      | DA9052_SET_HIGH_LVL_OUTPUT;
+
+       error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
+                                 DA9052_MASK_LOWER_NIBBLE,
+                                 register_value);
+
+       if (error < 0) {
+               dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
+                       error);
+               return error;
+       }
+
+       error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
+                                 DA9052_MASK_UPPER_NIBBLE,
+                                 register_value << DA9052_NIBBLE_SHIFT);
+       if (error < 0)
+               dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
+                       error);
+
+       return error;
+}
+
+static int __devinit da9052_led_probe(struct platform_device *pdev)
+{
+       struct da9052_pdata *pdata;
+       struct da9052 *da9052;
+       struct led_platform_data *pled;
+       struct da9052_led *led = NULL;
+       int error = -ENODEV;
+       int i;
+
+       da9052 = dev_get_drvdata(pdev->dev.parent);
+       pdata = da9052->dev->platform_data;
+       if (pdata == NULL) {
+               dev_err(&pdev->dev, "No platform data\n");
+               goto err;
+       }
+
+       pled = pdata->pled;
+       if (pled == NULL) {
+               dev_err(&pdev->dev, "No platform data for LED\n");
+               goto err;
+       }
+
+       led = devm_kzalloc(&pdev->dev,
+                          sizeof(struct da9052_led) * pled->num_leds,
+                          GFP_KERNEL);
+       if (led == NULL) {
+               dev_err(&pdev->dev, "Failed to alloc memory\n");
+               error = -ENOMEM;
+               goto err;
+       }
+
+       for (i = 0; i < pled->num_leds; i++) {
+               led[i].cdev.name = pled->leds[i].name;
+               led[i].cdev.brightness_set = da9052_led_set;
+               led[i].cdev.brightness = LED_OFF;
+               led[i].cdev.max_brightness = DA9052_MAX_BRIGHTNESS;
+               led[i].brightness = LED_OFF;
+               led[i].led_index = pled->leds[i].flags;
+               led[i].da9052 = dev_get_drvdata(pdev->dev.parent);
+               INIT_WORK(&led[i].work, da9052_led_work);
+
+               error = led_classdev_register(pdev->dev.parent, &led[i].cdev);
+               if (error) {
+                       dev_err(&pdev->dev, "Failed to register led %d\n",
+                               led[i].led_index);
+                       goto err_register;
+               }
+
+               error = da9052_set_led_brightness(&led[i]);
+               if (error) {
+                       dev_err(&pdev->dev, "Unable to init led %d\n",
+                               led[i].led_index);
+                       continue;
+               }
+       }
+       error = da9052_configure_leds(led->da9052);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to configure GPIO LED%d\n", error);
+               goto err_register;
+       }
+
+       platform_set_drvdata(pdev, led);
+
+       return 0;
+
+err_register:
+       for (i = i - 1; i >= 0; i--) {
+               led_classdev_unregister(&led[i].cdev);
+               cancel_work_sync(&led[i].work);
+       }
+err:
+       return error;
+}
+
+static int __devexit da9052_led_remove(struct platform_device *pdev)
+{
+       struct da9052_led *led = platform_get_drvdata(pdev);
+       struct da9052_pdata *pdata;
+       struct da9052 *da9052;
+       struct led_platform_data *pled;
+       int i;
+
+       da9052 = dev_get_drvdata(pdev->dev.parent);
+       pdata = da9052->dev->platform_data;
+       pled = pdata->pled;
+
+       for (i = 0; i < pled->num_leds; i++) {
+               led[i].brightness = 0;
+               da9052_set_led_brightness(&led[i]);
+               led_classdev_unregister(&led[i].cdev);
+               cancel_work_sync(&led[i].work);
+       }
+
+       return 0;
+}
+
+static struct platform_driver da9052_led_driver = {
+       .driver         = {
+               .name   = "da9052-leds",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = da9052_led_probe,
+       .remove         = __devexit_p(da9052_led_remove),
+};
+
+module_platform_driver(da9052_led_driver);
+
+MODULE_AUTHOR("Dialog Semiconductor Ltd <dchen@diasemi.com>");
+MODULE_DESCRIPTION("LED driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
index 968fd5fef4fc5e2871dc7b5539f80b6b5328ea1b..84ba6de8039c8334558175d231bc6c062ff8c579 100644 (file)
@@ -113,6 +113,18 @@ struct lm3530_data {
        bool enable;
 };
 
+/*
+ * struct lm3530_als_data
+ * @config  : value of ALS configuration register
+ * @imp_sel : value of ALS resistor select register
+ * @zone    : values of ALS ZB(Zone Boundary) registers
+ */
+struct lm3530_als_data {
+       u8 config;
+       u8 imp_sel;
+       u8 zones[LM3530_ALS_ZB_MAX];
+};
+
 static const u8 lm3530_reg[LM3530_REG_MAX] = {
        LM3530_GEN_CONFIG,
        LM3530_ALS_CONFIG,
@@ -141,29 +153,65 @@ static int lm3530_get_mode_from_str(const char *str)
        return -1;
 }
 
+static void lm3530_als_configure(struct lm3530_platform_data *pdata,
+                               struct lm3530_als_data *als)
+{
+       int i;
+       u32 als_vmin, als_vmax, als_vstep;
+
+       if (pdata->als_vmax == 0) {
+               pdata->als_vmin = 0;
+               pdata->als_vmax = LM3530_ALS_WINDOW_mV;
+       }
+
+       als_vmin = pdata->als_vmin;
+       als_vmax = pdata->als_vmax;
+
+       if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
+               pdata->als_vmax = als_vmax = als_vmin + LM3530_ALS_WINDOW_mV;
+
+       /* n zone boundary makes n+1 zones */
+       als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
+
+       for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
+               als->zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
+                       als_vstep + (i * als_vstep)) * LED_FULL) / 1000;
+
+       als->config =
+               (pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
+               (LM3530_ENABLE_ALS) |
+               (pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
+
+       als->imp_sel =
+               (pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
+               (pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
+}
+
 static int lm3530_init_registers(struct lm3530_data *drvdata)
 {
        int ret = 0;
        int i;
        u8 gen_config;
-       u8 als_config = 0;
        u8 brt_ramp;
-       u8 als_imp_sel = 0;
        u8 brightness;
        u8 reg_val[LM3530_REG_MAX];
-       u8 zones[LM3530_ALS_ZB_MAX];
-       u32 als_vmin, als_vmax, als_vstep;
        struct lm3530_platform_data *pdata = drvdata->pdata;
        struct i2c_client *client = drvdata->client;
        struct lm3530_pwm_data *pwm = &pdata->pwm_data;
+       struct lm3530_als_data als;
+
+       memset(&als, 0, sizeof(struct lm3530_als_data));
 
        gen_config = (pdata->brt_ramp_law << LM3530_RAMP_LAW_SHIFT) |
                        ((pdata->max_current & 7) << LM3530_MAX_CURR_SHIFT);
 
        switch (drvdata->mode) {
        case LM3530_BL_MODE_MANUAL:
+               gen_config |= LM3530_ENABLE_I2C;
+               break;
        case LM3530_BL_MODE_ALS:
                gen_config |= LM3530_ENABLE_I2C;
+               lm3530_als_configure(pdata, &als);
                break;
        case LM3530_BL_MODE_PWM:
                gen_config |= LM3530_ENABLE_PWM | LM3530_ENABLE_PWM_SIMPLE |
@@ -171,38 +219,6 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
                break;
        }
 
-       if (drvdata->mode == LM3530_BL_MODE_ALS) {
-               if (pdata->als_vmax == 0) {
-                       pdata->als_vmin = 0;
-                       pdata->als_vmax = LM3530_ALS_WINDOW_mV;
-               }
-
-               als_vmin = pdata->als_vmin;
-               als_vmax = pdata->als_vmax;
-
-               if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
-                       pdata->als_vmax = als_vmax =
-                               als_vmin + LM3530_ALS_WINDOW_mV;
-
-               /* n zone boundary makes n+1 zones */
-               als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
-
-               for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
-                       zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
-                                       als_vstep + (i * als_vstep)) * LED_FULL)
-                                       / 1000;
-
-               als_config =
-                       (pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
-                       (LM3530_ENABLE_ALS) |
-                       (pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
-
-               als_imp_sel =
-                       (pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
-                       (pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
-
-       }
-
        brt_ramp = (pdata->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) |
                        (pdata->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT);
 
@@ -215,14 +231,14 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
                brightness = drvdata->led_dev.max_brightness;
 
        reg_val[0] = gen_config;        /* LM3530_GEN_CONFIG */
-       reg_val[1] = als_config;        /* LM3530_ALS_CONFIG */
+       reg_val[1] = als.config;        /* LM3530_ALS_CONFIG */
        reg_val[2] = brt_ramp;          /* LM3530_BRT_RAMP_RATE */
-       reg_val[3] = als_imp_sel;       /* LM3530_ALS_IMP_SELECT */
+       reg_val[3] = als.imp_sel;       /* LM3530_ALS_IMP_SELECT */
        reg_val[4] = brightness;        /* LM3530_BRT_CTRL_REG */
-       reg_val[5] = zones[0];          /* LM3530_ALS_ZB0_REG */
-       reg_val[6] = zones[1];          /* LM3530_ALS_ZB1_REG */
-       reg_val[7] = zones[2];          /* LM3530_ALS_ZB2_REG */
-       reg_val[8] = zones[3];          /* LM3530_ALS_ZB3_REG */
+       reg_val[5] = als.zones[0];      /* LM3530_ALS_ZB0_REG */
+       reg_val[6] = als.zones[1];      /* LM3530_ALS_ZB1_REG */
+       reg_val[7] = als.zones[2];      /* LM3530_ALS_ZB2_REG */
+       reg_val[8] = als.zones[3];      /* LM3530_ALS_ZB3_REG */
        reg_val[9] = LM3530_DEF_ZT_0;   /* LM3530_ALS_Z0T_REG */
        reg_val[10] = LM3530_DEF_ZT_1;  /* LM3530_ALS_Z1T_REG */
        reg_val[11] = LM3530_DEF_ZT_2;  /* LM3530_ALS_Z2T_REG */
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
new file mode 100644 (file)
index 0000000..f56b6e7
--- /dev/null
@@ -0,0 +1,785 @@
+/*
+ * leds-lm3533.c -- LM3533 LED driver
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/mfd/core.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_LVCTRLBANK_MIN          2
+#define LM3533_LVCTRLBANK_MAX          5
+#define LM3533_LVCTRLBANK_COUNT                4
+#define LM3533_RISEFALLTIME_MAX                7
+#define LM3533_ALS_CHANNEL_LV_MIN      1
+#define LM3533_ALS_CHANNEL_LV_MAX      2
+
+#define LM3533_REG_CTRLBANK_BCONF_BASE         0x1b
+#define LM3533_REG_PATTERN_ENABLE              0x28
+#define LM3533_REG_PATTERN_LOW_TIME_BASE       0x71
+#define LM3533_REG_PATTERN_HIGH_TIME_BASE      0x72
+#define LM3533_REG_PATTERN_RISETIME_BASE       0x74
+#define LM3533_REG_PATTERN_FALLTIME_BASE       0x75
+
+#define LM3533_REG_PATTERN_STEP                        0x10
+
+#define LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK         0x04
+#define LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK          0x02
+#define LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK     0x01
+
+#define LM3533_LED_FLAG_PATTERN_ENABLE         1
+
+
+struct lm3533_led {
+       struct lm3533 *lm3533;
+       struct lm3533_ctrlbank cb;
+       struct led_classdev cdev;
+       int id;
+
+       struct mutex mutex;
+       unsigned long flags;
+
+       struct work_struct work;
+       u8 new_brightness;
+};
+
+
+static inline struct lm3533_led *to_lm3533_led(struct led_classdev *cdev)
+{
+       return container_of(cdev, struct lm3533_led, cdev);
+}
+
+static inline int lm3533_led_get_ctrlbank_id(struct lm3533_led *led)
+{
+       return led->id + 2;
+}
+
+static inline u8 lm3533_led_get_lv_reg(struct lm3533_led *led, u8 base)
+{
+       return base + led->id;
+}
+
+static inline u8 lm3533_led_get_pattern(struct lm3533_led *led)
+{
+       return led->id;
+}
+
+static inline u8 lm3533_led_get_pattern_reg(struct lm3533_led *led,
+                                                               u8 base)
+{
+       return base + lm3533_led_get_pattern(led) * LM3533_REG_PATTERN_STEP;
+}
+
+static int lm3533_led_pattern_enable(struct lm3533_led *led, int enable)
+{
+       u8 mask;
+       u8 val;
+       int pattern;
+       int state;
+       int ret = 0;
+
+       dev_dbg(led->cdev.dev, "%s - %d\n", __func__, enable);
+
+       mutex_lock(&led->mutex);
+
+       state = test_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags);
+       if ((enable && state) || (!enable && !state))
+               goto out;
+
+       pattern = lm3533_led_get_pattern(led);
+       mask = 1 << (2 * pattern);
+
+       if (enable)
+               val = mask;
+       else
+               val = 0;
+
+       ret = lm3533_update(led->lm3533, LM3533_REG_PATTERN_ENABLE, val, mask);
+       if (ret) {
+               dev_err(led->cdev.dev, "failed to enable pattern %d (%d)\n",
+                                                       pattern, enable);
+               goto out;
+       }
+
+       __change_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags);
+out:
+       mutex_unlock(&led->mutex);
+
+       return ret;
+}
+
+static void lm3533_led_work(struct work_struct *work)
+{
+       struct lm3533_led *led = container_of(work, struct lm3533_led, work);
+
+       dev_dbg(led->cdev.dev, "%s - %u\n", __func__, led->new_brightness);
+
+       if (led->new_brightness == 0)
+               lm3533_led_pattern_enable(led, 0);      /* disable blink */
+
+       lm3533_ctrlbank_set_brightness(&led->cb, led->new_brightness);
+}
+
+static void lm3533_led_set(struct led_classdev *cdev,
+                                               enum led_brightness value)
+{
+       struct lm3533_led *led = to_lm3533_led(cdev);
+
+       dev_dbg(led->cdev.dev, "%s - %d\n", __func__, value);
+
+       led->new_brightness = value;
+       schedule_work(&led->work);
+}
+
+static enum led_brightness lm3533_led_get(struct led_classdev *cdev)
+{
+       struct lm3533_led *led = to_lm3533_led(cdev);
+       u8 val;
+       int ret;
+
+       ret = lm3533_ctrlbank_get_brightness(&led->cb, &val);
+       if (ret)
+               return ret;
+
+       dev_dbg(led->cdev.dev, "%s - %u\n", __func__, val);
+
+       return val;
+}
+
+/* Pattern generator defines (delays in us). */
+#define LM3533_LED_DELAY1_VMIN 0x00
+#define LM3533_LED_DELAY2_VMIN 0x3d
+#define LM3533_LED_DELAY3_VMIN 0x80
+
+#define LM3533_LED_DELAY1_VMAX (LM3533_LED_DELAY2_VMIN - 1)
+#define LM3533_LED_DELAY2_VMAX (LM3533_LED_DELAY3_VMIN - 1)
+#define LM3533_LED_DELAY3_VMAX 0xff
+
+#define LM3533_LED_DELAY1_TMIN 16384U
+#define LM3533_LED_DELAY2_TMIN 1130496U
+#define LM3533_LED_DELAY3_TMIN 10305536U
+
+#define LM3533_LED_DELAY1_TMAX 999424U
+#define LM3533_LED_DELAY2_TMAX 9781248U
+#define LM3533_LED_DELAY3_TMAX 76890112U
+
+/* t_step = (t_max - t_min) / (v_max - v_min) */
+#define LM3533_LED_DELAY1_TSTEP        16384
+#define LM3533_LED_DELAY2_TSTEP        131072
+#define LM3533_LED_DELAY3_TSTEP        524288
+
+/* Delay limits for hardware accelerated blinking (in ms). */
+#define LM3533_LED_DELAY_ON_MAX \
+       ((LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY2_TSTEP / 2) / 1000)
+#define LM3533_LED_DELAY_OFF_MAX \
+       ((LM3533_LED_DELAY3_TMAX + LM3533_LED_DELAY3_TSTEP / 2) / 1000)
+
+/*
+ * Returns linear map of *t from [t_min,t_max] to [v_min,v_max] with a step
+ * size of t_step, where
+ *
+ *     t_step = (t_max - t_min) / (v_max - v_min)
+ *
+ * and updates *t to reflect the mapped value.
+ */
+static u8 time_to_val(unsigned *t, unsigned t_min, unsigned t_step,
+                                                       u8 v_min, u8 v_max)
+{
+       unsigned val;
+
+       val = (*t + t_step / 2 - t_min) / t_step + v_min;
+
+       *t = t_step * (val - v_min) + t_min;
+
+       return (u8)val;
+}
+
+/*
+ * Returns time code corresponding to *delay (in ms) and updates *delay to
+ * reflect actual hardware delay.
+ *
+ * Hardware supports 256 discrete delay times, divided into three groups with
+ * the following ranges and step-sizes:
+ *
+ *     [   16,   999]  [0x00, 0x3e]    step  16 ms
+ *     [ 1130,  9781]  [0x3d, 0x7f]    step 131 ms
+ *     [10306, 76890]  [0x80, 0xff]    step 524 ms
+ *
+ * Note that delay group 3 is only available for delay_off.
+ */
+static u8 lm3533_led_get_hw_delay(unsigned *delay)
+{
+       unsigned t;
+       u8 val;
+
+       t = *delay * 1000;
+
+       if (t >= (LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY3_TMIN) / 2) {
+               t = clamp(t, LM3533_LED_DELAY3_TMIN, LM3533_LED_DELAY3_TMAX);
+               val = time_to_val(&t,   LM3533_LED_DELAY3_TMIN,
+                                       LM3533_LED_DELAY3_TSTEP,
+                                       LM3533_LED_DELAY3_VMIN,
+                                       LM3533_LED_DELAY3_VMAX);
+       } else if (t >= (LM3533_LED_DELAY1_TMAX + LM3533_LED_DELAY2_TMIN) / 2) {
+               t = clamp(t, LM3533_LED_DELAY2_TMIN, LM3533_LED_DELAY2_TMAX);
+               val = time_to_val(&t,   LM3533_LED_DELAY2_TMIN,
+                                       LM3533_LED_DELAY2_TSTEP,
+                                       LM3533_LED_DELAY2_VMIN,
+                                       LM3533_LED_DELAY2_VMAX);
+       } else {
+               t = clamp(t, LM3533_LED_DELAY1_TMIN, LM3533_LED_DELAY1_TMAX);
+               val = time_to_val(&t,   LM3533_LED_DELAY1_TMIN,
+                                       LM3533_LED_DELAY1_TSTEP,
+                                       LM3533_LED_DELAY1_VMIN,
+                                       LM3533_LED_DELAY1_VMAX);
+       }
+
+       *delay = (t + 500) / 1000;
+
+       return val;
+}
+
+/*
+ * Set delay register base to *delay (in ms) and update *delay to reflect
+ * actual hardware delay used.
+ */
+static u8 lm3533_led_delay_set(struct lm3533_led *led, u8 base,
+                                                       unsigned long *delay)
+{
+       unsigned t;
+       u8 val;
+       u8 reg;
+       int ret;
+
+       t = (unsigned)*delay;
+
+       /* Delay group 3 is only available for low time (delay off). */
+       if (base != LM3533_REG_PATTERN_LOW_TIME_BASE)
+               t = min(t, LM3533_LED_DELAY2_TMAX / 1000);
+
+       val = lm3533_led_get_hw_delay(&t);
+
+       dev_dbg(led->cdev.dev, "%s - %lu: %u (0x%02x)\n", __func__,
+                                                       *delay, t, val);
+       reg = lm3533_led_get_pattern_reg(led, base);
+       ret = lm3533_write(led->lm3533, reg, val);
+       if (ret)
+               dev_err(led->cdev.dev, "failed to set delay (%02x)\n", reg);
+
+       *delay = t;
+
+       return ret;
+}
+
+static int lm3533_led_delay_on_set(struct lm3533_led *led, unsigned long *t)
+{
+       return lm3533_led_delay_set(led, LM3533_REG_PATTERN_HIGH_TIME_BASE, t);
+}
+
+static int lm3533_led_delay_off_set(struct lm3533_led *led, unsigned long *t)
+{
+       return lm3533_led_delay_set(led, LM3533_REG_PATTERN_LOW_TIME_BASE, t);
+}
+
+static int lm3533_led_blink_set(struct led_classdev *cdev,
+                               unsigned long *delay_on,
+                               unsigned long *delay_off)
+{
+       struct lm3533_led *led = to_lm3533_led(cdev);
+       int ret;
+
+       dev_dbg(led->cdev.dev, "%s - on = %lu, off = %lu\n", __func__,
+                                                       *delay_on, *delay_off);
+
+       if (*delay_on > LM3533_LED_DELAY_ON_MAX ||
+                                       *delay_off > LM3533_LED_DELAY_OFF_MAX)
+               return -EINVAL;
+
+       if (*delay_on == 0 && *delay_off == 0) {
+               *delay_on = 500;
+               *delay_off = 500;
+       }
+
+       ret = lm3533_led_delay_on_set(led, delay_on);
+       if (ret)
+               return ret;
+
+       ret = lm3533_led_delay_off_set(led, delay_off);
+       if (ret)
+               return ret;
+
+       return lm3533_led_pattern_enable(led, 1);
+}
+
+static ssize_t show_id(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lm3533_led *led = to_lm3533_led(led_cdev);
+
+       return scnprintf(buf, PAGE_SIZE, "%d\n", led->id);
+}
+
+/*
+ * Pattern generator rise/fall times:
+ *
+ *   0 - 2048 us (default)
+ *   1 - 262 ms
+ *   2 - 524 ms
+ *   3 - 1.049 s
+ *   4 - 2.097 s
+ *   5 - 4.194 s
+ *   6 - 8.389 s
+ *   7 - 16.78 s
+ */
+static ssize_t show_risefalltime(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf, u8 base)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lm3533_led *led = to_lm3533_led(led_cdev);
+       ssize_t ret;
+       u8 reg;
+       u8 val;
+
+       reg = lm3533_led_get_pattern_reg(led, base);
+       ret = lm3533_read(led->lm3533, reg, &val);
+       if (ret)
+               return ret;
+
+       return scnprintf(buf, PAGE_SIZE, "%x\n", val);
+}
+
+static ssize_t show_risetime(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       return show_risefalltime(dev, attr, buf,
+                                       LM3533_REG_PATTERN_RISETIME_BASE);
+}
+
+static ssize_t show_falltime(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       return show_risefalltime(dev, attr, buf,
+                                       LM3533_REG_PATTERN_FALLTIME_BASE);
+}
+
+static ssize_t store_risefalltime(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t len, u8 base)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lm3533_led *led = to_lm3533_led(led_cdev);
+       u8 val;
+       u8 reg;
+       int ret;
+
+       if (kstrtou8(buf, 0, &val) || val > LM3533_RISEFALLTIME_MAX)
+               return -EINVAL;
+
+       reg = lm3533_led_get_pattern_reg(led, base);
+       ret = lm3533_write(led->lm3533, reg, val);
+       if (ret)
+               return ret;
+
+       return len;
+}
+
+static ssize_t store_risetime(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t len)
+{
+       return store_risefalltime(dev, attr, buf, len,
+                                       LM3533_REG_PATTERN_RISETIME_BASE);
+}
+
+static ssize_t store_falltime(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t len)
+{
+       return store_risefalltime(dev, attr, buf, len,
+                                       LM3533_REG_PATTERN_FALLTIME_BASE);
+}
+
+static ssize_t show_als_channel(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lm3533_led *led = to_lm3533_led(led_cdev);
+       unsigned channel;
+       u8 reg;
+       u8 val;
+       int ret;
+
+       reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+       ret = lm3533_read(led->lm3533, reg, &val);
+       if (ret)
+               return ret;
+
+       channel = (val & LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK) + 1;
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", channel);
+}
+
+static ssize_t store_als_channel(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t len)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lm3533_led *led = to_lm3533_led(led_cdev);
+       unsigned channel;
+       u8 reg;
+       u8 val;
+       u8 mask;
+       int ret;
+
+       if (kstrtouint(buf, 0, &channel))
+               return -EINVAL;
+
+       if (channel < LM3533_ALS_CHANNEL_LV_MIN ||
+                                       channel > LM3533_ALS_CHANNEL_LV_MAX)
+               return -EINVAL;
+
+       reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+       mask = LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK;
+       val = channel - 1;
+
+       ret = lm3533_update(led->lm3533, reg, val, mask);
+       if (ret)
+               return ret;
+
+       return len;
+}
+
+static ssize_t show_als_en(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lm3533_led *led = to_lm3533_led(led_cdev);
+       bool enable;
+       u8 reg;
+       u8 val;
+       int ret;
+
+       reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+       ret = lm3533_read(led->lm3533, reg, &val);
+       if (ret)
+               return ret;
+
+       enable = val & LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK;
+
+       return scnprintf(buf, PAGE_SIZE, "%d\n", enable);
+}
+
+static ssize_t store_als_en(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t len)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lm3533_led *led = to_lm3533_led(led_cdev);
+       unsigned enable;
+       u8 reg;
+       u8 mask;
+       u8 val;
+       int ret;
+
+       if (kstrtouint(buf, 0, &enable))
+               return -EINVAL;
+
+       reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+       mask = LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK;
+
+       if (enable)
+               val = mask;
+       else
+               val = 0;
+
+       ret = lm3533_update(led->lm3533, reg, val, mask);
+       if (ret)
+               return ret;
+
+       return len;
+}
+
+static ssize_t show_linear(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lm3533_led *led = to_lm3533_led(led_cdev);
+       u8 reg;
+       u8 val;
+       int linear;
+       int ret;
+
+       reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+       ret = lm3533_read(led->lm3533, reg, &val);
+       if (ret)
+               return ret;
+
+       if (val & LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK)
+               linear = 1;
+       else
+               linear = 0;
+
+       return scnprintf(buf, PAGE_SIZE, "%x\n", linear);
+}
+
+static ssize_t store_linear(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t len)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lm3533_led *led = to_lm3533_led(led_cdev);
+       unsigned long linear;
+       u8 reg;
+       u8 mask;
+       u8 val;
+       int ret;
+
+       if (kstrtoul(buf, 0, &linear))
+               return -EINVAL;
+
+       reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+       mask = LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK;
+
+       if (linear)
+               val = mask;
+       else
+               val = 0;
+
+       ret = lm3533_update(led->lm3533, reg, val, mask);
+       if (ret)
+               return ret;
+
+       return len;
+}
+
+static ssize_t show_pwm(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lm3533_led *led = to_lm3533_led(led_cdev);
+       u8 val;
+       int ret;
+
+       ret = lm3533_ctrlbank_get_pwm(&led->cb, &val);
+       if (ret)
+               return ret;
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t len)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lm3533_led *led = to_lm3533_led(led_cdev);
+       u8 val;
+       int ret;
+
+       if (kstrtou8(buf, 0, &val))
+               return -EINVAL;
+
+       ret = lm3533_ctrlbank_set_pwm(&led->cb, val);
+       if (ret)
+               return ret;
+
+       return len;
+}
+
+static LM3533_ATTR_RW(als_channel);
+static LM3533_ATTR_RW(als_en);
+static LM3533_ATTR_RW(falltime);
+static LM3533_ATTR_RO(id);
+static LM3533_ATTR_RW(linear);
+static LM3533_ATTR_RW(pwm);
+static LM3533_ATTR_RW(risetime);
+
+static struct attribute *lm3533_led_attributes[] = {
+       &dev_attr_als_channel.attr,
+       &dev_attr_als_en.attr,
+       &dev_attr_falltime.attr,
+       &dev_attr_id.attr,
+       &dev_attr_linear.attr,
+       &dev_attr_pwm.attr,
+       &dev_attr_risetime.attr,
+       NULL,
+};
+
+static umode_t lm3533_led_attr_is_visible(struct kobject *kobj,
+                                            struct attribute *attr, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lm3533_led *led = to_lm3533_led(led_cdev);
+       umode_t mode = attr->mode;
+
+       if (attr == &dev_attr_als_channel.attr ||
+                                       attr == &dev_attr_als_en.attr) {
+               if (!led->lm3533->have_als)
+                       mode = 0;
+       }
+
+       return mode;
+};
+
+static struct attribute_group lm3533_led_attribute_group = {
+       .is_visible     = lm3533_led_attr_is_visible,
+       .attrs          = lm3533_led_attributes
+};
+
+static int __devinit lm3533_led_setup(struct lm3533_led *led,
+                                       struct lm3533_led_platform_data *pdata)
+{
+       int ret;
+
+       ret = lm3533_ctrlbank_set_max_current(&led->cb, pdata->max_current);
+       if (ret)
+               return ret;
+
+       return lm3533_ctrlbank_set_pwm(&led->cb, pdata->pwm);
+}
+
+static int __devinit lm3533_led_probe(struct platform_device *pdev)
+{
+       struct lm3533 *lm3533;
+       struct lm3533_led_platform_data *pdata;
+       struct lm3533_led *led;
+       int ret;
+
+       dev_dbg(&pdev->dev, "%s\n", __func__);
+
+       lm3533 = dev_get_drvdata(pdev->dev.parent);
+       if (!lm3533)
+               return -EINVAL;
+
+       pdata = pdev->dev.platform_data;
+       if (!pdata) {
+               dev_err(&pdev->dev, "no platform data\n");
+               return -EINVAL;
+       }
+
+       if (pdev->id < 0 || pdev->id >= LM3533_LVCTRLBANK_COUNT) {
+               dev_err(&pdev->dev, "illegal LED id %d\n", pdev->id);
+               return -EINVAL;
+       }
+
+       led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+       if (!led)
+               return -ENOMEM;
+
+       led->lm3533 = lm3533;
+       led->cdev.name = pdata->name;
+       led->cdev.default_trigger = pdata->default_trigger;
+       led->cdev.brightness_set = lm3533_led_set;
+       led->cdev.brightness_get = lm3533_led_get;
+       led->cdev.blink_set = lm3533_led_blink_set;
+       led->cdev.brightness = LED_OFF;
+       led->id = pdev->id;
+
+       mutex_init(&led->mutex);
+       INIT_WORK(&led->work, lm3533_led_work);
+
+       /* The class framework makes a callback to get brightness during
+        * registration so use parent device (for error reporting) until
+        * registered.
+        */
+       led->cb.lm3533 = lm3533;
+       led->cb.id = lm3533_led_get_ctrlbank_id(led);
+       led->cb.dev = lm3533->dev;
+
+       platform_set_drvdata(pdev, led);
+
+       ret = led_classdev_register(pdev->dev.parent, &led->cdev);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id);
+               return ret;
+       }
+
+       led->cb.dev = led->cdev.dev;
+
+       ret = sysfs_create_group(&led->cdev.dev->kobj,
+                                               &lm3533_led_attribute_group);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "failed to create sysfs attributes\n");
+               goto err_unregister;
+       }
+
+       ret = lm3533_led_setup(led, pdata);
+       if (ret)
+               goto err_sysfs_remove;
+
+       ret = lm3533_ctrlbank_enable(&led->cb);
+       if (ret)
+               goto err_sysfs_remove;
+
+       return 0;
+
+err_sysfs_remove:
+       sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
+err_unregister:
+       led_classdev_unregister(&led->cdev);
+       flush_work_sync(&led->work);
+
+       return ret;
+}
+
+static int __devexit lm3533_led_remove(struct platform_device *pdev)
+{
+       struct lm3533_led *led = platform_get_drvdata(pdev);
+
+       dev_dbg(&pdev->dev, "%s\n", __func__);
+
+       lm3533_ctrlbank_disable(&led->cb);
+       sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
+       led_classdev_unregister(&led->cdev);
+       flush_work_sync(&led->work);
+
+       return 0;
+}
+
+static void lm3533_led_shutdown(struct platform_device *pdev)
+{
+
+       struct lm3533_led *led = platform_get_drvdata(pdev);
+
+       dev_dbg(&pdev->dev, "%s\n", __func__);
+
+       lm3533_ctrlbank_disable(&led->cb);
+       lm3533_led_set(&led->cdev, LED_OFF);            /* disable blink */
+       flush_work_sync(&led->work);
+}
+
+static struct platform_driver lm3533_led_driver = {
+       .driver = {
+               .name = "lm3533-leds",
+               .owner = THIS_MODULE,
+       },
+       .probe          = lm3533_led_probe,
+       .remove         = __devexit_p(lm3533_led_remove),
+       .shutdown       = lm3533_led_shutdown,
+};
+module_platform_driver(lm3533_led_driver);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lm3533-leds");
index 410a723b86910ac8876e696bc4d6b0b85237e998..23815624f35ef59ea11d6cfe167e173257c548c2 100644 (file)
@@ -193,9 +193,14 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
 
        /* move current engine to direct mode and remember the state */
        ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
+       if (ret)
+               return ret;
+
        /* Mode change requires min 500 us delay. 1 - 2 ms  with margin */
        usleep_range(1000, 2000);
-       ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+       ret = lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+       if (ret)
+               return ret;
 
        /* For loading, all the engines to load mode */
        lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
@@ -211,8 +216,7 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
                                LP5521_PROG_MEM_SIZE,
                                pattern);
 
-       ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
-       return ret;
+       return lp5521_write(client, LP5521_REG_OP_MODE, mode);
 }
 
 static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
@@ -785,7 +789,7 @@ static int __devinit lp5521_probe(struct i2c_client *client,
         * LP5521_REG_ENABLE register will not have any effect - strange!
         */
        ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
-       if (buf != LP5521_REG_R_CURR_DEFAULT) {
+       if (ret || buf != LP5521_REG_R_CURR_DEFAULT) {
                dev_err(&client->dev, "error in resetting chip\n");
                goto fail2;
        }
index 8bc4915415509d6e2456a0d4ea46446bf9188eca..4cc6a2e3df3487e1ce971b407bd56095d84b9b54 100644 (file)
@@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+       led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
        if (led == NULL) {
                dev_err(&pdev->dev, "failed to alloc memory\n");
                return -ENOMEM;
index dcc3bc3d38db8cac7384c65009965967bc026f24..5f462dbf0dbbf4920bfc281c6ad69cfc8486ed5d 100644 (file)
@@ -101,11 +101,16 @@ static const struct i2c_device_id pca955x_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, pca955x_id);
 
-struct pca955x_led {
+struct pca955x {
+       struct mutex lock;
+       struct pca955x_led *leds;
        struct pca955x_chipdef  *chipdef;
        struct i2c_client       *client;
+};
+
+struct pca955x_led {
+       struct pca955x  *pca955x;
        struct work_struct      work;
-       spinlock_t              lock;
        enum led_brightness     brightness;
        struct led_classdev     led_cdev;
        int                     led_num;        /* 0 .. 15 potentially */
@@ -140,7 +145,7 @@ static inline u8 pca955x_ledsel(u8 oldval, int led_num, int state)
  */
 static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
 {
-       struct pca955x_led *pca955x = i2c_get_clientdata(client);
+       struct pca955x *pca955x = i2c_get_clientdata(client);
 
        i2c_smbus_write_byte_data(client,
                pca95xx_num_input_regs(pca955x->chipdef->bits) + 2*n,
@@ -156,7 +161,7 @@ static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
  */
 static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
 {
-       struct pca955x_led *pca955x = i2c_get_clientdata(client);
+       struct pca955x *pca955x = i2c_get_clientdata(client);
 
        i2c_smbus_write_byte_data(client,
                pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + 2*n,
@@ -169,7 +174,7 @@ static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
  */
 static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
 {
-       struct pca955x_led *pca955x = i2c_get_clientdata(client);
+       struct pca955x *pca955x = i2c_get_clientdata(client);
 
        i2c_smbus_write_byte_data(client,
                pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n,
@@ -182,7 +187,7 @@ static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
  */
 static u8 pca955x_read_ls(struct i2c_client *client, int n)
 {
-       struct pca955x_led *pca955x = i2c_get_clientdata(client);
+       struct pca955x *pca955x = i2c_get_clientdata(client);
 
        return (u8) i2c_smbus_read_byte_data(client,
                pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n);
@@ -190,18 +195,23 @@ static u8 pca955x_read_ls(struct i2c_client *client, int n)
 
 static void pca955x_led_work(struct work_struct *work)
 {
-       struct pca955x_led *pca955x;
+       struct pca955x_led *pca955x_led;
+       struct pca955x *pca955x;
        u8 ls;
        int chip_ls;    /* which LSx to use (0-3 potentially) */
        int ls_led;     /* which set of bits within LSx to use (0-3) */
 
-       pca955x = container_of(work, struct pca955x_led, work);
-       chip_ls = pca955x->led_num / 4;
-       ls_led = pca955x->led_num % 4;
+       pca955x_led = container_of(work, struct pca955x_led, work);
+       pca955x = pca955x_led->pca955x;
+
+       chip_ls = pca955x_led->led_num / 4;
+       ls_led = pca955x_led->led_num % 4;
+
+       mutex_lock(&pca955x->lock);
 
        ls = pca955x_read_ls(pca955x->client, chip_ls);
 
-       switch (pca955x->brightness) {
+       switch (pca955x_led->brightness) {
        case LED_FULL:
                ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON);
                break;
@@ -219,12 +229,15 @@ static void pca955x_led_work(struct work_struct *work)
                 * OFF, HALF, or FULL.  But, this is probably better than
                 * just turning off for all other values.
                 */
-               pca955x_write_pwm(pca955x->client, 1, 255-pca955x->brightness);
+               pca955x_write_pwm(pca955x->client, 1,
+                               255 - pca955x_led->brightness);
                ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1);
                break;
        }
 
        pca955x_write_ls(pca955x->client, chip_ls, ls);
+
+       mutex_unlock(&pca955x->lock);
 }
 
 static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value)
@@ -233,7 +246,6 @@ static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness v
 
        pca955x = container_of(led_cdev, struct pca955x_led, led_cdev);
 
-       spin_lock(&pca955x->lock);
        pca955x->brightness = value;
 
        /*
@@ -241,14 +253,13 @@ static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness v
         * can sleep.
         */
        schedule_work(&pca955x->work);
-
-       spin_unlock(&pca955x->lock);
 }
 
 static int __devinit pca955x_probe(struct i2c_client *client,
                                        const struct i2c_device_id *id)
 {
-       struct pca955x_led *pca955x;
+       struct pca955x *pca955x;
+       struct pca955x_led *pca955x_led;
        struct pca955x_chipdef *chip;
        struct i2c_adapter *adapter;
        struct led_platform_data *pdata;
@@ -282,39 +293,48 @@ static int __devinit pca955x_probe(struct i2c_client *client,
                }
        }
 
-       pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL);
+       pca955x = kzalloc(sizeof(*pca955x), GFP_KERNEL);
        if (!pca955x)
                return -ENOMEM;
 
+       pca955x->leds = kzalloc(sizeof(*pca955x_led) * chip->bits, GFP_KERNEL);
+       if (!pca955x->leds) {
+               err = -ENOMEM;
+               goto exit_nomem;
+       }
+
        i2c_set_clientdata(client, pca955x);
 
+       mutex_init(&pca955x->lock);
+       pca955x->client = client;
+       pca955x->chipdef = chip;
+
        for (i = 0; i < chip->bits; i++) {
-               pca955x[i].chipdef = chip;
-               pca955x[i].client = client;
-               pca955x[i].led_num = i;
+               pca955x_led = &pca955x->leds[i];
+               pca955x_led->led_num = i;
+               pca955x_led->pca955x = pca955x;
 
                /* Platform data can specify LED names and default triggers */
                if (pdata) {
                        if (pdata->leds[i].name)
-                               snprintf(pca955x[i].name,
-                                        sizeof(pca955x[i].name), "pca955x:%s",
-                                        pdata->leds[i].name);
+                               snprintf(pca955x_led->name,
+                                       sizeof(pca955x_led->name), "pca955x:%s",
+                                       pdata->leds[i].name);
                        if (pdata->leds[i].default_trigger)
-                               pca955x[i].led_cdev.default_trigger =
+                               pca955x_led->led_cdev.default_trigger =
                                        pdata->leds[i].default_trigger;
                } else {
-                       snprintf(pca955x[i].name, sizeof(pca955x[i].name),
+                       snprintf(pca955x_led->name, sizeof(pca955x_led->name),
                                 "pca955x:%d", i);
                }
 
-               spin_lock_init(&pca955x[i].lock);
-
-               pca955x[i].led_cdev.name = pca955x[i].name;
-               pca955x[i].led_cdev.brightness_set = pca955x_led_set;
+               pca955x_led->led_cdev.name = pca955x_led->name;
+               pca955x_led->led_cdev.brightness_set = pca955x_led_set;
 
-               INIT_WORK(&pca955x[i].work, pca955x_led_work);
+               INIT_WORK(&pca955x_led->work, pca955x_led_work);
 
-               err = led_classdev_register(&client->dev, &pca955x[i].led_cdev);
+               err = led_classdev_register(&client->dev,
+                                       &pca955x_led->led_cdev);
                if (err < 0)
                        goto exit;
        }
@@ -337,10 +357,12 @@ static int __devinit pca955x_probe(struct i2c_client *client,
 
 exit:
        while (i--) {
-               led_classdev_unregister(&pca955x[i].led_cdev);
-               cancel_work_sync(&pca955x[i].work);
+               led_classdev_unregister(&pca955x->leds[i].led_cdev);
+               cancel_work_sync(&pca955x->leds[i].work);
        }
 
+       kfree(pca955x->leds);
+exit_nomem:
        kfree(pca955x);
 
        return err;
@@ -348,14 +370,15 @@ exit:
 
 static int __devexit pca955x_remove(struct i2c_client *client)
 {
-       struct pca955x_led *pca955x = i2c_get_clientdata(client);
+       struct pca955x *pca955x = i2c_get_clientdata(client);
        int i;
 
        for (i = 0; i < pca955x->chipdef->bits; i++) {
-               led_classdev_unregister(&pca955x[i].led_cdev);
-               cancel_work_sync(&pca955x[i].work);
+               led_classdev_unregister(&pca955x->leds[i].led_cdev);
+               cancel_work_sync(&pca955x->leds[i].work);
        }
 
+       kfree(pca955x->leds);
        kfree(pca955x);
 
        return 0;
index 2b513a2ad7dec3b7d81b934e8d852903f73548c9..e2726867c5d42c7a5f2cc6c37b786d922eb0b883 100644 (file)
@@ -120,6 +120,7 @@ static void bl_trig_activate(struct led_classdev *led)
        ret = fb_register_client(&n->notifier);
        if (ret)
                dev_err(led->dev, "unable to register backlight trigger\n");
+       led->activated = true;
 
        return;
 
@@ -133,10 +134,11 @@ static void bl_trig_deactivate(struct led_classdev *led)
        struct bl_trig_notifier *n =
                (struct bl_trig_notifier *) led->trigger_data;
 
-       if (n) {
+       if (led->activated) {
                device_remove_file(led->dev, &dev_attr_inverted);
                fb_unregister_client(&n->notifier);
                kfree(n);
+               led->activated = false;
        }
 }
 
index ecc4bf3f37a937d74ab71f6c6cf91c9fc3763eb9..f057c101b896e56561a41f54809e1a10e52d0aaa 100644 (file)
@@ -200,6 +200,7 @@ static void gpio_trig_activate(struct led_classdev *led)
        gpio_data->led = led;
        led->trigger_data = gpio_data;
        INIT_WORK(&gpio_data->work, gpio_trig_work);
+       led->activated = true;
 
        return;
 
@@ -217,7 +218,7 @@ static void gpio_trig_deactivate(struct led_classdev *led)
 {
        struct gpio_trig_data *gpio_data = led->trigger_data;
 
-       if (gpio_data) {
+       if (led->activated) {
                device_remove_file(led->dev, &dev_attr_gpio);
                device_remove_file(led->dev, &dev_attr_inverted);
                device_remove_file(led->dev, &dev_attr_desired_brightness);
@@ -225,6 +226,7 @@ static void gpio_trig_deactivate(struct led_classdev *led)
                if (gpio_data->gpio != 0)
                        free_irq(gpio_to_irq(gpio_data->gpio), led);
                kfree(gpio_data);
+               led->activated = false;
        }
 }
 
index 759c0bba4a8fa54d95b12d5434ad72a643a25f88..41dc76db43118a347e4a8a06923b0fd076a12dc3 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/timer.h>
 #include <linux/sched.h>
 #include <linux/leds.h>
+#include <linux/reboot.h>
 #include "leds.h"
 
 struct heartbeat_trig_data {
@@ -83,15 +84,17 @@ static void heartbeat_trig_activate(struct led_classdev *led_cdev)
                    led_heartbeat_function, (unsigned long) led_cdev);
        heartbeat_data->phase = 0;
        led_heartbeat_function(heartbeat_data->timer.data);
+       led_cdev->activated = true;
 }
 
 static void heartbeat_trig_deactivate(struct led_classdev *led_cdev)
 {
        struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
 
-       if (heartbeat_data) {
+       if (led_cdev->activated) {
                del_timer_sync(&heartbeat_data->timer);
                kfree(heartbeat_data);
+               led_cdev->activated = false;
        }
 }
 
@@ -101,13 +104,38 @@ static struct led_trigger heartbeat_led_trigger = {
        .deactivate = heartbeat_trig_deactivate,
 };
 
+static int heartbeat_reboot_notifier(struct notifier_block *nb,
+                                    unsigned long code, void *unused)
+{
+       led_trigger_unregister(&heartbeat_led_trigger);
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block heartbeat_reboot_nb = {
+       .notifier_call = heartbeat_reboot_notifier,
+};
+
+static struct notifier_block heartbeat_panic_nb = {
+       .notifier_call = heartbeat_reboot_notifier,
+};
+
 static int __init heartbeat_trig_init(void)
 {
-       return led_trigger_register(&heartbeat_led_trigger);
+       int rc = led_trigger_register(&heartbeat_led_trigger);
+
+       if (!rc) {
+               atomic_notifier_chain_register(&panic_notifier_list,
+                                              &heartbeat_panic_nb);
+               register_reboot_notifier(&heartbeat_reboot_nb);
+       }
+       return rc;
 }
 
 static void __exit heartbeat_trig_exit(void)
 {
+       unregister_reboot_notifier(&heartbeat_reboot_nb);
+       atomic_notifier_chain_unregister(&panic_notifier_list,
+                                        &heartbeat_panic_nb);
        led_trigger_unregister(&heartbeat_led_trigger);
 }
 
index 328c64c0841cdda76f0d4ac7ed2b13cf801bec7b..9010f7abaf2cac05b5d65c118af6fcbb70a8f34e 100644 (file)
@@ -31,21 +31,17 @@ static ssize_t led_delay_on_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t size)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
-       int ret = -EINVAL;
-       char *after;
-       unsigned long state = simple_strtoul(buf, &after, 10);
-       size_t count = after - buf;
-
-       if (isspace(*after))
-               count++;
-
-       if (count == size) {
-               led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
-               led_cdev->blink_delay_on = state;
-               ret = count;
-       }
+       unsigned long state;
+       ssize_t ret = -EINVAL;
+
+       ret = kstrtoul(buf, 10, &state);
+       if (ret)
+               return ret;
 
-       return ret;
+       led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
+       led_cdev->blink_delay_on = state;
+
+       return size;
 }
 
 static ssize_t led_delay_off_show(struct device *dev,
@@ -60,21 +56,17 @@ static ssize_t led_delay_off_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t size)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
-       int ret = -EINVAL;
-       char *after;
-       unsigned long state = simple_strtoul(buf, &after, 10);
-       size_t count = after - buf;
-
-       if (isspace(*after))
-               count++;
-
-       if (count == size) {
-               led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
-               led_cdev->blink_delay_off = state;
-               ret = count;
-       }
+       unsigned long state;
+       ssize_t ret = -EINVAL;
 
-       return ret;
+       ret = kstrtoul(buf, 10, &state);
+       if (ret)
+               return ret;
+
+       led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
+       led_cdev->blink_delay_off = state;
+
+       return size;
 }
 
 static DEVICE_ATTR(delay_on, 0644, led_delay_on_show, led_delay_on_store);
@@ -95,8 +87,7 @@ static void timer_trig_activate(struct led_classdev *led_cdev)
 
        led_blink_set(led_cdev, &led_cdev->blink_delay_on,
                      &led_cdev->blink_delay_off);
-
-       led_cdev->trigger_data = (void *)1;
+       led_cdev->activated = true;
 
        return;
 
@@ -106,9 +97,10 @@ err_out_delayon:
 
 static void timer_trig_deactivate(struct led_classdev *led_cdev)
 {
-       if (led_cdev->trigger_data) {
+       if (led_cdev->activated) {
                device_remove_file(led_cdev->dev, &dev_attr_delay_on);
                device_remove_file(led_cdev->dev, &dev_attr_delay_off);
+               led_cdev->activated = false;
        }
 
        /* Stop blinking */
diff --git a/drivers/leds/ledtrig-transient.c b/drivers/leds/ledtrig-transient.c
new file mode 100644 (file)
index 0000000..83179f4
--- /dev/null
@@ -0,0 +1,237 @@
+/*
+ * LED Kernel Transient Trigger
+ *
+ * Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
+ *
+ * Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
+ * ledtrig-heartbeat.c
+ * Design and use-case input from Jonas Bonn <jonas@southpole.se> and
+ * Neil Brown <neilb@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+/*
+ * Transient trigger allows one shot timer activation. Please refer to
+ * Documentation/leds/ledtrig-transient.txt for details
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/leds.h>
+#include "leds.h"
+
+struct transient_trig_data {
+       int activate;
+       int state;
+       int restore_state;
+       unsigned long duration;
+       struct timer_list timer;
+};
+
+static void transient_timer_function(unsigned long data)
+{
+       struct led_classdev *led_cdev = (struct led_classdev *) data;
+       struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+       transient_data->activate = 0;
+       led_set_brightness(led_cdev, transient_data->restore_state);
+}
+
+static ssize_t transient_activate_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+       return sprintf(buf, "%d\n", transient_data->activate);
+}
+
+static ssize_t transient_activate_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct transient_trig_data *transient_data = led_cdev->trigger_data;
+       unsigned long state;
+       ssize_t ret;
+
+       ret = kstrtoul(buf, 10, &state);
+       if (ret)
+               return ret;
+
+       if (state != 1 && state != 0)
+               return -EINVAL;
+
+       /* cancel the running timer */
+       if (state == 0 && transient_data->activate == 1) {
+               del_timer(&transient_data->timer);
+               transient_data->activate = state;
+               led_set_brightness(led_cdev, transient_data->restore_state);
+               return size;
+       }
+
+       /* start timer if there is no active timer */
+       if (state == 1 && transient_data->activate == 0 &&
+           transient_data->duration != 0) {
+               transient_data->activate = state;
+               led_set_brightness(led_cdev, transient_data->state);
+               transient_data->restore_state =
+                   (transient_data->state == LED_FULL) ? LED_OFF : LED_FULL;
+               mod_timer(&transient_data->timer,
+                         jiffies + transient_data->duration);
+       }
+
+       /* state == 0 && transient_data->activate == 0
+               timer is not active - just return */
+       /* state == 1 && transient_data->activate == 1
+               timer is already active - just return */
+
+       return size;
+}
+
+static ssize_t transient_duration_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+       return sprintf(buf, "%lu\n", transient_data->duration);
+}
+
+static ssize_t transient_duration_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct transient_trig_data *transient_data = led_cdev->trigger_data;
+       unsigned long state;
+       ssize_t ret;
+
+       ret = kstrtoul(buf, 10, &state);
+       if (ret)
+               return ret;
+
+       transient_data->duration = state;
+       return size;
+}
+
+static ssize_t transient_state_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct transient_trig_data *transient_data = led_cdev->trigger_data;
+       int state;
+
+       state = (transient_data->state == LED_FULL) ? 1 : 0;
+       return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t transient_state_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct transient_trig_data *transient_data = led_cdev->trigger_data;
+       unsigned long state;
+       ssize_t ret;
+
+       ret = kstrtoul(buf, 10, &state);
+       if (ret)
+               return ret;
+
+       if (state != 1 && state != 0)
+               return -EINVAL;
+
+       transient_data->state = (state == 1) ? LED_FULL : LED_OFF;
+       return size;
+}
+
+static DEVICE_ATTR(activate, 0644, transient_activate_show,
+                  transient_activate_store);
+static DEVICE_ATTR(duration, 0644, transient_duration_show,
+                  transient_duration_store);
+static DEVICE_ATTR(state, 0644, transient_state_show, transient_state_store);
+
+static void transient_trig_activate(struct led_classdev *led_cdev)
+{
+       int rc;
+       struct transient_trig_data *tdata;
+
+       tdata = kzalloc(sizeof(struct transient_trig_data), GFP_KERNEL);
+       if (!tdata) {
+               dev_err(led_cdev->dev,
+                       "unable to allocate transient trigger\n");
+               return;
+       }
+       led_cdev->trigger_data = tdata;
+
+       rc = device_create_file(led_cdev->dev, &dev_attr_activate);
+       if (rc)
+               goto err_out;
+
+       rc = device_create_file(led_cdev->dev, &dev_attr_duration);
+       if (rc)
+               goto err_out_duration;
+
+       rc = device_create_file(led_cdev->dev, &dev_attr_state);
+       if (rc)
+               goto err_out_state;
+
+       setup_timer(&tdata->timer, transient_timer_function,
+                   (unsigned long) led_cdev);
+       led_cdev->activated = true;
+
+       return;
+
+err_out_state:
+       device_remove_file(led_cdev->dev, &dev_attr_duration);
+err_out_duration:
+       device_remove_file(led_cdev->dev, &dev_attr_activate);
+err_out:
+       dev_err(led_cdev->dev, "unable to register transient trigger\n");
+       led_cdev->trigger_data = NULL;
+       kfree(tdata);
+}
+
+static void transient_trig_deactivate(struct led_classdev *led_cdev)
+{
+       struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+       if (led_cdev->activated) {
+               del_timer_sync(&transient_data->timer);
+               led_set_brightness(led_cdev, transient_data->restore_state);
+               device_remove_file(led_cdev->dev, &dev_attr_activate);
+               device_remove_file(led_cdev->dev, &dev_attr_duration);
+               device_remove_file(led_cdev->dev, &dev_attr_state);
+               led_cdev->trigger_data = NULL;
+               led_cdev->activated = false;
+               kfree(transient_data);
+       }
+}
+
+static struct led_trigger transient_trigger = {
+       .name     = "transient",
+       .activate = transient_trig_activate,
+       .deactivate = transient_trig_deactivate,
+};
+
+static int __init transient_trig_init(void)
+{
+       return led_trigger_register(&transient_trigger);
+}
+
+static void __exit transient_trig_exit(void)
+{
+       led_trigger_unregister(&transient_trigger);
+}
+
+module_init(transient_trig_init);
+module_exit(transient_trig_exit);
+
+MODULE_AUTHOR("Shuah Khan <shuahkhan@gmail.com>");
+MODULE_DESCRIPTION("Transient LED trigger");
+MODULE_LICENSE("GPL");
index 754f38f8a6922d94d425fcaef5d834a573851fd4..638dae048b4fada0633f2592538ee16535072b87 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/workqueue.h>
+#include <linux/delay.h>
 #include <scsi/scsi_dh.h>
 #include <linux/atomic.h>
 
@@ -61,11 +62,11 @@ struct multipath {
        struct list_head list;
        struct dm_target *ti;
 
-       spinlock_t lock;
-
        const char *hw_handler_name;
        char *hw_handler_params;
 
+       spinlock_t lock;
+
        unsigned nr_priority_groups;
        struct list_head priority_groups;
 
@@ -81,16 +82,17 @@ struct multipath {
        struct priority_group *next_pg; /* Switch to this PG if set */
        unsigned repeat_count;          /* I/Os left before calling PS again */
 
-       unsigned queue_io;              /* Must we queue all I/O? */
-       unsigned queue_if_no_path;      /* Queue I/O if last path fails? */
-       unsigned saved_queue_if_no_path;/* Saved state during suspension */
+       unsigned queue_io:1;            /* Must we queue all I/O? */
+       unsigned queue_if_no_path:1;    /* Queue I/O if last path fails? */
+       unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
+
        unsigned pg_init_retries;       /* Number of times to retry pg_init */
        unsigned pg_init_count;         /* Number of times pg_init called */
        unsigned pg_init_delay_msecs;   /* Number of msecs before pg_init retry */
 
+       unsigned queue_size;
        struct work_struct process_queued_ios;
        struct list_head queued_ios;
-       unsigned queue_size;
 
        struct work_struct trigger_event;
 
@@ -328,14 +330,18 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
        /*
         * Loop through priority groups until we find a valid path.
         * First time we skip PGs marked 'bypassed'.
-        * Second time we only try the ones we skipped.
+        * Second time we only try the ones we skipped, but set
+        * pg_init_delay_retry so we do not hammer controllers.
         */
        do {
                list_for_each_entry(pg, &m->priority_groups, list) {
                        if (pg->bypassed == bypassed)
                                continue;
-                       if (!__choose_path_in_pg(m, pg, nr_bytes))
+                       if (!__choose_path_in_pg(m, pg, nr_bytes)) {
+                               if (!bypassed)
+                                       m->pg_init_delay_retry = 1;
                                return;
+                       }
                }
        } while (bypassed--);
 
@@ -481,9 +487,6 @@ static void process_queued_ios(struct work_struct *work)
 
        spin_lock_irqsave(&m->lock, flags);
 
-       if (!m->queue_size)
-               goto out;
-
        if (!m->current_pgpath)
                __choose_pgpath(m, 0);
 
@@ -496,7 +499,6 @@ static void process_queued_ios(struct work_struct *work)
        if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
                __pg_init_all_paths(m);
 
-out:
        spin_unlock_irqrestore(&m->lock, flags);
        if (!must_queue)
                dispatch_queued_ios(m);
@@ -1517,11 +1519,16 @@ out:
 static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
                           unsigned long arg)
 {
-       struct multipath *m = (struct multipath *) ti->private;
-       struct block_device *bdev = NULL;
-       fmode_t mode = 0;
+       struct multipath *m = ti->private;
+       struct block_device *bdev;
+       fmode_t mode;
        unsigned long flags;
-       int r = 0;
+       int r;
+
+again:
+       bdev = NULL;
+       mode = 0;
+       r = 0;
 
        spin_lock_irqsave(&m->lock, flags);
 
@@ -1546,6 +1553,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
        if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
                r = scsi_verify_blk_ioctl(NULL, cmd);
 
+       if (r == -EAGAIN && !fatal_signal_pending(current)) {
+               queue_work(kmultipathd, &m->process_queued_ios);
+               msleep(10);
+               goto again;
+       }
+
        return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
 }
 
@@ -1643,7 +1656,7 @@ out:
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
        .name = "multipath",
-       .version = {1, 3, 0},
+       .version = {1, 4, 0},
        .module = THIS_MODULE,
        .ctr = multipath_ctr,
        .dtr = multipath_dtr,
index 737d38865b693fb0288306477587f4e0d52f6cfa..3e2907f0bc462e261c05fab97cb0bff272649868 100644 (file)
@@ -1082,31 +1082,155 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
        return 0;
 }
 
-static int __get_held_metadata_root(struct dm_pool_metadata *pmd,
-                                   dm_block_t *result)
+static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
+{
+       int r, inc;
+       struct thin_disk_superblock *disk_super;
+       struct dm_block *copy, *sblock;
+       dm_block_t held_root;
+
+       /*
+        * Copy the superblock.
+        */
+       dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
+       r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
+                              &sb_validator, &copy, &inc);
+       if (r)
+               return r;
+
+       BUG_ON(!inc);
+
+       held_root = dm_block_location(copy);
+       disk_super = dm_block_data(copy);
+
+       if (le64_to_cpu(disk_super->held_root)) {
+               DMWARN("Pool metadata snapshot already exists: release this before taking another.");
+
+               dm_tm_dec(pmd->tm, held_root);
+               dm_tm_unlock(pmd->tm, copy);
+               pmd->need_commit = 1;
+
+               return -EBUSY;
+       }
+
+       /*
+        * Wipe the spacemap since we're not publishing this.
+        */
+       memset(&disk_super->data_space_map_root, 0,
+              sizeof(disk_super->data_space_map_root));
+       memset(&disk_super->metadata_space_map_root, 0,
+              sizeof(disk_super->metadata_space_map_root));
+
+       /*
+        * Increment the data structures that need to be preserved.
+        */
+       dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
+       dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
+       dm_tm_unlock(pmd->tm, copy);
+
+       /*
+        * Write the held root into the superblock.
+        */
+       r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+                            &sb_validator, &sblock);
+       if (r) {
+               dm_tm_dec(pmd->tm, held_root);
+               pmd->need_commit = 1;
+               return r;
+       }
+
+       disk_super = dm_block_data(sblock);
+       disk_super->held_root = cpu_to_le64(held_root);
+       dm_bm_unlock(sblock);
+
+       pmd->need_commit = 1;
+
+       return 0;
+}
+
+int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
+{
+       int r;
+
+       down_write(&pmd->root_lock);
+       r = __reserve_metadata_snap(pmd);
+       up_write(&pmd->root_lock);
+
+       return r;
+}
+
+static int __release_metadata_snap(struct dm_pool_metadata *pmd)
 {
        int r;
        struct thin_disk_superblock *disk_super;
-       struct dm_block *sblock;
+       struct dm_block *sblock, *copy;
+       dm_block_t held_root;
 
        r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
                             &sb_validator, &sblock);
        if (r)
                return r;
 
+       disk_super = dm_block_data(sblock);
+       held_root = le64_to_cpu(disk_super->held_root);
+       disk_super->held_root = cpu_to_le64(0);
+       pmd->need_commit = 1;
+
+       dm_bm_unlock(sblock);
+
+       if (!held_root) {
+               DMWARN("No pool metadata snapshot found: nothing to release.");
+               return -EINVAL;
+       }
+
+       r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
+       if (r)
+               return r;
+
+       disk_super = dm_block_data(copy);
+       dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
+       dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+       dm_sm_dec_block(pmd->metadata_sm, held_root);
+
+       return dm_tm_unlock(pmd->tm, copy);
+}
+
+int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
+{
+       int r;
+
+       down_write(&pmd->root_lock);
+       r = __release_metadata_snap(pmd);
+       up_write(&pmd->root_lock);
+
+       return r;
+}
+
+static int __get_metadata_snap(struct dm_pool_metadata *pmd,
+                              dm_block_t *result)
+{
+       int r;
+       struct thin_disk_superblock *disk_super;
+       struct dm_block *sblock;
+
+       r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+                           &sb_validator, &sblock);
+       if (r)
+               return r;
+
        disk_super = dm_block_data(sblock);
        *result = le64_to_cpu(disk_super->held_root);
 
        return dm_bm_unlock(sblock);
 }
 
-int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd,
-                                  dm_block_t *result)
+int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
+                             dm_block_t *result)
 {
        int r;
 
        down_read(&pmd->root_lock);
-       r = __get_held_metadata_root(pmd, result);
+       r = __get_metadata_snap(pmd, result);
        up_read(&pmd->root_lock);
 
        return r;
index ed4725e67c96fbae52f1ce48c28d79f0a02075a4..b88918ccdaf688e3bc5f10478023276893cc3521 100644 (file)
@@ -90,11 +90,18 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
 
 /*
  * Hold/get root for userspace transaction.
+ *
+ * The metadata snapshot is a copy of the current superblock (minus the
+ * space maps).  Userland can access the data structures for READ
+ * operations only.  A small performance hit is incurred by providing this
+ * copy of the metadata to userland due to extra copy-on-write operations
+ * on the metadata nodes.  Release this as soon as you finish with it.
  */
-int dm_pool_hold_metadata_root(struct dm_pool_metadata *pmd);
+int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd);
+int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd);
 
-int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd,
-                                  dm_block_t *result);
+int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
+                             dm_block_t *result);
 
 /*
  * Actions on a single virtual device.
index eb3d138ff55afc629e0d0167acae140223a86553..37fdaf81bd1f89abfd28f6a46d2be7ce95f8bcba 100644 (file)
@@ -111,7 +111,7 @@ struct cell_key {
        dm_block_t block;
 };
 
-struct cell {
+struct dm_bio_prison_cell {
        struct hlist_node list;
        struct bio_prison *prison;
        struct cell_key key;
@@ -141,6 +141,8 @@ static uint32_t calc_nr_buckets(unsigned nr_cells)
        return n;
 }
 
+static struct kmem_cache *_cell_cache;
+
 /*
  * @nr_cells should be the number of cells you want in use _concurrently_.
  * Don't confuse it with the number of distinct keys.
@@ -157,8 +159,7 @@ static struct bio_prison *prison_create(unsigned nr_cells)
                return NULL;
 
        spin_lock_init(&prison->lock);
-       prison->cell_pool = mempool_create_kmalloc_pool(nr_cells,
-                                                       sizeof(struct cell));
+       prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
        if (!prison->cell_pool) {
                kfree(prison);
                return NULL;
@@ -194,10 +195,10 @@ static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
                       (lhs->block == rhs->block);
 }
 
-static struct cell *__search_bucket(struct hlist_head *bucket,
-                                   struct cell_key *key)
+static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
+                                                 struct cell_key *key)
 {
-       struct cell *cell;
+       struct dm_bio_prison_cell *cell;
        struct hlist_node *tmp;
 
        hlist_for_each_entry(cell, tmp, bucket, list)
@@ -214,12 +215,12 @@ static struct cell *__search_bucket(struct hlist_head *bucket,
  * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
  */
 static int bio_detain(struct bio_prison *prison, struct cell_key *key,
-                     struct bio *inmate, struct cell **ref)
+                     struct bio *inmate, struct dm_bio_prison_cell **ref)
 {
        int r = 1;
        unsigned long flags;
        uint32_t hash = hash_key(prison, key);
-       struct cell *cell, *cell2;
+       struct dm_bio_prison_cell *cell, *cell2;
 
        BUG_ON(hash > prison->nr_buckets);
 
@@ -273,7 +274,7 @@ out:
 /*
  * @inmates must have been initialised prior to this call
  */
-static void __cell_release(struct cell *cell, struct bio_list *inmates)
+static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
 {
        struct bio_prison *prison = cell->prison;
 
@@ -287,7 +288,7 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
        mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release(struct cell *cell, struct bio_list *bios)
+static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
 {
        unsigned long flags;
        struct bio_prison *prison = cell->prison;
@@ -303,7 +304,7 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
  * bio may be in the cell.  This function releases the cell, and also does
  * a sanity check.
  */
-static void __cell_release_singleton(struct cell *cell, struct bio *bio)
+static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
        BUG_ON(cell->holder != bio);
        BUG_ON(!bio_list_empty(&cell->bios));
@@ -311,7 +312,7 @@ static void __cell_release_singleton(struct cell *cell, struct bio *bio)
        __cell_release(cell, NULL);
 }
 
-static void cell_release_singleton(struct cell *cell, struct bio *bio)
+static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
        unsigned long flags;
        struct bio_prison *prison = cell->prison;
@@ -324,7 +325,8 @@ static void cell_release_singleton(struct cell *cell, struct bio *bio)
 /*
  * Sometimes we don't want the holder, just the additional bios.
  */
-static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
+                                    struct bio_list *inmates)
 {
        struct bio_prison *prison = cell->prison;
 
@@ -334,7 +336,8 @@ static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates
        mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
+                                  struct bio_list *inmates)
 {
        unsigned long flags;
        struct bio_prison *prison = cell->prison;
@@ -344,7 +347,7 @@ static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
        spin_unlock_irqrestore(&prison->lock, flags);
 }
 
-static void cell_error(struct cell *cell)
+static void cell_error(struct dm_bio_prison_cell *cell)
 {
        struct bio_prison *prison = cell->prison;
        struct bio_list bios;
@@ -491,7 +494,7 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
  * also provides the interface for creating and destroying internal
  * devices.
  */
-struct new_mapping;
+struct dm_thin_new_mapping;
 
 struct pool_features {
        unsigned zero_new_blocks:1;
@@ -537,7 +540,7 @@ struct pool {
        struct deferred_set shared_read_ds;
        struct deferred_set all_io_ds;
 
-       struct new_mapping *next_mapping;
+       struct dm_thin_new_mapping *next_mapping;
        mempool_t *mapping_pool;
        mempool_t *endio_hook_pool;
 };
@@ -630,11 +633,11 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
 
 /*----------------------------------------------------------------*/
 
-struct endio_hook {
+struct dm_thin_endio_hook {
        struct thin_c *tc;
        struct deferred_entry *shared_read_entry;
        struct deferred_entry *all_io_entry;
-       struct new_mapping *overwrite_mapping;
+       struct dm_thin_new_mapping *overwrite_mapping;
 };
 
 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
@@ -647,7 +650,8 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
        bio_list_init(master);
 
        while ((bio = bio_list_pop(&bios))) {
-               struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
                if (h->tc == tc)
                        bio_endio(bio, DM_ENDIO_REQUEUE);
                else
@@ -736,7 +740,7 @@ static void wake_worker(struct pool *pool)
 /*
  * Bio endio functions.
  */
-struct new_mapping {
+struct dm_thin_new_mapping {
        struct list_head list;
 
        unsigned quiesced:1;
@@ -746,7 +750,7 @@ struct new_mapping {
        struct thin_c *tc;
        dm_block_t virt_block;
        dm_block_t data_block;
-       struct cell *cell, *cell2;
+       struct dm_bio_prison_cell *cell, *cell2;
        int err;
 
        /*
@@ -759,7 +763,7 @@ struct new_mapping {
        bio_end_io_t *saved_bi_end_io;
 };
 
-static void __maybe_add_mapping(struct new_mapping *m)
+static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
 {
        struct pool *pool = m->tc->pool;
 
@@ -772,7 +776,7 @@ static void __maybe_add_mapping(struct new_mapping *m)
 static void copy_complete(int read_err, unsigned long write_err, void *context)
 {
        unsigned long flags;
-       struct new_mapping *m = context;
+       struct dm_thin_new_mapping *m = context;
        struct pool *pool = m->tc->pool;
 
        m->err = read_err || write_err ? -EIO : 0;
@@ -786,8 +790,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 static void overwrite_endio(struct bio *bio, int err)
 {
        unsigned long flags;
-       struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
-       struct new_mapping *m = h->overwrite_mapping;
+       struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+       struct dm_thin_new_mapping *m = h->overwrite_mapping;
        struct pool *pool = m->tc->pool;
 
        m->err = err;
@@ -811,7 +815,7 @@ static void overwrite_endio(struct bio *bio, int err)
 /*
  * This sends the bios in the cell back to the deferred_bios list.
  */
-static void cell_defer(struct thin_c *tc, struct cell *cell,
+static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
                       dm_block_t data_block)
 {
        struct pool *pool = tc->pool;
@@ -828,7 +832,7 @@ static void cell_defer(struct thin_c *tc, struct cell *cell,
  * Same as cell_defer above, except it omits one particular detainee,
  * a write bio that covers the block and has already been processed.
  */
-static void cell_defer_except(struct thin_c *tc, struct cell *cell)
+static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
        struct bio_list bios;
        struct pool *pool = tc->pool;
@@ -843,7 +847,7 @@ static void cell_defer_except(struct thin_c *tc, struct cell *cell)
        wake_worker(pool);
 }
 
-static void process_prepared_mapping(struct new_mapping *m)
+static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
        struct thin_c *tc = m->tc;
        struct bio *bio;
@@ -886,7 +890,7 @@ static void process_prepared_mapping(struct new_mapping *m)
        mempool_free(m, tc->pool->mapping_pool);
 }
 
-static void process_prepared_discard(struct new_mapping *m)
+static void process_prepared_discard(struct dm_thin_new_mapping *m)
 {
        int r;
        struct thin_c *tc = m->tc;
@@ -909,11 +913,11 @@ static void process_prepared_discard(struct new_mapping *m)
 }
 
 static void process_prepared(struct pool *pool, struct list_head *head,
-                            void (*fn)(struct new_mapping *))
+                            void (*fn)(struct dm_thin_new_mapping *))
 {
        unsigned long flags;
        struct list_head maps;
-       struct new_mapping *m, *tmp;
+       struct dm_thin_new_mapping *m, *tmp;
 
        INIT_LIST_HEAD(&maps);
        spin_lock_irqsave(&pool->lock, flags);
@@ -957,9 +961,9 @@ static int ensure_next_mapping(struct pool *pool)
        return pool->next_mapping ? 0 : -ENOMEM;
 }
 
-static struct new_mapping *get_next_mapping(struct pool *pool)
+static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 {
-       struct new_mapping *r = pool->next_mapping;
+       struct dm_thin_new_mapping *r = pool->next_mapping;
 
        BUG_ON(!pool->next_mapping);
 
@@ -971,11 +975,11 @@ static struct new_mapping *get_next_mapping(struct pool *pool)
 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
                          struct dm_dev *origin, dm_block_t data_origin,
                          dm_block_t data_dest,
-                         struct cell *cell, struct bio *bio)
+                         struct dm_bio_prison_cell *cell, struct bio *bio)
 {
        int r;
        struct pool *pool = tc->pool;
-       struct new_mapping *m = get_next_mapping(pool);
+       struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
        INIT_LIST_HEAD(&m->list);
        m->quiesced = 0;
@@ -997,7 +1001,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
         * bio immediately. Otherwise we use kcopyd to clone the data first.
         */
        if (io_overwrites_block(pool, bio)) {
-               struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
                h->overwrite_mapping = m;
                m->bio = bio;
                save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
@@ -1025,7 +1030,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 
 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
                                   dm_block_t data_origin, dm_block_t data_dest,
-                                  struct cell *cell, struct bio *bio)
+                                  struct dm_bio_prison_cell *cell, struct bio *bio)
 {
        schedule_copy(tc, virt_block, tc->pool_dev,
                      data_origin, data_dest, cell, bio);
@@ -1033,18 +1038,18 @@ static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
 
 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
                                   dm_block_t data_dest,
-                                  struct cell *cell, struct bio *bio)
+                                  struct dm_bio_prison_cell *cell, struct bio *bio)
 {
        schedule_copy(tc, virt_block, tc->origin_dev,
                      virt_block, data_dest, cell, bio);
 }
 
 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
-                         dm_block_t data_block, struct cell *cell,
+                         dm_block_t data_block, struct dm_bio_prison_cell *cell,
                          struct bio *bio)
 {
        struct pool *pool = tc->pool;
-       struct new_mapping *m = get_next_mapping(pool);
+       struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
        INIT_LIST_HEAD(&m->list);
        m->quiesced = 1;
@@ -1065,12 +1070,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
                process_prepared_mapping(m);
 
        else if (io_overwrites_block(pool, bio)) {
-               struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
                h->overwrite_mapping = m;
                m->bio = bio;
                save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
                remap_and_issue(tc, bio, data_block);
-
        } else {
                int r;
                struct dm_io_region to;
@@ -1155,7 +1160,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
  */
 static void retry_on_resume(struct bio *bio)
 {
-       struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+       struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
        struct thin_c *tc = h->tc;
        struct pool *pool = tc->pool;
        unsigned long flags;
@@ -1165,7 +1170,7 @@ static void retry_on_resume(struct bio *bio)
        spin_unlock_irqrestore(&pool->lock, flags);
 }
 
-static void no_space(struct cell *cell)
+static void no_space(struct dm_bio_prison_cell *cell)
 {
        struct bio *bio;
        struct bio_list bios;
@@ -1182,11 +1187,11 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
        int r;
        unsigned long flags;
        struct pool *pool = tc->pool;
-       struct cell *cell, *cell2;
+       struct dm_bio_prison_cell *cell, *cell2;
        struct cell_key key, key2;
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_thin_lookup_result lookup_result;
-       struct new_mapping *m;
+       struct dm_thin_new_mapping *m;
 
        build_virtual_key(tc->td, block, &key);
        if (bio_detain(tc->pool->prison, &key, bio, &cell))
@@ -1263,7 +1268,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
                          struct cell_key *key,
                          struct dm_thin_lookup_result *lookup_result,
-                         struct cell *cell)
+                         struct dm_bio_prison_cell *cell)
 {
        int r;
        dm_block_t data_block;
@@ -1290,7 +1295,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
                               dm_block_t block,
                               struct dm_thin_lookup_result *lookup_result)
 {
-       struct cell *cell;
+       struct dm_bio_prison_cell *cell;
        struct pool *pool = tc->pool;
        struct cell_key key;
 
@@ -1305,7 +1310,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
        if (bio_data_dir(bio) == WRITE)
                break_sharing(tc, bio, block, &key, lookup_result, cell);
        else {
-               struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 
                h->shared_read_entry = ds_inc(&pool->shared_read_ds);
 
@@ -1315,7 +1320,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 }
 
 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
-                           struct cell *cell)
+                           struct dm_bio_prison_cell *cell)
 {
        int r;
        dm_block_t data_block;
@@ -1363,7 +1368,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 {
        int r;
        dm_block_t block = get_bio_block(tc, bio);
-       struct cell *cell;
+       struct dm_bio_prison_cell *cell;
        struct cell_key key;
        struct dm_thin_lookup_result lookup_result;
 
@@ -1432,7 +1437,7 @@ static void process_deferred_bios(struct pool *pool)
        spin_unlock_irqrestore(&pool->lock, flags);
 
        while ((bio = bio_list_pop(&bios))) {
-               struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
                struct thin_c *tc = h->tc;
 
                /*
@@ -1522,10 +1527,10 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
        wake_worker(pool);
 }
 
-static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
 {
        struct pool *pool = tc->pool;
-       struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+       struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
 
        h->tc = tc;
        h->shared_read_entry = NULL;
@@ -1687,6 +1692,9 @@ static void __pool_destroy(struct pool *pool)
        kfree(pool);
 }
 
+static struct kmem_cache *_new_mapping_cache;
+static struct kmem_cache *_endio_hook_cache;
+
 static struct pool *pool_create(struct mapped_device *pool_md,
                                struct block_device *metadata_dev,
                                unsigned long block_size, char **error)
@@ -1755,16 +1763,16 @@ static struct pool *pool_create(struct mapped_device *pool_md,
        ds_init(&pool->all_io_ds);
 
        pool->next_mapping = NULL;
-       pool->mapping_pool =
-               mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping));
+       pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
+                                                     _new_mapping_cache);
        if (!pool->mapping_pool) {
                *error = "Error creating pool's mapping mempool";
                err_p = ERR_PTR(-ENOMEM);
                goto bad_mapping_pool;
        }
 
-       pool->endio_hook_pool =
-               mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook));
+       pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
+                                                        _endio_hook_cache);
        if (!pool->endio_hook_pool) {
                *error = "Error creating pool's endio_hook mempool";
                err_p = ERR_PTR(-ENOMEM);
@@ -2276,6 +2284,36 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
        return 0;
 }
 
+static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+       int r;
+
+       r = check_arg_count(argc, 1);
+       if (r)
+               return r;
+
+       r = dm_pool_reserve_metadata_snap(pool->pmd);
+       if (r)
+               DMWARN("reserve_metadata_snap message failed.");
+
+       return r;
+}
+
+static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+       int r;
+
+       r = check_arg_count(argc, 1);
+       if (r)
+               return r;
+
+       r = dm_pool_release_metadata_snap(pool->pmd);
+       if (r)
+               DMWARN("release_metadata_snap message failed.");
+
+       return r;
+}
+
 /*
  * Messages supported:
  *   create_thin       <dev_id>
@@ -2283,6 +2321,8 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
  *   delete            <dev_id>
  *   trim              <dev_id> <new_size_in_sectors>
  *   set_transaction_id <current_trans_id> <new_trans_id>
+ *   reserve_metadata_snap
+ *   release_metadata_snap
  */
 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
 {
@@ -2302,6 +2342,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
        else if (!strcasecmp(argv[0], "set_transaction_id"))
                r = process_set_transaction_id_mesg(argc, argv, pool);
 
+       else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
+               r = process_reserve_metadata_snap_mesg(argc, argv, pool);
+
+       else if (!strcasecmp(argv[0], "release_metadata_snap"))
+               r = process_release_metadata_snap_mesg(argc, argv, pool);
+
        else
                DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
 
@@ -2361,7 +2407,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
                if (r)
                        return r;
 
-               r = dm_pool_get_held_metadata_root(pool->pmd, &held_root);
+               r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
                if (r)
                        return r;
 
@@ -2457,7 +2503,7 @@ static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 1, 0},
+       .version = {1, 2, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
@@ -2613,9 +2659,9 @@ static int thin_endio(struct dm_target *ti,
                      union map_info *map_context)
 {
        unsigned long flags;
-       struct endio_hook *h = map_context->ptr;
+       struct dm_thin_endio_hook *h = map_context->ptr;
        struct list_head work;
-       struct new_mapping *m, *tmp;
+       struct dm_thin_new_mapping *m, *tmp;
        struct pool *pool = h->tc->pool;
 
        if (h->shared_read_entry) {
@@ -2755,7 +2801,32 @@ static int __init dm_thin_init(void)
 
        r = dm_register_target(&pool_target);
        if (r)
-               dm_unregister_target(&thin_target);
+               goto bad_pool_target;
+
+       r = -ENOMEM;
+
+       _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
+       if (!_cell_cache)
+               goto bad_cell_cache;
+
+       _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+       if (!_new_mapping_cache)
+               goto bad_new_mapping_cache;
+
+       _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
+       if (!_endio_hook_cache)
+               goto bad_endio_hook_cache;
+
+       return 0;
+
+bad_endio_hook_cache:
+       kmem_cache_destroy(_new_mapping_cache);
+bad_new_mapping_cache:
+       kmem_cache_destroy(_cell_cache);
+bad_cell_cache:
+       dm_unregister_target(&pool_target);
+bad_pool_target:
+       dm_unregister_target(&thin_target);
 
        return r;
 }
@@ -2764,6 +2835,10 @@ static void dm_thin_exit(void)
 {
        dm_unregister_target(&thin_target);
        dm_unregister_target(&pool_target);
+
+       kmem_cache_destroy(_cell_cache);
+       kmem_cache_destroy(_new_mapping_cache);
+       kmem_cache_destroy(_endio_hook_cache);
 }
 
 module_init(dm_thin_init);
index 6f8d38747d7f438294fca80e24a3e3a61441dda1..400fe144c0cd1c94dd5c325c5ed690a425b7ece3 100644 (file)
@@ -249,6 +249,7 @@ int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
 
        return r;
 }
+EXPORT_SYMBOL_GPL(dm_tm_shadow_block);
 
 int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
                    struct dm_block_validator *v,
@@ -259,6 +260,7 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
 
        return dm_bm_read_lock(tm->bm, b, v, blk);
 }
+EXPORT_SYMBOL_GPL(dm_tm_read_lock);
 
 int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
 {
index 02d54a057b601f761ea5354a3dc4de29f699f67d..f13643d313531d16b59f98b532df328762c23b41 100644 (file)
@@ -511,7 +511,7 @@ static void mx3_camera_activate(struct mx3_camera_dev *mx3_cam,
        /* ipu_csi_init_interface() */
        csi_reg_write(mx3_cam, conf, CSI_SENS_CONF);
 
-       clk_enable(mx3_cam->clk);
+       clk_prepare_enable(mx3_cam->clk);
        rate = clk_round_rate(mx3_cam->clk, mx3_cam->mclk);
        dev_dbg(icd->parent, "Set SENS_CONF to %x, rate %ld\n", conf, rate);
        if (rate)
@@ -552,7 +552,7 @@ static void mx3_camera_remove_device(struct soc_camera_device *icd)
                *ichan = NULL;
        }
 
-       clk_disable(mx3_cam->clk);
+       clk_disable_unprepare(mx3_cam->clk);
 
        mx3_cam->icd = NULL;
 
index a5c591ffe395d01b6e7e09f927c03f3048688620..d99db5623acf45039f53dd9eb7e6b9620d88735d 100644 (file)
@@ -1653,7 +1653,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
        unsigned long    port;
        u32              msize;
        u32              psize;
-       u8               revision;
        int              r = -ENODEV;
        struct pci_dev *pdev;
 
@@ -1670,8 +1669,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
                return r;
        }
 
-       pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
-
        if (sizeof(dma_addr_t) > 4) {
                const uint64_t required_mask = dma_get_required_mask
                    (&pdev->dev);
@@ -1779,7 +1776,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
        MPT_ADAPTER     *ioc;
        u8               cb_idx;
        int              r = -ENODEV;
-       u8               revision;
        u8               pcixcmd;
        static int       mpt_ids = 0;
 #ifdef CONFIG_PROC_FS
@@ -1887,8 +1883,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
        dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
            ioc->name, &ioc->facts, &ioc->pfacts[0]));
 
-       pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
-       mpt_get_product_name(pdev->vendor, pdev->device, revision, ioc->prod_name);
+       mpt_get_product_name(pdev->vendor, pdev->device, pdev->revision,
+                            ioc->prod_name);
 
        switch (pdev->device)
        {
@@ -1903,7 +1899,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
                break;
 
        case MPI_MANUFACTPAGE_DEVICEID_FC929X:
-               if (revision < XL_929) {
+               if (pdev->revision < XL_929) {
                        /* 929X Chip Fix. Set Split transactions level
                        * for PCIX. Set MOST bits to zero.
                        */
@@ -1934,7 +1930,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
                /* 1030 Chip Fix. Disable Split transactions
                 * for PCIX. Set MOST bits to zero if Rev < C0( = 8).
                 */
-               if (revision < C0_1030) {
+               if (pdev->revision < C0_1030) {
                        pci_read_config_byte(pdev, 0x6a, &pcixcmd);
                        pcixcmd &= 0x8F;
                        pci_write_config_byte(pdev, 0x6a, pcixcmd);
@@ -6483,6 +6479,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
                                printk(MYIOC_s_INFO_FMT "%s: host reset in"
                                        " progress mpt_config timed out.!!\n",
                                        __func__, ioc->name);
+                               mutex_unlock(&ioc->mptbase_cmds.mutex);
                                return -EFAULT;
                        }
                        spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
index 6e6e16aab9dae36e8a1d736ba436c3e0431ca749..b383b6961e59549c8075b197d89adb6417b252cf 100644 (file)
@@ -1250,7 +1250,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
        int                     iocnum;
        unsigned int            port;
        int                     cim_rev;
-       u8                      revision;
        struct scsi_device      *sdev;
        VirtDevice              *vdevice;
 
@@ -1324,8 +1323,7 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
        pdev = (struct pci_dev *) ioc->pcidev;
 
        karg->pciId = pdev->device;
-       pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
-       karg->hwRev = revision;
+       karg->hwRev = pdev->revision;
        karg->subSystemDevice = pdev->subsystem_device;
        karg->subSystemVendor = pdev->subsystem_vendor;
 
index f4b4dad77391590d02967faefc1476c5c65ac6b8..e129c820df7da7d6430e62891558f92cdbe235ab 100644 (file)
@@ -106,6 +106,19 @@ config UCB1400_CORE
          To compile this driver as a module, choose M here: the
          module will be called ucb1400_core.
 
+config MFD_LM3533
+       tristate "LM3533 Lighting Power chip"
+       depends on I2C
+       select MFD_CORE
+       select REGMAP_I2C
+       help
+         Say yes here to enable support for National Semiconductor / TI
+         LM3533 Lighting Power chips.
+
+         This driver provides common support for accessing the device;
+         additional drivers must be enabled in order to use the LED,
+         backlight or ambient-light-sensor functionality of the device.
+
 config TPS6105X
        tristate "TPS61050/61052 Boost Converters"
        depends on I2C
@@ -177,8 +190,8 @@ config MFD_TPS65910
        bool "TPS65910 Power Management chip"
        depends on I2C=y && GPIOLIB
        select MFD_CORE
-       select GPIO_TPS65910
        select REGMAP_I2C
+       select IRQ_DOMAIN
        help
          if you say yes here you get support for the TPS65910 series of
          Power Management chips.
@@ -409,6 +422,19 @@ config PMIC_ADP5520
          individual components like LCD backlight, LEDs, GPIOs and Kepad
          under the corresponding menus.
 
+config MFD_MAX77693
+       bool "Maxim Semiconductor MAX77693 PMIC Support"
+       depends on I2C=y && GENERIC_HARDIRQS
+       select MFD_CORE
+       select REGMAP_I2C
+       help
+         Say yes here to support for Maxim Semiconductor MAX77693.
+         This is a companion Power Management IC with Flash, Haptic, Charger,
+         and MUIC(Micro USB Interface Controller) controls on chip.
+         This driver provides common support for accessing the device;
+         additional drivers must be enabled in order to use the functionality
+         of the device.
+
 config MFD_MAX8925
        bool "Maxim Semiconductor MAX8925 PMIC Support"
        depends on I2C=y && GENERIC_HARDIRQS
@@ -454,9 +480,9 @@ config MFD_S5M_CORE
         of the device
 
 config MFD_WM8400
-       tristate "Support Wolfson Microelectronics WM8400"
+       bool "Support Wolfson Microelectronics WM8400"
        select MFD_CORE
-       depends on I2C
+       depends on I2C=y
        select REGMAP_I2C
        help
          Support for the Wolfson Microelecronics WM8400 PMIC and audio
@@ -473,6 +499,7 @@ config MFD_WM831X_I2C
        select MFD_CORE
        select MFD_WM831X
        select REGMAP_I2C
+       select IRQ_DOMAIN
        depends on I2C=y && GENERIC_HARDIRQS
        help
          Support for the Wolfson Microelecronics WM831x and WM832x PMICs
@@ -485,6 +512,7 @@ config MFD_WM831X_SPI
        select MFD_CORE
        select MFD_WM831X
        select REGMAP_SPI
+       select IRQ_DOMAIN
        depends on SPI_MASTER && GENERIC_HARDIRQS
        help
          Support for the Wolfson Microelecronics WM831x and WM832x PMICs
@@ -597,17 +625,32 @@ config MFD_MC13783
        tristate
 
 config MFD_MC13XXX
-       tristate "Support Freescale MC13783 and MC13892"
-       depends on SPI_MASTER
+       tristate
+       depends on SPI_MASTER || I2C
        select MFD_CORE
        select MFD_MC13783
        help
-         Support for the Freescale (Atlas) PMIC and audio CODECs
-         MC13783 and MC13892.
-         This driver provides common support for accessing  the device,
+         Enable support for the Freescale MC13783 and MC13892 PMICs.
+         This driver provides common support for accessing the device,
          additional drivers must be enabled in order to use the
          functionality of the device.
 
+config MFD_MC13XXX_SPI
+       tristate "Freescale MC13783 and MC13892 SPI interface"
+       depends on SPI_MASTER
+       select REGMAP_SPI
+       select MFD_MC13XXX
+       help
+         Select this if your MC13xxx is connected via an SPI bus.
+
+config MFD_MC13XXX_I2C
+       tristate "Freescale MC13892 I2C interface"
+       depends on I2C
+       select REGMAP_I2C
+       select MFD_MC13XXX
+       help
+         Select this if your MC13xxx is connected via an I2C bus.
+
 config ABX500_CORE
        bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
        default y if ARCH_U300 || ARCH_U8500
@@ -651,7 +694,7 @@ config EZX_PCAP
 
 config AB8500_CORE
        bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
-       depends on GENERIC_HARDIRQS && ABX500_CORE
+       depends on GENERIC_HARDIRQS && ABX500_CORE && MFD_DB8500_PRCMU
        select MFD_CORE
        help
          Select this option to enable access to AB8500 power management
@@ -722,6 +765,16 @@ config LPC_SCH
          LPC bridge function of the Intel SCH provides support for
          System Management Bus and General Purpose I/O.
 
+config LPC_ICH
+       tristate "Intel ICH LPC"
+       depends on PCI
+       select MFD_CORE
+       help
+         The LPC bridge function of the Intel ICH provides support for
+         many functional units. This driver provides needed support for
+         other drivers to control these functions, currently GPIO and
+         watchdog.
+
 config MFD_RDC321X
        tristate "Support for RDC-R321x southbridge"
        select MFD_CORE
@@ -854,6 +907,11 @@ config MFD_RC5T583
          Additional drivers must be enabled in order to use the
          different functionality of the device.
 
+config MFD_STA2X11
+       bool "STA2X11 multi function device support"
+       depends on STA2X11
+       select MFD_CORE
+
 config MFD_ANATOP
        bool "Support for Freescale i.MX on-chip ANATOP controller"
        depends on SOC_IMX6Q
index 43672b87805a6346178d000426f11f38a15efaad..75f6ed68a4b9e259616795e41bba94eea0b755ea 100644 (file)
@@ -15,6 +15,7 @@ obj-$(CONFIG_MFD_DAVINCI_VOICECODEC)  += davinci_voicecodec.o
 obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
 obj-$(CONFIG_MFD_TI_SSP)       += ti-ssp.o
 
+obj-$(CONFIG_MFD_STA2X11)      += sta2x11-mfd.o
 obj-$(CONFIG_MFD_STMPE)                += stmpe.o
 obj-$(CONFIG_STMPE_I2C)                += stmpe-i2c.o
 obj-$(CONFIG_STMPE_SPI)                += stmpe-spi.o
@@ -54,6 +55,8 @@ obj-$(CONFIG_TWL6030_PWM)     += twl6030-pwm.o
 obj-$(CONFIG_TWL6040_CORE)     += twl6040-core.o twl6040-irq.o
 
 obj-$(CONFIG_MFD_MC13XXX)      += mc13xxx-core.o
+obj-$(CONFIG_MFD_MC13XXX_SPI)  += mc13xxx-spi.o
+obj-$(CONFIG_MFD_MC13XXX_I2C)  += mc13xxx-i2c.o
 
 obj-$(CONFIG_MFD_CORE)         += mfd-core.o
 
@@ -75,6 +78,7 @@ obj-$(CONFIG_PMIC_DA9052)     += da9052-core.o
 obj-$(CONFIG_MFD_DA9052_SPI)   += da9052-spi.o
 obj-$(CONFIG_MFD_DA9052_I2C)   += da9052-i2c.o
 
+obj-$(CONFIG_MFD_MAX77693)     += max77693.o max77693-irq.o
 max8925-objs                   := max8925-core.o max8925-i2c.o
 obj-$(CONFIG_MFD_MAX8925)      += max8925.o
 obj-$(CONFIG_MFD_MAX8997)      += max8997.o max8997-irq.o
@@ -87,15 +91,15 @@ obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
 obj-$(CONFIG_ABX500_CORE)      += abx500-core.o
 obj-$(CONFIG_AB3100_CORE)      += ab3100-core.o
 obj-$(CONFIG_AB3100_OTP)       += ab3100-otp.o
-obj-$(CONFIG_AB8500_CORE)      += ab8500-core.o ab8500-sysctrl.o
 obj-$(CONFIG_AB8500_DEBUG)     += ab8500-debugfs.o
 obj-$(CONFIG_AB8500_GPADC)     += ab8500-gpadc.o
 obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
-# ab8500-i2c need to come after db8500-prcmu (which provides the channel)
-obj-$(CONFIG_AB8500_I2C_CORE)  += ab8500-i2c.o
+# ab8500-core need to come after db8500-prcmu (which provides the channel)
+obj-$(CONFIG_AB8500_CORE)      += ab8500-core.o ab8500-sysctrl.o
 obj-$(CONFIG_MFD_TIMBERDALE)    += timberdale.o
 obj-$(CONFIG_PMIC_ADP5520)     += adp5520.o
 obj-$(CONFIG_LPC_SCH)          += lpc_sch.o
+obj-$(CONFIG_LPC_ICH)          += lpc_ich.o
 obj-$(CONFIG_MFD_RDC321X)      += rdc321x-southbridge.o
 obj-$(CONFIG_MFD_JANZ_CMODIO)  += janz-cmodio.o
 obj-$(CONFIG_MFD_JZ4740_ADC)   += jz4740-adc.o
index 1f08704f7ae8ecab27c3247d4009f0c71de91b83..dac0e299860353f0a299a8b66c14c745e587a133 100644 (file)
 #include <linux/mfd/core.h>
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/dbx500-prcmu.h>
 #include <linux/regulator/ab8500.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 /*
  * Interrupt register offsets
 #define AB8500_IT_MASK23_REG           0x56
 #define AB8500_IT_MASK24_REG           0x57
 
+/*
+ * latch hierarchy registers
+ */
+#define AB8500_IT_LATCHHIER1_REG       0x60
+#define AB8500_IT_LATCHHIER2_REG       0x61
+#define AB8500_IT_LATCHHIER3_REG       0x62
+
+#define AB8500_IT_LATCHHIER_NUM                3
+
 #define AB8500_REV_REG                 0x80
 #define AB8500_IC_NAME_REG             0x82
 #define AB8500_SWITCH_OFF_STATUS       0x00
 
 #define AB8500_TURN_ON_STATUS          0x00
 
+static bool no_bm; /* No battery management */
+module_param(no_bm, bool, S_IRUGO);
+
 #define AB9540_MODEM_CTRL2_REG                 0x23
 #define AB9540_MODEM_CTRL2_SWDBBRSTN_BIT       BIT(2)
 
@@ -125,6 +140,41 @@ static const char ab8500_version_str[][7] = {
        [AB8500_VERSION_AB8540] = "AB8540",
 };
 
+static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
+{
+       int ret;
+
+       ret = prcmu_abb_write((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
+       if (ret < 0)
+               dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+       return ret;
+}
+
+static int ab8500_i2c_write_masked(struct ab8500 *ab8500, u16 addr, u8 mask,
+       u8 data)
+{
+       int ret;
+
+       ret = prcmu_abb_write_masked((u8)(addr >> 8), (u8)(addr & 0xFF), &data,
+               &mask, 1);
+       if (ret < 0)
+               dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+       return ret;
+}
+
+static int ab8500_i2c_read(struct ab8500 *ab8500, u16 addr)
+{
+       int ret;
+       u8 data;
+
+       ret = prcmu_abb_read((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
+       if (ret < 0) {
+               dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+               return ret;
+       }
+       return (int)data;
+}
+
 static int ab8500_get_chip_id(struct device *dev)
 {
        struct ab8500 *ab8500;
@@ -161,9 +211,13 @@ static int set_register_interruptible(struct ab8500 *ab8500, u8 bank,
 static int ab8500_set_register(struct device *dev, u8 bank,
        u8 reg, u8 value)
 {
+       int ret;
        struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
 
-       return set_register_interruptible(ab8500, bank, reg, value);
+       atomic_inc(&ab8500->transfer_ongoing);
+       ret = set_register_interruptible(ab8500, bank, reg, value);
+       atomic_dec(&ab8500->transfer_ongoing);
+       return ret;
 }
 
 static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -192,9 +246,13 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
 static int ab8500_get_register(struct device *dev, u8 bank,
        u8 reg, u8 *value)
 {
+       int ret;
        struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
 
-       return get_register_interruptible(ab8500, bank, reg, value);
+       atomic_inc(&ab8500->transfer_ongoing);
+       ret = get_register_interruptible(ab8500, bank, reg, value);
+       atomic_dec(&ab8500->transfer_ongoing);
+       return ret;
 }
 
 static int mask_and_set_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -241,11 +299,14 @@ out:
 static int ab8500_mask_and_set_register(struct device *dev,
        u8 bank, u8 reg, u8 bitmask, u8 bitvalues)
 {
+       int ret;
        struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
 
-       return mask_and_set_register_interruptible(ab8500, bank, reg,
-               bitmask, bitvalues);
-
+       atomic_inc(&ab8500->transfer_ongoing);
+       ret= mask_and_set_register_interruptible(ab8500, bank, reg,
+                                                bitmask, bitvalues);
+       atomic_dec(&ab8500->transfer_ongoing);
+       return ret;
 }
 
 static struct abx500_ops ab8500_ops = {
@@ -264,6 +325,7 @@ static void ab8500_irq_lock(struct irq_data *data)
        struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
 
        mutex_lock(&ab8500->irq_lock);
+       atomic_inc(&ab8500->transfer_ongoing);
 }
 
 static void ab8500_irq_sync_unlock(struct irq_data *data)
@@ -292,7 +354,7 @@ static void ab8500_irq_sync_unlock(struct irq_data *data)
                reg = AB8500_IT_MASK1_REG + ab8500->irq_reg_offset[i];
                set_register_interruptible(ab8500, AB8500_INTERRUPT, reg, new);
        }
-
+       atomic_dec(&ab8500->transfer_ongoing);
        mutex_unlock(&ab8500->irq_lock);
 }
 
@@ -325,6 +387,90 @@ static struct irq_chip ab8500_irq_chip = {
        .irq_unmask             = ab8500_irq_unmask,
 };
 
+static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
+                                       int latch_offset, u8 latch_val)
+{
+       int int_bit = __ffs(latch_val);
+       int line, i;
+
+       do {
+               int_bit = __ffs(latch_val);
+
+               for (i = 0; i < ab8500->mask_size; i++)
+                       if (ab8500->irq_reg_offset[i] == latch_offset)
+                               break;
+
+               if (i >= ab8500->mask_size) {
+                       dev_err(ab8500->dev, "Register offset 0x%2x not declared\n",
+                                       latch_offset);
+                       return -ENXIO;
+               }
+
+               line = (i << 3) + int_bit;
+               latch_val &= ~(1 << int_bit);
+
+               handle_nested_irq(ab8500->irq_base + line);
+       } while (latch_val);
+
+       return 0;
+}
+
+static int ab8500_handle_hierarchical_latch(struct ab8500 *ab8500,
+                                       int hier_offset, u8 hier_val)
+{
+       int latch_bit, status;
+       u8 latch_offset, latch_val;
+
+       do {
+               latch_bit = __ffs(hier_val);
+               latch_offset = (hier_offset << 3) + latch_bit;
+
+               /* Fix inconsistent ITFromLatch25 bit mapping... */
+               if (unlikely(latch_offset == 17))
+                       latch_offset = 24;
+
+               status = get_register_interruptible(ab8500,
+                               AB8500_INTERRUPT,
+                               AB8500_IT_LATCH1_REG + latch_offset,
+                               &latch_val);
+               if (status < 0 || latch_val == 0)
+                       goto discard;
+
+               status = ab8500_handle_hierarchical_line(ab8500,
+                               latch_offset, latch_val);
+               if (status < 0)
+                       return status;
+discard:
+               hier_val &= ~(1 << latch_bit);
+       } while (hier_val);
+
+       return 0;
+}
+
+static irqreturn_t ab8500_hierarchical_irq(int irq, void *dev)
+{
+       struct ab8500 *ab8500 = dev;
+       u8 i;
+
+       dev_vdbg(ab8500->dev, "interrupt\n");
+
+       /*  Hierarchical interrupt version */
+       for (i = 0; i < AB8500_IT_LATCHHIER_NUM; i++) {
+               int status;
+               u8 hier_val;
+
+               status = get_register_interruptible(ab8500, AB8500_INTERRUPT,
+                       AB8500_IT_LATCHHIER1_REG + i, &hier_val);
+               if (status < 0 || hier_val == 0)
+                       continue;
+
+               status = ab8500_handle_hierarchical_latch(ab8500, i, hier_val);
+               if (status < 0)
+                       break;
+       }
+       return IRQ_HANDLED;
+}
+
 static irqreturn_t ab8500_irq(int irq, void *dev)
 {
        struct ab8500 *ab8500 = dev;
@@ -332,6 +478,8 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
 
        dev_vdbg(ab8500->dev, "interrupt\n");
 
+       atomic_inc(&ab8500->transfer_ongoing);
+
        for (i = 0; i < ab8500->mask_size; i++) {
                int regoffset = ab8500->irq_reg_offset[i];
                int status;
@@ -355,9 +503,10 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
 
                        handle_nested_irq(ab8500->irq_base + line);
                        value &= ~(1 << bit);
+
                } while (value);
        }
-
+       atomic_dec(&ab8500->transfer_ongoing);
        return IRQ_HANDLED;
 }
 
@@ -411,6 +560,14 @@ static void ab8500_irq_remove(struct ab8500 *ab8500)
        }
 }
 
+int ab8500_suspend(struct ab8500 *ab8500)
+{
+       if (atomic_read(&ab8500->transfer_ongoing))
+               return -EINVAL;
+       else
+               return 0;
+}
+
 /* AB8500 GPIO Resources */
 static struct resource __devinitdata ab8500_gpio_resources[] = {
        {
@@ -744,6 +901,39 @@ static struct resource __devinitdata ab8500_usb_resources[] = {
        },
 };
 
+static struct resource __devinitdata ab8505_iddet_resources[] = {
+       {
+               .name  = "KeyDeglitch",
+               .start = AB8505_INT_KEYDEGLITCH,
+               .end   = AB8505_INT_KEYDEGLITCH,
+               .flags = IORESOURCE_IRQ,
+       },
+       {
+               .name  = "KP",
+               .start = AB8505_INT_KP,
+               .end   = AB8505_INT_KP,
+               .flags = IORESOURCE_IRQ,
+       },
+       {
+               .name  = "IKP",
+               .start = AB8505_INT_IKP,
+               .end   = AB8505_INT_IKP,
+               .flags = IORESOURCE_IRQ,
+       },
+       {
+               .name  = "IKR",
+               .start = AB8505_INT_IKR,
+               .end   = AB8505_INT_IKR,
+               .flags = IORESOURCE_IRQ,
+       },
+       {
+               .name  = "KeyStuck",
+               .start = AB8505_INT_KEYSTUCK,
+               .end   = AB8505_INT_KEYSTUCK,
+               .flags = IORESOURCE_IRQ,
+       },
+};
+
 static struct resource __devinitdata ab8500_temp_resources[] = {
        {
                .name  = "AB8500_TEMP_WARM",
@@ -777,35 +967,11 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
                .num_resources = ARRAY_SIZE(ab8500_rtc_resources),
                .resources = ab8500_rtc_resources,
        },
-       {
-               .name = "ab8500-charger",
-               .num_resources = ARRAY_SIZE(ab8500_charger_resources),
-               .resources = ab8500_charger_resources,
-       },
-       {
-               .name = "ab8500-btemp",
-               .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
-               .resources = ab8500_btemp_resources,
-       },
-       {
-               .name = "ab8500-fg",
-               .num_resources = ARRAY_SIZE(ab8500_fg_resources),
-               .resources = ab8500_fg_resources,
-       },
-       {
-               .name = "ab8500-chargalg",
-               .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
-               .resources = ab8500_chargalg_resources,
-       },
        {
                .name = "ab8500-acc-det",
                .num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
                .resources = ab8500_av_acc_detect_resources,
        },
-       {
-               .name = "ab8500-codec",
-       },
-
        {
                .name = "ab8500-poweron-key",
                .num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
@@ -834,6 +1000,29 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
        },
 };
 
+static struct mfd_cell __devinitdata ab8500_bm_devs[] = {
+       {
+               .name = "ab8500-charger",
+               .num_resources = ARRAY_SIZE(ab8500_charger_resources),
+               .resources = ab8500_charger_resources,
+       },
+       {
+               .name = "ab8500-btemp",
+               .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
+               .resources = ab8500_btemp_resources,
+       },
+       {
+               .name = "ab8500-fg",
+               .num_resources = ARRAY_SIZE(ab8500_fg_resources),
+               .resources = ab8500_fg_resources,
+       },
+       {
+               .name = "ab8500-chargalg",
+               .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
+               .resources = ab8500_chargalg_resources,
+       },
+};
+
 static struct mfd_cell __devinitdata ab8500_devs[] = {
        {
                .name = "ab8500-gpio",
@@ -845,6 +1034,9 @@ static struct mfd_cell __devinitdata ab8500_devs[] = {
                .num_resources = ARRAY_SIZE(ab8500_usb_resources),
                .resources = ab8500_usb_resources,
        },
+       {
+               .name = "ab8500-codec",
+       },
 };
 
 static struct mfd_cell __devinitdata ab9540_devs[] = {
@@ -858,6 +1050,18 @@ static struct mfd_cell __devinitdata ab9540_devs[] = {
                .num_resources = ARRAY_SIZE(ab8500_usb_resources),
                .resources = ab8500_usb_resources,
        },
+       {
+               .name = "ab9540-codec",
+       },
+};
+
+/* Device list common to ab9540 and ab8505 */
+static struct mfd_cell __devinitdata ab9540_ab8505_devs[] = {
+       {
+               .name = "ab-iddet",
+               .num_resources = ARRAY_SIZE(ab8505_iddet_resources),
+               .resources = ab8505_iddet_resources,
+       },
 };
 
 static ssize_t show_chip_id(struct device *dev,
@@ -1003,18 +1207,66 @@ static struct attribute_group ab9540_attr_group = {
        .attrs  = ab9540_sysfs_entries,
 };
 
-int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
+static const struct of_device_id ab8500_match[] = {
+       {
+               .compatible = "stericsson,ab8500",
+               .data = (void *)AB8500_VERSION_AB8500,
+       },
+       {},
+};
+
+static int __devinit ab8500_probe(struct platform_device *pdev)
 {
-       struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev);
+       struct ab8500_platform_data *plat = dev_get_platdata(&pdev->dev);
+       const struct platform_device_id *platid = platform_get_device_id(pdev);
+       enum ab8500_version version = AB8500_VERSION_UNDEFINED;
+       struct device_node *np = pdev->dev.of_node;
+       struct ab8500 *ab8500;
+       struct resource *resource;
        int ret;
        int i;
        u8 value;
 
+       ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
+       if (!ab8500)
+               return -ENOMEM;
+
        if (plat)
                ab8500->irq_base = plat->irq_base;
+       else if (np)
+               ret = of_property_read_u32(np, "stericsson,irq-base", &ab8500->irq_base);
+
+       if (!ab8500->irq_base) {
+               dev_info(&pdev->dev, "couldn't find irq-base\n");
+               ret = -EINVAL;
+               goto out_free_ab8500;
+       }
+
+       ab8500->dev = &pdev->dev;
+
+       resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!resource) {
+               ret = -ENODEV;
+               goto out_free_ab8500;
+       }
+
+       ab8500->irq = resource->start;
+
+       ab8500->read = ab8500_i2c_read;
+       ab8500->write = ab8500_i2c_write;
+       ab8500->write_masked = ab8500_i2c_write_masked;
 
        mutex_init(&ab8500->lock);
        mutex_init(&ab8500->irq_lock);
+       atomic_set(&ab8500->transfer_ongoing, 0);
+
+       platform_set_drvdata(pdev, ab8500);
+
+       if (platid)
+               version = platid->driver_data;
+       else if (np)
+               version = (unsigned int)
+                       of_match_device(ab8500_match, &pdev->dev)->data;
 
        if (version != AB8500_VERSION_UNDEFINED)
                ab8500->version = version;
@@ -1022,7 +1274,7 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
                ret = get_register_interruptible(ab8500, AB8500_MISC,
                        AB8500_IC_NAME_REG, &value);
                if (ret < 0)
-                       return ret;
+                       goto out_free_ab8500;
 
                ab8500->version = value;
        }
@@ -1030,7 +1282,7 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
        ret = get_register_interruptible(ab8500, AB8500_MISC,
                AB8500_REV_REG, &value);
        if (ret < 0)
-               return ret;
+               goto out_free_ab8500;
 
        ab8500->chip_id = value;
 
@@ -1105,30 +1357,57 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
                if (ret)
                        goto out_freeoldmask;
 
-               ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq,
-                                          IRQF_ONESHOT | IRQF_NO_SUSPEND,
-                                          "ab8500", ab8500);
+               /*  Activate this feature only in ab9540 */
+               /*  till tests are done on ab8500 1p2 or later*/
+               if (is_ab9540(ab8500))
+                       ret = request_threaded_irq(ab8500->irq, NULL,
+                                       ab8500_hierarchical_irq,
+                                       IRQF_ONESHOT | IRQF_NO_SUSPEND,
+                                       "ab8500", ab8500);
+               else
+                       ret = request_threaded_irq(ab8500->irq, NULL,
+                                       ab8500_irq,
+                                       IRQF_ONESHOT | IRQF_NO_SUSPEND,
+                                       "ab8500", ab8500);
                if (ret)
                        goto out_removeirq;
        }
 
-       ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
-                             ARRAY_SIZE(abx500_common_devs), NULL,
-                             ab8500->irq_base);
+       if (!np) {
+               ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
+                               ARRAY_SIZE(abx500_common_devs), NULL,
+                               ab8500->irq_base);
 
-       if (ret)
-               goto out_freeirq;
+               if (ret)
+                       goto out_freeirq;
+
+               if (is_ab9540(ab8500))
+                       ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
+                                       ARRAY_SIZE(ab9540_devs), NULL,
+                                       ab8500->irq_base);
+               else
+                       ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
+                                       ARRAY_SIZE(ab8500_devs), NULL,
+                                       ab8500->irq_base);
+               if (ret)
+                       goto out_freeirq;
 
-       if (is_ab9540(ab8500))
-               ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
-                             ARRAY_SIZE(ab9540_devs), NULL,
-                             ab8500->irq_base);
-       else
-               ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
-                             ARRAY_SIZE(ab9540_devs), NULL,
-                             ab8500->irq_base);
-       if (ret)
-               goto out_freeirq;
+               if (is_ab9540(ab8500) || is_ab8505(ab8500))
+                       ret = mfd_add_devices(ab8500->dev, 0, ab9540_ab8505_devs,
+                                       ARRAY_SIZE(ab9540_ab8505_devs), NULL,
+                                       ab8500->irq_base);
+               if (ret)
+                       goto out_freeirq;
+       }
+
+       if (!no_bm) {
+               /* Add battery management devices */
+               ret = mfd_add_devices(ab8500->dev, 0, ab8500_bm_devs,
+                                     ARRAY_SIZE(ab8500_bm_devs), NULL,
+                                     ab8500->irq_base);
+               if (ret)
+                       dev_err(ab8500->dev, "error adding bm devices\n");
+       }
 
        if (is_ab9540(ab8500))
                ret = sysfs_create_group(&ab8500->dev->kobj,
@@ -1151,12 +1430,16 @@ out_freeoldmask:
        kfree(ab8500->oldmask);
 out_freemask:
        kfree(ab8500->mask);
+out_free_ab8500:
+       kfree(ab8500);
 
        return ret;
 }
 
-int __devexit ab8500_exit(struct ab8500 *ab8500)
+static int __devexit ab8500_remove(struct platform_device *pdev)
 {
+       struct ab8500 *ab8500 = platform_get_drvdata(pdev);
+
        if (is_ab9540(ab8500))
                sysfs_remove_group(&ab8500->dev->kobj, &ab9540_attr_group);
        else
@@ -1168,10 +1451,42 @@ int __devexit ab8500_exit(struct ab8500 *ab8500)
        }
        kfree(ab8500->oldmask);
        kfree(ab8500->mask);
+       kfree(ab8500);
 
        return 0;
 }
 
+static const struct platform_device_id ab8500_id[] = {
+       { "ab8500-core", AB8500_VERSION_AB8500 },
+       { "ab8505-i2c", AB8500_VERSION_AB8505 },
+       { "ab9540-i2c", AB8500_VERSION_AB9540 },
+       { "ab8540-i2c", AB8500_VERSION_AB8540 },
+       { }
+};
+
+static struct platform_driver ab8500_core_driver = {
+       .driver = {
+               .name = "ab8500-core",
+               .owner = THIS_MODULE,
+               .of_match_table = ab8500_match,
+       },
+       .probe  = ab8500_probe,
+       .remove = __devexit_p(ab8500_remove),
+       .id_table = ab8500_id,
+};
+
+static int __init ab8500_core_init(void)
+{
+       return platform_driver_register(&ab8500_core_driver);
+}
+
+static void __exit ab8500_core_exit(void)
+{
+       platform_driver_unregister(&ab8500_core_driver);
+}
+arch_initcall(ab8500_core_init);
+module_exit(ab8500_core_exit);
+
 MODULE_AUTHOR("Mattias Wallin, Srinidhi Kasagar, Rabin Vincent");
 MODULE_DESCRIPTION("AB8500 MFD core");
 MODULE_LICENSE("GPL v2");
index 9a0211aa88971c0207b3caa18d30eab6cb57821a..50c4c89ab2202fba117a05087799bfaf9ddba9df 100644 (file)
@@ -608,10 +608,16 @@ static int __devexit ab8500_debug_remove(struct platform_device *plf)
        return 0;
 }
 
+static const struct of_device_id ab8500_debug_match[] = {
+        { .compatible = "stericsson,ab8500-debug", },
+        {}
+};
+
 static struct platform_driver ab8500_debug_driver = {
        .driver = {
                .name = "ab8500-debug",
                .owner = THIS_MODULE,
+               .of_match_table = ab8500_debug_match,
        },
        .probe  = ab8500_debug_probe,
        .remove = __devexit_p(ab8500_debug_remove)
index c39fc716e1dcf520592bc866cba0be4e661d0e09..b86fd8e1ec3fbae7c4bf6770c1598a777d657d78 100644 (file)
@@ -584,7 +584,7 @@ static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
 
        gpadc->irq = platform_get_irq_byname(pdev, "SW_CONV_END");
        if (gpadc->irq < 0) {
-               dev_err(gpadc->dev, "failed to get platform irq-%d\n",
+               dev_err(&pdev->dev, "failed to get platform irq-%d\n",
                        gpadc->irq);
                ret = gpadc->irq;
                goto fail;
@@ -648,12 +648,18 @@ static int __devexit ab8500_gpadc_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id ab8500_gpadc_match[] = {
+       { .compatible = "stericsson,ab8500-gpadc", },
+       {}
+};
+
 static struct platform_driver ab8500_gpadc_driver = {
        .probe = ab8500_gpadc_probe,
        .remove = __devexit_p(ab8500_gpadc_remove),
        .driver = {
                .name = "ab8500-gpadc",
                .owner = THIS_MODULE,
+               .of_match_table = ab8500_gpadc_match,
        },
 };
 
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
deleted file mode 100644 (file)
index b83045f..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson.
- * License Terms: GNU General Public License v2
- * This file was based on drivers/mfd/ab8500-spi.c
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/abx500/ab8500.h>
-#include <linux/mfd/dbx500-prcmu.h>
-
-static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
-{
-       int ret;
-
-       ret = prcmu_abb_write((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
-       if (ret < 0)
-               dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
-       return ret;
-}
-
-static int ab8500_i2c_write_masked(struct ab8500 *ab8500, u16 addr, u8 mask,
-       u8 data)
-{
-       int ret;
-
-       ret = prcmu_abb_write_masked((u8)(addr >> 8), (u8)(addr & 0xFF), &data,
-               &mask, 1);
-       if (ret < 0)
-               dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
-       return ret;
-}
-
-static int ab8500_i2c_read(struct ab8500 *ab8500, u16 addr)
-{
-       int ret;
-       u8 data;
-
-       ret = prcmu_abb_read((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
-       if (ret < 0) {
-               dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
-               return ret;
-       }
-       return (int)data;
-}
-
-static int __devinit ab8500_i2c_probe(struct platform_device *plf)
-{
-       const struct platform_device_id *platid = platform_get_device_id(plf);
-       struct ab8500 *ab8500;
-       struct resource *resource;
-       int ret;
-
-       ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
-       if (!ab8500)
-               return -ENOMEM;
-
-       ab8500->dev = &plf->dev;
-
-       resource = platform_get_resource(plf, IORESOURCE_IRQ, 0);
-       if (!resource) {
-               kfree(ab8500);
-               return -ENODEV;
-       }
-
-       ab8500->irq = resource->start;
-
-       ab8500->read = ab8500_i2c_read;
-       ab8500->write = ab8500_i2c_write;
-       ab8500->write_masked = ab8500_i2c_write_masked;
-
-       platform_set_drvdata(plf, ab8500);
-
-       ret = ab8500_init(ab8500, platid->driver_data);
-       if (ret)
-               kfree(ab8500);
-
-
-       return ret;
-}
-
-static int __devexit ab8500_i2c_remove(struct platform_device *plf)
-{
-       struct ab8500 *ab8500 = platform_get_drvdata(plf);
-
-       ab8500_exit(ab8500);
-       kfree(ab8500);
-
-       return 0;
-}
-
-static const struct platform_device_id ab8500_id[] = {
-       { "ab8500-i2c", AB8500_VERSION_AB8500 },
-       { "ab8505-i2c", AB8500_VERSION_AB8505 },
-       { "ab9540-i2c", AB8500_VERSION_AB9540 },
-       { "ab8540-i2c", AB8500_VERSION_AB8540 },
-       { }
-};
-
-static struct platform_driver ab8500_i2c_driver = {
-       .driver = {
-               .name = "ab8500-i2c",
-               .owner = THIS_MODULE,
-       },
-       .probe  = ab8500_i2c_probe,
-       .remove = __devexit_p(ab8500_i2c_remove),
-       .id_table = ab8500_id,
-};
-
-static int __init ab8500_i2c_init(void)
-{
-       return platform_driver_register(&ab8500_i2c_driver);
-}
-
-static void __exit ab8500_i2c_exit(void)
-{
-       platform_driver_unregister(&ab8500_i2c_driver);
-}
-arch_initcall(ab8500_i2c_init);
-module_exit(ab8500_i2c_exit);
-
-MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com");
-MODULE_DESCRIPTION("AB8500 Core access via PRCMU I2C");
-MODULE_LICENSE("GPL v2");
index c28d4eb1eff019517d166476c590652f2ac1b8b1..5a3e51ccf25863b94dccd26b25f4e7002b90ac97 100644 (file)
@@ -61,10 +61,16 @@ static int __devexit ab8500_sysctrl_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id ab8500_sysctrl_match[] = {
+       { .compatible = "stericsson,ab8500-sysctrl", },
+       {}
+};
+
 static struct platform_driver ab8500_sysctrl_driver = {
        .driver = {
                .name = "ab8500-sysctrl",
                .owner = THIS_MODULE,
+               .of_match_table = ab8500_sysctrl_match,
        },
        .probe = ab8500_sysctrl_probe,
        .remove = __devexit_p(ab8500_sysctrl_remove),
index 2af42480635ec458f868bd180839b79050bbd6de..6da06341f6c909312afea387e875fe82cc1592b8 100644 (file)
 #include <linux/of_address.h>
 #include <linux/mfd/anatop.h>
 
-u32 anatop_get_bits(struct anatop *adata, u32 addr, int bit_shift,
-                   int bit_width)
+u32 anatop_read_reg(struct anatop *adata, u32 addr)
 {
-       u32 val, mask;
-
-       if (bit_width == 32)
-               mask = ~0;
-       else
-               mask = (1 << bit_width) - 1;
-
-       val = readl(adata->ioreg + addr);
-       val = (val >> bit_shift) & mask;
-
-       return val;
+       return readl(adata->ioreg + addr);
 }
-EXPORT_SYMBOL_GPL(anatop_get_bits);
+EXPORT_SYMBOL_GPL(anatop_read_reg);
 
-void anatop_set_bits(struct anatop *adata, u32 addr, int bit_shift,
-                    int bit_width, u32 data)
+void anatop_write_reg(struct anatop *adata, u32 addr, u32 data, u32 mask)
 {
-       u32 val, mask;
+       u32 val;
 
-       if (bit_width == 32)
-               mask = ~0;
-       else
-               mask = (1 << bit_width) - 1;
+       data &= mask;
 
        spin_lock(&adata->reglock);
-       val = readl(adata->ioreg + addr) & ~(mask << bit_shift);
-       writel((data << bit_shift) | val, adata->ioreg + addr);
+       val = readl(adata->ioreg + addr);
+       val &= ~mask;
+       val |= data;
+       writel(val, adata->ioreg + addr);
        spin_unlock(&adata->reglock);
 }
-EXPORT_SYMBOL_GPL(anatop_set_bits);
+EXPORT_SYMBOL_GPL(anatop_write_reg);
 
 static const struct of_device_id of_anatop_match[] = {
        { .compatible = "fsl,imx6q-anatop", },
index 1582c3d952579e66306e1ad8ce2713f3b0d03f9c..383421bf57609a994b7f8d537f97c269d2ab6302 100644 (file)
@@ -353,12 +353,28 @@ static int asic3_gpio_irq_type(struct irq_data *data, unsigned int type)
        return 0;
 }
 
+static int asic3_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+       struct asic3 *asic = irq_data_get_irq_chip_data(data);
+       u32 bank, index;
+       u16 bit;
+
+       bank = asic3_irq_to_bank(asic, data->irq);
+       index = asic3_irq_to_index(asic, data->irq);
+       bit = 1<<index;
+
+       asic3_set_register(asic, bank + ASIC3_GPIO_SLEEP_MASK, bit, !on);
+
+       return 0;
+}
+
 static struct irq_chip asic3_gpio_irq_chip = {
        .name           = "ASIC3-GPIO",
        .irq_ack        = asic3_mask_gpio_irq,
        .irq_mask       = asic3_mask_gpio_irq,
        .irq_unmask     = asic3_unmask_gpio_irq,
        .irq_set_type   = asic3_gpio_irq_type,
+       .irq_set_wake   = asic3_gpio_irq_set_wake,
 };
 
 static struct irq_chip asic3_irq_chip = {
@@ -529,7 +545,7 @@ static int asic3_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 {
        struct asic3 *asic = container_of(chip, struct asic3, gpio);
 
-       return (offset < ASIC3_NUM_GPIOS) ? asic->irq_base + offset : -ENXIO;
+       return asic->irq_base + offset;
 }
 
 static __init int asic3_gpio_probe(struct platform_device *pdev,
@@ -894,10 +910,13 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
        asic3_mmc_resources[0].start >>= asic->bus_shift;
        asic3_mmc_resources[0].end   >>= asic->bus_shift;
 
-       ret = mfd_add_devices(&pdev->dev, pdev->id,
+       if (pdata->clock_rate) {
+               ds1wm_pdata.clock_rate = pdata->clock_rate;
+               ret = mfd_add_devices(&pdev->dev, pdev->id,
                        &asic3_cell_ds1wm, 1, mem, asic->irq_base);
-       if (ret < 0)
-               goto out;
+               if (ret < 0)
+                       goto out;
+       }
 
        if (mem_sdio && (irq >= 0)) {
                ret = mfd_add_devices(&pdev->dev, pdev->id,
@@ -1000,6 +1019,9 @@ static int __init asic3_probe(struct platform_device *pdev)
 
        asic3_mfd_probe(pdev, pdata, mem);
 
+       asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
+               (ASIC3_EXTCF_CF0_BUF_EN|ASIC3_EXTCF_CF0_PWAIT_EN), 1);
+
        dev_info(asic->dev, "ASIC3 Core driver\n");
 
        return 0;
@@ -1021,6 +1043,9 @@ static int __devexit asic3_remove(struct platform_device *pdev)
        int ret;
        struct asic3 *asic = platform_get_drvdata(pdev);
 
+       asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
+               (ASIC3_EXTCF_CF0_BUF_EN|ASIC3_EXTCF_CF0_PWAIT_EN), 0);
+
        asic3_mfd_remove(pdev);
 
        ret = asic3_gpio_remove(pdev);
index 315fef5d466ac3f2d380246ffa75a056d131cb52..3419e726de478cb330801d1dfb1db42d2a5d1748 100644 (file)
@@ -186,18 +186,7 @@ static struct pci_driver cs5535_mfd_driver = {
        .remove = __devexit_p(cs5535_mfd_remove),
 };
 
-static int __init cs5535_mfd_init(void)
-{
-       return pci_register_driver(&cs5535_mfd_driver);
-}
-
-static void __exit cs5535_mfd_exit(void)
-{
-       pci_unregister_driver(&cs5535_mfd_driver);
-}
-
-module_init(cs5535_mfd_init);
-module_exit(cs5535_mfd_exit);
+module_pci_driver(cs5535_mfd_driver);
 
 MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_DESCRIPTION("MFD driver for CS5535/CS5536 southbridge's ISA PCI device");
index 7776aff46269c38134bc4ca67d81d703989545b1..1f1313c905736f352d09c8f8d764be2464467cc5 100644 (file)
@@ -318,6 +318,135 @@ static bool da9052_reg_volatile(struct device *dev, unsigned int reg)
        }
 }
 
+/*
+ * TBAT look-up table is computed from the R90 reg (8 bit register)
+ * reading as below. The battery temperature is in milliCentigrade
+ * TBAT = (1/(t1+1/298) - 273) * 1000 mC
+ * where t1 = (1/B)* ln(( ADCval * 2.5)/(R25*ITBAT*255))
+ * Default values are R25 = 10e3, B = 3380, ITBAT = 50e-6
+ * Example:
+ * R25=10E3, B=3380, ITBAT=50e-6, ADCVAL=62d calculates
+ * TBAT = 20015 mili degrees Centrigrade
+ *
+*/
+static const int32_t tbat_lookup[255] = {
+       183258, 144221, 124334, 111336, 101826, 94397, 88343, 83257,
+       78889, 75071, 71688, 68656, 65914, 63414, 61120, 59001,
+       57036, 55204, 53490, 51881, 50364, 48931, 47574, 46285,
+       45059, 43889, 42772, 41703, 40678, 39694, 38748, 37838,
+       36961, 36115, 35297, 34507, 33743, 33002, 32284, 31588,
+       30911, 30254, 29615, 28994, 28389, 27799, 27225, 26664,
+       26117, 25584, 25062, 24553, 24054, 23567, 23091, 22624,
+       22167, 21719, 21281, 20851, 20429, 20015, 19610, 19211,
+       18820, 18436, 18058, 17688, 17323, 16965, 16612, 16266,
+       15925, 15589, 15259, 14933, 14613, 14298, 13987, 13681,
+       13379, 13082, 12788, 12499, 12214, 11933, 11655, 11382,
+       11112, 10845, 10582, 10322, 10066, 9812, 9562, 9315,
+       9071, 8830, 8591, 8356, 8123, 7893, 7665, 7440,
+       7218, 6998, 6780, 6565, 6352, 6141, 5933, 5726,
+       5522, 5320, 5120, 4922, 4726, 4532, 4340, 4149,
+       3961, 3774, 3589, 3406, 3225, 3045, 2867, 2690,
+       2516, 2342, 2170, 2000, 1831, 1664, 1498, 1334,
+       1171, 1009, 849, 690, 532, 376, 221, 67,
+       -84, -236, -386, -535, -683, -830, -975, -1119,
+       -1263, -1405, -1546, -1686, -1825, -1964, -2101, -2237,
+       -2372, -2506, -2639, -2771, -2902, -3033, -3162, -3291,
+       -3418, -3545, -3671, -3796, -3920, -4044, -4166, -4288,
+       -4409, -4529, -4649, -4767, -4885, -5002, -5119, -5235,
+       -5349, -5464, -5577, -5690, -5802, -5913, -6024, -6134,
+       -6244, -6352, -6461, -6568, -6675, -6781, -6887, -6992,
+       -7096, -7200, -7303, -7406, -7508, -7609, -7710, -7810,
+       -7910, -8009, -8108, -8206, -8304, -8401, -8497, -8593,
+       -8689, -8784, -8878, -8972, -9066, -9159, -9251, -9343,
+       -9435, -9526, -9617, -9707, -9796, -9886, -9975, -10063,
+       -10151, -10238, -10325, -10412, -10839, -10923, -11007, -11090,
+       -11173, -11256, -11338, -11420, -11501, -11583, -11663, -11744,
+       -11823, -11903, -11982
+};
+
+static const u8 chan_mux[DA9052_ADC_VBBAT + 1] = {
+       [DA9052_ADC_VDDOUT]     = DA9052_ADC_MAN_MUXSEL_VDDOUT,
+       [DA9052_ADC_ICH]        = DA9052_ADC_MAN_MUXSEL_ICH,
+       [DA9052_ADC_TBAT]       = DA9052_ADC_MAN_MUXSEL_TBAT,
+       [DA9052_ADC_VBAT]       = DA9052_ADC_MAN_MUXSEL_VBAT,
+       [DA9052_ADC_IN4]        = DA9052_ADC_MAN_MUXSEL_AD4,
+       [DA9052_ADC_IN5]        = DA9052_ADC_MAN_MUXSEL_AD5,
+       [DA9052_ADC_IN6]        = DA9052_ADC_MAN_MUXSEL_AD6,
+       [DA9052_ADC_VBBAT]      = DA9052_ADC_MAN_MUXSEL_VBBAT
+};
+
+int da9052_adc_manual_read(struct da9052 *da9052, unsigned char channel)
+{
+       int ret;
+       unsigned short calc_data;
+       unsigned short data;
+       unsigned char mux_sel;
+
+       if (channel > DA9052_ADC_VBBAT)
+               return -EINVAL;
+
+       mutex_lock(&da9052->auxadc_lock);
+
+       /* Channel gets activated on enabling the Conversion bit */
+       mux_sel = chan_mux[channel] | DA9052_ADC_MAN_MAN_CONV;
+
+       ret = da9052_reg_write(da9052, DA9052_ADC_MAN_REG, mux_sel);
+       if (ret < 0)
+               goto err;
+
+       /* Wait for an interrupt */
+       if (!wait_for_completion_timeout(&da9052->done,
+                                        msecs_to_jiffies(500))) {
+               dev_err(da9052->dev,
+                       "timeout waiting for ADC conversion interrupt\n");
+               ret = -ETIMEDOUT;
+               goto err;
+       }
+
+       ret = da9052_reg_read(da9052, DA9052_ADC_RES_H_REG);
+       if (ret < 0)
+               goto err;
+
+       calc_data = (unsigned short)ret;
+       data = calc_data << 2;
+
+       ret = da9052_reg_read(da9052, DA9052_ADC_RES_L_REG);
+       if (ret < 0)
+               goto err;
+
+       calc_data = (unsigned short)(ret & DA9052_ADC_RES_LSB);
+       data |= calc_data;
+
+       ret = data;
+
+err:
+       mutex_unlock(&da9052->auxadc_lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(da9052_adc_manual_read);
+
+static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
+{
+       struct da9052 *da9052 = irq_data;
+
+       complete(&da9052->done);
+
+       return IRQ_HANDLED;
+}
+
+int da9052_adc_read_temp(struct da9052 *da9052)
+{
+       int tbat;
+
+       tbat = da9052_reg_read(da9052, DA9052_TBAT_RES_REG);
+       if (tbat <= 0)
+               return tbat;
+
+       /* ARRAY_SIZE check is not needed since TBAT is a 8-bit register */
+       return tbat_lookup[tbat - 1];
+}
+EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
+
 static struct resource da9052_rtc_resource = {
        .name = "ALM",
        .start = DA9052_IRQ_ALARM,
@@ -646,6 +775,9 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
        struct irq_desc *desc;
        int ret;
 
+       mutex_init(&da9052->auxadc_lock);
+       init_completion(&da9052->done);
+
        if (pdata && pdata->init != NULL)
                pdata->init(da9052);
 
@@ -665,6 +797,12 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
 
        da9052->irq_base = regmap_irq_chip_get_base(da9052->irq_data);
 
+       ret = request_threaded_irq(DA9052_IRQ_ADC_EOM, NULL, da9052_auxadc_irq,
+                                  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                  "adc irq", da9052);
+       if (ret != 0)
+               dev_err(da9052->dev, "DA9052 ADC IRQ failed ret=%d\n", ret);
+
        ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
                              ARRAY_SIZE(da9052_subdev_info), NULL, 0);
        if (ret)
@@ -673,6 +811,7 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
        return 0;
 
 err:
+       free_irq(DA9052_IRQ_ADC_EOM, da9052);
        mfd_remove_devices(da9052->dev);
 regmap_err:
        return ret;
@@ -680,6 +819,7 @@ regmap_err:
 
 void da9052_device_exit(struct da9052 *da9052)
 {
+       free_irq(DA9052_IRQ_ADC_EOM, da9052);
        regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
        mfd_remove_devices(da9052->dev);
 }
index 36b88e395499f1673ecb57eee39b0c6a3fa920cd..82c9d64502868ba94ea8d215d9a5e864ed42f19a 100644 (file)
 #include <linux/mfd/da9052/da9052.h>
 #include <linux/mfd/da9052/reg.h>
 
+#ifdef CONFIG_OF
+#include <linux/of.h>
+#include <linux/of_device.h>
+#endif
+
 static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
 {
        int reg_val, ret;
@@ -41,13 +46,31 @@ static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
        return 0;
 }
 
+static struct i2c_device_id da9052_i2c_id[] = {
+       {"da9052", DA9052},
+       {"da9053-aa", DA9053_AA},
+       {"da9053-ba", DA9053_BA},
+       {"da9053-bb", DA9053_BB},
+       {}
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id dialog_dt_ids[] = {
+       { .compatible = "dlg,da9052", .data = &da9052_i2c_id[0] },
+       { .compatible = "dlg,da9053-aa", .data = &da9052_i2c_id[1] },
+       { .compatible = "dlg,da9053-ab", .data = &da9052_i2c_id[2] },
+       { .compatible = "dlg,da9053-bb", .data = &da9052_i2c_id[3] },
+       { /* sentinel */ }
+};
+#endif
+
 static int __devinit da9052_i2c_probe(struct i2c_client *client,
                                       const struct i2c_device_id *id)
 {
        struct da9052 *da9052;
        int ret;
 
-       da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+       da9052 = devm_kzalloc(&client->dev, sizeof(struct da9052), GFP_KERNEL);
        if (!da9052)
                return -ENOMEM;
 
@@ -55,8 +78,7 @@ static int __devinit da9052_i2c_probe(struct i2c_client *client,
                                     I2C_FUNC_SMBUS_BYTE_DATA)) {
                dev_info(&client->dev, "Error in %s:i2c_check_functionality\n",
                         __func__);
-               ret = -ENODEV;
-               goto err;
+               return  -ENODEV;
        }
 
        da9052->dev = &client->dev;
@@ -64,29 +86,39 @@ static int __devinit da9052_i2c_probe(struct i2c_client *client,
 
        i2c_set_clientdata(client, da9052);
 
-       da9052->regmap = regmap_init_i2c(client, &da9052_regmap_config);
+       da9052->regmap = devm_regmap_init_i2c(client, &da9052_regmap_config);
        if (IS_ERR(da9052->regmap)) {
                ret = PTR_ERR(da9052->regmap);
                dev_err(&client->dev, "Failed to allocate register map: %d\n",
                        ret);
-               goto err;
+               return ret;
        }
 
        ret = da9052_i2c_enable_multiwrite(da9052);
        if (ret < 0)
-               goto err_regmap;
+               return ret;
+
+#ifdef CONFIG_OF
+       if (!id) {
+               struct device_node *np = client->dev.of_node;
+               const struct of_device_id *deviceid;
+
+               deviceid = of_match_node(dialog_dt_ids, np);
+               id = (const struct i2c_device_id *)deviceid->data;
+       }
+#endif
+
+       if (!id) {
+               ret = -ENODEV;
+               dev_err(&client->dev, "id is null.\n");
+               return ret;
+       }
 
        ret = da9052_device_init(da9052, id->driver_data);
        if (ret != 0)
-               goto err_regmap;
+               return ret;
 
        return 0;
-
-err_regmap:
-       regmap_exit(da9052->regmap);
-err:
-       kfree(da9052);
-       return ret;
 }
 
 static int __devexit da9052_i2c_remove(struct i2c_client *client)
@@ -94,20 +126,9 @@ static int __devexit da9052_i2c_remove(struct i2c_client *client)
        struct da9052 *da9052 = i2c_get_clientdata(client);
 
        da9052_device_exit(da9052);
-       regmap_exit(da9052->regmap);
-       kfree(da9052);
-
        return 0;
 }
 
-static struct i2c_device_id da9052_i2c_id[] = {
-       {"da9052", DA9052},
-       {"da9053-aa", DA9053_AA},
-       {"da9053-ba", DA9053_BA},
-       {"da9053-bb", DA9053_BB},
-       {}
-};
-
 static struct i2c_driver da9052_i2c_driver = {
        .probe = da9052_i2c_probe,
        .remove = __devexit_p(da9052_i2c_remove),
@@ -115,6 +136,9 @@ static struct i2c_driver da9052_i2c_driver = {
        .driver = {
                .name = "da9052",
                .owner = THIS_MODULE,
+#ifdef CONFIG_OF
+               .of_match_table = dialog_dt_ids,
+#endif
        },
 };
 
index 6faf149e8d94c8d2b90c3ef69edc6d2e98a0fa6d..dbeadc5a6436caca55f69a68f0865cc71084b0c8 100644 (file)
@@ -25,8 +25,9 @@ static int __devinit da9052_spi_probe(struct spi_device *spi)
 {
        int ret;
        const struct spi_device_id *id = spi_get_device_id(spi);
-       struct da9052 *da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+       struct da9052 *da9052;
 
+       da9052 = devm_kzalloc(&spi->dev, sizeof(struct da9052), GFP_KERNEL);
        if (!da9052)
                return -ENOMEM;
 
@@ -42,25 +43,19 @@ static int __devinit da9052_spi_probe(struct spi_device *spi)
        da9052_regmap_config.read_flag_mask = 1;
        da9052_regmap_config.write_flag_mask = 0;
 
-       da9052->regmap = regmap_init_spi(spi, &da9052_regmap_config);
+       da9052->regmap = devm_regmap_init_spi(spi, &da9052_regmap_config);
        if (IS_ERR(da9052->regmap)) {
                ret = PTR_ERR(da9052->regmap);
                dev_err(&spi->dev, "Failed to allocate register map: %d\n",
                        ret);
-               goto err;
+               return ret;
        }
 
        ret = da9052_device_init(da9052, id->driver_data);
        if (ret != 0)
-               goto err_regmap;
+               return ret;
 
        return 0;
-
-err_regmap:
-       regmap_exit(da9052->regmap);
-err:
-       kfree(da9052);
-       return ret;
 }
 
 static int __devexit da9052_spi_remove(struct spi_device *spi)
@@ -68,9 +63,6 @@ static int __devexit da9052_spi_remove(struct spi_device *spi)
        struct da9052 *da9052 = dev_get_drvdata(&spi->dev);
 
        da9052_device_exit(da9052);
-       regmap_exit(da9052->regmap);
-       kfree(da9052);
-
        return 0;
 }
 
@@ -88,7 +80,6 @@ static struct spi_driver da9052_spi_driver = {
        .id_table = da9052_spi_id,
        .driver = {
                .name = "da9052",
-               .bus = &spi_bus_type,
                .owner = THIS_MODULE,
        },
 };
index 5be32489714f61a279b4fe30ce7badb37d5833a4..50e83dc5dc49b7520dfab72c52c80bb02e485f4c 100644 (file)
@@ -2720,6 +2720,7 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = {
        REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
        REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
        REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
+       REGULATOR_SUPPLY("v-i2c", "nmk-i2c.4"),
        /* "v-mmc" changed to "vcore" in the mainline kernel */
        REGULATOR_SUPPLY("vcore", "sdi0"),
        REGULATOR_SUPPLY("vcore", "sdi1"),
@@ -2734,6 +2735,7 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = {
        REGULATOR_SUPPLY("vcore", "uart2"),
        REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
        REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"),
+       REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
 };
 
 static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
@@ -2958,9 +2960,10 @@ static struct mfd_cell db8500_prcmu_devs[] = {
  * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
  *
  */
-static int __init db8500_prcmu_probe(struct platform_device *pdev)
+static int __devinit db8500_prcmu_probe(struct platform_device *pdev)
 {
-       int err = 0;
+       struct device_node *np = pdev->dev.of_node;
+       int irq = 0, err = 0;
 
        if (ux500_is_svp())
                return -ENODEV;
@@ -2970,8 +2973,14 @@ static int __init db8500_prcmu_probe(struct platform_device *pdev)
        /* Clean up the mailbox interrupts after pre-kernel code. */
        writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
 
-       err = request_threaded_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler,
-               prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
+       if (np)
+               irq = platform_get_irq(pdev, 0);
+
+       if (!np || irq <= 0)
+               irq = IRQ_DB8500_PRCMU1;
+
+       err = request_threaded_irq(irq, prcmu_irq_handler,
+               prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
        if (err < 0) {
                pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n");
                err = -EBUSY;
@@ -2981,14 +2990,16 @@ static int __init db8500_prcmu_probe(struct platform_device *pdev)
        if (cpu_is_u8500v20_or_later())
                prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
 
-       err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
-                             ARRAY_SIZE(db8500_prcmu_devs), NULL,
-                             0);
+       if (!np) {
+               err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
+                               ARRAY_SIZE(db8500_prcmu_devs), NULL, 0);
+               if (err) {
+                       pr_err("prcmu: Failed to add subdevices\n");
+                       return err;
+               }
+       }
 
-       if (err)
-               pr_err("prcmu: Failed to add subdevices\n");
-       else
-               pr_info("DB8500 PRCMU initialized\n");
+       pr_info("DB8500 PRCMU initialized\n");
 
 no_irq_return:
        return err;
@@ -2999,11 +3010,12 @@ static struct platform_driver db8500_prcmu_driver = {
                .name = "db8500-prcmu",
                .owner = THIS_MODULE,
        },
+       .probe = db8500_prcmu_probe,
 };
 
 static int __init db8500_prcmu_init(void)
 {
-       return platform_driver_probe(&db8500_prcmu_driver, db8500_prcmu_probe);
+       return platform_driver_register(&db8500_prcmu_driver);
 }
 
 arch_initcall(db8500_prcmu_init);
index b76657eb0c51044503e3c71b7345c98596d33cbd..59df5584cb58f54a25a424ca2551417a231fcb4d 100644 (file)
@@ -406,7 +406,7 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
                return -ENXIO;
        }
 
-       msic = kzalloc(sizeof(*msic), GFP_KERNEL);
+       msic = devm_kzalloc(&pdev->dev, sizeof(*msic), GFP_KERNEL);
        if (!msic)
                return -ENOMEM;
 
@@ -421,21 +421,13 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "failed to get SRAM iomem resource\n");
-               ret = -ENODEV;
-               goto fail_free_msic;
+               return -ENODEV;
        }
 
-       res = request_mem_region(res->start, resource_size(res), pdev->name);
-       if (!res) {
-               ret = -EBUSY;
-               goto fail_free_msic;
-       }
-
-       msic->irq_base = ioremap_nocache(res->start, resource_size(res));
+       msic->irq_base = devm_request_and_ioremap(&pdev->dev, res);
        if (!msic->irq_base) {
                dev_err(&pdev->dev, "failed to map SRAM memory\n");
-               ret = -ENOMEM;
-               goto fail_release_region;
+               return -ENOMEM;
        }
 
        platform_set_drvdata(pdev, msic);
@@ -443,7 +435,7 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
        ret = intel_msic_init_devices(msic);
        if (ret) {
                dev_err(&pdev->dev, "failed to initialize MSIC devices\n");
-               goto fail_unmap_mem;
+               return ret;
        }
 
        dev_info(&pdev->dev, "Intel MSIC version %c%d (vendor %#x)\n",
@@ -451,27 +443,14 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
                 msic->vendor);
 
        return 0;
-
-fail_unmap_mem:
-       iounmap(msic->irq_base);
-fail_release_region:
-       release_mem_region(res->start, resource_size(res));
-fail_free_msic:
-       kfree(msic);
-
-       return ret;
 }
 
 static int __devexit intel_msic_remove(struct platform_device *pdev)
 {
        struct intel_msic *msic = platform_get_drvdata(pdev);
-       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
        intel_msic_remove_devices(msic);
        platform_set_drvdata(pdev, NULL);
-       iounmap(msic->irq_base);
-       release_mem_region(res->start, resource_size(res));
-       kfree(msic);
 
        return 0;
 }
index a9223ed1b7c5d9a152a8533c72ac1faafd3b7385..2ea99989551af85a4796c69e5fc2b65ba7a951e9 100644 (file)
@@ -283,23 +283,8 @@ static struct pci_driver cmodio_pci_driver = {
        .remove   = __devexit_p(cmodio_pci_remove),
 };
 
-/*
- * Module Init / Exit
- */
-
-static int __init cmodio_init(void)
-{
-       return pci_register_driver(&cmodio_pci_driver);
-}
-
-static void __exit cmodio_exit(void)
-{
-       pci_unregister_driver(&cmodio_pci_driver);
-}
+module_pci_driver(cmodio_pci_driver);
 
 MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
 MODULE_DESCRIPTION("Janz CMOD-IO PCI MODULbus Carrier Board Driver");
 MODULE_LICENSE("GPL");
-
-module_init(cmodio_init);
-module_exit(cmodio_exit);
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
new file mode 100644 (file)
index 0000000..0b2879b
--- /dev/null
@@ -0,0 +1,667 @@
+/*
+ * lm3533-core.c -- LM3533 Core
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_BOOST_OVP_MASK          0x06
+#define LM3533_BOOST_OVP_SHIFT         1
+
+#define LM3533_BOOST_FREQ_MASK         0x01
+#define LM3533_BOOST_FREQ_SHIFT                0
+
+#define LM3533_BL_ID_MASK              1
+#define LM3533_LED_ID_MASK             3
+#define LM3533_BL_ID_MAX               1
+#define LM3533_LED_ID_MAX              3
+
+#define LM3533_HVLED_ID_MAX            2
+#define LM3533_LVLED_ID_MAX            5
+
+#define LM3533_REG_OUTPUT_CONF1                0x10
+#define LM3533_REG_OUTPUT_CONF2                0x11
+#define LM3533_REG_BOOST_PWM           0x2c
+
+#define LM3533_REG_MAX                 0xb2
+
+
+static struct mfd_cell lm3533_als_devs[] = {
+       {
+               .name   = "lm3533-als",
+               .id     = -1,
+       },
+};
+
+static struct mfd_cell lm3533_bl_devs[] = {
+       {
+               .name   = "lm3533-backlight",
+               .id     = 0,
+       },
+       {
+               .name   = "lm3533-backlight",
+               .id     = 1,
+       },
+};
+
+static struct mfd_cell lm3533_led_devs[] = {
+       {
+               .name   = "lm3533-leds",
+               .id     = 0,
+       },
+       {
+               .name   = "lm3533-leds",
+               .id     = 1,
+       },
+       {
+               .name   = "lm3533-leds",
+               .id     = 2,
+       },
+       {
+               .name   = "lm3533-leds",
+               .id     = 3,
+       },
+};
+
+int lm3533_read(struct lm3533 *lm3533, u8 reg, u8 *val)
+{
+       int tmp;
+       int ret;
+
+       ret = regmap_read(lm3533->regmap, reg, &tmp);
+       if (ret < 0) {
+               dev_err(lm3533->dev, "failed to read register %02x: %d\n",
+                                                               reg, ret);
+               return ret;
+       }
+
+       *val = tmp;
+
+       dev_dbg(lm3533->dev, "read [%02x]: %02x\n", reg, *val);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_read);
+
+int lm3533_write(struct lm3533 *lm3533, u8 reg, u8 val)
+{
+       int ret;
+
+       dev_dbg(lm3533->dev, "write [%02x]: %02x\n", reg, val);
+
+       ret = regmap_write(lm3533->regmap, reg, val);
+       if (ret < 0) {
+               dev_err(lm3533->dev, "failed to write register %02x: %d\n",
+                                                               reg, ret);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_write);
+
+int lm3533_update(struct lm3533 *lm3533, u8 reg, u8 val, u8 mask)
+{
+       int ret;
+
+       dev_dbg(lm3533->dev, "update [%02x]: %02x/%02x\n", reg, val, mask);
+
+       ret = regmap_update_bits(lm3533->regmap, reg, mask, val);
+       if (ret < 0) {
+               dev_err(lm3533->dev, "failed to update register %02x: %d\n",
+                                                               reg, ret);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_update);
+
+static int lm3533_set_boost_freq(struct lm3533 *lm3533,
+                                               enum lm3533_boost_freq freq)
+{
+       int ret;
+
+       ret = lm3533_update(lm3533, LM3533_REG_BOOST_PWM,
+                                       freq << LM3533_BOOST_FREQ_SHIFT,
+                                       LM3533_BOOST_FREQ_MASK);
+       if (ret)
+               dev_err(lm3533->dev, "failed to set boost frequency\n");
+
+       return ret;
+}
+
+
+static int lm3533_set_boost_ovp(struct lm3533 *lm3533,
+                                               enum lm3533_boost_ovp ovp)
+{
+       int ret;
+
+       ret = lm3533_update(lm3533, LM3533_REG_BOOST_PWM,
+                                       ovp << LM3533_BOOST_OVP_SHIFT,
+                                       LM3533_BOOST_OVP_MASK);
+       if (ret)
+               dev_err(lm3533->dev, "failed to set boost ovp\n");
+
+       return ret;
+}
+
+/*
+ * HVLED output config -- output hvled controlled by backlight bl
+ */
+static int lm3533_set_hvled_config(struct lm3533 *lm3533, u8 hvled, u8 bl)
+{
+       u8 val;
+       u8 mask;
+       int shift;
+       int ret;
+
+       if (hvled == 0 || hvled > LM3533_HVLED_ID_MAX)
+               return -EINVAL;
+
+       if (bl > LM3533_BL_ID_MAX)
+               return -EINVAL;
+
+       shift = hvled - 1;
+       mask = LM3533_BL_ID_MASK << shift;
+       val = bl << shift;
+
+       ret = lm3533_update(lm3533, LM3533_REG_OUTPUT_CONF1, val, mask);
+       if (ret)
+               dev_err(lm3533->dev, "failed to set hvled config\n");
+
+       return ret;
+}
+
+/*
+ * LVLED output config -- output lvled controlled by LED led
+ */
+static int lm3533_set_lvled_config(struct lm3533 *lm3533, u8 lvled, u8 led)
+{
+       u8 reg;
+       u8 val;
+       u8 mask;
+       int shift;
+       int ret;
+
+       if (lvled == 0 || lvled > LM3533_LVLED_ID_MAX)
+               return -EINVAL;
+
+       if (led > LM3533_LED_ID_MAX)
+               return -EINVAL;
+
+       if (lvled < 4) {
+               reg = LM3533_REG_OUTPUT_CONF1;
+               shift = 2 * lvled;
+       } else {
+               reg = LM3533_REG_OUTPUT_CONF2;
+               shift = 2 * (lvled - 4);
+       }
+
+       mask = LM3533_LED_ID_MASK << shift;
+       val = led << shift;
+
+       ret = lm3533_update(lm3533, reg, val, mask);
+       if (ret)
+               dev_err(lm3533->dev, "failed to set lvled config\n");
+
+       return ret;
+}
+
+static void lm3533_enable(struct lm3533 *lm3533)
+{
+       if (gpio_is_valid(lm3533->gpio_hwen))
+               gpio_set_value(lm3533->gpio_hwen, 1);
+}
+
+static void lm3533_disable(struct lm3533 *lm3533)
+{
+       if (gpio_is_valid(lm3533->gpio_hwen))
+               gpio_set_value(lm3533->gpio_hwen, 0);
+}
+
+enum lm3533_attribute_type {
+       LM3533_ATTR_TYPE_BACKLIGHT,
+       LM3533_ATTR_TYPE_LED,
+};
+
+struct lm3533_device_attribute {
+       struct device_attribute dev_attr;
+       enum lm3533_attribute_type type;
+       union {
+               struct {
+                       u8 id;
+               } output;
+       } u;
+};
+
+#define to_lm3533_dev_attr(_attr) \
+       container_of(_attr, struct lm3533_device_attribute, dev_attr)
+
+static ssize_t show_output(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct lm3533 *lm3533 = dev_get_drvdata(dev);
+       struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(attr);
+       int id = lattr->u.output.id;
+       u8 reg;
+       u8 val;
+       u8 mask;
+       int shift;
+       int ret;
+
+       if (lattr->type == LM3533_ATTR_TYPE_BACKLIGHT) {
+               reg = LM3533_REG_OUTPUT_CONF1;
+               shift = id - 1;
+               mask = LM3533_BL_ID_MASK << shift;
+       } else {
+               if (id < 4) {
+                       reg = LM3533_REG_OUTPUT_CONF1;
+                       shift = 2 * id;
+               } else {
+                       reg = LM3533_REG_OUTPUT_CONF2;
+                       shift = 2 * (id - 4);
+               }
+               mask = LM3533_LED_ID_MASK << shift;
+       }
+
+       ret = lm3533_read(lm3533, reg, &val);
+       if (ret)
+               return ret;
+
+       val = (val & mask) >> shift;
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_output(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t len)
+{
+       struct lm3533 *lm3533 = dev_get_drvdata(dev);
+       struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(attr);
+       int id = lattr->u.output.id;
+       u8 val;
+       int ret;
+
+       if (kstrtou8(buf, 0, &val))
+               return -EINVAL;
+
+       if (lattr->type == LM3533_ATTR_TYPE_BACKLIGHT)
+               ret = lm3533_set_hvled_config(lm3533, id, val);
+       else
+               ret = lm3533_set_lvled_config(lm3533, id, val);
+
+       if (ret)
+               return ret;
+
+       return len;
+}
+
+#define LM3533_OUTPUT_ATTR(_name, _mode, _show, _store, _type, _id) \
+       struct lm3533_device_attribute lm3533_dev_attr_##_name = \
+               { .dev_attr     = __ATTR(_name, _mode, _show, _store), \
+                 .type         = _type, \
+                 .u.output     = { .id = _id }, }
+
+#define LM3533_OUTPUT_ATTR_RW(_name, _type, _id) \
+       LM3533_OUTPUT_ATTR(output_##_name, S_IRUGO | S_IWUSR, \
+                                       show_output, store_output, _type, _id)
+
+#define LM3533_OUTPUT_HVLED_ATTR_RW(_nr) \
+       LM3533_OUTPUT_ATTR_RW(hvled##_nr, LM3533_ATTR_TYPE_BACKLIGHT, _nr)
+#define LM3533_OUTPUT_LVLED_ATTR_RW(_nr) \
+       LM3533_OUTPUT_ATTR_RW(lvled##_nr, LM3533_ATTR_TYPE_LED, _nr)
+/*
+ * Output config:
+ *
+ * output_hvled<nr>    0-1
+ * output_lvled<nr>    0-3
+ */
+static LM3533_OUTPUT_HVLED_ATTR_RW(1);
+static LM3533_OUTPUT_HVLED_ATTR_RW(2);
+static LM3533_OUTPUT_LVLED_ATTR_RW(1);
+static LM3533_OUTPUT_LVLED_ATTR_RW(2);
+static LM3533_OUTPUT_LVLED_ATTR_RW(3);
+static LM3533_OUTPUT_LVLED_ATTR_RW(4);
+static LM3533_OUTPUT_LVLED_ATTR_RW(5);
+
+static struct attribute *lm3533_attributes[] = {
+       &lm3533_dev_attr_output_hvled1.dev_attr.attr,
+       &lm3533_dev_attr_output_hvled2.dev_attr.attr,
+       &lm3533_dev_attr_output_lvled1.dev_attr.attr,
+       &lm3533_dev_attr_output_lvled2.dev_attr.attr,
+       &lm3533_dev_attr_output_lvled3.dev_attr.attr,
+       &lm3533_dev_attr_output_lvled4.dev_attr.attr,
+       &lm3533_dev_attr_output_lvled5.dev_attr.attr,
+       NULL,
+};
+
+#define to_dev_attr(_attr) \
+       container_of(_attr, struct device_attribute, attr)
+
+static umode_t lm3533_attr_is_visible(struct kobject *kobj,
+                                            struct attribute *attr, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct lm3533 *lm3533 = dev_get_drvdata(dev);
+       struct device_attribute *dattr = to_dev_attr(attr);
+       struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(dattr);
+       enum lm3533_attribute_type type = lattr->type;
+       umode_t mode = attr->mode;
+
+       if (!lm3533->have_backlights && type == LM3533_ATTR_TYPE_BACKLIGHT)
+               mode = 0;
+       else if (!lm3533->have_leds && type == LM3533_ATTR_TYPE_LED)
+               mode = 0;
+
+       return mode;
+};
+
+static struct attribute_group lm3533_attribute_group = {
+       .is_visible     = lm3533_attr_is_visible,
+       .attrs          = lm3533_attributes
+};
+
+static int __devinit lm3533_device_als_init(struct lm3533 *lm3533)
+{
+       struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+       int ret;
+
+       if (!pdata->als)
+               return 0;
+
+       lm3533_als_devs[0].platform_data = pdata->als;
+       lm3533_als_devs[0].pdata_size = sizeof(*pdata->als);
+
+       ret = mfd_add_devices(lm3533->dev, 0, lm3533_als_devs, 1, NULL, 0);
+       if (ret) {
+               dev_err(lm3533->dev, "failed to add ALS device\n");
+               return ret;
+       }
+
+       lm3533->have_als = 1;
+
+       return 0;
+}
+
+static int __devinit lm3533_device_bl_init(struct lm3533 *lm3533)
+{
+       struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+       int i;
+       int ret;
+
+       if (!pdata->backlights || pdata->num_backlights == 0)
+               return 0;
+
+       if (pdata->num_backlights > ARRAY_SIZE(lm3533_bl_devs))
+               pdata->num_backlights = ARRAY_SIZE(lm3533_bl_devs);
+
+       for (i = 0; i < pdata->num_backlights; ++i) {
+               lm3533_bl_devs[i].platform_data = &pdata->backlights[i];
+               lm3533_bl_devs[i].pdata_size = sizeof(pdata->backlights[i]);
+       }
+
+       ret = mfd_add_devices(lm3533->dev, 0, lm3533_bl_devs,
+                                       pdata->num_backlights, NULL, 0);
+       if (ret) {
+               dev_err(lm3533->dev, "failed to add backlight devices\n");
+               return ret;
+       }
+
+       lm3533->have_backlights = 1;
+
+       return 0;
+}
+
+static int __devinit lm3533_device_led_init(struct lm3533 *lm3533)
+{
+       struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+       int i;
+       int ret;
+
+       if (!pdata->leds || pdata->num_leds == 0)
+               return 0;
+
+       if (pdata->num_leds > ARRAY_SIZE(lm3533_led_devs))
+               pdata->num_leds = ARRAY_SIZE(lm3533_led_devs);
+
+       for (i = 0; i < pdata->num_leds; ++i) {
+               lm3533_led_devs[i].platform_data = &pdata->leds[i];
+               lm3533_led_devs[i].pdata_size = sizeof(pdata->leds[i]);
+       }
+
+       ret = mfd_add_devices(lm3533->dev, 0, lm3533_led_devs,
+                                               pdata->num_leds, NULL, 0);
+       if (ret) {
+               dev_err(lm3533->dev, "failed to add LED devices\n");
+               return ret;
+       }
+
+       lm3533->have_leds = 1;
+
+       return 0;
+}
+
+static int __devinit lm3533_device_setup(struct lm3533 *lm3533,
+                                       struct lm3533_platform_data *pdata)
+{
+       int ret;
+
+       ret = lm3533_set_boost_freq(lm3533, pdata->boost_freq);
+       if (ret)
+               return ret;
+
+       ret = lm3533_set_boost_ovp(lm3533, pdata->boost_ovp);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int __devinit lm3533_device_init(struct lm3533 *lm3533)
+{
+       struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+       int ret;
+
+       dev_dbg(lm3533->dev, "%s\n", __func__);
+
+       if (!pdata) {
+               dev_err(lm3533->dev, "no platform data\n");
+               return -EINVAL;
+       }
+
+       lm3533->gpio_hwen = pdata->gpio_hwen;
+
+       dev_set_drvdata(lm3533->dev, lm3533);
+
+       if (gpio_is_valid(lm3533->gpio_hwen)) {
+               ret = gpio_request_one(lm3533->gpio_hwen, GPIOF_OUT_INIT_LOW,
+                                                               "lm3533-hwen");
+               if (ret < 0) {
+                       dev_err(lm3533->dev,
+                               "failed to request HWEN GPIO %d\n",
+                               lm3533->gpio_hwen);
+                       return ret;
+               }
+       }
+
+       lm3533_enable(lm3533);
+
+       ret = lm3533_device_setup(lm3533, pdata);
+       if (ret)
+               goto err_disable;
+
+       lm3533_device_als_init(lm3533);
+       lm3533_device_bl_init(lm3533);
+       lm3533_device_led_init(lm3533);
+
+       ret = sysfs_create_group(&lm3533->dev->kobj, &lm3533_attribute_group);
+       if (ret < 0) {
+               dev_err(lm3533->dev, "failed to create sysfs attributes\n");
+               goto err_unregister;
+       }
+
+       return 0;
+
+err_unregister:
+       mfd_remove_devices(lm3533->dev);
+err_disable:
+       lm3533_disable(lm3533);
+       if (gpio_is_valid(lm3533->gpio_hwen))
+               gpio_free(lm3533->gpio_hwen);
+
+       return ret;
+}
+
+static void __devexit lm3533_device_exit(struct lm3533 *lm3533)
+{
+       dev_dbg(lm3533->dev, "%s\n", __func__);
+
+       sysfs_remove_group(&lm3533->dev->kobj, &lm3533_attribute_group);
+
+       mfd_remove_devices(lm3533->dev);
+       lm3533_disable(lm3533);
+       if (gpio_is_valid(lm3533->gpio_hwen))
+               gpio_free(lm3533->gpio_hwen);
+}
+
+static bool lm3533_readable_register(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case 0x10 ... 0x2c:
+       case 0x30 ... 0x38:
+       case 0x40 ... 0x45:
+       case 0x50 ... 0x57:
+       case 0x60 ... 0x6e:
+       case 0x70 ... 0x75:
+       case 0x80 ... 0x85:
+       case 0x90 ... 0x95:
+       case 0xa0 ... 0xa5:
+       case 0xb0 ... 0xb2:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool lm3533_volatile_register(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case 0x34 ... 0x36:     /* zone */
+       case 0x37 ... 0x38:     /* adc */
+       case 0xb0 ... 0xb1:     /* fault */
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool lm3533_precious_register(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case 0x34:              /* zone */
+               return true;
+       default:
+               return false;
+       }
+}
+
+static struct regmap_config regmap_config = {
+       .reg_bits       = 8,
+       .val_bits       = 8,
+       .max_register   = LM3533_REG_MAX,
+       .readable_reg   = lm3533_readable_register,
+       .volatile_reg   = lm3533_volatile_register,
+       .precious_reg   = lm3533_precious_register,
+};
+
+static int __devinit lm3533_i2c_probe(struct i2c_client *i2c,
+                                       const struct i2c_device_id *id)
+{
+       struct lm3533 *lm3533;
+       int ret;
+
+       dev_dbg(&i2c->dev, "%s\n", __func__);
+
+       lm3533 = devm_kzalloc(&i2c->dev, sizeof(*lm3533), GFP_KERNEL);
+       if (!lm3533)
+               return -ENOMEM;
+
+       i2c_set_clientdata(i2c, lm3533);
+
+       lm3533->regmap = devm_regmap_init_i2c(i2c, &regmap_config);
+       if (IS_ERR(lm3533->regmap))
+               return PTR_ERR(lm3533->regmap);
+
+       lm3533->dev = &i2c->dev;
+       lm3533->irq = i2c->irq;
+
+       ret = lm3533_device_init(lm3533);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int __devexit lm3533_i2c_remove(struct i2c_client *i2c)
+{
+       struct lm3533 *lm3533 = i2c_get_clientdata(i2c);
+
+       dev_dbg(&i2c->dev, "%s\n", __func__);
+
+       lm3533_device_exit(lm3533);
+
+       return 0;
+}
+
+static const struct i2c_device_id lm3533_i2c_ids[] = {
+       { "lm3533", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(i2c, lm3533_i2c_ids);
+
+static struct i2c_driver lm3533_i2c_driver = {
+       .driver = {
+                  .name = "lm3533",
+                  .owner = THIS_MODULE,
+       },
+       .id_table       = lm3533_i2c_ids,
+       .probe          = lm3533_i2c_probe,
+       .remove         = __devexit_p(lm3533_i2c_remove),
+};
+
+static int __init lm3533_i2c_init(void)
+{
+       return i2c_add_driver(&lm3533_i2c_driver);
+}
+subsys_initcall(lm3533_i2c_init);
+
+static void __exit lm3533_i2c_exit(void)
+{
+       i2c_del_driver(&lm3533_i2c_driver);
+}
+module_exit(lm3533_i2c_exit);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/lm3533-ctrlbank.c b/drivers/mfd/lm3533-ctrlbank.c
new file mode 100644 (file)
index 0000000..a4cb7a5
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+ * lm3533-ctrlbank.c -- LM3533 Generic Control Bank interface
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_MAX_CURRENT_MIN         5000
+#define LM3533_MAX_CURRENT_MAX         29800
+#define LM3533_MAX_CURRENT_STEP                800
+
+#define LM3533_BRIGHTNESS_MAX          255
+#define LM3533_PWM_MAX                 0x3f
+
+#define LM3533_REG_PWM_BASE            0x14
+#define LM3533_REG_MAX_CURRENT_BASE    0x1f
+#define LM3533_REG_CTRLBANK_ENABLE     0x27
+#define LM3533_REG_BRIGHTNESS_BASE     0x40
+
+
+static inline u8 lm3533_ctrlbank_get_reg(struct lm3533_ctrlbank *cb, u8 base)
+{
+       return base + cb->id;
+}
+
+int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb)
+{
+       u8 mask;
+       int ret;
+
+       dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
+
+       mask = 1 << cb->id;
+       ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE,
+                                                               mask, mask);
+       if (ret)
+               dev_err(cb->dev, "failed to enable ctrlbank %d\n", cb->id);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_enable);
+
+int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb)
+{
+       u8 mask;
+       int ret;
+
+       dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
+
+       mask = 1 << cb->id;
+       ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE, 0, mask);
+       if (ret)
+               dev_err(cb->dev, "failed to disable ctrlbank %d\n", cb->id);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_disable);
+
+/*
+ * Full-scale current.
+ *
+ * imax                5000 - 29800 uA (800 uA step)
+ */
+int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb, u16 imax)
+{
+       u8 reg;
+       u8 val;
+       int ret;
+
+       if (imax < LM3533_MAX_CURRENT_MIN || imax > LM3533_MAX_CURRENT_MAX)
+               return -EINVAL;
+
+       val = (imax - LM3533_MAX_CURRENT_MIN) / LM3533_MAX_CURRENT_STEP;
+
+       reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_MAX_CURRENT_BASE);
+       ret = lm3533_write(cb->lm3533, reg, val);
+       if (ret)
+               dev_err(cb->dev, "failed to set max current\n");
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_max_current);
+
+#define lm3533_ctrlbank_set(_name, _NAME)                              \
+int lm3533_ctrlbank_set_##_name(struct lm3533_ctrlbank *cb, u8 val)    \
+{                                                                      \
+       u8 reg;                                                         \
+       int ret;                                                        \
+                                                                       \
+       if (val > LM3533_##_NAME##_MAX)                                 \
+               return -EINVAL;                                         \
+                                                                       \
+       reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE);   \
+       ret = lm3533_write(cb->lm3533, reg, val);                       \
+       if (ret)                                                        \
+               dev_err(cb->dev, "failed to set " #_name "\n");         \
+                                                                       \
+       return ret;                                                     \
+}                                                                      \
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_##_name);
+
+#define lm3533_ctrlbank_get(_name, _NAME)                              \
+int lm3533_ctrlbank_get_##_name(struct lm3533_ctrlbank *cb, u8 *val)   \
+{                                                                      \
+       u8 reg;                                                         \
+       int ret;                                                        \
+                                                                       \
+       reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE);   \
+       ret = lm3533_read(cb->lm3533, reg, val);                        \
+       if (ret)                                                        \
+               dev_err(cb->dev, "failed to get " #_name "\n");         \
+                                                                       \
+       return ret;                                                     \
+}                                                                      \
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_##_name);
+
+lm3533_ctrlbank_set(brightness, BRIGHTNESS);
+lm3533_ctrlbank_get(brightness, BRIGHTNESS);
+
+/*
+ * PWM-input control mask:
+ *
+ *   bit 5 - PWM-input enabled in Zone 4
+ *   bit 4 - PWM-input enabled in Zone 3
+ *   bit 3 - PWM-input enabled in Zone 2
+ *   bit 2 - PWM-input enabled in Zone 1
+ *   bit 1 - PWM-input enabled in Zone 0
+ *   bit 0 - PWM-input enabled
+ */
+lm3533_ctrlbank_set(pwm, PWM);
+lm3533_ctrlbank_get(pwm, PWM);
+
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Control Bank interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
new file mode 100644 (file)
index 0000000..027cc8f
--- /dev/null
@@ -0,0 +1,888 @@
+/*
+ *  lpc_ich.c - LPC interface for Intel ICH
+ *
+ *  LPC bridge function of the Intel ICH contains many other
+ *  functional units, such as Interrupt controllers, Timers,
+ *  Power Management, System Management, GPIO, RTC, and LPC
+ *  Configuration Registers.
+ *
+ *  This driver is derived from lpc_sch.
+ *
+ *  Copyright (c) 2011 Extreme Engineering Solution, Inc.
+ *  Author: Aaron Sierra <asierra@xes-inc.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *  This driver supports the following I/O Controller hubs:
+ *     (See the intel documentation on http://developer.intel.com.)
+ *     document number 290655-003, 290677-014: 82801AA (ICH), 82801AB (ICH0)
+ *     document number 290687-002, 298242-027: 82801BA (ICH2)
+ *     document number 290733-003, 290739-013: 82801CA (ICH3-S)
+ *     document number 290716-001, 290718-007: 82801CAM (ICH3-M)
+ *     document number 290744-001, 290745-025: 82801DB (ICH4)
+ *     document number 252337-001, 252663-008: 82801DBM (ICH4-M)
+ *     document number 273599-001, 273645-002: 82801E (C-ICH)
+ *     document number 252516-001, 252517-028: 82801EB (ICH5), 82801ER (ICH5R)
+ *     document number 300641-004, 300884-013: 6300ESB
+ *     document number 301473-002, 301474-026: 82801F (ICH6)
+ *     document number 313082-001, 313075-006: 631xESB, 632xESB
+ *     document number 307013-003, 307014-024: 82801G (ICH7)
+ *     document number 322896-001, 322897-001: NM10
+ *     document number 313056-003, 313057-017: 82801H (ICH8)
+ *     document number 316972-004, 316973-012: 82801I (ICH9)
+ *     document number 319973-002, 319974-002: 82801J (ICH10)
+ *     document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
+ *     document number 320066-003, 320257-008: EP80579 (IICH)
+ *     document number 324645-001, 324646-001: Cougar Point (CPT)
+ *     document number TBD : Patsburg (PBG)
+ *     document number TBD : DH89xxCC
+ *     document number TBD : Panther Point
+ *     document number TBD : Lynx Point
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/lpc_ich.h>
+
+#define ACPIBASE               0x40
+#define ACPIBASE_GPE_OFF       0x28
+#define ACPIBASE_GPE_END       0x2f
+#define ACPIBASE_SMI_OFF       0x30
+#define ACPIBASE_SMI_END       0x33
+#define ACPIBASE_TCO_OFF       0x60
+#define ACPIBASE_TCO_END       0x7f
+#define ACPICTRL               0x44
+
+#define ACPIBASE_GCS_OFF       0x3410
+#define ACPIBASE_GCS_END       0x3414
+
+#define GPIOBASE               0x48
+#define GPIOCTRL               0x4C
+
+#define RCBABASE               0xf0
+
+#define wdt_io_res(i) wdt_res(0, i)
+#define wdt_mem_res(i) wdt_res(ICH_RES_MEM_OFF, i)
+#define wdt_res(b, i) (&wdt_ich_res[(b) + (i)])
+
+static int lpc_ich_acpi_save = -1;
+static int lpc_ich_gpio_save = -1;
+
+static struct resource wdt_ich_res[] = {
+       /* ACPI - TCO */
+       {
+               .flags = IORESOURCE_IO,
+       },
+       /* ACPI - SMI */
+       {
+               .flags = IORESOURCE_IO,
+       },
+       /* GCS */
+       {
+               .flags = IORESOURCE_MEM,
+       },
+};
+
+static struct resource gpio_ich_res[] = {
+       /* GPIO */
+       {
+               .flags = IORESOURCE_IO,
+       },
+       /* ACPI - GPE0 */
+       {
+               .flags = IORESOURCE_IO,
+       },
+};
+
+enum lpc_cells {
+       LPC_WDT = 0,
+       LPC_GPIO,
+};
+
+static struct mfd_cell lpc_ich_cells[] = {
+       [LPC_WDT] = {
+               .name = "iTCO_wdt",
+               .num_resources = ARRAY_SIZE(wdt_ich_res),
+               .resources = wdt_ich_res,
+               .ignore_resource_conflicts = true,
+       },
+       [LPC_GPIO] = {
+               .name = "gpio_ich",
+               .num_resources = ARRAY_SIZE(gpio_ich_res),
+               .resources = gpio_ich_res,
+               .ignore_resource_conflicts = true,
+       },
+};
+
+/* chipset related info */
+enum lpc_chipsets {
+       LPC_ICH = 0,    /* ICH */
+       LPC_ICH0,       /* ICH0 */
+       LPC_ICH2,       /* ICH2 */
+       LPC_ICH2M,      /* ICH2-M */
+       LPC_ICH3,       /* ICH3-S */
+       LPC_ICH3M,      /* ICH3-M */
+       LPC_ICH4,       /* ICH4 */
+       LPC_ICH4M,      /* ICH4-M */
+       LPC_CICH,       /* C-ICH */
+       LPC_ICH5,       /* ICH5 & ICH5R */
+       LPC_6300ESB,    /* 6300ESB */
+       LPC_ICH6,       /* ICH6 & ICH6R */
+       LPC_ICH6M,      /* ICH6-M */
+       LPC_ICH6W,      /* ICH6W & ICH6RW */
+       LPC_631XESB,    /* 631xESB/632xESB */
+       LPC_ICH7,       /* ICH7 & ICH7R */
+       LPC_ICH7DH,     /* ICH7DH */
+       LPC_ICH7M,      /* ICH7-M & ICH7-U */
+       LPC_ICH7MDH,    /* ICH7-M DH */
+       LPC_NM10,       /* NM10 */
+       LPC_ICH8,       /* ICH8 & ICH8R */
+       LPC_ICH8DH,     /* ICH8DH */
+       LPC_ICH8DO,     /* ICH8DO */
+       LPC_ICH8M,      /* ICH8M */
+       LPC_ICH8ME,     /* ICH8M-E */
+       LPC_ICH9,       /* ICH9 */
+       LPC_ICH9R,      /* ICH9R */
+       LPC_ICH9DH,     /* ICH9DH */
+       LPC_ICH9DO,     /* ICH9DO */
+       LPC_ICH9M,      /* ICH9M */
+       LPC_ICH9ME,     /* ICH9M-E */
+       LPC_ICH10,      /* ICH10 */
+       LPC_ICH10R,     /* ICH10R */
+       LPC_ICH10D,     /* ICH10D */
+       LPC_ICH10DO,    /* ICH10DO */
+       LPC_PCH,        /* PCH Desktop Full Featured */
+       LPC_PCHM,       /* PCH Mobile Full Featured */
+       LPC_P55,        /* P55 */
+       LPC_PM55,       /* PM55 */
+       LPC_H55,        /* H55 */
+       LPC_QM57,       /* QM57 */
+       LPC_H57,        /* H57 */
+       LPC_HM55,       /* HM55 */
+       LPC_Q57,        /* Q57 */
+       LPC_HM57,       /* HM57 */
+       LPC_PCHMSFF,    /* PCH Mobile SFF Full Featured */
+       LPC_QS57,       /* QS57 */
+       LPC_3400,       /* 3400 */
+       LPC_3420,       /* 3420 */
+       LPC_3450,       /* 3450 */
+       LPC_EP80579,    /* EP80579 */
+       LPC_CPT,        /* Cougar Point */
+       LPC_CPTD,       /* Cougar Point Desktop */
+       LPC_CPTM,       /* Cougar Point Mobile */
+       LPC_PBG,        /* Patsburg */
+       LPC_DH89XXCC,   /* DH89xxCC */
+       LPC_PPT,        /* Panther Point */
+       LPC_LPT,        /* Lynx Point */
+};
+
+struct lpc_ich_info lpc_chipset_info[] __devinitdata = {
+       [LPC_ICH] = {
+               .name = "ICH",
+               .iTCO_version = 1,
+       },
+       [LPC_ICH0] = {
+               .name = "ICH0",
+               .iTCO_version = 1,
+       },
+       [LPC_ICH2] = {
+               .name = "ICH2",
+               .iTCO_version = 1,
+       },
+       [LPC_ICH2M] = {
+               .name = "ICH2-M",
+               .iTCO_version = 1,
+       },
+       [LPC_ICH3] = {
+               .name = "ICH3-S",
+               .iTCO_version = 1,
+       },
+       [LPC_ICH3M] = {
+               .name = "ICH3-M",
+               .iTCO_version = 1,
+       },
+       [LPC_ICH4] = {
+               .name = "ICH4",
+               .iTCO_version = 1,
+       },
+       [LPC_ICH4M] = {
+               .name = "ICH4-M",
+               .iTCO_version = 1,
+       },
+       [LPC_CICH] = {
+               .name = "C-ICH",
+               .iTCO_version = 1,
+       },
+       [LPC_ICH5] = {
+               .name = "ICH5 or ICH5R",
+               .iTCO_version = 1,
+       },
+       [LPC_6300ESB] = {
+               .name = "6300ESB",
+               .iTCO_version = 1,
+       },
+       [LPC_ICH6] = {
+               .name = "ICH6 or ICH6R",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V6_GPIO,
+       },
+       [LPC_ICH6M] = {
+               .name = "ICH6-M",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V6_GPIO,
+       },
+       [LPC_ICH6W] = {
+               .name = "ICH6W or ICH6RW",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V6_GPIO,
+       },
+       [LPC_631XESB] = {
+               .name = "631xESB/632xESB",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V6_GPIO,
+       },
+       [LPC_ICH7] = {
+               .name = "ICH7 or ICH7R",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V7_GPIO,
+       },
+       [LPC_ICH7DH] = {
+               .name = "ICH7DH",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V7_GPIO,
+       },
+       [LPC_ICH7M] = {
+               .name = "ICH7-M or ICH7-U",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V7_GPIO,
+       },
+       [LPC_ICH7MDH] = {
+               .name = "ICH7-M DH",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V7_GPIO,
+       },
+       [LPC_NM10] = {
+               .name = "NM10",
+               .iTCO_version = 2,
+       },
+       [LPC_ICH8] = {
+               .name = "ICH8 or ICH8R",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V7_GPIO,
+       },
+       [LPC_ICH8DH] = {
+               .name = "ICH8DH",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V7_GPIO,
+       },
+       [LPC_ICH8DO] = {
+               .name = "ICH8DO",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V7_GPIO,
+       },
+       [LPC_ICH8M] = {
+               .name = "ICH8M",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V7_GPIO,
+       },
+       [LPC_ICH8ME] = {
+               .name = "ICH8M-E",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V7_GPIO,
+       },
+       [LPC_ICH9] = {
+               .name = "ICH9",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V9_GPIO,
+       },
+       [LPC_ICH9R] = {
+               .name = "ICH9R",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V9_GPIO,
+       },
+       [LPC_ICH9DH] = {
+               .name = "ICH9DH",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V9_GPIO,
+       },
+       [LPC_ICH9DO] = {
+               .name = "ICH9DO",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V9_GPIO,
+       },
+       [LPC_ICH9M] = {
+               .name = "ICH9M",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V9_GPIO,
+       },
+       [LPC_ICH9ME] = {
+               .name = "ICH9M-E",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V9_GPIO,
+       },
+       [LPC_ICH10] = {
+               .name = "ICH10",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V10CONS_GPIO,
+       },
+       [LPC_ICH10R] = {
+               .name = "ICH10R",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V10CONS_GPIO,
+       },
+       [LPC_ICH10D] = {
+               .name = "ICH10D",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V10CORP_GPIO,
+       },
+       [LPC_ICH10DO] = {
+               .name = "ICH10DO",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V10CORP_GPIO,
+       },
+       [LPC_PCH] = {
+               .name = "PCH Desktop Full Featured",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_PCHM] = {
+               .name = "PCH Mobile Full Featured",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_P55] = {
+               .name = "P55",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_PM55] = {
+               .name = "PM55",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_H55] = {
+               .name = "H55",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_QM57] = {
+               .name = "QM57",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_H57] = {
+               .name = "H57",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_HM55] = {
+               .name = "HM55",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_Q57] = {
+               .name = "Q57",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_HM57] = {
+               .name = "HM57",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_PCHMSFF] = {
+               .name = "PCH Mobile SFF Full Featured",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_QS57] = {
+               .name = "QS57",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_3400] = {
+               .name = "3400",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_3420] = {
+               .name = "3420",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_3450] = {
+               .name = "3450",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_EP80579] = {
+               .name = "EP80579",
+               .iTCO_version = 2,
+       },
+       [LPC_CPT] = {
+               .name = "Cougar Point",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_CPTD] = {
+               .name = "Cougar Point Desktop",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_CPTM] = {
+               .name = "Cougar Point Mobile",
+               .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
+       },
+       [LPC_PBG] = {
+               .name = "Patsburg",
+               .iTCO_version = 2,
+       },
+       [LPC_DH89XXCC] = {
+               .name = "DH89xxCC",
+               .iTCO_version = 2,
+       },
+       [LPC_PPT] = {
+               .name = "Panther Point",
+               .iTCO_version = 2,
+       },
+       [LPC_LPT] = {
+               .name = "Lynx Point",
+               .iTCO_version = 2,
+       },
+};
+
+/*
+ * This data only exists for exporting the supported PCI ids
+ * via MODULE_DEVICE_TABLE.  We do not actually register a
+ * pci_driver, because the I/O Controller Hub has also other
+ * functions that probably will be registered by other drivers.
+ */
+static DEFINE_PCI_DEVICE_TABLE(lpc_ich_ids) = {
+       { PCI_VDEVICE(INTEL, 0x2410), LPC_ICH},
+       { PCI_VDEVICE(INTEL, 0x2420), LPC_ICH0},
+       { PCI_VDEVICE(INTEL, 0x2440), LPC_ICH2},
+       { PCI_VDEVICE(INTEL, 0x244c), LPC_ICH2M},
+       { PCI_VDEVICE(INTEL, 0x2480), LPC_ICH3},
+       { PCI_VDEVICE(INTEL, 0x248c), LPC_ICH3M},
+       { PCI_VDEVICE(INTEL, 0x24c0), LPC_ICH4},
+       { PCI_VDEVICE(INTEL, 0x24cc), LPC_ICH4M},
+       { PCI_VDEVICE(INTEL, 0x2450), LPC_CICH},
+       { PCI_VDEVICE(INTEL, 0x24d0), LPC_ICH5},
+       { PCI_VDEVICE(INTEL, 0x25a1), LPC_6300ESB},
+       { PCI_VDEVICE(INTEL, 0x2640), LPC_ICH6},
+       { PCI_VDEVICE(INTEL, 0x2641), LPC_ICH6M},
+       { PCI_VDEVICE(INTEL, 0x2642), LPC_ICH6W},
+       { PCI_VDEVICE(INTEL, 0x2670), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x2671), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x2672), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x2673), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x2674), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x2675), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x2676), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x2677), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x2678), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x2679), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x267a), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x267b), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x267c), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x267d), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x267e), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x267f), LPC_631XESB},
+       { PCI_VDEVICE(INTEL, 0x27b8), LPC_ICH7},
+       { PCI_VDEVICE(INTEL, 0x27b0), LPC_ICH7DH},
+       { PCI_VDEVICE(INTEL, 0x27b9), LPC_ICH7M},
+       { PCI_VDEVICE(INTEL, 0x27bd), LPC_ICH7MDH},
+       { PCI_VDEVICE(INTEL, 0x27bc), LPC_NM10},
+       { PCI_VDEVICE(INTEL, 0x2810), LPC_ICH8},
+       { PCI_VDEVICE(INTEL, 0x2812), LPC_ICH8DH},
+       { PCI_VDEVICE(INTEL, 0x2814), LPC_ICH8DO},
+       { PCI_VDEVICE(INTEL, 0x2815), LPC_ICH8M},
+       { PCI_VDEVICE(INTEL, 0x2811), LPC_ICH8ME},
+       { PCI_VDEVICE(INTEL, 0x2918), LPC_ICH9},
+       { PCI_VDEVICE(INTEL, 0x2916), LPC_ICH9R},
+       { PCI_VDEVICE(INTEL, 0x2912), LPC_ICH9DH},
+       { PCI_VDEVICE(INTEL, 0x2914), LPC_ICH9DO},
+       { PCI_VDEVICE(INTEL, 0x2919), LPC_ICH9M},
+       { PCI_VDEVICE(INTEL, 0x2917), LPC_ICH9ME},
+       { PCI_VDEVICE(INTEL, 0x3a18), LPC_ICH10},
+       { PCI_VDEVICE(INTEL, 0x3a16), LPC_ICH10R},
+       { PCI_VDEVICE(INTEL, 0x3a1a), LPC_ICH10D},
+       { PCI_VDEVICE(INTEL, 0x3a14), LPC_ICH10DO},
+       { PCI_VDEVICE(INTEL, 0x3b00), LPC_PCH},
+       { PCI_VDEVICE(INTEL, 0x3b01), LPC_PCHM},
+       { PCI_VDEVICE(INTEL, 0x3b02), LPC_P55},
+       { PCI_VDEVICE(INTEL, 0x3b03), LPC_PM55},
+       { PCI_VDEVICE(INTEL, 0x3b06), LPC_H55},
+       { PCI_VDEVICE(INTEL, 0x3b07), LPC_QM57},
+       { PCI_VDEVICE(INTEL, 0x3b08), LPC_H57},
+       { PCI_VDEVICE(INTEL, 0x3b09), LPC_HM55},
+       { PCI_VDEVICE(INTEL, 0x3b0a), LPC_Q57},
+       { PCI_VDEVICE(INTEL, 0x3b0b), LPC_HM57},
+       { PCI_VDEVICE(INTEL, 0x3b0d), LPC_PCHMSFF},
+       { PCI_VDEVICE(INTEL, 0x3b0f), LPC_QS57},
+       { PCI_VDEVICE(INTEL, 0x3b12), LPC_3400},
+       { PCI_VDEVICE(INTEL, 0x3b14), LPC_3420},
+       { PCI_VDEVICE(INTEL, 0x3b16), LPC_3450},
+       { PCI_VDEVICE(INTEL, 0x5031), LPC_EP80579},
+       { PCI_VDEVICE(INTEL, 0x1c41), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c42), LPC_CPTD},
+       { PCI_VDEVICE(INTEL, 0x1c43), LPC_CPTM},
+       { PCI_VDEVICE(INTEL, 0x1c44), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c45), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c46), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c47), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c48), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c49), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c4a), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c4b), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c4c), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c4d), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c4e), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c4f), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c50), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c51), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c52), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c53), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c54), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c55), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c56), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c57), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c58), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c59), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c5a), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c5b), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c5c), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c5d), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c5e), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1c5f), LPC_CPT},
+       { PCI_VDEVICE(INTEL, 0x1d40), LPC_PBG},
+       { PCI_VDEVICE(INTEL, 0x1d41), LPC_PBG},
+       { PCI_VDEVICE(INTEL, 0x2310), LPC_DH89XXCC},
+       { PCI_VDEVICE(INTEL, 0x1e40), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e41), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e42), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e43), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e44), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e45), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e46), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e47), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e48), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e49), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e4a), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e4b), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e4c), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e4d), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e4e), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e4f), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e50), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e51), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e52), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e53), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e54), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e55), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e56), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e57), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e58), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e59), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e5a), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e5b), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e5c), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e5d), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e5e), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x1e5f), LPC_PPT},
+       { PCI_VDEVICE(INTEL, 0x8c40), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c41), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c42), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c43), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c44), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c45), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c46), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c47), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c48), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c49), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c4a), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c4b), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c4c), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c4d), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c4e), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c4f), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c50), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c51), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c52), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c53), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c54), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c55), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c56), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c57), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c58), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c59), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c5a), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c5b), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c5c), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c5d), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c5e), LPC_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c5f), LPC_LPT},
+       { 0, },                 /* End of list */
+};
+MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
+
+static void lpc_ich_restore_config_space(struct pci_dev *dev)
+{
+       if (lpc_ich_acpi_save >= 0) {
+               pci_write_config_byte(dev, ACPICTRL, lpc_ich_acpi_save);
+               lpc_ich_acpi_save = -1;
+       }
+
+       if (lpc_ich_gpio_save >= 0) {
+               pci_write_config_byte(dev, GPIOCTRL, lpc_ich_gpio_save);
+               lpc_ich_gpio_save = -1;
+       }
+}
+
+static void __devinit lpc_ich_enable_acpi_space(struct pci_dev *dev)
+{
+       u8 reg_save;
+
+       pci_read_config_byte(dev, ACPICTRL, &reg_save);
+       pci_write_config_byte(dev, ACPICTRL, reg_save | 0x10);
+       lpc_ich_acpi_save = reg_save;
+}
+
+static void __devinit lpc_ich_enable_gpio_space(struct pci_dev *dev)
+{
+       u8 reg_save;
+
+       pci_read_config_byte(dev, GPIOCTRL, &reg_save);
+       pci_write_config_byte(dev, GPIOCTRL, reg_save | 0x10);
+       lpc_ich_gpio_save = reg_save;
+}
+
+static void __devinit lpc_ich_finalize_cell(struct mfd_cell *cell,
+                                       const struct pci_device_id *id)
+{
+       cell->platform_data = &lpc_chipset_info[id->driver_data];
+       cell->pdata_size = sizeof(struct lpc_ich_info);
+}
+
+static int __devinit lpc_ich_init_gpio(struct pci_dev *dev,
+                               const struct pci_device_id *id)
+{
+       u32 base_addr_cfg;
+       u32 base_addr;
+       int ret;
+       bool acpi_conflict = false;
+       struct resource *res;
+
+       /* Setup power management base register */
+       pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
+       base_addr = base_addr_cfg & 0x0000ff80;
+       if (!base_addr) {
+               dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+               lpc_ich_cells[LPC_GPIO].num_resources--;
+               goto gpe0_done;
+       }
+
+       res = &gpio_ich_res[ICH_RES_GPE0];
+       res->start = base_addr + ACPIBASE_GPE_OFF;
+       res->end = base_addr + ACPIBASE_GPE_END;
+       ret = acpi_check_resource_conflict(res);
+       if (ret) {
+               /*
+                * This isn't fatal for the GPIO, but we have to make sure that
+                * the platform_device subsystem doesn't see this resource
+                * or it will register an invalid region.
+                */
+               lpc_ich_cells[LPC_GPIO].num_resources--;
+               acpi_conflict = true;
+       } else {
+               lpc_ich_enable_acpi_space(dev);
+       }
+
+gpe0_done:
+       /* Setup GPIO base register */
+       pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
+       base_addr = base_addr_cfg & 0x0000ff80;
+       if (!base_addr) {
+               dev_err(&dev->dev, "I/O space for GPIO uninitialized\n");
+               ret = -ENODEV;
+               goto gpio_done;
+       }
+
+       /* Older devices provide fewer GPIO and have a smaller resource size. */
+       res = &gpio_ich_res[ICH_RES_GPIO];
+       res->start = base_addr;
+       switch (lpc_chipset_info[id->driver_data].gpio_version) {
+       case ICH_V5_GPIO:
+       case ICH_V10CORP_GPIO:
+               res->end = res->start + 128 - 1;
+               break;
+       default:
+               res->end = res->start + 64 - 1;
+               break;
+       }
+
+       ret = acpi_check_resource_conflict(res);
+       if (ret) {
+               /* this isn't necessarily fatal for the GPIO */
+               acpi_conflict = true;
+               goto gpio_done;
+       }
+       lpc_ich_enable_gpio_space(dev);
+
+       lpc_ich_finalize_cell(&lpc_ich_cells[LPC_GPIO], id);
+       ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO],
+                               1, NULL, 0);
+
+gpio_done:
+       if (acpi_conflict)
+               pr_warn("Resource conflict(s) found affecting %s\n",
+                               lpc_ich_cells[LPC_GPIO].name);
+       return ret;
+}
+
+static int __devinit lpc_ich_init_wdt(struct pci_dev *dev,
+                               const struct pci_device_id *id)
+{
+       u32 base_addr_cfg;
+       u32 base_addr;
+       int ret;
+       bool acpi_conflict = false;
+       struct resource *res;
+
+       /* Setup power management base register */
+       pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
+       base_addr = base_addr_cfg & 0x0000ff80;
+       if (!base_addr) {
+               dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+               ret = -ENODEV;
+               goto wdt_done;
+       }
+
+       res = wdt_io_res(ICH_RES_IO_TCO);
+       res->start = base_addr + ACPIBASE_TCO_OFF;
+       res->end = base_addr + ACPIBASE_TCO_END;
+       ret = acpi_check_resource_conflict(res);
+       if (ret) {
+               acpi_conflict = true;
+               goto wdt_done;
+       }
+
+       res = wdt_io_res(ICH_RES_IO_SMI);
+       res->start = base_addr + ACPIBASE_SMI_OFF;
+       res->end = base_addr + ACPIBASE_SMI_END;
+       ret = acpi_check_resource_conflict(res);
+       if (ret) {
+               acpi_conflict = true;
+               goto wdt_done;
+       }
+       lpc_ich_enable_acpi_space(dev);
+
+       /*
+        * Get the Memory-Mapped GCS register. To get access to it
+        * we have to read RCBA from PCI Config space 0xf0 and use
+        * it as base. GCS = RCBA + ICH6_GCS(0x3410).
+        */
+       if (lpc_chipset_info[id->driver_data].iTCO_version == 2) {
+               pci_read_config_dword(dev, RCBABASE, &base_addr_cfg);
+               base_addr = base_addr_cfg & 0xffffc000;
+               if (!(base_addr_cfg & 1)) {
+                       pr_err("RCBA is disabled by hardware/BIOS, "
+                                       "device disabled\n");
+                       ret = -ENODEV;
+                       goto wdt_done;
+               }
+               res = wdt_mem_res(ICH_RES_MEM_GCS);
+               res->start = base_addr + ACPIBASE_GCS_OFF;
+               res->end = base_addr + ACPIBASE_GCS_END;
+               ret = acpi_check_resource_conflict(res);
+               if (ret) {
+                       acpi_conflict = true;
+                       goto wdt_done;
+               }
+       }
+
+       lpc_ich_finalize_cell(&lpc_ich_cells[LPC_WDT], id);
+       ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT],
+                               1, NULL, 0);
+
+wdt_done:
+       if (acpi_conflict)
+               pr_warn("Resource conflict(s) found affecting %s\n",
+                               lpc_ich_cells[LPC_WDT].name);
+       return ret;
+}
+
+static int __devinit lpc_ich_probe(struct pci_dev *dev,
+                               const struct pci_device_id *id)
+{
+       int ret;
+       bool cell_added = false;
+
+       ret = lpc_ich_init_wdt(dev, id);
+       if (!ret)
+               cell_added = true;
+
+       ret = lpc_ich_init_gpio(dev, id);
+       if (!ret)
+               cell_added = true;
+
+       /*
+        * We only care if at least one or none of the cells registered
+        * successfully.
+        */
+       if (!cell_added) {
+               lpc_ich_restore_config_space(dev);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static void __devexit lpc_ich_remove(struct pci_dev *dev)
+{
+       mfd_remove_devices(&dev->dev);
+       lpc_ich_restore_config_space(dev);
+}
+
+static struct pci_driver lpc_ich_driver = {
+       .name           = "lpc_ich",
+       .id_table       = lpc_ich_ids,
+       .probe          = lpc_ich_probe,
+       .remove         = __devexit_p(lpc_ich_remove),
+};
+
+static int __init lpc_ich_init(void)
+{
+       return pci_register_driver(&lpc_ich_driver);
+}
+
+static void __exit lpc_ich_exit(void)
+{
+       pci_unregister_driver(&lpc_ich_driver);
+}
+
+module_init(lpc_ich_init);
+module_exit(lpc_ich_exit);
+
+MODULE_AUTHOR("Aaron Sierra <asierra@xes-inc.com>");
+MODULE_DESCRIPTION("LPC interface for Intel ICH");
+MODULE_LICENSE("GPL");
index abc421364a454f0f596f7e5be734e346838318cb..9f20abc5e3937065238ff1f3240c27cde9cbb4f6 100644 (file)
@@ -36,6 +36,7 @@
 
 #define GPIOBASE       0x44
 #define GPIO_IO_SIZE   64
+#define GPIO_IO_SIZE_CENTERTON 128
 
 #define WDTBASE                0x84
 #define WDT_IO_SIZE    64
@@ -68,7 +69,7 @@ static struct resource wdt_sch_resource = {
 
 static struct mfd_cell tunnelcreek_cells[] = {
        {
-               .name = "tunnelcreek_wdt",
+               .name = "ie6xx_wdt",
                .num_resources = 1,
                .resources = &wdt_sch_resource,
        },
@@ -77,6 +78,7 @@ static struct mfd_cell tunnelcreek_cells[] = {
 static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CENTERTON_ILB) },
        { 0, }
 };
 MODULE_DEVICE_TABLE(pci, lpc_sch_ids);
@@ -115,7 +117,11 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
        }
 
        gpio_sch_resource.start = base_addr;
-       gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
+
+       if (id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB)
+               gpio_sch_resource.end = base_addr + GPIO_IO_SIZE_CENTERTON - 1;
+       else
+               gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
 
        for (i=0; i < ARRAY_SIZE(lpc_sch_cells); i++)
                lpc_sch_cells[i].id = id->device;
@@ -125,7 +131,8 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
        if (ret)
                goto out_dev;
 
-       if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC) {
+       if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC
+        || id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB) {
                pci_read_config_dword(dev, WDTBASE, &base_addr_cfg);
                if (!(base_addr_cfg & (1 << 31))) {
                        dev_err(&dev->dev, "Decode of the WDT I/O range disabled\n");
@@ -167,18 +174,7 @@ static struct pci_driver lpc_sch_driver = {
        .remove         = __devexit_p(lpc_sch_remove),
 };
 
-static int __init lpc_sch_init(void)
-{
-       return pci_register_driver(&lpc_sch_driver);
-}
-
-static void __exit lpc_sch_exit(void)
-{
-       pci_unregister_driver(&lpc_sch_driver);
-}
-
-module_init(lpc_sch_init);
-module_exit(lpc_sch_exit);
+module_pci_driver(lpc_sch_driver);
 
 MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>");
 MODULE_DESCRIPTION("LPC interface for Intel Poulsbo SCH");
diff --git a/drivers/mfd/max77693-irq.c b/drivers/mfd/max77693-irq.c
new file mode 100644 (file)
index 0000000..2b40356
--- /dev/null
@@ -0,0 +1,309 @@
+/*
+ * max77693-irq.c - Interrupt controller support for MAX77693
+ *
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * This driver is based on max8997-irq.c
+ */
+
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-private.h>
+
+/*
+ * Per-group interrupt mask register addresses, indexed by
+ * enum max77693_irq_source.  Code elsewhere treats an entry equal to
+ * MAX77693_REG_INVALID as "group has no mask register" and skips it.
+ */
+static const u8 max77693_mask_reg[] = {
+       [LED_INT] = MAX77693_LED_REG_FLASH_INT_MASK,
+       [TOPSYS_INT] = MAX77693_PMIC_REG_TOPSYS_INT_MASK,
+       [CHG_INT] = MAX77693_CHG_REG_CHG_INT_MASK,
+       [MUIC_INT1] = MAX77693_MUIC_REG_INTMASK1,
+       [MUIC_INT2] = MAX77693_MUIC_REG_INTMASK2,
+       [MUIC_INT3] = MAX77693_MUIC_REG_INTMASK3,
+};
+
+/*
+ * Map an interrupt source group to the regmap holding its registers:
+ * LED/TOPSYS/CHG live behind the PMIC regmap, the MUIC groups behind
+ * the dedicated MUIC regmap.  Returns ERR_PTR(-EINVAL) otherwise.
+ */
+static struct regmap *max77693_get_regmap(struct max77693_dev *max77693,
+                               enum max77693_irq_source src)
+{
+       if (src >= LED_INT && src <= CHG_INT)
+               return max77693->regmap;
+
+       if (src >= MUIC_INT1 && src <= MUIC_INT3)
+               return max77693->regmap_muic;
+
+       return ERR_PTR(-EINVAL);
+}
+
+/* Static descriptor for one interrupt: its group and mask bit. */
+struct max77693_irq_data {
+       int mask;
+       enum max77693_irq_source group;
+};
+
+/* Build one table entry, keyed by the MAX77693_*_IRQ_* index. */
+#define DECLARE_IRQ(idx, _group, _mask)                \
+       [(idx)] = { .group = (_group), .mask = (_mask) }
+static const struct max77693_irq_data max77693_irqs[] = {
+       DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_OPEN,        LED_INT, 1 << 0),
+       DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_SHORT,       LED_INT, 1 << 1),
+       DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_OPEN,        LED_INT, 1 << 2),
+       DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_SHORT,       LED_INT, 1 << 3),
+       DECLARE_IRQ(MAX77693_LED_IRQ_MAX_FLASH,         LED_INT, 1 << 4),
+
+       DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T120C_INT,      TOPSYS_INT, 1 << 0),
+       DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T140C_INT,      TOPSYS_INT, 1 << 1),
+       DECLARE_IRQ(MAX77693_TOPSYS_IRQ_LOWSYS_INT,     TOPSYS_INT, 1 << 3),
+
+       DECLARE_IRQ(MAX77693_CHG_IRQ_BYP_I,             CHG_INT, 1 << 0),
+       DECLARE_IRQ(MAX77693_CHG_IRQ_THM_I,             CHG_INT, 1 << 2),
+       DECLARE_IRQ(MAX77693_CHG_IRQ_BAT_I,             CHG_INT, 1 << 3),
+       DECLARE_IRQ(MAX77693_CHG_IRQ_CHG_I,             CHG_INT, 1 << 4),
+       DECLARE_IRQ(MAX77693_CHG_IRQ_CHGIN_I,           CHG_INT, 1 << 6),
+
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC,         MUIC_INT1, 1 << 0),
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_LOW,     MUIC_INT1, 1 << 1),
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_ERR,     MUIC_INT1, 1 << 2),
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC1K,       MUIC_INT1, 1 << 3),
+
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGTYP,      MUIC_INT2, 1 << 0),
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGDETREUN,  MUIC_INT2, 1 << 1),
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DCDTMR,      MUIC_INT2, 1 << 2),
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DXOVP,       MUIC_INT2, 1 << 3),
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VBVOLT,      MUIC_INT2, 1 << 4),
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VIDRM,       MUIC_INT2, 1 << 5),
+
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_EOC,         MUIC_INT3, 1 << 0),
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CGMBC,       MUIC_INT3, 1 << 1),
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_OVP,         MUIC_INT3, 1 << 2),
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR,  MUIC_INT3, 1 << 3),
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CHG_ENABLED, MUIC_INT3, 1 << 4),
+       DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_BAT_DET,     MUIC_INT3, 1 << 5),
+};
+
+/*
+ * irq_bus_lock callback: serialise cached-mask updates.  The matching
+ * irq_bus_sync_unlock (max77693_irq_sync_unlock) flushes the cache to
+ * the hardware over I2C and releases the mutex.
+ */
+static void max77693_irq_lock(struct irq_data *data)
+{
+       struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+
+       mutex_lock(&max77693->irqlock);
+}
+
+/*
+ * irq_bus_sync_unlock callback: write the cached mask state of every
+ * group back to its hardware mask register, then drop the bus mutex
+ * taken in max77693_irq_lock.
+ */
+static void max77693_irq_sync_unlock(struct irq_data *data)
+{
+       struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+       int grp;
+
+       for (grp = 0; grp < MAX77693_IRQ_GROUP_NR; grp++) {
+               struct regmap *map = max77693_get_regmap(max77693, grp);
+               u8 mask_reg = max77693_mask_reg[grp];
+
+               /* Skip groups without a mask register or a usable regmap. */
+               if (mask_reg == MAX77693_REG_INVALID || IS_ERR_OR_NULL(map))
+                       continue;
+
+               max77693->irq_masks_cache[grp] = max77693->irq_masks_cur[grp];
+               max77693_write_reg(map, mask_reg,
+                               max77693->irq_masks_cur[grp]);
+       }
+
+       mutex_unlock(&max77693->irqlock);
+}
+
+/*
+ * Look up the static descriptor for a hardware irq index.
+ * ('inline' must come before the type qualifiers; the previous
+ * 'static const inline' ordering draws an old-style-declaration
+ * warning from GCC.)
+ */
+static inline const struct max77693_irq_data *
+irq_to_max77693_irq(struct max77693_dev *max77693, int irq)
+{
+       return &max77693_irqs[irq];
+}
+
+/*
+ * Update the cached mask for one irq; hardware is written later at
+ * bus-sync-unlock time.  MUIC groups use inverted polarity (see the
+ * "MUIC IRQ 0:MASK 1:NOT MASK" note in max77693_irq_init), so masking
+ * clears the bit there and sets it for every other group.
+ */
+static void max77693_irq_mask(struct irq_data *data)
+{
+       struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+       const struct max77693_irq_data *irq_data =
+                               irq_to_max77693_irq(max77693, data->irq);
+
+       if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
+               max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
+       else
+               max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
+}
+
+/*
+ * Mirror image of max77693_irq_mask: set the MUIC bit (inverted
+ * polarity) or clear it for the other groups, in the cache only.
+ */
+static void max77693_irq_unmask(struct irq_data *data)
+{
+       struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+       const struct max77693_irq_data *irq_data =
+           irq_to_max77693_irq(max77693, data->irq);
+
+       if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
+               max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
+       else
+               max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
+}
+
+/* Slow-bus irq_chip: mask/unmask only touch the cache under the bus lock. */
+static struct irq_chip max77693_irq_chip = {
+       .name                   = "max77693",
+       .irq_bus_lock           = max77693_irq_lock,
+       .irq_bus_sync_unlock    = max77693_irq_sync_unlock,
+       .irq_mask               = max77693_irq_mask,
+       .irq_unmask             = max77693_irq_unmask,
+};
+
+/* Bits of MAX77693_PMIC_REG_INTSRC: which block raised the interrupt. */
+#define MAX77693_IRQSRC_CHG            (1 << 0)
+#define MAX77693_IRQSRC_TOP            (1 << 1)
+#define MAX77693_IRQSRC_FLASH          (1 << 2)
+#define MAX77693_IRQSRC_MUIC           (1 << 3)
+/*
+ * Threaded handler: read the top-level source register, then the status
+ * register of each flagged block, apply the cached masks (MUIC polarity
+ * inverted) and dispatch every pending bit as a nested virq.
+ *
+ * NOTE(review): the return codes of the three per-block reads below are
+ * assigned to 'ret' but never checked; a failed read leaves that
+ * irq_reg[] entry zero and its events silently dropped — confirm this
+ * is intentional.
+ */
+static irqreturn_t max77693_irq_thread(int irq, void *data)
+{
+       struct max77693_dev *max77693 = data;
+       u8 irq_reg[MAX77693_IRQ_GROUP_NR] = {};
+       u8 irq_src;
+       int ret;
+       int i, cur_irq;
+
+       ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_INTSRC,
+                               &irq_src);
+       if (ret < 0) {
+               dev_err(max77693->dev, "Failed to read interrupt source: %d\n",
+                               ret);
+               return IRQ_NONE;
+       }
+
+       if (irq_src & MAX77693_IRQSRC_CHG)
+               /* CHG_INT */
+               ret = max77693_read_reg(max77693->regmap, MAX77693_CHG_REG_CHG_INT,
+                               &irq_reg[CHG_INT]);
+
+       if (irq_src & MAX77693_IRQSRC_TOP)
+               /* TOPSYS_INT */
+               ret = max77693_read_reg(max77693->regmap,
+                       MAX77693_PMIC_REG_TOPSYS_INT, &irq_reg[TOPSYS_INT]);
+
+       if (irq_src & MAX77693_IRQSRC_FLASH)
+               /* LED_INT */
+               ret = max77693_read_reg(max77693->regmap,
+                       MAX77693_LED_REG_FLASH_INT, &irq_reg[LED_INT]);
+
+       if (irq_src & MAX77693_IRQSRC_MUIC)
+               /* MUIC INT1 ~ INT3 */
+               max77693_bulk_read(max77693->regmap, MAX77693_MUIC_REG_INT1,
+                       MAX77693_NUM_IRQ_MUIC_REGS, &irq_reg[MUIC_INT1]);
+
+       /* Apply masking */
+       for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
+               if (i >= MUIC_INT1 && i <= MUIC_INT3)
+                       irq_reg[i] &= max77693->irq_masks_cur[i];
+               else
+                       irq_reg[i] &= ~max77693->irq_masks_cur[i];
+       }
+
+       /* Report */
+       for (i = 0; i < MAX77693_IRQ_NR; i++) {
+               if (irq_reg[max77693_irqs[i].group] & max77693_irqs[i].mask) {
+                       cur_irq = irq_find_mapping(max77693->irq_domain, i);
+                       if (cur_irq)
+                               handle_nested_irq(cur_irq);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Re-run the threaded handler on resume so events latched while
+ * suspended get delivered.  The irq argument is 0 because the handler
+ * only uses its data pointer, never the irq number.
+ */
+int max77693_irq_resume(struct max77693_dev *max77693)
+{
+       if (max77693->irq)
+               max77693_irq_thread(0, max77693);
+
+       return 0;
+}
+
+/*
+ * irq_domain map callback: wire a freshly allocated virq to the
+ * max77693 chip, mark it nested (handled from the threaded parent),
+ * and make it requestable (ARM needs IRQF_VALID; elsewhere noprobe).
+ */
+static int max77693_irq_domain_map(struct irq_domain *d, unsigned int irq,
+                               irq_hw_number_t hw)
+{
+       struct max77693_dev *max77693 = d->host_data;
+
+       irq_set_chip_data(irq, max77693);
+       irq_set_chip_and_handler(irq, &max77693_irq_chip, handle_edge_irq);
+       irq_set_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+       set_irq_flags(irq, IRQF_VALID);
+#else
+       irq_set_noprobe(irq);
+#endif
+       return 0;
+}
+
+/* NOTE(review): never modified — could be declared const if the
+ * irq_domain API of this kernel version accepts it; verify. */
+static struct irq_domain_ops max77693_irq_domain_ops = {
+       .map = max77693_irq_domain_map,
+};
+
+/*
+ * max77693_irq_init() - mask everything, create the linear irq domain
+ * and request the threaded parent interrupt.
+ *
+ * Returns 0 on success or a negative errno.  Unlike the original code,
+ * a failing request_threaded_irq() is now propagated instead of being
+ * logged and swallowed, so the caller does not continue with a dead
+ * interrupt line.
+ */
+int max77693_irq_init(struct max77693_dev *max77693)
+{
+       struct irq_domain *domain;
+       int i;
+       int ret;
+
+       mutex_init(&max77693->irqlock);
+
+       /* Mask individual interrupt sources */
+       for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
+               struct regmap *map;
+               /* MUIC IRQ  0:MASK 1:NOT MASK */
+               /* Other IRQ 1:MASK 0:NOT MASK */
+               if (i >= MUIC_INT1 && i <= MUIC_INT3) {
+                       max77693->irq_masks_cur[i] = 0x00;
+                       max77693->irq_masks_cache[i] = 0x00;
+               } else {
+                       max77693->irq_masks_cur[i] = 0xff;
+                       max77693->irq_masks_cache[i] = 0xff;
+               }
+               map = max77693_get_regmap(max77693, i);
+
+               if (IS_ERR_OR_NULL(map))
+                       continue;
+               if (max77693_mask_reg[i] == MAX77693_REG_INVALID)
+                       continue;
+               if (i >= MUIC_INT1 && i <= MUIC_INT3)
+                       max77693_write_reg(map, max77693_mask_reg[i], 0x00);
+               else
+                       max77693_write_reg(map, max77693_mask_reg[i], 0xff);
+       }
+
+       domain = irq_domain_add_linear(NULL, MAX77693_IRQ_NR,
+                                       &max77693_irq_domain_ops, max77693);
+       if (!domain) {
+               dev_err(max77693->dev, "could not create irq domain\n");
+               return -ENODEV;
+       }
+       max77693->irq_domain = domain;
+
+       ret = request_threaded_irq(max77693->irq, NULL, max77693_irq_thread,
+                                  IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                  "max77693-irq", max77693);
+       if (ret) {
+               dev_err(max77693->dev, "Failed to request IRQ %d: %d\n",
+                       max77693->irq, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Release the parent interrupt requested in max77693_irq_init(). */
+void max77693_irq_exit(struct max77693_dev *max77693)
+{
+       if (max77693->irq)
+               free_irq(max77693->irq, max77693);
+}
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
new file mode 100644 (file)
index 0000000..e9e4278
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+ * max77693.c - mfd core driver for the MAX 77693
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * SangYoung Son <hello.son@smasung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * This driver is based on max8997.c
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-private.h>
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+
+/* Fixed secondary I2C addresses of the chip's register banks
+ * (7-bit, hence the >> 1 of the datasheet's 8-bit write address). */
+#define I2C_ADDR_PMIC  (0xCC >> 1)     /* Charger, Flash LED */
+#define I2C_ADDR_MUIC  (0x4A >> 1)
+#define I2C_ADDR_HAPTIC        (0x90 >> 1)
+
+/* Child platform devices registered via mfd_add_devices(). */
+static struct mfd_cell max77693_devs[] = {
+       { .name = "max77693-pmic", },
+       { .name = "max77693-charger", },
+       { .name = "max77693-flash", },
+       { .name = "max77693-muic", },
+       { .name = "max77693-haptic", },
+};
+
+/*
+ * max77693_read_reg() - read one 8-bit register through the regmap.
+ *
+ * Returns 0 on success or a negative regmap error code.  *dest is
+ * written only on success; the previous code copied the local (then
+ * uninitialized) value into *dest even when regmap_read() failed.
+ */
+int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest)
+{
+       unsigned int val;
+       int ret;
+
+       ret = regmap_read(map, reg, &val);
+       if (ret == 0)
+               *dest = val;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_read_reg);
+
+/* Read 'count' consecutive 8-bit registers starting at 'reg' into buf. */
+int max77693_bulk_read(struct regmap *map, u8 reg, int count, u8 *buf)
+{
+       return regmap_bulk_read(map, reg, buf, count);
+}
+EXPORT_SYMBOL_GPL(max77693_bulk_read);
+
+/* Write one 8-bit register through the regmap. */
+int max77693_write_reg(struct regmap *map, u8 reg, u8 value)
+{
+       return regmap_write(map, reg, value);
+}
+EXPORT_SYMBOL_GPL(max77693_write_reg);
+
+/* Write 'count' consecutive 8-bit registers starting at 'reg' from buf. */
+int max77693_bulk_write(struct regmap *map, u8 reg, int count, u8 *buf)
+{
+       return regmap_bulk_write(map, reg, buf, count);
+}
+EXPORT_SYMBOL_GPL(max77693_bulk_write);
+
+/* Read-modify-write the bits of 'mask' in 'reg' to 'val'. */
+int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask)
+{
+       return regmap_update_bits(map, reg, mask, val);
+}
+EXPORT_SYMBOL_GPL(max77693_update_reg);
+
+/* 8-bit address / 8-bit value register layout of the PMIC block. */
+static const struct regmap_config max77693_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = MAX77693_PMIC_REG_END,
+};
+
+/*
+ * max77693_i2c_probe() - allocate state, probe the chip ID, create the
+ * dummy clients for the MUIC/haptic register banks, set up interrupts
+ * and register the MFD children.
+ *
+ * Fixes over the original:
+ *  - the device pointers are filled in before any path that can log,
+ *    so dev_err() no longer dereferences the still-NULL max77693->dev;
+ *  - missing platform data now fails with -EINVAL instead of exiting
+ *    through the error path with ret == 0 (bogus success);
+ *  - the kfree() of devm_kzalloc'd memory (double free on error and
+ *    again at device teardown) is gone;
+ *  - i2c_new_dummy() NULL returns are checked;
+ *  - a failing mfd_add_devices() also tears down the IRQ setup.
+ */
+static int max77693_i2c_probe(struct i2c_client *i2c,
+                             const struct i2c_device_id *id)
+{
+       struct max77693_dev *max77693;
+       struct max77693_platform_data *pdata = i2c->dev.platform_data;
+       u8 reg_data;
+       int ret;
+
+       max77693 = devm_kzalloc(&i2c->dev,
+                       sizeof(struct max77693_dev), GFP_KERNEL);
+       if (max77693 == NULL)
+               return -ENOMEM;
+
+       i2c_set_clientdata(i2c, max77693);
+       max77693->dev = &i2c->dev;
+       max77693->i2c = i2c;
+       max77693->irq = i2c->irq;
+       max77693->type = id->driver_data;
+
+       max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
+       if (IS_ERR(max77693->regmap)) {
+               ret = PTR_ERR(max77693->regmap);
+               dev_err(max77693->dev, "failed to allocate register map: %d\n",
+                               ret);
+               return ret;
+       }
+
+       if (!pdata) {
+               dev_err(max77693->dev, "No platform data found.\n");
+               return -EINVAL;
+       }
+
+       max77693->wakeup = pdata->wakeup;
+
+       mutex_init(&max77693->iolock);
+
+       if (max77693_read_reg(max77693->regmap,
+                               MAX77693_PMIC_REG_PMIC_ID2, &reg_data) < 0) {
+               dev_err(max77693->dev, "device not found on this channel\n");
+               return -ENODEV;
+       }
+       dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
+
+       /* Companion register banks sit at fixed secondary addresses. */
+       max77693->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
+       if (!max77693->muic)
+               return -ENODEV;
+       i2c_set_clientdata(max77693->muic, max77693);
+
+       max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
+       if (!max77693->haptic) {
+               ret = -ENODEV;
+               goto err_muic;
+       }
+       i2c_set_clientdata(max77693->haptic, max77693);
+
+       ret = max77693_irq_init(max77693);
+       if (ret < 0)
+               goto err_haptic;
+
+       pm_runtime_set_active(max77693->dev);
+
+       ret = mfd_add_devices(max77693->dev, -1, max77693_devs,
+                       ARRAY_SIZE(max77693_devs), NULL, 0);
+       if (ret < 0)
+               goto err_irq;
+
+       device_init_wakeup(max77693->dev, pdata->wakeup);
+
+       return 0;
+
+err_irq:
+       max77693_irq_exit(max77693);
+err_haptic:
+       i2c_unregister_device(max77693->haptic);
+err_muic:
+       i2c_unregister_device(max77693->muic);
+       return ret;
+}
+
+/* Tear down the MFD children and the dummy MUIC/haptic clients.
+ * The state struct itself is devm-managed and freed by the core. */
+static int max77693_i2c_remove(struct i2c_client *i2c)
+{
+       struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
+
+       mfd_remove_devices(max77693->dev);
+       i2c_unregister_device(max77693->muic);
+       i2c_unregister_device(max77693->haptic);
+
+       return 0;
+}
+
+/* I2C device id table; driver_data carries the chip type. */
+static const struct i2c_device_id max77693_i2c_id[] = {
+       { "max77693", TYPE_MAX77693 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, max77693_i2c_id);
+
+/* Arm the MAX77693 interrupt as a wakeup source while suspended. */
+static int max77693_suspend(struct device *dev)
+{
+       struct i2c_client *i2c = to_i2c_client(dev);
+       struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
+
+       if (device_may_wakeup(dev))
+               irq_set_irq_wake(max77693->irq, 1);
+
+       return 0;
+}
+
+/*
+ * Disarm the wakeup source and replay any interrupts that were latched
+ * while the system slept (via max77693_irq_resume).
+ */
+static int max77693_resume(struct device *dev)
+{
+       struct i2c_client *i2c = to_i2c_client(dev);
+       struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
+
+       if (device_may_wakeup(dev))
+               irq_set_irq_wake(max77693->irq, 0);
+
+       return max77693_irq_resume(max77693);
+}
+
+/* System suspend/resume hooks.
+ * NOTE(review): non-static global symbol 'max77693_pm' pollutes the
+ * kernel namespace; unless another file externs it, it should be
+ * static — confirm before changing linkage. */
+const struct dev_pm_ops max77693_pm = {
+       .suspend = max77693_suspend,
+       .resume = max77693_resume,
+};
+
+/* I2C driver glue binding probe/remove and the PM ops above. */
+static struct i2c_driver max77693_i2c_driver = {
+       .driver = {
+                  .name = "max77693",
+                  .owner = THIS_MODULE,
+                  .pm = &max77693_pm,
+       },
+       .probe = max77693_i2c_probe,
+       .remove = max77693_i2c_remove,
+       .id_table = max77693_i2c_id,
+};
+
+/*
+ * Deliberately NOT module_i2c_driver(): that would register at
+ * device_initcall time, while this PMIC must come up at subsys_initcall
+ * so its regulator/charger consumers can probe during boot.
+ */
+static int __init max77693_i2c_init(void)
+{
+       return i2c_add_driver(&max77693_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(max77693_i2c_init);
+
+static void __exit max77693_i2c_exit(void)
+{
+       i2c_del_driver(&max77693_i2c_driver);
+}
+module_exit(max77693_i2c_exit);
+
+MODULE_DESCRIPTION("MAXIM 77693 multi-function core driver");
+MODULE_AUTHOR("SangYoung, Son <hello.son@samsung.com>");
+MODULE_LICENSE("GPL");
index 738722cdecaaa23e075b25cf2407357cea372ee7..f0ea3b8b3e4ad979d5e1d5fe53bde83a7392dfd2 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
 #include <linux/interrupt.h>
-#include <linux/spi/spi.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/mc13xxx.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 
-struct mc13xxx {
-       struct spi_device *spidev;
-       struct mutex lock;
-       int irq;
-       int flags;
-
-       irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
-       void *irqdata[MC13XXX_NUM_IRQ];
-
-       int adcflags;
-};
+#include "mc13xxx.h"
 
 #define MC13XXX_IRQSTAT0       0
 #define MC13XXX_IRQSTAT0_ADCDONEI      (1 << 0)
@@ -139,34 +128,29 @@ struct mc13xxx {
 
 #define MC13XXX_ADC2           45
 
-#define MC13XXX_NUMREGS 0x3f
-
 void mc13xxx_lock(struct mc13xxx *mc13xxx)
 {
        if (!mutex_trylock(&mc13xxx->lock)) {
-               dev_dbg(&mc13xxx->spidev->dev, "wait for %s from %pf\n",
+               dev_dbg(mc13xxx->dev, "wait for %s from %pf\n",
                                __func__, __builtin_return_address(0));
 
                mutex_lock(&mc13xxx->lock);
        }
-       dev_dbg(&mc13xxx->spidev->dev, "%s from %pf\n",
+       dev_dbg(mc13xxx->dev, "%s from %pf\n",
                        __func__, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(mc13xxx_lock);
 
 void mc13xxx_unlock(struct mc13xxx *mc13xxx)
 {
-       dev_dbg(&mc13xxx->spidev->dev, "%s from %pf\n",
+       dev_dbg(mc13xxx->dev, "%s from %pf\n",
                        __func__, __builtin_return_address(0));
        mutex_unlock(&mc13xxx->lock);
 }
 EXPORT_SYMBOL(mc13xxx_unlock);
 
-#define MC13XXX_REGOFFSET_SHIFT 25
 int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
 {
-       struct spi_transfer t;
-       struct spi_message m;
        int ret;
 
        BUG_ON(!mutex_is_locked(&mc13xxx->lock));
@@ -174,84 +158,35 @@ int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
        if (offset > MC13XXX_NUMREGS)
                return -EINVAL;
 
-       *val = offset << MC13XXX_REGOFFSET_SHIFT;
-
-       memset(&t, 0, sizeof(t));
-
-       t.tx_buf = val;
-       t.rx_buf = val;
-       t.len = sizeof(u32);
-
-       spi_message_init(&m);
-       spi_message_add_tail(&t, &m);
-
-       ret = spi_sync(mc13xxx->spidev, &m);
-
-       /* error in message.status implies error return from spi_sync */
-       BUG_ON(!ret && m.status);
+       ret = regmap_read(mc13xxx->regmap, offset, val);
+       dev_vdbg(mc13xxx->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
 
-       if (ret)
-               return ret;
-
-       *val &= 0xffffff;
-
-       dev_vdbg(&mc13xxx->spidev->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
-
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL(mc13xxx_reg_read);
 
 int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val)
 {
-       u32 buf;
-       struct spi_transfer t;
-       struct spi_message m;
-       int ret;
-
        BUG_ON(!mutex_is_locked(&mc13xxx->lock));
 
-       dev_vdbg(&mc13xxx->spidev->dev, "[0x%02x] <- 0x%06x\n", offset, val);
+       dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x\n", offset, val);
 
        if (offset > MC13XXX_NUMREGS || val > 0xffffff)
                return -EINVAL;
 
-       buf = 1 << 31 | offset << MC13XXX_REGOFFSET_SHIFT | val;
-
-       memset(&t, 0, sizeof(t));
-
-       t.tx_buf = &buf;
-       t.rx_buf = &buf;
-       t.len = sizeof(u32);
-
-       spi_message_init(&m);
-       spi_message_add_tail(&t, &m);
-
-       ret = spi_sync(mc13xxx->spidev, &m);
-
-       BUG_ON(!ret && m.status);
-
-       if (ret)
-               return ret;
-
-       return 0;
+       return regmap_write(mc13xxx->regmap, offset, val);
 }
 EXPORT_SYMBOL(mc13xxx_reg_write);
 
 int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset,
                u32 mask, u32 val)
 {
-       int ret;
-       u32 valread;
-
+       BUG_ON(!mutex_is_locked(&mc13xxx->lock));
        BUG_ON(val & ~mask);
+       dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x (mask: 0x%06x)\n",
+                       offset, val, mask);
 
-       ret = mc13xxx_reg_read(mc13xxx, offset, &valread);
-       if (ret)
-               return ret;
-
-       valread = (valread & ~mask) | val;
-
-       return mc13xxx_reg_write(mc13xxx, offset, valread);
+       return regmap_update_bits(mc13xxx->regmap, offset, mask, val);
 }
 EXPORT_SYMBOL(mc13xxx_reg_rmw);
 
@@ -439,7 +374,7 @@ static int mc13xxx_irq_handle(struct mc13xxx *mc13xxx,
                        if (handled == IRQ_HANDLED)
                                num_handled++;
                } else {
-                       dev_err(&mc13xxx->spidev->dev,
+                       dev_err(mc13xxx->dev,
                                        "BUG: irq %u but no handler\n",
                                        baseirq + irq);
 
@@ -475,25 +410,23 @@ static irqreturn_t mc13xxx_irq_thread(int irq, void *data)
        return IRQ_RETVAL(handled);
 }
 
-enum mc13xxx_id {
-       MC13XXX_ID_MC13783,
-       MC13XXX_ID_MC13892,
-       MC13XXX_ID_INVALID,
-};
-
 static const char *mc13xxx_chipname[] = {
        [MC13XXX_ID_MC13783] = "mc13783",
        [MC13XXX_ID_MC13892] = "mc13892",
 };
 
 #define maskval(reg, mask)     (((reg) & (mask)) >> __ffs(mask))
-static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
+static int mc13xxx_identify(struct mc13xxx *mc13xxx)
 {
        u32 icid;
        u32 revision;
-       const char *name;
        int ret;
 
+       /*
+        * Get the generation ID from register 46, as apparently some older
+        * IC revisions only have this info at this location. Newer ICs seem to
+        * have both.
+        */
        ret = mc13xxx_reg_read(mc13xxx, 46, &icid);
        if (ret)
                return ret;
@@ -502,26 +435,23 @@ static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
 
        switch (icid) {
        case 2:
-               *id = MC13XXX_ID_MC13783;
-               name = "mc13783";
+               mc13xxx->ictype = MC13XXX_ID_MC13783;
                break;
        case 7:
-               *id = MC13XXX_ID_MC13892;
-               name = "mc13892";
+               mc13xxx->ictype = MC13XXX_ID_MC13892;
                break;
        default:
-               *id = MC13XXX_ID_INVALID;
+               mc13xxx->ictype = MC13XXX_ID_INVALID;
                break;
        }
 
-       if (*id == MC13XXX_ID_MC13783 || *id == MC13XXX_ID_MC13892) {
+       if (mc13xxx->ictype == MC13XXX_ID_MC13783 ||
+                       mc13xxx->ictype == MC13XXX_ID_MC13892) {
                ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
-               if (ret)
-                       return ret;
 
-               dev_info(&mc13xxx->spidev->dev, "%s: rev: %d.%d, "
+               dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
                                "fin: %d, fab: %d, icid: %d/%d\n",
-                               mc13xxx_chipname[*id],
+                               mc13xxx_chipname[mc13xxx->ictype],
                                maskval(revision, MC13XXX_REVISION_REVFULL),
                                maskval(revision, MC13XXX_REVISION_REVMETAL),
                                maskval(revision, MC13XXX_REVISION_FIN),
@@ -530,26 +460,12 @@ static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
                                maskval(revision, MC13XXX_REVISION_ICIDCODE));
        }
 
-       if (*id != MC13XXX_ID_INVALID) {
-               const struct spi_device_id *devid =
-                       spi_get_device_id(mc13xxx->spidev);
-               if (!devid || devid->driver_data != *id)
-                       dev_warn(&mc13xxx->spidev->dev, "device id doesn't "
-                                       "match auto detection!\n");
-       }
-
-       return 0;
+       return (mc13xxx->ictype == MC13XXX_ID_INVALID) ? -ENODEV : 0;
 }
 
 static const char *mc13xxx_get_chipname(struct mc13xxx *mc13xxx)
 {
-       const struct spi_device_id *devid =
-               spi_get_device_id(mc13xxx->spidev);
-
-       if (!devid)
-               return NULL;
-
-       return mc13xxx_chipname[devid->driver_data];
+       return mc13xxx_chipname[mc13xxx->ictype];
 }
 
 int mc13xxx_get_flags(struct mc13xxx *mc13xxx)
@@ -592,7 +508,7 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
        };
        init_completion(&adcdone_data.done);
 
-       dev_dbg(&mc13xxx->spidev->dev, "%s\n", __func__);
+       dev_dbg(mc13xxx->dev, "%s\n", __func__);
 
        mc13xxx_lock(mc13xxx);
 
@@ -637,7 +553,8 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
        adc1 |= ato << MC13783_ADC1_ATO_SHIFT;
        if (atox)
                adc1 |= MC13783_ADC1_ATOX;
-       dev_dbg(&mc13xxx->spidev->dev, "%s: request irq\n", __func__);
+
+       dev_dbg(mc13xxx->dev, "%s: request irq\n", __func__);
        mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE,
                        mc13xxx_handler_adcdone, __func__, &adcdone_data);
        mc13xxx_irq_ack(mc13xxx, MC13XXX_IRQ_ADCDONE);
@@ -695,7 +612,7 @@ static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
        if (!cell.name)
                return -ENOMEM;
 
-       return mfd_add_devices(&mc13xxx->spidev->dev, -1, &cell, 1, NULL, 0);
+       return mfd_add_devices(mc13xxx->dev, -1, &cell, 1, NULL, 0);
 }
 
 static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
@@ -706,7 +623,7 @@ static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
 #ifdef CONFIG_OF
 static int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
 {
-       struct device_node *np = mc13xxx->spidev->dev.of_node;
+       struct device_node *np = mc13xxx->dev->of_node;
 
        if (!np)
                return -ENODEV;
@@ -732,55 +649,15 @@ static inline int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
 }
 #endif
 
-static const struct spi_device_id mc13xxx_device_id[] = {
-       {
-               .name = "mc13783",
-               .driver_data = MC13XXX_ID_MC13783,
-       }, {
-               .name = "mc13892",
-               .driver_data = MC13XXX_ID_MC13892,
-       }, {
-               /* sentinel */
-       }
-};
-MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
-
-static const struct of_device_id mc13xxx_dt_ids[] = {
-       { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
-       { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
-       { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
-
-static int mc13xxx_probe(struct spi_device *spi)
+int mc13xxx_common_init(struct mc13xxx *mc13xxx,
+               struct mc13xxx_platform_data *pdata, int irq)
 {
-       const struct of_device_id *of_id;
-       struct spi_driver *sdrv = to_spi_driver(spi->dev.driver);
-       struct mc13xxx *mc13xxx;
-       struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
-       enum mc13xxx_id id;
        int ret;
 
-       of_id = of_match_device(mc13xxx_dt_ids, &spi->dev);
-       if (of_id)
-               sdrv->id_table = &mc13xxx_device_id[(enum mc13xxx_id) of_id->data];
-
-       mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
-       if (!mc13xxx)
-               return -ENOMEM;
-
-       dev_set_drvdata(&spi->dev, mc13xxx);
-       spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
-       spi->bits_per_word = 32;
-       spi_setup(spi);
-
-       mc13xxx->spidev = spi;
-
-       mutex_init(&mc13xxx->lock);
        mc13xxx_lock(mc13xxx);
 
-       ret = mc13xxx_identify(mc13xxx, &id);
-       if (ret || id == MC13XXX_ID_INVALID)
+       ret = mc13xxx_identify(mc13xxx);
+       if (ret)
                goto err_revision;
 
        /* mask all irqs */
@@ -792,18 +669,19 @@ static int mc13xxx_probe(struct spi_device *spi)
        if (ret)
                goto err_mask;
 
-       ret = request_threaded_irq(spi->irq, NULL, mc13xxx_irq_thread,
+       ret = request_threaded_irq(irq, NULL, mc13xxx_irq_thread,
                        IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "mc13xxx", mc13xxx);
 
        if (ret) {
 err_mask:
 err_revision:
                mc13xxx_unlock(mc13xxx);
-               dev_set_drvdata(&spi->dev, NULL);
                kfree(mc13xxx);
                return ret;
        }
 
+       mc13xxx->irq = irq;
+
        mc13xxx_unlock(mc13xxx);
 
        if (mc13xxx_probe_flags_dt(mc13xxx) < 0 && pdata)
@@ -838,42 +716,19 @@ err_revision:
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(mc13xxx_common_init);
 
-static int __devexit mc13xxx_remove(struct spi_device *spi)
+void mc13xxx_common_cleanup(struct mc13xxx *mc13xxx)
 {
-       struct mc13xxx *mc13xxx = dev_get_drvdata(&spi->dev);
+       free_irq(mc13xxx->irq, mc13xxx);
 
-       free_irq(mc13xxx->spidev->irq, mc13xxx);
+       mfd_remove_devices(mc13xxx->dev);
 
-       mfd_remove_devices(&spi->dev);
+       regmap_exit(mc13xxx->regmap);
 
        kfree(mc13xxx);
-
-       return 0;
-}
-
-static struct spi_driver mc13xxx_driver = {
-       .id_table = mc13xxx_device_id,
-       .driver = {
-               .name = "mc13xxx",
-               .owner = THIS_MODULE,
-               .of_match_table = mc13xxx_dt_ids,
-       },
-       .probe = mc13xxx_probe,
-       .remove = __devexit_p(mc13xxx_remove),
-};
-
-static int __init mc13xxx_init(void)
-{
-       return spi_register_driver(&mc13xxx_driver);
-}
-subsys_initcall(mc13xxx_init);
-
-static void __exit mc13xxx_exit(void)
-{
-       spi_unregister_driver(&mc13xxx_driver);
 }
-module_exit(mc13xxx_exit);
+EXPORT_SYMBOL_GPL(mc13xxx_common_cleanup);
 
 MODULE_DESCRIPTION("Core driver for Freescale MC13XXX PMIC");
 MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
new file mode 100644 (file)
index 0000000..d22501d
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2009-2010 Creative Product Design
+ * Marc Reilly marc@cpdesign.com.au
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mc13xxx.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+
+#include "mc13xxx.h"
+
+static const struct i2c_device_id mc13xxx_i2c_device_id[] = {
+       {
+               .name = "mc13892",
+               .driver_data = MC13XXX_ID_MC13892,
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(i2c, mc13xxx_i2c_device_id);
+
+static const struct of_device_id mc13xxx_dt_ids[] = {
+       {
+               .compatible = "fsl,mc13892",
+               .data = (void *) &mc13xxx_i2c_device_id[0],
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
+
+static struct regmap_config mc13xxx_regmap_i2c_config = {
+       .reg_bits = 8,
+       .val_bits = 24,
+
+       .max_register = MC13XXX_NUMREGS,
+
+       .cache_type = REGCACHE_NONE,
+};
+
+static int mc13xxx_i2c_probe(struct i2c_client *client,
+               const struct i2c_device_id *id)
+{
+       const struct of_device_id *of_id;
+       struct i2c_driver *idrv = to_i2c_driver(client->dev.driver);
+       struct mc13xxx *mc13xxx;
+       struct mc13xxx_platform_data *pdata = dev_get_platdata(&client->dev);
+       int ret;
+
+       of_id = of_match_device(mc13xxx_dt_ids, &client->dev);
+       if (of_id)
+               idrv->id_table = (const struct i2c_device_id*) of_id->data;
+
+       mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
+       if (!mc13xxx)
+               return -ENOMEM;
+
+       dev_set_drvdata(&client->dev, mc13xxx);
+
+       mc13xxx->dev = &client->dev;
+       mutex_init(&mc13xxx->lock);
+
+       mc13xxx->regmap = regmap_init_i2c(client, &mc13xxx_regmap_i2c_config);
+       if (IS_ERR(mc13xxx->regmap)) {
+               ret = PTR_ERR(mc13xxx->regmap);
+               dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
+                               ret);
+               dev_set_drvdata(&client->dev, NULL);
+               kfree(mc13xxx);
+               return ret;
+       }
+
+       ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);
+
+       if (ret == 0 && (id->driver_data != mc13xxx->ictype))
+               dev_warn(mc13xxx->dev,
+                               "device id doesn't match auto detection!\n");
+
+       return ret;
+}
+
+static int __devexit mc13xxx_i2c_remove(struct i2c_client *client)
+{
+       struct mc13xxx *mc13xxx = dev_get_drvdata(&client->dev);
+
+       mc13xxx_common_cleanup(mc13xxx);
+
+       return 0;
+}
+
+static struct i2c_driver mc13xxx_i2c_driver = {
+       .id_table = mc13xxx_i2c_device_id,
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = "mc13xxx",
+               .of_match_table = mc13xxx_dt_ids,
+       },
+       .probe = mc13xxx_i2c_probe,
+       .remove = __devexit_p(mc13xxx_i2c_remove),
+};
+
+static int __init mc13xxx_i2c_init(void)
+{
+       return i2c_add_driver(&mc13xxx_i2c_driver);
+}
+subsys_initcall(mc13xxx_i2c_init);
+
+static void __exit mc13xxx_i2c_exit(void)
+{
+       i2c_del_driver(&mc13xxx_i2c_driver);
+}
+module_exit(mc13xxx_i2c_exit);
+
+MODULE_DESCRIPTION("i2c driver for Freescale MC13XXX PMIC");
+MODULE_AUTHOR("Marc Reilly <marc@cpdesign.com.au>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
new file mode 100644 (file)
index 0000000..3fcdab3
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2009-2010 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * loosely based on an earlier driver that has
+ * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mc13xxx.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+
+#include "mc13xxx.h"
+
+static const struct spi_device_id mc13xxx_device_id[] = {
+       {
+               .name = "mc13783",
+               .driver_data = MC13XXX_ID_MC13783,
+       }, {
+               .name = "mc13892",
+               .driver_data = MC13XXX_ID_MC13892,
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
+
+static const struct of_device_id mc13xxx_dt_ids[] = {
+       { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
+       { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
+
+static struct regmap_config mc13xxx_regmap_spi_config = {
+       .reg_bits = 7,
+       .pad_bits = 1,
+       .val_bits = 24,
+
+       .max_register = MC13XXX_NUMREGS,
+
+       .cache_type = REGCACHE_NONE,
+};
+
+static int mc13xxx_spi_probe(struct spi_device *spi)
+{
+       const struct of_device_id *of_id;
+       struct spi_driver *sdrv = to_spi_driver(spi->dev.driver);
+       struct mc13xxx *mc13xxx;
+       struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
+       int ret;
+
+       of_id = of_match_device(mc13xxx_dt_ids, &spi->dev);
+       if (of_id)
+               sdrv->id_table = &mc13xxx_device_id[(enum mc13xxx_id) of_id->data];
+
+       mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
+       if (!mc13xxx)
+               return -ENOMEM;
+
+       dev_set_drvdata(&spi->dev, mc13xxx);
+       spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
+       spi->bits_per_word = 32;
+
+       mc13xxx->dev = &spi->dev;
+       mutex_init(&mc13xxx->lock);
+
+       mc13xxx->regmap = regmap_init_spi(spi, &mc13xxx_regmap_spi_config);
+       if (IS_ERR(mc13xxx->regmap)) {
+               ret = PTR_ERR(mc13xxx->regmap);
+               dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
+                               ret);
+               dev_set_drvdata(&spi->dev, NULL);
+               kfree(mc13xxx);
+               return ret;
+       }
+
+       ret = mc13xxx_common_init(mc13xxx, pdata, spi->irq);
+
+       if (ret) {
+               dev_set_drvdata(&spi->dev, NULL);
+       } else {
+               const struct spi_device_id *devid =
+                       spi_get_device_id(spi);
+               if (!devid || devid->driver_data != mc13xxx->ictype)
+                       dev_warn(mc13xxx->dev,
+                               "device id doesn't match auto detection!\n");
+       }
+
+       return ret;
+}
+
+static int __devexit mc13xxx_spi_remove(struct spi_device *spi)
+{
+       struct mc13xxx *mc13xxx = dev_get_drvdata(&spi->dev);
+
+       mc13xxx_common_cleanup(mc13xxx);
+
+       return 0;
+}
+
+static struct spi_driver mc13xxx_spi_driver = {
+       .id_table = mc13xxx_device_id,
+       .driver = {
+               .name = "mc13xxx",
+               .owner = THIS_MODULE,
+               .of_match_table = mc13xxx_dt_ids,
+       },
+       .probe = mc13xxx_spi_probe,
+       .remove = __devexit_p(mc13xxx_spi_remove),
+};
+
+static int __init mc13xxx_init(void)
+{
+       return spi_register_driver(&mc13xxx_spi_driver);
+}
+subsys_initcall(mc13xxx_init);
+
+static void __exit mc13xxx_exit(void)
+{
+       spi_unregister_driver(&mc13xxx_spi_driver);
+}
+module_exit(mc13xxx_exit);
+
+MODULE_DESCRIPTION("Core driver for Freescale MC13XXX PMIC");
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mc13xxx.h b/drivers/mfd/mc13xxx.h
new file mode 100644 (file)
index 0000000..bbba06f
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Creative Product Design
+ * Marc Reilly <marc@cpdesign.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#ifndef __DRIVERS_MFD_MC13XXX_H
+#define __DRIVERS_MFD_MC13XXX_H
+
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/mfd/mc13xxx.h>
+
+enum mc13xxx_id {
+       MC13XXX_ID_MC13783,
+       MC13XXX_ID_MC13892,
+       MC13XXX_ID_INVALID,
+};
+
+#define MC13XXX_NUMREGS 0x3f
+
+struct mc13xxx {
+       struct regmap *regmap;
+
+       struct device *dev;
+       enum mc13xxx_id ictype;
+
+       struct mutex lock;
+       int irq;
+       int flags;
+
+       irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
+       void *irqdata[MC13XXX_NUM_IRQ];
+
+       int adcflags;
+};
+
+int mc13xxx_common_init(struct mc13xxx *mc13xxx,
+               struct mc13xxx_platform_data *pdata, int irq);
+
+void mc13xxx_common_cleanup(struct mc13xxx *mc13xxx);
+
+#endif /* __DRIVERS_MFD_MC13XXX_H */
index 189c2f07b83f4331af92528886864e0f5c5fa1d7..29c122bf28ea723e36cafccda86eb09319d595ac 100644 (file)
@@ -204,7 +204,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
                return -ENOENT;
        }
 
-       pcf = kzalloc(sizeof(*pcf), GFP_KERNEL);
+       pcf = devm_kzalloc(&client->dev, sizeof(*pcf), GFP_KERNEL);
        if (!pcf)
                return -ENOMEM;
 
@@ -212,12 +212,11 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
 
        mutex_init(&pcf->lock);
 
-       pcf->regmap = regmap_init_i2c(client, &pcf50633_regmap_config);
+       pcf->regmap = devm_regmap_init_i2c(client, &pcf50633_regmap_config);
        if (IS_ERR(pcf->regmap)) {
                ret = PTR_ERR(pcf->regmap);
-               dev_err(pcf->dev, "Failed to allocate register map: %d\n",
-                       ret);
-               goto err_free;
+               dev_err(pcf->dev, "Failed to allocate register map: %d\n", ret);
+               return ret;
        }
 
        i2c_set_clientdata(client, pcf);
@@ -228,7 +227,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
        if (version < 0 || variant < 0) {
                dev_err(pcf->dev, "Unable to probe pcf50633\n");
                ret = -ENODEV;
-               goto err_regmap;
+               return ret;
        }
 
        dev_info(pcf->dev, "Probed device version %d variant %d\n",
@@ -237,16 +236,11 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
        pcf50633_irq_init(pcf, client->irq);
 
        /* Create sub devices */
-       pcf50633_client_dev_register(pcf, "pcf50633-input",
-                                               &pcf->input_pdev);
-       pcf50633_client_dev_register(pcf, "pcf50633-rtc",
-                                               &pcf->rtc_pdev);
-       pcf50633_client_dev_register(pcf, "pcf50633-mbc",
-                                               &pcf->mbc_pdev);
-       pcf50633_client_dev_register(pcf, "pcf50633-adc",
-                                               &pcf->adc_pdev);
-       pcf50633_client_dev_register(pcf, "pcf50633-backlight",
-                                               &pcf->bl_pdev);
+       pcf50633_client_dev_register(pcf, "pcf50633-input", &pcf->input_pdev);
+       pcf50633_client_dev_register(pcf, "pcf50633-rtc", &pcf->rtc_pdev);
+       pcf50633_client_dev_register(pcf, "pcf50633-mbc", &pcf->mbc_pdev);
+       pcf50633_client_dev_register(pcf, "pcf50633-adc", &pcf->adc_pdev);
+       pcf50633_client_dev_register(pcf, "pcf50633-backlight", &pcf->bl_pdev);
 
 
        for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
@@ -274,13 +268,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
                pdata->probe_done(pcf);
 
        return 0;
-
-err_regmap:
-       regmap_exit(pcf->regmap);
-err_free:
-       kfree(pcf);
-
-       return ret;
 }
 
 static int __devexit pcf50633_remove(struct i2c_client *client)
@@ -300,9 +287,6 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
        for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
                platform_device_unregister(pcf->regulator_pdev[i]);
 
-       regmap_exit(pcf->regmap);
-       kfree(pcf);
-
        return 0;
 }
 
index 44afae0a69ce75a5fada79d48acae246db1dfdb0..cdc1df7fa0e94d10a26059c18dd347c045dcf2cd 100644 (file)
@@ -75,6 +75,7 @@ static struct deepsleep_control_data deepsleep_data[] = {
        (RC5T583_EXT_PWRREQ1_CONTROL | RC5T583_EXT_PWRREQ2_CONTROL)
 
 static struct mfd_cell rc5t583_subdevs[] = {
+       {.name = "rc5t583-gpio",},
        {.name = "rc5t583-regulator",},
        {.name = "rc5t583-rtc",      },
        {.name = "rc5t583-key",      }
@@ -267,7 +268,7 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
        rc5t583->dev = &i2c->dev;
        i2c_set_clientdata(i2c, rc5t583);
 
-       rc5t583->regmap = regmap_init_i2c(i2c, &rc5t583_regmap_config);
+       rc5t583->regmap = devm_regmap_init_i2c(i2c, &rc5t583_regmap_config);
        if (IS_ERR(rc5t583->regmap)) {
                ret = PTR_ERR(rc5t583->regmap);
                dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret);
@@ -276,7 +277,7 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
 
        ret = rc5t583_clear_ext_power_req(rc5t583, pdata);
        if (ret < 0)
-               goto err_irq_init;
+               return ret;
 
        if (i2c->irq) {
                ret = rc5t583_irq_init(rc5t583, i2c->irq, pdata->irq_base);
@@ -299,8 +300,6 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
 err_add_devs:
        if (irq_init_success)
                rc5t583_irq_exit(rc5t583);
-err_irq_init:
-       regmap_exit(rc5t583->regmap);
        return ret;
 }
 
@@ -310,7 +309,6 @@ static int  __devexit rc5t583_i2c_remove(struct i2c_client *i2c)
 
        mfd_remove_devices(rc5t583->dev);
        rc5t583_irq_exit(rc5t583);
-       regmap_exit(rc5t583->regmap);
        return 0;
 }
 
index 809bd4a610895c75baf469ceb1d3bec417aa64c8..685d61e431adfa4f8b733a13bd5c93cfb71c9a44 100644 (file)
@@ -108,18 +108,7 @@ static struct pci_driver rdc321x_sb_driver = {
        .remove         = __devexit_p(rdc321x_sb_remove),
 };
 
-static int __init rdc321x_sb_init(void)
-{
-       return pci_register_driver(&rdc321x_sb_driver);
-}
-
-static void __exit rdc321x_sb_exit(void)
-{
-       pci_unregister_driver(&rdc321x_sb_driver);
-}
-
-module_init(rdc321x_sb_init);
-module_exit(rdc321x_sb_exit);
+module_pci_driver(rdc321x_sb_driver);
 
 MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
 MODULE_LICENSE("GPL");
index 48949d998d105f62172dae5697d3e4e3dd99819a..dd170307e60e7a2642c825df88f28fdd7d568d51 100644 (file)
@@ -114,12 +114,12 @@ static int s5m87xx_i2c_probe(struct i2c_client *i2c,
                s5m87xx->wakeup = pdata->wakeup;
        }
 
-       s5m87xx->regmap = regmap_init_i2c(i2c, &s5m_regmap_config);
+       s5m87xx->regmap = devm_regmap_init_i2c(i2c, &s5m_regmap_config);
        if (IS_ERR(s5m87xx->regmap)) {
                ret = PTR_ERR(s5m87xx->regmap);
                dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
                        ret);
-               goto err;
+               return ret;
        }
 
        s5m87xx->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
@@ -159,7 +159,6 @@ err:
        mfd_remove_devices(s5m87xx->dev);
        s5m_irq_exit(s5m87xx);
        i2c_unregister_device(s5m87xx->rtc);
-       regmap_exit(s5m87xx->regmap);
        return ret;
 }
 
@@ -170,7 +169,6 @@ static int s5m87xx_i2c_remove(struct i2c_client *i2c)
        mfd_remove_devices(s5m87xx->dev);
        s5m_irq_exit(s5m87xx);
        i2c_unregister_device(s5m87xx->rtc);
-       regmap_exit(s5m87xx->regmap);
        return 0;
 }
 
diff --git a/drivers/mfd/sta2x11-mfd.c b/drivers/mfd/sta2x11-mfd.c
new file mode 100644 (file)
index 0000000..d31fed0
--- /dev/null
@@ -0,0 +1,467 @@
+/*
+ * Copyright (c) 2009-2011 Wind River Systems, Inc.
+ * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/sta2x11-mfd.h>
+
+#include <asm/sta2x11.h>
+
+/* This describes STA2X11 MFD chip for us, we may have several */
+struct sta2x11_mfd {
+       struct sta2x11_instance *instance;
+       spinlock_t lock;
+       struct list_head list;
+       void __iomem *sctl_regs;
+       void __iomem *apbreg_regs;
+};
+
+static LIST_HEAD(sta2x11_mfd_list);
+
+/* Three functions to act on the list */
+static struct sta2x11_mfd *sta2x11_mfd_find(struct pci_dev *pdev)
+{
+       struct sta2x11_instance *instance;
+       struct sta2x11_mfd *mfd;
+
+       if (!pdev && !list_empty(&sta2x11_mfd_list)) {
+               pr_warning("%s: Unspecified device, "
+                           "using first instance\n", __func__);
+               return list_entry(sta2x11_mfd_list.next,
+                                 struct sta2x11_mfd, list);
+       }
+
+       instance = sta2x11_get_instance(pdev);
+       if (!instance)
+               return NULL;
+       list_for_each_entry(mfd, &sta2x11_mfd_list, list) {
+               if (mfd->instance == instance)
+                       return mfd;
+       }
+       return NULL;
+}
+
+static int __devinit sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
+{
+       struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+       struct sta2x11_instance *instance;
+
+       if (mfd)
+               return -EBUSY;
+       instance = sta2x11_get_instance(pdev);
+       if (!instance)
+               return -EINVAL;
+       mfd = kzalloc(sizeof(*mfd), flags);
+       if (!mfd)
+               return -ENOMEM;
+       INIT_LIST_HEAD(&mfd->list);
+       spin_lock_init(&mfd->lock);
+       mfd->instance = instance;
+       list_add(&mfd->list, &sta2x11_mfd_list);
+       return 0;
+}
+
+static int __devexit mfd_remove(struct pci_dev *pdev)
+{
+       struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+
+       if (!mfd)
+               return -ENODEV;
+       list_del(&mfd->list);
+       kfree(mfd);
+       return 0;
+}
+
+/* These two functions are exported and are not expected to fail */
+u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+       struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+       u32 r;
+       unsigned long flags;
+
+       if (!mfd) {
+               dev_warn(&pdev->dev, ": can't access sctl regs\n");
+               return 0;
+       }
+       if (!mfd->sctl_regs) {
+               dev_warn(&pdev->dev, ": system ctl not initialized\n");
+               return 0;
+       }
+       spin_lock_irqsave(&mfd->lock, flags);
+       r = readl(mfd->sctl_regs + reg);
+       r &= ~mask;
+       r |= val;
+       if (mask)
+               writel(r, mfd->sctl_regs + reg);
+       spin_unlock_irqrestore(&mfd->lock, flags);
+       return r;
+}
+EXPORT_SYMBOL(sta2x11_sctl_mask);
+
+u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+       struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+       u32 r;
+       unsigned long flags;
+
+       if (!mfd) {
+               dev_warn(&pdev->dev, ": can't access apb regs\n");
+               return 0;
+       }
+       if (!mfd->apbreg_regs) {
+               dev_warn(&pdev->dev, ": apb bridge not initialized\n");
+               return 0;
+       }
+       spin_lock_irqsave(&mfd->lock, flags);
+       r = readl(mfd->apbreg_regs + reg);
+       r &= ~mask;
+       r |= val;
+       if (mask)
+               writel(r, mfd->apbreg_regs + reg);
+       spin_unlock_irqrestore(&mfd->lock, flags);
+       return r;
+}
+EXPORT_SYMBOL(sta2x11_apbreg_mask);
+
+/* Two debugfs files, for our registers (FIXME: one instance only) */
+#define REG(regname) {.name = #regname, .offset = SCTL_ ## regname}
+static struct debugfs_reg32 sta2x11_sctl_regs[] = {
+       REG(SCCTL), REG(ARMCFG), REG(SCPLLCTL), REG(SCPLLFCTRL),
+       REG(SCRESFRACT), REG(SCRESCTRL1), REG(SCRESXTRL2), REG(SCPEREN0),
+       REG(SCPEREN1), REG(SCPEREN2), REG(SCGRST), REG(SCPCIPMCR1),
+       REG(SCPCIPMCR2), REG(SCPCIPMSR1), REG(SCPCIPMSR2), REG(SCPCIPMSR3),
+       REG(SCINTREN), REG(SCRISR), REG(SCCLKSTAT0), REG(SCCLKSTAT1),
+       REG(SCCLKSTAT2), REG(SCRSTSTA),
+};
+#undef REG
+
+static struct debugfs_regset32 sctl_regset = {
+       .regs = sta2x11_sctl_regs,
+       .nregs = ARRAY_SIZE(sta2x11_sctl_regs),
+};
+
+#define REG(regname) {.name = #regname, .offset = regname}
+static struct debugfs_reg32 sta2x11_apbreg_regs[] = {
+       REG(APBREG_BSR), REG(APBREG_PAER), REG(APBREG_PWAC), REG(APBREG_PRAC),
+       REG(APBREG_PCG), REG(APBREG_PUR), REG(APBREG_EMU_PCG),
+};
+#undef REG
+
+static struct debugfs_regset32 apbreg_regset = {
+       .regs = sta2x11_apbreg_regs,
+       .nregs = ARRAY_SIZE(sta2x11_apbreg_regs),
+};
+
+static struct dentry *sta2x11_sctl_debugfs;
+static struct dentry *sta2x11_apbreg_debugfs;
+
+/* Probe for the two platform devices */
+static int sta2x11_sctl_probe(struct platform_device *dev)
+{
+       struct pci_dev **pdev;
+       struct sta2x11_mfd *mfd;
+       struct resource *res;
+
+       pdev = dev->dev.platform_data;
+       mfd = sta2x11_mfd_find(*pdev);
+       if (!mfd)
+               return -ENODEV;
+
+       res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENOMEM;
+
+       if (!request_mem_region(res->start, resource_size(res),
+                               "sta2x11-sctl"))
+               return -EBUSY;
+
+       mfd->sctl_regs = ioremap(res->start, resource_size(res));
+       if (!mfd->sctl_regs) {
+               release_mem_region(res->start, resource_size(res));
+               return -ENOMEM;
+       }
+       sctl_regset.base = mfd->sctl_regs;
+       sta2x11_sctl_debugfs = debugfs_create_regset32("sta2x11-sctl",
+                                                 S_IFREG | S_IRUGO,
+                                                 NULL, &sctl_regset);
+       return 0;
+}
+
+static int sta2x11_apbreg_probe(struct platform_device *dev)
+{
+       struct pci_dev **pdev;
+       struct sta2x11_mfd *mfd;
+       struct resource *res;
+
+       pdev = dev->dev.platform_data;
+       dev_dbg(&dev->dev, "%s: pdata is %p\n", __func__, pdev);
+       dev_dbg(&dev->dev, "%s: *pdata is %p\n", __func__, *pdev);
+
+       mfd = sta2x11_mfd_find(*pdev);
+       if (!mfd)
+               return -ENODEV;
+
+       res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENOMEM;
+
+       if (!request_mem_region(res->start, resource_size(res),
+                               "sta2x11-apbreg"))
+               return -EBUSY;
+
+       mfd->apbreg_regs = ioremap(res->start, resource_size(res));
+       if (!mfd->apbreg_regs) {
+               release_mem_region(res->start, resource_size(res));
+               return -ENOMEM;
+       }
+       dev_dbg(&dev->dev, "%s: regbase %p\n", __func__, mfd->apbreg_regs);
+
+       apbreg_regset.base = mfd->apbreg_regs;
+       sta2x11_apbreg_debugfs = debugfs_create_regset32("sta2x11-apbreg",
+                                                 S_IFREG | S_IRUGO,
+                                                 NULL, &apbreg_regset);
+       return 0;
+}
+
+/* The two platform drivers */
+static struct platform_driver sta2x11_sctl_platform_driver = {
+       .driver = {
+               .name   = "sta2x11-sctl",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = sta2x11_sctl_probe,
+};
+
+static int __init sta2x11_sctl_init(void)
+{
+       pr_info("%s\n", __func__);
+       return platform_driver_register(&sta2x11_sctl_platform_driver);
+}
+
+static struct platform_driver sta2x11_platform_driver = {
+       .driver = {
+               .name   = "sta2x11-apbreg",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = sta2x11_apbreg_probe,
+};
+
+static int __init sta2x11_apbreg_init(void)
+{
+       pr_info("%s\n", __func__);
+       return platform_driver_register(&sta2x11_platform_driver);
+}
+
+/*
+ * What follows is the PCI device that hosts the above two pdevs.
+ * Each logic block is 4kB and they are all consecutive: we use this info.
+ */
+
+/* Bar 0 */
+enum bar0_cells {
+       STA2X11_GPIO_0 = 0,
+       STA2X11_GPIO_1,
+       STA2X11_GPIO_2,
+       STA2X11_GPIO_3,
+       STA2X11_SCTL,
+       STA2X11_SCR,
+       STA2X11_TIME,
+};
+/* Bar 1 */
+enum bar1_cells {
+       STA2X11_APBREG = 0,
+};
+#define CELL_4K(_name, _cell) { \
+               .name = _name, \
+               .start = _cell * 4096, .end = _cell * 4096 + 4095, \
+               .flags = IORESOURCE_MEM, \
+               }
+
+static const __devinitconst struct resource gpio_resources[] = {
+       {
+               .name = "sta2x11_gpio", /* 4 consecutive cells, 1 driver */
+               .start = 0,
+               .end = (4 * 4096) - 1,
+               .flags = IORESOURCE_MEM,
+       }
+};
+static const __devinitconst struct resource sctl_resources[] = {
+       CELL_4K("sta2x11-sctl", STA2X11_SCTL),
+};
+static const __devinitconst struct resource scr_resources[] = {
+       CELL_4K("sta2x11-scr", STA2X11_SCR),
+};
+static const __devinitconst struct resource time_resources[] = {
+       CELL_4K("sta2x11-time", STA2X11_TIME),
+};
+
+static const __devinitconst struct resource apbreg_resources[] = {
+       CELL_4K("sta2x11-apbreg", STA2X11_APBREG),
+};
+
+#define DEV(_name, _r) \
+       { .name = _name, .num_resources = ARRAY_SIZE(_r), .resources = _r, }
+
+static __devinitdata struct mfd_cell sta2x11_mfd_bar0[] = {
+       DEV("sta2x11-gpio", gpio_resources), /* offset 0: we add pdata later */
+       DEV("sta2x11-sctl", sctl_resources),
+       DEV("sta2x11-scr", scr_resources),
+       DEV("sta2x11-time", time_resources),
+};
+
+static __devinitdata struct mfd_cell sta2x11_mfd_bar1[] = {
+       DEV("sta2x11-apbreg", apbreg_resources),
+};
+
+static int sta2x11_mfd_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       pci_save_state(pdev);
+       pci_disable_device(pdev);
+       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+       return 0;
+}
+
+static int sta2x11_mfd_resume(struct pci_dev *pdev)
+{
+       int err;
+
+       pci_set_power_state(pdev, 0);
+       err = pci_enable_device(pdev);
+       if (err)
+               return err;
+       pci_restore_state(pdev);
+
+       return 0;
+}
+
+static int __devinit sta2x11_mfd_probe(struct pci_dev *pdev,
+                                      const struct pci_device_id *pci_id)
+{
+       int err, i;
+       struct sta2x11_gpio_pdata *gpio_data;
+
+       dev_info(&pdev->dev, "%s\n", __func__);
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "Can't enable device.\n");
+               return err;
+       }
+
+       err = pci_enable_msi(pdev);
+       if (err)
+               dev_info(&pdev->dev, "Enable msi failed\n");
+
+       /* Read gpio config data as pci device's platform data */
+       gpio_data = dev_get_platdata(&pdev->dev);
+       if (!gpio_data)
+               dev_warn(&pdev->dev, "no gpio configuration\n");
+
+       dev_dbg(&pdev->dev, "%s, gpio_data = %p (%p)\n", __func__,
+               gpio_data, &gpio_data);
+       dev_dbg(&pdev->dev, "%s, pdev = %p (%p)\n", __func__,
+               pdev, &pdev);
+
+       /* platform data is the pci device for all of them */
+       for (i = 0; i < ARRAY_SIZE(sta2x11_mfd_bar0); i++) {
+               sta2x11_mfd_bar0[i].pdata_size = sizeof(pdev);
+               sta2x11_mfd_bar0[i].platform_data = &pdev;
+       }
+       sta2x11_mfd_bar1[0].pdata_size = sizeof(pdev);
+       sta2x11_mfd_bar1[0].platform_data = &pdev;
+
+       /* Record this pdev before mfd_add_devices: their probe looks for it */
+       sta2x11_mfd_add(pdev, GFP_ATOMIC);
+
+
+       err = mfd_add_devices(&pdev->dev, -1,
+                             sta2x11_mfd_bar0,
+                             ARRAY_SIZE(sta2x11_mfd_bar0),
+                             &pdev->resource[0],
+                             0);
+       if (err) {
+               dev_err(&pdev->dev, "mfd_add_devices[0] failed: %d\n", err);
+               goto err_disable;
+       }
+
+       err = mfd_add_devices(&pdev->dev, -1,
+                             sta2x11_mfd_bar1,
+                             ARRAY_SIZE(sta2x11_mfd_bar1),
+                             &pdev->resource[1],
+                             0);
+       if (err) {
+               dev_err(&pdev->dev, "mfd_add_devices[1] failed: %d\n", err);
+               goto err_disable;
+       }
+
+       return 0;
+
+err_disable:
+       mfd_remove_devices(&pdev->dev);
+       pci_disable_device(pdev);
+       pci_disable_msi(pdev);
+       return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(sta2x11_mfd_tbl) = {
+       {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_GPIO)},
+       {0,},
+};
+
+static struct pci_driver sta2x11_mfd_driver = {
+       .name =         "sta2x11-mfd",
+       .id_table =     sta2x11_mfd_tbl,
+       .probe =        sta2x11_mfd_probe,
+       .suspend =      sta2x11_mfd_suspend,
+       .resume =       sta2x11_mfd_resume,
+};
+
+static int __init sta2x11_mfd_init(void)
+{
+       pr_info("%s\n", __func__);
+       return pci_register_driver(&sta2x11_mfd_driver);
+}
+
+/*
+ * All of this must be ready before "normal" devices like MMCI appear.
+ * But MFD (the pci device) can't be too early. The following choice
+ * prepares platform drivers very early and probe the PCI device later,
+ * but before other PCI devices.
+ */
+subsys_initcall(sta2x11_apbreg_init);
+subsys_initcall(sta2x11_sctl_init);
+rootfs_initcall(sta2x11_mfd_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wind River");
+MODULE_DESCRIPTION("STA2x11 mfd for GPIO, SCTL and APBREG");
+MODULE_DEVICE_TABLE(pci, sta2x11_mfd_tbl);
index b58c43c7ea93bf8f913f62a0774eb109175ebc45..afd459013ecbb1e89485cf37f643f6d7d3c91a20 100644 (file)
@@ -122,7 +122,6 @@ MODULE_DEVICE_TABLE(spi, stmpe_id);
 static struct spi_driver stmpe_spi_driver = {
        .driver = {
                .name   = "stmpe-spi",
-               .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
 #ifdef CONFIG_PM
                .pm     = &stmpe_dev_pm_ops,
index 47f802bf184818c81d9d0c9baeba9225ec63a603..396b9d1b6bd68bc80ee0120693949461688b8b9f 100644 (file)
@@ -283,27 +283,24 @@ static int __devinit tps65090_i2c_probe(struct i2c_client *client,
                }
        }
 
-       tps65090->rmap = regmap_init_i2c(tps65090->client,
-               &tps65090_regmap_config);
+       tps65090->rmap = devm_regmap_init_i2c(tps65090->client,
+                                             &tps65090_regmap_config);
        if (IS_ERR(tps65090->rmap)) {
-               dev_err(&client->dev, "regmap_init failed with err: %ld\n",
-                       PTR_ERR(tps65090->rmap));
+               ret = PTR_ERR(tps65090->rmap);
+               dev_err(&client->dev, "regmap_init failed with err: %d\n", ret);
                goto err_irq_exit;
-       };
+       }
 
        ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
                ARRAY_SIZE(tps65090s), NULL, 0);
        if (ret) {
                dev_err(&client->dev, "add mfd devices failed with err: %d\n",
                        ret);
-               goto err_regmap_exit;
+               goto err_irq_exit;
        }
 
        return 0;
 
-err_regmap_exit:
-       regmap_exit(tps65090->rmap);
-
 err_irq_exit:
        if (client->irq)
                free_irq(client->irq, tps65090);
@@ -316,29 +313,34 @@ static int __devexit tps65090_i2c_remove(struct i2c_client *client)
        struct tps65090 *tps65090 = i2c_get_clientdata(client);
 
        mfd_remove_devices(tps65090->dev);
-       regmap_exit(tps65090->rmap);
        if (client->irq)
                free_irq(client->irq, tps65090);
 
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int tps65090_i2c_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int tps65090_suspend(struct device *dev)
 {
+       struct i2c_client *client = to_i2c_client(dev);
        if (client->irq)
                disable_irq(client->irq);
        return 0;
 }
 
-static int tps65090_i2c_resume(struct i2c_client *client)
+static int tps65090_resume(struct device *dev)
 {
+       struct i2c_client *client = to_i2c_client(dev);
        if (client->irq)
                enable_irq(client->irq);
        return 0;
 }
 #endif
 
+static const struct dev_pm_ops tps65090_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(tps65090_suspend, tps65090_resume)
+};
+
 static const struct i2c_device_id tps65090_id_table[] = {
        { "tps65090", 0 },
        { },
@@ -349,13 +351,10 @@ static struct i2c_driver tps65090_driver = {
        .driver = {
                .name   = "tps65090",
                .owner  = THIS_MODULE,
+               .pm     = &tps65090_pm_ops,
        },
        .probe          = tps65090_i2c_probe,
        .remove         = __devexit_p(tps65090_i2c_remove),
-#ifdef CONFIG_PM
-       .suspend        = tps65090_i2c_suspend,
-       .resume         = tps65090_i2c_resume,
-#endif
        .id_table       = tps65090_id_table,
 };
 
index f7d854e4cc62dd1df9728d3b0a3a6366aa12dc75..db194e433c085358b8d4716f062af3700bba1956 100644 (file)
@@ -96,7 +96,7 @@ EXPORT_SYMBOL_GPL(tps65217_reg_write);
  * @val: Value to write.
  * @level: Password protected level
  */
-int tps65217_update_bits(struct tps65217 *tps, unsigned int reg,
+static int tps65217_update_bits(struct tps65217 *tps, unsigned int reg,
                unsigned int mask, unsigned int val, unsigned int level)
 {
        int ret;
@@ -150,7 +150,7 @@ static int __devinit tps65217_probe(struct i2c_client *client,
                return -ENOMEM;
 
        tps->pdata = pdata;
-       tps->regmap = regmap_init_i2c(client, &tps65217_regmap_config);
+       tps->regmap = devm_regmap_init_i2c(client, &tps65217_regmap_config);
        if (IS_ERR(tps->regmap)) {
                ret = PTR_ERR(tps->regmap);
                dev_err(tps->dev, "Failed to allocate register map: %d\n",
@@ -163,9 +163,9 @@ static int __devinit tps65217_probe(struct i2c_client *client,
 
        ret = tps65217_reg_read(tps, TPS65217_REG_CHIPID, &version);
        if (ret < 0) {
-               dev_err(tps->dev, "Failed to read revision"
-                                       " register: %d\n", ret);
-               goto err_regmap;
+               dev_err(tps->dev, "Failed to read revision register: %d\n",
+                       ret);
+               return ret;
        }
 
        dev_info(tps->dev, "TPS65217 ID %#x version 1.%d\n",
@@ -190,11 +190,6 @@ static int __devinit tps65217_probe(struct i2c_client *client,
        }
 
        return 0;
-
-err_regmap:
-       regmap_exit(tps->regmap);
-
-       return ret;
 }
 
 static int __devexit tps65217_remove(struct i2c_client *client)
@@ -205,8 +200,6 @@ static int __devexit tps65217_remove(struct i2c_client *client)
        for (i = 0; i < TPS65217_NUM_REGULATOR; i++)
                platform_device_unregister(tps->regulator_pdev[i]);
 
-       regmap_exit(tps->regmap);
-
        return 0;
 }
 
index c9ed5c00a6211bb2349858ef76161eb880e45396..09aab3e4776dac883e67e4dcb71bffcc4eddb8ec 100644 (file)
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/gpio.h>
 #include <linux/mfd/tps65910.h>
 
-static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
-                                                       int irq)
-{
-       return (irq - tps65910->irq_base);
-}
-
 /*
  * This is a threaded IRQ handler so can access I2C/SPI.  Since all
  * interrupts are clear on read the IRQ line will be reasserted and
@@ -41,28 +36,28 @@ static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
 static irqreturn_t tps65910_irq(int irq, void *irq_data)
 {
        struct tps65910 *tps65910 = irq_data;
+       unsigned int reg;
        u32 irq_sts;
        u32 irq_mask;
-       u8 reg;
        int i;
 
-       tps65910->read(tps65910, TPS65910_INT_STS, 1, &reg);
+       tps65910_reg_read(tps65910, TPS65910_INT_STS, &reg);
        irq_sts = reg;
-       tps65910->read(tps65910, TPS65910_INT_STS2, 1, &reg);
+       tps65910_reg_read(tps65910, TPS65910_INT_STS2, &reg);
        irq_sts |= reg << 8;
        switch (tps65910_chip_id(tps65910)) {
        case TPS65911:
-               tps65910->read(tps65910, TPS65910_INT_STS3, 1, &reg);
+               tps65910_reg_read(tps65910, TPS65910_INT_STS3, &reg);
                irq_sts |= reg << 16;
        }
 
-       tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+       tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
        irq_mask = reg;
-       tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+       tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
        irq_mask |= reg << 8;
        switch (tps65910_chip_id(tps65910)) {
        case TPS65911:
-               tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+               tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
                irq_mask |= reg << 16;
        }
 
@@ -76,19 +71,19 @@ static irqreturn_t tps65910_irq(int irq, void *irq_data)
                if (!(irq_sts & (1 << i)))
                        continue;
 
-               handle_nested_irq(tps65910->irq_base + i);
+               handle_nested_irq(irq_find_mapping(tps65910->domain, i));
        }
 
        /* Write the STS register back to clear IRQs we handled */
        reg = irq_sts & 0xFF;
        irq_sts >>= 8;
-       tps65910->write(tps65910, TPS65910_INT_STS, 1, &reg);
+       tps65910_reg_write(tps65910, TPS65910_INT_STS, reg);
        reg = irq_sts & 0xFF;
-       tps65910->write(tps65910, TPS65910_INT_STS2, 1, &reg);
+       tps65910_reg_write(tps65910, TPS65910_INT_STS2, reg);
        switch (tps65910_chip_id(tps65910)) {
        case TPS65911:
                reg = irq_sts >> 8;
-               tps65910->write(tps65910, TPS65910_INT_STS3, 1, &reg);
+               tps65910_reg_write(tps65910, TPS65910_INT_STS3, reg);
        }
 
        return IRQ_HANDLED;
@@ -105,27 +100,27 @@ static void tps65910_irq_sync_unlock(struct irq_data *data)
 {
        struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
        u32 reg_mask;
-       u8 reg;
+       unsigned int reg;
 
-       tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+       tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
        reg_mask = reg;
-       tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+       tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
        reg_mask |= reg << 8;
        switch (tps65910_chip_id(tps65910)) {
        case TPS65911:
-               tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+               tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
                reg_mask |= reg << 16;
        }
 
        if (tps65910->irq_mask != reg_mask) {
                reg = tps65910->irq_mask & 0xFF;
-               tps65910->write(tps65910, TPS65910_INT_MSK, 1, &reg);
+               tps65910_reg_write(tps65910, TPS65910_INT_MSK, reg);
                reg = tps65910->irq_mask >> 8 & 0xFF;
-               tps65910->write(tps65910, TPS65910_INT_MSK2, 1, &reg);
+               tps65910_reg_write(tps65910, TPS65910_INT_MSK2, reg);
                switch (tps65910_chip_id(tps65910)) {
                case TPS65911:
                        reg = tps65910->irq_mask >> 16;
-                       tps65910->write(tps65910, TPS65910_INT_MSK3, 1, &reg);
+                       tps65910_reg_write(tps65910, TPS65910_INT_MSK3, reg);
                }
        }
        mutex_unlock(&tps65910->irq_lock);
@@ -135,14 +130,14 @@ static void tps65910_irq_enable(struct irq_data *data)
 {
        struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
 
-       tps65910->irq_mask &= ~( 1 << irq_to_tps65910_irq(tps65910, data->irq));
+       tps65910->irq_mask &= ~(1 << data->hwirq);
 }
 
 static void tps65910_irq_disable(struct irq_data *data)
 {
        struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
 
-       tps65910->irq_mask |= ( 1 << irq_to_tps65910_irq(tps65910, data->irq));
+       tps65910->irq_mask |= (1 << data->hwirq);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -164,10 +159,35 @@ static struct irq_chip tps65910_irq_chip = {
        .irq_set_wake = tps65910_irq_set_wake,
 };
 
+static int tps65910_irq_map(struct irq_domain *h, unsigned int virq,
+                               irq_hw_number_t hw)
+{
+       struct tps65910 *tps65910 = h->host_data;
+
+       irq_set_chip_data(virq, tps65910);
+       irq_set_chip_and_handler(virq, &tps65910_irq_chip, handle_edge_irq);
+       irq_set_nested_thread(virq, 1);
+
+       /* ARM needs us to explicitly flag the IRQ as valid
+        * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+       set_irq_flags(virq, IRQF_VALID);
+#else
+       irq_set_noprobe(virq);
+#endif
+
+       return 0;
+}
+
+static struct irq_domain_ops tps65910_domain_ops = {
+       .map    = tps65910_irq_map,
+       .xlate  = irq_domain_xlate_twocell,
+};
+
 int tps65910_irq_init(struct tps65910 *tps65910, int irq,
                    struct tps65910_platform_data *pdata)
 {
-       int ret, cur_irq;
+       int ret;
        int flags = IRQF_ONESHOT;
 
        if (!irq) {
@@ -175,17 +195,11 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
                return -EINVAL;
        }
 
-       if (!pdata || !pdata->irq_base) {
-               dev_warn(tps65910->dev, "No interrupt support, no IRQ base\n");
+       if (!pdata) {
+               dev_warn(tps65910->dev, "No interrupt support, no pdata\n");
                return -EINVAL;
        }
 
-       tps65910->irq_mask = 0xFFFFFF;
-
-       mutex_init(&tps65910->irq_lock);
-       tps65910->chip_irq = irq;
-       tps65910->irq_base = pdata->irq_base;
-
        switch (tps65910_chip_id(tps65910)) {
        case TPS65910:
                tps65910->irq_num = TPS65910_NUM_IRQ;
@@ -195,22 +209,36 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
                break;
        }
 
-       /* Register with genirq */
-       for (cur_irq = tps65910->irq_base;
-            cur_irq < tps65910->irq_num + tps65910->irq_base;
-            cur_irq++) {
-               irq_set_chip_data(cur_irq, tps65910);
-               irq_set_chip_and_handler(cur_irq, &tps65910_irq_chip,
-                                        handle_edge_irq);
-               irq_set_nested_thread(cur_irq, 1);
-
-               /* ARM needs us to explicitly flag the IRQ as valid
-                * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-               set_irq_flags(cur_irq, IRQF_VALID);
-#else
-               irq_set_noprobe(cur_irq);
-#endif
+       if (pdata->irq_base > 0) {
+               pdata->irq_base = irq_alloc_descs(pdata->irq_base, 0,
+                                       tps65910->irq_num, -1);
+               if (pdata->irq_base < 0) {
+                       dev_warn(tps65910->dev, "Failed to alloc IRQs: %d\n",
+                                       pdata->irq_base);
+                       return pdata->irq_base;
+               }
+       }
+
+       tps65910->irq_mask = 0xFFFFFF;
+
+       mutex_init(&tps65910->irq_lock);
+       tps65910->chip_irq = irq;
+       tps65910->irq_base = pdata->irq_base;
+
+       if (pdata->irq_base > 0)
+               tps65910->domain = irq_domain_add_legacy(tps65910->dev->of_node,
+                                       tps65910->irq_num,
+                                       pdata->irq_base,
+                                       0,
+                                       &tps65910_domain_ops, tps65910);
+       else
+               tps65910->domain = irq_domain_add_linear(tps65910->dev->of_node,
+                                       tps65910->irq_num,
+                                       &tps65910_domain_ops, tps65910);
+
+       if (!tps65910->domain) {
+               dev_err(tps65910->dev, "Failed to create IRQ domain\n");
+               return -ENOMEM;
        }
 
        ret = request_threaded_irq(irq, NULL, tps65910_irq, flags,
index bf2b25ebf2ca7d4c9c2777158518bd2a3185a39c..be9e07b77325a6afef8ed64253bc4a51e5cb4cc9 100644 (file)
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
-#include <linux/gpio.h>
 #include <linux/mfd/core.h>
 #include <linux/regmap.h>
 #include <linux/mfd/tps65910.h>
+#include <linux/of_device.h>
 
 static struct mfd_cell tps65910s[] = {
+       {
+               .name = "tps65910-gpio",
+       },
        {
                .name = "tps65910-pmic",
        },
@@ -37,30 +40,6 @@ static struct mfd_cell tps65910s[] = {
 };
 
 
-static int tps65910_i2c_read(struct tps65910 *tps65910, u8 reg,
-                                 int bytes, void *dest)
-{
-       return regmap_bulk_read(tps65910->regmap, reg, dest, bytes);
-}
-
-static int tps65910_i2c_write(struct tps65910 *tps65910, u8 reg,
-                                 int bytes, void *src)
-{
-       return regmap_bulk_write(tps65910->regmap, reg, src, bytes);
-}
-
-int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
-{
-       return regmap_update_bits(tps65910->regmap, reg, mask, mask);
-}
-EXPORT_SYMBOL_GPL(tps65910_set_bits);
-
-int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
-{
-       return regmap_update_bits(tps65910->regmap, reg, mask, 0);
-}
-EXPORT_SYMBOL_GPL(tps65910_clear_bits);
-
 static bool is_volatile_reg(struct device *dev, unsigned int reg)
 {
        struct tps65910 *tps65910 = dev_get_drvdata(dev);
@@ -85,80 +64,197 @@ static const struct regmap_config tps65910_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .volatile_reg = is_volatile_reg,
-       .max_register = TPS65910_MAX_REGISTER,
-       .num_reg_defaults_raw = TPS65910_MAX_REGISTER,
+       .max_register = TPS65910_MAX_REGISTER - 1,
        .cache_type = REGCACHE_RBTREE,
 };
 
-static int tps65910_i2c_probe(struct i2c_client *i2c,
-                           const struct i2c_device_id *id)
+static int __devinit tps65910_sleepinit(struct tps65910 *tps65910,
+               struct tps65910_board *pmic_pdata)
+{
+       struct device *dev = NULL;
+       int ret = 0;
+
+       dev = tps65910->dev;
+
+       if (!pmic_pdata->en_dev_slp)
+               return 0;
+
+       /* enabling SLEEP device state */
+       ret = tps65910_reg_set_bits(tps65910, TPS65910_DEVCTRL,
+                               DEVCTRL_DEV_SLP_MASK);
+       if (ret < 0) {
+               dev_err(dev, "set dev_slp failed: %d\n", ret);
+               goto err_sleep_init;
+       }
+
+       /* Return if there is no sleep keepon data. */
+       if (!pmic_pdata->slp_keepon)
+               return 0;
+
+       if (pmic_pdata->slp_keepon->therm_keepon) {
+               ret = tps65910_reg_set_bits(tps65910,
+                               TPS65910_SLEEP_KEEP_RES_ON,
+                               SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK);
+               if (ret < 0) {
+                       dev_err(dev, "set therm_keepon failed: %d\n", ret);
+                       goto disable_dev_slp;
+               }
+       }
+
+       if (pmic_pdata->slp_keepon->clkout32k_keepon) {
+               ret = tps65910_reg_set_bits(tps65910,
+                               TPS65910_SLEEP_KEEP_RES_ON,
+                               SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK);
+               if (ret < 0) {
+                       dev_err(dev, "set clkout32k_keepon failed: %d\n", ret);
+                       goto disable_dev_slp;
+               }
+       }
+
+       if (pmic_pdata->slp_keepon->i2chs_keepon) {
+               ret = tps65910_reg_set_bits(tps65910,
+                               TPS65910_SLEEP_KEEP_RES_ON,
+                               SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK);
+               if (ret < 0) {
+                       dev_err(dev, "set i2chs_keepon failed: %d\n", ret);
+                       goto disable_dev_slp;
+               }
+       }
+
+       return 0;
+
+disable_dev_slp:
+       tps65910_reg_clear_bits(tps65910, TPS65910_DEVCTRL,
+                               DEVCTRL_DEV_SLP_MASK);
+
+err_sleep_init:
+       return ret;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id tps65910_of_match[] = {
+       { .compatible = "ti,tps65910", .data = (void *)TPS65910},
+       { .compatible = "ti,tps65911", .data = (void *)TPS65911},
+       { },
+};
+MODULE_DEVICE_TABLE(of, tps65910_of_match);
+
+static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
+                                               int *chip_id)
+{
+       struct device_node *np = client->dev.of_node;
+       struct tps65910_board *board_info;
+       unsigned int prop;
+       const struct of_device_id *match;
+       int ret = 0;
+
+       match = of_match_device(tps65910_of_match, &client->dev);
+       if (!match) {
+               dev_err(&client->dev, "Failed to find matching dt id\n");
+               return NULL;
+       }
+
+       *chip_id  = (int)match->data;
+
+       board_info = devm_kzalloc(&client->dev, sizeof(*board_info),
+                       GFP_KERNEL);
+       if (!board_info) {
+               dev_err(&client->dev, "Failed to allocate pdata\n");
+               return NULL;
+       }
+
+       ret = of_property_read_u32(np, "ti,vmbch-threshold", &prop);
+       if (!ret)
+               board_info->vmbch_threshold = prop;
+       else if (*chip_id == TPS65911)
+               dev_warn(&client->dev, "VMBCH-Threshold not specified");
+
+       ret = of_property_read_u32(np, "ti,vmbch2-threshold", &prop);
+       if (!ret)
+               board_info->vmbch2_threshold = prop;
+       else if (*chip_id == TPS65911)
+               dev_warn(&client->dev, "VMBCH2-Threshold not specified");
+
+       board_info->irq = client->irq;
+       board_info->irq_base = -1;
+
+       return board_info;
+}
+#else
+static inline
+struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
+                                        int *chip_id)
+{
+       return NULL;
+}
+#endif
+
+static __devinit int tps65910_i2c_probe(struct i2c_client *i2c,
+                                       const struct i2c_device_id *id)
 {
        struct tps65910 *tps65910;
        struct tps65910_board *pmic_plat_data;
+       struct tps65910_board *of_pmic_plat_data = NULL;
        struct tps65910_platform_data *init_data;
        int ret = 0;
+       int chip_id = id->driver_data;
 
        pmic_plat_data = dev_get_platdata(&i2c->dev);
+
+       if (!pmic_plat_data && i2c->dev.of_node) {
+               pmic_plat_data = tps65910_parse_dt(i2c, &chip_id);
+               of_pmic_plat_data = pmic_plat_data;
+       }
+
        if (!pmic_plat_data)
                return -EINVAL;
 
-       init_data = kzalloc(sizeof(struct tps65910_platform_data), GFP_KERNEL);
+       init_data = devm_kzalloc(&i2c->dev, sizeof(*init_data), GFP_KERNEL);
        if (init_data == NULL)
                return -ENOMEM;
 
-       tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL);
-       if (tps65910 == NULL) {
-               kfree(init_data);
+       tps65910 = devm_kzalloc(&i2c->dev, sizeof(*tps65910), GFP_KERNEL);
+       if (tps65910 == NULL)
                return -ENOMEM;
-       }
 
+       tps65910->of_plat_data = of_pmic_plat_data;
        i2c_set_clientdata(i2c, tps65910);
        tps65910->dev = &i2c->dev;
        tps65910->i2c_client = i2c;
-       tps65910->id = id->driver_data;
-       tps65910->read = tps65910_i2c_read;
-       tps65910->write = tps65910_i2c_write;
+       tps65910->id = chip_id;
        mutex_init(&tps65910->io_mutex);
 
-       tps65910->regmap = regmap_init_i2c(i2c, &tps65910_regmap_config);
+       tps65910->regmap = devm_regmap_init_i2c(i2c, &tps65910_regmap_config);
        if (IS_ERR(tps65910->regmap)) {
                ret = PTR_ERR(tps65910->regmap);
                dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret);
-               goto regmap_err;
+               return ret;
        }
 
        ret = mfd_add_devices(tps65910->dev, -1,
                              tps65910s, ARRAY_SIZE(tps65910s),
                              NULL, 0);
-       if (ret < 0)
-               goto err;
+       if (ret < 0) {
+               dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
+               return ret;
+       }
 
        init_data->irq = pmic_plat_data->irq;
        init_data->irq_base = pmic_plat_data->irq_base;
 
-       tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
-
        tps65910_irq_init(tps65910, init_data->irq, init_data);
 
-       kfree(init_data);
-       return ret;
+       tps65910_sleepinit(tps65910, pmic_plat_data);
 
-err:
-       regmap_exit(tps65910->regmap);
-regmap_err:
-       kfree(tps65910);
-       kfree(init_data);
        return ret;
 }
 
-static int tps65910_i2c_remove(struct i2c_client *i2c)
+static __devexit int tps65910_i2c_remove(struct i2c_client *i2c)
 {
        struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
 
        tps65910_irq_exit(tps65910);
        mfd_remove_devices(tps65910->dev);
-       regmap_exit(tps65910->regmap);
-       kfree(tps65910);
 
        return 0;
 }
@@ -175,9 +271,10 @@ static struct i2c_driver tps65910_i2c_driver = {
        .driver = {
                   .name = "tps65910",
                   .owner = THIS_MODULE,
+                  .of_match_table = of_match_ptr(tps65910_of_match),
        },
        .probe = tps65910_i2c_probe,
-       .remove = tps65910_i2c_remove,
+       .remove = __devexit_p(tps65910_i2c_remove),
        .id_table = tps65910_i2c_id,
 };
 
index 5d656e8143583634196061e709dcd1944d0b1121..ad733d76207ac11320f69603ea1a3235f81b9495 100644 (file)
@@ -757,6 +757,7 @@ int twl4030_init_irq(struct device *dev, int irq_num)
                dev_err(dev, "could not claim irq%d: %d\n", irq_num, status);
                goto fail_rqirq;
        }
+       enable_irq_wake(irq_num);
 
        return irq_base;
 fail_rqirq:
index 2d6bedadca096d68eec4ce61332b57d6f08140a8..4ded9e7aa246efdc0e38347fab044f46ef2dc7ef 100644 (file)
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/err.h>
 #include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
 #include <linux/i2c.h>
 #include <linux/err.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/twl6040.h>
+#include <linux/regulator/consumer.h>
 
 #define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1)
+#define TWL6040_NUM_SUPPLIES   (2)
+
+static bool twl6040_has_vibra(struct twl6040_platform_data *pdata,
+                             struct device_node *node)
+{
+       if (pdata && pdata->vibra)
+               return true;
+
+#ifdef CONFIG_OF
+       if (of_find_node_by_name(node, "vibra"))
+               return true;
+#endif
+
+       return false;
+}
 
 int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
 {
@@ -502,17 +523,18 @@ static int __devinit twl6040_probe(struct i2c_client *client,
                                     const struct i2c_device_id *id)
 {
        struct twl6040_platform_data *pdata = client->dev.platform_data;
+       struct device_node *node = client->dev.of_node;
        struct twl6040 *twl6040;
        struct mfd_cell *cell = NULL;
-       int ret, children = 0;
+       int irq, ret, children = 0;
 
-       if (!pdata) {
+       if (!pdata && !node) {
                dev_err(&client->dev, "Platform data is missing\n");
                return -EINVAL;
        }
 
        /* In order to operate correctly we need valid interrupt config */
-       if (!client->irq || !pdata->irq_base) {
+       if (!client->irq) {
                dev_err(&client->dev, "Invalid IRQ configuration\n");
                return -EINVAL;
        }
@@ -524,7 +546,7 @@ static int __devinit twl6040_probe(struct i2c_client *client,
                goto err;
        }
 
-       twl6040->regmap = regmap_init_i2c(client, &twl6040_regmap_config);
+       twl6040->regmap = devm_regmap_init_i2c(client, &twl6040_regmap_config);
        if (IS_ERR(twl6040->regmap)) {
                ret = PTR_ERR(twl6040->regmap);
                goto err;
@@ -532,9 +554,23 @@ static int __devinit twl6040_probe(struct i2c_client *client,
 
        i2c_set_clientdata(client, twl6040);
 
+       twl6040->supplies[0].supply = "vio";
+       twl6040->supplies[1].supply = "v2v1";
+       ret = regulator_bulk_get(&client->dev, TWL6040_NUM_SUPPLIES,
+                                twl6040->supplies);
+       if (ret != 0) {
+               dev_err(&client->dev, "Failed to get supplies: %d\n", ret);
+               goto regulator_get_err;
+       }
+
+       ret = regulator_bulk_enable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+       if (ret != 0) {
+               dev_err(&client->dev, "Failed to enable supplies: %d\n", ret);
+               goto power_err;
+       }
+
        twl6040->dev = &client->dev;
        twl6040->irq = client->irq;
-       twl6040->irq_base = pdata->irq_base;
 
        mutex_init(&twl6040->mutex);
        mutex_init(&twl6040->io_mutex);
@@ -543,22 +579,26 @@ static int __devinit twl6040_probe(struct i2c_client *client,
        twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV);
 
        /* ERRATA: Automatic power-up is not possible in ES1.0 */
-       if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0)
-               twl6040->audpwron = pdata->audpwron_gpio;
-       else
+       if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0) {
+               if (pdata)
+                       twl6040->audpwron = pdata->audpwron_gpio;
+               else
+                       twl6040->audpwron = of_get_named_gpio(node,
+                                               "ti,audpwron-gpio", 0);
+       } else
                twl6040->audpwron = -EINVAL;
 
        if (gpio_is_valid(twl6040->audpwron)) {
                ret = gpio_request_one(twl6040->audpwron, GPIOF_OUT_INIT_LOW,
                                       "audpwron");
                if (ret)
-                       goto gpio1_err;
+                       goto gpio_err;
        }
 
        /* codec interrupt */
        ret = twl6040_irq_init(twl6040);
        if (ret)
-               goto gpio2_err;
+               goto irq_init_err;
 
        ret = request_threaded_irq(twl6040->irq_base + TWL6040_IRQ_READY,
                                   NULL, twl6040_naudint_handler, 0,
@@ -572,22 +612,27 @@ static int __devinit twl6040_probe(struct i2c_client *client,
        /* dual-access registers controlled by I2C only */
        twl6040_set_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_I2CSEL);
 
-       if (pdata->codec) {
-               int irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
-
-               cell = &twl6040->cells[children];
-               cell->name = "twl6040-codec";
-               twl6040_codec_rsrc[0].start = irq;
-               twl6040_codec_rsrc[0].end = irq;
-               cell->resources = twl6040_codec_rsrc;
-               cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
+       /*
+        * The main functionality of twl6040 to provide audio on OMAP4+ systems.
+        * We can add the ASoC codec child whenever this driver has been loaded.
+        * The ASoC codec can work without pdata, pass the platform_data only if
+        * it has been provided.
+        */
+       irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
+       cell = &twl6040->cells[children];
+       cell->name = "twl6040-codec";
+       twl6040_codec_rsrc[0].start = irq;
+       twl6040_codec_rsrc[0].end = irq;
+       cell->resources = twl6040_codec_rsrc;
+       cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
+       if (pdata && pdata->codec) {
                cell->platform_data = pdata->codec;
                cell->pdata_size = sizeof(*pdata->codec);
-               children++;
        }
+       children++;
 
-       if (pdata->vibra) {
-               int irq = twl6040->irq_base + TWL6040_IRQ_VIB;
+       if (twl6040_has_vibra(pdata, node)) {
+               irq = twl6040->irq_base + TWL6040_IRQ_VIB;
 
                cell = &twl6040->cells[children];
                cell->name = "twl6040-vibra";
@@ -596,21 +641,17 @@ static int __devinit twl6040_probe(struct i2c_client *client,
                cell->resources = twl6040_vibra_rsrc;
                cell->num_resources = ARRAY_SIZE(twl6040_vibra_rsrc);
 
-               cell->platform_data = pdata->vibra;
-               cell->pdata_size = sizeof(*pdata->vibra);
+               if (pdata && pdata->vibra) {
+                       cell->platform_data = pdata->vibra;
+                       cell->pdata_size = sizeof(*pdata->vibra);
+               }
                children++;
        }
 
-       if (children) {
-               ret = mfd_add_devices(&client->dev, -1, twl6040->cells,
-                                     children, NULL, 0);
-               if (ret)
-                       goto mfd_err;
-       } else {
-               dev_err(&client->dev, "No platform data found for children\n");
-               ret = -ENODEV;
+       ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children,
+                             NULL, 0);
+       if (ret)
                goto mfd_err;
-       }
 
        return 0;
 
@@ -618,12 +659,15 @@ mfd_err:
        free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
 irq_err:
        twl6040_irq_exit(twl6040);
-gpio2_err:
+irq_init_err:
        if (gpio_is_valid(twl6040->audpwron))
                gpio_free(twl6040->audpwron);
-gpio1_err:
+gpio_err:
+       regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+power_err:
+       regulator_bulk_free(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+regulator_get_err:
        i2c_set_clientdata(client, NULL);
-       regmap_exit(twl6040->regmap);
 err:
        return ret;
 }
@@ -643,7 +687,9 @@ static int __devexit twl6040_remove(struct i2c_client *client)
 
        mfd_remove_devices(&client->dev);
        i2c_set_clientdata(client, NULL);
-       regmap_exit(twl6040->regmap);
+
+       regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+       regulator_bulk_free(TWL6040_NUM_SUPPLIES, twl6040->supplies);
 
        return 0;
 }
index b3f8ddaa28a8b792c14d2b5c785ffed4b099a735..4b42543da2282cae615fbc77583491b486903542 100644 (file)
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/err.h>
 #include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/irqdomain.h>
 #include <linux/interrupt.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/twl6040.h>
@@ -138,7 +141,8 @@ static irqreturn_t twl6040_irq_thread(int irq, void *data)
 
 int twl6040_irq_init(struct twl6040 *twl6040)
 {
-       int cur_irq, ret;
+       struct device_node *node = twl6040->dev->of_node;
+       int i, nr_irqs, irq_base, ret;
        u8 val;
 
        mutex_init(&twl6040->irq_mutex);
@@ -148,21 +152,31 @@ int twl6040_irq_init(struct twl6040 *twl6040)
        twl6040->irq_masks_cache = TWL6040_ALLINT_MSK;
        twl6040_reg_write(twl6040, TWL6040_REG_INTMR, TWL6040_ALLINT_MSK);
 
+       nr_irqs = ARRAY_SIZE(twl6040_irqs);
+
+       irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
+       if (IS_ERR_VALUE(irq_base)) {
+               dev_err(twl6040->dev, "Fail to allocate IRQ descs\n");
+               return irq_base;
+       }
+       twl6040->irq_base = irq_base;
+
+       irq_domain_add_legacy(node, ARRAY_SIZE(twl6040_irqs), irq_base, 0,
+                             &irq_domain_simple_ops, NULL);
+
        /* Register them with genirq */
-       for (cur_irq = twl6040->irq_base;
-            cur_irq < twl6040->irq_base + ARRAY_SIZE(twl6040_irqs);
-            cur_irq++) {
-               irq_set_chip_data(cur_irq, twl6040);
-               irq_set_chip_and_handler(cur_irq, &twl6040_irq_chip,
+       for (i = irq_base; i < irq_base + nr_irqs; i++) {
+               irq_set_chip_data(i, twl6040);
+               irq_set_chip_and_handler(i, &twl6040_irq_chip,
                                         handle_level_irq);
-               irq_set_nested_thread(cur_irq, 1);
+               irq_set_nested_thread(i, 1);
 
                /* ARM needs us to explicitly flag the IRQ as valid
                 * and will set them noprobe when we do so. */
 #ifdef CONFIG_ARM
-               set_irq_flags(cur_irq, IRQF_VALID);
+               set_irq_flags(i, IRQF_VALID);
 #else
-               irq_set_noprobe(cur_irq);
+               irq_set_noprobe(i);
 #endif
        }
 
index b73cc15e00818d4e15f2f44f9fe0a9f12e6196a6..872aff21e4be6fa682eb01aad72b04492efd622e 100644 (file)
@@ -131,17 +131,7 @@ static struct pci_driver vx855_pci_driver = {
        .remove         = __devexit_p(vx855_remove),
 };
 
-static int vx855_init(void)
-{
-       return pci_register_driver(&vx855_pci_driver);
-}
-module_init(vx855_init);
-
-static void vx855_exit(void)
-{
-       pci_unregister_driver(&vx855_pci_driver);
-}
-module_exit(vx855_exit);
+module_pci_driver(vx855_pci_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
index 87210954a066f9598cc40b0dd7b2acfb00017f34..6ee3018d8653f6d8b73b92d70d296d2ed01e8f62 100644 (file)
@@ -280,11 +280,11 @@ void wm831x_auxadc_init(struct wm831x *wm831x)
        mutex_init(&wm831x->auxadc_lock);
        INIT_LIST_HEAD(&wm831x->auxadc_pending);
 
-       if (wm831x->irq && wm831x->irq_base) {
+       if (wm831x->irq) {
                wm831x->auxadc_read = wm831x_auxadc_read_irq;
 
-               ret = request_threaded_irq(wm831x->irq_base +
-                                          WM831X_IRQ_AUXADC_DATA,
+               ret = request_threaded_irq(wm831x_irq(wm831x,
+                                                     WM831X_IRQ_AUXADC_DATA),
                                           NULL, wm831x_auxadc_irq, 0,
                                           "auxadc", wm831x);
                if (ret < 0) {
index 838056c3493a75e848a4b3ba88b2416c9d98a846..946698fd2dc6a4dc9b3a7868bc7f44c920a46201 100644 (file)
@@ -614,8 +614,15 @@ int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg,
 }
 EXPORT_SYMBOL_GPL(wm831x_set_bits);
 
+static struct resource wm831x_io_parent = {
+       .start = 0,
+       .end   = 0xffffffff,
+       .flags = IORESOURCE_IO,
+};
+
 static struct resource wm831x_dcdc1_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_DC1_CONTROL_1,
                .end   = WM831X_DC1_DVS_CONTROL,
                .flags = IORESOURCE_IO,
@@ -637,6 +644,7 @@ static struct resource wm831x_dcdc1_resources[] = {
 
 static struct resource wm831x_dcdc2_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_DC2_CONTROL_1,
                .end   = WM831X_DC2_DVS_CONTROL,
                .flags = IORESOURCE_IO,
@@ -657,6 +665,7 @@ static struct resource wm831x_dcdc2_resources[] = {
 
 static struct resource wm831x_dcdc3_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_DC3_CONTROL_1,
                .end   = WM831X_DC3_SLEEP_CONTROL,
                .flags = IORESOURCE_IO,
@@ -671,6 +680,7 @@ static struct resource wm831x_dcdc3_resources[] = {
 
 static struct resource wm831x_dcdc4_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_DC4_CONTROL,
                .end   = WM831X_DC4_SLEEP_CONTROL,
                .flags = IORESOURCE_IO,
@@ -685,6 +695,7 @@ static struct resource wm831x_dcdc4_resources[] = {
 
 static struct resource wm8320_dcdc4_buck_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_DC4_CONTROL,
                .end   = WM832X_DC4_SLEEP_CONTROL,
                .flags = IORESOURCE_IO,
@@ -707,6 +718,7 @@ static struct resource wm831x_gpio_resources[] = {
 
 static struct resource wm831x_isink1_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_CURRENT_SINK_1,
                .end   = WM831X_CURRENT_SINK_1,
                .flags = IORESOURCE_IO,
@@ -720,6 +732,7 @@ static struct resource wm831x_isink1_resources[] = {
 
 static struct resource wm831x_isink2_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_CURRENT_SINK_2,
                .end   = WM831X_CURRENT_SINK_2,
                .flags = IORESOURCE_IO,
@@ -733,6 +746,7 @@ static struct resource wm831x_isink2_resources[] = {
 
 static struct resource wm831x_ldo1_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_LDO1_CONTROL,
                .end   = WM831X_LDO1_SLEEP_CONTROL,
                .flags = IORESOURCE_IO,
@@ -747,6 +761,7 @@ static struct resource wm831x_ldo1_resources[] = {
 
 static struct resource wm831x_ldo2_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_LDO2_CONTROL,
                .end   = WM831X_LDO2_SLEEP_CONTROL,
                .flags = IORESOURCE_IO,
@@ -761,6 +776,7 @@ static struct resource wm831x_ldo2_resources[] = {
 
 static struct resource wm831x_ldo3_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_LDO3_CONTROL,
                .end   = WM831X_LDO3_SLEEP_CONTROL,
                .flags = IORESOURCE_IO,
@@ -775,6 +791,7 @@ static struct resource wm831x_ldo3_resources[] = {
 
 static struct resource wm831x_ldo4_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_LDO4_CONTROL,
                .end   = WM831X_LDO4_SLEEP_CONTROL,
                .flags = IORESOURCE_IO,
@@ -789,6 +806,7 @@ static struct resource wm831x_ldo4_resources[] = {
 
 static struct resource wm831x_ldo5_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_LDO5_CONTROL,
                .end   = WM831X_LDO5_SLEEP_CONTROL,
                .flags = IORESOURCE_IO,
@@ -803,6 +821,7 @@ static struct resource wm831x_ldo5_resources[] = {
 
 static struct resource wm831x_ldo6_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_LDO6_CONTROL,
                .end   = WM831X_LDO6_SLEEP_CONTROL,
                .flags = IORESOURCE_IO,
@@ -817,6 +836,7 @@ static struct resource wm831x_ldo6_resources[] = {
 
 static struct resource wm831x_ldo7_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_LDO7_CONTROL,
                .end   = WM831X_LDO7_SLEEP_CONTROL,
                .flags = IORESOURCE_IO,
@@ -831,6 +851,7 @@ static struct resource wm831x_ldo7_resources[] = {
 
 static struct resource wm831x_ldo8_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_LDO8_CONTROL,
                .end   = WM831X_LDO8_SLEEP_CONTROL,
                .flags = IORESOURCE_IO,
@@ -845,6 +866,7 @@ static struct resource wm831x_ldo8_resources[] = {
 
 static struct resource wm831x_ldo9_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_LDO9_CONTROL,
                .end   = WM831X_LDO9_SLEEP_CONTROL,
                .flags = IORESOURCE_IO,
@@ -859,6 +881,7 @@ static struct resource wm831x_ldo9_resources[] = {
 
 static struct resource wm831x_ldo10_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_LDO10_CONTROL,
                .end   = WM831X_LDO10_SLEEP_CONTROL,
                .flags = IORESOURCE_IO,
@@ -873,6 +896,7 @@ static struct resource wm831x_ldo10_resources[] = {
 
 static struct resource wm831x_ldo11_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_LDO11_ON_CONTROL,
                .end   = WM831X_LDO11_SLEEP_CONTROL,
                .flags = IORESOURCE_IO,
@@ -974,6 +998,7 @@ static struct resource wm831x_rtc_resources[] = {
 
 static struct resource wm831x_status1_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_STATUS_LED_1,
                .end   = WM831X_STATUS_LED_1,
                .flags = IORESOURCE_IO,
@@ -982,6 +1007,7 @@ static struct resource wm831x_status1_resources[] = {
 
 static struct resource wm831x_status2_resources[] = {
        {
+               .parent = &wm831x_io_parent,
                .start = WM831X_STATUS_LED_2,
                .end   = WM831X_STATUS_LED_2,
                .flags = IORESOURCE_IO,
@@ -1787,27 +1813,27 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
        case WM8310:
                ret = mfd_add_devices(wm831x->dev, wm831x_num,
                                      wm8310_devs, ARRAY_SIZE(wm8310_devs),
-                                     NULL, wm831x->irq_base);
+                                     NULL, 0);
                break;
 
        case WM8311:
                ret = mfd_add_devices(wm831x->dev, wm831x_num,
                                      wm8311_devs, ARRAY_SIZE(wm8311_devs),
-                                     NULL, wm831x->irq_base);
+                                     NULL, 0);
                if (!pdata || !pdata->disable_touch)
                        mfd_add_devices(wm831x->dev, wm831x_num,
                                        touch_devs, ARRAY_SIZE(touch_devs),
-                                       NULL, wm831x->irq_base);
+                                       NULL, 0);
                break;
 
        case WM8312:
                ret = mfd_add_devices(wm831x->dev, wm831x_num,
                                      wm8312_devs, ARRAY_SIZE(wm8312_devs),
-                                     NULL, wm831x->irq_base);
+                                     NULL, 0);
                if (!pdata || !pdata->disable_touch)
                        mfd_add_devices(wm831x->dev, wm831x_num,
                                        touch_devs, ARRAY_SIZE(touch_devs),
-                                       NULL, wm831x->irq_base);
+                                       NULL, 0);
                break;
 
        case WM8320:
@@ -1816,7 +1842,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
        case WM8326:
                ret = mfd_add_devices(wm831x->dev, wm831x_num,
                                      wm8320_devs, ARRAY_SIZE(wm8320_devs),
-                                     NULL, wm831x->irq_base);
+                                     NULL, 0);
                break;
 
        default:
@@ -1841,7 +1867,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
        if (ret & WM831X_XTAL_ENA) {
                ret = mfd_add_devices(wm831x->dev, wm831x_num,
                                      rtc_devs, ARRAY_SIZE(rtc_devs),
-                                     NULL, wm831x->irq_base);
+                                     NULL, 0);
                if (ret != 0) {
                        dev_err(wm831x->dev, "Failed to add RTC: %d\n", ret);
                        goto err_irq;
@@ -1854,7 +1880,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
                /* Treat errors as non-critical */
                ret = mfd_add_devices(wm831x->dev, wm831x_num, backlight_devs,
                                      ARRAY_SIZE(backlight_devs), NULL,
-                                     wm831x->irq_base);
+                                     0);
                if (ret < 0)
                        dev_err(wm831x->dev, "Failed to add backlight: %d\n",
                                ret);
@@ -1883,8 +1909,7 @@ void wm831x_device_exit(struct wm831x *wm831x)
 {
        wm831x_otp_exit(wm831x);
        mfd_remove_devices(wm831x->dev);
-       if (wm831x->irq_base)
-               free_irq(wm831x->irq_base + WM831X_IRQ_AUXADC_DATA, wm831x);
+       free_irq(wm831x_irq(wm831x, WM831X_IRQ_AUXADC_DATA), wm831x);
        wm831x_irq_exit(wm831x);
 }
 
index bec4d053916012a696442fff39b847be647b9db0..804e56ec99eb284f4c32e1f44f8b7a143d4f340c 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/irq.h>
 #include <linux/mfd/core.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 
 #include <linux/mfd/wm831x/core.h>
 #include <linux/mfd/wm831x/pdata.h>
@@ -328,7 +329,7 @@ static inline int irq_data_to_status_reg(struct wm831x_irq_data *irq_data)
 static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x,
                                                        int irq)
 {
-       return &wm831x_irqs[irq - wm831x->irq_base];
+       return &wm831x_irqs[irq];
 }
 
 static void wm831x_irq_lock(struct irq_data *data)
@@ -374,7 +375,7 @@ static void wm831x_irq_enable(struct irq_data *data)
 {
        struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
        struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
-                                                            data->irq);
+                                                            data->hwirq);
 
        wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
 }
@@ -383,7 +384,7 @@ static void wm831x_irq_disable(struct irq_data *data)
 {
        struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
        struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
-                                                            data->irq);
+                                                            data->hwirq);
 
        wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
 }
@@ -393,7 +394,7 @@ static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
        struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
        int irq;
 
-       irq = data->irq - wm831x->irq_base;
+       irq = data->hwirq;
 
        if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
                /* Ignore internal-only IRQs */
@@ -412,22 +413,25 @@ static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
         * do the update here as we can be called with the bus lock
         * held.
         */
+       wm831x->gpio_level_low[irq] = false;
+       wm831x->gpio_level_high[irq] = false;
        switch (type) {
        case IRQ_TYPE_EDGE_BOTH:
                wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_INT_MODE;
-               wm831x->gpio_level[irq] = false;
                break;
        case IRQ_TYPE_EDGE_RISING:
                wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
-               wm831x->gpio_level[irq] = false;
                break;
        case IRQ_TYPE_EDGE_FALLING:
                wm831x->gpio_update[irq] = 0x10000;
-               wm831x->gpio_level[irq] = false;
                break;
        case IRQ_TYPE_LEVEL_HIGH:
                wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
-               wm831x->gpio_level[irq] = true;
+               wm831x->gpio_level_high[irq] = true;
+               break;
+       case IRQ_TYPE_LEVEL_LOW:
+               wm831x->gpio_update[irq] = 0x10000;
+               wm831x->gpio_level_low[irq] = true;
                break;
        default:
                return -EINVAL;
@@ -469,9 +473,11 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
         * descriptors.
         */
        if (primary & WM831X_TCHPD_INT)
-               handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHPD);
+               handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+                                                  WM831X_IRQ_TCHPD));
        if (primary & WM831X_TCHDATA_INT)
-               handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHDATA);
+               handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+                                                  WM831X_IRQ_TCHDATA));
        primary &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT);
 
        for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) {
@@ -507,16 +513,29 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
                }
 
                if (*status & wm831x_irqs[i].mask)
-                       handle_nested_irq(wm831x->irq_base + i);
+                       handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+                                                          i));
 
                /* Simulate an edge triggered IRQ by polling the input
                 * status.  This is sucky but improves interoperability.
                 */
                if (primary == WM831X_GP_INT &&
-                   wm831x->gpio_level[i - WM831X_IRQ_GPIO_1]) {
+                   wm831x->gpio_level_high[i - WM831X_IRQ_GPIO_1]) {
                        ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
                        while (ret & 1 << (i - WM831X_IRQ_GPIO_1)) {
-                               handle_nested_irq(wm831x->irq_base + i);
+                               handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+                                                                  i));
+                               ret = wm831x_reg_read(wm831x,
+                                                     WM831X_GPIO_LEVEL);
+                       }
+               }
+
+               if (primary == WM831X_GP_INT &&
+                   wm831x->gpio_level_low[i - WM831X_IRQ_GPIO_1]) {
+                       ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
+                       while (!(ret & 1 << (i - WM831X_IRQ_GPIO_1))) {
+                               handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+                                                                  i));
                                ret = wm831x_reg_read(wm831x,
                                                      WM831X_GPIO_LEVEL);
                        }
@@ -527,10 +546,34 @@ out:
        return IRQ_HANDLED;
 }
 
+static int wm831x_irq_map(struct irq_domain *h, unsigned int virq,
+                         irq_hw_number_t hw)
+{
+       irq_set_chip_data(virq, h->host_data);
+       irq_set_chip_and_handler(virq, &wm831x_irq_chip, handle_edge_irq);
+       irq_set_nested_thread(virq, 1);
+
+       /* ARM needs us to explicitly flag the IRQ as valid
+        * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+       set_irq_flags(virq, IRQF_VALID);
+#else
+       irq_set_noprobe(virq);
+#endif
+
+       return 0;
+}
+
+static struct irq_domain_ops wm831x_irq_domain_ops = {
+       .map    = wm831x_irq_map,
+       .xlate  = irq_domain_xlate_twocell,
+};
+
 int wm831x_irq_init(struct wm831x *wm831x, int irq)
 {
        struct wm831x_pdata *pdata = wm831x->dev->platform_data;
-       int i, cur_irq, ret;
+       struct irq_domain *domain;
+       int i, ret, irq_base;
 
        mutex_init(&wm831x->irq_lock);
 
@@ -543,18 +586,33 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
        }
 
        /* Try to dynamically allocate IRQs if no base is specified */
-       if (!pdata || !pdata->irq_base)
-               wm831x->irq_base = -1;
+       if (pdata && pdata->irq_base) {
+               irq_base = irq_alloc_descs(pdata->irq_base, 0,
+                                          WM831X_NUM_IRQS, 0);
+               if (irq_base < 0) {
+                       dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
+                                irq_base);
+                       irq_base = 0;
+               }
+       } else {
+               irq_base = 0;
+       }
+
+       if (irq_base)
+               domain = irq_domain_add_legacy(wm831x->dev->of_node,
+                                              ARRAY_SIZE(wm831x_irqs),
+                                              irq_base, 0,
+                                              &wm831x_irq_domain_ops,
+                                              wm831x);
        else
-               wm831x->irq_base = pdata->irq_base;
+               domain = irq_domain_add_linear(wm831x->dev->of_node,
+                                              ARRAY_SIZE(wm831x_irqs),
+                                              &wm831x_irq_domain_ops,
+                                              wm831x);
 
-       wm831x->irq_base = irq_alloc_descs(wm831x->irq_base, 0,
-                                          WM831X_NUM_IRQS, 0);
-       if (wm831x->irq_base < 0) {
-               dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
-                        wm831x->irq_base);
-               wm831x->irq_base = 0;
-               return 0;
+       if (!domain) {
+               dev_warn(wm831x->dev, "Failed to allocate IRQ domain\n");
+               return -EINVAL;
        }
 
        if (pdata && pdata->irq_cmos)
@@ -565,38 +623,22 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
        wm831x_set_bits(wm831x, WM831X_IRQ_CONFIG,
                        WM831X_IRQ_OD, i);
 
-       /* Try to flag /IRQ as a wake source; there are a number of
-        * unconditional wake sources in the PMIC so this isn't
-        * conditional but we don't actually care *too* much if it
-        * fails.
-        */
-       ret = enable_irq_wake(irq);
-       if (ret != 0) {
-               dev_warn(wm831x->dev, "Can't enable IRQ as wake source: %d\n",
-                        ret);
-       }
-
        wm831x->irq = irq;
-
-       /* Register them with genirq */
-       for (cur_irq = wm831x->irq_base;
-            cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base;
-            cur_irq++) {
-               irq_set_chip_data(cur_irq, wm831x);
-               irq_set_chip_and_handler(cur_irq, &wm831x_irq_chip,
-                                        handle_edge_irq);
-               irq_set_nested_thread(cur_irq, 1);
-
-               /* ARM needs us to explicitly flag the IRQ as valid
-                * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-               set_irq_flags(cur_irq, IRQF_VALID);
-#else
-               irq_set_noprobe(cur_irq);
-#endif
-       }
+       wm831x->irq_domain = domain;
 
        if (irq) {
+               /* Try to flag /IRQ as a wake source; there are a number of
+                * unconditional wake sources in the PMIC so this isn't
+                * conditional but we don't actually care *too* much if it
+                * fails.
+                */
+               ret = enable_irq_wake(irq);
+               if (ret != 0) {
+                       dev_warn(wm831x->dev,
+                                "Can't enable IRQ as wake source: %d\n",
+                                ret);
+               }
+
                ret = request_threaded_irq(irq, NULL, wm831x_irq_thread,
                                           IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                           "wm831x", wm831x);
index dd1caaac55e4d4138ccc69b19997793418a76e7d..8a9b11ca076ac3c528e914b52dd78d958b714fae 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/regmap.h>
 #include <linux/workqueue.h>
 
 #include <linux/mfd/wm8350/core.h>
@@ -74,7 +75,7 @@ static int wm8350_phys_read(struct wm8350 *wm8350, u8 reg, int num_regs,
        int bytes = num_regs * 2;
 
        dev_dbg(wm8350->dev, "volatile read\n");
-       ret = wm8350->read_dev(wm8350, reg, bytes, (char *)dest);
+       ret = regmap_raw_read(wm8350->regmap, reg, dest, bytes);
 
        for (i = reg; i < reg + num_regs; i++) {
                /* Cache is CPU endian */
@@ -96,9 +97,6 @@ static int wm8350_read(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *dest)
        int ret = 0;
        int bytes = num_regs * 2;
 
-       if (wm8350->read_dev == NULL)
-               return -ENODEV;
-
        if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) {
                dev_err(wm8350->dev, "invalid reg %x\n",
                        reg + num_regs - 1);
@@ -149,9 +147,6 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
        int end = reg + num_regs;
        int bytes = num_regs * 2;
 
-       if (wm8350->write_dev == NULL)
-               return -ENODEV;
-
        if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) {
                dev_err(wm8350->dev, "invalid reg %x\n",
                        reg + num_regs - 1);
@@ -182,7 +177,7 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
        }
 
        /* Actually write it out */
-       return wm8350->write_dev(wm8350, reg, bytes, (char *)src);
+       return regmap_raw_write(wm8350->regmap, reg, src, bytes);
 }
 
 /*
@@ -515,9 +510,8 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int type, int mode)
         * a PMIC so the device many not be in a virgin state and we
         * can't rely on the silicon values.
         */
-       ret = wm8350->read_dev(wm8350, 0,
-                              sizeof(u16) * (WM8350_MAX_REGISTER + 1),
-                              wm8350->reg_cache);
+       ret = regmap_raw_read(wm8350->regmap, 0, wm8350->reg_cache,
+                             sizeof(u16) * (WM8350_MAX_REGISTER + 1));
        if (ret < 0) {
                dev_err(wm8350->dev,
                        "failed to read initial cache values\n");
@@ -570,35 +564,30 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
                       struct wm8350_platform_data *pdata)
 {
        int ret;
-       u16 id1, id2, mask_rev;
-       u16 cust_id, mode, chip_rev;
+       unsigned int id1, id2, mask_rev;
+       unsigned int cust_id, mode, chip_rev;
 
        dev_set_drvdata(wm8350->dev, wm8350);
 
        /* get WM8350 revision and config mode */
-       ret = wm8350->read_dev(wm8350, WM8350_RESET_ID, sizeof(id1), &id1);
+       ret = regmap_read(wm8350->regmap, WM8350_RESET_ID, &id1);
        if (ret != 0) {
                dev_err(wm8350->dev, "Failed to read ID: %d\n", ret);
                goto err;
        }
 
-       ret = wm8350->read_dev(wm8350, WM8350_ID, sizeof(id2), &id2);
+       ret = regmap_read(wm8350->regmap, WM8350_ID, &id2);
        if (ret != 0) {
                dev_err(wm8350->dev, "Failed to read ID: %d\n", ret);
                goto err;
        }
 
-       ret = wm8350->read_dev(wm8350, WM8350_REVISION, sizeof(mask_rev),
-                              &mask_rev);
+       ret = regmap_read(wm8350->regmap, WM8350_REVISION, &mask_rev);
        if (ret != 0) {
                dev_err(wm8350->dev, "Failed to read revision: %d\n", ret);
                goto err;
        }
 
-       id1 = be16_to_cpu(id1);
-       id2 = be16_to_cpu(id2);
-       mask_rev = be16_to_cpu(mask_rev);
-
        if (id1 != 0x6143) {
                dev_err(wm8350->dev,
                        "Device with ID %x is not a WM8350\n", id1);
index d955faaf27c4a8f824301ae5e3e34998f4333bf0..a68aceb4e48c880fbf7b79e64459762241eeaa29 100644 (file)
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/err.h>
 #include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/wm8350/core.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
 
-static int wm8350_i2c_read_device(struct wm8350 *wm8350, char reg,
-                                 int bytes, void *dest)
-{
-       int ret;
-
-       ret = i2c_master_send(wm8350->i2c_client, &reg, 1);
-       if (ret < 0)
-               return ret;
-       ret = i2c_master_recv(wm8350->i2c_client, dest, bytes);
-       if (ret < 0)
-               return ret;
-       if (ret != bytes)
-               return -EIO;
-       return 0;
-}
-
-static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
-                                  int bytes, void *src)
-{
-       /* we add 1 byte for device register */
-       u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
-       int ret;
-
-       if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
-               return -EINVAL;
-
-       msg[0] = reg;
-       memcpy(&msg[1], src, bytes);
-       ret = i2c_master_send(wm8350->i2c_client, msg, bytes + 1);
-       if (ret < 0)
-               return ret;
-       if (ret != bytes + 1)
-               return -EIO;
-       return 0;
-}
+static const struct regmap_config wm8350_regmap = {
+       .reg_bits = 8,
+       .val_bits = 16,
+};
 
 static int wm8350_i2c_probe(struct i2c_client *i2c,
                            const struct i2c_device_id *id)
@@ -67,20 +38,18 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
        if (wm8350 == NULL)
                return -ENOMEM;
 
+       wm8350->regmap = devm_regmap_init_i2c(i2c, &wm8350_regmap);
+       if (IS_ERR(wm8350->regmap)) {
+               ret = PTR_ERR(wm8350->regmap);
+               dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+                       ret);
+               return ret;
+       }
+
        i2c_set_clientdata(i2c, wm8350);
        wm8350->dev = &i2c->dev;
-       wm8350->i2c_client = i2c;
-       wm8350->read_dev = wm8350_i2c_read_device;
-       wm8350->write_dev = wm8350_i2c_write_device;
-
-       ret = wm8350_device_init(wm8350, i2c->irq, i2c->dev.platform_data);
-       if (ret < 0)
-               goto err;
-
-       return ret;
 
-err:
-       return ret;
+       return wm8350_device_init(wm8350, i2c->irq, i2c->dev.platform_data);
 }
 
 static int wm8350_i2c_remove(struct i2c_client *i2c)
index 1189a17f0f25f6362b5d97c2ff65e8d7cd685c5f..4b7d378551d58daf515532dbcaea2c6f35c17f1e 100644 (file)
 #include <linux/regmap.h>
 #include <linux/slab.h>
 
-static struct {
-       u16  readable;    /* Mask of readable bits */
-       u16  writable;    /* Mask of writable bits */
-       u16  vol;         /* Mask of volatile bits */
-       int  is_codec;    /* Register controlled by codec reset */
-       u16  default_val; /* Value on reset */
-} reg_data[] = {
-       { 0xFFFF, 0xFFFF, 0x0000, 0, 0x6172 }, /* R0 */
-       { 0x7000, 0x0000, 0x8000, 0, 0x0000 }, /* R1 */
-       { 0xFF17, 0xFF17, 0x0000, 0, 0x0000 }, /* R2 */
-       { 0xEBF3, 0xEBF3, 0x0000, 1, 0x6000 }, /* R3 */
-       { 0x3CF3, 0x3CF3, 0x0000, 1, 0x0000 }, /* R4  */
-       { 0xF1F8, 0xF1F8, 0x0000, 1, 0x4050 }, /* R5  */
-       { 0xFC1F, 0xFC1F, 0x0000, 1, 0x4000 }, /* R6  */
-       { 0xDFDE, 0xDFDE, 0x0000, 1, 0x01C8 }, /* R7  */
-       { 0xFCFC, 0xFCFC, 0x0000, 1, 0x0000 }, /* R8  */
-       { 0xEFFF, 0xEFFF, 0x0000, 1, 0x0040 }, /* R9  */
-       { 0xEFFF, 0xEFFF, 0x0000, 1, 0x0040 }, /* R10 */
-       { 0x27F7, 0x27F7, 0x0000, 1, 0x0004 }, /* R11 */
-       { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R12 */
-       { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R13 */
-       { 0x1FEF, 0x1FEF, 0x0000, 1, 0x0000 }, /* R14 */
-       { 0x0163, 0x0163, 0x0000, 1, 0x0100 }, /* R15 */
-       { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R16 */
-       { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R17 */
-       { 0x1FFF, 0x0FFF, 0x0000, 1, 0x0000 }, /* R18 */
-       { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1000 }, /* R19 */
-       { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1010 }, /* R20 */
-       { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1010 }, /* R21 */
-       { 0x0FDD, 0x0FDD, 0x0000, 1, 0x8000 }, /* R22 */
-       { 0x1FFF, 0x1FFF, 0x0000, 1, 0x0800 }, /* R23 */
-       { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R24 */
-       { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R25 */
-       { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R26 */
-       { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R27 */
-       { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R28 */
-       { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R29 */
-       { 0x0000, 0x0077, 0x0000, 1, 0x0066 }, /* R30 */
-       { 0x0000, 0x0033, 0x0000, 1, 0x0022 }, /* R31 */
-       { 0x0000, 0x01FF, 0x0000, 1, 0x0079 }, /* R32 */
-       { 0x0000, 0x01FF, 0x0000, 1, 0x0079 }, /* R33 */
-       { 0x0000, 0x0003, 0x0000, 1, 0x0003 }, /* R34 */
-       { 0x0000, 0x01FF, 0x0000, 1, 0x0003 }, /* R35 */
-       { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R36 */
-       { 0x0000, 0x003F, 0x0000, 1, 0x0100 }, /* R37 */
-       { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R38 */
-       { 0x0000, 0x000F, 0x0000, 0, 0x0000 }, /* R39 */
-       { 0x0000, 0x00FF, 0x0000, 1, 0x0000 }, /* R40 */
-       { 0x0000, 0x01B7, 0x0000, 1, 0x0000 }, /* R41 */
-       { 0x0000, 0x01B7, 0x0000, 1, 0x0000 }, /* R42 */
-       { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R43 */
-       { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R44 */
-       { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R45 */
-       { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R46 */
-       { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R47 */
-       { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R48 */
-       { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R49 */
-       { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R50 */
-       { 0x0000, 0x01B3, 0x0000, 1, 0x0180 }, /* R51 */
-       { 0x0000, 0x0077, 0x0000, 1, 0x0000 }, /* R52 */
-       { 0x0000, 0x0077, 0x0000, 1, 0x0000 }, /* R53 */
-       { 0x0000, 0x00FF, 0x0000, 1, 0x0000 }, /* R54 */
-       { 0x0000, 0x0001, 0x0000, 1, 0x0000 }, /* R55 */
-       { 0x0000, 0x003F, 0x0000, 1, 0x0000 }, /* R56 */
-       { 0x0000, 0x004F, 0x0000, 1, 0x0000 }, /* R57 */
-       { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R58 */
-       { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R59 */
-       { 0x1FFF, 0x1FFF, 0x0000, 1, 0x0000 }, /* R60 */
-       { 0xFFFF, 0xFFFF, 0x0000, 1, 0x0000 }, /* R61 */
-       { 0x03FF, 0x03FF, 0x0000, 1, 0x0000 }, /* R62 */
-       { 0x007F, 0x007F, 0x0000, 1, 0x0000 }, /* R63 */
-       { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R64 */
-       { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R65 */
-       { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R66 */
-       { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R67 */
-       { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R68 */
-       { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R69 */
-       { 0xFFFF, 0xFFFF, 0x0000, 0, 0x4400 }, /* R70 */
-       { 0x23FF, 0x23FF, 0x0000, 0, 0x0000 }, /* R71 */
-       { 0xFFFF, 0xFFFF, 0x0000, 0, 0x4400 }, /* R72 */
-       { 0x23FF, 0x23FF, 0x0000, 0, 0x0000 }, /* R73 */
-       { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R74 */
-       { 0x000E, 0x000E, 0x0000, 0, 0x0008 }, /* R75 */
-       { 0xE00F, 0xE00F, 0x0000, 0, 0x0000 }, /* R76 */
-       { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R77 */
-       { 0x03C0, 0x03C0, 0x0000, 0, 0x02C0 }, /* R78 */
-       { 0xFFFF, 0x0000, 0xffff, 0, 0x0000 }, /* R79 */
-       { 0xFFFF, 0xFFFF, 0x0000, 0, 0x0000 }, /* R80 */
-       { 0xFFFF, 0x0000, 0xffff, 0, 0x0000 }, /* R81 */
-       { 0x2BFF, 0x0000, 0xffff, 0, 0x0000 }, /* R82 */
-       { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R83 */
-       { 0x80FF, 0x80FF, 0x0000, 0, 0x00ff }, /* R84 */
-};
-
-static int wm8400_read(struct wm8400 *wm8400, u8 reg, int num_regs, u16 *dest)
+static bool wm8400_volatile(struct device *dev, unsigned int reg)
 {
-       int i, ret = 0;
-
-       BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
-
-       /* If there are any volatile reads then read back the entire block */
-       for (i = reg; i < reg + num_regs; i++)
-               if (reg_data[i].vol) {
-                       ret = regmap_bulk_read(wm8400->regmap, reg, dest,
-                                              num_regs);
-                       return ret;
-               }
-
-       /* Otherwise use the cache */
-       memcpy(dest, &wm8400->reg_cache[reg], num_regs * sizeof(u16));
-
-       return 0;
-}
-
-static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
-                       u16 *src)
-{
-       int ret, i;
-
-       BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
-
-       for (i = 0; i < num_regs; i++) {
-               BUG_ON(!reg_data[reg + i].writable);
-               wm8400->reg_cache[reg + i] = src[i];
-               ret = regmap_write(wm8400->regmap, reg, src[i]);
-               if (ret != 0)
-                       return ret;
+       switch (reg) {
+       case WM8400_INTERRUPT_STATUS_1:
+       case WM8400_INTERRUPT_LEVELS:
+       case WM8400_SHUTDOWN_REASON:
+               return true;
+       default:
+               return false;
        }
-
-       return 0;
 }
 
 /**
@@ -165,13 +45,12 @@ static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
  */
 u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg)
 {
-       u16 val;
-
-       mutex_lock(&wm8400->io_lock);
-
-       wm8400_read(wm8400, reg, 1, &val);
+       unsigned int val;
+       int ret;
 
-       mutex_unlock(&wm8400->io_lock);
+       ret = regmap_read(wm8400->regmap, reg, &val);
+       if (ret < 0)
+               return ret;
 
        return val;
 }
@@ -179,63 +58,10 @@ EXPORT_SYMBOL_GPL(wm8400_reg_read);
 
 int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data)
 {
-       int ret;
-
-       mutex_lock(&wm8400->io_lock);
-
-       ret = wm8400_read(wm8400, reg, count, data);
-
-       mutex_unlock(&wm8400->io_lock);
-
-       return ret;
+       return regmap_bulk_read(wm8400->regmap, reg, data, count);
 }
 EXPORT_SYMBOL_GPL(wm8400_block_read);
 
-/**
- * wm8400_set_bits - Bitmask write
- *
- * @wm8400: Pointer to wm8400 control structure
- * @reg:    Register to access
- * @mask:   Mask of bits to change
- * @val:    Value to set for masked bits
- */
-int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, u16 mask, u16 val)
-{
-       u16 tmp;
-       int ret;
-
-       mutex_lock(&wm8400->io_lock);
-
-       ret = wm8400_read(wm8400, reg, 1, &tmp);
-       tmp = (tmp & ~mask) | val;
-       if (ret == 0)
-               ret = wm8400_write(wm8400, reg, 1, &tmp);
-
-       mutex_unlock(&wm8400->io_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(wm8400_set_bits);
-
-/**
- * wm8400_reset_codec_reg_cache - Reset cached codec registers to
- * their default values.
- */
-void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
-{
-       int i;
-
-       mutex_lock(&wm8400->io_lock);
-
-       /* Reset all codec registers to their initial value */
-       for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
-               if (reg_data[i].is_codec)
-                       wm8400->reg_cache[i] = reg_data[i].default_val;
-
-       mutex_unlock(&wm8400->io_lock);
-}
-EXPORT_SYMBOL_GPL(wm8400_reset_codec_reg_cache);
-
 static int wm8400_register_codec(struct wm8400 *wm8400)
 {
        struct mfd_cell cell = {
@@ -257,44 +83,24 @@ static int wm8400_register_codec(struct wm8400 *wm8400)
 static int wm8400_init(struct wm8400 *wm8400,
                       struct wm8400_platform_data *pdata)
 {
-       u16 reg;
-       int ret, i;
-
-       mutex_init(&wm8400->io_lock);
+       unsigned int reg;
+       int ret;
 
        dev_set_drvdata(wm8400->dev, wm8400);
 
        /* Check that this is actually a WM8400 */
-       ret = regmap_read(wm8400->regmap, WM8400_RESET_ID, &i);
+       ret = regmap_read(wm8400->regmap, WM8400_RESET_ID, &reg);
        if (ret != 0) {
                dev_err(wm8400->dev, "Chip ID register read failed\n");
                return -EIO;
        }
-       if (i != reg_data[WM8400_RESET_ID].default_val) {
-               dev_err(wm8400->dev, "Device is not a WM8400, ID is %x\n", i);
+       if (reg != 0x6172) {
+               dev_err(wm8400->dev, "Device is not a WM8400, ID is %x\n",
+                       reg);
                return -ENODEV;
        }
 
-       /* We don't know what state the hardware is in and since this
-        * is a PMIC we can't reset it safely so initialise the register
-        * cache from the hardware.
-        */
-       ret = regmap_raw_read(wm8400->regmap, 0, wm8400->reg_cache,
-                             ARRAY_SIZE(wm8400->reg_cache));
-       if (ret != 0) {
-               dev_err(wm8400->dev, "Register cache read failed\n");
-               return -EIO;
-       }
-       for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
-               wm8400->reg_cache[i] = be16_to_cpu(wm8400->reg_cache[i]);
-
-       /* If the codec is in reset use hard coded values */
-       if (!(wm8400->reg_cache[WM8400_POWER_MANAGEMENT_1] & WM8400_CODEC_ENA))
-               for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
-                       if (reg_data[i].is_codec)
-                               wm8400->reg_cache[i] = reg_data[i].default_val;
-
-       ret = wm8400_read(wm8400, WM8400_ID, 1, &reg);
+       ret = regmap_read(wm8400->regmap, WM8400_ID, &reg);
        if (ret != 0) {
                dev_err(wm8400->dev, "ID register read failed: %d\n", ret);
                return ret;
@@ -334,8 +140,22 @@ static const struct regmap_config wm8400_regmap_config = {
        .reg_bits = 8,
        .val_bits = 16,
        .max_register = WM8400_REGISTER_COUNT - 1,
+
+       .volatile_reg = wm8400_volatile,
+
+       .cache_type = REGCACHE_RBTREE,
 };
 
+/**
+ * wm8400_reset_codec_reg_cache - Reset cached codec registers to
+ * their default values.
+ */
+void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
+{
+       regmap_reinit_cache(wm8400->regmap, &wm8400_regmap_config);
+}
+EXPORT_SYMBOL_GPL(wm8400_reset_codec_reg_cache);
+
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
 static int wm8400_i2c_probe(struct i2c_client *i2c,
                            const struct i2c_device_id *id)
index 9d7ca1e978fad30775d1de8b6913333a137118c6..1e321d349777199dad0bdd050288f72af60917a6 100644 (file)
@@ -500,7 +500,8 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
                        ret);
                goto err_enable;
        }
-       wm8994->revision = ret;
+       wm8994->revision = ret & WM8994_CHIP_REV_MASK;
+       wm8994->cust_id = (ret & WM8994_CUST_ID_MASK) >> WM8994_CUST_ID_SHIFT;
 
        switch (wm8994->type) {
        case WM8994:
@@ -553,8 +554,8 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
                break;
        }
 
-       dev_info(wm8994->dev, "%s revision %c\n", devname,
-                'A' + wm8994->revision);
+       dev_info(wm8994->dev, "%s revision %c CUST_ID %02x\n", devname,
+                'A' + wm8994->revision, wm8994->cust_id);
 
        switch (wm8994->type) {
        case WM1811:
@@ -732,23 +733,7 @@ static struct i2c_driver wm8994_i2c_driver = {
        .id_table = wm8994_i2c_id,
 };
 
-static int __init wm8994_i2c_init(void)
-{
-       int ret;
-
-       ret = i2c_add_driver(&wm8994_i2c_driver);
-       if (ret != 0)
-               pr_err("Failed to register wm8994 I2C driver: %d\n", ret);
-
-       return ret;
-}
-module_init(wm8994_i2c_init);
-
-static void __exit wm8994_i2c_exit(void)
-{
-       i2c_del_driver(&wm8994_i2c_driver);
-}
-module_exit(wm8994_i2c_exit);
+module_i2c_driver(wm8994_i2c_driver);
 
 MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC");
 MODULE_LICENSE("GPL");
index bfd25af6ecb106e75eddb4cdc0afad561238506f..52e9e29449403b1d5b21dc215973f7e4b8de41e6 100644 (file)
@@ -1122,7 +1122,6 @@ static bool wm8994_volatile_register(struct device *dev, unsigned int reg)
        case WM8994_RATE_STATUS:
        case WM8958_MIC_DETECT_3:
        case WM8994_DC_SERVO_4E:
-       case WM8994_CHIP_REVISION:
        case WM8994_INTERRUPT_STATUS_1:
        case WM8994_INTERRUPT_STATUS_2:
                return true;
index d7a9aa14e5d5aafd8c0efc907b127147a202d197..042a8fe4efaabd2ab57db4299275c19750cad23d 100644 (file)
@@ -142,10 +142,16 @@ static int __devexit ab8500_pwm_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id ab8500_pwm_match[] = {
+       { .compatible = "stericsson,ab8500-pwm", },
+       {}
+};
+
 static struct platform_driver ab8500_pwm_driver = {
        .driver = {
                .name = "ab8500-pwm",
                .owner = THIS_MODULE,
+               .of_match_table = ab8500_pwm_match,
        },
        .probe = ab8500_pwm_probe,
        .remove = __devexit_p(ab8500_pwm_remove),
index dabec556ebb87421f04372ecf1452b645629ab17..dd2d374dcc7aa43363a5366350f79c6a17e81a70 100644 (file)
@@ -384,7 +384,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
        md = mmc_blk_get(bdev->bd_disk);
        if (!md) {
                err = -EINVAL;
-               goto cmd_done;
+               goto cmd_err;
        }
 
        card = md->queue.card;
@@ -483,6 +483,7 @@ cmd_rel_host:
 
 cmd_done:
        mmc_blk_put(md);
+cmd_err:
        kfree(idata->buf);
        kfree(idata);
        return err;
@@ -1283,7 +1284,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
        int ret = 1, disable_multi = 0, retry = 0, type;
        enum mmc_blk_status status;
        struct mmc_queue_req *mq_rq;
-       struct request *req;
+       struct request *req = rqc;
        struct mmc_async_req *areq;
 
        if (!rqc && !mq->mqrq_prev->req)
@@ -1291,6 +1292,16 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 
        do {
                if (rqc) {
+                       /*
+                        * When 4KB native sector is enabled, only 8 blocks
+                        * multiple read or write is allowed
+                        */
+                       if ((brq->data.blocks & 0x07) &&
+                           (card->ext_csd.data_sector_size == 4096)) {
+                               pr_err("%s: Transfer size is not 4KB sector size aligned\n",
+                                       req->rq_disk->disk_name);
+                               goto cmd_abort;
+                       }
                        mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
                        areq = &mq->mqrq_cur->mmc_active;
                } else
@@ -1538,7 +1549,12 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
        snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
                 "mmcblk%d%s", md->name_idx, subname ? subname : "");
 
-       blk_queue_logical_block_size(md->queue.queue, 512);
+       if (mmc_card_mmc(card))
+               blk_queue_logical_block_size(md->queue.queue,
+                                            card->ext_csd.data_sector_size);
+       else
+               blk_queue_logical_block_size(md->queue.queue, 512);
+
        set_capacity(md->disk, size);
 
        if (mmc_host_cmd23(card->host)) {
index 996f8e36e23d8aa8bbf7c333fd6d6d56b8ec79ad..e360a979857d297e1468a7c9aa41b86a975a2f82 100644 (file)
@@ -96,7 +96,7 @@ static int mmc_queue_thread(void *d)
  * on any queue on this host, and attempt to issue it.  This may
  * not be the queue we were asked to process.
  */
-static void mmc_request(struct request_queue *q)
+static void mmc_request_fn(struct request_queue *q)
 {
        struct mmc_queue *mq = q->queuedata;
        struct request *req;
@@ -171,12 +171,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                limit = *mmc_dev(host)->dma_mask;
 
        mq->card = card;
-       mq->queue = blk_init_queue(mmc_request, lock);
+       mq->queue = blk_init_queue(mmc_request_fn, lock);
        if (!mq->queue)
                return -ENOMEM;
 
-       memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
-       memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
        mq->mqrq_cur = mqrq_cur;
        mq->mqrq_prev = mqrq_prev;
        mq->queue->queuedata = mq;
index c60cee92a2b2fe9bfd4a9b863dbc95d0e76e2eae..9b68933f27e783260242d5568b860cabaf8c6f0e 100644 (file)
@@ -122,6 +122,7 @@ static int mmc_bus_remove(struct device *dev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int mmc_bus_suspend(struct device *dev)
 {
        struct mmc_driver *drv = to_mmc_driver(dev->driver);
@@ -143,6 +144,7 @@ static int mmc_bus_resume(struct device *dev)
                ret = drv->resume(card);
        return ret;
 }
+#endif
 
 #ifdef CONFIG_PM_RUNTIME
 
index 2c14be73254c385d9a481c89db978c7c82c1b6e9..f13e38deceac760fcbd9ae4cd2a1fdf7e5d79671 100644 (file)
@@ -73,6 +73,9 @@ void mmc_cd_gpio_free(struct mmc_host *host)
 {
        struct mmc_cd_gpio *cd = host->hotplug.handler_priv;
 
+       if (!cd)
+               return;
+
        free_irq(host->hotplug.irq, host);
        gpio_free(cd->gpio);
        kfree(cd);
index ba821fe70bca03dd3fbf17661e150ff737af242c..0b6141d29dbd1d9a3f146f2bb1b9923be9e0ad84 100644 (file)
@@ -42,6 +42,7 @@
 #include "sdio_ops.h"
 
 static struct workqueue_struct *workqueue;
+static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
 
 /*
  * Enabling software CRCs on the data blocks can be a significant (30%)
@@ -1157,6 +1158,9 @@ static void mmc_power_up(struct mmc_host *host)
 {
        int bit;
 
+       if (host->ios.power_mode == MMC_POWER_ON)
+               return;
+
        mmc_host_clk_hold(host);
 
        /* If ocr is set, we use it */
@@ -1199,6 +1203,10 @@ static void mmc_power_up(struct mmc_host *host)
 void mmc_power_off(struct mmc_host *host)
 {
        int err = 0;
+
+       if (host->ios.power_mode == MMC_POWER_OFF)
+               return;
+
        mmc_host_clk_hold(host);
 
        host->ios.clock = 0;
@@ -2005,7 +2013,6 @@ EXPORT_SYMBOL(mmc_detect_card_removed);
 
 void mmc_rescan(struct work_struct *work)
 {
-       static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
        struct mmc_host *host =
                container_of(work, struct mmc_host, detect.work);
        int i;
@@ -2044,8 +2051,12 @@ void mmc_rescan(struct work_struct *work)
         */
        mmc_bus_put(host);
 
-       if (host->ops->get_cd && host->ops->get_cd(host) == 0)
+       if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
+               mmc_claim_host(host);
+               mmc_power_off(host);
+               mmc_release_host(host);
                goto out;
+       }
 
        mmc_claim_host(host);
        for (i = 0; i < ARRAY_SIZE(freqs); i++) {
@@ -2063,7 +2074,8 @@ void mmc_rescan(struct work_struct *work)
 
 void mmc_start_host(struct mmc_host *host)
 {
-       mmc_power_off(host);
+       host->f_init = max(freqs[0], host->f_min);
+       mmc_power_up(host);
        mmc_detect_change(host, 0);
 }
 
index 54df5adc04137741ee7710d6ef5ee119add388d7..2d4a4b74675060133fecd7021fa2d774e5f303c9 100644 (file)
@@ -235,6 +235,36 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
        return err;
 }
 
+static void mmc_select_card_type(struct mmc_card *card)
+{
+       struct mmc_host *host = card->host;
+       u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK;
+       unsigned int caps = host->caps, caps2 = host->caps2;
+       unsigned int hs_max_dtr = 0;
+
+       if (card_type & EXT_CSD_CARD_TYPE_26)
+               hs_max_dtr = MMC_HIGH_26_MAX_DTR;
+
+       if (caps & MMC_CAP_MMC_HIGHSPEED &&
+                       card_type & EXT_CSD_CARD_TYPE_52)
+               hs_max_dtr = MMC_HIGH_52_MAX_DTR;
+
+       if ((caps & MMC_CAP_1_8V_DDR &&
+                       card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) ||
+           (caps & MMC_CAP_1_2V_DDR &&
+                       card_type & EXT_CSD_CARD_TYPE_DDR_1_2V))
+               hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
+
+       if ((caps2 & MMC_CAP2_HS200_1_8V_SDR &&
+                       card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) ||
+           (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
+                       card_type & EXT_CSD_CARD_TYPE_SDR_1_2V))
+               hs_max_dtr = MMC_HS200_MAX_DTR;
+
+       card->ext_csd.hs_max_dtr = hs_max_dtr;
+       card->ext_csd.card_type = card_type;
+}
+
 /*
  * Decode extended CSD.
  */
@@ -284,56 +314,9 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
                        mmc_card_set_blockaddr(card);
        }
+
        card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
-       switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
-       case EXT_CSD_CARD_TYPE_SDR_ALL:
-       case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V:
-       case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V:
-       case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52:
-               card->ext_csd.hs_max_dtr = 200000000;
-               card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200;
-               break;
-       case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL:
-       case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V:
-       case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V:
-       case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52:
-               card->ext_csd.hs_max_dtr = 200000000;
-               card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V;
-               break;
-       case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL:
-       case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V:
-       case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V:
-       case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52:
-               card->ext_csd.hs_max_dtr = 200000000;
-               card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V;
-               break;
-       case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
-            EXT_CSD_CARD_TYPE_26:
-               card->ext_csd.hs_max_dtr = 52000000;
-               card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
-               break;
-       case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
-            EXT_CSD_CARD_TYPE_26:
-               card->ext_csd.hs_max_dtr = 52000000;
-               card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
-               break;
-       case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
-            EXT_CSD_CARD_TYPE_26:
-               card->ext_csd.hs_max_dtr = 52000000;
-               card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
-               break;
-       case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
-               card->ext_csd.hs_max_dtr = 52000000;
-               break;
-       case EXT_CSD_CARD_TYPE_26:
-               card->ext_csd.hs_max_dtr = 26000000;
-               break;
-       default:
-               /* MMC v4 spec says this cannot happen */
-               pr_warning("%s: card is mmc v4 but doesn't "
-                       "support any high-speed modes.\n",
-                       mmc_hostname(card->host));
-       }
+       mmc_select_card_type(card);
 
        card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
        card->ext_csd.raw_erase_timeout_mult =
@@ -533,6 +516,8 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                } else {
                        card->ext_csd.data_tag_unit_size = 0;
                }
+       } else {
+               card->ext_csd.data_sector_size = 512;
        }
 
 out:
@@ -556,14 +541,10 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
        err = mmc_get_ext_csd(card, &bw_ext_csd);
 
        if (err || bw_ext_csd == NULL) {
-               if (bus_width != MMC_BUS_WIDTH_1)
-                       err = -EINVAL;
+               err = -EINVAL;
                goto out;
        }
 
-       if (bus_width == MMC_BUS_WIDTH_1)
-               goto out;
-
        /* only compare read only fields */
        err = !((card->ext_csd.raw_partition_support ==
                        bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
@@ -736,6 +717,10 @@ static int mmc_select_powerclass(struct mmc_card *card,
                                 card->ext_csd.generic_cmd6_time);
        }
 
+       if (err)
+               pr_err("%s: power class selection for ext_csd_bus_width %d"
+                      " failed\n", mmc_hostname(card->host), bus_width);
+
        return err;
 }
 
@@ -745,7 +730,7 @@ static int mmc_select_powerclass(struct mmc_card *card,
  */
 static int mmc_select_hs200(struct mmc_card *card)
 {
-       int idx, err = 0;
+       int idx, err = -EINVAL;
        struct mmc_host *host;
        static unsigned ext_csd_bits[] = {
                EXT_CSD_BUS_WIDTH_4,
@@ -761,10 +746,12 @@ static int mmc_select_hs200(struct mmc_card *card)
        host = card->host;
 
        if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
-           host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
-               if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0))
-                       err = mmc_set_signal_voltage(host,
-                                                    MMC_SIGNAL_VOLTAGE_180, 0);
+                       host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
+               err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0);
+
+       if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
+                       host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
+               err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 0);
 
        /* If fails try again during next card power cycle */
        if (err)
@@ -1117,9 +1104,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                                EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
                err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
                if (err)
-                       pr_warning("%s: power class selection to bus width %d"
-                                  " failed\n", mmc_hostname(card->host),
-                                  1 << bus_width);
+                       goto err;
        }
 
        /*
@@ -1151,10 +1136,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                        err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
                                                    ext_csd);
                        if (err)
-                               pr_warning("%s: power class selection to "
-                                          "bus width %d failed\n",
-                                          mmc_hostname(card->host),
-                                          1 << bus_width);
+                               goto err;
 
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         EXT_CSD_BUS_WIDTH,
@@ -1182,10 +1164,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                        err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
                                                    ext_csd);
                        if (err)
-                               pr_warning("%s: power class selection to "
-                                          "bus width %d ddr %d failed\n",
-                                          mmc_hostname(card->host),
-                                          1 << bus_width, ddr);
+                               goto err;
 
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         EXT_CSD_BUS_WIDTH,
index 2c7c83f832d289c25eaf83cf4d7ef32d420fb002..13d0e95380ab8f73d601060c3415b82bb253d12b 100644 (file)
@@ -947,7 +947,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
        }
 
        if (!err && host->sdio_irqs)
-               mmc_signal_sdio_irq(host);
+               wake_up_process(host->sdio_irq_thread);
        mmc_release_host(host);
 
        /*
index f573e7f9f74020dae69aa3ba861bbcadf66ffc4e..3d8ceb4084debf900693f07f1bb7a5d8588cdfab 100644 (file)
 
 #include "sdio_ops.h"
 
-static int process_sdio_pending_irqs(struct mmc_card *card)
+static int process_sdio_pending_irqs(struct mmc_host *host)
 {
+       struct mmc_card *card = host->card;
        int i, ret, count;
        unsigned char pending;
        struct sdio_func *func;
 
        /*
         * Optimization, if there is only 1 function interrupt registered
-        * call irq handler directly
+        * and we know an IRQ was signaled then call irq handler directly.
+        * Otherwise do the full probe.
         */
        func = card->sdio_single_irq;
-       if (func) {
+       if (func && host->sdio_irq_pending) {
                func->irq_handler(func);
                return 1;
        }
@@ -116,7 +118,8 @@ static int sdio_irq_thread(void *_host)
                ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
                if (ret)
                        break;
-               ret = process_sdio_pending_irqs(host->card);
+               ret = process_sdio_pending_irqs(host);
+               host->sdio_irq_pending = false;
                mmc_release_host(host);
 
                /*
index 2bc06e7344db5970c754ab6cbf0a849830d8339f..aa131b32e3b2981231ef0144398dce93c71c0ccc 100644 (file)
@@ -278,10 +278,13 @@ choice
          Choose which driver to use for the Atmel MCI Silicon
 
 config MMC_AT91
-       tristate "AT91 SD/MMC Card Interface support"
+       tristate "AT91 SD/MMC Card Interface support (DEPRECATED)"
        depends on ARCH_AT91
        help
-         This selects the AT91 MCI controller.
+         This selects the AT91 MCI controller. This driver will
+         be removed soon (for more information have a look to
+         Documentation/feature-removal-schedule.txt). Please use
+         MMC_ATMEL_MCI.
 
          If unsure, say N.
 
@@ -307,16 +310,6 @@ config MMC_ATMELMCI_DMA
 
          If unsure, say N.
 
-config MMC_IMX
-       tristate "Motorola i.MX Multimedia Card Interface support"
-       depends on ARCH_MX1
-       help
-         This selects the Motorola i.MX Multimedia card Interface.
-         If you have a i.MX platform with a Multimedia Card slot,
-         say Y or M here.
-
-         If unsure, say N.
-
 config MMC_MSM
        tristate "Qualcomm SDCC Controller Support"
        depends on MMC && ARCH_MSM
index 3e7e26d0807346a82353544a0623086ce4249409..8922b06be9256d3b1c9f24117e5b47f29b054dc6 100644 (file)
@@ -4,7 +4,6 @@
 
 obj-$(CONFIG_MMC_ARMMMCI)      += mmci.o
 obj-$(CONFIG_MMC_PXA)          += pxamci.o
-obj-$(CONFIG_MMC_IMX)          += imxmmc.o
 obj-$(CONFIG_MMC_MXC)          += mxcmmc.o
 obj-$(CONFIG_MMC_MXS)          += mxs-mmc.o
 obj-$(CONFIG_MMC_SDHCI)                += sdhci.o
index e94476beca181287b39c3af3a0b542a519bef353..420aca642b14ba42a6fa7fe014cc865e49445b73 100644 (file)
 #define ATMCI_DMA_THRESHOLD    16
 
 enum {
-       EVENT_CMD_COMPLETE = 0,
+       EVENT_CMD_RDY = 0,
        EVENT_XFER_COMPLETE,
-       EVENT_DATA_COMPLETE,
+       EVENT_NOTBUSY,
        EVENT_DATA_ERROR,
 };
 
 enum atmel_mci_state {
        STATE_IDLE = 0,
        STATE_SENDING_CMD,
-       STATE_SENDING_DATA,
-       STATE_DATA_BUSY,
+       STATE_DATA_XFER,
+       STATE_WAITING_NOTBUSY,
        STATE_SENDING_STOP,
-       STATE_DATA_ERROR,
+       STATE_END_REQUEST,
 };
 
 enum atmci_xfer_dir {
@@ -78,6 +78,9 @@ struct atmel_mci_caps {
        bool    has_highspeed;
        bool    has_rwproof;
        bool    has_odd_clk_div;
+       bool    has_bad_data_ordering;
+       bool    need_reset_after_xfer;
+       bool    need_blksz_mul_4;
 };
 
 struct atmel_mci_dma {
@@ -91,6 +94,11 @@ struct atmel_mci_dma {
  * @regs: Pointer to MMIO registers.
  * @sg: Scatterlist entry currently being processed by PIO or PDC code.
  * @pio_offset: Offset into the current scatterlist entry.
+ * @buffer: Buffer used if we don't have the r/w proof capability. We
+ *      don't have the time to switch pdc buffers so we have to use only
+ *      one buffer for the full transaction.
+ * @buf_size: size of the buffer.
+ * @phys_buf_addr: buffer address needed for pdc.
  * @cur_slot: The slot which is currently using the controller.
  * @mrq: The request currently being processed on @cur_slot,
  *     or NULL if the controller is idle.
@@ -116,6 +124,7 @@ struct atmel_mci_dma {
  * @queue: List of slots waiting for access to the controller.
  * @need_clock_update: Update the clock rate before the next request.
  * @need_reset: Reset controller before next request.
+ * @timer: Timer to balance the data timeout error flag which cannot rise.
  * @mode_reg: Value of the MR register.
  * @cfg_reg: Value of the CFG register.
  * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
@@ -166,6 +175,9 @@ struct atmel_mci {
 
        struct scatterlist      *sg;
        unsigned int            pio_offset;
+       unsigned int            *buffer;
+       unsigned int            buf_size;
+       dma_addr_t              buf_phys_addr;
 
        struct atmel_mci_slot   *cur_slot;
        struct mmc_request      *mrq;
@@ -189,6 +201,7 @@ struct atmel_mci {
 
        bool                    need_clock_update;
        bool                    need_reset;
+       struct timer_list       timer;
        u32                     mode_reg;
        u32                     cfg_reg;
        unsigned long           bus_hz;
@@ -480,6 +493,32 @@ err:
        dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
 }
 
+static inline unsigned int atmci_get_version(struct atmel_mci *host)
+{
+       return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
+}
+
+static void atmci_timeout_timer(unsigned long data)
+{
+       struct atmel_mci *host;
+
+       host = (struct atmel_mci *)data;
+
+       dev_dbg(&host->pdev->dev, "software timeout\n");
+
+       if (host->mrq->cmd->data) {
+               host->mrq->cmd->data->error = -ETIMEDOUT;
+               host->data = NULL;
+       } else {
+               host->mrq->cmd->error = -ETIMEDOUT;
+               host->cmd = NULL;
+       }
+       host->need_reset = 1;
+       host->state = STATE_END_REQUEST;
+       smp_wmb();
+       tasklet_schedule(&host->tasklet);
+}
+
 static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
                                        unsigned int ns)
 {
@@ -591,6 +630,7 @@ static void atmci_send_command(struct atmel_mci *host,
 
 static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
 {
+       dev_dbg(&host->pdev->dev, "send stop command\n");
        atmci_send_command(host, data->stop, host->stop_cmdr);
        atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
 }
@@ -603,6 +643,7 @@ static void atmci_pdc_set_single_buf(struct atmel_mci *host,
        enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
 {
        u32 pointer_reg, counter_reg;
+       unsigned int buf_size;
 
        if (dir == XFER_RECEIVE) {
                pointer_reg = ATMEL_PDC_RPR;
@@ -617,8 +658,15 @@ static void atmci_pdc_set_single_buf(struct atmel_mci *host,
                counter_reg += ATMEL_PDC_SCND_BUF_OFF;
        }
 
-       atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
-       if (host->data_size <= sg_dma_len(host->sg)) {
+       if (!host->caps.has_rwproof) {
+               buf_size = host->buf_size;
+               atmci_writel(host, pointer_reg, host->buf_phys_addr);
+       } else {
+               buf_size = sg_dma_len(host->sg);
+               atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
+       }
+
+       if (host->data_size <= buf_size) {
                if (host->data_size & 0x3) {
                        /* If size is different from modulo 4, transfer bytes */
                        atmci_writel(host, counter_reg, host->data_size);
@@ -670,7 +718,20 @@ static void atmci_pdc_cleanup(struct atmel_mci *host)
  */
 static void atmci_pdc_complete(struct atmel_mci *host)
 {
+       int transfer_size = host->data->blocks * host->data->blksz;
+       int i;
+
        atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
+
+       if ((!host->caps.has_rwproof)
+           && (host->data->flags & MMC_DATA_READ)) {
+               if (host->caps.has_bad_data_ordering)
+                       for (i = 0; i < transfer_size; i++)
+                               host->buffer[i] = swab32(host->buffer[i]);
+               sg_copy_from_buffer(host->data->sg, host->data->sg_len,
+                                   host->buffer, transfer_size);
+       }
+
        atmci_pdc_cleanup(host);
 
        /*
@@ -678,9 +739,10 @@ static void atmci_pdc_complete(struct atmel_mci *host)
         * to send the stop command or waiting for NBUSY in this case.
         */
        if (host->data) {
+               dev_dbg(&host->pdev->dev,
+                       "(%s) set pending xfer complete\n", __func__);
                atmci_set_pending(host, EVENT_XFER_COMPLETE);
                tasklet_schedule(&host->tasklet);
-               atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
        }
 }
 
@@ -716,6 +778,8 @@ static void atmci_dma_complete(void *arg)
         * to send the stop command or waiting for NBUSY in this case.
         */
        if (data) {
+               dev_dbg(&host->pdev->dev,
+                       "(%s) set pending xfer complete\n", __func__);
                atmci_set_pending(host, EVENT_XFER_COMPLETE);
                tasklet_schedule(&host->tasklet);
 
@@ -791,6 +855,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
        u32 iflags, tmp;
        unsigned int sg_len;
        enum dma_data_direction dir;
+       int i;
 
        data->error = -EINPROGRESS;
 
@@ -806,7 +871,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
                iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
        } else {
                dir = DMA_TO_DEVICE;
-               iflags |= ATMCI_ENDTX | ATMCI_TXBUFE;
+               iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
        }
 
        /* Set BLKLEN */
@@ -818,6 +883,16 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
        /* Configure PDC */
        host->data_size = data->blocks * data->blksz;
        sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
+
+       if ((!host->caps.has_rwproof)
+           && (host->data->flags & MMC_DATA_WRITE)) {
+               sg_copy_to_buffer(host->data->sg, host->data->sg_len,
+                                 host->buffer, host->data_size);
+               if (host->caps.has_bad_data_ordering)
+                       for (i = 0; i < host->data_size; i++)
+                               host->buffer[i] = swab32(host->buffer[i]);
+       }
+
        if (host->data_size)
                atmci_pdc_set_both_buf(host,
                        ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
@@ -931,6 +1006,8 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
 
 static void atmci_stop_transfer(struct atmel_mci *host)
 {
+       dev_dbg(&host->pdev->dev,
+               "(%s) set pending xfer complete\n", __func__);
        atmci_set_pending(host, EVENT_XFER_COMPLETE);
        atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
 }
@@ -940,8 +1017,7 @@ static void atmci_stop_transfer(struct atmel_mci *host)
  */
 static void atmci_stop_transfer_pdc(struct atmel_mci *host)
 {
-       atmci_set_pending(host, EVENT_XFER_COMPLETE);
-       atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+       atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
 }
 
 static void atmci_stop_transfer_dma(struct atmel_mci *host)
@@ -953,6 +1029,8 @@ static void atmci_stop_transfer_dma(struct atmel_mci *host)
                atmci_dma_cleanup(host);
        } else {
                /* Data transfer was stopped by the interrupt handler */
+               dev_dbg(&host->pdev->dev,
+                       "(%s) set pending xfer complete\n", __func__);
                atmci_set_pending(host, EVENT_XFER_COMPLETE);
                atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
        }
@@ -977,9 +1055,12 @@ static void atmci_start_request(struct atmel_mci *host,
 
        host->pending_events = 0;
        host->completed_events = 0;
+       host->cmd_status = 0;
        host->data_status = 0;
 
-       if (host->need_reset) {
+       dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
+
+       if (host->need_reset || host->caps.need_reset_after_xfer) {
                iflags = atmci_readl(host, ATMCI_IMR);
                iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
                atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
@@ -994,7 +1075,7 @@ static void atmci_start_request(struct atmel_mci *host,
 
        iflags = atmci_readl(host, ATMCI_IMR);
        if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
-               dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
+               dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
                                iflags);
 
        if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
@@ -1043,6 +1124,8 @@ static void atmci_start_request(struct atmel_mci *host,
         * prepared yet.)
         */
        atmci_writel(host, ATMCI_IER, iflags);
+
+       mod_timer(&host->timer, jiffies +  msecs_to_jiffies(2000));
 }
 
 static void atmci_queue_request(struct atmel_mci *host,
@@ -1057,6 +1140,7 @@ static void atmci_queue_request(struct atmel_mci *host,
                host->state = STATE_SENDING_CMD;
                atmci_start_request(host, slot);
        } else {
+               dev_dbg(&host->pdev->dev, "queue request\n");
                list_add_tail(&slot->queue_node, &host->queue);
        }
        spin_unlock_bh(&host->lock);
@@ -1069,6 +1153,7 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
        struct mmc_data         *data;
 
        WARN_ON(slot->mrq);
+       dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
 
        /*
         * We may "know" the card is gone even though there's still an
@@ -1308,6 +1393,8 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
                host->state = STATE_IDLE;
        }
 
+       del_timer(&host->timer);
+
        spin_unlock(&host->lock);
        mmc_request_done(prev_mmc, mrq);
        spin_lock(&host->lock);
@@ -1330,21 +1417,13 @@ static void atmci_command_complete(struct atmel_mci *host,
                cmd->error = -EILSEQ;
        else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
                cmd->error = -EIO;
-       else
-               cmd->error = 0;
-
-       if (cmd->error) {
-               dev_dbg(&host->pdev->dev,
-                       "command error: status=0x%08x\n", status);
-
-               if (cmd->data) {
-                       host->stop_transfer(host);
-                       host->data = NULL;
-                       atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY
-                                       | ATMCI_TXRDY | ATMCI_RXRDY
-                                       | ATMCI_DATA_ERROR_FLAGS);
+       else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
+               if (host->caps.need_blksz_mul_4) {
+                       cmd->error = -EINVAL;
+                       host->need_reset = 1;
                }
-       }
+       } else
+               cmd->error = 0;
 }
 
 static void atmci_detect_change(unsigned long data)
@@ -1407,23 +1486,21 @@ static void atmci_detect_change(unsigned long data)
                                        break;
                                case STATE_SENDING_CMD:
                                        mrq->cmd->error = -ENOMEDIUM;
-                                       if (!mrq->data)
-                                               break;
-                                       /* fall through */
-                               case STATE_SENDING_DATA:
+                                       if (mrq->data)
+                                               host->stop_transfer(host);
+                                       break;
+                               case STATE_DATA_XFER:
                                        mrq->data->error = -ENOMEDIUM;
                                        host->stop_transfer(host);
                                        break;
-                               case STATE_DATA_BUSY:
-                               case STATE_DATA_ERROR:
-                                       if (mrq->data->error == -EINPROGRESS)
-                                               mrq->data->error = -ENOMEDIUM;
-                                       if (!mrq->stop)
-                                               break;
-                                       /* fall through */
+                               case STATE_WAITING_NOTBUSY:
+                                       mrq->data->error = -ENOMEDIUM;
+                                       break;
                                case STATE_SENDING_STOP:
                                        mrq->stop->error = -ENOMEDIUM;
                                        break;
+                               case STATE_END_REQUEST:
+                                       break;
                                }
 
                                atmci_request_end(host, mrq);
@@ -1451,7 +1528,6 @@ static void atmci_tasklet_func(unsigned long priv)
        struct atmel_mci        *host = (struct atmel_mci *)priv;
        struct mmc_request      *mrq = host->mrq;
        struct mmc_data         *data = host->data;
-       struct mmc_command      *cmd = host->cmd;
        enum atmel_mci_state    state = host->state;
        enum atmel_mci_state    prev_state;
        u32                     status;
@@ -1467,107 +1543,186 @@ static void atmci_tasklet_func(unsigned long priv)
 
        do {
                prev_state = state;
+               dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
 
                switch (state) {
                case STATE_IDLE:
                        break;
 
                case STATE_SENDING_CMD:
+                       /*
+                        * Command has been sent, we are waiting for command
+                        * ready. Then we have three next states possible:
+                        * END_REQUEST by default, WAITING_NOTBUSY if it's a
+                        * command needing it or DATA_XFER if there is data.
+                        */
+                       dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
                        if (!atmci_test_and_clear_pending(host,
-                                               EVENT_CMD_COMPLETE))
+                                               EVENT_CMD_RDY))
                                break;
 
+                       dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
                        host->cmd = NULL;
-                       atmci_set_completed(host, EVENT_CMD_COMPLETE);
+                       atmci_set_completed(host, EVENT_CMD_RDY);
                        atmci_command_complete(host, mrq->cmd);
-                       if (!mrq->data || cmd->error) {
-                               atmci_request_end(host, host->mrq);
-                               goto unlock;
-                       }
+                       if (mrq->data) {
+                               dev_dbg(&host->pdev->dev,
+                                       "command with data transfer");
+                               /*
+                                * If there is a command error don't start
+                                * data transfer.
+                                */
+                               if (mrq->cmd->error) {
+                                       host->stop_transfer(host);
+                                       host->data = NULL;
+                                       atmci_writel(host, ATMCI_IDR,
+                                                    ATMCI_TXRDY | ATMCI_RXRDY
+                                                    | ATMCI_DATA_ERROR_FLAGS);
+                                       state = STATE_END_REQUEST;
+                               } else
+                                       state = STATE_DATA_XFER;
+                       } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
+                               dev_dbg(&host->pdev->dev,
+                                       "command response need waiting notbusy");
+                               atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+                               state = STATE_WAITING_NOTBUSY;
+                       } else
+                               state = STATE_END_REQUEST;
 
-                       prev_state = state = STATE_SENDING_DATA;
-                       /* fall through */
+                       break;
 
-               case STATE_SENDING_DATA:
+               case STATE_DATA_XFER:
                        if (atmci_test_and_clear_pending(host,
                                                EVENT_DATA_ERROR)) {
-                               host->stop_transfer(host);
-                               if (data->stop)
-                                       atmci_send_stop_cmd(host, data);
-                               state = STATE_DATA_ERROR;
+                               dev_dbg(&host->pdev->dev, "set completed data error\n");
+                               atmci_set_completed(host, EVENT_DATA_ERROR);
+                               state = STATE_END_REQUEST;
                                break;
                        }
 
+                       /*
+                        * A data transfer is in progress. The event expected
+                        * to move to the next state depends of data transfer
+                        * type (PDC or DMA). Once transfer done we can move
+                        * to the next step which is WAITING_NOTBUSY in write
+                        * case and directly SENDING_STOP in read case.
+                        */
+                       dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
                        if (!atmci_test_and_clear_pending(host,
                                                EVENT_XFER_COMPLETE))
                                break;
 
+                       dev_dbg(&host->pdev->dev,
+                               "(%s) set completed xfer complete\n",
+                               __func__);
                        atmci_set_completed(host, EVENT_XFER_COMPLETE);
-                       prev_state = state = STATE_DATA_BUSY;
-                       /* fall through */
 
-               case STATE_DATA_BUSY:
-                       if (!atmci_test_and_clear_pending(host,
-                                               EVENT_DATA_COMPLETE))
-                               break;
-
-                       host->data = NULL;
-                       atmci_set_completed(host, EVENT_DATA_COMPLETE);
-                       status = host->data_status;
-                       if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) {
-                               if (status & ATMCI_DTOE) {
-                                       dev_dbg(&host->pdev->dev,
-                                                       "data timeout error\n");
-                                       data->error = -ETIMEDOUT;
-                               } else if (status & ATMCI_DCRCE) {
-                                       dev_dbg(&host->pdev->dev,
-                                                       "data CRC error\n");
-                                       data->error = -EILSEQ;
-                               } else {
-                                       dev_dbg(&host->pdev->dev,
-                                               "data FIFO error (status=%08x)\n",
-                                               status);
-                                       data->error = -EIO;
-                               }
+                       if (host->data->flags & MMC_DATA_WRITE) {
+                               atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+                               state = STATE_WAITING_NOTBUSY;
+                       } else if (host->mrq->stop) {
+                               atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
+                               atmci_send_stop_cmd(host, data);
+                               state = STATE_SENDING_STOP;
                        } else {
+                               host->data = NULL;
                                data->bytes_xfered = data->blocks * data->blksz;
                                data->error = 0;
-                               atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS);
+                               state = STATE_END_REQUEST;
                        }
+                       break;
 
-                       if (!data->stop) {
-                               atmci_request_end(host, host->mrq);
-                               goto unlock;
-                       }
+               case STATE_WAITING_NOTBUSY:
+                       /*
+                        * We can be in the state for two reasons: a command
+                        * requiring waiting not busy signal (stop command
+                        * included) or a write operation. In the latest case,
+                        * we need to send a stop command.
+                        */
+                       dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
+                       if (!atmci_test_and_clear_pending(host,
+                                               EVENT_NOTBUSY))
+                               break;
 
-                       prev_state = state = STATE_SENDING_STOP;
-                       if (!data->error)
-                               atmci_send_stop_cmd(host, data);
-                       /* fall through */
+                       dev_dbg(&host->pdev->dev, "set completed not busy\n");
+                       atmci_set_completed(host, EVENT_NOTBUSY);
+
+                       if (host->data) {
+                               /*
+                                * For some commands such as CMD53, even if
+                                * there is data transfer, there is no stop
+                                * command to send.
+                                */
+                               if (host->mrq->stop) {
+                                       atmci_writel(host, ATMCI_IER,
+                                                    ATMCI_CMDRDY);
+                                       atmci_send_stop_cmd(host, data);
+                                       state = STATE_SENDING_STOP;
+                               } else {
+                                       host->data = NULL;
+                                       data->bytes_xfered = data->blocks
+                                                            * data->blksz;
+                                       data->error = 0;
+                                       state = STATE_END_REQUEST;
+                               }
+                       } else
+                               state = STATE_END_REQUEST;
+                       break;
 
                case STATE_SENDING_STOP:
+                       /*
+                        * In this state, it is important to set host->data to
+                        * NULL (which is tested in the waiting notbusy state)
+                        * in order to go to the end request state instead of
+                        * sending stop again.
+                        */
+                       dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
                        if (!atmci_test_and_clear_pending(host,
-                                               EVENT_CMD_COMPLETE))
+                                               EVENT_CMD_RDY))
                                break;
 
+                       dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
                        host->cmd = NULL;
+                       host->data = NULL;
+                       data->bytes_xfered = data->blocks * data->blksz;
+                       data->error = 0;
                        atmci_command_complete(host, mrq->stop);
-                       atmci_request_end(host, host->mrq);
-                       goto unlock;
+                       if (mrq->stop->error) {
+                               host->stop_transfer(host);
+                               atmci_writel(host, ATMCI_IDR,
+                                            ATMCI_TXRDY | ATMCI_RXRDY
+                                            | ATMCI_DATA_ERROR_FLAGS);
+                               state = STATE_END_REQUEST;
+                       } else {
+                               atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+                               state = STATE_WAITING_NOTBUSY;
+                       }
+                       break;
 
-               case STATE_DATA_ERROR:
-                       if (!atmci_test_and_clear_pending(host,
-                                               EVENT_XFER_COMPLETE))
-                               break;
+               case STATE_END_REQUEST:
+                       atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
+                                          | ATMCI_DATA_ERROR_FLAGS);
+                       status = host->data_status;
+                       if (unlikely(status)) {
+                               host->stop_transfer(host);
+                               host->data = NULL;
+                               if (status & ATMCI_DTOE) {
+                                       data->error = -ETIMEDOUT;
+                               } else if (status & ATMCI_DCRCE) {
+                                       data->error = -EILSEQ;
+                               } else {
+                                       data->error = -EIO;
+                               }
+                       }
 
-                       state = STATE_DATA_BUSY;
+                       atmci_request_end(host, host->mrq);
+                       state = STATE_IDLE;
                        break;
                }
        } while (state != prev_state);
 
        host->state = state;
 
-unlock:
        spin_unlock(&host->lock);
 }
 
@@ -1620,9 +1775,6 @@ static void atmci_read_data_pio(struct atmel_mci *host)
                                                | ATMCI_DATA_ERROR_FLAGS));
                        host->data_status = status;
                        data->bytes_xfered += nbytes;
-                       smp_wmb();
-                       atmci_set_pending(host, EVENT_DATA_ERROR);
-                       tasklet_schedule(&host->tasklet);
                        return;
                }
        } while (status & ATMCI_RXRDY);
@@ -1691,9 +1843,6 @@ static void atmci_write_data_pio(struct atmel_mci *host)
                                                | ATMCI_DATA_ERROR_FLAGS));
                        host->data_status = status;
                        data->bytes_xfered += nbytes;
-                       smp_wmb();
-                       atmci_set_pending(host, EVENT_DATA_ERROR);
-                       tasklet_schedule(&host->tasklet);
                        return;
                }
        } while (status & ATMCI_TXRDY);
@@ -1711,16 +1860,6 @@ done:
        atmci_set_pending(host, EVENT_XFER_COMPLETE);
 }
 
-static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
-{
-       atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
-
-       host->cmd_status = status;
-       smp_wmb();
-       atmci_set_pending(host, EVENT_CMD_COMPLETE);
-       tasklet_schedule(&host->tasklet);
-}
-
 static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
 {
        int     i;
@@ -1748,17 +1887,21 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
                        break;
 
                if (pending & ATMCI_DATA_ERROR_FLAGS) {
+                       dev_dbg(&host->pdev->dev, "IRQ: data error\n");
                        atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
-                                       | ATMCI_RXRDY | ATMCI_TXRDY);
-                       pending &= atmci_readl(host, ATMCI_IMR);
+                                       | ATMCI_RXRDY | ATMCI_TXRDY
+                                       | ATMCI_ENDRX | ATMCI_ENDTX
+                                       | ATMCI_RXBUFF | ATMCI_TXBUFE);
 
                        host->data_status = status;
+                       dev_dbg(&host->pdev->dev, "set pending data error\n");
                        smp_wmb();
                        atmci_set_pending(host, EVENT_DATA_ERROR);
                        tasklet_schedule(&host->tasklet);
                }
 
                if (pending & ATMCI_TXBUFE) {
+                       dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
                        atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
                        atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
                        /*
@@ -1774,6 +1917,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
                                atmci_pdc_complete(host);
                        }
                } else if (pending & ATMCI_ENDTX) {
+                       dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
                        atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
 
                        if (host->data_size) {
@@ -1784,6 +1928,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
                }
 
                if (pending & ATMCI_RXBUFF) {
+                       dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
                        atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
                        atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
                        /*
@@ -1799,6 +1944,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
                                atmci_pdc_complete(host);
                        }
                } else if (pending & ATMCI_ENDRX) {
+                       dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
                        atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
 
                        if (host->data_size) {
@@ -1808,23 +1954,44 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
                        }
                }
 
+               /*
+                * First mci IPs, so mainly the ones having pdc, have some
+                * issues with the notbusy signal. You can't get it after
+                * data transmission if you have not sent a stop command.
+                * The appropriate workaround is to use the BLKE signal.
+                */
+               if (pending & ATMCI_BLKE) {
+                       dev_dbg(&host->pdev->dev, "IRQ: blke\n");
+                       atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
+                       smp_wmb();
+                       dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+                       atmci_set_pending(host, EVENT_NOTBUSY);
+                       tasklet_schedule(&host->tasklet);
+               }
 
                if (pending & ATMCI_NOTBUSY) {
-                       atmci_writel(host, ATMCI_IDR,
-                                       ATMCI_DATA_ERROR_FLAGS | ATMCI_NOTBUSY);
-                       if (!host->data_status)
-                               host->data_status = status;
+                       dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
+                       atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
                        smp_wmb();
-                       atmci_set_pending(host, EVENT_DATA_COMPLETE);
+                       dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+                       atmci_set_pending(host, EVENT_NOTBUSY);
                        tasklet_schedule(&host->tasklet);
                }
+
                if (pending & ATMCI_RXRDY)
                        atmci_read_data_pio(host);
                if (pending & ATMCI_TXRDY)
                        atmci_write_data_pio(host);
 
-               if (pending & ATMCI_CMDRDY)
-                       atmci_cmd_interrupt(host, status);
+               if (pending & ATMCI_CMDRDY) {
+                       dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
+                       atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
+                       host->cmd_status = status;
+                       smp_wmb();
+                       dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
+                       atmci_set_pending(host, EVENT_CMD_RDY);
+                       tasklet_schedule(&host->tasklet);
+               }
 
                if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
                        atmci_sdio_interrupt(host, status);
@@ -1877,13 +2044,26 @@ static int __init atmci_init_slot(struct atmel_mci *host,
                mmc->caps |= MMC_CAP_SDIO_IRQ;
        if (host->caps.has_highspeed)
                mmc->caps |= MMC_CAP_SD_HIGHSPEED;
-       if (slot_data->bus_width >= 4)
+       /*
+        * Without the read/write proof capability, it is strongly suggested to
+        * use only one bit for data to prevent fifo underruns and overruns
+        * which will corrupt data.
+        */
+       if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
                mmc->caps |= MMC_CAP_4_BIT_DATA;
 
-       mmc->max_segs = 64;
-       mmc->max_req_size = 32768 * 512;
-       mmc->max_blk_size = 32768;
-       mmc->max_blk_count = 512;
+       if (atmci_get_version(host) < 0x200) {
+               mmc->max_segs = 256;
+               mmc->max_blk_size = 4095;
+               mmc->max_blk_count = 256;
+               mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+               mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
+       } else {
+               mmc->max_segs = 64;
+               mmc->max_req_size = 32768 * 512;
+               mmc->max_blk_size = 32768;
+               mmc->max_blk_count = 512;
+       }
 
        /* Assume card is present initially */
        set_bit(ATMCI_CARD_PRESENT, &slot->flags);
@@ -2007,11 +2187,6 @@ static bool atmci_configure_dma(struct atmel_mci *host)
        }
 }
 
-static inline unsigned int atmci_get_version(struct atmel_mci *host)
-{
-       return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
-}
-
 /*
  * HSMCI (High Speed MCI) module is not fully compatible with MCI module.
  * HSMCI provides DMA support and a new config register but no more supports
@@ -2032,6 +2207,9 @@ static void __init atmci_get_cap(struct atmel_mci *host)
        host->caps.has_highspeed = 0;
        host->caps.has_rwproof = 0;
        host->caps.has_odd_clk_div = 0;
+       host->caps.has_bad_data_ordering = 1;
+       host->caps.need_reset_after_xfer = 1;
+       host->caps.need_blksz_mul_4 = 1;
 
        /* keep only major version number */
        switch (version & 0xf00) {
@@ -2051,7 +2229,11 @@ static void __init atmci_get_cap(struct atmel_mci *host)
                host->caps.has_highspeed = 1;
        case 0x200:
                host->caps.has_rwproof = 1;
+               host->caps.need_blksz_mul_4 = 0;
        case 0x100:
+               host->caps.has_bad_data_ordering = 0;
+               host->caps.need_reset_after_xfer = 0;
+       case 0x0:
                break;
        default:
                host->caps.has_pdc = 0;
@@ -2138,14 +2320,20 @@ static int __init atmci_probe(struct platform_device *pdev)
        if (pdata->slot[0].bus_width) {
                ret = atmci_init_slot(host, &pdata->slot[0],
                                0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
-               if (!ret)
+               if (!ret) {
                        nr_slots++;
+                       host->buf_size = host->slot[0]->mmc->max_req_size;
+               }
        }
        if (pdata->slot[1].bus_width) {
                ret = atmci_init_slot(host, &pdata->slot[1],
                                1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
-               if (!ret)
+               if (!ret) {
                        nr_slots++;
+                       if (host->slot[1]->mmc->max_req_size > host->buf_size)
+                               host->buf_size =
+                                       host->slot[1]->mmc->max_req_size;
+               }
        }
 
        if (!nr_slots) {
@@ -2153,6 +2341,19 @@ static int __init atmci_probe(struct platform_device *pdev)
                goto err_init_slot;
        }
 
+       if (!host->caps.has_rwproof) {
+               host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
+                                                 &host->buf_phys_addr,
+                                                 GFP_KERNEL);
+               if (!host->buffer) {
+                       ret = -ENOMEM;
+                       dev_err(&pdev->dev, "buffer allocation failed\n");
+                       goto err_init_slot;
+               }
+       }
+
+       setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+
        dev_info(&pdev->dev,
                        "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
                        host->mapbase, irq, nr_slots);
@@ -2179,6 +2380,10 @@ static int __exit atmci_remove(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, NULL);
 
+       if (host->buffer)
+               dma_free_coherent(&pdev->dev, host->buf_size,
+                                 host->buffer, host->buf_phys_addr);
+
        for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
                if (host->slot[i])
                        atmci_cleanup_slot(host->slot[i], i);
index c1f3673ae1efa9df281bbb88687029af24309f29..7cf6c624bf737fd1858422a0b6237b74de50fb83 100644 (file)
@@ -1533,4 +1533,5 @@ module_exit(davinci_mmcsd_exit);
 MODULE_AUTHOR("Texas Instruments India");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
+MODULE_ALIAS("platform:davinci_mmc");
 
index ab3fc46171079d4c8d07da35d5d00792032854d0..9bbf45f8c538ade0990444c0c5d63614ddc09c3f 100644 (file)
@@ -100,8 +100,6 @@ struct dw_mci_slot {
        int                     last_detect_state;
 };
 
-static struct workqueue_struct *dw_mci_card_workqueue;
-
 #if defined(CONFIG_DEBUG_FS)
 static int dw_mci_req_show(struct seq_file *s, void *v)
 {
@@ -859,10 +857,10 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
        int_mask = mci_readl(host, INTMASK);
        if (enb) {
                mci_writel(host, INTMASK,
-                          (int_mask | (1 << SDMMC_INT_SDIO(slot->id))));
+                          (int_mask | SDMMC_INT_SDIO(slot->id)));
        } else {
                mci_writel(host, INTMASK,
-                          (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id))));
+                          (int_mask & ~SDMMC_INT_SDIO(slot->id)));
        }
 }
 
@@ -1605,7 +1603,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
 
                if (pending & SDMMC_INT_CD) {
                        mci_writel(host, RINTSTS, SDMMC_INT_CD);
-                       queue_work(dw_mci_card_workqueue, &host->card_work);
+                       queue_work(host->card_workqueue, &host->card_work);
                }
 
                /* Handle SDIO Interrupts */
@@ -1844,7 +1842,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
         * Card may have been plugged in prior to boot so we
         * need to run the detect tasklet
         */
-       queue_work(dw_mci_card_workqueue, &host->card_work);
+       queue_work(host->card_workqueue, &host->card_work);
 
        return 0;
 }
@@ -2021,9 +2019,9 @@ int dw_mci_probe(struct dw_mci *host)
        mci_writel(host, CLKSRC, 0);
 
        tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
-       dw_mci_card_workqueue = alloc_workqueue("dw-mci-card",
+       host->card_workqueue = alloc_workqueue("dw-mci-card",
                        WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
-       if (!dw_mci_card_workqueue)
+       if (!host->card_workqueue)
                goto err_dmaunmap;
        INIT_WORK(&host->card_work, dw_mci_work_routine_card);
        ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host);
@@ -2085,7 +2083,7 @@ err_init_slot:
        free_irq(host->irq, host);
 
 err_workqueue:
-       destroy_workqueue(dw_mci_card_workqueue);
+       destroy_workqueue(host->card_workqueue);
 
 err_dmaunmap:
        if (host->use_dma && host->dma_ops->exit)
@@ -2119,7 +2117,7 @@ void dw_mci_remove(struct dw_mci *host)
        mci_writel(host, CLKSRC, 0);
 
        free_irq(host->irq, host);
-       destroy_workqueue(dw_mci_card_workqueue);
+       destroy_workqueue(host->card_workqueue);
        dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
 
        if (host->use_dma && host->dma_ops->exit)
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
deleted file mode 100644 (file)
index ea0f3ce..0000000
+++ /dev/null
@@ -1,1169 +0,0 @@
-/*
- *  linux/drivers/mmc/host/imxmmc.c - Motorola i.MX MMCI driver
- *
- *  Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de>
- *  Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
- *
- *  derived from pxamci.c by Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/dma-mapping.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/card.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <asm/dma.h>
-#include <asm/irq.h>
-#include <asm/sizes.h>
-#include <mach/mmc.h>
-#include <mach/imx-dma.h>
-
-#include "imxmmc.h"
-
-#define DRIVER_NAME "imx-mmc"
-
-#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \
-                                INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \
-                                INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO)
-
-struct imxmci_host {
-       struct mmc_host         *mmc;
-       spinlock_t              lock;
-       struct resource         *res;
-       void __iomem            *base;
-       int                     irq;
-       imx_dmach_t             dma;
-       volatile unsigned int   imask;
-       unsigned int            power_mode;
-       unsigned int            present;
-       struct imxmmc_platform_data *pdata;
-
-       struct mmc_request      *req;
-       struct mmc_command      *cmd;
-       struct mmc_data         *data;
-
-       struct timer_list       timer;
-       struct tasklet_struct   tasklet;
-       unsigned int            status_reg;
-       unsigned long           pending_events;
-       /* Next two fields are there for CPU driven transfers to overcome SDHC deficiencies */
-       u16                     *data_ptr;
-       unsigned int            data_cnt;
-       atomic_t                stuck_timeout;
-
-       unsigned int            dma_nents;
-       unsigned int            dma_size;
-       unsigned int            dma_dir;
-       int                     dma_allocated;
-
-       unsigned char           actual_bus_width;
-
-       int                     prev_cmd_code;
-
-       struct clk              *clk;
-};
-
-#define IMXMCI_PEND_IRQ_b      0
-#define IMXMCI_PEND_DMA_END_b  1
-#define IMXMCI_PEND_DMA_ERR_b  2
-#define IMXMCI_PEND_WAIT_RESP_b        3
-#define IMXMCI_PEND_DMA_DATA_b 4
-#define IMXMCI_PEND_CPU_DATA_b 5
-#define IMXMCI_PEND_CARD_XCHG_b        6
-#define IMXMCI_PEND_SET_INIT_b 7
-#define IMXMCI_PEND_STARTED_b  8
-
-#define IMXMCI_PEND_IRQ_m      (1 << IMXMCI_PEND_IRQ_b)
-#define IMXMCI_PEND_DMA_END_m  (1 << IMXMCI_PEND_DMA_END_b)
-#define IMXMCI_PEND_DMA_ERR_m  (1 << IMXMCI_PEND_DMA_ERR_b)
-#define IMXMCI_PEND_WAIT_RESP_m        (1 << IMXMCI_PEND_WAIT_RESP_b)
-#define IMXMCI_PEND_DMA_DATA_m (1 << IMXMCI_PEND_DMA_DATA_b)
-#define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b)
-#define IMXMCI_PEND_CARD_XCHG_m        (1 << IMXMCI_PEND_CARD_XCHG_b)
-#define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b)
-#define IMXMCI_PEND_STARTED_m  (1 << IMXMCI_PEND_STARTED_b)
-
-static void imxmci_stop_clock(struct imxmci_host *host)
-{
-       int i = 0;
-       u16 reg;
-
-       reg = readw(host->base + MMC_REG_STR_STP_CLK);
-       writew(reg & ~STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
-       while (i < 0x1000) {
-               if (!(i & 0x7f)) {
-                       reg = readw(host->base + MMC_REG_STR_STP_CLK);
-                       writew(reg | STR_STP_CLK_STOP_CLK,
-                                       host->base + MMC_REG_STR_STP_CLK);
-               }
-
-               reg = readw(host->base + MMC_REG_STATUS);
-               if (!(reg & STATUS_CARD_BUS_CLK_RUN)) {
-                       /* Check twice before cut */
-                       reg = readw(host->base + MMC_REG_STATUS);
-                       if (!(reg & STATUS_CARD_BUS_CLK_RUN))
-                               return;
-               }
-
-               i++;
-       }
-       dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
-}
-
-static int imxmci_start_clock(struct imxmci_host *host)
-{
-       unsigned int trials = 0;
-       unsigned int delay_limit = 128;
-       unsigned long flags;
-       u16 reg;
-
-       reg = readw(host->base + MMC_REG_STR_STP_CLK);
-       writew(reg & ~STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);
-
-       clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
-
-       /*
-        * Command start of the clock, this usually succeeds in less
-        * then 6 delay loops, but during card detection (low clockrate)
-        * it takes up to 5000 delay loops and sometimes fails for the first time
-        */
-       reg = readw(host->base + MMC_REG_STR_STP_CLK);
-       writew(reg | STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
-
-       do {
-               unsigned int delay = delay_limit;
-
-               while (delay--) {
-                       reg = readw(host->base + MMC_REG_STATUS);
-                       if (reg & STATUS_CARD_BUS_CLK_RUN) {
-                               /* Check twice before cut */
-                               reg = readw(host->base + MMC_REG_STATUS);
-                               if (reg & STATUS_CARD_BUS_CLK_RUN)
-                                       return 0;
-                       }
-
-                       if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
-                               return 0;
-               }
-
-               local_irq_save(flags);
-               /*
-                * Ensure, that request is not doubled under all possible circumstances.
-                * It is possible, that cock running state is missed, because some other
-                * IRQ or schedule delays this function execution and the clocks has
-                * been already stopped by other means (response processing, SDHC HW)
-                */
-               if (!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) {
-                       reg = readw(host->base + MMC_REG_STR_STP_CLK);
-                       writew(reg | STR_STP_CLK_START_CLK,
-                                       host->base + MMC_REG_STR_STP_CLK);
-               }
-               local_irq_restore(flags);
-
-       } while (++trials < 256);
-
-       dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");
-
-       return -1;
-}
-
-static void imxmci_softreset(struct imxmci_host *host)
-{
-       int i;
-
-       /* reset sequence */
-       writew(0x08, host->base + MMC_REG_STR_STP_CLK);
-       writew(0x0D, host->base + MMC_REG_STR_STP_CLK);
-
-       for (i = 0; i < 8; i++)
-               writew(0x05, host->base + MMC_REG_STR_STP_CLK);
-
-       writew(0xff, host->base + MMC_REG_RES_TO);
-       writew(512, host->base + MMC_REG_BLK_LEN);
-       writew(1, host->base + MMC_REG_NOB);
-}
-
-static int imxmci_busy_wait_for_status(struct imxmci_host *host,
-                                      unsigned int *pstat, unsigned int stat_mask,
-                                      int timeout, const char *where)
-{
-       int loops = 0;
-
-       while (!(*pstat & stat_mask)) {
-               loops += 2;
-               if (loops >= timeout) {
-                       dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n",
-                               where, *pstat, stat_mask);
-                       return -1;
-               }
-               udelay(2);
-               *pstat |= readw(host->base + MMC_REG_STATUS);
-       }
-       if (!loops)
-               return 0;
-
-       /* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */
-       if (!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock >= 8000000))
-               dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
-                        loops, where, *pstat, stat_mask);
-       return loops;
-}
-
-static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
-{
-       unsigned int nob = data->blocks;
-       unsigned int blksz = data->blksz;
-       unsigned int datasz = nob * blksz;
-       int i;
-
-       if (data->flags & MMC_DATA_STREAM)
-               nob = 0xffff;
-
-       host->data = data;
-       data->bytes_xfered = 0;
-
-       writew(nob, host->base + MMC_REG_NOB);
-       writew(blksz, host->base + MMC_REG_BLK_LEN);
-
-       /*
-        * DMA cannot be used for small block sizes, we have to use CPU driven transfers otherwise.
-        * We are in big troubles for non-512 byte transfers according to note in the paragraph
-        * 20.6.7 of User Manual anyway, but we need to be able to transfer SCR at least.
-        * The situation is even more complex in reality. The SDHC in not able to handle wll
-        * partial FIFO fills and reads. The length has to be rounded up to burst size multiple.
-        * This is required for SCR read at least.
-        */
-       if (datasz < 512) {
-               host->dma_size = datasz;
-               if (data->flags & MMC_DATA_READ) {
-                       host->dma_dir = DMA_FROM_DEVICE;
-
-                       /* Hack to enable read SCR */
-                       writew(1, host->base + MMC_REG_NOB);
-                       writew(512, host->base + MMC_REG_BLK_LEN);
-               } else {
-                       host->dma_dir = DMA_TO_DEVICE;
-               }
-
-               /* Convert back to virtual address */
-               host->data_ptr = (u16 *)sg_virt(data->sg);
-               host->data_cnt = 0;
-
-               clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
-               set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
-
-               return;
-       }
-
-       if (data->flags & MMC_DATA_READ) {
-               host->dma_dir = DMA_FROM_DEVICE;
-               host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
-                                            data->sg_len,  host->dma_dir);
-
-               imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
-                                host->res->start + MMC_REG_BUFFER_ACCESS,
-                                DMA_MODE_READ);
-
-               /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
-               CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
-       } else {
-               host->dma_dir = DMA_TO_DEVICE;
-
-               host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
-                                            data->sg_len,  host->dma_dir);
-
-               imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
-                                host->res->start + MMC_REG_BUFFER_ACCESS,
-                                DMA_MODE_WRITE);
-
-               /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
-               CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
-       }
-
-#if 1  /* This code is there only for consistency checking and can be disabled in future */
-       host->dma_size = 0;
-       for (i = 0; i < host->dma_nents; i++)
-               host->dma_size += data->sg[i].length;
-
-       if (datasz > host->dma_size) {
-               dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
-                       datasz, host->dma_size);
-       }
-#endif
-
-       host->dma_size = datasz;
-
-       wmb();
-
-       set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
-       clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
-
-       /* start DMA engine for read, write is delayed after initial response */
-       if (host->dma_dir == DMA_FROM_DEVICE)
-               imx_dma_enable(host->dma);
-}
-
-static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat)
-{
-       unsigned long flags;
-       u32 imask;
-
-       WARN_ON(host->cmd != NULL);
-       host->cmd = cmd;
-
-       /* Ensure, that clock are stopped else command programming and start fails */
-       imxmci_stop_clock(host);
-
-       if (cmd->flags & MMC_RSP_BUSY)
-               cmdat |= CMD_DAT_CONT_BUSY;
-
-       switch (mmc_resp_type(cmd)) {
-       case MMC_RSP_R1: /* short CRC, OPCODE */
-       case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
-               cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1;
-               break;
-       case MMC_RSP_R2: /* long 136 bit + CRC */
-               cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2;
-               break;
-       case MMC_RSP_R3: /* short */
-               cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3;
-               break;
-       default:
-               break;
-       }
-
-       if (test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events))
-               cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */
-
-       if (host->actual_bus_width == MMC_BUS_WIDTH_4)
-               cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
-
-       writew(cmd->opcode, host->base + MMC_REG_CMD);
-       writew(cmd->arg >> 16, host->base + MMC_REG_ARGH);
-       writew(cmd->arg & 0xffff, host->base + MMC_REG_ARGL);
-       writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);
-
-       atomic_set(&host->stuck_timeout, 0);
-       set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events);
-
-
-       imask = IMXMCI_INT_MASK_DEFAULT;
-       imask &= ~INT_MASK_END_CMD_RES;
-       if (cmdat & CMD_DAT_CONT_DATA_ENABLE) {
-               /* imask &= ~INT_MASK_BUF_READY; */
-               imask &= ~INT_MASK_DATA_TRAN;
-               if (cmdat & CMD_DAT_CONT_WRITE)
-                       imask &= ~INT_MASK_WRITE_OP_DONE;
-               if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
-                       imask &= ~INT_MASK_BUF_READY;
-       }
-
-       spin_lock_irqsave(&host->lock, flags);
-       host->imask = imask;
-       writew(host->imask, host->base + MMC_REG_INT_MASK);
-       spin_unlock_irqrestore(&host->lock, flags);
-
-       dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n",
-               cmd->opcode, cmd->opcode, imask);
-
-       imxmci_start_clock(host);
-}
-
-static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&host->lock, flags);
-
-       host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m |
-                                 IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m);
-
-       host->imask = IMXMCI_INT_MASK_DEFAULT;
-       writew(host->imask, host->base + MMC_REG_INT_MASK);
-
-       spin_unlock_irqrestore(&host->lock, flags);
-
-       if (req && req->cmd)
-               host->prev_cmd_code = req->cmd->opcode;
-
-       host->req = NULL;
-       host->cmd = NULL;
-       host->data = NULL;
-       mmc_request_done(host->mmc, req);
-}
-
-static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
-{
-       struct mmc_data *data = host->data;
-       int data_error;
-
-       if (test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
-               imx_dma_disable(host->dma);
-               dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
-                            host->dma_dir);
-       }
-
-       if (stat & STATUS_ERR_MASK) {
-               dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", stat);
-               if (stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
-                       data->error = -EILSEQ;
-               else if (stat & STATUS_TIME_OUT_READ)
-                       data->error = -ETIMEDOUT;
-               else
-                       data->error = -EIO;
-       } else {
-               data->bytes_xfered = host->dma_size;
-       }
-
-       data_error = data->error;
-
-       host->data = NULL;
-
-       return data_error;
-}
-
-static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
-{
-       struct mmc_command *cmd = host->cmd;
-       int i;
-       u32 a, b, c;
-       struct mmc_data *data = host->data;
-
-       if (!cmd)
-               return 0;
-
-       host->cmd = NULL;
-
-       if (stat & STATUS_TIME_OUT_RESP) {
-               dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
-               cmd->error = -ETIMEDOUT;
-       } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
-               dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
-               cmd->error = -EILSEQ;
-       }
-
-       if (cmd->flags & MMC_RSP_PRESENT) {
-               if (cmd->flags & MMC_RSP_136) {
-                       for (i = 0; i < 4; i++) {
-                               a = readw(host->base + MMC_REG_RES_FIFO);
-                               b = readw(host->base + MMC_REG_RES_FIFO);
-                               cmd->resp[i] = a << 16 | b;
-                       }
-               } else {
-                       a = readw(host->base + MMC_REG_RES_FIFO);
-                       b = readw(host->base + MMC_REG_RES_FIFO);
-                       c = readw(host->base + MMC_REG_RES_FIFO);
-                       cmd->resp[0] = a << 24 | b << 8 | c >> 8;
-               }
-       }
-
-       dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n",
-               cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error);
-
-       if (data && !cmd->error && !(stat & STATUS_ERR_MASK)) {
-               if (host->req->data->flags & MMC_DATA_WRITE) {
-
-                       /* Wait for FIFO to be empty before starting DMA write */
-
-                       stat = readw(host->base + MMC_REG_STATUS);
-                       if (imxmci_busy_wait_for_status(host, &stat,
-                                                       STATUS_APPL_BUFF_FE,
-                                                       40, "imxmci_cmd_done DMA WR") < 0) {
-                               cmd->error = -EIO;
-                               imxmci_finish_data(host, stat);
-                               if (host->req)
-                                       imxmci_finish_request(host, host->req);
-                               dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n",
-                                        stat);
-                               return 0;
-                       }
-
-                       if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
-                               imx_dma_enable(host->dma);
-               }
-       } else {
-               struct mmc_request *req;
-               imxmci_stop_clock(host);
-               req = host->req;
-
-               if (data)
-                       imxmci_finish_data(host, stat);
-
-               if (req)
-                       imxmci_finish_request(host, req);
-               else
-                       dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
-       }
-
-       return 1;
-}
-
-static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
-{
-       struct mmc_data *data = host->data;
-       int data_error;
-
-       if (!data)
-               return 0;
-
-       data_error = imxmci_finish_data(host, stat);
-
-       if (host->req->stop) {
-               imxmci_stop_clock(host);
-               imxmci_start_cmd(host, host->req->stop, 0);
-       } else {
-               struct mmc_request *req;
-               req = host->req;
-               if (req)
-                       imxmci_finish_request(host, req);
-               else
-                       dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n");
-       }
-
-       return 1;
-}
-
-static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
-{
-       int i;
-       int burst_len;
-       int trans_done = 0;
-       unsigned int stat = *pstat;
-
-       if (host->actual_bus_width != MMC_BUS_WIDTH_4)
-               burst_len = 16;
-       else
-               burst_len = 64;
-
-       /* This is unfortunately required */
-       dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
-               stat);
-
-       udelay(20);     /* required for clocks < 8MHz*/
-
-       if (host->dma_dir == DMA_FROM_DEVICE) {
-               imxmci_busy_wait_for_status(host, &stat,
-                                           STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE |
-                                           STATUS_TIME_OUT_READ,
-                                           50, "imxmci_cpu_driven_data read");
-
-               while ((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
-                      !(stat & STATUS_TIME_OUT_READ) &&
-                      (host->data_cnt < 512)) {
-
-                       udelay(20);     /* required for clocks < 8MHz*/
-
-                       for (i = burst_len; i >= 2 ; i -= 2) {
-                               u16 data;
-                               data = readw(host->base + MMC_REG_BUFFER_ACCESS);
-                               udelay(10);     /* required for clocks < 8MHz*/
-                               if (host->data_cnt+2 <= host->dma_size) {
-                                       *(host->data_ptr++) = data;
-                               } else {
-                                       if (host->data_cnt < host->dma_size)
-                                               *(u8 *)(host->data_ptr) = data;
-                               }
-                               host->data_cnt += 2;
-                       }
-
-                       stat = readw(host->base + MMC_REG_STATUS);
-
-                       dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
-                               host->data_cnt, burst_len, stat);
-               }
-
-               if ((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
-                       trans_done = 1;
-
-               if (host->dma_size & 0x1ff)
-                       stat &= ~STATUS_CRC_READ_ERR;
-
-               if (stat & STATUS_TIME_OUT_READ) {
-                       dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n",
-                               stat);
-                       trans_done = -1;
-               }
-
-       } else {
-               imxmci_busy_wait_for_status(host, &stat,
-                                           STATUS_APPL_BUFF_FE,
-                                           20, "imxmci_cpu_driven_data write");
-
-               while ((stat & STATUS_APPL_BUFF_FE) &&
-                      (host->data_cnt < host->dma_size)) {
-                       if (burst_len >= host->dma_size - host->data_cnt) {
-                               burst_len = host->dma_size - host->data_cnt;
-                               host->data_cnt = host->dma_size;
-                               trans_done = 1;
-                       } else {
-                               host->data_cnt += burst_len;
-                       }
-
-                       for (i = burst_len; i > 0 ; i -= 2)
-                               writew(*(host->data_ptr++), host->base + MMC_REG_BUFFER_ACCESS);
-
-                       stat = readw(host->base + MMC_REG_STATUS);
-
-                       dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n",
-                               burst_len, stat);
-               }
-       }
-
-       *pstat = stat;
-
-       return trans_done;
-}
-
-static void imxmci_dma_irq(int dma, void *devid)
-{
-       struct imxmci_host *host = devid;
-       u32 stat = readw(host->base + MMC_REG_STATUS);
-
-       atomic_set(&host->stuck_timeout, 0);
-       host->status_reg = stat;
-       set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
-       tasklet_schedule(&host->tasklet);
-}
-
-static irqreturn_t imxmci_irq(int irq, void *devid)
-{
-       struct imxmci_host *host = devid;
-       u32 stat = readw(host->base + MMC_REG_STATUS);
-       int handled = 1;
-
-       writew(host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT,
-                       host->base + MMC_REG_INT_MASK);
-
-       atomic_set(&host->stuck_timeout, 0);
-       host->status_reg = stat;
-       set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
-       set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
-       tasklet_schedule(&host->tasklet);
-
-       return IRQ_RETVAL(handled);
-}
-
-static void imxmci_tasklet_fnc(unsigned long data)
-{
-       struct imxmci_host *host = (struct imxmci_host *)data;
-       u32 stat;
-       unsigned int data_dir_mask = 0; /* STATUS_WR_CRC_ERROR_CODE_MASK */
-       int timeout = 0;
-
-       if (atomic_read(&host->stuck_timeout) > 4) {
-               char *what;
-               timeout = 1;
-               stat = readw(host->base + MMC_REG_STATUS);
-               host->status_reg = stat;
-               if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
-                       if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
-                               what = "RESP+DMA";
-                       else
-                               what = "RESP";
-               else
-                       if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
-                               if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
-                                       what = "DATA";
-                               else
-                                       what = "DMA";
-                       else
-                               what = "???";
-
-               dev_err(mmc_dev(host->mmc),
-                       "%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n",
-                       what, stat,
-                       readw(host->base + MMC_REG_INT_MASK));
-               dev_err(mmc_dev(host->mmc),
-                       "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
-                       readw(host->base + MMC_REG_CMD_DAT_CONT),
-                       readw(host->base + MMC_REG_BLK_LEN),
-                       readw(host->base + MMC_REG_NOB),
-                       CCR(host->dma));
-               dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
-                       host->cmd ? host->cmd->opcode : 0,
-                       host->prev_cmd_code,
-                       1 << host->actual_bus_width, host->dma_size);
-       }
-
-       if (!host->present || timeout)
-               host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
-                       STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;
-
-       if (test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
-               clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
-
-               stat = readw(host->base + MMC_REG_STATUS);
-               /*
-                * This is not required in theory, but there is chance to miss some flag
-                * which clears automatically by mask write, FreeScale original code keeps
-                * stat from IRQ time so do I
-                */
-               stat |= host->status_reg;
-
-               if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
-                       stat &= ~STATUS_CRC_READ_ERR;
-
-               if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
-                       imxmci_busy_wait_for_status(host, &stat,
-                                                   STATUS_END_CMD_RESP | STATUS_ERR_MASK,
-                                                   20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
-               }
-
-               if (stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
-                       if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
-                               imxmci_cmd_done(host, stat);
-                       if (host->data && (stat & STATUS_ERR_MASK))
-                               imxmci_data_done(host, stat);
-               }
-
-               if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
-                       stat |= readw(host->base + MMC_REG_STATUS);
-                       if (imxmci_cpu_driven_data(host, &stat)) {
-                               if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
-                                       imxmci_cmd_done(host, stat);
-                               atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
-                                                 &host->pending_events);
-                               imxmci_data_done(host, stat);
-                       }
-               }
-       }
-
-       if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
-           !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
-
-               stat = readw(host->base + MMC_REG_STATUS);
-               /* Same as above */
-               stat |= host->status_reg;
-
-               if (host->dma_dir == DMA_TO_DEVICE)
-                       data_dir_mask = STATUS_WRITE_OP_DONE;
-               else
-                       data_dir_mask = STATUS_DATA_TRANS_DONE;
-
-               if (stat & data_dir_mask) {
-                       clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
-                       imxmci_data_done(host, stat);
-               }
-       }
-
-       if (test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {
-
-               if (host->cmd)
-                       imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);
-
-               if (host->data)
-                       imxmci_data_done(host, STATUS_TIME_OUT_READ |
-                                        STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);
-
-               if (host->req)
-                       imxmci_finish_request(host, host->req);
-
-               mmc_detect_change(host->mmc, msecs_to_jiffies(100));
-
-       }
-}
-
-static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req)
-{
-       struct imxmci_host *host = mmc_priv(mmc);
-       unsigned int cmdat;
-
-       WARN_ON(host->req != NULL);
-
-       host->req = req;
-
-       cmdat = 0;
-
-       if (req->data) {
-               imxmci_setup_data(host, req->data);
-
-               cmdat |= CMD_DAT_CONT_DATA_ENABLE;
-
-               if (req->data->flags & MMC_DATA_WRITE)
-                       cmdat |= CMD_DAT_CONT_WRITE;
-
-               if (req->data->flags & MMC_DATA_STREAM)
-                       cmdat |= CMD_DAT_CONT_STREAM_BLOCK;
-       }
-
-       imxmci_start_cmd(host, req->cmd, cmdat);
-}
-
-#define CLK_RATE 19200000
-
-static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
-{
-       struct imxmci_host *host = mmc_priv(mmc);
-       int prescaler;
-
-       if (ios->bus_width == MMC_BUS_WIDTH_4) {
-               host->actual_bus_width = MMC_BUS_WIDTH_4;
-               imx_gpio_mode(PB11_PF_SD_DAT3);
-               BLR(host->dma) = 0;     /* burst 64 byte read/write */
-       } else {
-               host->actual_bus_width = MMC_BUS_WIDTH_1;
-               imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
-               BLR(host->dma) = 16;    /* burst 16 byte read/write */
-       }
-
-       if (host->power_mode != ios->power_mode) {
-               switch (ios->power_mode) {
-               case MMC_POWER_OFF:
-                       break;
-               case MMC_POWER_UP:
-                       set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
-                       break;
-               case MMC_POWER_ON:
-                       break;
-               }
-               host->power_mode = ios->power_mode;
-       }
-
-       if (ios->clock) {
-               unsigned int clk;
-               u16 reg;
-
-               /* The prescaler is 5 for PERCLK2 equal to 96MHz
-                * then 96MHz / 5 = 19.2 MHz
-                */
-               clk = clk_get_rate(host->clk);
-               prescaler = (clk + (CLK_RATE * 7) / 8) / CLK_RATE;
-               switch (prescaler) {
-               case 0:
-               case 1: prescaler = 0;
-                       break;
-               case 2: prescaler = 1;
-                       break;
-               case 3: prescaler = 2;
-                       break;
-               case 4: prescaler = 4;
-                       break;
-               default:
-               case 5: prescaler = 5;
-                       break;
-               }
-
-               dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n",
-                       clk, prescaler);
-
-               for (clk = 0; clk < 8; clk++) {
-                       int x;
-                       x = CLK_RATE / (1 << clk);
-                       if (x <= ios->clock)
-                               break;
-               }
-
-               /* enable controller */
-               reg = readw(host->base + MMC_REG_STR_STP_CLK);
-               writew(reg | STR_STP_CLK_ENABLE,
-                               host->base + MMC_REG_STR_STP_CLK);
-
-               imxmci_stop_clock(host);
-               writew((prescaler << 3) | clk, host->base + MMC_REG_CLK_RATE);
-               /*
-                * Under my understanding, clock should not be started there, because it would
-                * initiate SDHC sequencer and send last or random command into card
-                */
-               /* imxmci_start_clock(host); */
-
-               dev_dbg(mmc_dev(host->mmc),
-                       "MMC_CLK_RATE: 0x%08x\n",
-                       readw(host->base + MMC_REG_CLK_RATE));
-       } else {
-               imxmci_stop_clock(host);
-       }
-}
-
-static int imxmci_get_ro(struct mmc_host *mmc)
-{
-       struct imxmci_host *host = mmc_priv(mmc);
-
-       if (host->pdata && host->pdata->get_ro)
-               return !!host->pdata->get_ro(mmc_dev(mmc));
-       /*
-        * Board doesn't support read only detection; let the mmc core
-        * decide what to do.
-        */
-       return -ENOSYS;
-}
-
-
-static const struct mmc_host_ops imxmci_ops = {
-       .request        = imxmci_request,
-       .set_ios        = imxmci_set_ios,
-       .get_ro         = imxmci_get_ro,
-};
-
-static void imxmci_check_status(unsigned long data)
-{
-       struct imxmci_host *host = (struct imxmci_host *)data;
-
-       if (host->pdata && host->pdata->card_present &&
-           host->pdata->card_present(mmc_dev(host->mmc)) != host->present) {
-               host->present ^= 1;
-               dev_info(mmc_dev(host->mmc), "card %s\n",
-                     host->present ? "inserted" : "removed");
-
-               set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events);
-               tasklet_schedule(&host->tasklet);
-       }
-
-       if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) ||
-           test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
-               atomic_inc(&host->stuck_timeout);
-               if (atomic_read(&host->stuck_timeout) > 4)
-                       tasklet_schedule(&host->tasklet);
-       } else {
-               atomic_set(&host->stuck_timeout, 0);
-
-       }
-
-       mod_timer(&host->timer, jiffies + (HZ>>1));
-}
-
-static int __init imxmci_probe(struct platform_device *pdev)
-{
-       struct mmc_host *mmc;
-       struct imxmci_host *host = NULL;
-       struct resource *r;
-       int ret = 0, irq;
-       u16 rev_no;
-
-       pr_info("i.MX mmc driver\n");
-
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       irq = platform_get_irq(pdev, 0);
-       if (!r || irq < 0)
-               return -ENXIO;
-
-       r = request_mem_region(r->start, resource_size(r), pdev->name);
-       if (!r)
-               return -EBUSY;
-
-       mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
-       if (!mmc) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       mmc->ops = &imxmci_ops;
-       mmc->f_min = 150000;
-       mmc->f_max = CLK_RATE/2;
-       mmc->ocr_avail = MMC_VDD_32_33;
-       mmc->caps = MMC_CAP_4_BIT_DATA;
-
-       /* MMC core transfer sizes tunable parameters */
-       mmc->max_segs = 64;
-       mmc->max_seg_size = 64*512;     /* default PAGE_CACHE_SIZE */
-       mmc->max_req_size = 64*512;     /* default PAGE_CACHE_SIZE */
-       mmc->max_blk_size = 2048;
-       mmc->max_blk_count = 65535;
-
-       host = mmc_priv(mmc);
-       host->base = ioremap(r->start, resource_size(r));
-       if (!host->base) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       host->mmc = mmc;
-       host->dma_allocated = 0;
-       host->pdata = pdev->dev.platform_data;
-       if (!host->pdata)
-               dev_warn(&pdev->dev, "No platform data provided!\n");
-
-       spin_lock_init(&host->lock);
-       host->res = r;
-       host->irq = irq;
-
-       host->clk = clk_get(&pdev->dev, "perclk2");
-       if (IS_ERR(host->clk)) {
-               ret = PTR_ERR(host->clk);
-               goto out;
-       }
-       clk_enable(host->clk);
-
-       imx_gpio_mode(PB8_PF_SD_DAT0);
-       imx_gpio_mode(PB9_PF_SD_DAT1);
-       imx_gpio_mode(PB10_PF_SD_DAT2);
-       /* Configured as GPIO with pull-up to ensure right MCC card mode */
-       /* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */
-       imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
-       /* imx_gpio_mode(PB11_PF_SD_DAT3); */
-       imx_gpio_mode(PB12_PF_SD_CLK);
-       imx_gpio_mode(PB13_PF_SD_CMD);
-
-       imxmci_softreset(host);
-
-       rev_no = readw(host->base + MMC_REG_REV_NO);
-       if (rev_no != 0x390) {
-               dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
-                       readw(host->base + MMC_REG_REV_NO));
-               goto out;
-       }
-
-       /* recommended in data sheet */
-       writew(0x2db4, host->base + MMC_REG_READ_TO);
-
-       host->imask = IMXMCI_INT_MASK_DEFAULT;
-       writew(host->imask, host->base + MMC_REG_INT_MASK);
-
-       host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
-       if(host->dma < 0) {
-               dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
-               ret = -EBUSY;
-               goto out;
-       }
-       host->dma_allocated = 1;
-       imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
-       RSSR(host->dma) = DMA_REQ_SDHC;
-
-       tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
-       host->status_reg=0;
-       host->pending_events=0;
-
-       ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host);
-       if (ret)
-               goto out;
-
-       if (host->pdata && host->pdata->card_present)
-               host->present = host->pdata->card_present(mmc_dev(mmc));
-       else    /* if there is no way to detect assume that card is present */
-               host->present = 1;
-
-       init_timer(&host->timer);
-       host->timer.data = (unsigned long)host;
-       host->timer.function = imxmci_check_status;
-       add_timer(&host->timer);
-       mod_timer(&host->timer, jiffies + (HZ >> 1));
-
-       platform_set_drvdata(pdev, mmc);
-
-       mmc_add_host(mmc);
-
-       return 0;
-
-out:
-       if (host) {
-               if (host->dma_allocated) {
-                       imx_dma_free(host->dma);
-                       host->dma_allocated = 0;
-               }
-               if (host->clk) {
-                       clk_disable(host->clk);
-                       clk_put(host->clk);
-               }
-               if (host->base)
-                       iounmap(host->base);
-       }
-       if (mmc)
-               mmc_free_host(mmc);
-       release_mem_region(r->start, resource_size(r));
-       return ret;
-}
-
-static int __exit imxmci_remove(struct platform_device *pdev)
-{
-       struct mmc_host *mmc = platform_get_drvdata(pdev);
-
-       platform_set_drvdata(pdev, NULL);
-
-       if (mmc) {
-               struct imxmci_host *host = mmc_priv(mmc);
-
-               tasklet_disable(&host->tasklet);
-
-               del_timer_sync(&host->timer);
-               mmc_remove_host(mmc);
-
-               free_irq(host->irq, host);
-               iounmap(host->base);
-               if (host->dma_allocated) {
-                       imx_dma_free(host->dma);
-                       host->dma_allocated = 0;
-               }
-
-               tasklet_kill(&host->tasklet);
-
-               clk_disable(host->clk);
-               clk_put(host->clk);
-
-               release_mem_region(host->res->start, resource_size(host->res));
-
-               mmc_free_host(mmc);
-       }
-       return 0;
-}
-
-#ifdef CONFIG_PM
-static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
-{
-       struct mmc_host *mmc = platform_get_drvdata(dev);
-       int ret = 0;
-
-       if (mmc)
-               ret = mmc_suspend_host(mmc);
-
-       return ret;
-}
-
-static int imxmci_resume(struct platform_device *dev)
-{
-       struct mmc_host *mmc = platform_get_drvdata(dev);
-       struct imxmci_host *host;
-       int ret = 0;
-
-       if (mmc) {
-               host = mmc_priv(mmc);
-               if (host)
-                       set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
-               ret = mmc_resume_host(mmc);
-       }
-
-       return ret;
-}
-#else
-#define imxmci_suspend  NULL
-#define imxmci_resume   NULL
-#endif /* CONFIG_PM */
-
-static struct platform_driver imxmci_driver = {
-       .remove         = __exit_p(imxmci_remove),
-       .suspend        = imxmci_suspend,
-       .resume         = imxmci_resume,
-       .driver         = {
-               .name           = DRIVER_NAME,
-               .owner          = THIS_MODULE,
-       }
-};
-
-static int __init imxmci_init(void)
-{
-       return platform_driver_probe(&imxmci_driver, imxmci_probe);
-}
-
-static void __exit imxmci_exit(void)
-{
-       platform_driver_unregister(&imxmci_driver);
-}
-
-module_init(imxmci_init);
-module_exit(imxmci_exit);
-
-MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
-MODULE_AUTHOR("Sascha Hauer, Pengutronix");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-mmc");
diff --git a/drivers/mmc/host/imxmmc.h b/drivers/mmc/host/imxmmc.h
deleted file mode 100644 (file)
index 09d5d4e..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-#define MMC_REG_STR_STP_CLK            0x00
-#define MMC_REG_STATUS                 0x04
-#define MMC_REG_CLK_RATE               0x08
-#define MMC_REG_CMD_DAT_CONT           0x0C
-#define MMC_REG_RES_TO                 0x10
-#define MMC_REG_READ_TO                        0x14
-#define MMC_REG_BLK_LEN                        0x18
-#define MMC_REG_NOB                    0x1C
-#define MMC_REG_REV_NO                 0x20
-#define MMC_REG_INT_MASK               0x24
-#define MMC_REG_CMD                    0x28
-#define MMC_REG_ARGH                   0x2C
-#define MMC_REG_ARGL                   0x30
-#define MMC_REG_RES_FIFO               0x34
-#define MMC_REG_BUFFER_ACCESS          0x38
-
-#define STR_STP_CLK_IPG_CLK_GATE_DIS    (1<<15)
-#define STR_STP_CLK_IPG_PERCLK_GATE_DIS (1<<14)
-#define STR_STP_CLK_ENDIAN              (1<<5)
-#define STR_STP_CLK_RESET               (1<<3)
-#define STR_STP_CLK_ENABLE              (1<<2)
-#define STR_STP_CLK_START_CLK           (1<<1)
-#define STR_STP_CLK_STOP_CLK            (1<<0)
-#define STATUS_CARD_PRESENCE            (1<<15)
-#define STATUS_SDIO_INT_ACTIVE          (1<<14)
-#define STATUS_END_CMD_RESP             (1<<13)
-#define STATUS_WRITE_OP_DONE            (1<<12)
-#define STATUS_DATA_TRANS_DONE          (1<<11)
-#define STATUS_WR_CRC_ERROR_CODE_MASK   (3<<10)
-#define STATUS_CARD_BUS_CLK_RUN         (1<<8)
-#define STATUS_APPL_BUFF_FF             (1<<7)
-#define STATUS_APPL_BUFF_FE             (1<<6)
-#define STATUS_RESP_CRC_ERR             (1<<5)
-#define STATUS_CRC_READ_ERR             (1<<3)
-#define STATUS_CRC_WRITE_ERR            (1<<2)
-#define STATUS_TIME_OUT_RESP            (1<<1)
-#define STATUS_TIME_OUT_READ            (1<<0)
-#define STATUS_ERR_MASK                 0x2f
-#define CLK_RATE_PRESCALER(x)           ((x) & 0x7)
-#define CLK_RATE_CLK_RATE(x)            (((x) & 0x7) << 3)
-#define CMD_DAT_CONT_CMD_RESP_LONG_OFF  (1<<12)
-#define CMD_DAT_CONT_STOP_READWAIT      (1<<11)
-#define CMD_DAT_CONT_START_READWAIT     (1<<10)
-#define CMD_DAT_CONT_BUS_WIDTH_1        (0<<8)
-#define CMD_DAT_CONT_BUS_WIDTH_4        (2<<8)
-#define CMD_DAT_CONT_INIT               (1<<7)
-#define CMD_DAT_CONT_BUSY               (1<<6)
-#define CMD_DAT_CONT_STREAM_BLOCK       (1<<5)
-#define CMD_DAT_CONT_WRITE              (1<<4)
-#define CMD_DAT_CONT_DATA_ENABLE        (1<<3)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R1 (1)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R2 (2)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R3 (3)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R4 (4)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R5 (5)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R6 (6)
-#define INT_MASK_AUTO_CARD_DETECT       (1<<6)
-#define INT_MASK_DAT0_EN                (1<<5)
-#define INT_MASK_SDIO                   (1<<4)
-#define INT_MASK_BUF_READY              (1<<3)
-#define INT_MASK_END_CMD_RES            (1<<2)
-#define INT_MASK_WRITE_OP_DONE          (1<<1)
-#define INT_MASK_DATA_TRAN              (1<<0)
-#define INT_ALL                         (0x7f)
index b6f38421d5418c061541a44be3b736617834b449..f0fcce40cd8daa27a6a10e44bb28ad9abcf5624d 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
@@ -25,6 +26,7 @@
 #include <linux/clk.h>
 #include <linux/scatterlist.h>
 #include <linux/gpio.h>
+#include <linux/of_gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
@@ -1207,21 +1209,76 @@ static const struct mmc_host_ops mmci_ops = {
        .get_cd         = mmci_get_cd,
 };
 
+#ifdef CONFIG_OF
+static void mmci_dt_populate_generic_pdata(struct device_node *np,
+                                       struct mmci_platform_data *pdata)
+{
+       int bus_width = 0;
+
+       pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
+       if (!pdata->gpio_wp)
+               pdata->gpio_wp = -1;
+
+       pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
+       if (!pdata->gpio_cd)
+               pdata->gpio_cd = -1;
+
+       if (of_get_property(np, "cd-inverted", NULL))
+               pdata->cd_invert = true;
+       else
+               pdata->cd_invert = false;
+
+       of_property_read_u32(np, "max-frequency", &pdata->f_max);
+       if (!pdata->f_max)
+               pr_warn("%s has no 'max-frequency' property\n", np->full_name);
+
+       if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
+               pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
+       if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
+               pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;
+
+       of_property_read_u32(np, "bus-width", &bus_width);
+       switch (bus_width) {
+       case 0 :
+               /* No bus-width supplied. */
+               break;
+       case 4 :
+               pdata->capabilities |= MMC_CAP_4_BIT_DATA;
+               break;
+       case 8 :
+               pdata->capabilities |= MMC_CAP_8_BIT_DATA;
+               break;
+       default :
+               pr_warn("%s: Unsupported bus width\n", np->full_name);
+       }
+}
+#else
+static void mmci_dt_populate_generic_pdata(struct device_node *np,
+                                       struct mmci_platform_data *pdata)
+{
+       return;
+}
+#endif
+
 static int __devinit mmci_probe(struct amba_device *dev,
        const struct amba_id *id)
 {
        struct mmci_platform_data *plat = dev->dev.platform_data;
+       struct device_node *np = dev->dev.of_node;
        struct variant_data *variant = id->data;
        struct mmci_host *host;
        struct mmc_host *mmc;
        int ret;
 
-       /* must have platform data */
-       if (!plat) {
-               ret = -EINVAL;
-               goto out;
+       /* Must have platform data or Device Tree. */
+       if (!plat && !np) {
+               dev_err(&dev->dev, "No plat data or DT found\n");
+               return -EINVAL;
        }
 
+       if (np)
+               mmci_dt_populate_generic_pdata(np, plat);
+
        ret = amba_request_regions(dev, DRIVER_NAME);
        if (ret)
                goto out;
index eeb8cd125b0c3390a2061597ec895707c9129275..3b9136c1a475428b8e95732de6425215dc2d337c 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
 #include <linux/irq.h>
+#include <linux/clk.h>
 #include <linux/gpio.h>
 #include <linux/mmc/host.h>
 
@@ -51,6 +52,7 @@ struct mvsd_host {
        struct device *dev;
        struct resource *res;
        int irq;
+       struct clk *clk;
        int gpio_card_detect;
        int gpio_write_protect;
 };
@@ -770,6 +772,13 @@ static int __init mvsd_probe(struct platform_device *pdev)
        } else
                host->irq = irq;
 
+       /* Not all platforms can gate the clock, so it is not
+          an error if the clock does not exists. */
+       host->clk = clk_get(&pdev->dev, NULL);
+       if (!IS_ERR(host->clk)) {
+               clk_prepare_enable(host->clk);
+       }
+
        if (mvsd_data->gpio_card_detect) {
                ret = gpio_request(mvsd_data->gpio_card_detect,
                                   DRIVER_NAME " cd");
@@ -854,6 +863,11 @@ static int __exit mvsd_remove(struct platform_device *pdev)
                mvsd_power_down(host);
                iounmap(host->base);
                release_resource(host->res);
+
+               if (!IS_ERR(host->clk)) {
+                       clk_disable_unprepare(host->clk);
+                       clk_put(host->clk);
+               }
                mmc_free_host(mmc);
        }
        platform_set_drvdata(pdev, NULL);
index b2058b4323209ba2a1a97aeabd21654655f02c0a..28ed52d58f7f5262d2ab9500095ee7ec478337f5 100644 (file)
@@ -136,7 +136,8 @@ struct mxcmci_host {
        u16                     rev_no;
        unsigned int            cmdat;
 
-       struct clk              *clk;
+       struct clk              *clk_ipg;
+       struct clk              *clk_per;
 
        int                     clock;
 
@@ -672,7 +673,7 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
 {
        unsigned int divider;
        int prescaler = 0;
-       unsigned int clk_in = clk_get_rate(host->clk);
+       unsigned int clk_in = clk_get_rate(host->clk_per);
 
        while (prescaler <= 0x800) {
                for (divider = 1; divider <= 0xF; divider++) {
@@ -900,12 +901,20 @@ static int mxcmci_probe(struct platform_device *pdev)
        host->res = r;
        host->irq = irq;
 
-       host->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(host->clk)) {
-               ret = PTR_ERR(host->clk);
+       host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+       if (IS_ERR(host->clk_ipg)) {
+               ret = PTR_ERR(host->clk_ipg);
                goto out_iounmap;
        }
-       clk_enable(host->clk);
+
+       host->clk_per = devm_clk_get(&pdev->dev, "per");
+       if (IS_ERR(host->clk_per)) {
+               ret = PTR_ERR(host->clk_per);
+               goto out_iounmap;
+       }
+
+       clk_prepare_enable(host->clk_per);
+       clk_prepare_enable(host->clk_ipg);
 
        mxcmci_softreset(host);
 
@@ -917,8 +926,8 @@ static int mxcmci_probe(struct platform_device *pdev)
                goto out_clk_put;
        }
 
-       mmc->f_min = clk_get_rate(host->clk) >> 16;
-       mmc->f_max = clk_get_rate(host->clk) >> 1;
+       mmc->f_min = clk_get_rate(host->clk_per) >> 16;
+       mmc->f_max = clk_get_rate(host->clk_per) >> 1;
 
        /* recommended in data sheet */
        writew(0x2db4, host->base + MMC_REG_READ_TO);
@@ -967,8 +976,8 @@ out_free_dma:
        if (host->dma)
                dma_release_channel(host->dma);
 out_clk_put:
-       clk_disable(host->clk);
-       clk_put(host->clk);
+       clk_disable_unprepare(host->clk_per);
+       clk_disable_unprepare(host->clk_ipg);
 out_iounmap:
        iounmap(host->base);
 out_free:
@@ -999,8 +1008,8 @@ static int mxcmci_remove(struct platform_device *pdev)
        if (host->dma)
                dma_release_channel(host->dma);
 
-       clk_disable(host->clk);
-       clk_put(host->clk);
+       clk_disable_unprepare(host->clk_per);
+       clk_disable_unprepare(host->clk_ipg);
 
        release_mem_region(host->res->start, resource_size(host->res));
 
@@ -1018,7 +1027,8 @@ static int mxcmci_suspend(struct device *dev)
 
        if (mmc)
                ret = mmc_suspend_host(mmc);
-       clk_disable(host->clk);
+       clk_disable_unprepare(host->clk_per);
+       clk_disable_unprepare(host->clk_ipg);
 
        return ret;
 }
@@ -1029,7 +1039,8 @@ static int mxcmci_resume(struct device *dev)
        struct mxcmci_host *host = mmc_priv(mmc);
        int ret = 0;
 
-       clk_enable(host->clk);
+       clk_prepare_enable(host->clk_per);
+       clk_prepare_enable(host->clk_ipg);
        if (mmc)
                ret = mmc_resume_host(mmc);
 
index bb03ddda481d659bb8aeee6d348bf5a33908e7c3..34a90266ab11710d69f85c2a5648c68d1d6ec430 100644 (file)
@@ -23,6 +23,9 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/fsl/mxs-dma.h>
 #include <linux/pinctrl/consumer.h>
-
-#include <mach/mxs.h>
-#include <mach/common.h>
-#include <mach/mmc.h>
+#include <linux/stmp_device.h>
+#include <linux/mmc/mxs-mmc.h>
 
 #define DRIVER_NAME    "mxs-mmc"
 
 /* card detect polling timeout */
 #define MXS_MMC_DETECT_TIMEOUT                 (HZ/2)
 
-#define SSP_VERSION_LATEST     4
-#define ssp_is_old()           (host->version < SSP_VERSION_LATEST)
+#define ssp_is_old(host)       ((host)->devid == IMX23_MMC)
 
 /* SSP registers */
 #define HW_SSP_CTRL0                           0x000
 #define  BM_SSP_BLOCK_SIZE_BLOCK_COUNT         (0xffffff << 4)
 #define  BP_SSP_BLOCK_SIZE_BLOCK_SIZE          (0)
 #define  BM_SSP_BLOCK_SIZE_BLOCK_SIZE          (0xf)
-#define HW_SSP_TIMING                          (ssp_is_old() ? 0x050 : 0x070)
+#define HW_SSP_TIMING(h)                       (ssp_is_old(h) ? 0x050 : 0x070)
 #define  BP_SSP_TIMING_TIMEOUT                 (16)
 #define  BM_SSP_TIMING_TIMEOUT                 (0xffff << 16)
 #define  BP_SSP_TIMING_CLOCK_DIVIDE            (8)
 #define  BM_SSP_TIMING_CLOCK_DIVIDE            (0xff << 8)
 #define  BP_SSP_TIMING_CLOCK_RATE              (0)
 #define  BM_SSP_TIMING_CLOCK_RATE              (0xff)
-#define HW_SSP_CTRL1                           (ssp_is_old() ? 0x060 : 0x080)
+#define HW_SSP_CTRL1(h)                                (ssp_is_old(h) ? 0x060 : 0x080)
 #define  BM_SSP_CTRL1_SDIO_IRQ                 (1 << 31)
 #define  BM_SSP_CTRL1_SDIO_IRQ_EN              (1 << 30)
 #define  BM_SSP_CTRL1_RESP_ERR_IRQ             (1 << 29)
 #define  BM_SSP_CTRL1_WORD_LENGTH              (0xf << 4)
 #define  BP_SSP_CTRL1_SSP_MODE                 (0)
 #define  BM_SSP_CTRL1_SSP_MODE                 (0xf)
-#define HW_SSP_SDRESP0                         (ssp_is_old() ? 0x080 : 0x0a0)
-#define HW_SSP_SDRESP1                         (ssp_is_old() ? 0x090 : 0x0b0)
-#define HW_SSP_SDRESP2                         (ssp_is_old() ? 0x0a0 : 0x0c0)
-#define HW_SSP_SDRESP3                         (ssp_is_old() ? 0x0b0 : 0x0d0)
-#define HW_SSP_STATUS                          (ssp_is_old() ? 0x0c0 : 0x100)
+#define HW_SSP_SDRESP0(h)                      (ssp_is_old(h) ? 0x080 : 0x0a0)
+#define HW_SSP_SDRESP1(h)                      (ssp_is_old(h) ? 0x090 : 0x0b0)
+#define HW_SSP_SDRESP2(h)                      (ssp_is_old(h) ? 0x0a0 : 0x0c0)
+#define HW_SSP_SDRESP3(h)                      (ssp_is_old(h) ? 0x0b0 : 0x0d0)
+#define HW_SSP_STATUS(h)                       (ssp_is_old(h) ? 0x0c0 : 0x100)
 #define  BM_SSP_STATUS_CARD_DETECT             (1 << 28)
 #define  BM_SSP_STATUS_SDIO_IRQ                        (1 << 17)
-#define HW_SSP_VERSION                         (cpu_is_mx23() ? 0x110 : 0x130)
-#define  BP_SSP_VERSION_MAJOR                  (24)
 
 #define BF_SSP(value, field)   (((value) << BP_SSP_##field) & BM_SSP_##field)
 
 
 #define SSP_PIO_NUM    3
 
+enum mxs_mmc_id {
+       IMX23_MMC,
+       IMX28_MMC,
+};
+
 struct mxs_mmc_host {
        struct mmc_host                 *mmc;
        struct mmc_request              *mrq;
@@ -146,9 +149,7 @@ struct mxs_mmc_host {
        struct mmc_data                 *data;
 
        void __iomem                    *base;
-       int                             irq;
-       struct resource                 *res;
-       struct resource                 *dma_res;
+       int                             dma_channel;
        struct clk                      *clk;
        unsigned int                    clk_rate;
 
@@ -158,32 +159,28 @@ struct mxs_mmc_host {
        enum dma_transfer_direction     slave_dirn;
        u32                             ssp_pio_words[SSP_PIO_NUM];
 
-       unsigned int                    version;
+       enum mxs_mmc_id                 devid;
        unsigned char                   bus_width;
        spinlock_t                      lock;
        int                             sdio_irq_en;
+       int                             wp_gpio;
 };
 
 static int mxs_mmc_get_ro(struct mmc_host *mmc)
 {
        struct mxs_mmc_host *host = mmc_priv(mmc);
-       struct mxs_mmc_platform_data *pdata =
-               mmc_dev(host->mmc)->platform_data;
-
-       if (!pdata)
-               return -EFAULT;
 
-       if (!gpio_is_valid(pdata->wp_gpio))
+       if (!gpio_is_valid(host->wp_gpio))
                return -EINVAL;
 
-       return gpio_get_value(pdata->wp_gpio);
+       return gpio_get_value(host->wp_gpio);
 }
 
 static int mxs_mmc_get_cd(struct mmc_host *mmc)
 {
        struct mxs_mmc_host *host = mmc_priv(mmc);
 
-       return !(readl(host->base + HW_SSP_STATUS) &
+       return !(readl(host->base + HW_SSP_STATUS(host)) &
                 BM_SSP_STATUS_CARD_DETECT);
 }
 
@@ -191,7 +188,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
 {
        u32 ctrl0, ctrl1;
 
-       mxs_reset_block(host->base);
+       stmp_reset_block(host->base);
 
        ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
        ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
@@ -207,7 +204,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
        writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
               BF_SSP(2, TIMING_CLOCK_DIVIDE) |
               BF_SSP(0, TIMING_CLOCK_RATE),
-              host->base + HW_SSP_TIMING);
+              host->base + HW_SSP_TIMING(host));
 
        if (host->sdio_irq_en) {
                ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
@@ -215,7 +212,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
        }
 
        writel(ctrl0, host->base + HW_SSP_CTRL0);
-       writel(ctrl1, host->base + HW_SSP_CTRL1);
+       writel(ctrl1, host->base + HW_SSP_CTRL1(host));
 }
 
 static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
@@ -229,12 +226,12 @@ static void mxs_mmc_request_done(struct mxs_mmc_host *host)
 
        if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
                if (mmc_resp_type(cmd) & MMC_RSP_136) {
-                       cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0);
-                       cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1);
-                       cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2);
-                       cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3);
+                       cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0(host));
+                       cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1(host));
+                       cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2(host));
+                       cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3(host));
                } else {
-                       cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0);
+                       cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0(host));
                }
        }
 
@@ -277,9 +274,9 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
 
        spin_lock(&host->lock);
 
-       stat = readl(host->base + HW_SSP_CTRL1);
+       stat = readl(host->base + HW_SSP_CTRL1(host));
        writel(stat & MXS_MMC_IRQ_BITS,
-              host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+              host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
 
        if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
                mmc_signal_sdio_irq(host->mmc);
@@ -485,7 +482,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
                blocks = 1;
 
        /* xfer count, block size and count need to be set differently */
-       if (ssp_is_old()) {
+       if (ssp_is_old(host)) {
                ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
                cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
                        BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
@@ -509,10 +506,10 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
 
        /* set the timeout count */
        timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns);
-       val = readl(host->base + HW_SSP_TIMING);
+       val = readl(host->base + HW_SSP_TIMING(host));
        val &= ~(BM_SSP_TIMING_TIMEOUT);
        val |= BF_SSP(timeout, TIMING_TIMEOUT);
-       writel(val, host->base + HW_SSP_TIMING);
+       writel(val, host->base + HW_SSP_TIMING(host));
 
        /* pio */
        host->ssp_pio_words[0] = ctrl0;
@@ -598,11 +595,11 @@ static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
 
        ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
 
-       val = readl(host->base + HW_SSP_TIMING);
+       val = readl(host->base + HW_SSP_TIMING(host));
        val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
        val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE);
        val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE);
-       writel(val, host->base + HW_SSP_TIMING);
+       writel(val, host->base + HW_SSP_TIMING(host));
 
        host->clk_rate = ssp_sck;
 
@@ -637,18 +634,19 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
 
        if (enable) {
                writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
-                      host->base + HW_SSP_CTRL0 + MXS_SET_ADDR);
+                      host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
                writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
-                      host->base + HW_SSP_CTRL1 + MXS_SET_ADDR);
+                      host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET);
 
-               if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
+               if (readl(host->base + HW_SSP_STATUS(host)) &
+                               BM_SSP_STATUS_SDIO_IRQ)
                        mmc_signal_sdio_irq(host->mmc);
 
        } else {
                writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
-                      host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR);
+                      host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
                writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
-                      host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+                      host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
        }
 
        spin_unlock_irqrestore(&host->lock, flags);
@@ -669,7 +667,7 @@ static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
        if (!mxs_dma_is_apbh(chan))
                return false;
 
-       if (chan->chan_id != host->dma_res->start)
+       if (chan->chan_id != host->dma_channel)
                return false;
 
        chan->private = &host->dma_data;
@@ -677,11 +675,34 @@ static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
        return true;
 }
 
+static struct platform_device_id mxs_mmc_ids[] = {
+       {
+               .name = "imx23-mmc",
+               .driver_data = IMX23_MMC,
+       }, {
+               .name = "imx28-mmc",
+               .driver_data = IMX28_MMC,
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(platform, mxs_mmc_ids);
+
+static const struct of_device_id mxs_mmc_dt_ids[] = {
+       { .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_MMC, },
+       { .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_MMC, },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
+
 static int mxs_mmc_probe(struct platform_device *pdev)
 {
+       const struct of_device_id *of_id =
+                       of_match_device(mxs_mmc_dt_ids, &pdev->dev);
+       struct device_node *np = pdev->dev.of_node;
        struct mxs_mmc_host *host;
        struct mmc_host *mmc;
-       struct resource *iores, *dmares, *r;
+       struct resource *iores, *dmares;
        struct mxs_mmc_platform_data *pdata;
        struct pinctrl *pinctrl;
        int ret = 0, irq_err, irq_dma;
@@ -691,46 +712,51 @@ static int mxs_mmc_probe(struct platform_device *pdev)
        dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        irq_err = platform_get_irq(pdev, 0);
        irq_dma = platform_get_irq(pdev, 1);
-       if (!iores || !dmares || irq_err < 0 || irq_dma < 0)
+       if (!iores || irq_err < 0 || irq_dma < 0)
                return -EINVAL;
 
-       r = request_mem_region(iores->start, resource_size(iores), pdev->name);
-       if (!r)
-               return -EBUSY;
-
        mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
-       if (!mmc) {
-               ret = -ENOMEM;
-               goto out_release_mem;
-       }
+       if (!mmc)
+               return -ENOMEM;
 
        host = mmc_priv(mmc);
-       host->base = ioremap(r->start, resource_size(r));
+       host->base = devm_request_and_ioremap(&pdev->dev, iores);
        if (!host->base) {
-               ret = -ENOMEM;
+               ret = -EADDRNOTAVAIL;
                goto out_mmc_free;
        }
 
-       /* only major verion does matter */
-       host->version = readl(host->base + HW_SSP_VERSION) >>
-                       BP_SSP_VERSION_MAJOR;
+       if (np) {
+               host->devid = (enum mxs_mmc_id) of_id->data;
+               /*
+                * TODO: This is a temporary solution and should be changed
+                * to use generic DMA binding later when the helpers get in.
+                */
+               ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
+                                          &host->dma_channel);
+               if (ret) {
+                       dev_err(mmc_dev(host->mmc),
+                               "failed to get dma channel\n");
+                       goto out_mmc_free;
+               }
+       } else {
+               host->devid = pdev->id_entry->driver_data;
+               host->dma_channel = dmares->start;
+       }
 
        host->mmc = mmc;
-       host->res = r;
-       host->dma_res = dmares;
-       host->irq = irq_err;
        host->sdio_irq_en = 0;
 
        pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
        if (IS_ERR(pinctrl)) {
                ret = PTR_ERR(pinctrl);
-               goto out_iounmap;
+               goto out_mmc_free;
        }
 
        host->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
-               goto out_iounmap;
+               goto out_mmc_free;
        }
        clk_prepare_enable(host->clk);
 
@@ -752,11 +778,20 @@ static int mxs_mmc_probe(struct platform_device *pdev)
                    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
 
        pdata = mmc_dev(host->mmc)->platform_data;
-       if (pdata) {
+       if (!pdata) {
+               u32 bus_width = 0;
+               of_property_read_u32(np, "bus-width", &bus_width);
+               if (bus_width == 4)
+                       mmc->caps |= MMC_CAP_4_BIT_DATA;
+               else if (bus_width == 8)
+                       mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
+               host->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
+       } else {
                if (pdata->flags & SLOTF_8_BIT_CAPABLE)
                        mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
                if (pdata->flags & SLOTF_4_BIT_CAPABLE)
                        mmc->caps |= MMC_CAP_4_BIT_DATA;
+               host->wp_gpio = pdata->wp_gpio;
        }
 
        mmc->f_min = 400000;
@@ -765,13 +800,14 @@ static int mxs_mmc_probe(struct platform_device *pdev)
 
        mmc->max_segs = 52;
        mmc->max_blk_size = 1 << 0xf;
-       mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff;
-       mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff;
+       mmc->max_blk_count = (ssp_is_old(host)) ? 0xff : 0xffffff;
+       mmc->max_req_size = (ssp_is_old(host)) ? 0xffff : 0xffffffff;
        mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
 
        platform_set_drvdata(pdev, mmc);
 
-       ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host);
+       ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
+                              DRIVER_NAME, host);
        if (ret)
                goto out_free_dma;
 
@@ -779,26 +815,20 @@ static int mxs_mmc_probe(struct platform_device *pdev)
 
        ret = mmc_add_host(mmc);
        if (ret)
-               goto out_free_irq;
+               goto out_free_dma;
 
        dev_info(mmc_dev(host->mmc), "initialized\n");
 
        return 0;
 
-out_free_irq:
-       free_irq(host->irq, host);
 out_free_dma:
        if (host->dmach)
                dma_release_channel(host->dmach);
 out_clk_put:
        clk_disable_unprepare(host->clk);
        clk_put(host->clk);
-out_iounmap:
-       iounmap(host->base);
 out_mmc_free:
        mmc_free_host(mmc);
-out_release_mem:
-       release_mem_region(iores->start, resource_size(iores));
        return ret;
 }
 
@@ -806,12 +836,9 @@ static int mxs_mmc_remove(struct platform_device *pdev)
 {
        struct mmc_host *mmc = platform_get_drvdata(pdev);
        struct mxs_mmc_host *host = mmc_priv(mmc);
-       struct resource *res = host->res;
 
        mmc_remove_host(mmc);
 
-       free_irq(host->irq, host);
-
        platform_set_drvdata(pdev, NULL);
 
        if (host->dmach)
@@ -820,12 +847,8 @@ static int mxs_mmc_remove(struct platform_device *pdev)
        clk_disable_unprepare(host->clk);
        clk_put(host->clk);
 
-       iounmap(host->base);
-
        mmc_free_host(mmc);
 
-       release_mem_region(res->start, resource_size(res));
-
        return 0;
 }
 
@@ -865,11 +888,13 @@ static const struct dev_pm_ops mxs_mmc_pm_ops = {
 static struct platform_driver mxs_mmc_driver = {
        .probe          = mxs_mmc_probe,
        .remove         = mxs_mmc_remove,
+       .id_table       = mxs_mmc_ids,
        .driver         = {
                .name   = DRIVER_NAME,
                .owner  = THIS_MODULE,
 #ifdef CONFIG_PM
                .pm     = &mxs_mmc_pm_ops,
+               .of_match_table = mxs_mmc_dt_ids,
 #endif
        },
 };
index 887c0e598cf3308d53f0324fba71176fb65d060b..552196c764d40bf4925e5bec5861729ad6af3716 100644 (file)
@@ -169,11 +169,11 @@ struct mmc_omap_host {
        struct timer_list       clk_timer;
        spinlock_t              clk_lock;     /* for changing enabled state */
        unsigned int            fclk_enabled:1;
+       struct workqueue_struct *mmc_omap_wq;
 
        struct omap_mmc_platform_data *pdata;
 };
 
-static struct workqueue_struct *mmc_omap_wq;
 
 static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
 {
@@ -291,7 +291,7 @@ static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
                host->next_slot = new_slot;
                host->mmc = new_slot->mmc;
                spin_unlock_irqrestore(&host->slot_lock, flags);
-               queue_work(mmc_omap_wq, &host->slot_release_work);
+               queue_work(host->mmc_omap_wq, &host->slot_release_work);
                return;
        }
 
@@ -459,7 +459,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
        }
 
        host->stop_data = data;
-       queue_work(mmc_omap_wq, &host->send_stop_work);
+       queue_work(host->mmc_omap_wq, &host->send_stop_work);
 }
 
 static void
@@ -639,7 +639,7 @@ mmc_omap_cmd_timer(unsigned long data)
                OMAP_MMC_WRITE(host, IE, 0);
                disable_irq(host->irq);
                host->abort = 1;
-               queue_work(mmc_omap_wq, &host->cmd_abort_work);
+               queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
        }
        spin_unlock_irqrestore(&host->slot_lock, flags);
 }
@@ -828,7 +828,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
                host->abort = 1;
                OMAP_MMC_WRITE(host, IE, 0);
                disable_irq_nosync(host->irq);
-               queue_work(mmc_omap_wq, &host->cmd_abort_work);
+               queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
                return IRQ_HANDLED;
        }
 
@@ -1389,13 +1389,13 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
 
        tasklet_kill(&slot->cover_tasklet);
        del_timer_sync(&slot->cover_timer);
-       flush_workqueue(mmc_omap_wq);
+       flush_workqueue(slot->host->mmc_omap_wq);
 
        mmc_remove_host(mmc);
        mmc_free_host(mmc);
 }
 
-static int __init mmc_omap_probe(struct platform_device *pdev)
+static int __devinit mmc_omap_probe(struct platform_device *pdev)
 {
        struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
        struct mmc_omap_host *host = NULL;
@@ -1497,6 +1497,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
 
        host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
 
+       host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
+       if (!host->mmc_omap_wq)
+               goto err_plat_cleanup;
+
        return 0;
 
 err_plat_cleanup:
@@ -1518,7 +1522,7 @@ err_free_mem_region:
        return ret;
 }
 
-static int mmc_omap_remove(struct platform_device *pdev)
+static int __devexit mmc_omap_remove(struct platform_device *pdev)
 {
        struct mmc_omap_host *host = platform_get_drvdata(pdev);
        int i;
@@ -1542,6 +1546,7 @@ static int mmc_omap_remove(struct platform_device *pdev)
        iounmap(host->virt_base);
        release_mem_region(pdev->resource[0].start,
                           pdev->resource[0].end - pdev->resource[0].start + 1);
+       destroy_workqueue(host->mmc_omap_wq);
 
        kfree(host);
 
@@ -1599,7 +1604,8 @@ static int mmc_omap_resume(struct platform_device *pdev)
 #endif
 
 static struct platform_driver mmc_omap_driver = {
-       .remove         = mmc_omap_remove,
+       .probe          = mmc_omap_probe,
+       .remove         = __devexit_p(mmc_omap_remove),
        .suspend        = mmc_omap_suspend,
        .resume         = mmc_omap_resume,
        .driver         = {
@@ -1608,29 +1614,7 @@ static struct platform_driver mmc_omap_driver = {
        },
 };
 
-static int __init mmc_omap_init(void)
-{
-       int ret;
-
-       mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
-       if (!mmc_omap_wq)
-               return -ENOMEM;
-
-       ret = platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
-       if (ret)
-               destroy_workqueue(mmc_omap_wq);
-       return ret;
-}
-
-static void __exit mmc_omap_exit(void)
-{
-       platform_driver_unregister(&mmc_omap_driver);
-       destroy_workqueue(mmc_omap_wq);
-}
-
-module_init(mmc_omap_init);
-module_exit(mmc_omap_exit);
-
+module_platform_driver(mmc_omap_driver);
 MODULE_DESCRIPTION("OMAP Multimedia Card driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" DRIVER_NAME);
index 56d4499d43889e42a971fcfffb9bd0c334645294..9a7a60aeb19ea35dc921cd43f2e796c2bfd75469 100644 (file)
 #define BRR_ENABLE             (1 << 5)
 #define DTO_ENABLE             (1 << 20)
 #define INIT_STREAM            (1 << 1)
+#define ACEN_ACMD12            (1 << 2)
 #define DP_SELECT              (1 << 21)
 #define DDIR                   (1 << 4)
 #define DMA_EN                 0x1
 #define MSBS                   (1 << 5)
 #define BCE                    (1 << 1)
 #define FOUR_BIT               (1 << 1)
+#define DDR                    (1 << 19)
 #define DW8                    (1 << 5)
 #define CC                     0x1
 #define TC                     0x02
 #define OMAP_MMC_MAX_CLOCK     52000000
 #define DRIVER_NAME            "omap_hsmmc"
 
+#define AUTO_CMD12             (1 << 0)        /* Auto CMD12 support */
 /*
  * One controller can have multiple slots, like on some omap boards using
  * omap.c controller driver. Luckily this is not currently done on any known
@@ -167,7 +170,6 @@ struct omap_hsmmc_host {
        int                     use_dma, dma_ch;
        int                     dma_line_tx, dma_line_rx;
        int                     slot_id;
-       int                     got_dbclk;
        int                     response_busy;
        int                     context_loss;
        int                     vdd;
@@ -175,6 +177,7 @@ struct omap_hsmmc_host {
        int                     reqs_blocked;
        int                     use_reg;
        int                     req_in_progress;
+       unsigned int            flags;
        struct omap_hsmmc_next  next_data;
 
        struct  omap_mmc_platform_data  *pdata;
@@ -520,6 +523,10 @@ static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
        u32 con;
 
        con = OMAP_HSMMC_READ(host->base, CON);
+       if (ios->timing == MMC_TIMING_UHS_DDR50)
+               con |= DDR;     /* configure in DDR mode */
+       else
+               con &= ~DDR;
        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_8:
                OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
@@ -766,6 +773,8 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
                cmdtype = 0x3;
 
        cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);
+       if ((host->flags & AUTO_CMD12) && mmc_op_multi(cmd->opcode))
+               cmdreg |= ACEN_ACMD12;
 
        if (data) {
                cmdreg |= DP_SELECT | MSBS | BCE;
@@ -796,11 +805,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
 static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
 {
        int dma_ch;
+       unsigned long flags;
 
-       spin_lock(&host->irq_lock);
+       spin_lock_irqsave(&host->irq_lock, flags);
        host->req_in_progress = 0;
        dma_ch = host->dma_ch;
-       spin_unlock(&host->irq_lock);
+       spin_unlock_irqrestore(&host->irq_lock, flags);
 
        omap_hsmmc_disable_irq(host);
        /* Do not complete the request if DMA is still in progress */
@@ -837,11 +847,14 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
        else
                data->bytes_xfered = 0;
 
-       if (!data->stop) {
+       if (data->stop && ((!(host->flags & AUTO_CMD12)) || data->error)) {
+               omap_hsmmc_start_command(host, data->stop, NULL);
+       } else {
+               if (data->stop)
+                       data->stop->resp[0] = OMAP_HSMMC_READ(host->base,
+                                                       RSP76);
                omap_hsmmc_request_done(host, data->mrq);
-               return;
        }
-       omap_hsmmc_start_command(host, data->stop, NULL);
 }
 
 /*
@@ -874,13 +887,14 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
 static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 {
        int dma_ch;
+       unsigned long flags;
 
        host->data->error = errno;
 
-       spin_lock(&host->irq_lock);
+       spin_lock_irqsave(&host->irq_lock, flags);
        dma_ch = host->dma_ch;
        host->dma_ch = -1;
-       spin_unlock(&host->irq_lock);
+       spin_unlock_irqrestore(&host->irq_lock, flags);
 
        if (host->use_dma && dma_ch != -1) {
                dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
@@ -1082,7 +1096,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
 
        /* Disable the clocks */
        pm_runtime_put_sync(host->dev);
-       if (host->got_dbclk)
+       if (host->dbclk)
                clk_disable(host->dbclk);
 
        /* Turn the power off */
@@ -1093,7 +1107,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
                ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
                                               vdd);
        pm_runtime_get_sync(host->dev);
-       if (host->got_dbclk)
+       if (host->dbclk)
                clk_enable(host->dbclk);
 
        if (ret != 0)
@@ -1234,6 +1248,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
        struct omap_hsmmc_host *host = cb_data;
        struct mmc_data *data;
        int dma_ch, req_in_progress;
+       unsigned long flags;
 
        if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
                dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
@@ -1241,9 +1256,9 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
                return;
        }
 
-       spin_lock(&host->irq_lock);
+       spin_lock_irqsave(&host->irq_lock, flags);
        if (host->dma_ch < 0) {
-               spin_unlock(&host->irq_lock);
+               spin_unlock_irqrestore(&host->irq_lock, flags);
                return;
        }
 
@@ -1253,7 +1268,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
                /* Fire up the next transfer. */
                omap_hsmmc_config_dma_params(host, data,
                                           data->sg + host->dma_sg_idx);
-               spin_unlock(&host->irq_lock);
+               spin_unlock_irqrestore(&host->irq_lock, flags);
                return;
        }
 
@@ -1264,7 +1279,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
        req_in_progress = host->req_in_progress;
        dma_ch = host->dma_ch;
        host->dma_ch = -1;
-       spin_unlock(&host->irq_lock);
+       spin_unlock_irqrestore(&host->irq_lock, flags);
 
        omap_free_dma(dma_ch);
 
@@ -1766,7 +1781,7 @@ static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
                pdata->slots[0].nonremovable = true;
                pdata->slots[0].no_regulator_off_init = true;
        }
-       of_property_read_u32(np, "ti,bus-width", &bus_width);
+       of_property_read_u32(np, "bus-width", &bus_width);
        if (bus_width == 4)
                pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA;
        else if (bus_width == 8)
@@ -1844,6 +1859,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
        host->mapbase   = res->start + pdata->reg_offset;
        host->base      = ioremap(host->mapbase, SZ_4K);
        host->power_mode = MMC_POWER_OFF;
+       host->flags     = AUTO_CMD12;
        host->next_data.cookie = 1;
 
        platform_set_drvdata(pdev, host);
@@ -1885,21 +1901,17 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 
        omap_hsmmc_context_save(host);
 
-       if (cpu_is_omap2430()) {
-               host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
-               /*
-                * MMC can still work without debounce clock.
-                */
-               if (IS_ERR(host->dbclk))
-                       dev_warn(mmc_dev(host->mmc),
-                               "Failed to get debounce clock\n");
-               else
-                       host->got_dbclk = 1;
-
-               if (host->got_dbclk)
-                       if (clk_enable(host->dbclk) != 0)
-                               dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
-                                                       " clk failed\n");
+       host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
+       /*
+        * MMC can still work without debounce clock.
+        */
+       if (IS_ERR(host->dbclk)) {
+               dev_warn(mmc_dev(host->mmc), "Failed to get debounce clk\n");
+               host->dbclk = NULL;
+       } else if (clk_enable(host->dbclk) != 0) {
+               dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
+               clk_put(host->dbclk);
+               host->dbclk = NULL;
        }
 
        /* Since we do only SG emulation, we can have as many segs
@@ -1969,7 +1981,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
                ret = request_threaded_irq(mmc_slot(host).card_detect_irq,
                                           NULL,
                                           omap_hsmmc_detect,
-                                          IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+                                          IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                           mmc_hostname(mmc), host);
                if (ret) {
                        dev_dbg(mmc_dev(host->mmc),
@@ -2019,7 +2031,7 @@ err_irq:
        pm_runtime_put_sync(host->dev);
        pm_runtime_disable(host->dev);
        clk_put(host->fclk);
-       if (host->got_dbclk) {
+       if (host->dbclk) {
                clk_disable(host->dbclk);
                clk_put(host->dbclk);
        }
@@ -2030,7 +2042,9 @@ err1:
 err_alloc:
        omap_hsmmc_gpio_free(pdata);
 err:
-       release_mem_region(res->start, resource_size(res));
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (res)
+               release_mem_region(res->start, resource_size(res));
        return ret;
 }
 
@@ -2052,7 +2066,7 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
        pm_runtime_put_sync(host->dev);
        pm_runtime_disable(host->dev);
        clk_put(host->fclk);
-       if (host->got_dbclk) {
+       if (host->dbclk) {
                clk_disable(host->dbclk);
                clk_put(host->dbclk);
        }
@@ -2110,7 +2124,7 @@ static int omap_hsmmc_suspend(struct device *dev)
                                OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
        }
 
-       if (host->got_dbclk)
+       if (host->dbclk)
                clk_disable(host->dbclk);
 err:
        pm_runtime_put_sync(host->dev);
@@ -2131,7 +2145,7 @@ static int omap_hsmmc_resume(struct device *dev)
 
        pm_runtime_get_sync(host->dev);
 
-       if (host->got_dbclk)
+       if (host->dbclk)
                clk_enable(host->dbclk);
 
        if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
index d190d04636a714e87da50a3f8748b17347ca225f..ebbe984e5d002c5dfe98d7215309676a8a183789 100644 (file)
@@ -71,6 +71,9 @@ struct pltfm_imx_data {
        enum imx_esdhc_type devtype;
        struct pinctrl *pinctrl;
        struct esdhc_platform_data boarddata;
+       struct clk *clk_ipg;
+       struct clk *clk_ahb;
+       struct clk *clk_per;
 };
 
 static struct platform_device_id imx_esdhc_devtype[] = {
@@ -404,7 +407,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
        if (!np)
                return -ENODEV;
 
-       if (of_get_property(np, "fsl,card-wired", NULL))
+       if (of_get_property(np, "non-removable", NULL))
                boarddata->cd_type = ESDHC_CD_PERMANENT;
 
        if (of_get_property(np, "fsl,cd-controller", NULL))
@@ -439,7 +442,6 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
        struct sdhci_pltfm_host *pltfm_host;
        struct sdhci_host *host;
        struct esdhc_platform_data *boarddata;
-       struct clk *clk;
        int err;
        struct pltfm_imx_data *imx_data;
 
@@ -460,14 +462,29 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
        imx_data->devtype = pdev->id_entry->driver_data;
        pltfm_host->priv = imx_data;
 
-       clk = clk_get(mmc_dev(host->mmc), NULL);
-       if (IS_ERR(clk)) {
-               dev_err(mmc_dev(host->mmc), "clk err\n");
-               err = PTR_ERR(clk);
+       imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+       if (IS_ERR(imx_data->clk_ipg)) {
+               err = PTR_ERR(imx_data->clk_ipg);
                goto err_clk_get;
        }
-       clk_prepare_enable(clk);
-       pltfm_host->clk = clk;
+
+       imx_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+       if (IS_ERR(imx_data->clk_ahb)) {
+               err = PTR_ERR(imx_data->clk_ahb);
+               goto err_clk_get;
+       }
+
+       imx_data->clk_per = devm_clk_get(&pdev->dev, "per");
+       if (IS_ERR(imx_data->clk_per)) {
+               err = PTR_ERR(imx_data->clk_per);
+               goto err_clk_get;
+       }
+
+       pltfm_host->clk = imx_data->clk_per;
+
+       clk_prepare_enable(imx_data->clk_per);
+       clk_prepare_enable(imx_data->clk_ipg);
+       clk_prepare_enable(imx_data->clk_ahb);
 
        imx_data->pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
        if (IS_ERR(imx_data->pinctrl)) {
@@ -567,8 +584,9 @@ no_card_detect_irq:
 no_card_detect_pin:
 no_board_data:
 pin_err:
-       clk_disable_unprepare(pltfm_host->clk);
-       clk_put(pltfm_host->clk);
+       clk_disable_unprepare(imx_data->clk_per);
+       clk_disable_unprepare(imx_data->clk_ipg);
+       clk_disable_unprepare(imx_data->clk_ahb);
 err_clk_get:
        kfree(imx_data);
 err_imx_data:
@@ -594,8 +612,10 @@ static int __devexit sdhci_esdhc_imx_remove(struct platform_device *pdev)
                gpio_free(boarddata->cd_gpio);
        }
 
-       clk_disable_unprepare(pltfm_host->clk);
-       clk_put(pltfm_host->clk);
+       clk_disable_unprepare(imx_data->clk_per);
+       clk_disable_unprepare(imx_data->clk_ipg);
+       clk_disable_unprepare(imx_data->clk_ahb);
+
        kfree(imx_data);
 
        sdhci_pltfm_free(pdev);
index c5c2a48bdd943166bff4e15ddf59718f3fbffa9a..d9a4ef4f1ed0e8b65cc212ce44a014093f5bf8fd 100644 (file)
@@ -42,7 +42,8 @@ static struct sdhci_ops sdhci_pltfm_ops = {
 #ifdef CONFIG_OF
 static bool sdhci_of_wp_inverted(struct device_node *np)
 {
-       if (of_get_property(np, "sdhci,wp-inverted", NULL))
+       if (of_get_property(np, "sdhci,wp-inverted", NULL) ||
+           of_get_property(np, "wp-inverted", NULL))
                return true;
 
        /* Old device trees don't have the wp-inverted property. */
@@ -59,13 +60,16 @@ void sdhci_get_of_property(struct platform_device *pdev)
        struct sdhci_host *host = platform_get_drvdata(pdev);
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        const __be32 *clk;
+       u32 bus_width;
        int size;
 
        if (of_device_is_available(np)) {
                if (of_get_property(np, "sdhci,auto-cmd12", NULL))
                        host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
 
-               if (of_get_property(np, "sdhci,1-bit-only", NULL))
+               if (of_get_property(np, "sdhci,1-bit-only", NULL) ||
+                   (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
+                   bus_width == 1))
                        host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
 
                if (sdhci_of_wp_inverted(np))
index 6dfa82e03c7e4964bff24a7e3fb9eb92468cd0d0..1fe32dfa7cd4913fdb5fde321c2c0fd2da90428b 100644 (file)
@@ -75,8 +75,6 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
        struct spear_sdhci *sdhci;
        int ret;
 
-       BUG_ON(pdev == NULL);
-
        iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!iomem) {
                ret = -ENOMEM;
@@ -84,18 +82,18 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
                goto err;
        }
 
-       if (!request_mem_region(iomem->start, resource_size(iomem),
-                               "spear-sdhci")) {
+       if (!devm_request_mem_region(&pdev->dev, iomem->start,
+                               resource_size(iomem), "spear-sdhci")) {
                ret = -EBUSY;
                dev_dbg(&pdev->dev, "cannot request region\n");
                goto err;
        }
 
-       sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL);
+       sdhci = devm_kzalloc(&pdev->dev, sizeof(*sdhci), GFP_KERNEL);
        if (!sdhci) {
                ret = -ENOMEM;
                dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
-               goto err_kzalloc;
+               goto err;
        }
 
        /* clk enable */
@@ -103,13 +101,13 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
        if (IS_ERR(sdhci->clk)) {
                ret = PTR_ERR(sdhci->clk);
                dev_dbg(&pdev->dev, "Error getting clock\n");
-               goto err_clk_get;
+               goto err;
        }
 
        ret = clk_enable(sdhci->clk);
        if (ret) {
                dev_dbg(&pdev->dev, "Error enabling clock\n");
-               goto err_clk_enb;
+               goto put_clk;
        }
 
        /* overwrite platform_data */
@@ -124,7 +122,7 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
        if (IS_ERR(host)) {
                ret = PTR_ERR(host);
                dev_dbg(&pdev->dev, "error allocating host\n");
-               goto err_alloc_host;
+               goto disable_clk;
        }
 
        host->hw_name = "sdhci";
@@ -132,17 +130,18 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
        host->irq = platform_get_irq(pdev, 0);
        host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
 
-       host->ioaddr = ioremap(iomem->start, resource_size(iomem));
+       host->ioaddr = devm_ioremap(&pdev->dev, iomem->start,
+                       resource_size(iomem));
        if (!host->ioaddr) {
                ret = -ENOMEM;
                dev_dbg(&pdev->dev, "failed to remap registers\n");
-               goto err_ioremap;
+               goto free_host;
        }
 
        ret = sdhci_add_host(host);
        if (ret) {
                dev_dbg(&pdev->dev, "error adding host\n");
-               goto err_add_host;
+               goto free_host;
        }
 
        platform_set_drvdata(pdev, host);
@@ -161,11 +160,12 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
        if (sdhci->data->card_power_gpio >= 0) {
                int val = 0;
 
-               ret = gpio_request(sdhci->data->card_power_gpio, "sdhci");
+               ret = devm_gpio_request(&pdev->dev,
+                               sdhci->data->card_power_gpio, "sdhci");
                if (ret < 0) {
                        dev_dbg(&pdev->dev, "gpio request fail: %d\n",
                                        sdhci->data->card_power_gpio);
-                       goto err_pgpio_request;
+                       goto set_drvdata;
                }
 
                if (sdhci->data->power_always_enb)
@@ -177,60 +177,48 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
                if (ret) {
                        dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
                                        sdhci->data->card_power_gpio);
-                       goto err_pgpio_direction;
+                       goto set_drvdata;
                }
        }
 
        if (sdhci->data->card_int_gpio >= 0) {
-               ret = gpio_request(sdhci->data->card_int_gpio, "sdhci");
+               ret = devm_gpio_request(&pdev->dev, sdhci->data->card_int_gpio,
+                               "sdhci");
                if (ret < 0) {
                        dev_dbg(&pdev->dev, "gpio request fail: %d\n",
                                        sdhci->data->card_int_gpio);
-                       goto err_igpio_request;
+                       goto set_drvdata;
                }
 
                ret = gpio_direction_input(sdhci->data->card_int_gpio);
                if (ret) {
                        dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
                                        sdhci->data->card_int_gpio);
-                       goto err_igpio_direction;
+                       goto set_drvdata;
                }
-               ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio),
+               ret = devm_request_irq(&pdev->dev,
+                               gpio_to_irq(sdhci->data->card_int_gpio),
                                sdhci_gpio_irq, IRQF_TRIGGER_LOW,
                                mmc_hostname(host->mmc), pdev);
                if (ret) {
                        dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
                                        sdhci->data->card_int_gpio);
-                       goto err_igpio_request_irq;
+                       goto set_drvdata;
                }
 
        }
 
        return 0;
 
-err_igpio_request_irq:
-err_igpio_direction:
-       if (sdhci->data->card_int_gpio >= 0)
-               gpio_free(sdhci->data->card_int_gpio);
-err_igpio_request:
-err_pgpio_direction:
-       if (sdhci->data->card_power_gpio >= 0)
-               gpio_free(sdhci->data->card_power_gpio);
-err_pgpio_request:
+set_drvdata:
        platform_set_drvdata(pdev, NULL);
        sdhci_remove_host(host, 1);
-err_add_host:
-       iounmap(host->ioaddr);
-err_ioremap:
+free_host:
        sdhci_free_host(host);
-err_alloc_host:
+disable_clk:
        clk_disable(sdhci->clk);
-err_clk_enb:
+put_clk:
        clk_put(sdhci->clk);
-err_clk_get:
-       kfree(sdhci);
-err_kzalloc:
-       release_mem_region(iomem->start, resource_size(iomem));
 err:
        dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
        return ret;
@@ -239,35 +227,19 @@ err:
 static int __devexit sdhci_remove(struct platform_device *pdev)
 {
        struct sdhci_host *host = platform_get_drvdata(pdev);
-       struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
-       int dead;
+       int dead = 0;
        u32 scratch;
 
-       if (sdhci->data) {
-               if (sdhci->data->card_int_gpio >= 0) {
-                       free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
-                       gpio_free(sdhci->data->card_int_gpio);
-               }
-
-               if (sdhci->data->card_power_gpio >= 0)
-                       gpio_free(sdhci->data->card_power_gpio);
-       }
-
        platform_set_drvdata(pdev, NULL);
-       dead = 0;
        scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
        if (scratch == (u32)-1)
                dead = 1;
 
        sdhci_remove_host(host, dead);
-       iounmap(host->ioaddr);
        sdhci_free_host(host);
        clk_disable(sdhci->clk);
        clk_put(sdhci->clk);
-       kfree(sdhci);
-       if (iomem)
-               release_mem_region(iomem->start, resource_size(iomem));
 
        return 0;
 }
index ff5a16991939b9ce5d8c5cba2f000eea5bdbb5a3..b38d8a78f6a033ad28c6b50d30309c2c48ff03c8 100644 (file)
 
 #include "sdhci-pltfm.h"
 
+/* Tegra SDHOST controller vendor register definitions */
+#define SDHCI_TEGRA_VENDOR_MISC_CTRL           0x120
+#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300  0x20
+
 #define NVQUIRK_FORCE_SDHCI_SPEC_200   BIT(0)
 #define NVQUIRK_ENABLE_BLOCK_GAP_DET   BIT(1)
+#define NVQUIRK_ENABLE_SDHCI_SPEC_300  BIT(2)
 
 struct sdhci_tegra_soc_data {
        struct sdhci_pltfm_data *pdata;
@@ -120,6 +125,25 @@ static irqreturn_t carddetect_irq(int irq, void *data)
        return IRQ_HANDLED;
 };
 
+static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
+{
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_tegra *tegra_host = pltfm_host->priv;
+       const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+
+       if (!(mask & SDHCI_RESET_ALL))
+               return;
+
+       /* Erratum: Enable SDHCI spec v3.00 support */
+       if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) {
+               u32 misc_ctrl;
+
+               misc_ctrl = sdhci_readb(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+               misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
+               sdhci_writeb(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+       }
+}
+
 static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -148,6 +172,7 @@ static struct sdhci_ops tegra_sdhci_ops = {
        .read_w     = tegra_sdhci_readw,
        .write_l    = tegra_sdhci_writel,
        .platform_8bit_width = tegra_sdhci_8bit,
+       .platform_reset_exit = tegra_sdhci_reset_exit,
 };
 
 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
@@ -178,6 +203,7 @@ static struct sdhci_pltfm_data sdhci_tegra30_pdata = {
 
 static struct sdhci_tegra_soc_data soc_data_tegra30 = {
        .pdata = &sdhci_tegra30_pdata,
+       .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300,
 };
 #endif
 
index ccefdebeff1458a6041f269c7494fc26cac0d8a3..e626732aff77d3ebd0563c3d1720da62aab6d7ff 100644 (file)
@@ -680,8 +680,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
        }
 
        if (count >= 0xF) {
-               pr_warning("%s: Too large timeout requested for CMD%d!\n",
-                      mmc_hostname(host->mmc), cmd->opcode);
+               pr_warning("%s: Too large timeout 0x%x requested for CMD%d!\n",
+                          mmc_hostname(host->mmc), count, cmd->opcode);
                count = 0xE;
        }
 
index 5760c1a4b3f66ea3125b71d28ab4f62ceb4ee925..27143e042af5b2dfec767a1d1cf61762fcf2734b 100644 (file)
@@ -128,7 +128,7 @@ config MTD_AFS_PARTS
 
 config MTD_OF_PARTS
        tristate "OpenFirmware partitioning information support"
-       default Y
+       default y
        depends on OF
        help
          This provides a partition parsing function which derives
index 608321ee056e5cb69c22f950ad2eb72f974fd6da..63d2a64331f75d3287a68c2f72f8843cddc4f02d 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright Â© 2006-2008  Florian Fainelli <florian@openwrt.org>
  *                       Mike Albon <malbon@openwrt.org>
  * Copyright Â© 2009-2010  Daniel Dickinson <openwrt@cshore.neomailbox.net>
- * Copyright Â© 2011 Jonas Gorski <jonas.gorski@gmail.com>
+ * Copyright Â© 2011-2012  Jonas Gorski <jonas.gorski@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -82,6 +82,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
        int namelen = 0;
        int i;
        u32 computed_crc;
+       bool rootfs_first = false;
 
        if (bcm63xx_detect_cfe(master))
                return -EINVAL;
@@ -109,6 +110,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
                char *boardid = &(buf->board_id[0]);
                char *tagversion = &(buf->tag_version[0]);
 
+               sscanf(buf->flash_image_start, "%u", &rootfsaddr);
                sscanf(buf->kernel_address, "%u", &kerneladdr);
                sscanf(buf->kernel_length, "%u", &kernellen);
                sscanf(buf->total_length, "%u", &totallen);
@@ -117,10 +119,19 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
                        tagversion, boardid);
 
                kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
-               rootfsaddr = kerneladdr + kernellen;
+               rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE;
                spareaddr = roundup(totallen, master->erasesize) + cfelen;
                sparelen = master->size - spareaddr - nvramlen;
-               rootfslen = spareaddr - rootfsaddr;
+
+               if (rootfsaddr < kerneladdr) {
+                       /* default Broadcom layout */
+                       rootfslen = kerneladdr - rootfsaddr;
+                       rootfs_first = true;
+               } else {
+                       /* OpenWrt layout */
+                       rootfsaddr = kerneladdr + kernellen;
+                       rootfslen = spareaddr - rootfsaddr;
+               }
        } else {
                pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
                        buf->header_crc, computed_crc);
@@ -156,18 +167,26 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
        curpart++;
 
        if (kernellen > 0) {
-               parts[curpart].name = "kernel";
-               parts[curpart].offset = kerneladdr;
-               parts[curpart].size = kernellen;
+               int kernelpart = curpart;
+
+               if (rootfslen > 0 && rootfs_first)
+                       kernelpart++;
+               parts[kernelpart].name = "kernel";
+               parts[kernelpart].offset = kerneladdr;
+               parts[kernelpart].size = kernellen;
                curpart++;
        }
 
        if (rootfslen > 0) {
-               parts[curpart].name = "rootfs";
-               parts[curpart].offset = rootfsaddr;
-               parts[curpart].size = rootfslen;
-               if (sparelen > 0)
-                       parts[curpart].size += sparelen;
+               int rootfspart = curpart;
+
+               if (kernellen > 0 && rootfs_first)
+                       rootfspart--;
+               parts[rootfspart].name = "rootfs";
+               parts[rootfspart].offset = rootfsaddr;
+               parts[rootfspart].size = rootfslen;
+               if (sparelen > 0  && !rootfs_first)
+                       parts[rootfspart].size += sparelen;
                curpart++;
        }
 
index d02592e6a0f02a89195ef1aea302445d98585b0e..22d0493a026ff7adfe07b62bf7e11555e9f67982 100644 (file)
@@ -317,7 +317,7 @@ static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
 
        if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
                cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
-               pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
+               pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
        }
 }
 
@@ -328,10 +328,23 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
 
        if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
                cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
-               pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
+               pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
        }
 }
 
+static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
+{
+       struct map_info *map = mtd->priv;
+       struct cfi_private *cfi = map->fldrv_priv;
+
+       /*
+        *  S29NS512P flash uses more than 8bits to report number of sectors,
+        * which is not permitted by CFI.
+        */
+       cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
+       pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
+}
+
 /* Used to fix CFI-Tables of chips without Extended Query Tables */
 static struct cfi_fixup cfi_nopri_fixup_table[] = {
        { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
@@ -362,6 +375,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
        { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
+       { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
        { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
        { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
        { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
index ddf9ec6d9168ed3848fdd249a67c2c2053b43444..4558e0f4d07f89b39a6ab7cce953ef5de701111b 100644 (file)
@@ -70,7 +70,7 @@ struct cmdline_mtd_partition {
 /* mtdpart_setup() parses into here */
 static struct cmdline_mtd_partition *partitions;
 
-/* the command line passed to mtdpart_setupd() */
+/* the command line passed to mtdpart_setup() */
 static char *cmdline;
 static int cmdline_parsed = 0;
 
index a4a80b742e65e99d602161002dd4296614e8593d..681e2ee0f2d6287a1c0e2170358a5e4cc9b11276 100644 (file)
@@ -52,8 +52,6 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
 
        while (pages) {
                page = page_read(mapping, index);
-               if (!page)
-                       return -ENOMEM;
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
@@ -112,8 +110,6 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
                len = len - cpylen;
 
                page = page_read(dev->blkdev->bd_inode->i_mapping, index);
-               if (!page)
-                       return -ENOMEM;
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
@@ -148,8 +144,6 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
                len = len - cpylen;
 
                page = page_read(mapping, index);
-               if (!page)
-                       return -ENOMEM;
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
@@ -271,7 +265,6 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
        dev->mtd.flags = MTD_CAP_RAM;
        dev->mtd._erase = block2mtd_erase;
        dev->mtd._write = block2mtd_write;
-       dev->mtd._writev = mtd_writev;
        dev->mtd._sync = block2mtd_sync;
        dev->mtd._read = block2mtd_read;
        dev->mtd.priv = dev;
index 50aa90aa7a7fce0706a8ef5af2aa683e84fb346b..f70854d728fe04f3ea7ae2507efd9f458358bc24 100644 (file)
@@ -227,7 +227,7 @@ static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
        u8 data8, *dst8;
 
        doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
-       cdr = len & 0x3;
+       cdr = len & 0x1;
        len4 = len - cdr;
 
        if (first)
@@ -732,12 +732,24 @@ err:
  * @len: the number of bytes to be read (must be a multiple of 4)
  * @buf: the buffer to be filled in (or NULL is forget bytes)
  * @first: 1 if first time read, DOC_READADDRESS should be set
+ * @last_odd: 1 if last read ended up on an odd byte
+ *
+ * Reads bytes from a prepared page. There is a trickery here : if the last read
+ * ended up on an odd offset in the 1024 bytes double page, ie. between the 2
+ * planes, the first byte must be read apart. If a word (16bit) read was used,
+ * the read would return the byte of plane 2 as low *and* high endian, which
+ * will mess the read.
  *
  */
 static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
-                                 int first)
+                                 int first, int last_odd)
 {
-       doc_read_data_area(docg3, buf, len, first);
+       if (last_odd && len > 0) {
+               doc_read_data_area(docg3, buf, 1, first);
+               doc_read_data_area(docg3, buf ? buf + 1 : buf, len - 1, 0);
+       } else {
+               doc_read_data_area(docg3, buf, len, first);
+       }
        doc_delay(docg3, 2);
        return len;
 }
@@ -850,6 +862,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
        u8 *buf = ops->datbuf;
        size_t len, ooblen, nbdata, nboob;
        u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
+       int max_bitflips = 0;
 
        if (buf)
                len = ops->len;
@@ -876,7 +889,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
        ret = 0;
        skip = from % DOC_LAYOUT_PAGE_SIZE;
        mutex_lock(&docg3->cascade->lock);
-       while (!ret && (len > 0 || ooblen > 0)) {
+       while (ret >= 0 && (len > 0 || ooblen > 0)) {
                calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
                        docg3->reliable);
                nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
@@ -887,20 +900,20 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
                ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
                if (ret < 0)
                        goto err_in_read;
-               ret = doc_read_page_getbytes(docg3, skip, NULL, 1);
+               ret = doc_read_page_getbytes(docg3, skip, NULL, 1, 0);
                if (ret < skip)
                        goto err_in_read;
-               ret = doc_read_page_getbytes(docg3, nbdata, buf, 0);
+               ret = doc_read_page_getbytes(docg3, nbdata, buf, 0, skip % 2);
                if (ret < nbdata)
                        goto err_in_read;
                doc_read_page_getbytes(docg3,
                                       DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
-                                      NULL, 0);
-               ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0);
+                                      NULL, 0, (skip + nbdata) % 2);
+               ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0, 0);
                if (ret < nboob)
                        goto err_in_read;
                doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
-                                      NULL, 0);
+                                      NULL, 0, nboob % 2);
 
                doc_get_bch_hw_ecc(docg3, hwecc);
                eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
@@ -936,7 +949,8 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
                        }
                        if (ret > 0) {
                                mtd->ecc_stats.corrected += ret;
-                               ret = -EUCLEAN;
+                               max_bitflips = max(max_bitflips, ret);
+                               ret = max_bitflips;
                        }
                }
 
@@ -1004,7 +1018,7 @@ static int doc_reload_bbt(struct docg3 *docg3)
                                                     DOC_LAYOUT_PAGE_SIZE);
                if (!ret)
                        doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
-                                              buf, 1);
+                                              buf, 1, 0);
                buf += DOC_LAYOUT_PAGE_SIZE;
        }
        doc_read_page_finish(docg3);
@@ -1064,10 +1078,10 @@ static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
        ret = doc_reset_seq(docg3);
        if (!ret)
                ret = doc_read_page_prepare(docg3, block0, block1, page,
-                                           ofs + DOC_LAYOUT_WEAR_OFFSET);
+                                           ofs + DOC_LAYOUT_WEAR_OFFSET, 0);
        if (!ret)
                ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
-                                            buf, 1);
+                                            buf, 1, 0);
        doc_read_page_finish(docg3);
 
        if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
index 1924d247c1cb924c478ebd644475b799dd044297..5d0d68c3fe27a79a097d64fa03246c276510ad3b 100644 (file)
@@ -639,12 +639,16 @@ static const struct spi_device_id m25p_ids[] = {
        { "en25q32b", INFO(0x1c3016, 0, 64 * 1024,  64, 0) },
        { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
 
+       /* Everspin */
+       { "mr25h256", CAT25_INFO(  32 * 1024, 1, 256, 2) },
+
        /* Intel/Numonyx -- xxxs33b */
        { "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
        { "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) },
        { "640s33b",  INFO(0x898913, 0, 64 * 1024, 128, 0) },
 
        /* Macronix */
+       { "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4, SECT_4K) },
        { "mx25l4005a",  INFO(0xc22013, 0, 64 * 1024,   8, SECT_4K) },
        { "mx25l8005",   INFO(0xc22014, 0, 64 * 1024,  16, 0) },
        { "mx25l1606e",  INFO(0xc22015, 0, 64 * 1024,  32, SECT_4K) },
@@ -728,6 +732,7 @@ static const struct spi_device_id m25p_ids[] = {
        { "w25q32", INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) },
        { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
        { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+       { "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
 
        /* Catalyst / On Semiconductor -- non-JEDEC */
        { "cat25c11", CAT25_INFO(  16, 8, 16, 1) },
index 797d43cd35507372fe7ea4c0205e169dac9fb0a5..67960362681e7f7a72933aaf87b5ffa515c36cfa 100644 (file)
@@ -990,9 +990,9 @@ static int __devinit spear_smi_probe(struct platform_device *pdev)
                goto err_clk;
        }
 
-       ret = clk_enable(dev->clk);
+       ret = clk_prepare_enable(dev->clk);
        if (ret)
-               goto err_clk_enable;
+               goto err_clk_prepare_enable;
 
        ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev);
        if (ret) {
@@ -1020,8 +1020,8 @@ err_bank_setup:
        free_irq(irq, dev);
        platform_set_drvdata(pdev, NULL);
 err_irq:
-       clk_disable(dev->clk);
-err_clk_enable:
+       clk_disable_unprepare(dev->clk);
+err_clk_prepare_enable:
        clk_put(dev->clk);
 err_clk:
        iounmap(dev->io_base);
@@ -1074,7 +1074,7 @@ static int __devexit spear_smi_remove(struct platform_device *pdev)
        irq = platform_get_irq(pdev, 0);
        free_irq(irq, dev);
 
-       clk_disable(dev->clk);
+       clk_disable_unprepare(dev->clk);
        clk_put(dev->clk);
        iounmap(dev->io_base);
        kfree(dev);
@@ -1091,7 +1091,7 @@ int spear_smi_suspend(struct platform_device *pdev, pm_message_t state)
        struct spear_smi *dev = platform_get_drvdata(pdev);
 
        if (dev && dev->clk)
-               clk_disable(dev->clk);
+               clk_disable_unprepare(dev->clk);
 
        return 0;
 }
@@ -1102,7 +1102,7 @@ int spear_smi_resume(struct platform_device *pdev)
        int ret = -EPERM;
 
        if (dev && dev->clk)
-               ret = clk_enable(dev->clk);
+               ret = clk_prepare_enable(dev->clk);
 
        if (!ret)
                spear_smi_hw_init(dev);
index dbfe17baf0463a7e47325f2b228487e29e28391c..45abed67f1ef176da4469ed5f9b78b1c66e874c4 100644 (file)
@@ -57,7 +57,7 @@ static struct qinfo_query_info qinfo_array[] = {
 
 static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
 {
-       int qinfo_lines = sizeof(qinfo_array)/sizeof(struct qinfo_query_info);
+       int qinfo_lines = ARRAY_SIZE(qinfo_array);
        int i;
        int bankwidth = map_bankwidth(map) * 8;
        int major, minor;
index 8af67cfd671acac48ad7885a6e73ebe2f0ecdbff..5ba2458e799ac4e3db5d8d78b7c9b4814bec5a09 100644 (file)
@@ -224,7 +224,7 @@ config MTD_CK804XROM
 
 config MTD_SCB2_FLASH
        tristate "BIOS flash chip on Intel SCB2 boards"
-       depends on X86 && MTD_JEDECPROBE
+       depends on X86 && MTD_JEDECPROBE && PCI
        help
          Support for treating the BIOS flash chip on Intel SCB2 boards
          as an MTD device - with this you can reprogram your BIOS.
index 92e1f41634c7135ffc1b39d50c0323f0de33c603..93f03175c82dce9185f5144e8bc129f849165764 100644 (file)
@@ -260,18 +260,7 @@ static struct pci_driver vr_nor_pci_driver = {
        .id_table = vr_nor_pci_ids,
 };
 
-static int __init vr_nor_mtd_init(void)
-{
-       return pci_register_driver(&vr_nor_pci_driver);
-}
-
-static void __exit vr_nor_mtd_exit(void)
-{
-       pci_unregister_driver(&vr_nor_pci_driver);
-}
-
-module_init(vr_nor_mtd_init);
-module_exit(vr_nor_mtd_exit);
+module_pci_driver(vr_nor_pci_driver);
 
 MODULE_AUTHOR("Andy Lowe");
 MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
index b5401e355745bfd4ce1e92732151e5fd34e0e9e0..c03456f17004017ca0cb1f65513343cd8d470b7c 100644 (file)
@@ -19,9 +19,9 @@
 #include <linux/mtd/cfi.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/physmap.h>
+#include <linux/of.h>
 
 #include <lantiq_soc.h>
-#include <lantiq_platform.h>
 
 /*
  * The NOR flash is connected to the same external bus unit (EBU) as PCI.
@@ -44,8 +44,9 @@ struct ltq_mtd {
        struct map_info *map;
 };
 
-static char ltq_map_name[] = "ltq_nor";
-static const char *ltq_probe_types[] __devinitconst = { "cmdlinepart", NULL };
+static const char ltq_map_name[] = "ltq_nor";
+static const char *ltq_probe_types[] __devinitconst = {
+                                       "cmdlinepart", "ofpart", NULL };
 
 static map_word
 ltq_read16(struct map_info *map, unsigned long adr)
@@ -108,42 +109,38 @@ ltq_copy_to(struct map_info *map, unsigned long to,
        spin_unlock_irqrestore(&ebu_lock, flags);
 }
 
-static int __init
+static int __devinit
 ltq_mtd_probe(struct platform_device *pdev)
 {
-       struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev);
+       struct mtd_part_parser_data ppdata;
        struct ltq_mtd *ltq_mtd;
-       struct resource *res;
        struct cfi_private *cfi;
        int err;
 
+       if (of_machine_is_compatible("lantiq,falcon") &&
+                       (ltq_boot_select() != BS_FLASH)) {
+               dev_err(&pdev->dev, "invalid bootstrap options\n");
+               return -ENODEV;
+       }
+
        ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL);
        platform_set_drvdata(pdev, ltq_mtd);
 
        ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!ltq_mtd->res) {
-               dev_err(&pdev->dev, "failed to get memory resource");
+               dev_err(&pdev->dev, "failed to get memory resource\n");
                err = -ENOENT;
                goto err_out;
        }
 
-       res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start,
-               resource_size(ltq_mtd->res), dev_name(&pdev->dev));
-       if (!ltq_mtd->res) {
-               dev_err(&pdev->dev, "failed to request mem resource");
-               err = -EBUSY;
-               goto err_out;
-       }
-
        ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
-       ltq_mtd->map->phys = res->start;
-       ltq_mtd->map->size = resource_size(res);
-       ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev,
-                               ltq_mtd->map->phys, ltq_mtd->map->size);
+       ltq_mtd->map->phys = ltq_mtd->res->start;
+       ltq_mtd->map->size = resource_size(ltq_mtd->res);
+       ltq_mtd->map->virt = devm_request_and_ioremap(&pdev->dev, ltq_mtd->res);
        if (!ltq_mtd->map->virt) {
-               dev_err(&pdev->dev, "failed to ioremap!\n");
-               err = -ENOMEM;
-               goto err_free;
+               dev_err(&pdev->dev, "failed to remap mem resource\n");
+               err = -EBUSY;
+               goto err_out;
        }
 
        ltq_mtd->map->name = ltq_map_name;
@@ -169,9 +166,9 @@ ltq_mtd_probe(struct platform_device *pdev)
        cfi->addr_unlock1 ^= 1;
        cfi->addr_unlock2 ^= 1;
 
-       err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, NULL,
-                                       ltq_mtd_data->parts,
-                                       ltq_mtd_data->nr_parts);
+       ppdata.of_node = pdev->dev.of_node;
+       err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types,
+                                       &ppdata, NULL, 0);
        if (err) {
                dev_err(&pdev->dev, "failed to add partitions\n");
                goto err_destroy;
@@ -204,32 +201,23 @@ ltq_mtd_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id ltq_mtd_match[] = {
+       { .compatible = "lantiq,nor" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, ltq_mtd_match);
+
 static struct platform_driver ltq_mtd_driver = {
+       .probe = ltq_mtd_probe,
        .remove = __devexit_p(ltq_mtd_remove),
        .driver = {
-               .name = "ltq_nor",
+               .name = "ltq-nor",
                .owner = THIS_MODULE,
+               .of_match_table = ltq_mtd_match,
        },
 };
 
-static int __init
-init_ltq_mtd(void)
-{
-       int ret = platform_driver_probe(&ltq_mtd_driver, ltq_mtd_probe);
-
-       if (ret)
-               pr_err("ltq_nor: error registering platform driver");
-       return ret;
-}
-
-static void __exit
-exit_ltq_mtd(void)
-{
-       platform_driver_unregister(&ltq_mtd_driver);
-}
-
-module_init(init_ltq_mtd);
-module_exit(exit_ltq_mtd);
+module_platform_driver(ltq_mtd_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
index 1d005a3e9b41603856c40a60fee6de972771634f..f14ce0af763f0dda66a831470319e23611152eb2 100644 (file)
@@ -352,18 +352,7 @@ static struct pci_driver mtd_pci_driver = {
        .id_table =     mtd_pci_ids,
 };
 
-static int __init mtd_pci_maps_init(void)
-{
-       return pci_register_driver(&mtd_pci_driver);
-}
-
-static void __exit mtd_pci_maps_exit(void)
-{
-       pci_unregister_driver(&mtd_pci_driver);
-}
-
-module_init(mtd_pci_maps_init);
-module_exit(mtd_pci_maps_exit);
+module_pci_driver(mtd_pci_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
index 934a72c8007880407247915d1d814225c45d65f2..9dcbc684abdb27e9e0218a585bdbc1aa079e93af 100644 (file)
@@ -234,20 +234,7 @@ static struct pci_driver scb2_flash_driver = {
        .remove =   __devexit_p(scb2_flash_remove),
 };
 
-static int __init
-scb2_flash_init(void)
-{
-       return pci_register_driver(&scb2_flash_driver);
-}
-
-static void __exit
-scb2_flash_exit(void)
-{
-       pci_unregister_driver(&scb2_flash_driver);
-}
-
-module_init(scb2_flash_init);
-module_exit(scb2_flash_exit);
+module_pci_driver(scb2_flash_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Tim Hockin <thockin@sun.com>");
index 71b0ba7979121f8cde4480751f547d42d8fa858b..e7534c82f93ab381a86890bfa92d3a6d3027295a 100644 (file)
@@ -59,7 +59,7 @@ static struct mtd_partition bigflash_parts[] = {
        }
 };
 
-static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL};
+static const char *part_probes[] __initconst = {"cmdlinepart", "RedBoot", NULL};
 
 #define init_sbc82xx_one_flash(map, br, or)                    \
 do {                                                           \
index c837507dfb1c73021da2a47eebd975bd40573ef9..575730744fdb3ce83dc8e4a78cf65cefb3413fdc 100644 (file)
@@ -250,6 +250,43 @@ static ssize_t mtd_name_show(struct device *dev,
 }
 static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
 
+static ssize_t mtd_ecc_strength_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct mtd_info *mtd = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
+}
+static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
+
+static ssize_t mtd_bitflip_threshold_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       struct mtd_info *mtd = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
+}
+
+static ssize_t mtd_bitflip_threshold_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count)
+{
+       struct mtd_info *mtd = dev_get_drvdata(dev);
+       unsigned int bitflip_threshold;
+       int retval;
+
+       retval = kstrtouint(buf, 0, &bitflip_threshold);
+       if (retval)
+               return retval;
+
+       mtd->bitflip_threshold = bitflip_threshold;
+       return count;
+}
+static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
+                  mtd_bitflip_threshold_show,
+                  mtd_bitflip_threshold_store);
+
 static struct attribute *mtd_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_flags.attr,
@@ -260,6 +297,8 @@ static struct attribute *mtd_attrs[] = {
        &dev_attr_oobsize.attr,
        &dev_attr_numeraseregions.attr,
        &dev_attr_name.attr,
+       &dev_attr_ecc_strength.attr,
+       &dev_attr_bitflip_threshold.attr,
        NULL,
 };
 
@@ -322,6 +361,10 @@ int add_mtd_device(struct mtd_info *mtd)
        mtd->index = i;
        mtd->usecount = 0;
 
+       /* default value if not set by driver */
+       if (mtd->bitflip_threshold == 0)
+               mtd->bitflip_threshold = mtd->ecc_strength;
+
        if (is_power_of_2(mtd->erasesize))
                mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
        else
@@ -757,12 +800,24 @@ EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
 int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
             u_char *buf)
 {
+       int ret_code;
        *retlen = 0;
        if (from < 0 || from > mtd->size || len > mtd->size - from)
                return -EINVAL;
        if (!len)
                return 0;
-       return mtd->_read(mtd, from, len, retlen, buf);
+
+       /*
+        * In the absence of an error, drivers return a non-negative integer
+        * representing the maximum number of bitflips that were corrected on
+        * any one ecc region (if applicable; zero otherwise).
+        */
+       ret_code = mtd->_read(mtd, from, len, retlen, buf);
+       if (unlikely(ret_code < 0))
+               return ret_code;
+       if (mtd->ecc_strength == 0)
+               return 0;       /* device lacks ecc */
+       return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
 }
 EXPORT_SYMBOL_GPL(mtd_read);
 
index 9651c06de0a9298f4db58265524ecaebe2c2b9b4..d518e4db8a0bf8665156fd2383ad40fcde0fe146 100644 (file)
@@ -67,12 +67,12 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
        stats = part->master->ecc_stats;
        res = part->master->_read(part->master, from + part->offset, len,
                                  retlen, buf);
-       if (unlikely(res)) {
-               if (mtd_is_bitflip(res))
-                       mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
-               if (mtd_is_eccerr(res))
-                       mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
-       }
+       if (unlikely(mtd_is_eccerr(res)))
+               mtd->ecc_stats.failed +=
+                       part->master->ecc_stats.failed - stats.failed;
+       else
+               mtd->ecc_stats.corrected +=
+                       part->master->ecc_stats.corrected - stats.corrected;
        return res;
 }
 
@@ -517,6 +517,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
 
        slave->mtd.ecclayout = master->ecclayout;
        slave->mtd.ecc_strength = master->ecc_strength;
+       slave->mtd.bitflip_threshold = master->bitflip_threshold;
+
        if (master->_block_isbad) {
                uint64_t offs = 0;
 
index 7d17cecad69d8fccc1467eaa210c6fe2915197dd..31bb7e5b504aa8f05e31472787d2dac7aff9659f 100644 (file)
@@ -115,6 +115,46 @@ config MTD_NAND_OMAP2
           Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
          platforms.
 
+config MTD_NAND_OMAP_BCH
+       depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3
+       bool "Enable support for hardware BCH error correction"
+       default n
+       select BCH
+       select BCH_CONST_PARAMS
+       help
+        Support for hardware BCH error correction.
+
+choice
+       prompt "BCH error correction capability"
+       depends on MTD_NAND_OMAP_BCH
+
+config MTD_NAND_OMAP_BCH8
+       bool "8 bits / 512 bytes (recommended)"
+       help
+        Support correcting up to 8 bitflips per 512-byte block.
+        This will use 13 bytes of spare area per 512 bytes of page data.
+        This is the recommended mode, as 4-bit mode does not work
+        on some OMAP3 revisions, due to a hardware bug.
+
+config MTD_NAND_OMAP_BCH4
+       bool "4 bits / 512 bytes"
+       help
+        Support correcting up to 4 bitflips per 512-byte block.
+        This will use 7 bytes of spare area per 512 bytes of page data.
+        Note that this mode does not work on some OMAP3 revisions, due to a
+        hardware bug. Please check your OMAP datasheet before selecting this
+        mode.
+
+endchoice
+
+if MTD_NAND_OMAP_BCH
+config BCH_CONST_M
+       default 13
+config BCH_CONST_T
+       default 4 if MTD_NAND_OMAP_BCH4
+       default 8 if MTD_NAND_OMAP_BCH8
+endif
+
 config MTD_NAND_IDS
        tristate
 
@@ -440,7 +480,7 @@ config MTD_NAND_NANDSIM
 
 config MTD_NAND_GPMI_NAND
         bool "GPMI NAND Flash Controller driver"
-        depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
+        depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q)
         help
         Enables NAND Flash support for IMX23 or IMX28.
         The GPMI controller is very powerful, with the help of BCH
index 4f20e1d8bef10b43546bd64d51933c0ed010093d..60a0dfdb08087a55311ee7f2e53522ecde7baa17 100644 (file)
@@ -414,7 +414,7 @@ static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
        }
        err = 0;
        if (corrected)
-               err = -EUCLEAN;
+               err = 1;        /* return max_bitflips per ecc step */
        if (uncorrected)
                err = -EBADMSG;
 out:
@@ -446,7 +446,7 @@ static int alauda_read(struct mtd_info *mtd, loff_t from, size_t len,
        }
        err = 0;
        if (corrected)
-               err = -EUCLEAN;
+               err = 1;        /* return max_bitflips per ecc step */
        if (uncorrected)
                err = -EBADMSG;
        return err;
index 2165576a1c67df0e623752970fdfdd141f3ea931..97ac6712bb1926c6ff8e31292cc3c362d4d53d9d 100644 (file)
@@ -324,9 +324,10 @@ static int atmel_nand_calculate(struct mtd_info *mtd,
  * mtd:        mtd info structure
  * chip:       nand chip info structure
  * buf:        buffer to store read data
+ * oob_required:    caller expects OOB data read to chip->oob_poi
  */
-static int atmel_nand_read_page(struct mtd_info *mtd,
-               struct nand_chip *chip, uint8_t *buf, int page)
+static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+                               uint8_t *buf, int oob_required, int page)
 {
        int eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -335,6 +336,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
        uint8_t *oob = chip->oob_poi;
        uint8_t *ecc_pos;
        int stat;
+       unsigned int max_bitflips = 0;
 
        /*
         * Errata: ALE is incorrectly wired up to the ECC controller
@@ -371,10 +373,12 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
        /* check if there's an error */
        stat = chip->ecc.correct(mtd, p, oob, NULL);
 
-       if (stat < 0)
+       if (stat < 0) {
                mtd->ecc_stats.failed++;
-       else
+       } else {
                mtd->ecc_stats.corrected += stat;
+               max_bitflips = max_t(unsigned int, max_bitflips, stat);
+       }
 
        /* get back to oob start (end of page) */
        chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
@@ -382,7 +386,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
        /* read the oob */
        chip->read_buf(mtd, oob, mtd->oobsize);
 
-       return 0;
+       return max_bitflips;
 }
 
 /*
index 73abbc3e093eced219347f93f9fefc5b31c2bd15..9f609d2dcf62d3dc993358e4de817de00d154267 100644 (file)
@@ -508,8 +508,6 @@ static int __devinit au1550nd_probe(struct platform_device *pdev)
        this->chip_delay = 30;
        this->ecc.mode = NAND_ECC_SOFT;
 
-       this->options = NAND_NO_AUTOINCR;
-
        if (pd->devwidth)
                this->options |= NAND_BUSWIDTH_16;
 
index a930666d0687655e1f5fe27c1604c34e38fe6911..5914bb32e0014e189cd42aafd34017698222dd27 100644 (file)
@@ -22,9 +22,9 @@
 
 /* ---- Private Function Prototypes -------------------------------------- */
 static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
-       struct nand_chip *chip, uint8_t *buf, int page);
+       struct nand_chip *chip, uint8_t *buf, int oob_required, int page);
 static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
-       struct nand_chip *chip, const uint8_t *buf);
+       struct nand_chip *chip, const uint8_t *buf, int oob_required);
 
 /* ---- Private Variables ------------------------------------------------ */
 
@@ -103,11 +103,12 @@ static struct nand_ecclayout nand_hw_eccoob_4096 = {
 *  @mtd:       mtd info structure
 *  @chip:      nand chip info structure
 *  @buf:       buffer to store read data
+*  @oob_required:      caller expects OOB data read to chip->oob_poi
 *
 ***************************************************************************/
 static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
                                       struct nand_chip *chip, uint8_t * buf,
-                                                int page)
+                                      int oob_required, int page)
 {
        int sectorIdx = 0;
        int eccsize = chip->ecc.size;
@@ -116,6 +117,7 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
        uint8_t eccCalc[NAND_ECC_NUM_BYTES];
        int sectorOobSize = mtd->oobsize / eccsteps;
        int stat;
+       unsigned int max_bitflips = 0;
 
        for (sectorIdx = 0; sectorIdx < eccsteps;
                        sectorIdx++, datap += eccsize) {
@@ -177,9 +179,10 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
                        }
 #endif
                        mtd->ecc_stats.corrected += stat;
+                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
                }
        }
-       return 0;
+       return max_bitflips;
 }
 
 /****************************************************************************
@@ -188,10 +191,11 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
 *  @mtd:       mtd info structure
 *  @chip:      nand chip info structure
 *  @buf:       data buffer
+*  @oob_required:      must write chip->oob_poi to OOB
 *
 ***************************************************************************/
 static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
-       struct nand_chip *chip, const uint8_t *buf)
+       struct nand_chip *chip, const uint8_t *buf, int oob_required)
 {
        int sectorIdx = 0;
        int eccsize = chip->ecc.size;
index 6908cdde3065e73b24509e8c9d32865f5befb1d9..c855e7cd337b2f7a278a164c2e7a7b7f17a723f8 100644 (file)
@@ -341,7 +341,7 @@ static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf,
         * for MLC parts which may have permanently stuck bits.
         */
        struct nand_chip *chip = mtd->priv;
-       int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0);
+       int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0, 0);
        if (ret < 0)
                return -EFAULT;
        else {
@@ -476,12 +476,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
                this->badblock_pattern = &largepage_bbt;
        }
 
-       /*
-        * FIXME: ecc strength value of 6 bits per 512 bytes of data is a
-        * conservative guess, given 13 ecc bytes and using bch alg.
-        * (Assume Galois field order m=15 to allow a margin of error.)
-        */
-       this->ecc.strength = 6;
+       this->ecc.strength = 8;
 
 #endif
 
index d7b86b925de5ead4b4c0653bf13080bea55a4156..3f1c18599cbd9484096caed856f69c79c61c2bff 100644 (file)
@@ -558,7 +558,7 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
 }
 
 static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-               uint8_t *buf, int page)
+               uint8_t *buf, int oob_required, int page)
 {
        bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
        bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -567,7 +567,7 @@ static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip
 }
 
 static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-               const uint8_t *buf)
+               const uint8_t *buf, int oob_required)
 {
        bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
        bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
index 2a96e1a12062314234f2cd74b86163029abc3e3d..41371ba1a8117c87aaaf85b63383e60bda6303d6 100644 (file)
@@ -364,25 +364,27 @@ static int cafe_nand_write_oob(struct mtd_info *mtd,
 
 /* Don't use -- use nand_read_oob_std for now */
 static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-                             int page, int sndcmd)
+                             int page)
 {
        chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
        chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-       return 1;
+       return 0;
 }
 /**
  * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
  * @mtd:       mtd info structure
  * @chip:      nand chip info structure
  * @buf:       buffer to store read data
+ * @oob_required:      caller expects OOB data read to chip->oob_poi
  *
  * The hw generator calculates the error syndrome automatically. Therefor
  * we need a special oob layout and handling.
  */
 static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-                              uint8_t *buf, int page)
+                              uint8_t *buf, int oob_required, int page)
 {
        struct cafe_priv *cafe = mtd->priv;
+       unsigned int max_bitflips = 0;
 
        cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
                     cafe_readl(cafe, NAND_ECC_RESULT),
@@ -449,10 +451,11 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                } else {
                        dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
                        mtd->ecc_stats.corrected += n;
+                       max_bitflips = max_t(unsigned int, max_bitflips, n);
                }
        }
 
-       return 0;
+       return max_bitflips;
 }
 
 static struct nand_ecclayout cafe_oobinfo_2048 = {
@@ -518,7 +521,8 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
 
 
 static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
-                                         struct nand_chip *chip, const uint8_t *buf)
+                                         struct nand_chip *chip,
+                                         const uint8_t *buf, int oob_required)
 {
        struct cafe_priv *cafe = mtd->priv;
 
@@ -530,16 +534,17 @@ static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
 }
 
 static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-                               const uint8_t *buf, int page, int cached, int raw)
+                               const uint8_t *buf, int oob_required, int page,
+                               int cached, int raw)
 {
        int status;
 
        chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
 
        if (unlikely(raw))
-               chip->ecc.write_page_raw(mtd, chip, buf);
+               chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
        else
-               chip->ecc.write_page(mtd, chip, buf);
+               chip->ecc.write_page(mtd, chip, buf, oob_required);
 
        /*
         * Cached progamming disabled for now, Not sure if its worth the
@@ -685,7 +690,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
 
        /* Enable the following for a flash based bad block table */
        cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
-       cafe->nand.options = NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;
+       cafe->nand.options = NAND_OWN_BUFFERS;
 
        if (skipbbt) {
                cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -888,17 +893,7 @@ static struct pci_driver cafe_nand_pci_driver = {
        .resume = cafe_nand_resume,
 };
 
-static int __init cafe_nand_init(void)
-{
-       return pci_register_driver(&cafe_nand_pci_driver);
-}
-
-static void __exit cafe_nand_exit(void)
-{
-       pci_unregister_driver(&cafe_nand_pci_driver);
-}
-module_init(cafe_nand_init);
-module_exit(cafe_nand_exit);
+module_pci_driver(cafe_nand_pci_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
index 821c34c6250021246dfdb7ef97a44e497a05726c..adb6c3ef37fb0d8a645177f1c8efaf6b098bde8e 100644 (file)
@@ -240,7 +240,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
 
        /* Enable the following for a flash based bad block table */
        this->bbt_options = NAND_BBT_USE_FLASH;
-       this->options = NAND_NO_AUTOINCR;
 
        /* Scan to find existence of the device */
        if (nand_scan(new_mtd, 1)) {
index a9e57d686297096e0700935a90d6a5257aa789ff..0650aafa0dd2238b2af08a1328970e5efb38d24e 100644 (file)
@@ -924,9 +924,10 @@ bool is_erased(uint8_t *buf, int len)
 #define ECC_LAST_ERR(x)                ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
 
 static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
-                                       uint32_t irq_status)
+                      uint32_t irq_status, unsigned int *max_bitflips)
 {
        bool check_erased_page = false;
+       unsigned int bitflips = 0;
 
        if (irq_status & INTR_STATUS__ECC_ERR) {
                /* read the ECC errors. we'll ignore them for now */
@@ -965,6 +966,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
                                        /* correct the ECC error */
                                        buf[offset] ^= err_correction_value;
                                        denali->mtd.ecc_stats.corrected++;
+                                       bitflips++;
                                }
                        } else {
                                /* if the error is not correctable, need to
@@ -984,6 +986,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
                clear_interrupts(denali);
                denali_set_intr_modes(denali, true);
        }
+       *max_bitflips = bitflips;
        return check_erased_page;
 }
 
@@ -1084,7 +1087,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
  * by write_page above.
  * */
 static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-                               const uint8_t *buf)
+                               const uint8_t *buf, int oob_required)
 {
        /* for regular page writes, we let HW handle all the ECC
         * data written to the device. */
@@ -1096,7 +1099,7 @@ static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
  * write_page() function above.
  */
 static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-                                       const uint8_t *buf)
+                                       const uint8_t *buf, int oob_required)
 {
        /* for raw page writes, we want to disable ECC and simply write
           whatever data is in the buffer. */
@@ -1110,17 +1113,17 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
 }
 
 static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-                          int page, int sndcmd)
+                          int page)
 {
        read_oob_data(mtd, chip->oob_poi, page);
 
-       return 0; /* notify NAND core to send command to
-                          NAND device. */
+       return 0;
 }
 
 static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-                           uint8_t *buf, int page)
+                           uint8_t *buf, int oob_required, int page)
 {
+       unsigned int max_bitflips;
        struct denali_nand_info *denali = mtd_to_denali(mtd);
 
        dma_addr_t addr = denali->buf.dma_buf;
@@ -1153,7 +1156,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
 
        memcpy(buf, denali->buf.buf, mtd->writesize);
 
-       check_erased_page = handle_ecc(denali, buf, irq_status);
+       check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
        denali_enable_dma(denali, false);
 
        if (check_erased_page) {
@@ -1167,11 +1170,11 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                                denali->mtd.ecc_stats.failed++;
                }
        }
-       return 0;
+       return max_bitflips;
 }
 
 static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-                               uint8_t *buf, int page)
+                               uint8_t *buf, int oob_required, int page)
 {
        struct denali_nand_info *denali = mtd_to_denali(mtd);
 
@@ -1702,17 +1705,4 @@ static struct pci_driver denali_pci_driver = {
        .remove = denali_pci_remove,
 };
 
-static int __devinit denali_init(void)
-{
-       printk(KERN_INFO "Spectra MTD driver\n");
-       return pci_register_driver(&denali_pci_driver);
-}
-
-/* Free memory */
-static void __devexit denali_exit(void)
-{
-       pci_unregister_driver(&denali_pci_driver);
-}
-
-module_init(denali_init);
-module_exit(denali_exit);
+module_pci_driver(denali_pci_driver);
index b08202664543200553255f89c00669e203783d14..a225e49a56235763b35cfd5118f30f7c20c4fb55 100644 (file)
@@ -720,6 +720,7 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
        struct docg4_priv *doc = nand->priv;
        void __iomem *docptr = doc->virtadr;
        uint16_t status, edc_err, *buf16;
+       int bits_corrected = 0;
 
        dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
 
@@ -772,7 +773,7 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
 
                /* If bitflips are reported, attempt to correct with ecc */
                if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
-                       int bits_corrected = correct_data(mtd, buf, page);
+                       bits_corrected = correct_data(mtd, buf, page);
                        if (bits_corrected == -EBADMSG)
                                mtd->ecc_stats.failed++;
                        else
@@ -781,24 +782,24 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
        }
 
        writew(0, docptr + DOC_DATAEND);
-       return 0;
+       return bits_corrected;
 }
 
 
 static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
-                              uint8_t *buf, int page)
+                              uint8_t *buf, int oob_required, int page)
 {
        return read_page(mtd, nand, buf, page, false);
 }
 
 static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
-                          uint8_t *buf, int page)
+                          uint8_t *buf, int oob_required, int page)
 {
        return read_page(mtd, nand, buf, page, true);
 }
 
 static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
-                         int page, int sndcmd)
+                         int page)
 {
        struct docg4_priv *doc = nand->priv;
        void __iomem *docptr = doc->virtadr;
@@ -952,13 +953,13 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
 }
 
 static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
-                                const uint8_t *buf)
+                                const uint8_t *buf, int oob_required)
 {
        return write_page(mtd, nand, buf, false);
 }
 
 static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
-                            const uint8_t *buf)
+                            const uint8_t *buf, int oob_required)
 {
        return write_page(mtd, nand, buf, true);
 }
@@ -1002,7 +1003,7 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
                return -ENOMEM;
 
        read_page_prologue(mtd, g4_addr);
-       status = docg4_read_page(mtd, nand, buf, DOCG4_FACTORY_BBT_PAGE);
+       status = docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
        if (status)
                goto exit;
 
@@ -1079,7 +1080,7 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
 
        /* write first page of block */
        write_page_prologue(mtd, g4_addr);
-       docg4_write_page(mtd, nand, buf);
+       docg4_write_page(mtd, nand, buf, 1);
        ret = pageprog(mtd);
        if (!ret)
                mtd->ecc_stats.badblocks++;
@@ -1192,8 +1193,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
        nand->ecc.prepad = 8;
        nand->ecc.bytes = 8;
        nand->ecc.strength = DOCG4_T;
-       nand->options =
-               NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE | NAND_NO_AUTOINCR;
+       nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
        nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
        nand->controller = &nand->hwcontrol;
        spin_lock_init(&nand->controller->lock);
index 80b5264f0a32f031a10f5b50e50987a1184646e1..784293806110acc63ee4ae062c689dab534860d6 100644 (file)
@@ -75,6 +75,7 @@ struct fsl_elbc_fcm_ctrl {
        unsigned int use_mdr;    /* Non zero if the MDR is to be set      */
        unsigned int oob;        /* Non zero if operating on OOB data     */
        unsigned int counter;    /* counter for the initializations       */
+       unsigned int max_bitflips;  /* Saved during READ0 cmd             */
 };
 
 /* These map to the positions used by the FCM hardware ECC generator */
@@ -253,6 +254,8 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
        if (chip->ecc.mode != NAND_ECC_HW)
                return 0;
 
+       elbc_fcm_ctrl->max_bitflips = 0;
+
        if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
                uint32_t lteccr = in_be32(&lbc->lteccr);
                /*
@@ -262,11 +265,16 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
                 * bits 28-31 are uncorrectable errors, marked elsewhere.
                 * for small page nand only 1 bit is used.
                 * if the ELBC doesn't have the lteccr register it reads 0
+                * FIXME: 4 bits can be corrected on NANDs with 2k pages, so
+                * count the number of sub-pages with bitflips and update
+                * ecc_stats.corrected accordingly.
                 */
                if (lteccr & 0x000F000F)
                        out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
-               if (lteccr & 0x000F0000)
+               if (lteccr & 0x000F0000) {
                        mtd->ecc_stats.corrected++;
+                       elbc_fcm_ctrl->max_bitflips = 1;
+               }
        }
 
        return 0;
@@ -738,26 +746,28 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
        return 0;
 }
 
-static int fsl_elbc_read_page(struct mtd_info *mtd,
-                              struct nand_chip *chip,
-                             uint8_t *buf,
-                             int page)
+static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+                             uint8_t *buf, int oob_required, int page)
 {
+       struct fsl_elbc_mtd *priv = chip->priv;
+       struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+       struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+
        fsl_elbc_read_buf(mtd, buf, mtd->writesize);
-       fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+       if (oob_required)
+               fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
 
        if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
                mtd->ecc_stats.failed++;
 
-       return 0;
+       return elbc_fcm_ctrl->max_bitflips;
 }
 
 /* ECC will be calculated automatically, and errors will be detected in
  * waitfunc.
  */
-static void fsl_elbc_write_page(struct mtd_info *mtd,
-                                struct nand_chip *chip,
-                                const uint8_t *buf)
+static void fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+                               const uint8_t *buf, int oob_required)
 {
        fsl_elbc_write_buf(mtd, buf, mtd->writesize);
        fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -795,7 +805,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
        chip->bbt_md = &bbt_mirror_descr;
 
        /* set up nand options */
-       chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+       chip->options = NAND_NO_READRDY;
        chip->bbt_options = NAND_BBT_USE_FLASH;
 
        chip->controller = &elbc_fcm_ctrl->controller;
@@ -814,11 +824,6 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
                chip->ecc.size = 512;
                chip->ecc.bytes = 3;
                chip->ecc.strength = 1;
-               /*
-                * FIXME: can hardware ecc correct 4 bitflips if page size is
-                * 2k?  Then does hardware report number of corrections for this
-                * case?  If so, ecc_stats reporting needs to be fixed as well.
-                */
        } else {
                /* otherwise fall back to default software ECC */
                chip->ecc.mode = NAND_ECC_SOFT;
index c30ac7b83d284cf2da970ab755441b7ba40de0dd..9602c1b7e27e8e5a70c66f50a47df04693b4bf1e 100644 (file)
@@ -63,6 +63,7 @@ struct fsl_ifc_nand_ctrl {
        unsigned int oob;       /* Non zero if operating on OOB data    */
        unsigned int eccread;   /* Non zero for a full-page ECC read    */
        unsigned int counter;   /* counter for the initializations      */
+       unsigned int max_bitflips;  /* Saved during READ0 cmd           */
 };
 
 static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
@@ -262,6 +263,8 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
        if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
                dev_err(priv->dev, "NAND Flash Write Protect Error\n");
 
+       nctrl->max_bitflips = 0;
+
        if (nctrl->eccread) {
                int errors;
                int bufnum = nctrl->page & priv->bufnum_mask;
@@ -290,6 +293,9 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
                        }
 
                        mtd->ecc_stats.corrected += errors;
+                       nctrl->max_bitflips = max_t(unsigned int,
+                                                   nctrl->max_bitflips,
+                                                   errors);
                }
 
                nctrl->eccread = 0;
@@ -375,21 +381,31 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
 
                return;
 
-       /* READID must read all 8 possible bytes */
        case NAND_CMD_READID:
+       case NAND_CMD_PARAM: {
+               int timing = IFC_FIR_OP_RB;
+               if (command == NAND_CMD_PARAM)
+                       timing = IFC_FIR_OP_RBCD;
+
                out_be32(&ifc->ifc_nand.nand_fir0,
                                (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
                                (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
-                               (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
+                               (timing << IFC_NAND_FIR0_OP2_SHIFT));
                out_be32(&ifc->ifc_nand.nand_fcr0,
-                               NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
-               /* 8 bytes for manuf, device and exts */
-               out_be32(&ifc->ifc_nand.nand_fbcr, 8);
-               ifc_nand_ctrl->read_bytes = 8;
+                               command << IFC_NAND_FCR0_CMD0_SHIFT);
+               out_be32(&ifc->ifc_nand.row3, column);
+
+               /*
+                * although currently it's 8 bytes for READID, we always read
+                * the maximum 256 bytes(for PARAM)
+                */
+               out_be32(&ifc->ifc_nand.nand_fbcr, 256);
+               ifc_nand_ctrl->read_bytes = 256;
 
                set_addr(mtd, 0, 0, 0);
                fsl_ifc_run_command(mtd);
                return;
+       }
 
        /* ERASE1 stores the block and page address */
        case NAND_CMD_ERASE1:
@@ -682,15 +698,16 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
        return nand_fsr | NAND_STATUS_WP;
 }
 
-static int fsl_ifc_read_page(struct mtd_info *mtd,
-                             struct nand_chip *chip,
-                             uint8_t *buf, int page)
+static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+                            uint8_t *buf, int oob_required, int page)
 {
        struct fsl_ifc_mtd *priv = chip->priv;
        struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+       struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
 
        fsl_ifc_read_buf(mtd, buf, mtd->writesize);
-       fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+       if (oob_required)
+               fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
 
        if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
                dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");
@@ -698,15 +715,14 @@ static int fsl_ifc_read_page(struct mtd_info *mtd,
        if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
                mtd->ecc_stats.failed++;
 
-       return 0;
+       return nctrl->max_bitflips;
 }
 
 /* ECC will be calculated automatically, and errors will be detected in
  * waitfunc.
  */
-static void fsl_ifc_write_page(struct mtd_info *mtd,
-                               struct nand_chip *chip,
-                               const uint8_t *buf)
+static void fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+                              const uint8_t *buf, int oob_required)
 {
        fsl_ifc_write_buf(mtd, buf, mtd->writesize);
        fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -789,7 +805,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
        out_be32(&ifc->ifc_nand.ncfgr, 0x0);
 
        /* set up nand options */
-       chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+       chip->options = NAND_NO_READRDY;
        chip->bbt_options = NAND_BBT_USE_FLASH;
 
 
@@ -811,6 +827,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
        /* Hardware generates ECC per 512 Bytes */
        chip->ecc.size = 512;
        chip->ecc.bytes = 8;
+       chip->ecc.strength = 4;
 
        switch (csor & CSOR_NAND_PGS_MASK) {
        case CSOR_NAND_PGS_512:
index 1b8330e1155a4468f4fd272a052b9b095850eddb..38d26240d8b152b06462794ed688905951bf7f4b 100644 (file)
@@ -692,6 +692,7 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
  * @mtd:       mtd info structure
  * @chip:      nand chip info structure
  * @buf:       buffer to store read data
+ * @oob_required:      caller expects OOB data read to chip->oob_poi
  * @page:      page number to read
  *
  * This routine is needed for fsmc version 8 as reading from NAND chip has to be
@@ -701,7 +702,7 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
  * max of 8 bits)
  */
 static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-                                uint8_t *buf, int page)
+                                uint8_t *buf, int oob_required, int page)
 {
        struct fsmc_nand_data *host = container_of(mtd,
                                        struct fsmc_nand_data, mtd);
@@ -720,6 +721,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
         */
        uint16_t ecc_oob[7];
        uint8_t *oob = (uint8_t *)&ecc_oob[0];
+       unsigned int max_bitflips = 0;
 
        for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
                chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
@@ -748,13 +750,15 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
                chip->ecc.calculate(mtd, p, &ecc_calc[i]);
 
                stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
-               if (stat < 0)
+               if (stat < 0) {
                        mtd->ecc_stats.failed++;
-               else
+               } else {
                        mtd->ecc_stats.corrected += stat;
+                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
+               }
        }
 
-       return 0;
+       return max_bitflips;
 }
 
 /*
@@ -994,9 +998,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
                return PTR_ERR(host->clk);
        }
 
-       ret = clk_enable(host->clk);
+       ret = clk_prepare_enable(host->clk);
        if (ret)
-               goto err_clk_enable;
+               goto err_clk_prepare_enable;
 
        /*
         * This device ID is actually a common AMBA ID as used on the
@@ -1176,8 +1180,8 @@ err_req_write_chnl:
        if (host->mode == USE_DMA_ACCESS)
                dma_release_channel(host->read_dma_chan);
 err_req_read_chnl:
-       clk_disable(host->clk);
-err_clk_enable:
+       clk_disable_unprepare(host->clk);
+err_clk_prepare_enable:
        clk_put(host->clk);
        return ret;
 }
@@ -1198,7 +1202,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
                        dma_release_channel(host->write_dma_chan);
                        dma_release_channel(host->read_dma_chan);
                }
-               clk_disable(host->clk);
+               clk_disable_unprepare(host->clk);
                clk_put(host->clk);
        }
 
@@ -1210,7 +1214,7 @@ static int fsmc_nand_suspend(struct device *dev)
 {
        struct fsmc_nand_data *host = dev_get_drvdata(dev);
        if (host)
-               clk_disable(host->clk);
+               clk_disable_unprepare(host->clk);
        return 0;
 }
 
@@ -1218,7 +1222,7 @@ static int fsmc_nand_resume(struct device *dev)
 {
        struct fsmc_nand_data *host = dev_get_drvdata(dev);
        if (host) {
-               clk_enable(host->clk);
+               clk_prepare_enable(host->clk);
                fsmc_nand_setup(host->regs_va, host->bank,
                                host->nand.options & NAND_BUSWIDTH_16,
                                host->dev_timings);
index 4effb8c579db0d5b3e9127dc3cedbe7630a532c3..a0924515c39644fcc9430938ded2795a2f356a4c 100644 (file)
 
 #define BP_BCH_FLASH0LAYOUT0_ECC0              12
 #define BM_BCH_FLASH0LAYOUT0_ECC0      (0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
-#define BF_BCH_FLASH0LAYOUT0_ECC0(v)           \
-       (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) & BM_BCH_FLASH0LAYOUT0_ECC0)
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0         11
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0 (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
+#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x)                                \
+       (GPMI_IS_MX6Q(x)                                        \
+               ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)      \
+                       & MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0)       \
+               : (((v) << BP_BCH_FLASH0LAYOUT0_ECC0)           \
+                       & BM_BCH_FLASH0LAYOUT0_ECC0)            \
+       )
 
 #define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE                0
 #define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE                \
                        (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
-#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v)     \
-       (((v) << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)\
-                                        & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE   \
+                       (0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x)                          \
+       (GPMI_IS_MX6Q(x)                                                \
+               ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)   \
+               : ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)               \
+       )
 
 #define HW_BCH_FLASH0LAYOUT1                   0x00000090
 
 
 #define BP_BCH_FLASH0LAYOUT1_ECCN              12
 #define BM_BCH_FLASH0LAYOUT1_ECCN      (0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
-#define BF_BCH_FLASH0LAYOUT1_ECCN(v)           \
-       (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) & BM_BCH_FLASH0LAYOUT1_ECCN)
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN         11
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
+#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x)                                \
+       (GPMI_IS_MX6Q(x)                                        \
+               ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)      \
+                       & MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN)       \
+               : (((v) << BP_BCH_FLASH0LAYOUT1_ECCN)           \
+                       & BM_BCH_FLASH0LAYOUT1_ECCN)            \
+       )
 
 #define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE                0
 #define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE                \
                        (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
-#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v)     \
-       (((v) << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
-                                        & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE   \
+                       (0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x)                          \
+       (GPMI_IS_MX6Q(x)                                                \
+               ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)   \
+               : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)               \
+       )
 #endif
index e8ea7107932e9a9f784007da5be11cf01e32d5dd..a1f43329ad43d2c7898f7c978cb3f9f15721fe5e 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/mtd/gpmi-nand.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
-#include <mach/mxs.h>
 
 #include "gpmi-nand.h"
 #include "gpmi-regs.h"
@@ -37,6 +36,8 @@ struct timing_threshod timing_default_threshold = {
        .max_dll_delay_in_ns         = 16,
 };
 
+#define MXS_SET_ADDR           0x4
+#define MXS_CLR_ADDR           0x8
 /*
  * Clear the bit and poll it cleared.  This is usually called with
  * a reset address and mask being either SFTRST(bit 31) or CLKGATE
@@ -47,7 +48,7 @@ static int clear_poll_bit(void __iomem *addr, u32 mask)
        int timeout = 0x400;
 
        /* clear the bit */
-       __mxs_clrl(mask, addr);
+       writel(mask, addr + MXS_CLR_ADDR);
 
        /*
         * SFTRST needs 3 GPMI clocks to settle, the reference manual
@@ -92,11 +93,11 @@ static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
                goto error;
 
        /* clear CLKGATE */
-       __mxs_clrl(MODULE_CLKGATE, reset_addr);
+       writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
 
        if (!just_enable) {
                /* set SFTRST to reset the block */
-               __mxs_setl(MODULE_SFTRST, reset_addr);
+               writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
                udelay(1);
 
                /* poll CLKGATE becoming set */
@@ -223,13 +224,13 @@ int bch_set_geometry(struct gpmi_nand_data *this)
        /* Configure layout 0. */
        writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
                        | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
-                       | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength)
-                       | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size),
+                       | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
+                       | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
                        r->bch_regs + HW_BCH_FLASH0LAYOUT0);
 
        writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
-                       | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength)
-                       | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size),
+                       | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
+                       | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
                        r->bch_regs + HW_BCH_FLASH0LAYOUT1);
 
        /* Set *all* chip selects to use layout 0. */
@@ -255,11 +256,12 @@ static unsigned int ns_to_cycles(unsigned int time,
        return max(k, min);
 }
 
+#define DEF_MIN_PROP_DELAY     5
+#define DEF_MAX_PROP_DELAY     9
 /* Apply timing to current hardware conditions. */
 static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
                                        struct gpmi_nfc_hardware_timing *hw)
 {
-       struct gpmi_nand_platform_data *pdata = this->pdata;
        struct timing_threshod *nfc = &timing_default_threshold;
        struct nand_chip *nand = &this->nand;
        struct nand_timing target = this->timing;
@@ -276,8 +278,8 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
        int ideal_sample_delay_in_ns;
        unsigned int sample_delay_factor;
        int tEYE;
-       unsigned int min_prop_delay_in_ns = pdata->min_prop_delay_in_ns;
-       unsigned int max_prop_delay_in_ns = pdata->max_prop_delay_in_ns;
+       unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
+       unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;
 
        /*
         * If there are multiple chips, we need to relax the timings to allow
@@ -803,7 +805,8 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
        if (GPMI_IS_MX23(this)) {
                mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
                reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
-       } else if (GPMI_IS_MX28(this)) {
+       } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) {
+               /* MX28 shares the same R/B register as MX6Q. */
                mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
                reg = readl(r->gpmi_regs + HW_GPMI_STAT);
        } else
index b68e04310bd8e5b14327ff142788180ba8680754..a05b7b444d4f1f8a92d6f0e10a2ee1db19d3c722 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/mtd/gpmi-nand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 #include "gpmi-nand.h"
 
 /* add our owner bbt descriptor */
@@ -387,7 +389,7 @@ static void release_bch_irq(struct gpmi_nand_data *this)
 static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
 {
        struct gpmi_nand_data *this = param;
-       struct resource *r = this->private;
+       int dma_channel = (int)this->private;
 
        if (!mxs_dma_is_apbh(chan))
                return false;
@@ -399,7 +401,7 @@ static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
         *      for mx28 :      MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
         *              (These eight channels share the same IRQ!)
         */
-       if (r->start <= chan->chan_id && chan->chan_id <= r->end) {
+       if (dma_channel == chan->chan_id) {
                chan->private = &this->dma_data;
                return true;
        }
@@ -419,57 +421,45 @@ static void release_dma_channels(struct gpmi_nand_data *this)
 static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
 {
        struct platform_device *pdev = this->pdev;
-       struct gpmi_nand_platform_data *pdata = this->pdata;
-       struct resources *res = &this->resources;
-       struct resource *r, *r_dma;
-       unsigned int i;
+       struct resource *r_dma;
+       struct device_node *dn;
+       int dma_channel;
+       unsigned int ret;
+       struct dma_chan *dma_chan;
+       dma_cap_mask_t mask;
+
+       /* dma channel, we only use the first one. */
+       dn = pdev->dev.of_node;
+       ret = of_property_read_u32(dn, "fsl,gpmi-dma-channel", &dma_channel);
+       if (ret) {
+               pr_err("unable to get DMA channel from dt.\n");
+               goto acquire_err;
+       }
+       this->private = (void *)dma_channel;
 
-       r = platform_get_resource_byname(pdev, IORESOURCE_DMA,
-                                       GPMI_NAND_DMA_CHANNELS_RES_NAME);
+       /* gpmi dma interrupt */
        r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
                                        GPMI_NAND_DMA_INTERRUPT_RES_NAME);
-       if (!r || !r_dma) {
+       if (!r_dma) {
                pr_err("Can't get resource for DMA\n");
-               return -ENXIO;
+               goto acquire_err;
        }
+       this->dma_data.chan_irq = r_dma->start;
 
-       /* used in gpmi_dma_filter() */
-       this->private = r;
-
-       for (i = r->start; i <= r->end; i++) {
-               struct dma_chan *dma_chan;
-               dma_cap_mask_t mask;
+       /* request dma channel */
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
 
-               if (i - r->start >= pdata->max_chip_count)
-                       break;
-
-               dma_cap_zero(mask);
-               dma_cap_set(DMA_SLAVE, mask);
-
-               /* get the DMA interrupt */
-               if (r_dma->start == r_dma->end) {
-                       /* only register the first. */
-                       if (i == r->start)
-                               this->dma_data.chan_irq = r_dma->start;
-                       else
-                               this->dma_data.chan_irq = NO_IRQ;
-               } else
-                       this->dma_data.chan_irq = r_dma->start + (i - r->start);
-
-               dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
-               if (!dma_chan)
-                       goto acquire_err;
-
-               /* fill the first empty item */
-               this->dma_chans[i - r->start] = dma_chan;
+       dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
+       if (!dma_chan) {
+               pr_err("dma_request_channel failed.\n");
+               goto acquire_err;
        }
 
-       res->dma_low_channel = r->start;
-       res->dma_high_channel = i;
+       this->dma_chans[0] = dma_chan;
        return 0;
 
 acquire_err:
-       pr_err("Can't acquire DMA channel %u\n", i);
        release_dma_channels(this);
        return -EINVAL;
 }
@@ -851,7 +841,7 @@ static void block_mark_swapping(struct gpmi_nand_data *this,
 }
 
 static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-                               uint8_t *buf, int page)
+                               uint8_t *buf, int oob_required, int page)
 {
        struct gpmi_nand_data *this = chip->priv;
        struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -917,28 +907,31 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                mtd->ecc_stats.corrected += corrected;
        }
 
-       /*
-        * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() for
-        * details about our policy for delivering the OOB.
-        *
-        * We fill the caller's buffer with set bits, and then copy the block
-        * mark to th caller's buffer. Note that, if block mark swapping was
-        * necessary, it has already been done, so we can rely on the first
-        * byte of the auxiliary buffer to contain the block mark.
-        */
-       memset(chip->oob_poi, ~0, mtd->oobsize);
-       chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+       if (oob_required) {
+               /*
+                * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
+                * for details about our policy for delivering the OOB.
+                *
+                * We fill the caller's buffer with set bits, and then copy the
+                * block mark to th caller's buffer. Note that, if block mark
+                * swapping was necessary, it has already been done, so we can
+                * rely on the first byte of the auxiliary buffer to contain
+                * the block mark.
+                */
+               memset(chip->oob_poi, ~0, mtd->oobsize);
+               chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
 
-       read_page_swap_end(this, buf, mtd->writesize,
-                       this->payload_virt, this->payload_phys,
-                       nfc_geo->payload_size,
-                       payload_virt, payload_phys);
+               read_page_swap_end(this, buf, mtd->writesize,
+                               this->payload_virt, this->payload_phys,
+                               nfc_geo->payload_size,
+                               payload_virt, payload_phys);
+       }
 exit_nfc:
        return ret;
 }
 
-static void gpmi_ecc_write_page(struct mtd_info *mtd,
-                               struct nand_chip *chip, const uint8_t *buf)
+static void gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+                               const uint8_t *buf, int oob_required)
 {
        struct gpmi_nand_data *this = chip->priv;
        struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -1077,7 +1070,7 @@ exit_auxiliary:
  * this driver.
  */
 static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-                               int page, int sndcmd)
+                               int page)
 {
        struct gpmi_nand_data *this = chip->priv;
 
@@ -1100,11 +1093,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
                chip->oob_poi[0] = chip->read_byte(mtd);
        }
 
-       /*
-        * Return true, indicating that the next call to this function must send
-        * a command.
-        */
-       return true;
+       return 0;
 }
 
 static int
@@ -1318,7 +1307,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
                /* Write the first page of the current stride. */
                dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
                chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-               chip->ecc.write_page_raw(mtd, chip, buffer);
+               chip->ecc.write_page_raw(mtd, chip, buffer, 0);
                chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
 
                /* Wait for the write to finish. */
@@ -1444,6 +1433,10 @@ static int gpmi_pre_bbt_scan(struct gpmi_nand_data  *this)
        if (ret)
                return ret;
 
+       /* Adjust the ECC strength according to the chip. */
+       this->nand.ecc.strength = this->bch_geometry.ecc_strength;
+       this->mtd.ecc_strength = this->bch_geometry.ecc_strength;
+
        /* NAND boot init, depends on the gpmi_set_geometry(). */
        return nand_boot_init(this);
 }
@@ -1471,9 +1464,9 @@ void gpmi_nfc_exit(struct gpmi_nand_data *this)
 
 static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
 {
-       struct gpmi_nand_platform_data *pdata = this->pdata;
        struct mtd_info  *mtd = &this->mtd;
        struct nand_chip *chip = &this->nand;
+       struct mtd_part_parser_data ppdata = {};
        int ret;
 
        /* init current chip */
@@ -1502,6 +1495,7 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
        chip->options           |= NAND_NO_SUBPAGE_WRITE;
        chip->ecc.mode          = NAND_ECC_HW;
        chip->ecc.size          = 1;
+       chip->ecc.strength      = 8;
        chip->ecc.layout        = &gpmi_hw_ecclayout;
 
        /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
@@ -1511,14 +1505,14 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
        if (ret)
                goto err_out;
 
-       ret = nand_scan(mtd, pdata->max_chip_count);
+       ret = nand_scan(mtd, 1);
        if (ret) {
                pr_err("Chip scan failed\n");
                goto err_out;
        }
 
-       ret = mtd_device_parse_register(mtd, NULL, NULL,
-                       pdata->partitions, pdata->partition_count);
+       ppdata.of_node = this->pdev->dev.of_node;
+       ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
        if (ret)
                goto err_out;
        return 0;
@@ -1528,12 +1522,41 @@ err_out:
        return ret;
 }
 
+static const struct platform_device_id gpmi_ids[] = {
+       { .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
+       { .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
+       { .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
+       {},
+};
+
+static const struct of_device_id gpmi_nand_id_table[] = {
+       {
+               .compatible = "fsl,imx23-gpmi-nand",
+               .data = (void *)&gpmi_ids[IS_MX23]
+       }, {
+               .compatible = "fsl,imx28-gpmi-nand",
+               .data = (void *)&gpmi_ids[IS_MX28]
+       }, {
+               .compatible = "fsl,imx6q-gpmi-nand",
+               .data = (void *)&gpmi_ids[IS_MX6Q]
+       }, {}
+};
+MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
+
 static int __devinit gpmi_nand_probe(struct platform_device *pdev)
 {
-       struct gpmi_nand_platform_data *pdata = pdev->dev.platform_data;
        struct gpmi_nand_data *this;
+       const struct of_device_id *of_id;
        int ret;
 
+       of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
+       if (of_id) {
+               pdev->id_entry = of_id->data;
+       } else {
+               pr_err("Failed to find the right device id.\n");
+               return -ENOMEM;
+       }
+
        this = kzalloc(sizeof(*this), GFP_KERNEL);
        if (!this) {
                pr_err("Failed to allocate per-device memory\n");
@@ -1543,13 +1566,6 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, this);
        this->pdev  = pdev;
        this->dev   = &pdev->dev;
-       this->pdata = pdata;
-
-       if (pdata->platform_init) {
-               ret = pdata->platform_init();
-               if (ret)
-                       goto platform_init_error;
-       }
 
        ret = acquire_resources(this);
        if (ret)
@@ -1567,7 +1583,6 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev)
 
 exit_nfc_init:
        release_resources(this);
-platform_init_error:
 exit_acquire_resources:
        platform_set_drvdata(pdev, NULL);
        kfree(this);
@@ -1585,19 +1600,10 @@ static int __exit gpmi_nand_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct platform_device_id gpmi_ids[] = {
-       {
-               .name = "imx23-gpmi-nand",
-               .driver_data = IS_MX23,
-       }, {
-               .name = "imx28-gpmi-nand",
-               .driver_data = IS_MX28,
-       }, {},
-};
-
 static struct platform_driver gpmi_nand_driver = {
        .driver = {
                .name = "gpmi-nand",
+               .of_match_table = gpmi_nand_id_table,
        },
        .probe   = gpmi_nand_probe,
        .remove  = __exit_p(gpmi_nand_remove),
index ec6180d4ff8ffa22314ce923802c606910492166..ce5daa1609203923caee19f576be884c27919a97 100644 (file)
@@ -266,8 +266,10 @@ extern int gpmi_read_page(struct gpmi_nand_data *,
 #define STATUS_UNCORRECTABLE   0xfe
 
 /* Use the platform_id to distinguish different Archs. */
-#define IS_MX23                        0x1
-#define IS_MX28                        0x2
+#define IS_MX23                        0x0
+#define IS_MX28                        0x1
+#define IS_MX6Q                        0x2
 #define GPMI_IS_MX23(x)                ((x)->pdev->id_entry->driver_data == IS_MX23)
 #define GPMI_IS_MX28(x)                ((x)->pdev->id_entry->driver_data == IS_MX28)
+#define GPMI_IS_MX6Q(x)                ((x)->pdev->id_entry->driver_data == IS_MX6Q)
 #endif
index 9bf5ce5fa22d0a5c6ad67d0800131efd27157ebb..50166e93ba96696e562d201c694dcdb51488bb14 100644 (file)
@@ -124,7 +124,6 @@ static int __init h1910_init(void)
        /* 15 us command delay time */
        this->chip_delay = 50;
        this->ecc.mode = NAND_ECC_SOFT;
-       this->options = NAND_NO_AUTOINCR;
 
        /* Scan to find existence of the device */
        if (nand_scan(h1910_nand_mtd, 1)) {
index e4147e8acb7c560f2a8c3fdae68bf1027266fdba..a6fa884ae49bb08deba807ae6aeca7f0fcbabff7 100644 (file)
@@ -332,11 +332,7 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
        chip->ecc.mode          = NAND_ECC_HW_OOB_FIRST;
        chip->ecc.size          = 512;
        chip->ecc.bytes         = 9;
-       chip->ecc.strength      = 2;
-       /*
-        * FIXME: ecc_strength value of 2 bits per 512 bytes of data is a
-        * conservative guess, given 9 ecc bytes and reed-solomon alg.
-        */
+       chip->ecc.strength      = 4;
 
        if (pdata)
                chip->ecc.layout = pdata->ecc_layout;
index c240cf1af96166f8c2431e029dc90064a443ecf4..c259c24d7986034f3a2d14e02d5c06fb9a19a323 100644 (file)
@@ -734,7 +734,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
        chip->write_buf = mpc5121_nfc_write_buf;
        chip->verify_buf = mpc5121_nfc_verify_buf;
        chip->select_chip = mpc5121_nfc_select_chip;
-       chip->options = NAND_NO_AUTOINCR;
        chip->bbt_options = NAND_BBT_USE_FLASH;
        chip->ecc.mode = NAND_ECC_SOFT;
 
index cc0678a967c12c6b054d21ee3fe5b868ee997924..c58e6a93f44501d68056d46ea40dc0373d9a51a7 100644 (file)
@@ -32,6 +32,8 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/completion.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
 
 #include <asm/mach/flash.h>
 #include <mach/mxc_nand.h>
 
 #define NFC_V3_DELAY_LINE              (host->regs_ip + 0x34)
 
+struct mxc_nand_host;
+
+struct mxc_nand_devtype_data {
+       void (*preset)(struct mtd_info *);
+       void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
+       void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
+       void (*send_page)(struct mtd_info *, unsigned int);
+       void (*send_read_id)(struct mxc_nand_host *);
+       uint16_t (*get_dev_status)(struct mxc_nand_host *);
+       int (*check_int)(struct mxc_nand_host *);
+       void (*irq_control)(struct mxc_nand_host *, int);
+       u32 (*get_ecc_status)(struct mxc_nand_host *);
+       struct nand_ecclayout *ecclayout_512, *ecclayout_2k, *ecclayout_4k;
+       void (*select_chip)(struct mtd_info *mtd, int chip);
+       int (*correct_data)(struct mtd_info *mtd, u_char *dat,
+                       u_char *read_ecc, u_char *calc_ecc);
+
+       /*
+        * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
+        * (CONFIG1:INT_MSK is set). To handle this the driver uses
+        * enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK
+        */
+       int irqpending_quirk;
+       int needs_ip;
+
+       size_t regs_offset;
+       size_t spare0_offset;
+       size_t axi_offset;
+
+       int spare_len;
+       int eccbytes;
+       int eccsize;
+};
+
 struct mxc_nand_host {
        struct mtd_info         mtd;
        struct nand_chip        nand;
        struct device           *dev;
 
-       void                    *spare0;
-       void                    *main_area0;
+       void __iomem            *spare0;
+       void __iomem            *main_area0;
 
        void __iomem            *base;
        void __iomem            *regs;
@@ -163,16 +199,9 @@ struct mxc_nand_host {
 
        uint8_t                 *data_buf;
        unsigned int            buf_start;
-       int                     spare_len;
-
-       void                    (*preset)(struct mtd_info *);
-       void                    (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
-       void                    (*send_addr)(struct mxc_nand_host *, uint16_t, int);
-       void                    (*send_page)(struct mtd_info *, unsigned int);
-       void                    (*send_read_id)(struct mxc_nand_host *);
-       uint16_t                (*get_dev_status)(struct mxc_nand_host *);
-       int                     (*check_int)(struct mxc_nand_host *);
-       void                    (*irq_control)(struct mxc_nand_host *, int);
+
+       const struct mxc_nand_devtype_data *devtype_data;
+       struct mxc_nand_platform_data pdata;
 };
 
 /* OOB placement block for use with hardware ecc generation */
@@ -242,21 +271,7 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
        }
 };
 
-static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
-
-static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
-{
-       struct mxc_nand_host *host = dev_id;
-
-       if (!host->check_int(host))
-               return IRQ_NONE;
-
-       host->irq_control(host, 0);
-
-       complete(&host->op_completion);
-
-       return IRQ_HANDLED;
-}
+static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL };
 
 static int check_int_v3(struct mxc_nand_host *host)
 {
@@ -280,26 +295,12 @@ static int check_int_v1_v2(struct mxc_nand_host *host)
        if (!(tmp & NFC_V1_V2_CONFIG2_INT))
                return 0;
 
-       if (!cpu_is_mx21())
+       if (!host->devtype_data->irqpending_quirk)
                writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
 
        return 1;
 }
 
-/*
- * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit
- * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the
- * driver can enable/disable the irq line rather than simply masking the
- * interrupts.
- */
-static void irq_control_mx21(struct mxc_nand_host *host, int activate)
-{
-       if (activate)
-               enable_irq(host->irq);
-       else
-               disable_irq_nosync(host->irq);
-}
-
 static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
 {
        uint16_t tmp;
@@ -328,6 +329,47 @@ static void irq_control_v3(struct mxc_nand_host *host, int activate)
        writel(tmp, NFC_V3_CONFIG2);
 }
 
+static void irq_control(struct mxc_nand_host *host, int activate)
+{
+       if (host->devtype_data->irqpending_quirk) {
+               if (activate)
+                       enable_irq(host->irq);
+               else
+                       disable_irq_nosync(host->irq);
+       } else {
+               host->devtype_data->irq_control(host, activate);
+       }
+}
+
+static u32 get_ecc_status_v1(struct mxc_nand_host *host)
+{
+       return readw(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v2(struct mxc_nand_host *host)
+{
+       return readl(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v3(struct mxc_nand_host *host)
+{
+       return readl(NFC_V3_ECC_STATUS_RESULT);
+}
+
+static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
+{
+       struct mxc_nand_host *host = dev_id;
+
+       if (!host->devtype_data->check_int(host))
+               return IRQ_NONE;
+
+       irq_control(host, 0);
+
+       complete(&host->op_completion);
+
+       return IRQ_HANDLED;
+}
+
 /* This function polls the NANDFC to wait for the basic operation to
  * complete by checking the INT bit of config2 register.
  */
@@ -336,14 +378,14 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
        int max_retries = 8000;
 
        if (useirq) {
-               if (!host->check_int(host)) {
+               if (!host->devtype_data->check_int(host)) {
                        INIT_COMPLETION(host->op_completion);
-                       host->irq_control(host, 1);
+                       irq_control(host, 1);
                        wait_for_completion(&host->op_completion);
                }
        } else {
                while (max_retries-- > 0) {
-                       if (host->check_int(host))
+                       if (host->devtype_data->check_int(host))
                                break;
 
                        udelay(1);
@@ -374,7 +416,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
        writew(cmd, NFC_V1_V2_FLASH_CMD);
        writew(NFC_CMD, NFC_V1_V2_CONFIG2);
 
-       if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) {
+       if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) {
                int max_retries = 100;
                /* Reset completion is indicated by NFC_CONFIG2 */
                /* being set to 0 */
@@ -433,13 +475,27 @@ static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
        wait_op_done(host, false);
 }
 
-static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops)
+static void send_page_v2(struct mtd_info *mtd, unsigned int ops)
+{
+       struct nand_chip *nand_chip = mtd->priv;
+       struct mxc_nand_host *host = nand_chip->priv;
+
+       /* NANDFC buffer 0 is used for page read/write */
+       writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+       writew(ops, NFC_V1_V2_CONFIG2);
+
+       /* Wait for operation to complete */
+       wait_op_done(host, true);
+}
+
+static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
 {
        struct nand_chip *nand_chip = mtd->priv;
        struct mxc_nand_host *host = nand_chip->priv;
        int bufs, i;
 
-       if (nfc_is_v1() && mtd->writesize > 512)
+       if (mtd->writesize > 512)
                bufs = 4;
        else
                bufs = 1;
@@ -463,7 +519,7 @@ static void send_read_id_v3(struct mxc_nand_host *host)
 
        wait_op_done(host, true);
 
-       memcpy(host->data_buf, host->main_area0, 16);
+       memcpy_fromio(host->data_buf, host->main_area0, 16);
 }
 
 /* Request the NANDFC to perform a read of the NAND device ID. */
@@ -479,7 +535,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
        /* Wait for operation to complete */
        wait_op_done(host, true);
 
-       memcpy(host->data_buf, host->main_area0, 16);
+       memcpy_fromio(host->data_buf, host->main_area0, 16);
 
        if (this->options & NAND_BUSWIDTH_16) {
                /* compress the ID info */
@@ -555,7 +611,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
         * additional correction.  2-Bit errors cannot be corrected by
         * HW ECC, so we need to return failure
         */
-       uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT);
+       uint16_t ecc_status = get_ecc_status_v1(host);
 
        if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
                pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
@@ -580,10 +636,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
 
        no_subpages = mtd->writesize >> 9;
 
-       if (nfc_is_v21())
-               ecc_stat = readl(NFC_V1_V2_ECC_STATUS_RESULT);
-       else
-               ecc_stat = readl(NFC_V3_ECC_STATUS_RESULT);
+       ecc_stat = host->devtype_data->get_ecc_status(host);
 
        do {
                err = ecc_stat & ecc_bit_mask;
@@ -616,7 +669,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
 
        /* Check for status request */
        if (host->status_request)
-               return host->get_dev_status(host) & 0xFF;
+               return host->devtype_data->get_dev_status(host) & 0xFF;
 
        ret = *(uint8_t *)(host->data_buf + host->buf_start);
        host->buf_start++;
@@ -682,7 +735,28 @@ static int mxc_nand_verify_buf(struct mtd_info *mtd,
 
 /* This function is used by upper layer for select and
  * deselect of the NAND chip */
-static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
+static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
+{
+       struct nand_chip *nand_chip = mtd->priv;
+       struct mxc_nand_host *host = nand_chip->priv;
+
+       if (chip == -1) {
+               /* Disable the NFC clock */
+               if (host->clk_act) {
+                       clk_disable_unprepare(host->clk);
+                       host->clk_act = 0;
+               }
+               return;
+       }
+
+       if (!host->clk_act) {
+               /* Enable the NFC clock */
+               clk_prepare_enable(host->clk);
+               host->clk_act = 1;
+       }
+}
+
+static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
 {
        struct nand_chip *nand_chip = mtd->priv;
        struct mxc_nand_host *host = nand_chip->priv;
@@ -702,10 +776,8 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
                host->clk_act = 1;
        }
 
-       if (nfc_is_v21()) {
-               host->active_cs = chip;
-               writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
-       }
+       host->active_cs = chip;
+       writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
 }
 
 /*
@@ -718,23 +790,23 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
        u16 i, j;
        u16 n = mtd->writesize >> 9;
        u8 *d = host->data_buf + mtd->writesize;
-       u8 *s = host->spare0;
-       u16 t = host->spare_len;
+       u8 __iomem *s = host->spare0;
+       u16 t = host->devtype_data->spare_len;
 
        j = (mtd->oobsize / n >> 1) << 1;
 
        if (bfrom) {
                for (i = 0; i < n - 1; i++)
-                       memcpy(d + i * j, s + i * t, j);
+                       memcpy_fromio(d + i * j, s + i * t, j);
 
                /* the last section */
-               memcpy(d + i * j, s + i * t, mtd->oobsize - i * j);
+               memcpy_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
        } else {
                for (i = 0; i < n - 1; i++)
-                       memcpy(&s[i * t], &d[i * j], j);
+                       memcpy_toio(&s[i * t], &d[i * j], j);
 
                /* the last section */
-               memcpy(&s[i * t], &d[i * j], mtd->oobsize - i * j);
+               memcpy_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
        }
 }
 
@@ -751,34 +823,44 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
                 * perform a read/write buf operation, the saved column
                  * address is used to index into the full page.
                 */
-               host->send_addr(host, 0, page_addr == -1);
+               host->devtype_data->send_addr(host, 0, page_addr == -1);
                if (mtd->writesize > 512)
                        /* another col addr cycle for 2k page */
-                       host->send_addr(host, 0, false);
+                       host->devtype_data->send_addr(host, 0, false);
        }
 
        /* Write out page address, if necessary */
        if (page_addr != -1) {
                /* paddr_0 - p_addr_7 */
-               host->send_addr(host, (page_addr & 0xff), false);
+               host->devtype_data->send_addr(host, (page_addr & 0xff), false);
 
                if (mtd->writesize > 512) {
                        if (mtd->size >= 0x10000000) {
                                /* paddr_8 - paddr_15 */
-                               host->send_addr(host, (page_addr >> 8) & 0xff, false);
-                               host->send_addr(host, (page_addr >> 16) & 0xff, true);
+                               host->devtype_data->send_addr(host,
+                                               (page_addr >> 8) & 0xff,
+                                               false);
+                               host->devtype_data->send_addr(host,
+                                               (page_addr >> 16) & 0xff,
+                                               true);
                        } else
                                /* paddr_8 - paddr_15 */
-                               host->send_addr(host, (page_addr >> 8) & 0xff, true);
+                               host->devtype_data->send_addr(host,
+                                               (page_addr >> 8) & 0xff, true);
                } else {
                        /* One more address cycle for higher density devices */
                        if (mtd->size >= 0x4000000) {
                                /* paddr_8 - paddr_15 */
-                               host->send_addr(host, (page_addr >> 8) & 0xff, false);
-                               host->send_addr(host, (page_addr >> 16) & 0xff, true);
+                               host->devtype_data->send_addr(host,
+                                               (page_addr >> 8) & 0xff,
+                                               false);
+                               host->devtype_data->send_addr(host,
+                                               (page_addr >> 16) & 0xff,
+                                               true);
                        } else
                                /* paddr_8 - paddr_15 */
-                               host->send_addr(host, (page_addr >> 8) & 0xff, true);
+                               host->devtype_data->send_addr(host,
+                                               (page_addr >> 8) & 0xff, true);
                }
        }
 }
@@ -800,7 +882,35 @@ static int get_eccsize(struct mtd_info *mtd)
                return 8;
 }
 
-static void preset_v1_v2(struct mtd_info *mtd)
+static void preset_v1(struct mtd_info *mtd)
+{
+       struct nand_chip *nand_chip = mtd->priv;
+       struct mxc_nand_host *host = nand_chip->priv;
+       uint16_t config1 = 0;
+
+       if (nand_chip->ecc.mode == NAND_ECC_HW)
+               config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+       if (!host->devtype_data->irqpending_quirk)
+               config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+       host->eccsize = 1;
+
+       writew(config1, NFC_V1_V2_CONFIG1);
+       /* preset operation */
+
+       /* Unlock the internal RAM Buffer */
+       writew(0x2, NFC_V1_V2_CONFIG);
+
+       /* Blocks to be unlocked */
+       writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
+       writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
+
+       /* Unlock Block Command for given address range */
+       writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static void preset_v2(struct mtd_info *mtd)
 {
        struct nand_chip *nand_chip = mtd->priv;
        struct mxc_nand_host *host = nand_chip->priv;
@@ -809,13 +919,12 @@ static void preset_v1_v2(struct mtd_info *mtd)
        if (nand_chip->ecc.mode == NAND_ECC_HW)
                config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
 
-       if (nfc_is_v21())
-               config1 |= NFC_V2_CONFIG1_FP_INT;
+       config1 |= NFC_V2_CONFIG1_FP_INT;
 
-       if (!cpu_is_mx21())
+       if (!host->devtype_data->irqpending_quirk)
                config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
 
-       if (nfc_is_v21() && mtd->writesize) {
+       if (mtd->writesize) {
                uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
 
                host->eccsize = get_eccsize(mtd);
@@ -834,20 +943,14 @@ static void preset_v1_v2(struct mtd_info *mtd)
        writew(0x2, NFC_V1_V2_CONFIG);
 
        /* Blocks to be unlocked */
-       if (nfc_is_v21()) {
-               writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
-               writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
-               writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
-               writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
-               writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
-               writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
-               writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
-               writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
-       } else if (nfc_is_v1()) {
-               writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
-               writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
-       } else
-               BUG();
+       writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
+       writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
+       writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
+       writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
+       writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
+       writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
+       writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
+       writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
 
        /* Unlock Block Command for given address range */
        writew(0x4, NFC_V1_V2_WRPROT);
@@ -937,15 +1040,15 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
        /* Command pre-processing step */
        switch (command) {
        case NAND_CMD_RESET:
-               host->preset(mtd);
-               host->send_cmd(host, command, false);
+               host->devtype_data->preset(mtd);
+               host->devtype_data->send_cmd(host, command, false);
                break;
 
        case NAND_CMD_STATUS:
                host->buf_start = 0;
                host->status_request = true;
 
-               host->send_cmd(host, command, true);
+               host->devtype_data->send_cmd(host, command, true);
                mxc_do_addr_cycle(mtd, column, page_addr);
                break;
 
@@ -958,15 +1061,16 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
 
                command = NAND_CMD_READ0; /* only READ0 is valid */
 
-               host->send_cmd(host, command, false);
+               host->devtype_data->send_cmd(host, command, false);
                mxc_do_addr_cycle(mtd, column, page_addr);
 
                if (mtd->writesize > 512)
-                       host->send_cmd(host, NAND_CMD_READSTART, true);
+                       host->devtype_data->send_cmd(host,
+                                       NAND_CMD_READSTART, true);
 
-               host->send_page(mtd, NFC_OUTPUT);
+               host->devtype_data->send_page(mtd, NFC_OUTPUT);
 
-               memcpy(host->data_buf, host->main_area0, mtd->writesize);
+               memcpy_fromio(host->data_buf, host->main_area0, mtd->writesize);
                copy_spare(mtd, true);
                break;
 
@@ -977,28 +1081,28 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
 
                host->buf_start = column;
 
-               host->send_cmd(host, command, false);
+               host->devtype_data->send_cmd(host, command, false);
                mxc_do_addr_cycle(mtd, column, page_addr);
                break;
 
        case NAND_CMD_PAGEPROG:
-               memcpy(host->main_area0, host->data_buf, mtd->writesize);
+               memcpy_toio(host->main_area0, host->data_buf, mtd->writesize);
                copy_spare(mtd, false);
-               host->send_page(mtd, NFC_INPUT);
-               host->send_cmd(host, command, true);
+               host->devtype_data->send_page(mtd, NFC_INPUT);
+               host->devtype_data->send_cmd(host, command, true);
                mxc_do_addr_cycle(mtd, column, page_addr);
                break;
 
        case NAND_CMD_READID:
-               host->send_cmd(host, command, true);
+               host->devtype_data->send_cmd(host, command, true);
                mxc_do_addr_cycle(mtd, column, page_addr);
-               host->send_read_id(host);
+               host->devtype_data->send_read_id(host);
                host->buf_start = column;
                break;
 
        case NAND_CMD_ERASE1:
        case NAND_CMD_ERASE2:
-               host->send_cmd(host, command, false);
+               host->devtype_data->send_cmd(host, command, false);
                mxc_do_addr_cycle(mtd, column, page_addr);
 
                break;
@@ -1032,15 +1136,191 @@ static struct nand_bbt_descr bbt_mirror_descr = {
        .pattern = mirror_pattern,
 };
 
+/* v1 + irqpending_quirk: i.MX21 */
+static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
+       .preset = preset_v1,
+       .send_cmd = send_cmd_v1_v2,
+       .send_addr = send_addr_v1_v2,
+       .send_page = send_page_v1,
+       .send_read_id = send_read_id_v1_v2,
+       .get_dev_status = get_dev_status_v1_v2,
+       .check_int = check_int_v1_v2,
+       .irq_control = irq_control_v1_v2,
+       .get_ecc_status = get_ecc_status_v1,
+       .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
+       .ecclayout_2k = &nandv1_hw_eccoob_largepage,
+       .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+       .select_chip = mxc_nand_select_chip_v1_v3,
+       .correct_data = mxc_nand_correct_data_v1,
+       .irqpending_quirk = 1,
+       .needs_ip = 0,
+       .regs_offset = 0xe00,
+       .spare0_offset = 0x800,
+       .spare_len = 16,
+       .eccbytes = 3,
+       .eccsize = 1,
+};
+
+/* v1 + !irqpending_quirk: i.MX27, i.MX31 */
+static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
+       .preset = preset_v1,
+       .send_cmd = send_cmd_v1_v2,
+       .send_addr = send_addr_v1_v2,
+       .send_page = send_page_v1,
+       .send_read_id = send_read_id_v1_v2,
+       .get_dev_status = get_dev_status_v1_v2,
+       .check_int = check_int_v1_v2,
+       .irq_control = irq_control_v1_v2,
+       .get_ecc_status = get_ecc_status_v1,
+       .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
+       .ecclayout_2k = &nandv1_hw_eccoob_largepage,
+       .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+       .select_chip = mxc_nand_select_chip_v1_v3,
+       .correct_data = mxc_nand_correct_data_v1,
+       .irqpending_quirk = 0,
+       .needs_ip = 0,
+       .regs_offset = 0xe00,
+       .spare0_offset = 0x800,
+       .axi_offset = 0,
+       .spare_len = 16,
+       .eccbytes = 3,
+       .eccsize = 1,
+};
+
+/* v21: i.MX25, i.MX35 */
+static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
+       .preset = preset_v2,
+       .send_cmd = send_cmd_v1_v2,
+       .send_addr = send_addr_v1_v2,
+       .send_page = send_page_v2,
+       .send_read_id = send_read_id_v1_v2,
+       .get_dev_status = get_dev_status_v1_v2,
+       .check_int = check_int_v1_v2,
+       .irq_control = irq_control_v1_v2,
+       .get_ecc_status = get_ecc_status_v2,
+       .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+       .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+       .ecclayout_4k = &nandv2_hw_eccoob_4k,
+       .select_chip = mxc_nand_select_chip_v2,
+       .correct_data = mxc_nand_correct_data_v2_v3,
+       .irqpending_quirk = 0,
+       .needs_ip = 0,
+       .regs_offset = 0x1e00,
+       .spare0_offset = 0x1000,
+       .axi_offset = 0,
+       .spare_len = 64,
+       .eccbytes = 9,
+       .eccsize = 0,
+};
+
+/* v3: i.MX51, i.MX53 */
+static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
+       .preset = preset_v3,
+       .send_cmd = send_cmd_v3,
+       .send_addr = send_addr_v3,
+       .send_page = send_page_v3,
+       .send_read_id = send_read_id_v3,
+       .get_dev_status = get_dev_status_v3,
+       .check_int = check_int_v3,
+       .irq_control = irq_control_v3,
+       .get_ecc_status = get_ecc_status_v3,
+       .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+       .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+       .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+       .select_chip = mxc_nand_select_chip_v1_v3,
+       .correct_data = mxc_nand_correct_data_v2_v3,
+       .irqpending_quirk = 0,
+       .needs_ip = 1,
+       .regs_offset = 0,
+       .spare0_offset = 0x1000,
+       .axi_offset = 0x1e00,
+       .spare_len = 64,
+       .eccbytes = 0,
+       .eccsize = 0,
+};
+
+#ifdef CONFIG_OF_MTD
+static const struct of_device_id mxcnd_dt_ids[] = {
+       {
+               .compatible = "fsl,imx21-nand",
+               .data = &imx21_nand_devtype_data,
+       }, {
+               .compatible = "fsl,imx27-nand",
+               .data = &imx27_nand_devtype_data,
+       }, {
+               .compatible = "fsl,imx25-nand",
+               .data = &imx25_nand_devtype_data,
+       }, {
+               .compatible = "fsl,imx51-nand",
+               .data = &imx51_nand_devtype_data,
+       },
+       { /* sentinel */ }
+};
+
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+       struct device_node *np = host->dev->of_node;
+       struct mxc_nand_platform_data *pdata = &host->pdata;
+       const struct of_device_id *of_id =
+               of_match_device(mxcnd_dt_ids, host->dev);
+       int buswidth;
+
+       if (!np)
+               return 1;
+
+       if (of_get_nand_ecc_mode(np) >= 0)
+               pdata->hw_ecc = 1;
+
+       pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
+
+       buswidth = of_get_nand_bus_width(np);
+       if (buswidth < 0)
+               return buswidth;
+
+       pdata->width = buswidth / 8;
+
+       host->devtype_data = of_id->data;
+
+       return 0;
+}
+#else
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+       return 1;
+}
+#endif
+
+static int __init mxcnd_probe_pdata(struct mxc_nand_host *host)
+{
+       struct mxc_nand_platform_data *pdata = host->dev->platform_data;
+
+       if (!pdata)
+               return -ENODEV;
+
+       host->pdata = *pdata;
+
+       if (nfc_is_v1()) {
+               if (cpu_is_mx21())
+                       host->devtype_data = &imx21_nand_devtype_data;
+               else
+                       host->devtype_data = &imx27_nand_devtype_data;
+       } else if (nfc_is_v21()) {
+               host->devtype_data = &imx25_nand_devtype_data;
+       } else if (nfc_is_v3_2()) {
+               host->devtype_data = &imx51_nand_devtype_data;
+       } else
+               BUG();
+
+       return 0;
+}
+
 static int __init mxcnd_probe(struct platform_device *pdev)
 {
        struct nand_chip *this;
        struct mtd_info *mtd;
-       struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
        struct mxc_nand_host *host;
        struct resource *res;
        int err = 0;
-       struct nand_ecclayout *oob_smallpage, *oob_largepage;
 
        /* Allocate memory for MTD device structure and private data */
        host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
@@ -1065,7 +1345,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
        this->priv = host;
        this->dev_ready = mxc_nand_dev_ready;
        this->cmdfunc = mxc_nand_command;
-       this->select_chip = mxc_nand_select_chip;
        this->read_byte = mxc_nand_read_byte;
        this->read_word = mxc_nand_read_word;
        this->write_buf = mxc_nand_write_buf;
@@ -1078,7 +1357,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
                goto eclk;
        }
 
-       clk_enable(host->clk);
+       clk_prepare_enable(host->clk);
        host->clk_act = 1;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1095,36 +1374,26 @@ static int __init mxcnd_probe(struct platform_device *pdev)
 
        host->main_area0 = host->base;
 
-       if (nfc_is_v1() || nfc_is_v21()) {
-               host->preset = preset_v1_v2;
-               host->send_cmd = send_cmd_v1_v2;
-               host->send_addr = send_addr_v1_v2;
-               host->send_page = send_page_v1_v2;
-               host->send_read_id = send_read_id_v1_v2;
-               host->get_dev_status = get_dev_status_v1_v2;
-               host->check_int = check_int_v1_v2;
-               if (cpu_is_mx21())
-                       host->irq_control = irq_control_mx21;
-               else
-                       host->irq_control = irq_control_v1_v2;
-       }
+       err = mxcnd_probe_dt(host);
+       if (err > 0)
+               err = mxcnd_probe_pdata(host);
+       if (err < 0)
+               goto eirq;
 
-       if (nfc_is_v21()) {
-               host->regs = host->base + 0x1e00;
-               host->spare0 = host->base + 0x1000;
-               host->spare_len = 64;
-               oob_smallpage = &nandv2_hw_eccoob_smallpage;
-               oob_largepage = &nandv2_hw_eccoob_largepage;
-               this->ecc.bytes = 9;
-       } else if (nfc_is_v1()) {
-               host->regs = host->base + 0xe00;
-               host->spare0 = host->base + 0x800;
-               host->spare_len = 16;
-               oob_smallpage = &nandv1_hw_eccoob_smallpage;
-               oob_largepage = &nandv1_hw_eccoob_largepage;
-               this->ecc.bytes = 3;
-               host->eccsize = 1;
-       } else if (nfc_is_v3_2()) {
+       if (host->devtype_data->regs_offset)
+               host->regs = host->base + host->devtype_data->regs_offset;
+       host->spare0 = host->base + host->devtype_data->spare0_offset;
+       if (host->devtype_data->axi_offset)
+               host->regs_axi = host->base + host->devtype_data->axi_offset;
+
+       this->ecc.bytes = host->devtype_data->eccbytes;
+       host->eccsize = host->devtype_data->eccsize;
+
+       this->select_chip = host->devtype_data->select_chip;
+       this->ecc.size = 512;
+       this->ecc.layout = host->devtype_data->ecclayout_512;
+
+       if (host->devtype_data->needs_ip) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
                if (!res) {
                        err = -ENODEV;
@@ -1135,42 +1404,22 @@ static int __init mxcnd_probe(struct platform_device *pdev)
                        err = -ENOMEM;
                        goto eirq;
                }
-               host->regs_axi = host->base + 0x1e00;
-               host->spare0 = host->base + 0x1000;
-               host->spare_len = 64;
-               host->preset = preset_v3;
-               host->send_cmd = send_cmd_v3;
-               host->send_addr = send_addr_v3;
-               host->send_page = send_page_v3;
-               host->send_read_id = send_read_id_v3;
-               host->check_int = check_int_v3;
-               host->get_dev_status = get_dev_status_v3;
-               host->irq_control = irq_control_v3;
-               oob_smallpage = &nandv2_hw_eccoob_smallpage;
-               oob_largepage = &nandv2_hw_eccoob_largepage;
-       } else
-               BUG();
-
-       this->ecc.size = 512;
-       this->ecc.layout = oob_smallpage;
+       }
 
-       if (pdata->hw_ecc) {
+       if (host->pdata.hw_ecc) {
                this->ecc.calculate = mxc_nand_calculate_ecc;
                this->ecc.hwctl = mxc_nand_enable_hwecc;
-               if (nfc_is_v1())
-                       this->ecc.correct = mxc_nand_correct_data_v1;
-               else
-                       this->ecc.correct = mxc_nand_correct_data_v2_v3;
+               this->ecc.correct = host->devtype_data->correct_data;
                this->ecc.mode = NAND_ECC_HW;
        } else {
                this->ecc.mode = NAND_ECC_SOFT;
        }
 
-       /* NAND bus width determines access funtions used by upper layer */
-       if (pdata->width == 2)
+       /* NAND bus width determines access functions used by upper layer */
+       if (host->pdata.width == 2)
                this->options |= NAND_BUSWIDTH_16;
 
-       if (pdata->flash_bbt) {
+       if (host->pdata.flash_bbt) {
                this->bbt_td = &bbt_main_descr;
                this->bbt_md = &bbt_mirror_descr;
                /* update flash based bbt */
@@ -1182,28 +1431,25 @@ static int __init mxcnd_probe(struct platform_device *pdev)
        host->irq = platform_get_irq(pdev, 0);
 
        /*
-        * mask the interrupt. For i.MX21 explicitely call
-        * irq_control_v1_v2 to use the mask bit. We can't call
-        * disable_irq_nosync() for an interrupt we do not own yet.
+        * Use host->devtype_data->irq_control() here instead of irq_control()
+        * because we must not disable_irq_nosync without having requested the
+        * irq.
         */
-       if (cpu_is_mx21())
-               irq_control_v1_v2(host, 0);
-       else
-               host->irq_control(host, 0);
+       host->devtype_data->irq_control(host, 0);
 
        err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
        if (err)
                goto eirq;
 
-       host->irq_control(host, 0);
-
        /*
-        * Now that the interrupt is disabled make sure the interrupt
-        * mask bit is cleared on i.MX21. Otherwise we can't read
-        * the interrupt status bit on this machine.
+        * Now that we "own" the interrupt make sure the interrupt mask bit is
+        * cleared on i.MX21. Otherwise we can't read the interrupt status bit
+        * on this machine.
         */
-       if (cpu_is_mx21())
-               irq_control_v1_v2(host, 1);
+       if (host->devtype_data->irqpending_quirk) {
+               disable_irq_nosync(host->irq);
+               host->devtype_data->irq_control(host, 1);
+       }
 
        /* first scan to find the device and get the page size */
        if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
@@ -1212,18 +1458,12 @@ static int __init mxcnd_probe(struct platform_device *pdev)
        }
 
        /* Call preset again, with correct writesize this time */
-       host->preset(mtd);
+       host->devtype_data->preset(mtd);
 
        if (mtd->writesize == 2048)
-               this->ecc.layout = oob_largepage;
-       if (nfc_is_v21() && mtd->writesize == 4096)
-               this->ecc.layout = &nandv2_hw_eccoob_4k;
-
-       /* second phase scan */
-       if (nand_scan_tail(mtd)) {
-               err = -ENXIO;
-               goto escan;
-       }
+               this->ecc.layout = host->devtype_data->ecclayout_2k;
+       else if (mtd->writesize == 4096)
+               this->ecc.layout = host->devtype_data->ecclayout_4k;
 
        if (this->ecc.mode == NAND_ECC_HW) {
                if (nfc_is_v1())
@@ -1232,9 +1472,19 @@ static int __init mxcnd_probe(struct platform_device *pdev)
                        this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
        }
 
+       /* second phase scan */
+       if (nand_scan_tail(mtd)) {
+               err = -ENXIO;
+               goto escan;
+       }
+
        /* Register the partitions */
-       mtd_device_parse_register(mtd, part_probes, NULL, pdata->parts,
-                                 pdata->nr_parts);
+       mtd_device_parse_register(mtd, part_probes,
+                       &(struct mtd_part_parser_data){
+                               .of_node = pdev->dev.of_node,
+                       },
+                       host->pdata.parts,
+                       host->pdata.nr_parts);
 
        platform_set_drvdata(pdev, host);
 
@@ -1275,6 +1525,8 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
 static struct platform_driver mxcnd_driver = {
        .driver = {
                   .name = DRIVER_NAME,
+                  .owner = THIS_MODULE,
+                  .of_match_table = of_match_ptr(mxcnd_dt_ids),
        },
        .remove = __devexit_p(mxcnd_remove),
 };
index 47b19c0bb070e3da612b031848496f7551c2589f..d47586cf64ce4af2c802f8783869af8b96ccc0a4 100644 (file)
@@ -1066,15 +1066,17 @@ EXPORT_SYMBOL(nand_lock);
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  *
  * Not for syndrome calculating ECC controllers, which use a special oob layout.
  */
 static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-                             uint8_t *buf, int page)
+                             uint8_t *buf, int oob_required, int page)
 {
        chip->read_buf(mtd, buf, mtd->writesize);
-       chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+       if (oob_required)
+               chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
        return 0;
 }
 
@@ -1083,13 +1085,14 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  *
  * We need a special oob layout and handling even when OOB isn't used.
  */
 static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
-                                       struct nand_chip *chip,
-                                       uint8_t *buf, int page)
+                                      struct nand_chip *chip, uint8_t *buf,
+                                      int oob_required, int page)
 {
        int eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -1126,10 +1129,11 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  */
 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
-                               uint8_t *buf, int page)
+                               uint8_t *buf, int oob_required, int page)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -1138,8 +1142,9 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
        uint8_t *ecc_calc = chip->buffers->ecccalc;
        uint8_t *ecc_code = chip->buffers->ecccode;
        uint32_t *eccpos = chip->ecc.layout->eccpos;
+       unsigned int max_bitflips = 0;
 
-       chip->ecc.read_page_raw(mtd, chip, buf, page);
+       chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
 
        for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
                chip->ecc.calculate(mtd, p, &ecc_calc[i]);
@@ -1154,12 +1159,14 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
                int stat;
 
                stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
-               if (stat < 0)
+               if (stat < 0) {
                        mtd->ecc_stats.failed++;
-               else
+               } else {
                        mtd->ecc_stats.corrected += stat;
+                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
+               }
        }
-       return 0;
+       return max_bitflips;
 }
 
 /**
@@ -1180,6 +1187,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
        int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
        int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
        int index = 0;
+       unsigned int max_bitflips = 0;
 
        /* Column address within the page aligned to ECC size (256bytes) */
        start_step = data_offs / chip->ecc.size;
@@ -1244,12 +1252,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
 
                stat = chip->ecc.correct(mtd, p,
                        &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
-               if (stat < 0)
+               if (stat < 0) {
                        mtd->ecc_stats.failed++;
-               else
+               } else {
                        mtd->ecc_stats.corrected += stat;
+                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
+               }
        }
-       return 0;
+       return max_bitflips;
 }
 
 /**
@@ -1257,12 +1267,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  *
  * Not for syndrome calculating ECC controllers which need a special oob layout.
  */
 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-                               uint8_t *buf, int page)
+                               uint8_t *buf, int oob_required, int page)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -1271,6 +1282,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
        uint8_t *ecc_calc = chip->buffers->ecccalc;
        uint8_t *ecc_code = chip->buffers->ecccode;
        uint32_t *eccpos = chip->ecc.layout->eccpos;
+       unsigned int max_bitflips = 0;
 
        for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
                chip->ecc.hwctl(mtd, NAND_ECC_READ);
@@ -1289,12 +1301,14 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
                int stat;
 
                stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
-               if (stat < 0)
+               if (stat < 0) {
                        mtd->ecc_stats.failed++;
-               else
+               } else {
                        mtd->ecc_stats.corrected += stat;
+                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
+               }
        }
-       return 0;
+       return max_bitflips;
 }
 
 /**
@@ -1302,6 +1316,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  *
  * Hardware ECC for large page chips, require OOB to be read first. For this
@@ -1311,7 +1326,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
  * the data area, by overwriting the NAND manufacturer bad block markings.
  */
 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
-       struct nand_chip *chip, uint8_t *buf, int page)
+       struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -1320,6 +1335,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
        uint8_t *ecc_code = chip->buffers->ecccode;
        uint32_t *eccpos = chip->ecc.layout->eccpos;
        uint8_t *ecc_calc = chip->buffers->ecccalc;
+       unsigned int max_bitflips = 0;
 
        /* Read the OOB area first */
        chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
@@ -1337,12 +1353,14 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
                chip->ecc.calculate(mtd, p, &ecc_calc[i]);
 
                stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
-               if (stat < 0)
+               if (stat < 0) {
                        mtd->ecc_stats.failed++;
-               else
+               } else {
                        mtd->ecc_stats.corrected += stat;
+                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
+               }
        }
-       return 0;
+       return max_bitflips;
 }
 
 /**
@@ -1350,19 +1368,21 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
  * @page: page number to read
  *
  * The hw generator calculates the error syndrome automatically. Therefore we
  * need a special oob layout and handling.
  */
 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
-                                  uint8_t *buf, int page)
+                                  uint8_t *buf, int oob_required, int page)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
        int eccsteps = chip->ecc.steps;
        uint8_t *p = buf;
        uint8_t *oob = chip->oob_poi;
+       unsigned int max_bitflips = 0;
 
        for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
                int stat;
@@ -1379,10 +1399,12 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
                chip->read_buf(mtd, oob, eccbytes);
                stat = chip->ecc.correct(mtd, p, oob, NULL);
 
-               if (stat < 0)
+               if (stat < 0) {
                        mtd->ecc_stats.failed++;
-               else
+               } else {
                        mtd->ecc_stats.corrected += stat;
+                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
+               }
 
                oob += eccbytes;
 
@@ -1397,7 +1419,7 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
        if (i)
                chip->read_buf(mtd, oob, i);
 
-       return 0;
+       return max_bitflips;
 }
 
 /**
@@ -1459,11 +1481,9 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
 static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                            struct mtd_oob_ops *ops)
 {
-       int chipnr, page, realpage, col, bytes, aligned;
+       int chipnr, page, realpage, col, bytes, aligned, oob_required;
        struct nand_chip *chip = mtd->priv;
        struct mtd_ecc_stats stats;
-       int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
-       int sndcmd = 1;
        int ret = 0;
        uint32_t readlen = ops->len;
        uint32_t oobreadlen = ops->ooblen;
@@ -1471,6 +1491,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                mtd->oobavail : mtd->oobsize;
 
        uint8_t *bufpoi, *oob, *buf;
+       unsigned int max_bitflips = 0;
 
        stats = mtd->ecc_stats;
 
@@ -1484,6 +1505,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
 
        buf = ops->datbuf;
        oob = ops->oobbuf;
+       oob_required = oob ? 1 : 0;
 
        while (1) {
                bytes = min(mtd->writesize - col, readlen);
@@ -1493,21 +1515,22 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                if (realpage != chip->pagebuf || oob) {
                        bufpoi = aligned ? buf : chip->buffers->databuf;
 
-                       if (likely(sndcmd)) {
-                               chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
-                               sndcmd = 0;
-                       }
+                       chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
 
-                       /* Now read the page into the buffer */
+                       /*
+                        * Now read the page into the buffer.  Absent an error,
+                        * the read methods return max bitflips per ecc step.
+                        */
                        if (unlikely(ops->mode == MTD_OPS_RAW))
-                               ret = chip->ecc.read_page_raw(mtd, chip,
-                                                             bufpoi, page);
+                               ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
+                                                             oob_required,
+                                                             page);
                        else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
                                ret = chip->ecc.read_subpage(mtd, chip,
                                                        col, bytes, bufpoi);
                        else
                                ret = chip->ecc.read_page(mtd, chip, bufpoi,
-                                                         page);
+                                                         oob_required, page);
                        if (ret < 0) {
                                if (!aligned)
                                        /* Invalidate page cache */
@@ -1515,22 +1538,25 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                                break;
                        }
 
+                       max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
                        /* Transfer not aligned data */
                        if (!aligned) {
                                if (!NAND_SUBPAGE_READ(chip) && !oob &&
                                    !(mtd->ecc_stats.failed - stats.failed) &&
-                                   (ops->mode != MTD_OPS_RAW))
+                                   (ops->mode != MTD_OPS_RAW)) {
                                        chip->pagebuf = realpage;
-                               else
+                                       chip->pagebuf_bitflips = ret;
+                               } else {
                                        /* Invalidate page cache */
                                        chip->pagebuf = -1;
+                               }
                                memcpy(buf, chip->buffers->databuf + col, bytes);
                        }
 
                        buf += bytes;
 
                        if (unlikely(oob)) {
-
                                int toread = min(oobreadlen, max_oobsize);
 
                                if (toread) {
@@ -1541,13 +1567,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                        }
 
                        if (!(chip->options & NAND_NO_READRDY)) {
-                               /*
-                                * Apply delay or wait for ready/busy pin. Do
-                                * this before the AUTOINCR check, so no
-                                * problems arise if a chip which does auto
-                                * increment is marked as NOAUTOINCR by the
-                                * board driver.
-                                */
+                               /* Apply delay or wait for ready/busy pin */
                                if (!chip->dev_ready)
                                        udelay(chip->chip_delay);
                                else
@@ -1556,6 +1576,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                } else {
                        memcpy(buf, chip->buffers->databuf + col, bytes);
                        buf += bytes;
+                       max_bitflips = max_t(unsigned int, max_bitflips,
+                                            chip->pagebuf_bitflips);
                }
 
                readlen -= bytes;
@@ -1575,26 +1597,19 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                        chip->select_chip(mtd, -1);
                        chip->select_chip(mtd, chipnr);
                }
-
-               /*
-                * Check, if the chip supports auto page increment or if we
-                * have hit a block boundary.
-                */
-               if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
-                       sndcmd = 1;
        }
 
        ops->retlen = ops->len - (size_t) readlen;
        if (oob)
                ops->oobretlen = ops->ooblen - oobreadlen;
 
-       if (ret)
+       if (ret < 0)
                return ret;
 
        if (mtd->ecc_stats.failed - stats.failed)
                return -EBADMSG;
 
-       return  mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+       return max_bitflips;
 }
 
 /**
@@ -1630,17 +1645,13 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @page: page number to read
- * @sndcmd: flag whether to issue read command or not
  */
 static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
-                            int page, int sndcmd)
+                            int page)
 {
-       if (sndcmd) {
-               chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
-               sndcmd = 0;
-       }
+       chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
        chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-       return sndcmd;
+       return 0;
 }
 
 /**
@@ -1649,10 +1660,9 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @page: page number to read
- * @sndcmd: flag whether to issue read command or not
  */
 static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
-                                 int page, int sndcmd)
+                                 int page)
 {
        uint8_t *buf = chip->oob_poi;
        int length = mtd->oobsize;
@@ -1679,7 +1689,7 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
        if (length > 0)
                chip->read_buf(mtd, bufpoi, length);
 
-       return 1;
+       return 0;
 }
 
 /**
@@ -1775,13 +1785,13 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
                            struct mtd_oob_ops *ops)
 {
-       int page, realpage, chipnr, sndcmd = 1;
+       int page, realpage, chipnr;
        struct nand_chip *chip = mtd->priv;
        struct mtd_ecc_stats stats;
-       int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
        int readlen = ops->ooblen;
        int len;
        uint8_t *buf = ops->oobbuf;
+       int ret = 0;
 
        pr_debug("%s: from = 0x%08Lx, len = %i\n",
                        __func__, (unsigned long long)from, readlen);
@@ -1817,20 +1827,18 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
 
        while (1) {
                if (ops->mode == MTD_OPS_RAW)
-                       sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd);
+                       ret = chip->ecc.read_oob_raw(mtd, chip, page);
                else
-                       sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
+                       ret = chip->ecc.read_oob(mtd, chip, page);
+
+               if (ret < 0)
+                       break;
 
                len = min(len, readlen);
                buf = nand_transfer_oob(chip, buf, ops, len);
 
                if (!(chip->options & NAND_NO_READRDY)) {
-                       /*
-                        * Apply delay or wait for ready/busy pin. Do this
-                        * before the AUTOINCR check, so no problems arise if a
-                        * chip which does auto increment is marked as
-                        * NOAUTOINCR by the board driver.
-                        */
+                       /* Apply delay or wait for ready/busy pin */
                        if (!chip->dev_ready)
                                udelay(chip->chip_delay);
                        else
@@ -1851,16 +1859,12 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
                        chip->select_chip(mtd, -1);
                        chip->select_chip(mtd, chipnr);
                }
-
-               /*
-                * Check, if the chip supports auto page increment or if we
-                * have hit a block boundary.
-                */
-               if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
-                       sndcmd = 1;
        }
 
-       ops->oobretlen = ops->ooblen;
+       ops->oobretlen = ops->ooblen - readlen;
+
+       if (ret < 0)
+               return ret;
 
        if (mtd->ecc_stats.failed - stats.failed)
                return -EBADMSG;
@@ -1919,14 +1923,16 @@ out:
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
  *
  * Not for syndrome calculating ECC controllers, which use a special oob layout.
  */
 static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-                               const uint8_t *buf)
+                               const uint8_t *buf, int oob_required)
 {
        chip->write_buf(mtd, buf, mtd->writesize);
-       chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+       if (oob_required)
+               chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
 }
 
 /**
@@ -1934,12 +1940,13 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
  *
  * We need a special oob layout and handling even when ECC isn't checked.
  */
 static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
                                        struct nand_chip *chip,
-                                       const uint8_t *buf)
+                                       const uint8_t *buf, int oob_required)
 {
        int eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -1973,9 +1980,10 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
  */
 static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
-                                 const uint8_t *buf)
+                                 const uint8_t *buf, int oob_required)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -1991,7 +1999,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
        for (i = 0; i < chip->ecc.total; i++)
                chip->oob_poi[eccpos[i]] = ecc_calc[i];
 
-       chip->ecc.write_page_raw(mtd, chip, buf);
+       chip->ecc.write_page_raw(mtd, chip, buf, 1);
 }
 
 /**
@@ -1999,9 +2007,10 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
  */
 static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-                                 const uint8_t *buf)
+                                 const uint8_t *buf, int oob_required)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -2027,12 +2036,14 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
  *
  * The hw generator calculates the error syndrome automatically. Therefore we
  * need a special oob layout and handling.
  */
 static void nand_write_page_syndrome(struct mtd_info *mtd,
-                                   struct nand_chip *chip, const uint8_t *buf)
+                                   struct nand_chip *chip,
+                                   const uint8_t *buf, int oob_required)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -2071,21 +2082,23 @@ static void nand_write_page_syndrome(struct mtd_info *mtd,
  * @mtd: MTD device structure
  * @chip: NAND chip descriptor
  * @buf: the data to write
+ * @oob_required: must write chip->oob_poi to OOB
  * @page: page number to write
  * @cached: cached programming
  * @raw: use _raw version of write_page
  */
 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-                          const uint8_t *buf, int page, int cached, int raw)
+                          const uint8_t *buf, int oob_required, int page,
+                          int cached, int raw)
 {
        int status;
 
        chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
 
        if (unlikely(raw))
-               chip->ecc.write_page_raw(mtd, chip, buf);
+               chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
        else
-               chip->ecc.write_page(mtd, chip, buf);
+               chip->ecc.write_page(mtd, chip, buf, oob_required);
 
        /*
         * Cached progamming disabled for now. Not sure if it's worth the
@@ -2118,6 +2131,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 
        if (chip->verify_buf(mtd, buf, mtd->writesize))
                return -EIO;
+
+       /* Make sure the next page prog is preceded by a status read */
+       chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
 #endif
        return 0;
 }
@@ -2202,6 +2218,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
        uint8_t *oob = ops->oobbuf;
        uint8_t *buf = ops->datbuf;
        int ret, subpage;
+       int oob_required = oob ? 1 : 0;
 
        ops->retlen = 0;
        if (!writelen)
@@ -2264,8 +2281,8 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
                        memset(chip->oob_poi, 0xff, mtd->oobsize);
                }
 
-               ret = chip->write_page(mtd, chip, wbuf, page, cached,
-                                      (ops->mode == MTD_OPS_RAW));
+               ret = chip->write_page(mtd, chip, wbuf, oob_required, page,
+                                      cached, (ops->mode == MTD_OPS_RAW));
                if (ret)
                        break;
 
@@ -2898,8 +2915,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
                *busw = NAND_BUSWIDTH_16;
 
        chip->options &= ~NAND_CHIPOPTIONS_MSK;
-       chip->options |= (NAND_NO_READRDY |
-                       NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
+       chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK;
 
        pr_info("ONFI flash detected\n");
        return 1;
@@ -3076,11 +3092,6 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
                chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
 ident_done:
 
-       /*
-        * Set chip as a default. Board drivers can override it, if necessary.
-        */
-       chip->options |= NAND_NO_AUTOINCR;
-
        /* Try to identify manufacturer */
        for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
                if (nand_manuf_ids[maf_idx].id == *maf_id)
@@ -3154,10 +3165,11 @@ ident_done:
        if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
                chip->cmdfunc = nand_command_lp;
 
-       pr_info("NAND device: Manufacturer ID:"
-               " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
-               nand_manuf_ids[maf_idx].name,
-               chip->onfi_version ? chip->onfi_params.model : type->name);
+       pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
+               " page size: %d, OOB size: %d\n",
+               *maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
+               chip->onfi_version ? chip->onfi_params.model : type->name,
+               mtd->writesize, mtd->oobsize);
 
        return type;
 }
@@ -3329,8 +3341,13 @@ int nand_scan_tail(struct mtd_info *mtd)
                if (!chip->ecc.write_oob)
                        chip->ecc.write_oob = nand_write_oob_syndrome;
 
-               if (mtd->writesize >= chip->ecc.size)
+               if (mtd->writesize >= chip->ecc.size) {
+                       if (!chip->ecc.strength) {
+                               pr_warn("Driver must set ecc.strength when using hardware ECC\n");
+                               BUG();
+                       }
                        break;
+               }
                pr_warn("%d byte HW ECC not possible on "
                           "%d byte page size, fallback to SW ECC\n",
                           chip->ecc.size, mtd->writesize);
@@ -3385,7 +3402,7 @@ int nand_scan_tail(struct mtd_info *mtd)
                        BUG();
                }
                chip->ecc.strength =
-                       chip->ecc.bytes*8 / fls(8*chip->ecc.size);
+                       chip->ecc.bytes * 8 / fls(8 * chip->ecc.size);
                break;
 
        case NAND_ECC_NONE:
@@ -3483,7 +3500,7 @@ int nand_scan_tail(struct mtd_info *mtd)
 
        /* propagate ecc info to mtd_info */
        mtd->ecclayout = chip->ecc.layout;
-       mtd->ecc_strength = chip->ecc.strength * chip->ecc.steps;
+       mtd->ecc_strength = chip->ecc.strength;
 
        /* Check, if we should skip the bad block table scan */
        if (chip->options & NAND_SKIP_BBTSCAN)
index 20a112f591fe3f67347bb206e1aae480d5eeaac6..30d1319ff0657ce75b063023d9eee0c65d2f5873 100644 (file)
@@ -324,6 +324,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
 
                buf += mtd->oobsize + mtd->writesize;
                len -= mtd->writesize;
+               offs += mtd->writesize;
        }
        return 0;
 }
index af4fe8ca7b5ef7fbdfb25bb2553d8af4f61f8d29..621b70b7a159099ffc3060bdf2651a38dc2dae43 100644 (file)
@@ -70,7 +70,7 @@ struct nand_flash_dev nand_flash_ids[] = {
         * These are the new chips with large page size. The pagesize and the
         * erasesize is determined from the extended id bytes
         */
-#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
+#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY)
 #define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
 
        /* 512 Megabit */
@@ -157,9 +157,7 @@ struct nand_flash_dev nand_flash_ids[] = {
         * writes possible, but not implemented now
         */
        {"AND 128MiB 3,3V 8-bit",       0x01, 2048, 128, 0x4000,
-        NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY |
-        BBT_AUTO_REFRESH
-       },
+        NAND_IS_AND | NAND_NO_READRDY | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
 
        {NULL,}
 };
index 261f478f8cc37944e3365ce8b0219ec073bfcddc..6cc8fbfabb8e2b71d50db215222d380f9e0061a1 100644 (file)
@@ -268,7 +268,6 @@ MODULE_PARM_DESC(bch,                "Enable BCH ecc and set how many bits should "
 #define OPT_PAGE512      0x00000002 /* 512-byte  page chips */
 #define OPT_PAGE2048     0x00000008 /* 2048-byte page chips */
 #define OPT_SMARTMEDIA   0x00000010 /* SmartMedia technology chips */
-#define OPT_AUTOINCR     0x00000020 /* page number auto incrementation is possible */
 #define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
 #define OPT_PAGE4096     0x00000080 /* 4096-byte page chips */
 #define OPT_LARGEPAGE    (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
@@ -594,7 +593,7 @@ static int init_nandsim(struct mtd_info *mtd)
                ns->options |= OPT_PAGE256;
        }
        else if (ns->geom.pgsz == 512) {
-               ns->options |= (OPT_PAGE512 | OPT_AUTOINCR);
+               ns->options |= OPT_PAGE512;
                if (ns->busw == 8)
                        ns->options |= OPT_PAGE512_8BIT;
        } else if (ns->geom.pgsz == 2048) {
@@ -663,8 +662,6 @@ static int init_nandsim(struct mtd_info *mtd)
         for (i = 0; nand_flash_ids[i].name != NULL; i++) {
                 if (second_id_byte != nand_flash_ids[i].id)
                         continue;
-               if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR))
-                       ns->options |= OPT_AUTOINCR;
        }
 
        if (ns->busw == 16)
@@ -1936,20 +1933,8 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd)
        if (ns->regs.count == ns->regs.num) {
                NS_DBG("read_byte: all bytes were read\n");
 
-               /*
-                * The OPT_AUTOINCR allows to read next consecutive pages without
-                * new read operation cycle.
-                */
-               if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
-                       ns->regs.count = 0;
-                       if (ns->regs.row + 1 < ns->geom.pgnum)
-                               ns->regs.row += 1;
-                       NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row);
-                       do_state_action(ns, ACTION_CPY);
-               }
-               else if (NS_STATE(ns->nxstate) == STATE_READY)
+               if (NS_STATE(ns->nxstate) == STATE_READY)
                        switch_state(ns);
-
        }
 
        return outb;
@@ -2203,14 +2188,7 @@ static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
        ns->regs.count += len;
 
        if (ns->regs.count == ns->regs.num) {
-               if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
-                       ns->regs.count = 0;
-                       if (ns->regs.row + 1 < ns->geom.pgnum)
-                               ns->regs.row += 1;
-                       NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row);
-                       do_state_action(ns, ACTION_CPY);
-               }
-               else if (NS_STATE(ns->nxstate) == STATE_READY)
+               if (NS_STATE(ns->nxstate) == STATE_READY)
                        switch_state(ns);
        }
 
index c2b0bba9d8b39607626f091d398a3fccfa8687c4..d7f681d0c9b98e54023cdce755b2febf6a983708 100644 (file)
 #include <linux/io.h>
 #include <linux/slab.h>
 
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+#include <linux/bch.h>
+#endif
+
 #include <plat/dma.h>
 #include <plat/gpmc.h>
 #include <plat/nand.h>
@@ -127,6 +131,11 @@ struct omap_nand_info {
        } iomode;
        u_char                          *buf;
        int                                     buf_len;
+
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+       struct bch_control             *bch;
+       struct nand_ecclayout           ecclayout;
+#endif
 };
 
 /**
@@ -402,7 +411,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
                        PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
        if (ret)
                /* PFPW engine is busy, use cpu copy method */
-               goto out_copy;
+               goto out_copy_unmap;
 
        init_completion(&info->comp);
 
@@ -421,6 +430,8 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
        dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
        return 0;
 
+out_copy_unmap:
+       dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
 out_copy:
        if (info->nand.options & NAND_BUSWIDTH_16)
                is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -879,7 +890,7 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
        struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
                                                        mtd);
        unsigned long timeo = jiffies;
-       int status = NAND_STATUS_FAIL, state = this->state;
+       int status, state = this->state;
 
        if (state == FL_ERASING)
                timeo += (HZ * 400) / 1000;
@@ -894,6 +905,8 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
                        break;
                cond_resched();
        }
+
+       status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
        return status;
 }
 
@@ -925,6 +938,226 @@ static int omap_dev_ready(struct mtd_info *mtd)
        return 1;
 }
 
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+
+/**
+ * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ */
+static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
+{
+       int nerrors;
+       unsigned int dev_width;
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                                                  mtd);
+       struct nand_chip *chip = mtd->priv;
+
+       nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
+       dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+       /*
+        * Program GPMC to perform correction on one 512-byte sector at a time.
+        * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
+        * gives a slight (5%) performance gain (but requires additional code).
+        */
+       (void)gpmc_enable_hwecc_bch(info->gpmc_cs, mode, dev_width, 1, nerrors);
+}
+
+/**
+ * omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ */
+static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
+                                   u_char *ecc_code)
+{
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                                                  mtd);
+       return gpmc_calculate_ecc_bch4(info->gpmc_cs, dat, ecc_code);
+}
+
+/**
+ * omap3_calculate_ecc_bch8 - Generate 13 bytes of ECC bytes
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ */
+static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
+                                   u_char *ecc_code)
+{
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                                                  mtd);
+       return gpmc_calculate_ecc_bch8(info->gpmc_cs, dat, ecc_code);
+}
+
+/**
+ * omap3_correct_data_bch - Decode received data and correct errors
+ * @mtd: MTD device structure
+ * @data: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ */
+static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
+                                 u_char *read_ecc, u_char *calc_ecc)
+{
+       int i, count;
+       /* cannot correct more than 8 errors */
+       unsigned int errloc[8];
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                                                  mtd);
+
+       count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
+                          errloc);
+       if (count > 0) {
+               /* correct errors */
+               for (i = 0; i < count; i++) {
+                       /* correct data only, not ecc bytes */
+                       if (errloc[i] < 8*512)
+                               data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
+                       pr_debug("corrected bitflip %u\n", errloc[i]);
+               }
+       } else if (count < 0) {
+               pr_err("ecc unrecoverable error\n");
+       }
+       return count;
+}
+
+/**
+ * omap3_free_bch - Release BCH ecc resources
+ * @mtd: MTD device structure
+ */
+static void omap3_free_bch(struct mtd_info *mtd)
+{
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                                                  mtd);
+       if (info->bch) {
+               free_bch(info->bch);
+               info->bch = NULL;
+       }
+}
+
+/**
+ * omap3_init_bch - Initialize BCH ECC
+ * @mtd: MTD device structure
+ * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW)
+ */
+static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
+{
+       int ret, max_errors;
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                                                  mtd);
+#ifdef CONFIG_MTD_NAND_OMAP_BCH8
+       const int hw_errors = 8;
+#else
+       const int hw_errors = 4;
+#endif
+       info->bch = NULL;
+
+       max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4;
+       if (max_errors != hw_errors) {
+               pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
+                      max_errors, hw_errors);
+               goto fail;
+       }
+
+       /* initialize GPMC BCH engine */
+       ret = gpmc_init_hwecc_bch(info->gpmc_cs, 1, max_errors);
+       if (ret)
+               goto fail;
+
+       /* software bch library is only used to detect and locate errors */
+       info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
+       if (!info->bch)
+               goto fail;
+
+       info->nand.ecc.size    = 512;
+       info->nand.ecc.hwctl   = omap3_enable_hwecc_bch;
+       info->nand.ecc.correct = omap3_correct_data_bch;
+       info->nand.ecc.mode    = NAND_ECC_HW;
+
+       /*
+        * The number of corrected errors in an ecc block that will trigger
+        * block scrubbing defaults to the ecc strength (4 or 8).
+        * Set mtd->bitflip_threshold here to define a custom threshold.
+        */
+
+       if (max_errors == 8) {
+               info->nand.ecc.strength  = 8;
+               info->nand.ecc.bytes     = 13;
+               info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
+       } else {
+               info->nand.ecc.strength  = 4;
+               info->nand.ecc.bytes     = 7;
+               info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
+       }
+
+       pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
+       return 0;
+fail:
+       omap3_free_bch(mtd);
+       return -1;
+}
+
+/**
+ * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
+ * @mtd: MTD device structure
+ */
+static int omap3_init_bch_tail(struct mtd_info *mtd)
+{
+       int i, steps;
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                                                  mtd);
+       struct nand_ecclayout *layout = &info->ecclayout;
+
+       /* build oob layout */
+       steps = mtd->writesize/info->nand.ecc.size;
+       layout->eccbytes = steps*info->nand.ecc.bytes;
+
+       /* do not bother creating special oob layouts for small page devices */
+       if (mtd->oobsize < 64) {
+               pr_err("BCH ecc is not supported on small page devices\n");
+               goto fail;
+       }
+
+       /* reserve 2 bytes for bad block marker */
+       if (layout->eccbytes+2 > mtd->oobsize) {
+               pr_err("no oob layout available for oobsize %d eccbytes %u\n",
+                      mtd->oobsize, layout->eccbytes);
+               goto fail;
+       }
+
+       /* put ecc bytes at oob tail */
+       for (i = 0; i < layout->eccbytes; i++)
+               layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
+
+       layout->oobfree[0].offset = 2;
+       layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
+       info->nand.ecc.layout = layout;
+
+       if (!(info->nand.options & NAND_BUSWIDTH_16))
+               info->nand.badblock_pattern = &bb_descrip_flashbased;
+       return 0;
+fail:
+       omap3_free_bch(mtd);
+       return -1;
+}
+
+#else
+static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
+{
+       pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");
+       return -1;
+}
+static int omap3_init_bch_tail(struct mtd_info *mtd)
+{
+       return -1;
+}
+static void omap3_free_bch(struct mtd_info *mtd)
+{
+}
+#endif /* CONFIG_MTD_NAND_OMAP_BCH */
+
 static int __devinit omap_nand_probe(struct platform_device *pdev)
 {
        struct omap_nand_info           *info;
@@ -1063,6 +1296,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
                info->nand.ecc.hwctl            = omap_enable_hwecc;
                info->nand.ecc.correct          = omap_correct_data;
                info->nand.ecc.mode             = NAND_ECC_HW;
+       } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
+                  (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
+               err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
+               if (err) {
+                       err = -EINVAL;
+                       goto out_release_mem_region;
+               }
        }
 
        /* DIP switches on some boards change between 8 and 16 bit
@@ -1094,6 +1334,14 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
                                        (offset + omap_oobinfo.eccbytes);
 
                info->nand.ecc.layout = &omap_oobinfo;
+       } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
+                  (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
+               /* build OOB layout for BCH ECC correction */
+               err = omap3_init_bch_tail(&info->mtd);
+               if (err) {
+                       err = -EINVAL;
+                       goto out_release_mem_region;
+               }
        }
 
        /* second phase scan */
@@ -1122,6 +1370,7 @@ static int omap_nand_remove(struct platform_device *pdev)
        struct mtd_info *mtd = platform_get_drvdata(pdev);
        struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
                                                        mtd);
+       omap3_free_bch(&info->mtd);
 
        platform_set_drvdata(pdev, NULL);
        if (info->dma_ch != -1)
index 0f50ef38b87b4e814e1be5ee8baf211afd3b3f51..513dc88a05ca422ac23525dac7db3f20ef9b0bfc 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
 #include <asm/io.h>
 #include <asm/sizes.h>
 #include <mach/hardware.h>
@@ -79,6 +81,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
        struct nand_chip *nc;
        struct orion_nand_data *board;
        struct resource *res;
+       struct clk *clk;
        void __iomem *io_base;
        int ret = 0;
        u32 val = 0;
@@ -155,6 +158,14 @@ static int __init orion_nand_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, mtd);
 
+       /* Not all platforms can gate the clock, so it is not
+          an error if the clock does not exists. */
+       clk = clk_get(&pdev->dev, NULL);
+       if (!IS_ERR(clk)) {
+               clk_prepare_enable(clk);
+               clk_put(clk);
+       }
+
        if (nand_scan(mtd, 1)) {
                ret = -ENXIO;
                goto no_dev;
@@ -184,6 +195,7 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
 {
        struct mtd_info *mtd = platform_get_drvdata(pdev);
        struct nand_chip *nc = mtd->priv;
+       struct clk *clk;
 
        nand_release(mtd);
 
@@ -191,6 +203,12 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
 
        kfree(nc);
 
+       clk = clk_get(&pdev->dev, NULL);
+       if (!IS_ERR(clk)) {
+               clk_disable_unprepare(clk);
+               clk_put(clk);
+       }
+
        return 0;
 }
 
index 974dbf8251c928842fe528c15777e4cd12efdaa8..1440e51cedccc877108a3922046a4b56fbac40ca 100644 (file)
@@ -155,7 +155,6 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
        chip->ecc.mode = NAND_ECC_SOFT;
 
        /* Enable the following for a flash based bad block table */
-       chip->options = NAND_NO_AUTOINCR;
        chip->bbt_options = NAND_BBT_USE_FLASH;
 
        /* Scan to find existence of the device */
index 6404e6e81b101ad12b405a1cb1d689273b30c447..1bcb520404228ba2d8fe9cec1d29d9a6c535eac7 100644 (file)
@@ -23,14 +23,18 @@ struct plat_nand_data {
        void __iomem            *io_base;
 };
 
+static const char *part_probe_types[] = { "cmdlinepart", NULL };
+
 /*
  * Probe for the NAND device.
  */
 static int __devinit plat_nand_probe(struct platform_device *pdev)
 {
        struct platform_nand_data *pdata = pdev->dev.platform_data;
+       struct mtd_part_parser_data ppdata;
        struct plat_nand_data *data;
        struct resource *res;
+       const char **part_types;
        int err = 0;
 
        if (pdata->chip.nr_chips < 1) {
@@ -75,6 +79,7 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
        data->chip.select_chip = pdata->ctrl.select_chip;
        data->chip.write_buf = pdata->ctrl.write_buf;
        data->chip.read_buf = pdata->ctrl.read_buf;
+       data->chip.read_byte = pdata->ctrl.read_byte;
        data->chip.chip_delay = pdata->chip.chip_delay;
        data->chip.options |= pdata->chip.options;
        data->chip.bbt_options |= pdata->chip.bbt_options;
@@ -98,8 +103,10 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
                goto out;
        }
 
-       err = mtd_device_parse_register(&data->mtd,
-                                       pdata->chip.part_probe_types, NULL,
+       part_types = pdata->chip.part_probe_types ? : part_probe_types;
+
+       ppdata.of_node = pdev->dev.of_node;
+       err = mtd_device_parse_register(&data->mtd, part_types, &ppdata,
                                        pdata->chip.partitions,
                                        pdata->chip.nr_partitions);
 
@@ -140,12 +147,19 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id plat_nand_match[] = {
+       { .compatible = "gen_nand" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, plat_nand_match);
+
 static struct platform_driver plat_nand_driver = {
-       .probe          = plat_nand_probe,
-       .remove         = __devexit_p(plat_nand_remove),
-       .driver         = {
-               .name   = "gen_nand",
-               .owner  = THIS_MODULE,
+       .probe  = plat_nand_probe,
+       .remove = __devexit_p(plat_nand_remove),
+       .driver = {
+               .name           = "gen_nand",
+               .owner          = THIS_MODULE,
+               .of_match_table = plat_nand_match,
        },
 };
 
index def50caa6f84b259fd5ed23e702efcd5977317cb..252aaefcacfa2ba0e07d1425672afcaa8c665bdf 100644 (file)
@@ -682,14 +682,15 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
 }
 
 static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
-               struct nand_chip *chip, const uint8_t *buf)
+               struct nand_chip *chip, const uint8_t *buf, int oob_required)
 {
        chip->write_buf(mtd, buf, mtd->writesize);
        chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
 }
 
 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
-               struct nand_chip *chip, uint8_t *buf, int page)
+               struct nand_chip *chip, uint8_t *buf, int oob_required,
+               int page)
 {
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
@@ -1004,7 +1005,6 @@ KEEP_CONFIG:
        chip->ecc.size = host->page_size;
        chip->ecc.strength = 1;
 
-       chip->options = NAND_NO_AUTOINCR;
        chip->options |= NAND_NO_READRDY;
        if (host->reg_ndcr & NDCR_DWIDTH_M)
                chip->options |= NAND_BUSWIDTH_16;
index c2040187c813e0084e6dc7da8441c675bbe6cc3e..8cb627751c9c9658c5a3d6ac7c0a9cf1f78fb6af 100644 (file)
@@ -539,14 +539,11 @@ exit:
  * nand_read_oob_syndrome assumes we can send column address - we can't
  */
 static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-                            int page, int sndcmd)
+                            int page)
 {
-       if (sndcmd) {
-               chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
-               sndcmd = 0;
-       }
+       chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
        chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-       return sndcmd;
+       return 0;
 }
 
 /*
@@ -1104,18 +1101,7 @@ static struct pci_driver r852_pci_driver = {
        .driver.pm      = &r852_pm_ops,
 };
 
-static __init int r852_module_init(void)
-{
-       return pci_register_driver(&r852_pci_driver);
-}
-
-static void __exit r852_module_exit(void)
-{
-       pci_unregister_driver(&r852_pci_driver);
-}
-
-module_init(r852_module_init);
-module_exit(r852_module_exit);
+module_pci_driver(r852_pci_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
index e9b2b260de3ae081bbeb2e04bbff4d61902003b3..aa9b8a5e0b8f94b66f6356b4346000feeea0e6d8 100644 (file)
@@ -344,7 +344,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
 }
 
 static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-                               uint8_t *buf, int page)
+                               uint8_t *buf, int oob_required, int page)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -359,14 +359,14 @@ static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
                if (flctl->hwecc_cant_correct[i])
                        mtd->ecc_stats.failed++;
                else
-                       mtd->ecc_stats.corrected += 0;
+                       mtd->ecc_stats.corrected += 0; /* FIXME */
        }
 
        return 0;
 }
 
 static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-                                  const uint8_t *buf)
+                                  const uint8_t *buf, int oob_required)
 {
        int i, eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
@@ -881,8 +881,6 @@ static int __devinit flctl_probe(struct platform_device *pdev)
        flctl->hwecc = pdata->has_hwecc;
        flctl->holden = pdata->use_holden;
 
-       nand->options = NAND_NO_AUTOINCR;
-
        /* Set address of hardware control function */
        /* 20 us command delay time */
        nand->chip_delay = 20;
index 774c3c26671379a30ef8c3f4e0aec1c57ee3efec..082bcdcd6bcfa3f460e26b68edc183b56aba5947 100644 (file)
@@ -94,17 +94,16 @@ static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
        {NULL,}
 };
 
-#define XD_TYPEM       (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
 static struct nand_flash_dev nand_xd_flash_ids[] = {
 
        {"xD 16MiB 3,3V",    0x73, 512, 16, 0x4000, 0},
        {"xD 32MiB 3,3V",    0x75, 512, 32, 0x4000, 0},
        {"xD 64MiB 3,3V",    0x76, 512, 64, 0x4000, 0},
        {"xD 128MiB 3,3V",   0x79, 512, 128, 0x4000, 0},
-       {"xD 256MiB 3,3V",   0x71, 512, 256, 0x4000, XD_TYPEM},
-       {"xD 512MiB 3,3V",   0xdc, 512, 512, 0x4000, XD_TYPEM},
-       {"xD 1GiB 3,3V",     0xd3, 512, 1024, 0x4000, XD_TYPEM},
-       {"xD 2GiB 3,3V",     0xd5, 512, 2048, 0x4000, XD_TYPEM},
+       {"xD 256MiB 3,3V",   0x71, 512, 256, 0x4000, NAND_BROKEN_XD},
+       {"xD 512MiB 3,3V",   0xdc, 512, 512, 0x4000, NAND_BROKEN_XD},
+       {"xD 1GiB 3,3V",     0xd3, 512, 1024, 0x4000, NAND_BROKEN_XD},
+       {"xD 2GiB 3,3V",     0xd5, 512, 2048, 0x4000, NAND_BROKEN_XD},
        {NULL,}
 };
 
index b3ce12ef359e83777280aff2b6f9fa9d58cc4dc9..7153e0d27101e3eed06ac17addcdbd4734c7ccc3 100644 (file)
@@ -1201,7 +1201,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
        if (mtd->ecc_stats.failed - stats.failed)
                return -EBADMSG;
 
-       return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+       /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
+       return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
 }
 
 /**
@@ -1333,7 +1334,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
        if (mtd->ecc_stats.failed - stats.failed)
                return -EBADMSG;
 
-       return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+       /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
+       return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
 }
 
 /**
index ec03b401620a6f3fc649940d1379b2b6e29deed8..9c755db6b16d12ccbbb43c5172d70c612c61c48e 100644 (file)
@@ -1131,7 +1131,6 @@ static irqreturn_t
 e100rxtx_interrupt(int irq, void *dev_id)
 {
        struct net_device *dev = (struct net_device *)dev_id;
-       struct net_local *np = netdev_priv(dev);
        unsigned long irqbits;
 
        /*
index 8f2cf8c09e2d7026ef4dc65c316a269cac1f78a3..ff7f4c5115a1d098106a56ea814512c62fedd9e0 100644 (file)
@@ -207,7 +207,8 @@ struct fec_enet_private {
 
        struct net_device *netdev;
 
-       struct clk *clk;
+       struct clk *clk_ipg;
+       struct clk *clk_ahb;
 
        /* The saved address of a sent-in-place packet/buffer, for skfree(). */
        unsigned char *tx_bounce[TX_RING_SIZE];
@@ -1065,7 +1066,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
         * Reference Manual has an error on this, and gets fixed on i.MX6Q
         * document.
         */
-       fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000);
+       fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
        if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
                fep->phy_speed--;
        fep->phy_speed <<= 1;
@@ -1618,12 +1619,20 @@ fec_probe(struct platform_device *pdev)
                goto failed_pin;
        }
 
-       fep->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(fep->clk)) {
-               ret = PTR_ERR(fep->clk);
+       fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+       if (IS_ERR(fep->clk_ipg)) {
+               ret = PTR_ERR(fep->clk_ipg);
                goto failed_clk;
        }
-       clk_prepare_enable(fep->clk);
+
+       fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+       if (IS_ERR(fep->clk_ahb)) {
+               ret = PTR_ERR(fep->clk_ahb);
+               goto failed_clk;
+       }
+
+       clk_prepare_enable(fep->clk_ahb);
+       clk_prepare_enable(fep->clk_ipg);
 
        ret = fec_enet_init(ndev);
        if (ret)
@@ -1646,8 +1655,8 @@ failed_register:
        fec_enet_mii_remove(fep);
 failed_mii_init:
 failed_init:
-       clk_disable_unprepare(fep->clk);
-       clk_put(fep->clk);
+       clk_disable_unprepare(fep->clk_ahb);
+       clk_disable_unprepare(fep->clk_ipg);
 failed_pin:
 failed_clk:
        for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1680,8 +1689,8 @@ fec_drv_remove(struct platform_device *pdev)
                if (irq > 0)
                        free_irq(irq, ndev);
        }
-       clk_disable_unprepare(fep->clk);
-       clk_put(fep->clk);
+       clk_disable_unprepare(fep->clk_ahb);
+       clk_disable_unprepare(fep->clk_ipg);
        iounmap(fep->hwp);
        free_netdev(ndev);
 
@@ -1705,7 +1714,8 @@ fec_suspend(struct device *dev)
                fec_stop(ndev);
                netif_device_detach(ndev);
        }
-       clk_disable_unprepare(fep->clk);
+       clk_disable_unprepare(fep->clk_ahb);
+       clk_disable_unprepare(fep->clk_ipg);
 
        return 0;
 }
@@ -1716,7 +1726,8 @@ fec_resume(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
-       clk_prepare_enable(fep->clk);
+       clk_prepare_enable(fep->clk_ahb);
+       clk_prepare_enable(fep->clk_ipg);
        if (netif_running(ndev)) {
                fec_restart(ndev, fep->full_duplex);
                netif_device_attach(ndev);
index 97f947b3d94af9c6b50811ca1b527b34307735c5..2933d08b036edc437b0e026ed55404fa34c8b4fb 100644 (file)
@@ -437,7 +437,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
                length = status & BCOM_FEC_RX_BD_LEN_MASK;
                skb_put(rskb, length - 4);      /* length without CRC32 */
                rskb->protocol = eth_type_trans(rskb, dev);
-               if (!skb_defer_rx_timestamp(skb))
+               if (!skb_defer_rx_timestamp(rskb))
                        netif_rx(rskb);
 
                spin_lock(&priv->lock);
index 95731c8410447f2db3fe8ad1d643c625b7a59b56..7483ca0a6282f8e2111c44ca67a0518c8edba661 100644 (file)
@@ -4080,7 +4080,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                                spin_lock_irqsave(&adapter->stats_lock,
                                                  irq_flags);
                                e1000_tbi_adjust_stats(hw, &adapter->stats,
-                                                      length, skb->data);
+                                                      length, mapped);
                                spin_unlock_irqrestore(&adapter->stats_lock,
                                                       irq_flags);
                                length--;
index bbf70ba367da0c750997ff35f7db517a95eb74a2..238ab2f8a5e7ceff37869043c1ac83d3e41d3b26 100644 (file)
 #define I217_EEE_100_SUPPORTED  (1 << 1)       /* 100BaseTx EEE supported */
 
 /* Intel Rapid Start Technology Support */
-#define I217_PROXY_CTRL                 PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL                 BM_PHY_REG(BM_WUC_PAGE, 70)
 #define I217_PROXY_CTRL_AUTO_DISABLE    0x0080
 #define I217_SxCTRL                     PHY_REG(BM_PORT_CTRL_PAGE, 28)
-#define I217_SxCTRL_MASK                0x1000
+#define I217_SxCTRL_ENABLE_LPI_RESET    0x1000
 #define I217_CGFREG                     PHY_REG(772, 29)
-#define I217_CGFREG_MASK                0x0002
+#define I217_CGFREG_ENABLE_MTA_RESET    0x0002
 #define I217_MEMPWR                     PHY_REG(772, 26)
-#define I217_MEMPWR_MASK                0x0010
+#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
 
 /* Strapping Option Register - RO */
 #define E1000_STRAP                     0x0000C
@@ -4089,12 +4089,12 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
                         * power good.
                         */
                        e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
-                       phy_reg |= I217_SxCTRL_MASK;
+                       phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
                        e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
 
                        /* Disable the SMB release on LCD reset. */
                        e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
-                       phy_reg &= ~I217_MEMPWR;
+                       phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
                        e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
                }
 
@@ -4103,7 +4103,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
                 * Support
                 */
                e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
-               phy_reg |= I217_CGFREG_MASK;
+               phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
                e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
 
 release:
@@ -4176,7 +4176,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
                        ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
                        if (ret_val)
                                goto release;
-                       phy_reg |= I217_MEMPWR_MASK;
+                       phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
                        e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
 
                        /* Disable Proxy */
@@ -4186,7 +4186,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
                ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
                if (ret_val)
                        goto release;
-               phy_reg &= ~I217_CGFREG_MASK;
+               phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
                e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
 release:
                if (ret_val)
index c8950da60e6ba981a401365e431009bfc338834c..04d901d0ff635f284185175bdc3698bc7940038b 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/types.h>
 #include <linux/inet_lro.h>
 #include <linux/slab.h>
+#include <linux/clk.h>
 
 static char mv643xx_eth_driver_name[] = "mv643xx_eth";
 static char mv643xx_eth_driver_version[] = "1.4";
@@ -289,10 +290,10 @@ struct mv643xx_eth_shared_private {
        /*
         * Hardware-specific parameters.
         */
-       unsigned int t_clk;
        int extended_rx_coal_limit;
        int tx_bw_control;
        int tx_csum_limit;
+
 };
 
 #define TX_BW_CONTROL_ABSENT           0
@@ -431,6 +432,12 @@ struct mv643xx_eth_private {
        int tx_desc_sram_size;
        int txq_count;
        struct tx_queue txq[8];
+
+       /*
+        * Hardware-specific parameters.
+        */
+       struct clk *clk;
+       unsigned int t_clk;
 };
 
 
@@ -1010,7 +1017,7 @@ static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
        int mtu;
        int bucket_size;
 
-       token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
+       token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
        if (token_rate > 1023)
                token_rate = 1023;
 
@@ -1042,7 +1049,7 @@ static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
        int token_rate;
        int bucket_size;
 
-       token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
+       token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
        if (token_rate > 1023)
                token_rate = 1023;
 
@@ -1309,7 +1316,7 @@ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
                temp = (val & 0x003fff00) >> 8;
 
        temp *= 64000000;
-       do_div(temp, mp->shared->t_clk);
+       do_div(temp, mp->t_clk);
 
        return (unsigned int)temp;
 }
@@ -1319,7 +1326,7 @@ static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
        u64 temp;
        u32 val;
 
-       temp = (u64)usec * mp->shared->t_clk;
+       temp = (u64)usec * mp->t_clk;
        temp += 31999999;
        do_div(temp, 64000000);
 
@@ -1345,7 +1352,7 @@ static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
 
        temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
        temp *= 64000000;
-       do_div(temp, mp->shared->t_clk);
+       do_div(temp, mp->t_clk);
 
        return (unsigned int)temp;
 }
@@ -1354,7 +1361,7 @@ static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
 {
        u64 temp;
 
-       temp = (u64)usec * mp->shared->t_clk;
+       temp = (u64)usec * mp->t_clk;
        temp += 31999999;
        do_div(temp, 64000000);
 
@@ -2663,10 +2670,6 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
        if (dram)
                mv643xx_eth_conf_mbus_windows(msp, dram);
 
-       /*
-        * Detect hardware parameters.
-        */
-       msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
        msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
                                        pd->tx_csum_limit : 9 * 1024;
        infer_hw_params(msp);
@@ -2891,6 +2894,18 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
        mp->dev = dev;
 
+       /*
+        * Get the clk rate, if there is one, otherwise use the default.
+        */
+       mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
+       if (!IS_ERR(mp->clk)) {
+               clk_prepare_enable(mp->clk);
+               mp->t_clk = clk_get_rate(mp->clk);
+       } else {
+               mp->t_clk = 133000000;
+               printk(KERN_WARNING "Unable to get clock");
+       }
+
        set_params(mp, pd);
        netif_set_real_num_tx_queues(dev, mp->txq_count);
        netif_set_real_num_rx_queues(dev, mp->rxq_count);
@@ -2979,6 +2994,11 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
        if (mp->phy != NULL)
                phy_detach(mp->phy);
        cancel_work_sync(&mp->tx_timeout_task);
+
+       if (!IS_ERR(mp->clk)) {
+               clk_disable_unprepare(mp->clk);
+               clk_put(mp->clk);
+       }
        free_netdev(mp->dev);
 
        platform_set_drvdata(pdev, NULL);
index 1bcead1fa2f65f333ed16638148485a4c3891d54..842c8ce9494e0134ea02f7e962298737c11b19d2 100644 (file)
@@ -617,7 +617,7 @@ static struct mlx4_cmd_info cmd_info[] = {
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = NULL
+               .wrapper = mlx4_QUERY_FW_wrapper
        },
        {
                .opcode = MLX4_CMD_QUERY_HCA,
@@ -635,7 +635,7 @@ static struct mlx4_cmd_info cmd_info[] = {
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = NULL
+               .wrapper = mlx4_QUERY_DEV_CAP_wrapper
        },
        {
                .opcode = MLX4_CMD_QUERY_FUNC_CAP,
index 988b2424e1c6085f0486569e79ca121b19c3fa44..69ba57270481c1dc9e5b5a0ea828c209ad38ffac 100644 (file)
@@ -136,13 +136,12 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
        struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
        struct mlx4_en_priv *priv;
 
-       if (!mdev->pndev[port])
-               return;
-
-       priv = netdev_priv(mdev->pndev[port]);
        switch (event) {
        case MLX4_DEV_EVENT_PORT_UP:
        case MLX4_DEV_EVENT_PORT_DOWN:
+               if (!mdev->pndev[port])
+                       return;
+               priv = netdev_priv(mdev->pndev[port]);
                /* To prevent races, we poll the link state in a separate
                  task rather than changing it here */
                priv->link_state = event;
@@ -154,7 +153,10 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
                break;
 
        default:
-               mlx4_warn(mdev, "Unhandled event: %d\n", event);
+               if (port < 1 || port > dev->caps.num_ports ||
+                   !mdev->pndev[port])
+                       return;
+               mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port);
        }
 }
 
index 3b6f8efbf141278e8ad0958df64d7a852cf933fa..bce98d9c0039b87e74f8d7b7fbd9db3b0281d0bb 100644 (file)
@@ -426,7 +426,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 
                        mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
 
-                       if (flr_slave > dev->num_slaves) {
+                       if (flr_slave >= dev->num_slaves) {
                                mlx4_warn(dev,
                                          "Got FLR for unknown function: %d\n",
                                          flr_slave);
index 68f5cd6cb3c7c4291edd5f0972d586389ccd1fa1..9c83bb8151ea5be38395939f57cfa677b729717a 100644 (file)
@@ -412,7 +412,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        outbox = mailbox->buf;
 
        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
-                          MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
+                          MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (err)
                goto out;
 
@@ -590,8 +590,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
                for (i = 1; i <= dev_cap->num_ports; ++i) {
                        err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
-                                          MLX4_CMD_TIME_CLASS_B,
-                                          !mlx4_is_slave(dev));
+                                          MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
                        if (err)
                                goto out;
 
@@ -669,6 +668,28 @@ out:
        return err;
 }
 
+int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd)
+{
+       int     err = 0;
+       u8      field;
+
+       err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
+                          MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+       if (err)
+               return err;
+
+       /* For guests, report Blueflame disabled */
+       MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
+       field &= 0x7f;
+       MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
+
+       return 0;
+}
+
 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
@@ -860,6 +881,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
                ((fw_ver & 0xffff0000ull) >> 16) |
                ((fw_ver & 0x0000ffffull) << 16);
 
+       if (mlx4_is_slave(dev))
+               goto out;
+
        MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
        dev->caps.function = lg;
 
@@ -927,6 +951,27 @@ out:
        return err;
 }
 
+int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       u8 *outbuf;
+       int err;
+
+       outbuf = outbox->buf;
+       err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+       if (err)
+               return err;
+
+       /* for slaves, zero out everything except FW version */
+       outbuf[0] = outbuf[1] = 0;
+       memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
+       return 0;
+}
+
 static void get_board_id(void *vsd, char *board_id)
 {
        int i;
index 2e024a68fa814573d858ac382147f1f4d923b835..ee6f4fe00837ea2ecd2324f076a7bfa60df2d5b7 100644 (file)
@@ -142,12 +142,6 @@ struct mlx4_port_config {
        struct pci_dev *pdev;
 };
 
-static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
-{
-       return dev->caps.reserved_eqs +
-               MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
-}
-
 int mlx4_check_port_params(struct mlx4_dev *dev,
                           enum mlx4_port_type *port_type)
 {
@@ -217,6 +211,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        }
 
        dev->caps.num_ports          = dev_cap->num_ports;
+       dev->phys_caps.num_phys_eqs  = MLX4_MAX_EQ_NUM;
        for (i = 1; i <= dev->caps.num_ports; ++i) {
                dev->caps.vl_cap[i]         = dev_cap->max_vl[i];
                dev->caps.ib_mtu_cap[i]     = dev_cap->ib_mtu[i];
@@ -435,12 +430,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
 
        memset(&dev_cap, 0, sizeof(dev_cap));
+       dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
        err = mlx4_dev_cap(dev, &dev_cap);
        if (err) {
                mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
                return err;
        }
 
+       err = mlx4_QUERY_FW(dev);
+       if (err)
+               mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
+
        page_size = ~dev->caps.page_size_cap + 1;
        mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
        if (page_size > PAGE_SIZE) {
@@ -485,15 +485,15 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        dev->caps.num_mgms              = 0;
        dev->caps.num_amgms             = 0;
 
-       for (i = 1; i <= dev->caps.num_ports; ++i)
-               dev->caps.port_mask[i] = dev->caps.port_type[i];
-
        if (dev->caps.num_ports > MLX4_MAX_PORTS) {
                mlx4_err(dev, "HCA has %d ports, but we only support %d, "
                         "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
                return -ENODEV;
        }
 
+       for (i = 1; i <= dev->caps.num_ports; ++i)
+               dev->caps.port_mask[i] = dev->caps.port_type[i];
+
        if (dev->caps.uar_page_size * (dev->caps.num_uars -
                                       dev->caps.reserved_uars) >
                                       pci_resource_len(dev->pdev, 2)) {
@@ -504,18 +504,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
                return -ENODEV;
        }
 
-#if 0
-       mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
-       mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
-                 dev->caps.num_uars, dev->caps.reserved_uars,
-                 dev->caps.uar_page_size * dev->caps.num_uars,
-                 pci_resource_len(dev->pdev, 2));
-       mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
-                 dev->caps.reserved_eqs);
-       mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
-                 dev->caps.num_pds, dev->caps.reserved_pds,
-                 dev->caps.slave_pd_shift, dev->caps.pd_base);
-#endif
        return 0;
 }
 
@@ -810,9 +798,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
        if (err)
                goto err_srq;
 
-       num_eqs = (mlx4_is_master(dev)) ?
-               roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
-               dev->caps.num_eqs;
+       num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
+                 dev->caps.num_eqs;
        err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
                                  cmpt_base +
                                  ((u64) (MLX4_CMPT_TYPE_EQ *
@@ -874,9 +861,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
        }
 
 
-       num_eqs = (mlx4_is_master(dev)) ?
-               roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
-               dev->caps.num_eqs;
+       num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
+                  dev->caps.num_eqs;
        err = mlx4_init_icm_table(dev, &priv->eq_table.table,
                                  init_hca->eqc_base, dev_cap->eqc_entry_sz,
                                  num_eqs, num_eqs, 0, 0);
index 86b6e5a2fabf93c7494e013dc383f5c32c650bf1..e5d20220762cf6437078062676607d0d0423779c 100644 (file)
@@ -1039,6 +1039,11 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev);
 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type);
 
+int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
 int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
@@ -1054,6 +1059,11 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox,
                            struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd);
 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
index 06e5adeb76f71840d7a34f2221c607dfcefcb723..b83bc928d52a9fa1bfc2f78957b5699979af6db5 100644 (file)
@@ -126,7 +126,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
        profile[MLX4_RES_AUXC].num    = request->num_qp;
        profile[MLX4_RES_SRQ].num     = request->num_srq;
        profile[MLX4_RES_CQ].num      = request->num_cq;
-       profile[MLX4_RES_EQ].num      = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
+       profile[MLX4_RES_EQ].num      = mlx4_is_mfunc(dev) ?
+                                       dev->phys_caps.num_phys_eqs :
+                                       min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
        profile[MLX4_RES_DMPT].num    = request->num_mpt;
        profile[MLX4_RES_CMPT].num    = MLX4_NUM_CMPTS;
        profile[MLX4_RES_MTT].num     = request->num_mtt * (1 << log_mtts_per_seg);
@@ -215,9 +217,10 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
                        init_hca->log_num_cqs = profile[i].log_num;
                        break;
                case MLX4_RES_EQ:
-                       dev->caps.num_eqs     = profile[i].num;
+                       dev->caps.num_eqs     = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs,
+                                                                        MAX_MSIX));
                        init_hca->eqc_base    = profile[i].start;
-                       init_hca->log_num_eqs = profile[i].log_num;
+                       init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
                        break;
                case MLX4_RES_DMPT:
                        dev->caps.num_mpts      = profile[i].num;
index 4de73643fec676396835c3c7582c287d8648b322..d1827e887f4e9d82ce8d46ee33cfb61052ee2816 100644 (file)
@@ -1096,20 +1096,20 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
        if (err) {
                dev_err(&pdev->dev, "32-bit PCI DMA addresses"
                                "not supported by the card\n");
-               goto err_out;
+               goto err_out_disable_dev;
        }
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&pdev->dev, "32-bit PCI DMA addresses"
                                "not supported by the card\n");
-               goto err_out;
+               goto err_out_disable_dev;
        }
 
        /* IO Size check */
        if (pci_resource_len(pdev, bar) < io_size) {
                dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
                err = -EIO;
-               goto err_out;
+               goto err_out_disable_dev;
        }
 
        pci_set_master(pdev);
@@ -1117,7 +1117,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
        dev = alloc_etherdev(sizeof(struct r6040_private));
        if (!dev) {
                err = -ENOMEM;
-               goto err_out;
+               goto err_out_disable_dev;
        }
        SET_NETDEV_DEV(dev, &pdev->dev);
        lp = netdev_priv(dev);
@@ -1233,11 +1233,15 @@ err_out_mdio_irq:
 err_out_mdio:
        mdiobus_free(lp->mii_bus);
 err_out_unmap:
+       netif_napi_del(&lp->napi);
+       pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ioaddr);
 err_out_free_res:
        pci_release_regions(pdev);
 err_out_free_dev:
        free_netdev(dev);
+err_out_disable_dev:
+       pci_disable_device(pdev);
 err_out:
        return err;
 }
@@ -1251,6 +1255,9 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
        mdiobus_unregister(lp->mii_bus);
        kfree(lp->mii_bus->irq);
        mdiobus_free(lp->mii_bus);
+       netif_napi_del(&lp->napi);
+       pci_set_drvdata(pdev, NULL);
+       pci_iounmap(pdev, lp->base);
        pci_release_regions(pdev);
        free_netdev(dev);
        pci_disable_device(pdev);
index 5eef290997f91414c7c73fc143c45518e3f7c8cd..995d0cfc4c065658ad47279a3705beb27b8c43f0 100644 (file)
@@ -979,6 +979,17 @@ static void cp_init_hw (struct cp_private *cp)
        cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
        cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
 
+       cpw32_f(HiTxRingAddr, 0);
+       cpw32_f(HiTxRingAddr + 4, 0);
+
+       ring_dma = cp->ring_dma;
+       cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+       cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+       ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+       cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+       cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
        cp_start_hw(cp);
        cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
 
@@ -992,17 +1003,6 @@ static void cp_init_hw (struct cp_private *cp)
 
        cpw8(Config5, cpr8(Config5) & PMEStatus);
 
-       cpw32_f(HiTxRingAddr, 0);
-       cpw32_f(HiTxRingAddr + 4, 0);
-
-       ring_dma = cp->ring_dma;
-       cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
-       cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
-       ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
-       cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
-       cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
        cpw16(MultiIntr, 0);
 
        cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1636,7 +1636,7 @@ static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
 
 static void eeprom_cmd_end(void __iomem *ee_addr)
 {
-       writeb (~EE_CS, ee_addr);
+       writeb(0, ee_addr);
        eeprom_delay ();
 }
 
index 03df076ed596086abbfd1dde5383badc4614ab8e..1d83565cc6af6dca0e61360c1c60394e1fb5682d 100644 (file)
@@ -1173,7 +1173,7 @@ static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_l
        }
 
        /* Terminate the EEPROM access. */
-       RTL_W8 (Cfg9346, ~EE_CS);
+       RTL_W8(Cfg9346, 0);
        eeprom_delay ();
 
        return retval;
index 00b4f56a671cac02790a2519b3f978b4c891457d..9757ce3543a08746150e73cc933bf25537f71b12 100644 (file)
@@ -6345,6 +6345,8 @@ static void __devexit rtl_remove_one(struct pci_dev *pdev)
 
        cancel_work_sync(&tp->wk.work);
 
+       netif_napi_del(&tp->napi);
+
        unregister_netdev(dev);
 
        rtl_release_firmware(tp);
@@ -6668,6 +6670,7 @@ out:
        return rc;
 
 err_out_msi_4:
+       netif_napi_del(&tp->napi);
        rtl_disable_msi(pdev, tp);
        iounmap(ioaddr);
 err_out_free_res_3:
index be3c22179161504f39eb2527b85cfa44eb2da685..667169b825263d96402b44ba22784a1696f80c82 100644 (file)
@@ -1101,8 +1101,12 @@ static int sh_eth_rx(struct net_device *ndev)
 
        /* Restart Rx engine if stopped. */
        /* If we don't need to check status, don't. -KDU */
-       if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
+       if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
+               /* fix the values for the next receiving */
+               mdp->cur_rx = mdp->dirty_rx = (sh_eth_read(ndev, RDFAR) -
+                                              sh_eth_read(ndev, RDLAR)) >> 4;
                sh_eth_write(ndev, EDRRR_R, EDRRR);
+       }
 
        return 0;
 }
@@ -1199,8 +1203,6 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                /* Receive Descriptor Empty int */
                ndev->stats.rx_over_errors++;
 
-               if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
-                       sh_eth_write(ndev, EDRRR_R, EDRRR);
                if (netif_msg_rx_err(mdp))
                        dev_err(&ndev->dev, "Receive Descriptor Empty\n");
        }
index dab9c6f671ec69a4ced1f1ea72c83b171f0ad8b0..1466e5d2af44a438e2cf04205edf908b997c6966 100644 (file)
@@ -2390,11 +2390,11 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
 
        retval = smsc911x_request_resources(pdev);
        if (retval)
-               goto out_return_resources;
+               goto out_request_resources_fail;
 
        retval = smsc911x_enable_resources(pdev);
        if (retval)
-               goto out_disable_resources;
+               goto out_enable_resources_fail;
 
        if (pdata->ioaddr == NULL) {
                SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
@@ -2501,8 +2501,9 @@ out_free_irq:
        free_irq(dev->irq, dev);
 out_disable_resources:
        (void)smsc911x_disable_resources(pdev);
-out_return_resources:
+out_enable_resources_fail:
        smsc911x_free_resources(pdev);
+out_request_resources_fail:
        platform_set_drvdata(pdev, NULL);
        iounmap(pdata->ioaddr);
        free_netdev(dev);
index b42252c4bec8903622db89641d12efca325c8277..1b173a6145d642fb3c2fe26c58c8d8b995ce50b8 100644 (file)
@@ -51,7 +51,7 @@ config TI_DAVINCI_CPDMA
 
 config TI_CPSW
        tristate "TI CPSW Switch Support"
-       depends on ARM && (ARCH_DAVINCI || SOC_OMAPAM33XX)
+       depends on ARM && (ARCH_DAVINCI || SOC_AM33XX)
        select TI_DAVINCI_CPDMA
        select TI_DAVINCI_MDIO
        ---help---
index 71e2b0523bc2db243704abab76ce8bd32a47d6d9..3ae80eccd0efd9802e5e997d53cd54946ef6d788 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
 
 #define DRIVER_VERSION "22-Dec-2011"
 #define DRIVER_NAME "asix"
@@ -321,7 +322,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                        return 0;
                }
 
-               if ((size > dev->net->mtu + ETH_HLEN) ||
+               if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
                    (size + offset > skb->len)) {
                        netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
                                   size);
index add1064f755dd4e78fb27ed5a49604a6165a2e50..03c2d8d653df1afa52ee90744f0b33ec4d7baec5 100644 (file)
@@ -629,11 +629,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        return skb->len > 0;
 }
 
+static void mcs7830_status(struct usbnet *dev, struct urb *urb)
+{
+       u8 *buf = urb->transfer_buffer;
+       bool link;
+
+       if (urb->actual_length < 16)
+               return;
+
+       link = !(buf[1] & 0x20);
+       if (netif_carrier_ok(dev->net) != link) {
+               if (link) {
+                       netif_carrier_on(dev->net);
+                       usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+               } else
+                       netif_carrier_off(dev->net);
+               netdev_dbg(dev->net, "Link Status is: %d\n", link);
+       }
+}
+
 static const struct driver_info moschip_info = {
        .description    = "MOSCHIP 7830/7832/7730 usb-NET adapter",
        .bind           = mcs7830_bind,
        .rx_fixup       = mcs7830_rx_fixup,
-       .flags          = FLAG_ETHER,
+       .flags          = FLAG_ETHER | FLAG_LINK_INTR,
+       .status         = mcs7830_status,
        .in             = 1,
        .out            = 2,
 };
@@ -642,7 +662,8 @@ static const struct driver_info sitecom_info = {
        .description    = "Sitecom LN-30 usb-NET adapter",
        .bind           = mcs7830_bind,
        .rx_fixup       = mcs7830_rx_fixup,
-       .flags          = FLAG_ETHER,
+       .flags          = FLAG_ETHER | FLAG_LINK_INTR,
+       .status         = mcs7830_status,
        .in             = 1,
        .out            = 2,
 };
index 380dbea6109de022c97865775fb401e7bf199838..3b206786b5e7d8196d2fd1e63060f17cd7101c6e 100644 (file)
@@ -547,6 +547,8 @@ static const struct usb_device_id products[] = {
        {QMI_GOBI_DEVICE(0x16d8, 0x8002)},      /* CMDTech Gobi 2000 Modem device (VU922) */
        {QMI_GOBI_DEVICE(0x05c6, 0x9205)},      /* Gobi 2000 Modem device */
        {QMI_GOBI_DEVICE(0x1199, 0x9013)},      /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+       {QMI_GOBI_DEVICE(0x1199, 0x9015)},      /* Sierra Wireless Gobi 3000 Modem device */
+       {QMI_GOBI_DEVICE(0x1199, 0x9019)},      /* Sierra Wireless Gobi 3000 Modem device */
        { }                                     /* END */
 };
 MODULE_DEVICE_TABLE(usb, products);
index 9ce6995e8d084d046beb75f80712cd1b16a4aaf3..5214b1eceb9516282cb9ae8b38f79a606da0ecb7 100644 (file)
@@ -1231,11 +1231,6 @@ static int virtnet_freeze(struct virtio_device *vdev)
        vi->config_enable = false;
        mutex_unlock(&vi->config_lock);
 
-       virtqueue_disable_cb(vi->rvq);
-       virtqueue_disable_cb(vi->svq);
-       if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
-               virtqueue_disable_cb(vi->cvq);
-
        netif_device_detach(vi->dev);
        cancel_delayed_work_sync(&vi->refill);
 
index 0ba81a66061fca201ae7917e21b1ffd6c703a91d..fbaa309300764ef791faff16d06f85a28d7c431d 100644 (file)
@@ -2415,6 +2415,22 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
 * Initialization routines *
 \*************************/
 
+static const struct ieee80211_iface_limit if_limits[] = {
+       { .max = 2048,  .types = BIT(NL80211_IFTYPE_STATION) },
+       { .max = 4,     .types =
+#ifdef CONFIG_MAC80211_MESH
+                                BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+                                BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination if_comb = {
+       .limits = if_limits,
+       .n_limits = ARRAY_SIZE(if_limits),
+       .max_interfaces = 2048,
+       .num_different_channels = 1,
+};
+
 int __devinit
 ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
 {
@@ -2436,6 +2452,9 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
                BIT(NL80211_IFTYPE_ADHOC) |
                BIT(NL80211_IFTYPE_MESH_POINT);
 
+       hw->wiphy->iface_combinations = &if_comb;
+       hw->wiphy->n_iface_combinations = 1;
+
        /* SW support for IBSS_RSN is provided by mac80211 */
        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
 
index ac53d901801deb037db4ca11137812d1df6d458f..dfb0441f406c24e59c10fac636007d7337cd4b65 100644 (file)
@@ -3809,7 +3809,7 @@ static bool is_pmu_set(struct ath_hw *ah, u32 pmu_reg, int pmu_set)
        return true;
 }
 
-static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
 {
        int internal_regulator =
                ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
index 2505ac44f0c16ff248be27b145b91cd1b90dde49..8396d150ce01d4303174aab4a1e9071d70ca09de 100644 (file)
@@ -334,4 +334,7 @@ u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
 
 unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
                                           struct ath9k_channel *chan);
+
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah);
+
 #endif
index f11d9b2677fd05753750e1311a351ac7b7a52cfe..1bd3a3d22101806aca2b06ebc9be5ee77afdddd7 100644 (file)
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -18,7 +19,7 @@
 #define INITVALS_9330_1P1_H
 
 static const u32 ar9331_1p1_baseband_postamble[][5] = {
-       /*  Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
        {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
        {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
@@ -27,10 +28,10 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
        {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
        {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
        {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
-       {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+       {0x00009e04, 0x00202020, 0x00202020, 0x00202020, 0x00202020},
        {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
        {0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
-       {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+       {0x00009e14, 0x31365d5e, 0x3136605e, 0x3136605e, 0x31365d5e},
        {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
@@ -55,7 +56,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
        {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
-       {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
+       {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
        {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
@@ -63,7 +64,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
 };
 
 static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
-       /*   Addr     5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
        {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
        {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -155,7 +156,7 @@ static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
 };
 
 static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
-       /*   Addr     5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
        {0x0000a2dc, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52},
        {0x0000a2e0, 0xffb31c84, 0xffb31c84, 0xffb31c84, 0xffb31c84},
@@ -245,7 +246,7 @@ static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
 };
 
 static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
-       /*   Addr     5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
        {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
        {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -377,14 +378,14 @@ static const u32 ar9331_1p1_radio_core[][2] = {
        {0x000160b4, 0x92480040},
        {0x000160c0, 0x006db6db},
        {0x000160c4, 0x0186db60},
-       {0x000160c8, 0x6db6db6c},
+       {0x000160c8, 0x6db4db6c},
        {0x000160cc, 0x6de6c300},
        {0x000160d0, 0x14500820},
        {0x00016100, 0x04cb0001},
        {0x00016104, 0xfff80015},
        {0x00016108, 0x00080010},
        {0x0001610c, 0x00170000},
-       {0x00016140, 0x10804000},
+       {0x00016140, 0x10800000},
        {0x00016144, 0x01884080},
        {0x00016148, 0x000080c0},
        {0x00016280, 0x01000015},
@@ -417,7 +418,7 @@ static const u32 ar9331_1p1_radio_core[][2] = {
 };
 
 static const u32 ar9331_1p1_soc_postamble[][5] = {
-       /*  Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
 };
 
@@ -691,7 +692,7 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
 };
 
 static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
-       /*  Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
        {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
        {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -783,7 +784,7 @@ static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
 };
 
 static const u32 ar9331_1p1_mac_postamble[][5] = {
-       /*  Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20  */
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
        {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
        {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
@@ -973,26 +974,27 @@ static const u32 ar9331_1p1_mac_core[][2] = {
 
 static const u32 ar9331_common_rx_gain_1p1[][2] = {
        /* Addr      allmodes  */
-       {0x0000a000, 0x00010000},
-       {0x0000a004, 0x00030002},
-       {0x0000a008, 0x00050004},
-       {0x0000a00c, 0x00810080},
-       {0x0000a010, 0x00830082},
-       {0x0000a014, 0x01810180},
-       {0x0000a018, 0x01830182},
-       {0x0000a01c, 0x01850184},
-       {0x0000a020, 0x01890188},
-       {0x0000a024, 0x018b018a},
-       {0x0000a028, 0x018d018c},
-       {0x0000a02c, 0x01910190},
-       {0x0000a030, 0x01930192},
-       {0x0000a034, 0x01950194},
-       {0x0000a038, 0x038a0196},
-       {0x0000a03c, 0x038c038b},
-       {0x0000a040, 0x0390038d},
-       {0x0000a044, 0x03920391},
-       {0x0000a048, 0x03940393},
-       {0x0000a04c, 0x03960395},
+       {0x00009e18, 0x05000000},
+       {0x0000a000, 0x00060005},
+       {0x0000a004, 0x00810080},
+       {0x0000a008, 0x00830082},
+       {0x0000a00c, 0x00850084},
+       {0x0000a010, 0x01820181},
+       {0x0000a014, 0x01840183},
+       {0x0000a018, 0x01880185},
+       {0x0000a01c, 0x018a0189},
+       {0x0000a020, 0x02850284},
+       {0x0000a024, 0x02890288},
+       {0x0000a028, 0x028b028a},
+       {0x0000a02c, 0x03850384},
+       {0x0000a030, 0x03890388},
+       {0x0000a034, 0x038b038a},
+       {0x0000a038, 0x038d038c},
+       {0x0000a03c, 0x03910390},
+       {0x0000a040, 0x03930392},
+       {0x0000a044, 0x03950394},
+       {0x0000a048, 0x00000396},
+       {0x0000a04c, 0x00000000},
        {0x0000a050, 0x00000000},
        {0x0000a054, 0x00000000},
        {0x0000a058, 0x00000000},
@@ -1005,15 +1007,15 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
        {0x0000a074, 0x00000000},
        {0x0000a078, 0x00000000},
        {0x0000a07c, 0x00000000},
-       {0x0000a080, 0x22222229},
-       {0x0000a084, 0x1d1d1d1d},
-       {0x0000a088, 0x1d1d1d1d},
-       {0x0000a08c, 0x1d1d1d1d},
-       {0x0000a090, 0x171d1d1d},
-       {0x0000a094, 0x11111717},
-       {0x0000a098, 0x00030311},
-       {0x0000a09c, 0x00000000},
-       {0x0000a0a0, 0x00000000},
+       {0x0000a080, 0x28282828},
+       {0x0000a084, 0x28282828},
+       {0x0000a088, 0x28282828},
+       {0x0000a08c, 0x28282828},
+       {0x0000a090, 0x28282828},
+       {0x0000a094, 0x24242428},
+       {0x0000a098, 0x171e1e1e},
+       {0x0000a09c, 0x02020b0b},
+       {0x0000a0a0, 0x02020202},
        {0x0000a0a4, 0x00000000},
        {0x0000a0a8, 0x00000000},
        {0x0000a0ac, 0x00000000},
@@ -1021,27 +1023,27 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
        {0x0000a0b4, 0x00000000},
        {0x0000a0b8, 0x00000000},
        {0x0000a0bc, 0x00000000},
-       {0x0000a0c0, 0x001f0000},
-       {0x0000a0c4, 0x01000101},
-       {0x0000a0c8, 0x011e011f},
-       {0x0000a0cc, 0x011c011d},
-       {0x0000a0d0, 0x02030204},
-       {0x0000a0d4, 0x02010202},
-       {0x0000a0d8, 0x021f0200},
-       {0x0000a0dc, 0x0302021e},
-       {0x0000a0e0, 0x03000301},
-       {0x0000a0e4, 0x031e031f},
-       {0x0000a0e8, 0x0402031d},
-       {0x0000a0ec, 0x04000401},
-       {0x0000a0f0, 0x041e041f},
-       {0x0000a0f4, 0x0502041d},
-       {0x0000a0f8, 0x05000501},
-       {0x0000a0fc, 0x051e051f},
-       {0x0000a100, 0x06010602},
-       {0x0000a104, 0x061f0600},
-       {0x0000a108, 0x061d061e},
-       {0x0000a10c, 0x07020703},
-       {0x0000a110, 0x07000701},
+       {0x0000a0c0, 0x22072208},
+       {0x0000a0c4, 0x22052206},
+       {0x0000a0c8, 0x22032204},
+       {0x0000a0cc, 0x22012202},
+       {0x0000a0d0, 0x221f2200},
+       {0x0000a0d4, 0x221d221e},
+       {0x0000a0d8, 0x33023303},
+       {0x0000a0dc, 0x33003301},
+       {0x0000a0e0, 0x331e331f},
+       {0x0000a0e4, 0x4402331d},
+       {0x0000a0e8, 0x44004401},
+       {0x0000a0ec, 0x441e441f},
+       {0x0000a0f0, 0x55025503},
+       {0x0000a0f4, 0x55005501},
+       {0x0000a0f8, 0x551e551f},
+       {0x0000a0fc, 0x6602551d},
+       {0x0000a100, 0x66006601},
+       {0x0000a104, 0x661e661f},
+       {0x0000a108, 0x7703661d},
+       {0x0000a10c, 0x77017702},
+       {0x0000a110, 0x00007700},
        {0x0000a114, 0x00000000},
        {0x0000a118, 0x00000000},
        {0x0000a11c, 0x00000000},
@@ -1054,26 +1056,26 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
        {0x0000a138, 0x00000000},
        {0x0000a13c, 0x00000000},
        {0x0000a140, 0x001f0000},
-       {0x0000a144, 0x01000101},
-       {0x0000a148, 0x011e011f},
-       {0x0000a14c, 0x011c011d},
-       {0x0000a150, 0x02030204},
-       {0x0000a154, 0x02010202},
-       {0x0000a158, 0x021f0200},
-       {0x0000a15c, 0x0302021e},
-       {0x0000a160, 0x03000301},
-       {0x0000a164, 0x031e031f},
-       {0x0000a168, 0x0402031d},
-       {0x0000a16c, 0x04000401},
-       {0x0000a170, 0x041e041f},
-       {0x0000a174, 0x0502041d},
-       {0x0000a178, 0x05000501},
-       {0x0000a17c, 0x051e051f},
-       {0x0000a180, 0x06010602},
-       {0x0000a184, 0x061f0600},
-       {0x0000a188, 0x061d061e},
-       {0x0000a18c, 0x07020703},
-       {0x0000a190, 0x07000701},
+       {0x0000a144, 0x111f1100},
+       {0x0000a148, 0x111d111e},
+       {0x0000a14c, 0x111b111c},
+       {0x0000a150, 0x22032204},
+       {0x0000a154, 0x22012202},
+       {0x0000a158, 0x221f2200},
+       {0x0000a15c, 0x221d221e},
+       {0x0000a160, 0x33013302},
+       {0x0000a164, 0x331f3300},
+       {0x0000a168, 0x4402331e},
+       {0x0000a16c, 0x44004401},
+       {0x0000a170, 0x441e441f},
+       {0x0000a174, 0x55015502},
+       {0x0000a178, 0x551f5500},
+       {0x0000a17c, 0x6602551e},
+       {0x0000a180, 0x66006601},
+       {0x0000a184, 0x661e661f},
+       {0x0000a188, 0x7703661d},
+       {0x0000a18c, 0x77017702},
+       {0x0000a190, 0x00007700},
        {0x0000a194, 0x00000000},
        {0x0000a198, 0x00000000},
        {0x0000a19c, 0x00000000},
@@ -1100,14 +1102,14 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
        {0x0000a1f0, 0x00000396},
        {0x0000a1f4, 0x00000396},
        {0x0000a1f8, 0x00000396},
-       {0x0000a1fc, 0x00000196},
+       {0x0000a1fc, 0x00000296},
 };
 
 static const u32 ar9331_common_tx_gain_offset1_1[][1] = {
-       {0},
-       {3},
-       {0},
-       {0},
+       {0x00000000},
+       {0x00000003},
+       {0x00000000},
+       {0x00000000},
 };
 
 static const u32 ar9331_1p1_chansel_xtal_25M[] = {
index abe05ec85d501dbeaea089a19dd444fd079b0a4f..7db1890448f20fbfe85ccccea1966633de0d84ad 100644 (file)
@@ -1468,6 +1468,9 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
                return false;
 
        ah->chip_fullsleep = false;
+
+       if (AR_SREV_9330(ah))
+               ar9003_hw_internal_regulator_apply(ah);
        ath9k_hw_init_pll(ah, chan);
        ath9k_hw_set_rfmode(ah, chan);
 
index dfa78e8b6470c02074025f5a94ff8be34058ad8e..4de4473776acd808f52eeed3994af2858897796d 100644 (file)
@@ -239,7 +239,7 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
 {
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
-       bool ret;
+       bool ret = true;
 
        ieee80211_stop_queues(sc->hw);
 
@@ -250,11 +250,12 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
        ath9k_debug_samp_bb_mac(sc);
        ath9k_hw_disable_interrupts(ah);
 
-       ret = ath_drain_all_txq(sc, retry_tx);
-
        if (!ath_stoprecv(sc))
                ret = false;
 
+       if (!ath_drain_all_txq(sc, retry_tx))
+               ret = false;
+
        if (!flush) {
                if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                        ath_rx_tasklet(sc, 1, true);
index 23eaa1b26ebe5ca9a1a242ea4de5e9fa6508b02c..d59dd01d6cdeda200280f0f856fe9ecf26e5fbcc 100644 (file)
@@ -64,7 +64,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
                                           struct ath_txq *txq,
                                           struct ath_atx_tid *tid,
-                                          struct sk_buff *skb);
+                                          struct sk_buff *skb,
+                                          bool dequeue);
 
 enum {
        MCS_HT20,
@@ -811,7 +812,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                fi = get_frame_info(skb);
                bf = fi->bf;
                if (!fi->bf)
-                       bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+                       bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
 
                if (!bf)
                        continue;
@@ -1726,7 +1727,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
                return;
        }
 
-       bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+       bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
        if (!bf)
                return;
 
@@ -1753,7 +1754,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
 
        bf = fi->bf;
        if (!bf)
-               bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+               bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
 
        if (!bf)
                return;
@@ -1814,7 +1815,8 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
                                           struct ath_txq *txq,
                                           struct ath_atx_tid *tid,
-                                          struct sk_buff *skb)
+                                          struct sk_buff *skb,
+                                          bool dequeue)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_frame_info *fi = get_frame_info(skb);
@@ -1863,6 +1865,8 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
        return bf;
 
 error:
+       if (dequeue)
+               __skb_unlink(skb, &tid->buf_q);
        dev_kfree_skb_any(skb);
        return NULL;
 }
@@ -1893,7 +1897,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
                 */
                ath_tx_send_ampdu(sc, tid, skb, txctl);
        } else {
-               bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+               bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
                if (!bf)
                        return;
 
index c5a34ffe64599e9d5852aeec83f93ab00a215901..a299d42da8e74a358939b8fa5da8a32a01fd312b 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/uaccess.h>
 #include <linux/firmware.h>
 #include <linux/usb.h>
+#include <linux/vmalloc.h>
 #include <net/cfg80211.h>
 
 #include <defs.h>
@@ -1239,7 +1240,7 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
                return -EINVAL;
        }
 
-       devinfo->image = kmalloc(fw->size, GFP_ATOMIC); /* plus nvram */
+       devinfo->image = vmalloc(fw->size); /* plus nvram */
        if (!devinfo->image)
                return -ENOMEM;
 
@@ -1603,7 +1604,7 @@ static struct usb_driver brcmf_usbdrvr = {
 void brcmf_usb_exit(void)
 {
        usb_deregister(&brcmf_usbdrvr);
-       kfree(g_image.data);
+       vfree(g_image.data);
        g_image.data = NULL;
        g_image.len = 0;
 }
index db6c6e528022635638f8aa8594c2a6872da52f9b..2463c06264387230759f14801239610a4fe4eb58 100644 (file)
@@ -137,11 +137,3 @@ config IWLWIFI_EXPERIMENTAL_MFP
          even if the microcode doesn't advertise it.
 
          Say Y only if you want to experiment with MFP.
-
-config IWLWIFI_UCODE16
-       bool "support uCode 16.0"
-       depends on IWLWIFI
-       help
-         This option enables support for uCode version 16.0.
-
-         Say Y if you want to use 16.0 microcode.
index 406f297a9a56dd27eecde87f8d027a879d337137..d615eacbf050be320d803c74fec0ace223abae73 100644 (file)
@@ -18,7 +18,6 @@ iwlwifi-objs          += iwl-notif-wait.o
 iwlwifi-objs           += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
 
 
-iwlwifi-$(CONFIG_IWLWIFI_UCODE16) += iwl-phy-db.o
 iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
index 7f793417c78740b5fd908aa976ea208d5c2c6c3f..8133105ac6450ae19ae2743019aae2a3182b1b46 100644 (file)
@@ -79,7 +79,7 @@ static const struct iwl_base_params iwl2000_base_params = {
        .chain_noise_scale = 1000,
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 512,
-       .shadow_reg_enable = true,
+       .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
        .hd_v2 = true,
 };
 
@@ -97,7 +97,7 @@ static const struct iwl_base_params iwl2030_base_params = {
        .chain_noise_scale = 1000,
        .wd_timeout = IWL_LONG_WD_TIMEOUT,
        .max_event_log_size = 512,
-       .shadow_reg_enable = true,
+       .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
        .hd_v2 = true,
 };
 
index 381b02cf339c46e0353b07bf04f201b70f49a4bf..19f7ee84ae89e2b76ba493016cb508745eaae173 100644 (file)
@@ -86,7 +86,7 @@ static const struct iwl_base_params iwl6000_base_params = {
        .chain_noise_scale = 1000,
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 512,
-       .shadow_reg_enable = true,
+       .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
 };
 
 static const struct iwl_base_params iwl6050_base_params = {
@@ -102,7 +102,7 @@ static const struct iwl_base_params iwl6050_base_params = {
        .chain_noise_scale = 1500,
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 1024,
-       .shadow_reg_enable = true,
+       .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
 };
 
 static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -118,7 +118,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
        .chain_noise_scale = 1000,
        .wd_timeout = IWL_LONG_WD_TIMEOUT,
        .max_event_log_size = 512,
-       .shadow_reg_enable = true,
+       .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
 };
 
 static const struct iwl_ht_params iwl6000_ht_params = {
index 51e1a69ffdda629ff84c7008513202d85ce2e94d..8cebd7c363fc301477cd71e5ed8b96a184d58c16 100644 (file)
@@ -884,6 +884,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
        if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
            (priv->bt_full_concurrent != full_concurrent)) {
                priv->bt_full_concurrent = full_concurrent;
+               priv->last_bt_traffic_load = priv->bt_traffic_load;
 
                /* Update uCode's rate table. */
                tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
index b31584e87bc7f4d03f2d3d2ba727879979b19156..aea07aab3c9e82c44f6b417d20794f4523e6ae70 100644 (file)
@@ -772,7 +772,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
                                                ~IWL_STA_DRIVER_ACTIVE;
                                priv->stations[i].used &=
                                                ~IWL_STA_UCODE_INPROGRESS;
-                               spin_unlock_bh(&priv->sta_lock);
+                               continue;
                        }
                        /*
                         * Rate scaling has already been initialized, send
index 3c72bad0ae56fc3d1e443f2b08a70e337910e4f0..d742900969eabc913feb5e3643b3cfdc0ddf3840 100644 (file)
@@ -657,17 +657,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
        return -EINVAL;
 }
 
-static int alloc_pci_desc(struct iwl_drv *drv,
-                         struct iwl_firmware_pieces *pieces,
-                         enum iwl_ucode_type type)
+static int iwl_alloc_ucode(struct iwl_drv *drv,
+                          struct iwl_firmware_pieces *pieces,
+                          enum iwl_ucode_type type)
 {
        int i;
        for (i = 0;
             i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
             i++)
                if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
-                                               get_sec(pieces, type, i)))
-                       return -1;
+                                     get_sec(pieces, type, i)))
+                       return -ENOMEM;
        return 0;
 }
 
@@ -825,8 +825,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
         * 1) unmodified from disk
         * 2) backup cache for save/restore during power-downs */
        for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
-               if (alloc_pci_desc(drv, &pieces, i))
-                       goto err_pci_alloc;
+               if (iwl_alloc_ucode(drv, &pieces, i))
+                       goto out_free_fw;
 
        /* Now that we can no longer fail, copy information */
 
@@ -866,7 +866,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
        drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
 
        if (!drv->op_mode)
-               goto out_unbind;
+               goto out_free_fw;
 
        return;
 
@@ -877,7 +877,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
                goto out_unbind;
        return;
 
err_pci_alloc:
out_free_fw:
        IWL_ERR(drv, "failed to allocate pci memory\n");
        iwl_dealloc_ucode(drv);
        release_firmware(ucode_raw);
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
deleted file mode 100644 (file)
index f166955..0000000
+++ /dev/null
@@ -1,288 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include "iwl-debug.h"
-#include "iwl-dev.h"
-
-#include "iwl-phy-db.h"
-
-#define CHANNEL_NUM_SIZE       4       /* num of channels in calib_ch size */
-
-struct iwl_phy_db *iwl_phy_db_init(struct device *dev)
-{
-       struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
-                                           GFP_KERNEL);
-
-       if (!phy_db)
-               return phy_db;
-
-       phy_db->dev = dev;
-
-       /* TODO: add default values of the phy db. */
-       return phy_db;
-}
-
-/*
- * get phy db section: returns a pointer to a phy db section specified by
- * type and channel group id.
- */
-static struct iwl_phy_db_entry *
-iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
-                      enum iwl_phy_db_section_type type,
-                      u16 chg_id)
-{
-       if (!phy_db || type < 0 || type >= IWL_PHY_DB_MAX)
-               return NULL;
-
-       switch (type) {
-       case IWL_PHY_DB_CFG:
-               return &phy_db->cfg;
-       case IWL_PHY_DB_CALIB_NCH:
-               return &phy_db->calib_nch;
-       case IWL_PHY_DB_CALIB_CH:
-               return &phy_db->calib_ch;
-       case IWL_PHY_DB_CALIB_CHG_PAPD:
-               if (chg_id < 0 || chg_id >= IWL_NUM_PAPD_CH_GROUPS)
-                       return NULL;
-               return &phy_db->calib_ch_group_papd[chg_id];
-       case IWL_PHY_DB_CALIB_CHG_TXP:
-               if (chg_id < 0 || chg_id >= IWL_NUM_TXP_CH_GROUPS)
-                       return NULL;
-               return &phy_db->calib_ch_group_txp[chg_id];
-       default:
-               return NULL;
-       }
-       return NULL;
-}
-
-static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
-                                   enum iwl_phy_db_section_type type,
-                                   u16 chg_id)
-{
-       struct iwl_phy_db_entry *entry =
-                               iwl_phy_db_get_section(phy_db, type, chg_id);
-       if (!entry)
-               return;
-
-       kfree(entry->data);
-       entry->data = NULL;
-       entry->size = 0;
-}
-
-void iwl_phy_db_free(struct iwl_phy_db *phy_db)
-{
-       int i;
-
-       if (!phy_db)
-               return;
-
-       iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
-       iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
-       iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
-       for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
-               iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
-       for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
-               iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
-
-       kfree(phy_db);
-}
-
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
-                          enum iwl_phy_db_section_type type, u8 *data,
-                          u16 size, gfp_t alloc_ctx)
-{
-       struct iwl_phy_db_entry *entry;
-       u16 chg_id = 0;
-
-       if (!phy_db)
-               return -EINVAL;
-
-       if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
-           type == IWL_PHY_DB_CALIB_CHG_TXP)
-               chg_id = le16_to_cpup((__le16 *)data);
-
-       entry = iwl_phy_db_get_section(phy_db, type, chg_id);
-       if (!entry)
-               return -EINVAL;
-
-       kfree(entry->data);
-       entry->data = kmemdup(data, size, alloc_ctx);
-       if (!entry->data) {
-               entry->size = 0;
-               return -ENOMEM;
-       }
-
-       entry->size = size;
-
-       if (type == IWL_PHY_DB_CALIB_CH) {
-               phy_db->channel_num = le32_to_cpup((__le32 *)data);
-               phy_db->channel_size =
-                     (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
-       }
-
-       return 0;
-}
-
-static int is_valid_channel(u16 ch_id)
-{
-       if (ch_id <= 14 ||
-           (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
-           (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
-           (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
-               return 1;
-       return 0;
-}
-
-static u8 ch_id_to_ch_index(u16 ch_id)
-{
-       if (WARN_ON(!is_valid_channel(ch_id)))
-               return 0xff;
-
-       if (ch_id <= 14)
-               return ch_id - 1;
-       if (ch_id <= 64)
-               return (ch_id + 20) / 4;
-       if (ch_id <= 140)
-               return (ch_id - 12) / 4;
-       return (ch_id - 13) / 4;
-}
-
-
-static u16 channel_id_to_papd(u16 ch_id)
-{
-       if (WARN_ON(!is_valid_channel(ch_id)))
-               return 0xff;
-
-       if (1 <= ch_id && ch_id <= 14)
-               return 0;
-       if (36 <= ch_id && ch_id <= 64)
-               return 1;
-       if (100 <= ch_id && ch_id <= 140)
-               return 2;
-       return 3;
-}
-
-static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
-{
-       struct iwl_phy_db_chg_txp *txp_chg;
-       int i;
-       u8 ch_index = ch_id_to_ch_index(ch_id);
-       if (ch_index == 0xff)
-               return 0xff;
-
-       for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
-               txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
-               if (!txp_chg)
-                       return 0xff;
-               /*
-                * Looking for the first channel group that its max channel is
-                * higher then wanted channel.
-                */
-               if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
-                       return i;
-       }
-       return 0xff;
-}
-
-int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
-                               enum iwl_phy_db_section_type type, u8 **data,
-                               u16 *size, u16 ch_id)
-{
-       struct iwl_phy_db_entry *entry;
-       u32 channel_num;
-       u32 channel_size;
-       u16 ch_group_id = 0;
-       u16 index;
-
-       if (!phy_db)
-               return -EINVAL;
-
-       /* find wanted channel group */
-       if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
-               ch_group_id = channel_id_to_papd(ch_id);
-       else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
-               ch_group_id = channel_id_to_txp(phy_db, ch_id);
-
-       entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
-       if (!entry)
-               return -EINVAL;
-
-       if (type == IWL_PHY_DB_CALIB_CH) {
-               index = ch_id_to_ch_index(ch_id);
-               channel_num = phy_db->channel_num;
-               channel_size = phy_db->channel_size;
-               if (index >= channel_num) {
-                       IWL_ERR(phy_db, "Wrong channel number %d", ch_id);
-                       return -EINVAL;
-               }
-               *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
-               *size = channel_size;
-       } else {
-               *data = entry->data;
-               *size = entry->size;
-       }
-       return 0;
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
deleted file mode 100644 (file)
index c34c6a9..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#ifndef __IWL_PHYDB_H__
-#define __IWL_PHYDB_H__
-
-#include <linux/types.h>
-
-#define IWL_NUM_PAPD_CH_GROUPS 4
-#define IWL_NUM_TXP_CH_GROUPS  8
-
-struct iwl_phy_db_entry {
-       u16     size;
-       u8      *data;
-};
-
-struct iwl_shared;
-
-/**
- * struct iwl_phy_db - stores phy configuration and calibration data.
- *
- * @cfg: phy configuration.
- * @calib_nch: non channel specific calibration data.
- * @calib_ch: channel specific calibration data.
- * @calib_ch_group_papd: calibration data related to papd channel group.
- * @calib_ch_group_txp: calibration data related to tx power chanel group.
- */
-struct iwl_phy_db {
-       struct iwl_phy_db_entry cfg;
-       struct iwl_phy_db_entry calib_nch;
-       struct iwl_phy_db_entry calib_ch;
-       struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
-       struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
-
-       u32 channel_num;
-       u32 channel_size;
-
-       /* for an access to the logger */
-       struct device *dev;
-};
-
-enum iwl_phy_db_section_type {
-       IWL_PHY_DB_CFG = 1,
-       IWL_PHY_DB_CALIB_NCH,
-       IWL_PHY_DB_CALIB_CH,
-       IWL_PHY_DB_CALIB_CHG_PAPD,
-       IWL_PHY_DB_CALIB_CHG_TXP,
-       IWL_PHY_DB_MAX
-};
-
-/* for parsing of tx power channel group data that comes from the firmware*/
-struct iwl_phy_db_chg_txp {
-       __le32 space;
-       __le16 max_channel_idx;
-} __packed;
-
-struct iwl_phy_db *iwl_phy_db_init(struct device *dev);
-
-void iwl_phy_db_free(struct iwl_phy_db *phy_db);
-
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
-                          enum iwl_phy_db_section_type type, u8 *data,
-                          u16 size, gfp_t alloc_ctx);
-
-int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
-                               enum iwl_phy_db_section_type type, u8 **data,
-                               u16 *size, u16 ch_id);
-
-#endif /* __IWL_PHYDB_H__ */
index 6213c05a4b529c6ba0f4263b83d9050afcfdd614..e959207c630a9352f143536c1a51a5de995ee96b 100644 (file)
@@ -347,7 +347,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
 void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
                                 int sta_id, int tid, int frame_limit, u16 ssn);
 void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
-       int index, enum dma_data_direction dma_dir);
+                        enum dma_data_direction dma_dir);
 int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
                         struct sk_buff_head *skbs);
 int iwl_queue_space(const struct iwl_queue *q);
index 21a8a672fbb258caae8735c61fcfbc0f09e9f426..a8750238ee09b78a99623693332e8c8c40c83a85 100644 (file)
@@ -204,33 +204,39 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
        for (i = 1; i < num_tbs; i++)
                dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
                                iwl_tfd_tb_get_len(tfd, i), dma_dir);
+
+       tfd->num_tbs = 0;
 }
 
 /**
  * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
  * @trans - transport private data
  * @txq - tx queue
- * @index - the index of the TFD to be freed
- *@dma_dir - the direction of the DMA mapping
+ * @dma_dir - the direction of the DMA mapping
  *
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
 void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
-       int index, enum dma_data_direction dma_dir)
+                        enum dma_data_direction dma_dir)
 {
        struct iwl_tfd *tfd_tmp = txq->tfds;
 
+       /* rd_ptr is bounded by n_bd and idx is bounded by n_window */
+       int rd_ptr = txq->q.read_ptr;
+       int idx = get_cmd_index(&txq->q, rd_ptr);
+
        lockdep_assert_held(&txq->lock);
 
-       iwlagn_unmap_tfd(trans, &txq->entries[index].meta,
-                        &tfd_tmp[index], dma_dir);
+       /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
+       iwlagn_unmap_tfd(trans, &txq->entries[idx].meta,
+                        &tfd_tmp[rd_ptr], dma_dir);
 
        /* free SKB */
        if (txq->entries) {
                struct sk_buff *skb;
 
-               skb = txq->entries[index].skb;
+               skb = txq->entries[idx].skb;
 
                /* Can be called from irqs-disabled context
                 * If skb is not NULL, it means that the whole queue is being
@@ -238,7 +244,7 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
                 */
                if (skb) {
                        iwl_op_mode_free_skb(trans->op_mode, skb);
-                       txq->entries[index].skb = NULL;
+                       txq->entries[idx].skb = NULL;
                }
        }
 }
@@ -973,7 +979,7 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
 
                iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
 
-               iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
+               iwlagn_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
                freed++;
        }
 
index 2e57161854b901187f40a50628db2fdf6b4c8ac0..ec6fb395b84d0aca4e7d7bfcf2e1729c3959dc5a 100644 (file)
@@ -435,9 +435,7 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
 
        spin_lock_bh(&txq->lock);
        while (q->write_ptr != q->read_ptr) {
-               /* The read_ptr needs to bound by q->n_window */
-               iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
-                                   dma_dir);
+               iwlagn_txq_free_tfd(trans, txq, dma_dir);
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
        spin_unlock_bh(&txq->lock);
index 1b851f650e074eb795ffb708dd25b3c94a2cb9e1..e2750a12c6f160a922609f775fcf65c4c81d662e 100644 (file)
@@ -260,6 +260,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
        }
 
        if (wl->irq) {
+               irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
                ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
                if (ret < 0) {
                        wl1251_error("request_irq() failed: %d", ret);
@@ -267,7 +268,6 @@ static int wl1251_sdio_probe(struct sdio_func *func,
                }
 
                irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-               disable_irq(wl->irq);
 
                wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
                wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
index 6248c354fc5c659fd840a2e7cff6186536207596..87f6305bda2cc5ced7f6377da9a3a3f9a16b72a4 100644 (file)
@@ -281,6 +281,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
 
        wl->use_eeprom = pdata->use_eeprom;
 
+       irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
        ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
        if (ret < 0) {
                wl1251_error("request_irq() failed: %d", ret);
@@ -289,8 +290,6 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
 
        irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
 
-       disable_irq(wl->irq);
-
        ret = wl1251_init_ieee80211(wl);
        if (ret)
                goto out_irq;
index 509aa881d790fa4ae7be894c7dc8fa362e161114..f3d6fa5082696c145b30b453fd027ce4cdd88a11 100644 (file)
@@ -1715,6 +1715,7 @@ out:
 
 }
 
+#ifdef CONFIG_PM
 /* Set the global behaviour of RX filters - On/Off + default action */
 int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
                                        enum rx_filter_action action)
@@ -1794,3 +1795,4 @@ out:
        kfree(acx);
        return ret;
 }
+#endif /* CONFIG_PM */
index 8106b2ebfe607dd921a87565a16f97f4339de355..e6a74869a5ff539df5589e90e73bb12d98f34747 100644 (file)
@@ -1330,9 +1330,11 @@ int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
 int wl1271_acx_fm_coex(struct wl1271 *wl);
 int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
 int wl12xx_acx_config_hangover(struct wl1271 *wl);
+
+#ifdef CONFIG_PM
 int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
                                        enum rx_filter_action action);
 int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
                             struct wl12xx_rx_filter *filter);
-
+#endif /* CONFIG_PM */
 #endif /* __WL1271_ACX_H__ */
index 1f1d9488dfb6b26a2482d88e66b957e4406b474b..d6a3c6b07827738bbc3e0f3ea0ea977e1a6e9dad 100644 (file)
@@ -279,6 +279,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
        wl12xx_rearm_rx_streaming(wl, active_hlids);
 }
 
+#ifdef CONFIG_PM
 int wl1271_rx_filter_enable(struct wl1271 *wl,
                            int index, bool enable,
                            struct wl12xx_rx_filter *filter)
@@ -314,3 +315,4 @@ void wl1271_rx_filter_clear_all(struct wl1271 *wl)
                wl1271_rx_filter_enable(wl, i, 0, NULL);
        }
 }
+#endif /* CONFIG_PM */
index 2596401308a86e24210efd65d531d91d6be60746..f4a6fcaeffb1db381ef9fd007a72578d984a3b4c 100644 (file)
@@ -325,8 +325,7 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
        unsigned int count;
        int i, copy_off;
 
-       count = DIV_ROUND_UP(
-                       offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);
+       count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
 
        copy_off = skb_headlen(skb) % PAGE_SIZE;
 
index 46f4a9f9f5e476ce90729e4386c74442abcfa05f..281f18c2fb8282670c4dd6dab4593ab4ef3cc4b8 100644 (file)
@@ -232,7 +232,7 @@ static int pn544_hci_i2c_write(struct i2c_client *client, u8 *buf, int len)
 
 static int check_crc(u8 *buf, int buflen)
 {
-       u8 len;
+       int len;
        u16 crc;
 
        len = buf[0] + 1;
index f37fbeb66a4400c2c7fa28bc45cd8be5af7c7c47..1e173f3576743a702bef6125d2fc5d70bf612d24 100644 (file)
@@ -90,8 +90,22 @@ struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
        if (!dev)
                return NULL;
 
-       return to_i2c_client(dev);
+       return i2c_verify_client(dev);
 }
 EXPORT_SYMBOL(of_find_i2c_device_by_node);
 
+/* must call put_device() when done with returned i2c_adapter device */
+struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+       struct device *dev;
+
+       dev = bus_find_device(&i2c_bus_type, NULL, node,
+                                        of_dev_node_match);
+       if (!dev)
+               return NULL;
+
+       return i2c_verify_adapter(dev);
+}
+EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
+
 MODULE_LICENSE("GPL");
index 93125163dea21fc16bcf2dd0243c7426cd351708..67705381321154546eb8b80a3280ae417a662c01 100644 (file)
@@ -15,7 +15,7 @@
  * PCI tree until an device-node is found, at which point it will finish
  * resolving using the OF tree walking.
  */
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
 {
        struct device_node *dn, *ppnode;
        struct pci_dev *ppdev;
index 8f169002dc7ec6b4581d4f7f049c35ff9ed7e8a2..447e83472c01558705d0685f7fd6af17a6f8deef 100644 (file)
@@ -2370,7 +2370,7 @@ void pci_enable_acs(struct pci_dev *dev)
  * number is always 0 (see the Implementation Note in section 2.2.8.1 of
  * the PCI Express Base Specification, Revision 2.1)
  */
-u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
 {
        int slot;
 
index b8e01c3eaa95fdd9186ced5a85a4fb1e45d65c19..b26395d16347db6dcecf40d8aa438d2409c02bd3 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/slab.h>
+#include <linux/of_device.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinmux.h>
 #include <linux/pinctrl/pinconf.h>
@@ -1688,18 +1689,34 @@ static struct pinctrl_desc nmk_pinctrl_desc = {
        .owner = THIS_MODULE,
 };
 
+static const struct of_device_id nmk_pinctrl_match[] = {
+       {
+               .compatible = "stericsson,nmk_pinctrl",
+               .data = (void *)PINCTRL_NMK_DB8500,
+       },
+       {},
+};
+
 static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
 {
        const struct platform_device_id *platid = platform_get_device_id(pdev);
+       struct device_node *np = pdev->dev.of_node;
        struct nmk_pinctrl *npct;
+       unsigned int version = 0;
        int i;
 
        npct = devm_kzalloc(&pdev->dev, sizeof(*npct), GFP_KERNEL);
        if (!npct)
                return -ENOMEM;
 
+       if (platid)
+               version = platid->driver_data;
+       else if (np)
+               version = (unsigned int)
+                       of_match_device(nmk_pinctrl_match, &pdev->dev)->data;
+
        /* Poke in other ASIC variants here */
-       if (platid->driver_data == PINCTRL_NMK_DB8500)
+       if (version == PINCTRL_NMK_DB8500)
                nmk_pinctrl_db8500_init(&npct->soc);
 
        /*
@@ -1758,6 +1775,7 @@ static struct platform_driver nmk_pinctrl_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = "pinctrl-nomadik",
+               .of_match_table = nmk_pinctrl_match,
        },
        .probe = nmk_pinctrl_probe,
        .id_table = nmk_pinctrl_id,
index 6a2596b4f359283c61476e8187f64e9ff2640813..91558791e7666fd0e1ebea3484424b4f9674ea93 100644 (file)
@@ -31,4 +31,14 @@ config PINCTRL_SPEAR320
        depends on MACH_SPEAR320
        select PINCTRL_SPEAR3XX
 
+config PINCTRL_SPEAR1310
+       bool "ST Microelectronics SPEAr1310 SoC pin controller driver"
+       depends on MACH_SPEAR1310
+       select PINCTRL_SPEAR
+
+config PINCTRL_SPEAR1340
+       bool "ST Microelectronics SPEAr1340 SoC pin controller driver"
+       depends on MACH_SPEAR1340
+       select PINCTRL_SPEAR
+
 endif
index 15dcb85da22dfa0fc070ec06bb4e9447faa62308..b28a7ba224432a656c238d40900af1cabbffc0bb 100644 (file)
@@ -5,3 +5,5 @@ obj-$(CONFIG_PINCTRL_SPEAR3XX)  += pinctrl-spear3xx.o
 obj-$(CONFIG_PINCTRL_SPEAR300) += pinctrl-spear300.o
 obj-$(CONFIG_PINCTRL_SPEAR310) += pinctrl-spear310.o
 obj-$(CONFIG_PINCTRL_SPEAR320) += pinctrl-spear320.o
+obj-$(CONFIG_PINCTRL_SPEAR1310)        += pinctrl-spear1310.o
+obj-$(CONFIG_PINCTRL_SPEAR1340)        += pinctrl-spear1340.o
index 47a6b5b72f907ade381e1b01de57c82cbf0365c1..9155783bb47fbd332648fecff577b67a0519e1f1 100644 (file)
@@ -139,4 +139,255 @@ void __devinit pmx_init_addr(struct spear_pinctrl_machdata *machdata, u16 reg);
 int __devinit spear_pinctrl_probe(struct platform_device *pdev,
                struct spear_pinctrl_machdata *machdata);
 int __devexit spear_pinctrl_remove(struct platform_device *pdev);
+
+#define SPEAR_PIN_0_TO_101             \
+       PINCTRL_PIN(0, "PLGPIO0"),      \
+       PINCTRL_PIN(1, "PLGPIO1"),      \
+       PINCTRL_PIN(2, "PLGPIO2"),      \
+       PINCTRL_PIN(3, "PLGPIO3"),      \
+       PINCTRL_PIN(4, "PLGPIO4"),      \
+       PINCTRL_PIN(5, "PLGPIO5"),      \
+       PINCTRL_PIN(6, "PLGPIO6"),      \
+       PINCTRL_PIN(7, "PLGPIO7"),      \
+       PINCTRL_PIN(8, "PLGPIO8"),      \
+       PINCTRL_PIN(9, "PLGPIO9"),      \
+       PINCTRL_PIN(10, "PLGPIO10"),    \
+       PINCTRL_PIN(11, "PLGPIO11"),    \
+       PINCTRL_PIN(12, "PLGPIO12"),    \
+       PINCTRL_PIN(13, "PLGPIO13"),    \
+       PINCTRL_PIN(14, "PLGPIO14"),    \
+       PINCTRL_PIN(15, "PLGPIO15"),    \
+       PINCTRL_PIN(16, "PLGPIO16"),    \
+       PINCTRL_PIN(17, "PLGPIO17"),    \
+       PINCTRL_PIN(18, "PLGPIO18"),    \
+       PINCTRL_PIN(19, "PLGPIO19"),    \
+       PINCTRL_PIN(20, "PLGPIO20"),    \
+       PINCTRL_PIN(21, "PLGPIO21"),    \
+       PINCTRL_PIN(22, "PLGPIO22"),    \
+       PINCTRL_PIN(23, "PLGPIO23"),    \
+       PINCTRL_PIN(24, "PLGPIO24"),    \
+       PINCTRL_PIN(25, "PLGPIO25"),    \
+       PINCTRL_PIN(26, "PLGPIO26"),    \
+       PINCTRL_PIN(27, "PLGPIO27"),    \
+       PINCTRL_PIN(28, "PLGPIO28"),    \
+       PINCTRL_PIN(29, "PLGPIO29"),    \
+       PINCTRL_PIN(30, "PLGPIO30"),    \
+       PINCTRL_PIN(31, "PLGPIO31"),    \
+       PINCTRL_PIN(32, "PLGPIO32"),    \
+       PINCTRL_PIN(33, "PLGPIO33"),    \
+       PINCTRL_PIN(34, "PLGPIO34"),    \
+       PINCTRL_PIN(35, "PLGPIO35"),    \
+       PINCTRL_PIN(36, "PLGPIO36"),    \
+       PINCTRL_PIN(37, "PLGPIO37"),    \
+       PINCTRL_PIN(38, "PLGPIO38"),    \
+       PINCTRL_PIN(39, "PLGPIO39"),    \
+       PINCTRL_PIN(40, "PLGPIO40"),    \
+       PINCTRL_PIN(41, "PLGPIO41"),    \
+       PINCTRL_PIN(42, "PLGPIO42"),    \
+       PINCTRL_PIN(43, "PLGPIO43"),    \
+       PINCTRL_PIN(44, "PLGPIO44"),    \
+       PINCTRL_PIN(45, "PLGPIO45"),    \
+       PINCTRL_PIN(46, "PLGPIO46"),    \
+       PINCTRL_PIN(47, "PLGPIO47"),    \
+       PINCTRL_PIN(48, "PLGPIO48"),    \
+       PINCTRL_PIN(49, "PLGPIO49"),    \
+       PINCTRL_PIN(50, "PLGPIO50"),    \
+       PINCTRL_PIN(51, "PLGPIO51"),    \
+       PINCTRL_PIN(52, "PLGPIO52"),    \
+       PINCTRL_PIN(53, "PLGPIO53"),    \
+       PINCTRL_PIN(54, "PLGPIO54"),    \
+       PINCTRL_PIN(55, "PLGPIO55"),    \
+       PINCTRL_PIN(56, "PLGPIO56"),    \
+       PINCTRL_PIN(57, "PLGPIO57"),    \
+       PINCTRL_PIN(58, "PLGPIO58"),    \
+       PINCTRL_PIN(59, "PLGPIO59"),    \
+       PINCTRL_PIN(60, "PLGPIO60"),    \
+       PINCTRL_PIN(61, "PLGPIO61"),    \
+       PINCTRL_PIN(62, "PLGPIO62"),    \
+       PINCTRL_PIN(63, "PLGPIO63"),    \
+       PINCTRL_PIN(64, "PLGPIO64"),    \
+       PINCTRL_PIN(65, "PLGPIO65"),    \
+       PINCTRL_PIN(66, "PLGPIO66"),    \
+       PINCTRL_PIN(67, "PLGPIO67"),    \
+       PINCTRL_PIN(68, "PLGPIO68"),    \
+       PINCTRL_PIN(69, "PLGPIO69"),    \
+       PINCTRL_PIN(70, "PLGPIO70"),    \
+       PINCTRL_PIN(71, "PLGPIO71"),    \
+       PINCTRL_PIN(72, "PLGPIO72"),    \
+       PINCTRL_PIN(73, "PLGPIO73"),    \
+       PINCTRL_PIN(74, "PLGPIO74"),    \
+       PINCTRL_PIN(75, "PLGPIO75"),    \
+       PINCTRL_PIN(76, "PLGPIO76"),    \
+       PINCTRL_PIN(77, "PLGPIO77"),    \
+       PINCTRL_PIN(78, "PLGPIO78"),    \
+       PINCTRL_PIN(79, "PLGPIO79"),    \
+       PINCTRL_PIN(80, "PLGPIO80"),    \
+       PINCTRL_PIN(81, "PLGPIO81"),    \
+       PINCTRL_PIN(82, "PLGPIO82"),    \
+       PINCTRL_PIN(83, "PLGPIO83"),    \
+       PINCTRL_PIN(84, "PLGPIO84"),    \
+       PINCTRL_PIN(85, "PLGPIO85"),    \
+       PINCTRL_PIN(86, "PLGPIO86"),    \
+       PINCTRL_PIN(87, "PLGPIO87"),    \
+       PINCTRL_PIN(88, "PLGPIO88"),    \
+       PINCTRL_PIN(89, "PLGPIO89"),    \
+       PINCTRL_PIN(90, "PLGPIO90"),    \
+       PINCTRL_PIN(91, "PLGPIO91"),    \
+       PINCTRL_PIN(92, "PLGPIO92"),    \
+       PINCTRL_PIN(93, "PLGPIO93"),    \
+       PINCTRL_PIN(94, "PLGPIO94"),    \
+       PINCTRL_PIN(95, "PLGPIO95"),    \
+       PINCTRL_PIN(96, "PLGPIO96"),    \
+       PINCTRL_PIN(97, "PLGPIO97"),    \
+       PINCTRL_PIN(98, "PLGPIO98"),    \
+       PINCTRL_PIN(99, "PLGPIO99"),    \
+       PINCTRL_PIN(100, "PLGPIO100"),  \
+       PINCTRL_PIN(101, "PLGPIO101")
+
+#define SPEAR_PIN_102_TO_245           \
+       PINCTRL_PIN(102, "PLGPIO102"),  \
+       PINCTRL_PIN(103, "PLGPIO103"),  \
+       PINCTRL_PIN(104, "PLGPIO104"),  \
+       PINCTRL_PIN(105, "PLGPIO105"),  \
+       PINCTRL_PIN(106, "PLGPIO106"),  \
+       PINCTRL_PIN(107, "PLGPIO107"),  \
+       PINCTRL_PIN(108, "PLGPIO108"),  \
+       PINCTRL_PIN(109, "PLGPIO109"),  \
+       PINCTRL_PIN(110, "PLGPIO110"),  \
+       PINCTRL_PIN(111, "PLGPIO111"),  \
+       PINCTRL_PIN(112, "PLGPIO112"),  \
+       PINCTRL_PIN(113, "PLGPIO113"),  \
+       PINCTRL_PIN(114, "PLGPIO114"),  \
+       PINCTRL_PIN(115, "PLGPIO115"),  \
+       PINCTRL_PIN(116, "PLGPIO116"),  \
+       PINCTRL_PIN(117, "PLGPIO117"),  \
+       PINCTRL_PIN(118, "PLGPIO118"),  \
+       PINCTRL_PIN(119, "PLGPIO119"),  \
+       PINCTRL_PIN(120, "PLGPIO120"),  \
+       PINCTRL_PIN(121, "PLGPIO121"),  \
+       PINCTRL_PIN(122, "PLGPIO122"),  \
+       PINCTRL_PIN(123, "PLGPIO123"),  \
+       PINCTRL_PIN(124, "PLGPIO124"),  \
+       PINCTRL_PIN(125, "PLGPIO125"),  \
+       PINCTRL_PIN(126, "PLGPIO126"),  \
+       PINCTRL_PIN(127, "PLGPIO127"),  \
+       PINCTRL_PIN(128, "PLGPIO128"),  \
+       PINCTRL_PIN(129, "PLGPIO129"),  \
+       PINCTRL_PIN(130, "PLGPIO130"),  \
+       PINCTRL_PIN(131, "PLGPIO131"),  \
+       PINCTRL_PIN(132, "PLGPIO132"),  \
+       PINCTRL_PIN(133, "PLGPIO133"),  \
+       PINCTRL_PIN(134, "PLGPIO134"),  \
+       PINCTRL_PIN(135, "PLGPIO135"),  \
+       PINCTRL_PIN(136, "PLGPIO136"),  \
+       PINCTRL_PIN(137, "PLGPIO137"),  \
+       PINCTRL_PIN(138, "PLGPIO138"),  \
+       PINCTRL_PIN(139, "PLGPIO139"),  \
+       PINCTRL_PIN(140, "PLGPIO140"),  \
+       PINCTRL_PIN(141, "PLGPIO141"),  \
+       PINCTRL_PIN(142, "PLGPIO142"),  \
+       PINCTRL_PIN(143, "PLGPIO143"),  \
+       PINCTRL_PIN(144, "PLGPIO144"),  \
+       PINCTRL_PIN(145, "PLGPIO145"),  \
+       PINCTRL_PIN(146, "PLGPIO146"),  \
+       PINCTRL_PIN(147, "PLGPIO147"),  \
+       PINCTRL_PIN(148, "PLGPIO148"),  \
+       PINCTRL_PIN(149, "PLGPIO149"),  \
+       PINCTRL_PIN(150, "PLGPIO150"),  \
+       PINCTRL_PIN(151, "PLGPIO151"),  \
+       PINCTRL_PIN(152, "PLGPIO152"),  \
+       PINCTRL_PIN(153, "PLGPIO153"),  \
+       PINCTRL_PIN(154, "PLGPIO154"),  \
+       PINCTRL_PIN(155, "PLGPIO155"),  \
+       PINCTRL_PIN(156, "PLGPIO156"),  \
+       PINCTRL_PIN(157, "PLGPIO157"),  \
+       PINCTRL_PIN(158, "PLGPIO158"),  \
+       PINCTRL_PIN(159, "PLGPIO159"),  \
+       PINCTRL_PIN(160, "PLGPIO160"),  \
+       PINCTRL_PIN(161, "PLGPIO161"),  \
+       PINCTRL_PIN(162, "PLGPIO162"),  \
+       PINCTRL_PIN(163, "PLGPIO163"),  \
+       PINCTRL_PIN(164, "PLGPIO164"),  \
+       PINCTRL_PIN(165, "PLGPIO165"),  \
+       PINCTRL_PIN(166, "PLGPIO166"),  \
+       PINCTRL_PIN(167, "PLGPIO167"),  \
+       PINCTRL_PIN(168, "PLGPIO168"),  \
+       PINCTRL_PIN(169, "PLGPIO169"),  \
+       PINCTRL_PIN(170, "PLGPIO170"),  \
+       PINCTRL_PIN(171, "PLGPIO171"),  \
+       PINCTRL_PIN(172, "PLGPIO172"),  \
+       PINCTRL_PIN(173, "PLGPIO173"),  \
+       PINCTRL_PIN(174, "PLGPIO174"),  \
+       PINCTRL_PIN(175, "PLGPIO175"),  \
+       PINCTRL_PIN(176, "PLGPIO176"),  \
+       PINCTRL_PIN(177, "PLGPIO177"),  \
+       PINCTRL_PIN(178, "PLGPIO178"),  \
+       PINCTRL_PIN(179, "PLGPIO179"),  \
+       PINCTRL_PIN(180, "PLGPIO180"),  \
+       PINCTRL_PIN(181, "PLGPIO181"),  \
+       PINCTRL_PIN(182, "PLGPIO182"),  \
+       PINCTRL_PIN(183, "PLGPIO183"),  \
+       PINCTRL_PIN(184, "PLGPIO184"),  \
+       PINCTRL_PIN(185, "PLGPIO185"),  \
+       PINCTRL_PIN(186, "PLGPIO186"),  \
+       PINCTRL_PIN(187, "PLGPIO187"),  \
+       PINCTRL_PIN(188, "PLGPIO188"),  \
+       PINCTRL_PIN(189, "PLGPIO189"),  \
+       PINCTRL_PIN(190, "PLGPIO190"),  \
+       PINCTRL_PIN(191, "PLGPIO191"),  \
+       PINCTRL_PIN(192, "PLGPIO192"),  \
+       PINCTRL_PIN(193, "PLGPIO193"),  \
+       PINCTRL_PIN(194, "PLGPIO194"),  \
+       PINCTRL_PIN(195, "PLGPIO195"),  \
+       PINCTRL_PIN(196, "PLGPIO196"),  \
+       PINCTRL_PIN(197, "PLGPIO197"),  \
+       PINCTRL_PIN(198, "PLGPIO198"),  \
+       PINCTRL_PIN(199, "PLGPIO199"),  \
+       PINCTRL_PIN(200, "PLGPIO200"),  \
+       PINCTRL_PIN(201, "PLGPIO201"),  \
+       PINCTRL_PIN(202, "PLGPIO202"),  \
+       PINCTRL_PIN(203, "PLGPIO203"),  \
+       PINCTRL_PIN(204, "PLGPIO204"),  \
+       PINCTRL_PIN(205, "PLGPIO205"),  \
+       PINCTRL_PIN(206, "PLGPIO206"),  \
+       PINCTRL_PIN(207, "PLGPIO207"),  \
+       PINCTRL_PIN(208, "PLGPIO208"),  \
+       PINCTRL_PIN(209, "PLGPIO209"),  \
+       PINCTRL_PIN(210, "PLGPIO210"),  \
+       PINCTRL_PIN(211, "PLGPIO211"),  \
+       PINCTRL_PIN(212, "PLGPIO212"),  \
+       PINCTRL_PIN(213, "PLGPIO213"),  \
+       PINCTRL_PIN(214, "PLGPIO214"),  \
+       PINCTRL_PIN(215, "PLGPIO215"),  \
+       PINCTRL_PIN(216, "PLGPIO216"),  \
+       PINCTRL_PIN(217, "PLGPIO217"),  \
+       PINCTRL_PIN(218, "PLGPIO218"),  \
+       PINCTRL_PIN(219, "PLGPIO219"),  \
+       PINCTRL_PIN(220, "PLGPIO220"),  \
+       PINCTRL_PIN(221, "PLGPIO221"),  \
+       PINCTRL_PIN(222, "PLGPIO222"),  \
+       PINCTRL_PIN(223, "PLGPIO223"),  \
+       PINCTRL_PIN(224, "PLGPIO224"),  \
+       PINCTRL_PIN(225, "PLGPIO225"),  \
+       PINCTRL_PIN(226, "PLGPIO226"),  \
+       PINCTRL_PIN(227, "PLGPIO227"),  \
+       PINCTRL_PIN(228, "PLGPIO228"),  \
+       PINCTRL_PIN(229, "PLGPIO229"),  \
+       PINCTRL_PIN(230, "PLGPIO230"),  \
+       PINCTRL_PIN(231, "PLGPIO231"),  \
+       PINCTRL_PIN(232, "PLGPIO232"),  \
+       PINCTRL_PIN(233, "PLGPIO233"),  \
+       PINCTRL_PIN(234, "PLGPIO234"),  \
+       PINCTRL_PIN(235, "PLGPIO235"),  \
+       PINCTRL_PIN(236, "PLGPIO236"),  \
+       PINCTRL_PIN(237, "PLGPIO237"),  \
+       PINCTRL_PIN(238, "PLGPIO238"),  \
+       PINCTRL_PIN(239, "PLGPIO239"),  \
+       PINCTRL_PIN(240, "PLGPIO240"),  \
+       PINCTRL_PIN(241, "PLGPIO241"),  \
+       PINCTRL_PIN(242, "PLGPIO242"),  \
+       PINCTRL_PIN(243, "PLGPIO243"),  \
+       PINCTRL_PIN(244, "PLGPIO244"),  \
+       PINCTRL_PIN(245, "PLGPIO245")
+
 #endif /* __PINMUX_SPEAR_H__ */
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
new file mode 100644 (file)
index 0000000..fff168b
--- /dev/null
@@ -0,0 +1,2198 @@
+/*
+ * Driver for the ST Microelectronics SPEAr1310 pinmux
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "pinctrl-spear.h"
+
+#define DRIVER_NAME "spear1310-pinmux"
+
+/* pins */
+static const struct pinctrl_pin_desc spear1310_pins[] = {
+       SPEAR_PIN_0_TO_101,
+       SPEAR_PIN_102_TO_245,
+};
+
+/* registers */
+#define PERIP_CFG                                      0x32C
+       #define MCIF_SEL_SHIFT                          3
+       #define MCIF_SEL_SD                             (0x1 << MCIF_SEL_SHIFT)
+       #define MCIF_SEL_CF                             (0x2 << MCIF_SEL_SHIFT)
+       #define MCIF_SEL_XD                             (0x3 << MCIF_SEL_SHIFT)
+       #define MCIF_SEL_MASK                           (0x3 << MCIF_SEL_SHIFT)
+
+#define PCIE_SATA_CFG                                  0x3A4
+       #define PCIE_SATA2_SEL_PCIE                     (0 << 31)
+       #define PCIE_SATA1_SEL_PCIE                     (0 << 30)
+       #define PCIE_SATA0_SEL_PCIE                     (0 << 29)
+       #define PCIE_SATA2_SEL_SATA                     (1U << 31)
+       #define PCIE_SATA1_SEL_SATA                     (1 << 30)
+       #define PCIE_SATA0_SEL_SATA                     (1 << 29)
+       #define SATA2_CFG_TX_CLK_EN                     (1 << 27)
+       #define SATA2_CFG_RX_CLK_EN                     (1 << 26)
+       #define SATA2_CFG_POWERUP_RESET                 (1 << 25)
+       #define SATA2_CFG_PM_CLK_EN                     (1 << 24)
+       #define SATA1_CFG_TX_CLK_EN                     (1 << 23)
+       #define SATA1_CFG_RX_CLK_EN                     (1 << 22)
+       #define SATA1_CFG_POWERUP_RESET                 (1 << 21)
+       #define SATA1_CFG_PM_CLK_EN                     (1 << 20)
+       #define SATA0_CFG_TX_CLK_EN                     (1 << 19)
+       #define SATA0_CFG_RX_CLK_EN                     (1 << 18)
+       #define SATA0_CFG_POWERUP_RESET                 (1 << 17)
+       #define SATA0_CFG_PM_CLK_EN                     (1 << 16)
+       #define PCIE2_CFG_DEVICE_PRESENT                (1 << 11)
+       #define PCIE2_CFG_POWERUP_RESET                 (1 << 10)
+       #define PCIE2_CFG_CORE_CLK_EN                   (1 << 9)
+       #define PCIE2_CFG_AUX_CLK_EN                    (1 << 8)
+       #define PCIE1_CFG_DEVICE_PRESENT                (1 << 7)
+       #define PCIE1_CFG_POWERUP_RESET                 (1 << 6)
+       #define PCIE1_CFG_CORE_CLK_EN                   (1 << 5)
+       #define PCIE1_CFG_AUX_CLK_EN                    (1 << 4)
+       #define PCIE0_CFG_DEVICE_PRESENT                (1 << 3)
+       #define PCIE0_CFG_POWERUP_RESET                 (1 << 2)
+       #define PCIE0_CFG_CORE_CLK_EN                   (1 << 1)
+       #define PCIE0_CFG_AUX_CLK_EN                    (1 << 0)
+
+#define PAD_FUNCTION_EN_0                              0x650
+       #define PMX_UART0_MASK                          (1 << 1)
+       #define PMX_I2C0_MASK                           (1 << 2)
+       #define PMX_I2S0_MASK                           (1 << 3)
+       #define PMX_SSP0_MASK                           (1 << 4)
+       #define PMX_CLCD1_MASK                          (1 << 5)
+       #define PMX_EGPIO00_MASK                        (1 << 6)
+       #define PMX_EGPIO01_MASK                        (1 << 7)
+       #define PMX_EGPIO02_MASK                        (1 << 8)
+       #define PMX_EGPIO03_MASK                        (1 << 9)
+       #define PMX_EGPIO04_MASK                        (1 << 10)
+       #define PMX_EGPIO05_MASK                        (1 << 11)
+       #define PMX_EGPIO06_MASK                        (1 << 12)
+       #define PMX_EGPIO07_MASK                        (1 << 13)
+       #define PMX_EGPIO08_MASK                        (1 << 14)
+       #define PMX_EGPIO09_MASK                        (1 << 15)
+       #define PMX_SMI_MASK                            (1 << 16)
+       #define PMX_NAND8_MASK                          (1 << 17)
+       #define PMX_GMIICLK_MASK                        (1 << 18)
+       #define PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK    (1 << 19)
+       #define PMX_RXCLK_RDV_TXEN_D03_MASK             (1 << 20)
+       #define PMX_GMIID47_MASK                        (1 << 21)
+       #define PMX_MDC_MDIO_MASK                       (1 << 22)
+       #define PMX_MCI_DATA8_15_MASK                   (1 << 23)
+       #define PMX_NFAD23_MASK                         (1 << 24)
+       #define PMX_NFAD24_MASK                         (1 << 25)
+       #define PMX_NFAD25_MASK                         (1 << 26)
+       #define PMX_NFCE3_MASK                          (1 << 27)
+       #define PMX_NFWPRT3_MASK                        (1 << 28)
+       #define PMX_NFRSTPWDWN0_MASK                    (1 << 29)
+       #define PMX_NFRSTPWDWN1_MASK                    (1 << 30)
+       #define PMX_NFRSTPWDWN2_MASK                    (1U << 31)
+
+#define PAD_FUNCTION_EN_1                              0x654
+       #define PMX_NFRSTPWDWN3_MASK                    (1 << 0)
+       #define PMX_SMINCS2_MASK                        (1 << 1)
+       #define PMX_SMINCS3_MASK                        (1 << 2)
+       #define PMX_CLCD2_MASK                          (1 << 3)
+       #define PMX_KBD_ROWCOL68_MASK                   (1 << 4)
+       #define PMX_EGPIO10_MASK                        (1 << 5)
+       #define PMX_EGPIO11_MASK                        (1 << 6)
+       #define PMX_EGPIO12_MASK                        (1 << 7)
+       #define PMX_EGPIO13_MASK                        (1 << 8)
+       #define PMX_EGPIO14_MASK                        (1 << 9)
+       #define PMX_EGPIO15_MASK                        (1 << 10)
+       #define PMX_UART0_MODEM_MASK                    (1 << 11)
+       #define PMX_GPT0_TMR0_MASK                      (1 << 12)
+       #define PMX_GPT0_TMR1_MASK                      (1 << 13)
+       #define PMX_GPT1_TMR0_MASK                      (1 << 14)
+       #define PMX_GPT1_TMR1_MASK                      (1 << 15)
+       #define PMX_I2S1_MASK                           (1 << 16)
+       #define PMX_KBD_ROWCOL25_MASK                   (1 << 17)
+       #define PMX_NFIO8_15_MASK                       (1 << 18)
+       #define PMX_KBD_COL1_MASK                       (1 << 19)
+       #define PMX_NFCE1_MASK                          (1 << 20)
+       #define PMX_KBD_COL0_MASK                       (1 << 21)
+       #define PMX_NFCE2_MASK                          (1 << 22)
+       #define PMX_KBD_ROW1_MASK                       (1 << 23)
+       #define PMX_NFWPRT1_MASK                        (1 << 24)
+       #define PMX_KBD_ROW0_MASK                       (1 << 25)
+       #define PMX_NFWPRT2_MASK                        (1 << 26)
+       #define PMX_MCIDATA0_MASK                       (1 << 27)
+       #define PMX_MCIDATA1_MASK                       (1 << 28)
+       #define PMX_MCIDATA2_MASK                       (1 << 29)
+       #define PMX_MCIDATA3_MASK                       (1 << 30)
+       #define PMX_MCIDATA4_MASK                       (1U << 31)
+
+#define PAD_FUNCTION_EN_2                              0x658
+       #define PMX_MCIDATA5_MASK                       (1 << 0)
+       #define PMX_MCIDATA6_MASK                       (1 << 1)
+       #define PMX_MCIDATA7_MASK                       (1 << 2)
+       #define PMX_MCIDATA1SD_MASK                     (1 << 3)
+       #define PMX_MCIDATA2SD_MASK                     (1 << 4)
+       #define PMX_MCIDATA3SD_MASK                     (1 << 5)
+       #define PMX_MCIADDR0ALE_MASK                    (1 << 6)
+       #define PMX_MCIADDR1CLECLK_MASK                 (1 << 7)
+       #define PMX_MCIADDR2_MASK                       (1 << 8)
+       #define PMX_MCICECF_MASK                        (1 << 9)
+       #define PMX_MCICEXD_MASK                        (1 << 10)
+       #define PMX_MCICESDMMC_MASK                     (1 << 11)
+       #define PMX_MCICDCF1_MASK                       (1 << 12)
+       #define PMX_MCICDCF2_MASK                       (1 << 13)
+       #define PMX_MCICDXD_MASK                        (1 << 14)
+       #define PMX_MCICDSDMMC_MASK                     (1 << 15)
+       #define PMX_MCIDATADIR_MASK                     (1 << 16)
+       #define PMX_MCIDMARQWP_MASK                     (1 << 17)
+       #define PMX_MCIIORDRE_MASK                      (1 << 18)
+       #define PMX_MCIIOWRWE_MASK                      (1 << 19)
+       #define PMX_MCIRESETCF_MASK                     (1 << 20)
+       #define PMX_MCICS0CE_MASK                       (1 << 21)
+       #define PMX_MCICFINTR_MASK                      (1 << 22)
+       #define PMX_MCIIORDY_MASK                       (1 << 23)
+       #define PMX_MCICS1_MASK                         (1 << 24)
+       #define PMX_MCIDMAACK_MASK                      (1 << 25)
+       #define PMX_MCISDCMD_MASK                       (1 << 26)
+       #define PMX_MCILEDS_MASK                        (1 << 27)
+       #define PMX_TOUCH_XY_MASK                       (1 << 28)
+       #define PMX_SSP0_CS0_MASK                       (1 << 29)
+       #define PMX_SSP0_CS1_2_MASK                     (1 << 30)
+
+/* combined macros */
+#define PMX_GMII_MASK          (PMX_GMIICLK_MASK |                     \
+                               PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK |  \
+                               PMX_RXCLK_RDV_TXEN_D03_MASK |           \
+                               PMX_GMIID47_MASK | PMX_MDC_MDIO_MASK)
+
+#define PMX_EGPIO_0_GRP_MASK   (PMX_EGPIO00_MASK | PMX_EGPIO01_MASK |  \
+                               PMX_EGPIO02_MASK |                      \
+                               PMX_EGPIO03_MASK | PMX_EGPIO04_MASK |   \
+                               PMX_EGPIO05_MASK | PMX_EGPIO06_MASK |   \
+                               PMX_EGPIO07_MASK | PMX_EGPIO08_MASK |   \
+                               PMX_EGPIO09_MASK)
+#define PMX_EGPIO_1_GRP_MASK   (PMX_EGPIO10_MASK | PMX_EGPIO11_MASK |  \
+                               PMX_EGPIO12_MASK | PMX_EGPIO13_MASK |   \
+                               PMX_EGPIO14_MASK | PMX_EGPIO15_MASK)
+
+#define PMX_KEYBOARD_6X6_MASK  (PMX_KBD_ROW0_MASK | PMX_KBD_ROW1_MASK | \
+                               PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL0_MASK | \
+                               PMX_KBD_COL1_MASK)
+
+#define PMX_NAND8BIT_0_MASK    (PMX_NAND8_MASK | PMX_NFAD23_MASK |     \
+                               PMX_NFAD24_MASK | PMX_NFAD25_MASK |     \
+                               PMX_NFWPRT3_MASK | PMX_NFRSTPWDWN0_MASK | \
+                               PMX_NFRSTPWDWN1_MASK | PMX_NFRSTPWDWN2_MASK | \
+                               PMX_NFCE3_MASK)
+#define PMX_NAND8BIT_1_MASK    PMX_NFRSTPWDWN3_MASK
+
+#define PMX_NAND16BIT_1_MASK   (PMX_KBD_ROWCOL25_MASK | PMX_NFIO8_15_MASK)
+#define PMX_NAND_4CHIPS_MASK   (PMX_NFCE1_MASK | PMX_NFCE2_MASK |      \
+                               PMX_NFWPRT1_MASK | PMX_NFWPRT2_MASK |   \
+                               PMX_KBD_ROW0_MASK | PMX_KBD_ROW1_MASK | \
+                               PMX_KBD_COL0_MASK | PMX_KBD_COL1_MASK)
+
+#define PMX_MCIFALL_1_MASK     0xF8000000
+#define PMX_MCIFALL_2_MASK     0x0FFFFFFF
+
+#define PMX_PCI_REG1_MASK      (PMX_SMINCS2_MASK | PMX_SMINCS3_MASK |  \
+                               PMX_CLCD2_MASK | PMX_KBD_ROWCOL68_MASK | \
+                               PMX_EGPIO_1_GRP_MASK | PMX_GPT0_TMR0_MASK | \
+                               PMX_GPT0_TMR1_MASK | PMX_GPT1_TMR0_MASK | \
+                               PMX_GPT1_TMR1_MASK | PMX_I2S1_MASK |    \
+                               PMX_NFCE2_MASK)
+#define PMX_PCI_REG2_MASK      (PMX_TOUCH_XY_MASK | PMX_SSP0_CS0_MASK | \
+                               PMX_SSP0_CS1_2_MASK)
+
+#define PMX_SMII_0_1_2_MASK    (PMX_CLCD2_MASK | PMX_KBD_ROWCOL68_MASK)
+#define PMX_RGMII_REG0_MASK    (PMX_MCI_DATA8_15_MASK |                \
+                               PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK |  \
+                               PMX_GMIID47_MASK)
+#define PMX_RGMII_REG1_MASK    (PMX_KBD_ROWCOL68_MASK | PMX_EGPIO_1_GRP_MASK |\
+                               PMX_KBD_ROW1_MASK | PMX_NFWPRT1_MASK |  \
+                               PMX_KBD_ROW0_MASK | PMX_NFWPRT2_MASK)
+#define PMX_RGMII_REG2_MASK    (PMX_TOUCH_XY_MASK | PMX_SSP0_CS0_MASK | \
+                               PMX_SSP0_CS1_2_MASK)
+
+#define PCIE_CFG_VAL(x)                (PCIE_SATA##x##_SEL_PCIE |      \
+                               PCIE##x##_CFG_AUX_CLK_EN |      \
+                               PCIE##x##_CFG_CORE_CLK_EN |     \
+                               PCIE##x##_CFG_POWERUP_RESET |   \
+                               PCIE##x##_CFG_DEVICE_PRESENT)
+#define SATA_CFG_VAL(x)                (PCIE_SATA##x##_SEL_SATA |      \
+                               SATA##x##_CFG_PM_CLK_EN |       \
+                               SATA##x##_CFG_POWERUP_RESET |   \
+                               SATA##x##_CFG_RX_CLK_EN |       \
+                               SATA##x##_CFG_TX_CLK_EN)
+
+/* Pad multiplexing for i2c0 device */
+static const unsigned i2c0_pins[] = { 102, 103 };
+static struct spear_muxreg i2c0_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_I2C0_MASK,
+               .val = PMX_I2C0_MASK,
+       },
+};
+
+static struct spear_modemux i2c0_modemux[] = {
+       {
+               .muxregs = i2c0_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2c0_muxreg),
+       },
+};
+
+static struct spear_pingroup i2c0_pingroup = {
+       .name = "i2c0_grp",
+       .pins = i2c0_pins,
+       .npins = ARRAY_SIZE(i2c0_pins),
+       .modemuxs = i2c0_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2c0_modemux),
+};
+
+static const char *const i2c0_grps[] = { "i2c0_grp" };
+static struct spear_function i2c0_function = {
+       .name = "i2c0",
+       .groups = i2c0_grps,
+       .ngroups = ARRAY_SIZE(i2c0_grps),
+};
+
+/* Pad multiplexing for ssp0 device */
+static const unsigned ssp0_pins[] = { 109, 110, 111, 112 };
+static struct spear_muxreg ssp0_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_SSP0_MASK,
+               .val = PMX_SSP0_MASK,
+       },
+};
+
+static struct spear_modemux ssp0_modemux[] = {
+       {
+               .muxregs = ssp0_muxreg,
+               .nmuxregs = ARRAY_SIZE(ssp0_muxreg),
+       },
+};
+
+static struct spear_pingroup ssp0_pingroup = {
+       .name = "ssp0_grp",
+       .pins = ssp0_pins,
+       .npins = ARRAY_SIZE(ssp0_pins),
+       .modemuxs = ssp0_modemux,
+       .nmodemuxs = ARRAY_SIZE(ssp0_modemux),
+};
+
+/* Pad multiplexing for ssp0_cs0 device */
+static const unsigned ssp0_cs0_pins[] = { 96 };
+static struct spear_muxreg ssp0_cs0_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = PMX_SSP0_CS0_MASK,
+               .val = PMX_SSP0_CS0_MASK,
+       },
+};
+
+static struct spear_modemux ssp0_cs0_modemux[] = {
+       {
+               .muxregs = ssp0_cs0_muxreg,
+               .nmuxregs = ARRAY_SIZE(ssp0_cs0_muxreg),
+       },
+};
+
+static struct spear_pingroup ssp0_cs0_pingroup = {
+       .name = "ssp0_cs0_grp",
+       .pins = ssp0_cs0_pins,
+       .npins = ARRAY_SIZE(ssp0_cs0_pins),
+       .modemuxs = ssp0_cs0_modemux,
+       .nmodemuxs = ARRAY_SIZE(ssp0_cs0_modemux),
+};
+
+/* ssp0_cs1_2 device */
+static const unsigned ssp0_cs1_2_pins[] = { 94, 95 };
+static struct spear_muxreg ssp0_cs1_2_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = PMX_SSP0_CS1_2_MASK,
+               .val = PMX_SSP0_CS1_2_MASK,
+       },
+};
+
+static struct spear_modemux ssp0_cs1_2_modemux[] = {
+       {
+               .muxregs = ssp0_cs1_2_muxreg,
+               .nmuxregs = ARRAY_SIZE(ssp0_cs1_2_muxreg),
+       },
+};
+
+static struct spear_pingroup ssp0_cs1_2_pingroup = {
+       .name = "ssp0_cs1_2_grp",
+       .pins = ssp0_cs1_2_pins,
+       .npins = ARRAY_SIZE(ssp0_cs1_2_pins),
+       .modemuxs = ssp0_cs1_2_modemux,
+       .nmodemuxs = ARRAY_SIZE(ssp0_cs1_2_modemux),
+};
+
+static const char *const ssp0_grps[] = { "ssp0_grp", "ssp0_cs0_grp",
+       "ssp0_cs1_2_grp" };
+static struct spear_function ssp0_function = {
+       .name = "ssp0",
+       .groups = ssp0_grps,
+       .ngroups = ARRAY_SIZE(ssp0_grps),
+};
+
+/* Pad multiplexing for i2s0 device */
+static const unsigned i2s0_pins[] = { 104, 105, 106, 107, 108 };
+static struct spear_muxreg i2s0_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_I2S0_MASK,
+               .val = PMX_I2S0_MASK,
+       },
+};
+
+static struct spear_modemux i2s0_modemux[] = {
+       {
+               .muxregs = i2s0_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2s0_muxreg),
+       },
+};
+
+static struct spear_pingroup i2s0_pingroup = {
+       .name = "i2s0_grp",
+       .pins = i2s0_pins,
+       .npins = ARRAY_SIZE(i2s0_pins),
+       .modemuxs = i2s0_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2s0_modemux),
+};
+
+static const char *const i2s0_grps[] = { "i2s0_grp" };
+static struct spear_function i2s0_function = {
+       .name = "i2s0",
+       .groups = i2s0_grps,
+       .ngroups = ARRAY_SIZE(i2s0_grps),
+};
+
+/* Pad multiplexing for i2s1 device */
+static const unsigned i2s1_pins[] = { 0, 1, 2, 3 };
+static struct spear_muxreg i2s1_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_I2S1_MASK,
+               .val = PMX_I2S1_MASK,
+       },
+};
+
+static struct spear_modemux i2s1_modemux[] = {
+       {
+               .muxregs = i2s1_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2s1_muxreg),
+       },
+};
+
+static struct spear_pingroup i2s1_pingroup = {
+       .name = "i2s1_grp",
+       .pins = i2s1_pins,
+       .npins = ARRAY_SIZE(i2s1_pins),
+       .modemuxs = i2s1_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2s1_modemux),
+};
+
+static const char *const i2s1_grps[] = { "i2s1_grp" };
+static struct spear_function i2s1_function = {
+       .name = "i2s1",
+       .groups = i2s1_grps,
+       .ngroups = ARRAY_SIZE(i2s1_grps),
+};
+
+/* Pad multiplexing for clcd device */
+static const unsigned clcd_pins[] = { 113, 114, 115, 116, 117, 118, 119, 120,
+       121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+       135, 136, 137, 138, 139, 140, 141, 142 };
+static struct spear_muxreg clcd_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_CLCD1_MASK,
+               .val = PMX_CLCD1_MASK,
+       },
+};
+
+static struct spear_modemux clcd_modemux[] = {
+       {
+               .muxregs = clcd_muxreg,
+               .nmuxregs = ARRAY_SIZE(clcd_muxreg),
+       },
+};
+
+static struct spear_pingroup clcd_pingroup = {
+       .name = "clcd_grp",
+       .pins = clcd_pins,
+       .npins = ARRAY_SIZE(clcd_pins),
+       .modemuxs = clcd_modemux,
+       .nmodemuxs = ARRAY_SIZE(clcd_modemux),
+};
+
+static const unsigned clcd_high_res_pins[] = { 30, 31, 32, 33, 34, 35, 36, 37,
+       38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53 };
+static struct spear_muxreg clcd_high_res_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_CLCD2_MASK,
+               .val = PMX_CLCD2_MASK,
+       },
+};
+
+static struct spear_modemux clcd_high_res_modemux[] = {
+       {
+               .muxregs = clcd_high_res_muxreg,
+               .nmuxregs = ARRAY_SIZE(clcd_high_res_muxreg),
+       },
+};
+
+static struct spear_pingroup clcd_high_res_pingroup = {
+       .name = "clcd_high_res_grp",
+       .pins = clcd_high_res_pins,
+       .npins = ARRAY_SIZE(clcd_high_res_pins),
+       .modemuxs = clcd_high_res_modemux,
+       .nmodemuxs = ARRAY_SIZE(clcd_high_res_modemux),
+};
+
+static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res_grp" };
+static struct spear_function clcd_function = {
+       .name = "clcd",
+       .groups = clcd_grps,
+       .ngroups = ARRAY_SIZE(clcd_grps),
+};
+
+static const unsigned arm_gpio_pins[] = { 18, 19, 20, 21, 22, 23, 143, 144, 145,
+       146, 147, 148, 149, 150, 151, 152 };
+static struct spear_muxreg arm_gpio_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_EGPIO_0_GRP_MASK,
+               .val = PMX_EGPIO_0_GRP_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_EGPIO_1_GRP_MASK,
+               .val = PMX_EGPIO_1_GRP_MASK,
+       },
+};
+
+static struct spear_modemux arm_gpio_modemux[] = {
+       {
+               .muxregs = arm_gpio_muxreg,
+               .nmuxregs = ARRAY_SIZE(arm_gpio_muxreg),
+       },
+};
+
+static struct spear_pingroup arm_gpio_pingroup = {
+       .name = "arm_gpio_grp",
+       .pins = arm_gpio_pins,
+       .npins = ARRAY_SIZE(arm_gpio_pins),
+       .modemuxs = arm_gpio_modemux,
+       .nmodemuxs = ARRAY_SIZE(arm_gpio_modemux),
+};
+
+static const char *const arm_gpio_grps[] = { "arm_gpio_grp" };
+static struct spear_function arm_gpio_function = {
+       .name = "arm_gpio",
+       .groups = arm_gpio_grps,
+       .ngroups = ARRAY_SIZE(arm_gpio_grps),
+};
+
+/* Pad multiplexing for smi 2 chips device */
+static const unsigned smi_2_chips_pins[] = { 153, 154, 155, 156, 157 };
+static struct spear_muxreg smi_2_chips_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_SMI_MASK,
+               .val = PMX_SMI_MASK,
+       },
+};
+
+static struct spear_modemux smi_2_chips_modemux[] = {
+       {
+               .muxregs = smi_2_chips_muxreg,
+               .nmuxregs = ARRAY_SIZE(smi_2_chips_muxreg),
+       },
+};
+
+static struct spear_pingroup smi_2_chips_pingroup = {
+       .name = "smi_2_chips_grp",
+       .pins = smi_2_chips_pins,
+       .npins = ARRAY_SIZE(smi_2_chips_pins),
+       .modemuxs = smi_2_chips_modemux,
+       .nmodemuxs = ARRAY_SIZE(smi_2_chips_modemux),
+};
+
+static const unsigned smi_4_chips_pins[] = { 54, 55 };
+static struct spear_muxreg smi_4_chips_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_SMI_MASK,
+               .val = PMX_SMI_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+               .val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+       },
+};
+
+static struct spear_modemux smi_4_chips_modemux[] = {
+       {
+               .muxregs = smi_4_chips_muxreg,
+               .nmuxregs = ARRAY_SIZE(smi_4_chips_muxreg),
+       },
+};
+
+static struct spear_pingroup smi_4_chips_pingroup = {
+       .name = "smi_4_chips_grp",
+       .pins = smi_4_chips_pins,
+       .npins = ARRAY_SIZE(smi_4_chips_pins),
+       .modemuxs = smi_4_chips_modemux,
+       .nmodemuxs = ARRAY_SIZE(smi_4_chips_modemux),
+};
+
+static const char *const smi_grps[] = { "smi_2_chips_grp", "smi_4_chips_grp" };
+static struct spear_function smi_function = {
+       .name = "smi",
+       .groups = smi_grps,
+       .ngroups = ARRAY_SIZE(smi_grps),
+};
+
+/* Pad multiplexing for gmii device */
+static const unsigned gmii_pins[] = { 173, 174, 175, 176, 177, 178, 179, 180,
+       181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+       195, 196, 197, 198, 199, 200 };
+static struct spear_muxreg gmii_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_GMII_MASK,
+               .val = PMX_GMII_MASK,
+       },
+};
+
+static struct spear_modemux gmii_modemux[] = {
+       {
+               .muxregs = gmii_muxreg,
+               .nmuxregs = ARRAY_SIZE(gmii_muxreg),
+       },
+};
+
+static struct spear_pingroup gmii_pingroup = {
+       .name = "gmii_grp",
+       .pins = gmii_pins,
+       .npins = ARRAY_SIZE(gmii_pins),
+       .modemuxs = gmii_modemux,
+       .nmodemuxs = ARRAY_SIZE(gmii_modemux),
+};
+
+static const char *const gmii_grps[] = { "gmii_grp" };
+static struct spear_function gmii_function = {
+       .name = "gmii",
+       .groups = gmii_grps,
+       .ngroups = ARRAY_SIZE(gmii_grps),
+};
+
+/* Pad multiplexing for rgmii device */
+static const unsigned rgmii_pins[] = { 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+       28, 29, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 175,
+       180, 181, 182, 183, 185, 188, 193, 194, 195, 196, 197, 198, 211, 212 };
+static struct spear_muxreg rgmii_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_RGMII_REG0_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_RGMII_REG1_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = PMX_RGMII_REG2_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux rgmii_modemux[] = {
+       {
+               .muxregs = rgmii_muxreg,
+               .nmuxregs = ARRAY_SIZE(rgmii_muxreg),
+       },
+};
+
+static struct spear_pingroup rgmii_pingroup = {
+       .name = "rgmii_grp",
+       .pins = rgmii_pins,
+       .npins = ARRAY_SIZE(rgmii_pins),
+       .modemuxs = rgmii_modemux,
+       .nmodemuxs = ARRAY_SIZE(rgmii_modemux),
+};
+
+static const char *const rgmii_grps[] = { "rgmii_grp" };
+static struct spear_function rgmii_function = {
+       .name = "rgmii",
+       .groups = rgmii_grps,
+       .ngroups = ARRAY_SIZE(rgmii_grps),
+};
+
+/* Pad multiplexing for smii_0_1_2 device */
+static const unsigned smii_0_1_2_pins[] = { 24, 25, 26, 27, 28, 29, 30, 31, 32,
+       33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+       51, 52, 53, 54, 55 };
+static struct spear_muxreg smii_0_1_2_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_SMII_0_1_2_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux smii_0_1_2_modemux[] = {
+       {
+               .muxregs = smii_0_1_2_muxreg,
+               .nmuxregs = ARRAY_SIZE(smii_0_1_2_muxreg),
+       },
+};
+
+static struct spear_pingroup smii_0_1_2_pingroup = {
+       .name = "smii_0_1_2_grp",
+       .pins = smii_0_1_2_pins,
+       .npins = ARRAY_SIZE(smii_0_1_2_pins),
+       .modemuxs = smii_0_1_2_modemux,
+       .nmodemuxs = ARRAY_SIZE(smii_0_1_2_modemux),
+};
+
+static const char *const smii_0_1_2_grps[] = { "smii_0_1_2_grp" };
+static struct spear_function smii_0_1_2_function = {
+       .name = "smii_0_1_2",
+       .groups = smii_0_1_2_grps,
+       .ngroups = ARRAY_SIZE(smii_0_1_2_grps),
+};
+
+/* Pad multiplexing for ras_mii_txclk device */
+static const unsigned ras_mii_txclk_pins[] = { 98, 99 };
+static struct spear_muxreg ras_mii_txclk_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_NFCE2_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux ras_mii_txclk_modemux[] = {
+       {
+               .muxregs = ras_mii_txclk_muxreg,
+               .nmuxregs = ARRAY_SIZE(ras_mii_txclk_muxreg),
+       },
+};
+
+static struct spear_pingroup ras_mii_txclk_pingroup = {
+       .name = "ras_mii_txclk_grp",
+       .pins = ras_mii_txclk_pins,
+       .npins = ARRAY_SIZE(ras_mii_txclk_pins),
+       .modemuxs = ras_mii_txclk_modemux,
+       .nmodemuxs = ARRAY_SIZE(ras_mii_txclk_modemux),
+};
+
+static const char *const ras_mii_txclk_grps[] = { "ras_mii_txclk_grp" };
+static struct spear_function ras_mii_txclk_function = {
+       .name = "ras_mii_txclk",
+       .groups = ras_mii_txclk_grps,
+       .ngroups = ARRAY_SIZE(ras_mii_txclk_grps),
+};
+
+/* Pad multiplexing for nand 8bit device (cs0 only) */
+static const unsigned nand_8bit_pins[] = { 56, 57, 58, 59, 60, 61, 62, 63, 64,
+       65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
+       83, 84, 85, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+       170, 171, 172, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+       212 };
+static struct spear_muxreg nand_8bit_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_NAND8BIT_0_MASK,
+               .val = PMX_NAND8BIT_0_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_NAND8BIT_1_MASK,
+               .val = PMX_NAND8BIT_1_MASK,
+       },
+};
+
+static struct spear_modemux nand_8bit_modemux[] = {
+       {
+               .muxregs = nand_8bit_muxreg,
+               .nmuxregs = ARRAY_SIZE(nand_8bit_muxreg),
+       },
+};
+
+static struct spear_pingroup nand_8bit_pingroup = {
+       .name = "nand_8bit_grp",
+       .pins = nand_8bit_pins,
+       .npins = ARRAY_SIZE(nand_8bit_pins),
+       .modemuxs = nand_8bit_modemux,
+       .nmodemuxs = ARRAY_SIZE(nand_8bit_modemux),
+};
+
+/* Pad multiplexing for nand 16bit device */
+static const unsigned nand_16bit_pins[] = { 201, 202, 203, 204, 207, 208, 209,
+       210 };
+static struct spear_muxreg nand_16bit_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_NAND16BIT_1_MASK,
+               .val = PMX_NAND16BIT_1_MASK,
+       },
+};
+
+static struct spear_modemux nand_16bit_modemux[] = {
+       {
+               .muxregs = nand_16bit_muxreg,
+               .nmuxregs = ARRAY_SIZE(nand_16bit_muxreg),
+       },
+};
+
+static struct spear_pingroup nand_16bit_pingroup = {
+       .name = "nand_16bit_grp",
+       .pins = nand_16bit_pins,
+       .npins = ARRAY_SIZE(nand_16bit_pins),
+       .modemuxs = nand_16bit_modemux,
+       .nmodemuxs = ARRAY_SIZE(nand_16bit_modemux),
+};
+
+/* Pad multiplexing for nand 4 chips */
+static const unsigned nand_4_chips_pins[] = { 205, 206, 211, 212 };
+static struct spear_muxreg nand_4_chips_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_NAND_4CHIPS_MASK,
+               .val = PMX_NAND_4CHIPS_MASK,
+       },
+};
+
+static struct spear_modemux nand_4_chips_modemux[] = {
+       {
+               .muxregs = nand_4_chips_muxreg,
+               .nmuxregs = ARRAY_SIZE(nand_4_chips_muxreg),
+       },
+};
+
+static struct spear_pingroup nand_4_chips_pingroup = {
+       .name = "nand_4_chips_grp",
+       .pins = nand_4_chips_pins,
+       .npins = ARRAY_SIZE(nand_4_chips_pins),
+       .modemuxs = nand_4_chips_modemux,
+       .nmodemuxs = ARRAY_SIZE(nand_4_chips_modemux),
+};
+
+static const char *const nand_grps[] = { "nand_8bit_grp", "nand_16bit_grp",
+       "nand_4_chips_grp" };
+static struct spear_function nand_function = {
+       .name = "nand",
+       .groups = nand_grps,
+       .ngroups = ARRAY_SIZE(nand_grps),
+};
+
+/* Pad multiplexing for keyboard_6x6 device */
+static const unsigned keyboard_6x6_pins[] = { 201, 202, 203, 204, 205, 206, 207,
+       208, 209, 210, 211, 212 };
+static struct spear_muxreg keyboard_6x6_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               /*
+                * mask also covers the NAND pads sharing these pins
+                * (NFIO8_15, NFCE1/2, NFWPRT1/2) while val sets only the
+                * keyboard bits — presumably the register is updated as
+                * (reg & ~mask) | val, clearing the NAND enables when the
+                * keyboard group is selected.  NOTE(review): confirm against
+                * the driver's muxreg write helper.
+                */
+               .mask = PMX_KEYBOARD_6X6_MASK | PMX_NFIO8_15_MASK |
+                       PMX_NFCE1_MASK | PMX_NFCE2_MASK | PMX_NFWPRT1_MASK |
+                       PMX_NFWPRT2_MASK,
+               .val = PMX_KEYBOARD_6X6_MASK,
+       },
+};
+
+static struct spear_modemux keyboard_6x6_modemux[] = {
+       {
+               .muxregs = keyboard_6x6_muxreg,
+               .nmuxregs = ARRAY_SIZE(keyboard_6x6_muxreg),
+       },
+};
+
+static struct spear_pingroup keyboard_6x6_pingroup = {
+       .name = "keyboard_6x6_grp",
+       .pins = keyboard_6x6_pins,
+       .npins = ARRAY_SIZE(keyboard_6x6_pins),
+       .modemuxs = keyboard_6x6_modemux,
+       .nmodemuxs = ARRAY_SIZE(keyboard_6x6_modemux),
+};
+
+/* Pad multiplexing for keyboard_rowcol6_8 device */
+static const unsigned keyboard_rowcol6_8_pins[] = { 24, 25, 26, 27, 28, 29 };
+static struct spear_muxreg keyboard_rowcol6_8_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_KBD_ROWCOL68_MASK,
+               .val = PMX_KBD_ROWCOL68_MASK,
+       },
+};
+
+static struct spear_modemux keyboard_rowcol6_8_modemux[] = {
+       {
+               .muxregs = keyboard_rowcol6_8_muxreg,
+               .nmuxregs = ARRAY_SIZE(keyboard_rowcol6_8_muxreg),
+       },
+};
+
+static struct spear_pingroup keyboard_rowcol6_8_pingroup = {
+       .name = "keyboard_rowcol6_8_grp",
+       .pins = keyboard_rowcol6_8_pins,
+       .npins = ARRAY_SIZE(keyboard_rowcol6_8_pins),
+       .modemuxs = keyboard_rowcol6_8_modemux,
+       .nmodemuxs = ARRAY_SIZE(keyboard_rowcol6_8_modemux),
+};
+
+static const char *const keyboard_grps[] = { "keyboard_6x6_grp",
+       "keyboard_rowcol6_8_grp" };
+static struct spear_function keyboard_function = {
+       .name = "keyboard",
+       .groups = keyboard_grps,
+       .ngroups = ARRAY_SIZE(keyboard_grps),
+};
+
+/* Pad multiplexing for uart0 device */
+static const unsigned uart0_pins[] = { 100, 101 };
+static struct spear_muxreg uart0_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_UART0_MASK,
+               .val = PMX_UART0_MASK,
+       },
+};
+
+static struct spear_modemux uart0_modemux[] = {
+       {
+               .muxregs = uart0_muxreg,
+               .nmuxregs = ARRAY_SIZE(uart0_muxreg),
+       },
+};
+
+static struct spear_pingroup uart0_pingroup = {
+       .name = "uart0_grp",
+       .pins = uart0_pins,
+       .npins = ARRAY_SIZE(uart0_pins),
+       .modemuxs = uart0_modemux,
+       .nmodemuxs = ARRAY_SIZE(uart0_modemux),
+};
+
+/* Pad multiplexing for uart0_modem device */
+static const unsigned uart0_modem_pins[] = { 12, 13, 14, 15, 16, 17 };
+static struct spear_muxreg uart0_modem_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_UART0_MODEM_MASK,
+               .val = PMX_UART0_MODEM_MASK,
+       },
+};
+
+static struct spear_modemux uart0_modem_modemux[] = {
+       {
+               .muxregs = uart0_modem_muxreg,
+               .nmuxregs = ARRAY_SIZE(uart0_modem_muxreg),
+       },
+};
+
+static struct spear_pingroup uart0_modem_pingroup = {
+       .name = "uart0_modem_grp",
+       .pins = uart0_modem_pins,
+       .npins = ARRAY_SIZE(uart0_modem_pins),
+       .modemuxs = uart0_modem_modemux,
+       .nmodemuxs = ARRAY_SIZE(uart0_modem_modemux),
+};
+
+static const char *const uart0_grps[] = { "uart0_grp", "uart0_modem_grp" };
+static struct spear_function uart0_function = {
+       .name = "uart0",
+       .groups = uart0_grps,
+       .ngroups = ARRAY_SIZE(uart0_grps),
+};
+
+/* Pad multiplexing for gpt0_tmr0 device */
+static const unsigned gpt0_tmr0_pins[] = { 10, 11 };
+static struct spear_muxreg gpt0_tmr0_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_GPT0_TMR0_MASK,
+               .val = PMX_GPT0_TMR0_MASK,
+       },
+};
+
+static struct spear_modemux gpt0_tmr0_modemux[] = {
+       {
+               .muxregs = gpt0_tmr0_muxreg,
+               .nmuxregs = ARRAY_SIZE(gpt0_tmr0_muxreg),
+       },
+};
+
+static struct spear_pingroup gpt0_tmr0_pingroup = {
+       .name = "gpt0_tmr0_grp",
+       .pins = gpt0_tmr0_pins,
+       .npins = ARRAY_SIZE(gpt0_tmr0_pins),
+       .modemuxs = gpt0_tmr0_modemux,
+       .nmodemuxs = ARRAY_SIZE(gpt0_tmr0_modemux),
+};
+
+/* Pad multiplexing for gpt0_tmr1 device */
+static const unsigned gpt0_tmr1_pins[] = { 8, 9 };
+static struct spear_muxreg gpt0_tmr1_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_GPT0_TMR1_MASK,
+               .val = PMX_GPT0_TMR1_MASK,
+       },
+};
+
+static struct spear_modemux gpt0_tmr1_modemux[] = {
+       {
+               .muxregs = gpt0_tmr1_muxreg,
+               .nmuxregs = ARRAY_SIZE(gpt0_tmr1_muxreg),
+       },
+};
+
+static struct spear_pingroup gpt0_tmr1_pingroup = {
+       .name = "gpt0_tmr1_grp",
+       .pins = gpt0_tmr1_pins,
+       .npins = ARRAY_SIZE(gpt0_tmr1_pins),
+       .modemuxs = gpt0_tmr1_modemux,
+       .nmodemuxs = ARRAY_SIZE(gpt0_tmr1_modemux),
+};
+
+static const char *const gpt0_grps[] = { "gpt0_tmr0_grp", "gpt0_tmr1_grp" };
+static struct spear_function gpt0_function = {
+       .name = "gpt0",
+       .groups = gpt0_grps,
+       .ngroups = ARRAY_SIZE(gpt0_grps),
+};
+
+/* Pad multiplexing for gpt1_tmr0 device */
+static const unsigned gpt1_tmr0_pins[] = { 6, 7 };
+static struct spear_muxreg gpt1_tmr0_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_GPT1_TMR0_MASK,
+               .val = PMX_GPT1_TMR0_MASK,
+       },
+};
+
+static struct spear_modemux gpt1_tmr0_modemux[] = {
+       {
+               .muxregs = gpt1_tmr0_muxreg,
+               .nmuxregs = ARRAY_SIZE(gpt1_tmr0_muxreg),
+       },
+};
+
+static struct spear_pingroup gpt1_tmr0_pingroup = {
+       .name = "gpt1_tmr0_grp",
+       .pins = gpt1_tmr0_pins,
+       .npins = ARRAY_SIZE(gpt1_tmr0_pins),
+       .modemuxs = gpt1_tmr0_modemux,
+       .nmodemuxs = ARRAY_SIZE(gpt1_tmr0_modemux),
+};
+
+/* Pad multiplexing for gpt1_tmr1 device */
+static const unsigned gpt1_tmr1_pins[] = { 4, 5 };
+static struct spear_muxreg gpt1_tmr1_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_GPT1_TMR1_MASK,
+               .val = PMX_GPT1_TMR1_MASK,
+       },
+};
+
+static struct spear_modemux gpt1_tmr1_modemux[] = {
+       {
+               .muxregs = gpt1_tmr1_muxreg,
+               .nmuxregs = ARRAY_SIZE(gpt1_tmr1_muxreg),
+       },
+};
+
+static struct spear_pingroup gpt1_tmr1_pingroup = {
+       .name = "gpt1_tmr1_grp",
+       .pins = gpt1_tmr1_pins,
+       .npins = ARRAY_SIZE(gpt1_tmr1_pins),
+       .modemuxs = gpt1_tmr1_modemux,
+       .nmodemuxs = ARRAY_SIZE(gpt1_tmr1_modemux),
+};
+
+/*
+ * Group list for the "gpt1" function.  Listed tmr0 before tmr1 for
+ * consistency with gpt0_grps above; order is cosmetic only, since pinmux
+ * groups are looked up by name.
+ */
+static const char *const gpt1_grps[] = { "gpt1_tmr0_grp", "gpt1_tmr1_grp" };
+static struct spear_function gpt1_function = {
+       .name = "gpt1",
+       .groups = gpt1_grps,
+       .ngroups = ARRAY_SIZE(gpt1_grps),
+};
+
+/* Pad multiplexing for mcif device */
+static const unsigned mcif_pins[] = { 86, 87, 88, 89, 90, 91, 92, 93, 213, 214,
+       215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+       229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+       243, 244, 245 };
+#define MCIF_MUXREG                                            \
+       {                                                       \
+               .reg = PAD_FUNCTION_EN_0,                       \
+               .mask = PMX_MCI_DATA8_15_MASK,                  \
+               .val = PMX_MCI_DATA8_15_MASK,                   \
+       }, {                                                    \
+               .reg = PAD_FUNCTION_EN_1,                       \
+               .mask = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \
+                       PMX_NFWPRT2_MASK,                       \
+               .val = PMX_MCIFALL_1_MASK,                      \
+       }, {                                                    \
+               .reg = PAD_FUNCTION_EN_2,                       \
+               .mask = PMX_MCIFALL_2_MASK,                     \
+               .val = PMX_MCIFALL_2_MASK,                      \
+       }
+
+/* sdhci device */
+static struct spear_muxreg sdhci_muxreg[] = {
+       MCIF_MUXREG,
+       {
+               .reg = PERIP_CFG,
+               .mask = MCIF_SEL_MASK,
+               .val = MCIF_SEL_SD,
+       },
+};
+
+static struct spear_modemux sdhci_modemux[] = {
+       {
+               .muxregs = sdhci_muxreg,
+               .nmuxregs = ARRAY_SIZE(sdhci_muxreg),
+       },
+};
+
+static struct spear_pingroup sdhci_pingroup = {
+       .name = "sdhci_grp",
+       .pins = mcif_pins,
+       .npins = ARRAY_SIZE(mcif_pins),
+       .modemuxs = sdhci_modemux,
+       .nmodemuxs = ARRAY_SIZE(sdhci_modemux),
+};
+
+static const char *const sdhci_grps[] = { "sdhci_grp" };
+static struct spear_function sdhci_function = {
+       .name = "sdhci",
+       .groups = sdhci_grps,
+       .ngroups = ARRAY_SIZE(sdhci_grps),
+};
+
+/* cf device */
+static struct spear_muxreg cf_muxreg[] = {
+       MCIF_MUXREG,
+       {
+               .reg = PERIP_CFG,
+               .mask = MCIF_SEL_MASK,
+               .val = MCIF_SEL_CF,
+       },
+};
+
+static struct spear_modemux cf_modemux[] = {
+       {
+               .muxregs = cf_muxreg,
+               .nmuxregs = ARRAY_SIZE(cf_muxreg),
+       },
+};
+
+static struct spear_pingroup cf_pingroup = {
+       .name = "cf_grp",
+       .pins = mcif_pins,
+       .npins = ARRAY_SIZE(mcif_pins),
+       .modemuxs = cf_modemux,
+       .nmodemuxs = ARRAY_SIZE(cf_modemux),
+};
+
+static const char *const cf_grps[] = { "cf_grp" };
+static struct spear_function cf_function = {
+       .name = "cf",
+       .groups = cf_grps,
+       .ngroups = ARRAY_SIZE(cf_grps),
+};
+
+/* xd device */
+static struct spear_muxreg xd_muxreg[] = {
+       MCIF_MUXREG,
+       {
+               .reg = PERIP_CFG,
+               .mask = MCIF_SEL_MASK,
+               .val = MCIF_SEL_XD,
+       },
+};
+
+static struct spear_modemux xd_modemux[] = {
+       {
+               .muxregs = xd_muxreg,
+               .nmuxregs = ARRAY_SIZE(xd_muxreg),
+       },
+};
+
+static struct spear_pingroup xd_pingroup = {
+       .name = "xd_grp",
+       .pins = mcif_pins,
+       .npins = ARRAY_SIZE(mcif_pins),
+       .modemuxs = xd_modemux,
+       .nmodemuxs = ARRAY_SIZE(xd_modemux),
+};
+
+static const char *const xd_grps[] = { "xd_grp" };
+static struct spear_function xd_function = {
+       .name = "xd",
+       .groups = xd_grps,
+       .ngroups = ARRAY_SIZE(xd_grps),
+};
+
+/* Pad multiplexing for touch_xy device */
+static const unsigned touch_xy_pins[] = { 97 };
+static struct spear_muxreg touch_xy_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = PMX_TOUCH_XY_MASK,
+               .val = PMX_TOUCH_XY_MASK,
+       },
+};
+
+static struct spear_modemux touch_xy_modemux[] = {
+       {
+               .muxregs = touch_xy_muxreg,
+               .nmuxregs = ARRAY_SIZE(touch_xy_muxreg),
+       },
+};
+
+static struct spear_pingroup touch_xy_pingroup = {
+       .name = "touch_xy_grp",
+       .pins = touch_xy_pins,
+       .npins = ARRAY_SIZE(touch_xy_pins),
+       .modemuxs = touch_xy_modemux,
+       .nmodemuxs = ARRAY_SIZE(touch_xy_modemux),
+};
+
+static const char *const touch_xy_grps[] = { "touch_xy_grp" };
+static struct spear_function touch_xy_function = {
+       .name = "touchscreen",
+       .groups = touch_xy_grps,
+       .ngroups = ARRAY_SIZE(touch_xy_grps),
+};
+
+/* Pad multiplexing for uart1 device */
+/* Muxed with I2C */
+static const unsigned uart1_dis_i2c_pins[] = { 102, 103 };
+static struct spear_muxreg uart1_dis_i2c_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_I2C0_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux uart1_dis_i2c_modemux[] = {
+       {
+               .muxregs = uart1_dis_i2c_muxreg,
+               .nmuxregs = ARRAY_SIZE(uart1_dis_i2c_muxreg),
+       },
+};
+
+static struct spear_pingroup uart_1_dis_i2c_pingroup = {
+       .name = "uart1_disable_i2c_grp",
+       .pins = uart1_dis_i2c_pins,
+       .npins = ARRAY_SIZE(uart1_dis_i2c_pins),
+       .modemuxs = uart1_dis_i2c_modemux,
+       .nmodemuxs = ARRAY_SIZE(uart1_dis_i2c_modemux),
+};
+
+/* Muxed with SD/MMC */
+static const unsigned uart1_dis_sd_pins[] = { 214, 215 };
+static struct spear_muxreg uart1_dis_sd_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_MCIDATA1_MASK |
+                       PMX_MCIDATA2_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux uart1_dis_sd_modemux[] = {
+       {
+               .muxregs = uart1_dis_sd_muxreg,
+               .nmuxregs = ARRAY_SIZE(uart1_dis_sd_muxreg),
+       },
+};
+
+static struct spear_pingroup uart_1_dis_sd_pingroup = {
+       .name = "uart1_disable_sd_grp",
+       .pins = uart1_dis_sd_pins,
+       .npins = ARRAY_SIZE(uart1_dis_sd_pins),
+       .modemuxs = uart1_dis_sd_modemux,
+       .nmodemuxs = ARRAY_SIZE(uart1_dis_sd_modemux),
+};
+
+static const char *const uart1_grps[] = { "uart1_disable_i2c_grp",
+       "uart1_disable_sd_grp" };
+static struct spear_function uart1_function = {
+       .name = "uart1",
+       .groups = uart1_grps,
+       .ngroups = ARRAY_SIZE(uart1_grps),
+};
+
+/* Pad multiplexing for uart2_3 device */
+static const unsigned uart2_3_pins[] = { 104, 105, 106, 107 };
+static struct spear_muxreg uart2_3_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_I2S0_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux uart2_3_modemux[] = {
+       {
+               .muxregs = uart2_3_muxreg,
+               .nmuxregs = ARRAY_SIZE(uart2_3_muxreg),
+       },
+};
+
+static struct spear_pingroup uart_2_3_pingroup = {
+       .name = "uart2_3_grp",
+       .pins = uart2_3_pins,
+       .npins = ARRAY_SIZE(uart2_3_pins),
+       .modemuxs = uart2_3_modemux,
+       .nmodemuxs = ARRAY_SIZE(uart2_3_modemux),
+};
+
+static const char *const uart2_3_grps[] = { "uart2_3_grp" };
+static struct spear_function uart2_3_function = {
+       .name = "uart2_3",
+       .groups = uart2_3_grps,
+       .ngroups = ARRAY_SIZE(uart2_3_grps),
+};
+
+/* Pad multiplexing for uart4 device */
+static const unsigned uart4_pins[] = { 108, 113 };
+static struct spear_muxreg uart4_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_I2S0_MASK | PMX_CLCD1_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux uart4_modemux[] = {
+       {
+               .muxregs = uart4_muxreg,
+               .nmuxregs = ARRAY_SIZE(uart4_muxreg),
+       },
+};
+
+static struct spear_pingroup uart_4_pingroup = {
+       .name = "uart4_grp",
+       .pins = uart4_pins,
+       .npins = ARRAY_SIZE(uart4_pins),
+       .modemuxs = uart4_modemux,
+       .nmodemuxs = ARRAY_SIZE(uart4_modemux),
+};
+
+static const char *const uart4_grps[] = { "uart4_grp" };
+static struct spear_function uart4_function = {
+       .name = "uart4",
+       .groups = uart4_grps,
+       .ngroups = ARRAY_SIZE(uart4_grps),
+};
+
+/* Pad multiplexing for uart5 device */
+static const unsigned uart5_pins[] = { 114, 115 };
+static struct spear_muxreg uart5_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_CLCD1_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux uart5_modemux[] = {
+       {
+               .muxregs = uart5_muxreg,
+               .nmuxregs = ARRAY_SIZE(uart5_muxreg),
+       },
+};
+
+static struct spear_pingroup uart_5_pingroup = {
+       .name = "uart5_grp",
+       .pins = uart5_pins,
+       .npins = ARRAY_SIZE(uart5_pins),
+       .modemuxs = uart5_modemux,
+       .nmodemuxs = ARRAY_SIZE(uart5_modemux),
+};
+
+static const char *const uart5_grps[] = { "uart5_grp" };
+static struct spear_function uart5_function = {
+       .name = "uart5",
+       .groups = uart5_grps,
+       .ngroups = ARRAY_SIZE(uart5_grps),
+};
+
+/* Pad multiplexing for rs485_0_1_tdm_0_1 device */
+static const unsigned rs485_0_1_tdm_0_1_pins[] = { 116, 117, 118, 119, 120, 121,
+       122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+       136, 137 };
+static struct spear_muxreg rs485_0_1_tdm_0_1_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_CLCD1_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux rs485_0_1_tdm_0_1_modemux[] = {
+       {
+               .muxregs = rs485_0_1_tdm_0_1_muxreg,
+               .nmuxregs = ARRAY_SIZE(rs485_0_1_tdm_0_1_muxreg),
+       },
+};
+
+static struct spear_pingroup rs485_0_1_tdm_0_1_pingroup = {
+       .name = "rs485_0_1_tdm_0_1_grp",
+       .pins = rs485_0_1_tdm_0_1_pins,
+       .npins = ARRAY_SIZE(rs485_0_1_tdm_0_1_pins),
+       .modemuxs = rs485_0_1_tdm_0_1_modemux,
+       .nmodemuxs = ARRAY_SIZE(rs485_0_1_tdm_0_1_modemux),
+};
+
+static const char *const rs485_0_1_tdm_0_1_grps[] = { "rs485_0_1_tdm_0_1_grp" };
+static struct spear_function rs485_0_1_tdm_0_1_function = {
+       .name = "rs485_0_1_tdm_0_1",
+       .groups = rs485_0_1_tdm_0_1_grps,
+       .ngroups = ARRAY_SIZE(rs485_0_1_tdm_0_1_grps),
+};
+
+/* Pad multiplexing for i2c_1_2 device */
+static const unsigned i2c_1_2_pins[] = { 138, 139, 140, 141 };
+static struct spear_muxreg i2c_1_2_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_CLCD1_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux i2c_1_2_modemux[] = {
+       {
+               .muxregs = i2c_1_2_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2c_1_2_muxreg),
+       },
+};
+
+static struct spear_pingroup i2c_1_2_pingroup = {
+       .name = "i2c_1_2_grp",
+       .pins = i2c_1_2_pins,
+       .npins = ARRAY_SIZE(i2c_1_2_pins),
+       .modemuxs = i2c_1_2_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2c_1_2_modemux),
+};
+
+static const char *const i2c_1_2_grps[] = { "i2c_1_2_grp" };
+static struct spear_function i2c_1_2_function = {
+       .name = "i2c_1_2",
+       .groups = i2c_1_2_grps,
+       .ngroups = ARRAY_SIZE(i2c_1_2_grps),
+};
+
+/* Pad multiplexing for i2c3_dis_smi_clcd device */
+/* Muxed with SMI & CLCD */
+static const unsigned i2c3_dis_smi_clcd_pins[] = { 142, 153 };
+static struct spear_muxreg i2c3_dis_smi_clcd_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_CLCD1_MASK | PMX_SMI_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux i2c3_dis_smi_clcd_modemux[] = {
+       {
+               .muxregs = i2c3_dis_smi_clcd_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2c3_dis_smi_clcd_muxreg),
+       },
+};
+
+static struct spear_pingroup i2c3_dis_smi_clcd_pingroup = {
+       .name = "i2c3_dis_smi_clcd_grp",
+       .pins = i2c3_dis_smi_clcd_pins,
+       .npins = ARRAY_SIZE(i2c3_dis_smi_clcd_pins),
+       .modemuxs = i2c3_dis_smi_clcd_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2c3_dis_smi_clcd_modemux),
+};
+
+/* Pad multiplexing for i2c3_dis_sd_i2s0 device */
+/* Muxed with SD/MMC & I2S1 */
+static const unsigned i2c3_dis_sd_i2s0_pins[] = { 0, 216 };
+static struct spear_muxreg i2c3_dis_sd_i2s0_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux i2c3_dis_sd_i2s0_modemux[] = {
+       {
+               .muxregs = i2c3_dis_sd_i2s0_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2c3_dis_sd_i2s0_muxreg),
+       },
+};
+
+static struct spear_pingroup i2c3_dis_sd_i2s0_pingroup = {
+       .name = "i2c3_dis_sd_i2s0_grp",
+       .pins = i2c3_dis_sd_i2s0_pins,
+       .npins = ARRAY_SIZE(i2c3_dis_sd_i2s0_pins),
+       .modemuxs = i2c3_dis_sd_i2s0_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2c3_dis_sd_i2s0_modemux),
+};
+
+static const char *const i2c3_grps[] = { "i2c3_dis_smi_clcd_grp",
+       "i2c3_dis_sd_i2s0_grp" };
+/*
+ * NOTE(review): identifier is misspelled — "i2c3_unction" should be
+ * "i2c3_function".  It is referenced by this name elsewhere in the file
+ * (functions array), so renaming must be done together with that use;
+ * left as-is here to keep this hunk self-contained.
+ */
+static struct spear_function i2c3_unction = {
+       .name = "i2c3_i2s1",
+       .groups = i2c3_grps,
+       .ngroups = ARRAY_SIZE(i2c3_grps),
+};
+
+/* Pad multiplexing for i2c_4_5_dis_smi device */
+/* Muxed with SMI */
+static const unsigned i2c_4_5_dis_smi_pins[] = { 154, 155, 156, 157 };
+static struct spear_muxreg i2c_4_5_dis_smi_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_SMI_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux i2c_4_5_dis_smi_modemux[] = {
+       {
+               .muxregs = i2c_4_5_dis_smi_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2c_4_5_dis_smi_muxreg),
+       },
+};
+
+static struct spear_pingroup i2c_4_5_dis_smi_pingroup = {
+       .name = "i2c_4_5_dis_smi_grp",
+       .pins = i2c_4_5_dis_smi_pins,
+       .npins = ARRAY_SIZE(i2c_4_5_dis_smi_pins),
+       .modemuxs = i2c_4_5_dis_smi_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2c_4_5_dis_smi_modemux),
+};
+
+/* Pad multiplexing for i2c4_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned i2c4_dis_sd_pins[] = { 217, 218 };
+static struct spear_muxreg i2c4_dis_sd_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_MCIDATA4_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = PMX_MCIDATA5_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux i2c4_dis_sd_modemux[] = {
+       {
+               .muxregs = i2c4_dis_sd_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2c4_dis_sd_muxreg),
+       },
+};
+
+static struct spear_pingroup i2c4_dis_sd_pingroup = {
+       .name = "i2c4_dis_sd_grp",
+       .pins = i2c4_dis_sd_pins,
+       .npins = ARRAY_SIZE(i2c4_dis_sd_pins),
+       .modemuxs = i2c4_dis_sd_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2c4_dis_sd_modemux),
+};
+
+/* Pad multiplexing for i2c5_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned i2c5_dis_sd_pins[] = { 219, 220 };
+static struct spear_muxreg i2c5_dis_sd_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = PMX_MCIDATA6_MASK |
+                       PMX_MCIDATA7_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux i2c5_dis_sd_modemux[] = {
+       {
+               .muxregs = i2c5_dis_sd_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2c5_dis_sd_muxreg),
+       },
+};
+
+static struct spear_pingroup i2c5_dis_sd_pingroup = {
+       .name = "i2c5_dis_sd_grp",
+       .pins = i2c5_dis_sd_pins,
+       .npins = ARRAY_SIZE(i2c5_dis_sd_pins),
+       .modemuxs = i2c5_dis_sd_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2c5_dis_sd_modemux),
+};
+
+/* I2C4/I2C5: pads shared with SMI (group above) and SD/MMC (groups above) */
+static const char *const i2c_4_5_grps[] = { "i2c5_dis_sd_grp",
+       "i2c4_dis_sd_grp", "i2c_4_5_dis_smi_grp" };
+static struct spear_function i2c_4_5_function = {
+       .name = "i2c_4_5",
+       .groups = i2c_4_5_grps,
+       .ngroups = ARRAY_SIZE(i2c_4_5_grps),
+};
+
+/* Pad multiplexing for i2c_6_7_dis_kbd device */
+/* Muxed with KBD */
+static const unsigned i2c_6_7_dis_kbd_pins[] = { 207, 208, 209, 210 };
+static struct spear_muxreg i2c_6_7_dis_kbd_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_KBD_ROWCOL25_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux i2c_6_7_dis_kbd_modemux[] = {
+       {
+               .muxregs = i2c_6_7_dis_kbd_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2c_6_7_dis_kbd_muxreg),
+       },
+};
+
+static struct spear_pingroup i2c_6_7_dis_kbd_pingroup = {
+       .name = "i2c_6_7_dis_kbd_grp",
+       .pins = i2c_6_7_dis_kbd_pins,
+       .npins = ARRAY_SIZE(i2c_6_7_dis_kbd_pins),
+       .modemuxs = i2c_6_7_dis_kbd_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2c_6_7_dis_kbd_modemux),
+};
+
+/* Pad multiplexing for i2c6_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned i2c6_dis_sd_pins[] = { 236, 237 };
+static struct spear_muxreg i2c6_dis_sd_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = PMX_MCIIORDRE_MASK |
+                       PMX_MCIIOWRWE_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux i2c6_dis_sd_modemux[] = {
+       {
+               .muxregs = i2c6_dis_sd_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2c6_dis_sd_muxreg),
+       },
+};
+
+static struct spear_pingroup i2c6_dis_sd_pingroup = {
+       .name = "i2c6_dis_sd_grp",
+       .pins = i2c6_dis_sd_pins,
+       .npins = ARRAY_SIZE(i2c6_dis_sd_pins),
+       .modemuxs = i2c6_dis_sd_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2c6_dis_sd_modemux),
+};
+
+/* Pad multiplexing for i2c7_dis_sd device */
+/* Muxed with SD/MMC (PMX_MCI* bits, same register as i2c6_dis_sd) */
+static const unsigned i2c7_dis_sd_pins[] = { 238, 239 };
+static struct spear_muxreg i2c7_dis_sd_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = PMX_MCIRESETCF_MASK |
+                       PMX_MCICS0CE_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux i2c7_dis_sd_modemux[] = {
+       {
+               .muxregs = i2c7_dis_sd_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2c7_dis_sd_muxreg),
+       },
+};
+
+static struct spear_pingroup i2c7_dis_sd_pingroup = {
+       .name = "i2c7_dis_sd_grp",
+       .pins = i2c7_dis_sd_pins,
+       .npins = ARRAY_SIZE(i2c7_dis_sd_pins),
+       .modemuxs = i2c7_dis_sd_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2c7_dis_sd_modemux),
+};
+
+static const char *const i2c_6_7_grps[] = { "i2c6_dis_sd_grp",
+       "i2c7_dis_sd_grp", "i2c_6_7_dis_kbd_grp" };
+static struct spear_function i2c_6_7_function = {
+       .name = "i2c_6_7",
+       .groups = i2c_6_7_grps,
+       .ngroups = ARRAY_SIZE(i2c_6_7_grps),
+};
+
+/* Pad multiplexing for can0_dis_nor device */
+/* Muxed with NOR */
+static const unsigned can0_dis_nor_pins[] = { 56, 57 };
+static struct spear_muxreg can0_dis_nor_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_NFRSTPWDWN2_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_NFRSTPWDWN3_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux can0_dis_nor_modemux[] = {
+       {
+               .muxregs = can0_dis_nor_muxreg,
+               .nmuxregs = ARRAY_SIZE(can0_dis_nor_muxreg),
+       },
+};
+
+static struct spear_pingroup can0_dis_nor_pingroup = {
+       .name = "can0_dis_nor_grp",
+       .pins = can0_dis_nor_pins,
+       .npins = ARRAY_SIZE(can0_dis_nor_pins),
+       .modemuxs = can0_dis_nor_modemux,
+       .nmodemuxs = ARRAY_SIZE(can0_dis_nor_modemux),
+};
+
+/* Pad multiplexing for can0_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned can0_dis_sd_pins[] = { 240, 241 };
+static struct spear_muxreg can0_dis_sd_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux can0_dis_sd_modemux[] = {
+       {
+               .muxregs = can0_dis_sd_muxreg,
+               .nmuxregs = ARRAY_SIZE(can0_dis_sd_muxreg),
+       },
+};
+
+static struct spear_pingroup can0_dis_sd_pingroup = {
+       .name = "can0_dis_sd_grp",
+       .pins = can0_dis_sd_pins,
+       .npins = ARRAY_SIZE(can0_dis_sd_pins),
+       .modemuxs = can0_dis_sd_modemux,
+       .nmodemuxs = ARRAY_SIZE(can0_dis_sd_modemux),
+};
+
+static const char *const can0_grps[] = { "can0_dis_nor_grp", "can0_dis_sd_grp"
+};
+static struct spear_function can0_function = {
+       .name = "can0",
+       .groups = can0_grps,
+       .ngroups = ARRAY_SIZE(can0_grps),
+};
+
+/* Pad multiplexing for can1_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned can1_dis_sd_pins[] = { 242, 243 };
+static struct spear_muxreg can1_dis_sd_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux can1_dis_sd_modemux[] = {
+       {
+               .muxregs = can1_dis_sd_muxreg,
+               .nmuxregs = ARRAY_SIZE(can1_dis_sd_muxreg),
+       },
+};
+
+static struct spear_pingroup can1_dis_sd_pingroup = {
+       .name = "can1_dis_sd_grp",
+       .pins = can1_dis_sd_pins,
+       .npins = ARRAY_SIZE(can1_dis_sd_pins),
+       .modemuxs = can1_dis_sd_modemux,
+       .nmodemuxs = ARRAY_SIZE(can1_dis_sd_modemux),
+};
+
+/* Pad multiplexing for can1_dis_kbd device */
+/* Muxed with KBD */
+static const unsigned can1_dis_kbd_pins[] = { 201, 202 };
+static struct spear_muxreg can1_dis_kbd_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               /* same reg/mask as i2c_6_7_dis_kbd_muxreg above */
+               .mask = PMX_KBD_ROWCOL25_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux can1_dis_kbd_modemux[] = {
+       {
+               .muxregs = can1_dis_kbd_muxreg,
+               .nmuxregs = ARRAY_SIZE(can1_dis_kbd_muxreg),
+       },
+};
+
+static struct spear_pingroup can1_dis_kbd_pingroup = {
+       .name = "can1_dis_kbd_grp",
+       .pins = can1_dis_kbd_pins,
+       .npins = ARRAY_SIZE(can1_dis_kbd_pins),
+       .modemuxs = can1_dis_kbd_modemux,
+       .nmodemuxs = ARRAY_SIZE(can1_dis_kbd_modemux),
+};
+
+static const char *const can1_grps[] = { "can1_dis_sd_grp", "can1_dis_kbd_grp"
+};
+static struct spear_function can1_function = {
+       .name = "can1",
+       .groups = can1_grps,
+       .ngroups = ARRAY_SIZE(can1_grps),
+};
+
+/* Pad multiplexing for pci device */
+/* NOTE: this pad list is shared by the pcie0/1/2 and sata0/1/2 groups below */
+static const unsigned pci_sata_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18,
+       19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+       37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+       55, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 };
+/* common PAD_FUNCTION_EN_* settings for all PCIe/SATA groups */
+#define PCI_SATA_MUXREG                                \
+       {                                       \
+               .reg = PAD_FUNCTION_EN_0,       \
+               .mask = PMX_MCI_DATA8_15_MASK,  \
+               .val = 0,                       \
+       }, {                                    \
+               .reg = PAD_FUNCTION_EN_1,       \
+               .mask = PMX_PCI_REG1_MASK,      \
+               .val = 0,                       \
+       }, {                                    \
+               .reg = PAD_FUNCTION_EN_2,       \
+               .mask = PMX_PCI_REG2_MASK,      \
+               .val = 0,                       \
+       }
+
+/* pad multiplexing for pcie0 device */
+static struct spear_muxreg pcie0_muxreg[] = {
+       PCI_SATA_MUXREG,
+       {
+               .reg = PCIE_SATA_CFG,
+               .mask = PCIE_CFG_VAL(0),
+               .val = PCIE_CFG_VAL(0),
+       },
+};
+
+static struct spear_modemux pcie0_modemux[] = {
+       {
+               .muxregs = pcie0_muxreg,
+               .nmuxregs = ARRAY_SIZE(pcie0_muxreg),
+       },
+};
+
+static struct spear_pingroup pcie0_pingroup = {
+       .name = "pcie0_grp",
+       .pins = pci_sata_pins,
+       .npins = ARRAY_SIZE(pci_sata_pins),
+       .modemuxs = pcie0_modemux,
+       .nmodemuxs = ARRAY_SIZE(pcie0_modemux),
+};
+
+/* pad multiplexing for pcie1 device */
+static struct spear_muxreg pcie1_muxreg[] = {
+       PCI_SATA_MUXREG,
+       {
+               .reg = PCIE_SATA_CFG,
+               .mask = PCIE_CFG_VAL(1),
+               .val = PCIE_CFG_VAL(1),
+       },
+};
+
+static struct spear_modemux pcie1_modemux[] = {
+       {
+               .muxregs = pcie1_muxreg,
+               .nmuxregs = ARRAY_SIZE(pcie1_muxreg),
+       },
+};
+
+static struct spear_pingroup pcie1_pingroup = {
+       .name = "pcie1_grp",
+       .pins = pci_sata_pins,
+       .npins = ARRAY_SIZE(pci_sata_pins),
+       .modemuxs = pcie1_modemux,
+       .nmodemuxs = ARRAY_SIZE(pcie1_modemux),
+};
+
+/* pad multiplexing for pcie2 device */
+static struct spear_muxreg pcie2_muxreg[] = {
+       PCI_SATA_MUXREG,
+       {
+               .reg = PCIE_SATA_CFG,
+               .mask = PCIE_CFG_VAL(2),
+               .val = PCIE_CFG_VAL(2),
+       },
+};
+
+static struct spear_modemux pcie2_modemux[] = {
+       {
+               .muxregs = pcie2_muxreg,
+               .nmuxregs = ARRAY_SIZE(pcie2_muxreg),
+       },
+};
+
+static struct spear_pingroup pcie2_pingroup = {
+       .name = "pcie2_grp",
+       .pins = pci_sata_pins,
+       .npins = ARRAY_SIZE(pci_sata_pins),
+       .modemuxs = pcie2_modemux,
+       .nmodemuxs = ARRAY_SIZE(pcie2_modemux),
+};
+
+static const char *const pci_grps[] = { "pcie0_grp", "pcie1_grp", "pcie2_grp" };
+static struct spear_function pci_function = {
+       .name = "pci",
+       .groups = pci_grps,
+       .ngroups = ARRAY_SIZE(pci_grps),
+};
+
+/* pad multiplexing for sata0 device */
+/* SATA groups reuse pci_sata_pins; SATA_CFG_VAL() selects SATA over PCIe */
+static struct spear_muxreg sata0_muxreg[] = {
+       PCI_SATA_MUXREG,
+       {
+               .reg = PCIE_SATA_CFG,
+               .mask = SATA_CFG_VAL(0),
+               .val = SATA_CFG_VAL(0),
+       },
+};
+
+static struct spear_modemux sata0_modemux[] = {
+       {
+               .muxregs = sata0_muxreg,
+               .nmuxregs = ARRAY_SIZE(sata0_muxreg),
+       },
+};
+
+static struct spear_pingroup sata0_pingroup = {
+       .name = "sata0_grp",
+       .pins = pci_sata_pins,
+       .npins = ARRAY_SIZE(pci_sata_pins),
+       .modemuxs = sata0_modemux,
+       .nmodemuxs = ARRAY_SIZE(sata0_modemux),
+};
+
+/* pad multiplexing for sata1 device */
+static struct spear_muxreg sata1_muxreg[] = {
+       PCI_SATA_MUXREG,
+       {
+               .reg = PCIE_SATA_CFG,
+               .mask = SATA_CFG_VAL(1),
+               .val = SATA_CFG_VAL(1),
+       },
+};
+
+static struct spear_modemux sata1_modemux[] = {
+       {
+               .muxregs = sata1_muxreg,
+               .nmuxregs = ARRAY_SIZE(sata1_muxreg),
+       },
+};
+
+static struct spear_pingroup sata1_pingroup = {
+       .name = "sata1_grp",
+       .pins = pci_sata_pins,
+       .npins = ARRAY_SIZE(pci_sata_pins),
+       .modemuxs = sata1_modemux,
+       .nmodemuxs = ARRAY_SIZE(sata1_modemux),
+};
+
+/* pad multiplexing for sata2 device */
+static struct spear_muxreg sata2_muxreg[] = {
+       PCI_SATA_MUXREG,
+       {
+               .reg = PCIE_SATA_CFG,
+               .mask = SATA_CFG_VAL(2),
+               .val = SATA_CFG_VAL(2),
+       },
+};
+
+static struct spear_modemux sata2_modemux[] = {
+       {
+               .muxregs = sata2_muxreg,
+               .nmuxregs = ARRAY_SIZE(sata2_muxreg),
+       },
+};
+
+static struct spear_pingroup sata2_pingroup = {
+       .name = "sata2_grp",
+       .pins = pci_sata_pins,
+       .npins = ARRAY_SIZE(pci_sata_pins),
+       .modemuxs = sata2_modemux,
+       .nmodemuxs = ARRAY_SIZE(sata2_modemux),
+};
+
+static const char *const sata_grps[] = { "sata0_grp", "sata1_grp", "sata2_grp"
+};
+static struct spear_function sata_function = {
+       .name = "sata",
+       .groups = sata_grps,
+       .ngroups = ARRAY_SIZE(sata_grps),
+};
+
+/* Pad multiplexing for ssp1_dis_kbd device */
+/* Muxed with KBD */
+static const unsigned ssp1_dis_kbd_pins[] = { 203, 204, 205, 206 };
+static struct spear_muxreg ssp1_dis_kbd_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK |
+                       PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK |
+                       PMX_NFCE2_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux ssp1_dis_kbd_modemux[] = {
+       {
+               .muxregs = ssp1_dis_kbd_muxreg,
+               .nmuxregs = ARRAY_SIZE(ssp1_dis_kbd_muxreg),
+       },
+};
+
+static struct spear_pingroup ssp1_dis_kbd_pingroup = {
+       .name = "ssp1_dis_kbd_grp",
+       .pins = ssp1_dis_kbd_pins,
+       .npins = ARRAY_SIZE(ssp1_dis_kbd_pins),
+       .modemuxs = ssp1_dis_kbd_modemux,
+       .nmodemuxs = ARRAY_SIZE(ssp1_dis_kbd_modemux),
+};
+
+/* Pad multiplexing for ssp1_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned ssp1_dis_sd_pins[] = { 224, 226, 227, 228 };
+static struct spear_muxreg ssp1_dis_sd_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK |
+                       PMX_MCICECF_MASK | PMX_MCICEXD_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux ssp1_dis_sd_modemux[] = {
+       {
+               .muxregs = ssp1_dis_sd_muxreg,
+               .nmuxregs = ARRAY_SIZE(ssp1_dis_sd_muxreg),
+       },
+};
+
+static struct spear_pingroup ssp1_dis_sd_pingroup = {
+       .name = "ssp1_dis_sd_grp",
+       .pins = ssp1_dis_sd_pins,
+       .npins = ARRAY_SIZE(ssp1_dis_sd_pins),
+       .modemuxs = ssp1_dis_sd_modemux,
+       .nmodemuxs = ARRAY_SIZE(ssp1_dis_sd_modemux),
+};
+
+static const char *const ssp1_grps[] = { "ssp1_dis_kbd_grp",
+       "ssp1_dis_sd_grp" };
+static struct spear_function ssp1_function = {
+       .name = "ssp1",
+       .groups = ssp1_grps,
+       .ngroups = ARRAY_SIZE(ssp1_grps),
+};
+
+/* Pad multiplexing for gpt64 device */
+/* Muxed with SD/MMC (PMX_MCI* bits) */
+static const unsigned gpt64_pins[] = { 230, 231, 232, 245 };
+static struct spear_muxreg gpt64_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK
+                       | PMX_MCILEDS_MASK,
+               .val = 0,
+       },
+};
+
+static struct spear_modemux gpt64_modemux[] = {
+       {
+               .muxregs = gpt64_muxreg,
+               .nmuxregs = ARRAY_SIZE(gpt64_muxreg),
+       },
+};
+
+static struct spear_pingroup gpt64_pingroup = {
+       .name = "gpt64_grp",
+       .pins = gpt64_pins,
+       .npins = ARRAY_SIZE(gpt64_pins),
+       .modemuxs = gpt64_modemux,
+       .nmodemuxs = ARRAY_SIZE(gpt64_modemux),
+};
+
+static const char *const gpt64_grps[] = { "gpt64_grp" };
+static struct spear_function gpt64_function = {
+       .name = "gpt64",
+       .groups = gpt64_grps,
+       .ngroups = ARRAY_SIZE(gpt64_grps),
+};
+
+/* pingroups */
+/* table of all pin groups defined above, registered via spear1310_machdata */
+static struct spear_pingroup *spear1310_pingroups[] = {
+       &i2c0_pingroup,
+       &ssp0_pingroup,
+       &i2s0_pingroup,
+       &i2s1_pingroup,
+       &clcd_pingroup,
+       &clcd_high_res_pingroup,
+       &arm_gpio_pingroup,
+       &smi_2_chips_pingroup,
+       &smi_4_chips_pingroup,
+       &gmii_pingroup,
+       &rgmii_pingroup,
+       &smii_0_1_2_pingroup,
+       &ras_mii_txclk_pingroup,
+       &nand_8bit_pingroup,
+       &nand_16bit_pingroup,
+       &nand_4_chips_pingroup,
+       &keyboard_6x6_pingroup,
+       &keyboard_rowcol6_8_pingroup,
+       &uart0_pingroup,
+       &uart0_modem_pingroup,
+       &gpt0_tmr0_pingroup,
+       &gpt0_tmr1_pingroup,
+       &gpt1_tmr0_pingroup,
+       &gpt1_tmr1_pingroup,
+       &sdhci_pingroup,
+       &cf_pingroup,
+       &xd_pingroup,
+       &touch_xy_pingroup,
+       &ssp0_cs0_pingroup,
+       &ssp0_cs1_2_pingroup,
+       &uart_1_dis_i2c_pingroup,
+       &uart_1_dis_sd_pingroup,
+       &uart_2_3_pingroup,
+       &uart_4_pingroup,
+       &uart_5_pingroup,
+       &rs485_0_1_tdm_0_1_pingroup,
+       &i2c_1_2_pingroup,
+       &i2c3_dis_smi_clcd_pingroup,
+       &i2c3_dis_sd_i2s0_pingroup,
+       &i2c_4_5_dis_smi_pingroup,
+       &i2c4_dis_sd_pingroup,
+       &i2c5_dis_sd_pingroup,
+       &i2c_6_7_dis_kbd_pingroup,
+       &i2c6_dis_sd_pingroup,
+       &i2c7_dis_sd_pingroup,
+       &can0_dis_nor_pingroup,
+       &can0_dis_sd_pingroup,
+       &can1_dis_sd_pingroup,
+       &can1_dis_kbd_pingroup,
+       &pcie0_pingroup,
+       &pcie1_pingroup,
+       &pcie2_pingroup,
+       &sata0_pingroup,
+       &sata1_pingroup,
+       &sata2_pingroup,
+       &ssp1_dis_kbd_pingroup,
+       &ssp1_dis_sd_pingroup,
+       &gpt64_pingroup,
+};
+
+/* functions */
+/* table of all selectable functions defined above */
+static struct spear_function *spear1310_functions[] = {
+       &i2c0_function,
+       &ssp0_function,
+       &i2s0_function,
+       &i2s1_function,
+       &clcd_function,
+       &arm_gpio_function,
+       &smi_function,
+       &gmii_function,
+       &rgmii_function,
+       &smii_0_1_2_function,
+       &ras_mii_txclk_function,
+       &nand_function,
+       &keyboard_function,
+       &uart0_function,
+       &gpt0_function,
+       &gpt1_function,
+       &sdhci_function,
+       &cf_function,
+       &xd_function,
+       &touch_xy_function,
+       &uart1_function,
+       &uart2_3_function,
+       &uart4_function,
+       &uart5_function,
+       &rs485_0_1_tdm_0_1_function,
+       &i2c_1_2_function,
+       /* sic: "i2c3_unction" is a typo in the definition above */
+       &i2c3_unction,
+       &i2c_4_5_function,
+       &i2c_6_7_function,
+       &can0_function,
+       &can1_function,
+       &pci_function,
+       &sata_function,
+       &ssp1_function,
+       &gpt64_function,
+};
+
+/* Machine data handed to the common SPEAr pinctrl core at probe time. */
+static struct spear_pinctrl_machdata spear1310_machdata = {
+       .pins = spear1310_pins,
+       .npins = ARRAY_SIZE(spear1310_pins),
+       .groups = spear1310_pingroups,
+       .ngroups = ARRAY_SIZE(spear1310_pingroups),
+       .functions = spear1310_functions,
+       .nfunctions = ARRAY_SIZE(spear1310_functions),
+       /* presumably disables SPEAr3xx-style mode handling — confirm in pinctrl-spear.h */
+       .modes_supported = false,
+};
+
+/*
+ * OF match table. Deliberately NOT marked __devinitdata: the driver core can
+ * consult driver.of_match_table after init memory has been discarded (e.g. a
+ * sysfs-triggered bind with CONFIG_HOTPLUG=n), so the table must stay
+ * resident. Also const, matching the const of_device_id pointer in
+ * struct device_driver and the MODULE_DEVICE_TABLE() reference below.
+ */
+static const struct of_device_id spear1310_pinctrl_of_match[] = {
+       {
+               .compatible = "st,spear1310-pinmux",
+       },
+       {},
+};
+
+/* Probe: delegate to the shared SPEAr pinctrl core with our machine data. */
+static int __devinit spear1310_pinctrl_probe(struct platform_device *pdev)
+{
+       return spear_pinctrl_probe(pdev, &spear1310_machdata);
+}
+
+/* Remove: tear down via the shared SPEAr pinctrl core. */
+static int __devexit spear1310_pinctrl_remove(struct platform_device *pdev)
+{
+       return spear_pinctrl_remove(pdev);
+}
+
+static struct platform_driver spear1310_pinctrl_driver = {
+       .driver = {
+               .name = DRIVER_NAME,
+               .owner = THIS_MODULE,
+               .of_match_table = spear1310_pinctrl_of_match,
+       },
+       .probe = spear1310_pinctrl_probe,
+       .remove = __devexit_p(spear1310_pinctrl_remove),
+};
+
+/* arch_initcall: pinctrl must be up before dependent drivers initialize */
+static int __init spear1310_pinctrl_init(void)
+{
+       return platform_driver_register(&spear1310_pinctrl_driver);
+}
+arch_initcall(spear1310_pinctrl_init);
+
+static void __exit spear1310_pinctrl_exit(void)
+{
+       platform_driver_unregister(&spear1310_pinctrl_driver);
+}
+module_exit(spear1310_pinctrl_exit);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+/* NOTE(review): conventionally placed right after the match table itself */
+MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
new file mode 100644 (file)
index 0000000..a8ab2a6
--- /dev/null
@@ -0,0 +1,1989 @@
+/*
+ * Driver for the ST Microelectronics SPEAr1340 pinmux
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "pinctrl-spear.h"
+
+#define DRIVER_NAME "spear1340-pinmux"
+
+/* pins */
+/* pads 0-245 come from the shared SPEAR_PIN_* macros; 246-251 are extra PLGPIOs */
+static const struct pinctrl_pin_desc spear1340_pins[] = {
+       SPEAR_PIN_0_TO_101,
+       SPEAR_PIN_102_TO_245,
+       PINCTRL_PIN(246, "PLGPIO246"),
+       PINCTRL_PIN(247, "PLGPIO247"),
+       PINCTRL_PIN(248, "PLGPIO248"),
+       PINCTRL_PIN(249, "PLGPIO249"),
+       PINCTRL_PIN(250, "PLGPIO250"),
+       PINCTRL_PIN(251, "PLGPIO251"),
+};
+
+/* In SPEAr1340 there are two levels of pad muxing */
+/* - pads as gpio OR peripherals */
+#define PAD_FUNCTION_EN_1                      0x668
+#define PAD_FUNCTION_EN_2                      0x66C
+#define PAD_FUNCTION_EN_3                      0x670
+#define PAD_FUNCTION_EN_4                      0x674
+#define PAD_FUNCTION_EN_5                      0x690
+#define PAD_FUNCTION_EN_6                      0x694
+#define PAD_FUNCTION_EN_7                      0x698
+#define PAD_FUNCTION_EN_8                      0x69C
+
+/* - If peripherals, then primary OR alternate peripheral */
+#define PAD_SHARED_IP_EN_1                     0x6A0
+#define PAD_SHARED_IP_EN_2                     0x6A4
+
+/*
+ * Macros for the first level of pmx - pads as gpio OR peripherals. There are 8
+ * registers with 32 bits each for handling gpio pads; register 8 has only 27
+ * relevant bits (mask 0x07FFFFFF).
+ */
+/* macros for making pads into GPIOs */
+#define PADS_AS_GPIO_REG0_MASK                 0xFFFFFFFE
+#define PADS_AS_GPIO_REGS_MASK                 0xFFFFFFFF
+#define PADS_AS_GPIO_REG7_MASK                 0x07FFFFFF
+
+/* macros for making pads into peripherals */
+#define FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK  0x00000FFE
+#define UART0_ENH_AND_GPT_REG0_MASK            0x0003F000
+#define PWM1_AND_KBD_COL5_REG0_MASK            0x00040000
+#define I2C1_REG0_MASK                         0x01080000
+#define SPDIF_IN_REG0_MASK                     0x00100000
+#define PWM2_AND_GPT0_TMR0_CPT_REG0_MASK       0x00400000
+#define PWM3_AND_GPT0_TMR1_CLK_REG0_MASK       0x00800000
+#define PWM0_AND_SSP0_CS1_REG0_MASK            0x02000000
+#define VIP_AND_CAM3_REG0_MASK                 0xFC200000
+#define VIP_AND_CAM3_REG1_MASK                 0x0000000F
+#define VIP_REG1_MASK                          0x00001EF0
+#define VIP_AND_CAM2_REG1_MASK                 0x007FE100
+#define VIP_AND_CAM1_REG1_MASK                 0xFF800000
+#define VIP_AND_CAM1_REG2_MASK                 0x00000003
+#define VIP_AND_CAM0_REG2_MASK                 0x00001FFC
+#define SMI_REG2_MASK                          0x0021E000
+#define SSP0_REG2_MASK                         0x001E0000
+#define TS_AND_SSP0_CS2_REG2_MASK              0x00400000
+#define UART0_REG2_MASK                                0x01800000
+#define UART1_REG2_MASK                                0x06000000
+#define I2S_IN_REG2_MASK                       0xF8000000
+#define DEVS_GRP_AND_MIPHY_DBG_REG3_MASK       0x000001FE
+#define I2S_OUT_REG3_MASK                      0x000001EF
+#define I2S_IN_REG3_MASK                       0x00000010
+#define GMAC_REG3_MASK                         0xFFFFFE00
+#define GMAC_REG4_MASK                         0x0000001F
+#define DEVS_GRP_AND_MIPHY_DBG_REG4_MASK       0x7FFFFF20
+#define SSP0_CS3_REG4_MASK                     0x00000020
+#define I2C0_REG4_MASK                         0x000000C0
+#define CEC0_REG4_MASK                         0x00000100
+#define CEC1_REG4_MASK                         0x00000200
+#define SPDIF_OUT_REG4_MASK                    0x00000400
+#define CLCD_REG4_MASK                         0x7FFFF800
+#define CLCD_AND_ARM_TRACE_REG4_MASK           0x80000000
+#define CLCD_AND_ARM_TRACE_REG5_MASK           0xFFFFFFFF
+#define CLCD_AND_ARM_TRACE_REG6_MASK           0x00000001
+#define FSMC_PNOR_AND_MCIF_REG6_MASK           0x073FFFFE
+#define MCIF_REG6_MASK                         0xF8C00000
+#define MCIF_REG7_MASK                         0x000043FF
+#define FSMC_8BIT_REG7_MASK                    0x07FFBC00
+
+/* other registers */
+#define PERIP_CFG                              0x42C
+       /* PERIP_CFG register masks */
+       #define SSP_CS_CTL_HW                   0
+       #define SSP_CS_CTL_SW                   1
+       #define SSP_CS_CTL_MASK                 1
+       #define SSP_CS_CTL_SHIFT                21
+       #define SSP_CS_VAL_MASK                 1
+       #define SSP_CS_VAL_SHIFT                20
+       #define SSP_CS_SEL_CS0                  0
+       #define SSP_CS_SEL_CS1                  1
+       #define SSP_CS_SEL_CS2                  2
+       #define SSP_CS_SEL_MASK                 3
+       #define SSP_CS_SEL_SHIFT                18
+
+       #define I2S_CHNL_2_0                    (0)
+       #define I2S_CHNL_3_1                    (1)
+       #define I2S_CHNL_5_1                    (2)
+       #define I2S_CHNL_7_1                    (3)
+       #define I2S_CHNL_PLAY_SHIFT             (4)
+       #define I2S_CHNL_PLAY_MASK              (3 << 4)
+       #define I2S_CHNL_REC_SHIFT              (6)
+       #define I2S_CHNL_REC_MASK               (3 << 6)
+
+       #define SPDIF_OUT_ENB_MASK              (1 << 2)
+       #define SPDIF_OUT_ENB_SHIFT             2
+
+       #define MCIF_SEL_SD                     1
+       #define MCIF_SEL_CF                     2
+       #define MCIF_SEL_XD                     3
+       #define MCIF_SEL_MASK                   3
+       #define MCIF_SEL_SHIFT                  0
+
+#define GMAC_CLK_CFG                           0x248
+       #define GMAC_PHY_IF_GMII_VAL            (0 << 3)
+       #define GMAC_PHY_IF_RGMII_VAL           (1 << 3)
+       #define GMAC_PHY_IF_SGMII_VAL           (2 << 3)
+       #define GMAC_PHY_IF_RMII_VAL            (4 << 3)
+       #define GMAC_PHY_IF_SEL_MASK            (7 << 3)
+       #define GMAC_PHY_INPUT_ENB_VAL          0
+       #define GMAC_PHY_SYNT_ENB_VAL           1
+       #define GMAC_PHY_CLK_MASK               1
+       #define GMAC_PHY_CLK_SHIFT              2
+       #define GMAC_PHY_125M_PAD_VAL           0
+       #define GMAC_PHY_PLL2_VAL               1
+       #define GMAC_PHY_OSC3_VAL               2
+       #define GMAC_PHY_INPUT_CLK_MASK         3
+       #define GMAC_PHY_INPUT_CLK_SHIFT        0
+
+#define PCIE_SATA_CFG                          0x424
+       /* PCIe CFG masks */
+       #define PCIE_CFG_DEVICE_PRESENT         (1 << 11)
+       #define PCIE_CFG_POWERUP_RESET          (1 << 10)
+       #define PCIE_CFG_CORE_CLK_EN            (1 << 9)
+       #define PCIE_CFG_AUX_CLK_EN             (1 << 8)
+       #define SATA_CFG_TX_CLK_EN              (1 << 4)
+       #define SATA_CFG_RX_CLK_EN              (1 << 3)
+       #define SATA_CFG_POWERUP_RESET          (1 << 2)
+       #define SATA_CFG_PM_CLK_EN              (1 << 1)
+       #define PCIE_SATA_SEL_PCIE              (0)
+       #define PCIE_SATA_SEL_SATA              (1)
+       #define SATA_PCIE_CFG_MASK              0xF1F
+       #define PCIE_CFG_VAL    (PCIE_SATA_SEL_PCIE | PCIE_CFG_AUX_CLK_EN | \
+                               PCIE_CFG_CORE_CLK_EN | PCIE_CFG_POWERUP_RESET |\
+                               PCIE_CFG_DEVICE_PRESENT)
+       #define SATA_CFG_VAL    (PCIE_SATA_SEL_SATA | SATA_CFG_PM_CLK_EN | \
+                               SATA_CFG_POWERUP_RESET | SATA_CFG_RX_CLK_EN | \
+                               SATA_CFG_TX_CLK_EN)
+
+/* Macros for the second level of pmx - pads as primary OR alternate peripheral */
+/* Write 0 to enable FSMC_16_BIT */
+#define KBD_ROW_COL_MASK                       (1 << 0)
+
+/* Write 0 to enable UART0_ENH */
+#define GPT_MASK                               (1 << 1) /* Only clk & cpt */
+
+/* Write 0 to enable PWM1 */
+#define KBD_COL5_MASK                          (1 << 2)
+
+/* Write 0 to enable PWM2 */
+#define GPT0_TMR0_CPT_MASK                     (1 << 3) /* Only clk & cpt */
+
+/* Write 0 to enable PWM3 */
+#define GPT0_TMR1_CLK_MASK                     (1 << 4) /* Only clk & cpt */
+
+/* Write 0 to enable PWM0 */
+#define SSP0_CS1_MASK                          (1 << 5)
+
+/* Write 0 to enable VIP */
+#define CAM3_MASK                              (1 << 6)
+
+/* Write 0 to enable VIP */
+#define CAM2_MASK                              (1 << 7)
+
+/* Write 0 to enable VIP */
+#define CAM1_MASK                              (1 << 8)
+
+/* Write 0 to enable VIP */
+#define CAM0_MASK                              (1 << 9)
+
+/* Write 0 to enable TS */
+#define SSP0_CS2_MASK                          (1 << 10)
+
+/* Write 0 to enable FSMC PNOR */
+#define MCIF_MASK                              (1 << 11)
+
+/* Write 0 to enable CLCD */
+#define ARM_TRACE_MASK                         (1 << 12)
+
+/* Write 0 to enable I2S, SSP0_CS2, CEC0, 1, SPDIF out, CLCD */
+#define MIPHY_DBG_MASK                         (1 << 13)
+
+/*
+ * Pad multiplexing for making all pads as gpio's. This is done to override the
+ * values passed from bootloader and start from scratch.
+ */
+/*
+ * NOTE: the group claims a single pin (251) only, but the muxregs below clear
+ * every bit of all eight PAD_FUNCTION_EN registers, switching every pad to
+ * gpio mode as described in the comment above.
+ */
+static const unsigned pads_as_gpio_pins[] = { 251 };
+static struct spear_muxreg pads_as_gpio_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PADS_AS_GPIO_REG0_MASK,
+               .val = 0x0,
+       }, {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = PADS_AS_GPIO_REGS_MASK,
+               .val = 0x0,
+       }, {
+               .reg = PAD_FUNCTION_EN_3,
+               .mask = PADS_AS_GPIO_REGS_MASK,
+               .val = 0x0,
+       }, {
+               .reg = PAD_FUNCTION_EN_4,
+               .mask = PADS_AS_GPIO_REGS_MASK,
+               .val = 0x0,
+       }, {
+               .reg = PAD_FUNCTION_EN_5,
+               .mask = PADS_AS_GPIO_REGS_MASK,
+               .val = 0x0,
+       }, {
+               .reg = PAD_FUNCTION_EN_6,
+               .mask = PADS_AS_GPIO_REGS_MASK,
+               .val = 0x0,
+       }, {
+               .reg = PAD_FUNCTION_EN_7,
+               .mask = PADS_AS_GPIO_REGS_MASK,
+               .val = 0x0,
+       }, {
+               .reg = PAD_FUNCTION_EN_8,
+               .mask = PADS_AS_GPIO_REG7_MASK,
+               .val = 0x0,
+       },
+};
+
+static struct spear_modemux pads_as_gpio_modemux[] = {
+       {
+               .muxregs = pads_as_gpio_muxreg,
+               .nmuxregs = ARRAY_SIZE(pads_as_gpio_muxreg),
+       },
+};
+
+static struct spear_pingroup pads_as_gpio_pingroup = {
+       .name = "pads_as_gpio_grp",
+       .pins = pads_as_gpio_pins,
+       .npins = ARRAY_SIZE(pads_as_gpio_pins),
+       .modemuxs = pads_as_gpio_modemux,
+       .nmodemuxs = ARRAY_SIZE(pads_as_gpio_modemux),
+};
+
+static const char *const pads_as_gpio_grps[] = { "pads_as_gpio_grp" };
+static struct spear_function pads_as_gpio_function = {
+       .name = "pads_as_gpio",
+       .groups = pads_as_gpio_grps,
+       .ngroups = ARRAY_SIZE(pads_as_gpio_grps),
+};
+
+/* Pad multiplexing for fsmc_8bit device */
+static const unsigned fsmc_8bit_pins[] = { 233, 234, 235, 236, 238, 239, 240,
+       241, 242, 243, 244, 245, 246, 247, 248, 249 };
+static struct spear_muxreg fsmc_8bit_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_8,
+               .mask = FSMC_8BIT_REG7_MASK,
+               .val = FSMC_8BIT_REG7_MASK,
+       },
+};
+
+static struct spear_modemux fsmc_8bit_modemux[] = {
+       {
+               .muxregs = fsmc_8bit_muxreg,
+               .nmuxregs = ARRAY_SIZE(fsmc_8bit_muxreg),
+       },
+};
+
+static struct spear_pingroup fsmc_8bit_pingroup = {
+       .name = "fsmc_8bit_grp",
+       .pins = fsmc_8bit_pins,
+       .npins = ARRAY_SIZE(fsmc_8bit_pins),
+       .modemuxs = fsmc_8bit_modemux,
+       .nmodemuxs = ARRAY_SIZE(fsmc_8bit_modemux),
+};
+
+/* Pad multiplexing for fsmc_16bit device */
+/*
+ * Pads 0-10 are shared with the keyboard rows/cols group: clearing
+ * KBD_ROW_COL_MASK in PAD_SHARED_IP_EN_1 steers them to the FSMC here
+ * (the keyboard group sets the same bit instead).
+ */
+static const unsigned fsmc_16bit_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+static struct spear_muxreg fsmc_16bit_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = KBD_ROW_COL_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+               .val = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+       },
+};
+
+static struct spear_modemux fsmc_16bit_modemux[] = {
+       {
+               .muxregs = fsmc_16bit_muxreg,
+               .nmuxregs = ARRAY_SIZE(fsmc_16bit_muxreg),
+       },
+};
+
+static struct spear_pingroup fsmc_16bit_pingroup = {
+       .name = "fsmc_16bit_grp",
+       .pins = fsmc_16bit_pins,
+       .npins = ARRAY_SIZE(fsmc_16bit_pins),
+       .modemuxs = fsmc_16bit_modemux,
+       .nmodemuxs = ARRAY_SIZE(fsmc_16bit_modemux),
+};
+
+/* pad multiplexing for fsmc_pnor device */
+/* These pads are shared with MCIF: MCIF_MASK is cleared to pick the FSMC. */
+static const unsigned fsmc_pnor_pins[] = { 192, 193, 194, 195, 196, 197, 198,
+       199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+       215, 216, 217 };
+static struct spear_muxreg fsmc_pnor_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = MCIF_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_7,
+               .mask = FSMC_PNOR_AND_MCIF_REG6_MASK,
+               .val = FSMC_PNOR_AND_MCIF_REG6_MASK,
+       },
+};
+
+static struct spear_modemux fsmc_pnor_modemux[] = {
+       {
+               .muxregs = fsmc_pnor_muxreg,
+               .nmuxregs = ARRAY_SIZE(fsmc_pnor_muxreg),
+       },
+};
+
+static struct spear_pingroup fsmc_pnor_pingroup = {
+       .name = "fsmc_pnor_grp",
+       .pins = fsmc_pnor_pins,
+       .npins = ARRAY_SIZE(fsmc_pnor_pins),
+       .modemuxs = fsmc_pnor_modemux,
+       .nmodemuxs = ARRAY_SIZE(fsmc_pnor_modemux),
+};
+
+static const char *const fsmc_grps[] = { "fsmc_8bit_grp", "fsmc_16bit_grp",
+       "fsmc_pnor_grp" };
+static struct spear_function fsmc_function = {
+       .name = "fsmc",
+       .groups = fsmc_grps,
+       .ngroups = ARRAY_SIZE(fsmc_grps),
+};
+
+/* pad multiplexing for keyboard rows-cols device */
+/*
+ * Pads 0-10 are shared with fsmc_16bit; setting KBD_ROW_COL_MASK in
+ * PAD_SHARED_IP_EN_1 steers them to the keyboard controller.
+ */
+static const unsigned keyboard_row_col_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+       10 };
+static struct spear_muxreg keyboard_row_col_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = KBD_ROW_COL_MASK,
+               .val = KBD_ROW_COL_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+               .val = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+       },
+};
+
+static struct spear_modemux keyboard_row_col_modemux[] = {
+       {
+               .muxregs = keyboard_row_col_muxreg,
+               .nmuxregs = ARRAY_SIZE(keyboard_row_col_muxreg),
+       },
+};
+
+static struct spear_pingroup keyboard_row_col_pingroup = {
+       .name = "keyboard_row_col_grp",
+       .pins = keyboard_row_col_pins,
+       .npins = ARRAY_SIZE(keyboard_row_col_pins),
+       .modemuxs = keyboard_row_col_modemux,
+       .nmodemuxs = ARRAY_SIZE(keyboard_row_col_modemux),
+};
+
+/* pad multiplexing for keyboard col5 device */
+/* Pad 17 is shared with pwm1; KBD_COL5_MASK selects the keyboard here. */
+static const unsigned keyboard_col5_pins[] = { 17 };
+static struct spear_muxreg keyboard_col5_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = KBD_COL5_MASK,
+               .val = KBD_COL5_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PWM1_AND_KBD_COL5_REG0_MASK,
+               .val = PWM1_AND_KBD_COL5_REG0_MASK,
+       },
+};
+
+static struct spear_modemux keyboard_col5_modemux[] = {
+       {
+               .muxregs = keyboard_col5_muxreg,
+               .nmuxregs = ARRAY_SIZE(keyboard_col5_muxreg),
+       },
+};
+
+static struct spear_pingroup keyboard_col5_pingroup = {
+       .name = "keyboard_col5_grp",
+       .pins = keyboard_col5_pins,
+       .npins = ARRAY_SIZE(keyboard_col5_pins),
+       .modemuxs = keyboard_col5_modemux,
+       .nmodemuxs = ARRAY_SIZE(keyboard_col5_modemux),
+};
+
+static const char *const keyboard_grps[] = { "keyboard_row_col_grp",
+       "keyboard_col5_grp" };
+static struct spear_function keyboard_function = {
+       .name = "keyboard",
+       .groups = keyboard_grps,
+       .ngroups = ARRAY_SIZE(keyboard_grps),
+};
+
+/* pad multiplexing for spdif_in device */
+/* Single dedicated pad; only the function-enable bit needs to be set. */
+static const unsigned spdif_in_pins[] = { 19 };
+static struct spear_muxreg spdif_in_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = SPDIF_IN_REG0_MASK,
+               .val = SPDIF_IN_REG0_MASK,
+       },
+};
+
+static struct spear_modemux spdif_in_modemux[] = {
+       {
+               .muxregs = spdif_in_muxreg,
+               .nmuxregs = ARRAY_SIZE(spdif_in_muxreg),
+       },
+};
+
+static struct spear_pingroup spdif_in_pingroup = {
+       .name = "spdif_in_grp",
+       .pins = spdif_in_pins,
+       .npins = ARRAY_SIZE(spdif_in_pins),
+       .modemuxs = spdif_in_modemux,
+       .nmodemuxs = ARRAY_SIZE(spdif_in_modemux),
+};
+
+static const char *const spdif_in_grps[] = { "spdif_in_grp" };
+static struct spear_function spdif_in_function = {
+       .name = "spdif_in",
+       .groups = spdif_in_grps,
+       .ngroups = ARRAY_SIZE(spdif_in_grps),
+};
+
+/* pad multiplexing for spdif_out device */
+/*
+ * Besides routing the pad, SPDIF_OUT_ENB_MASK in PERIP_CFG must also be set
+ * to enable the SPDIF-out peripheral itself.
+ */
+static const unsigned spdif_out_pins[] = { 137 };
+static struct spear_muxreg spdif_out_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_5,
+               .mask = SPDIF_OUT_REG4_MASK,
+               .val = SPDIF_OUT_REG4_MASK,
+       }, {
+               .reg = PERIP_CFG,
+               .mask = SPDIF_OUT_ENB_MASK,
+               .val = SPDIF_OUT_ENB_MASK,
+       },
+};
+
+static struct spear_modemux spdif_out_modemux[] = {
+       {
+               .muxregs = spdif_out_muxreg,
+               .nmuxregs = ARRAY_SIZE(spdif_out_muxreg),
+       },
+};
+
+static struct spear_pingroup spdif_out_pingroup = {
+       .name = "spdif_out_grp",
+       .pins = spdif_out_pins,
+       .npins = ARRAY_SIZE(spdif_out_pins),
+       .modemuxs = spdif_out_modemux,
+       .nmodemuxs = ARRAY_SIZE(spdif_out_modemux),
+};
+
+static const char *const spdif_out_grps[] = { "spdif_out_grp" };
+static struct spear_function spdif_out_function = {
+       .name = "spdif_out",
+       .groups = spdif_out_grps,
+       .ngroups = ARRAY_SIZE(spdif_out_grps),
+};
+
+/* pad multiplexing for gpt_0_1 device */
+/*
+ * Claims all GPT pads at once: the shared-IP selects for the base GPT pads
+ * plus the timer0 capture and timer1 clock pads (otherwise usable as
+ * uart0_enh / pwm2 / pwm3).
+ */
+static const unsigned gpt_0_1_pins[] = { 11, 12, 13, 14, 15, 16, 21, 22 };
+static struct spear_muxreg gpt_0_1_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = GPT_MASK | GPT0_TMR0_CPT_MASK | GPT0_TMR1_CLK_MASK,
+               .val = GPT_MASK | GPT0_TMR0_CPT_MASK | GPT0_TMR1_CLK_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = UART0_ENH_AND_GPT_REG0_MASK |
+                       PWM2_AND_GPT0_TMR0_CPT_REG0_MASK |
+                       PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+               .val = UART0_ENH_AND_GPT_REG0_MASK |
+                       PWM2_AND_GPT0_TMR0_CPT_REG0_MASK |
+                       PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+       },
+};
+
+static struct spear_modemux gpt_0_1_modemux[] = {
+       {
+               .muxregs = gpt_0_1_muxreg,
+               .nmuxregs = ARRAY_SIZE(gpt_0_1_muxreg),
+       },
+};
+
+static struct spear_pingroup gpt_0_1_pingroup = {
+       .name = "gpt_0_1_grp",
+       .pins = gpt_0_1_pins,
+       .npins = ARRAY_SIZE(gpt_0_1_pins),
+       .modemuxs = gpt_0_1_modemux,
+       .nmodemuxs = ARRAY_SIZE(gpt_0_1_modemux),
+};
+
+static const char *const gpt_0_1_grps[] = { "gpt_0_1_grp" };
+static struct spear_function gpt_0_1_function = {
+       .name = "gpt_0_1",
+       .groups = gpt_0_1_grps,
+       .ngroups = ARRAY_SIZE(gpt_0_1_grps),
+};
+
+/* pad multiplexing for pwm0 device */
+/*
+ * Each PWM output borrows a pad from another IP: the shared-IP select bit is
+ * cleared to take the pad away from that IP (ssp0_cs1 / kbd_col5 / gpt0
+ * capture / gpt0 clk) before the function-enable bit is set.
+ */
+static const unsigned pwm0_pins[] = { 24 };
+static struct spear_muxreg pwm0_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = SSP0_CS1_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PWM0_AND_SSP0_CS1_REG0_MASK,
+               .val = PWM0_AND_SSP0_CS1_REG0_MASK,
+       },
+};
+
+static struct spear_modemux pwm0_modemux[] = {
+       {
+               .muxregs = pwm0_muxreg,
+               .nmuxregs = ARRAY_SIZE(pwm0_muxreg),
+       },
+};
+
+static struct spear_pingroup pwm0_pingroup = {
+       .name = "pwm0_grp",
+       .pins = pwm0_pins,
+       .npins = ARRAY_SIZE(pwm0_pins),
+       .modemuxs = pwm0_modemux,
+       .nmodemuxs = ARRAY_SIZE(pwm0_modemux),
+};
+
+/* pad multiplexing for pwm1 device */
+static const unsigned pwm1_pins[] = { 17 };
+static struct spear_muxreg pwm1_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = KBD_COL5_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PWM1_AND_KBD_COL5_REG0_MASK,
+               .val = PWM1_AND_KBD_COL5_REG0_MASK,
+       },
+};
+
+static struct spear_modemux pwm1_modemux[] = {
+       {
+               .muxregs = pwm1_muxreg,
+               .nmuxregs = ARRAY_SIZE(pwm1_muxreg),
+       },
+};
+
+static struct spear_pingroup pwm1_pingroup = {
+       .name = "pwm1_grp",
+       .pins = pwm1_pins,
+       .npins = ARRAY_SIZE(pwm1_pins),
+       .modemuxs = pwm1_modemux,
+       .nmodemuxs = ARRAY_SIZE(pwm1_modemux),
+};
+
+/* pad multiplexing for pwm2 device */
+static const unsigned pwm2_pins[] = { 21 };
+static struct spear_muxreg pwm2_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = GPT0_TMR0_CPT_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PWM2_AND_GPT0_TMR0_CPT_REG0_MASK,
+               .val = PWM2_AND_GPT0_TMR0_CPT_REG0_MASK,
+       },
+};
+
+static struct spear_modemux pwm2_modemux[] = {
+       {
+               .muxregs = pwm2_muxreg,
+               .nmuxregs = ARRAY_SIZE(pwm2_muxreg),
+       },
+};
+
+static struct spear_pingroup pwm2_pingroup = {
+       .name = "pwm2_grp",
+       .pins = pwm2_pins,
+       .npins = ARRAY_SIZE(pwm2_pins),
+       .modemuxs = pwm2_modemux,
+       .nmodemuxs = ARRAY_SIZE(pwm2_modemux),
+};
+
+/* pad multiplexing for pwm3 device */
+static const unsigned pwm3_pins[] = { 22 };
+static struct spear_muxreg pwm3_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = GPT0_TMR1_CLK_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+               .val = PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+       },
+};
+
+static struct spear_modemux pwm3_modemux[] = {
+       {
+               .muxregs = pwm3_muxreg,
+               .nmuxregs = ARRAY_SIZE(pwm3_muxreg),
+       },
+};
+
+static struct spear_pingroup pwm3_pingroup = {
+       .name = "pwm3_grp",
+       .pins = pwm3_pins,
+       .npins = ARRAY_SIZE(pwm3_pins),
+       .modemuxs = pwm3_modemux,
+       .nmodemuxs = ARRAY_SIZE(pwm3_modemux),
+};
+
+static const char *const pwm_grps[] = { "pwm0_grp", "pwm1_grp", "pwm2_grp",
+       "pwm3_grp" };
+static struct spear_function pwm_function = {
+       .name = "pwm",
+       .groups = pwm_grps,
+       .ngroups = ARRAY_SIZE(pwm_grps),
+};
+
+/* pad multiplexing for vip_mux device */
+/* Base VIP pads that are not shared with any cam* device. */
+static const unsigned vip_mux_pins[] = { 35, 36, 37, 38, 40, 41, 42, 43 };
+static struct spear_muxreg vip_mux_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = VIP_REG1_MASK,
+               .val = VIP_REG1_MASK,
+       },
+};
+
+static struct spear_modemux vip_mux_modemux[] = {
+       {
+               .muxregs = vip_mux_muxreg,
+               .nmuxregs = ARRAY_SIZE(vip_mux_muxreg),
+       },
+};
+
+static struct spear_pingroup vip_mux_pingroup = {
+       .name = "vip_mux_grp",
+       .pins = vip_mux_pins,
+       .npins = ARRAY_SIZE(vip_mux_pins),
+       .modemuxs = vip_mux_modemux,
+       .nmodemuxs = ARRAY_SIZE(vip_mux_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam0 (disables cam0) device */
+/*
+ * The vip_mux_cam* groups use the same pads as the corresponding cam*
+ * groups below, but clear the CAMn shared-IP select so the pads go to the
+ * VIP block instead of the camera interface.
+ */
+static const unsigned vip_mux_cam0_pins[] = { 65, 66, 67, 68, 69, 70, 71, 72,
+       73, 74, 75 };
+static struct spear_muxreg vip_mux_cam0_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = CAM0_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_3,
+               .mask = VIP_AND_CAM0_REG2_MASK,
+               .val = VIP_AND_CAM0_REG2_MASK,
+       },
+};
+
+static struct spear_modemux vip_mux_cam0_modemux[] = {
+       {
+               .muxregs = vip_mux_cam0_muxreg,
+               .nmuxregs = ARRAY_SIZE(vip_mux_cam0_muxreg),
+       },
+};
+
+static struct spear_pingroup vip_mux_cam0_pingroup = {
+       .name = "vip_mux_cam0_grp",
+       .pins = vip_mux_cam0_pins,
+       .npins = ARRAY_SIZE(vip_mux_cam0_pins),
+       .modemuxs = vip_mux_cam0_modemux,
+       .nmodemuxs = ARRAY_SIZE(vip_mux_cam0_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam1 (disables cam1) device */
+static const unsigned vip_mux_cam1_pins[] = { 54, 55, 56, 57, 58, 59, 60, 61,
+       62, 63, 64 };
+static struct spear_muxreg vip_mux_cam1_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = CAM1_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = VIP_AND_CAM1_REG1_MASK,
+               .val = VIP_AND_CAM1_REG1_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_3,
+               .mask = VIP_AND_CAM1_REG2_MASK,
+               .val = VIP_AND_CAM1_REG2_MASK,
+       },
+};
+
+static struct spear_modemux vip_mux_cam1_modemux[] = {
+       {
+               .muxregs = vip_mux_cam1_muxreg,
+               .nmuxregs = ARRAY_SIZE(vip_mux_cam1_muxreg),
+       },
+};
+
+static struct spear_pingroup vip_mux_cam1_pingroup = {
+       .name = "vip_mux_cam1_grp",
+       .pins = vip_mux_cam1_pins,
+       .npins = ARRAY_SIZE(vip_mux_cam1_pins),
+       .modemuxs = vip_mux_cam1_modemux,
+       .nmodemuxs = ARRAY_SIZE(vip_mux_cam1_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam2 (disables cam2) device */
+static const unsigned vip_mux_cam2_pins[] = { 39, 44, 45, 46, 47, 48, 49, 50,
+       51, 52, 53 };
+static struct spear_muxreg vip_mux_cam2_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = CAM2_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = VIP_AND_CAM2_REG1_MASK,
+               .val = VIP_AND_CAM2_REG1_MASK,
+       },
+};
+
+static struct spear_modemux vip_mux_cam2_modemux[] = {
+       {
+               .muxregs = vip_mux_cam2_muxreg,
+               .nmuxregs = ARRAY_SIZE(vip_mux_cam2_muxreg),
+       },
+};
+
+static struct spear_pingroup vip_mux_cam2_pingroup = {
+       .name = "vip_mux_cam2_grp",
+       .pins = vip_mux_cam2_pins,
+       .npins = ARRAY_SIZE(vip_mux_cam2_pins),
+       .modemuxs = vip_mux_cam2_modemux,
+       .nmodemuxs = ARRAY_SIZE(vip_mux_cam2_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam3 (disables cam3) device */
+static const unsigned vip_mux_cam3_pins[] = { 20, 25, 26, 27, 28, 29, 30, 31,
+       32, 33, 34 };
+static struct spear_muxreg vip_mux_cam3_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = CAM3_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = VIP_AND_CAM3_REG0_MASK,
+               .val = VIP_AND_CAM3_REG0_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = VIP_AND_CAM3_REG1_MASK,
+               .val = VIP_AND_CAM3_REG1_MASK,
+       },
+};
+
+static struct spear_modemux vip_mux_cam3_modemux[] = {
+       {
+               .muxregs = vip_mux_cam3_muxreg,
+               .nmuxregs = ARRAY_SIZE(vip_mux_cam3_muxreg),
+       },
+};
+
+static struct spear_pingroup vip_mux_cam3_pingroup = {
+       .name = "vip_mux_cam3_grp",
+       .pins = vip_mux_cam3_pins,
+       .npins = ARRAY_SIZE(vip_mux_cam3_pins),
+       .modemuxs = vip_mux_cam3_modemux,
+       .nmodemuxs = ARRAY_SIZE(vip_mux_cam3_modemux),
+};
+
+/* All VIP pin groups; selecting a vip_mux_cam* group takes the pads away
+ * from the corresponding cam* device. */
+static const char *const vip_grps[] = { "vip_mux_grp", "vip_mux_cam0_grp",
+       "vip_mux_cam1_grp", "vip_mux_cam2_grp", "vip_mux_cam3_grp" };
+static struct spear_function vip_function = {
+       .name = "vip",
+       .groups = vip_grps,
+       .ngroups = ARRAY_SIZE(vip_grps),
+};
+
+/* pad multiplexing for cam0 device */
+/*
+ * The cam* groups mirror the vip_mux_cam* groups above on the same pads,
+ * but set the CAMn shared-IP select so the pads go to the camera interface
+ * instead of the VIP block.
+ */
+static const unsigned cam0_pins[] = { 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75
+};
+static struct spear_muxreg cam0_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = CAM0_MASK,
+               .val = CAM0_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_3,
+               .mask = VIP_AND_CAM0_REG2_MASK,
+               .val = VIP_AND_CAM0_REG2_MASK,
+       },
+};
+
+static struct spear_modemux cam0_modemux[] = {
+       {
+               .muxregs = cam0_muxreg,
+               .nmuxregs = ARRAY_SIZE(cam0_muxreg),
+       },
+};
+
+static struct spear_pingroup cam0_pingroup = {
+       .name = "cam0_grp",
+       .pins = cam0_pins,
+       .npins = ARRAY_SIZE(cam0_pins),
+       .modemuxs = cam0_modemux,
+       .nmodemuxs = ARRAY_SIZE(cam0_modemux),
+};
+
+static const char *const cam0_grps[] = { "cam0_grp" };
+static struct spear_function cam0_function = {
+       .name = "cam0",
+       .groups = cam0_grps,
+       .ngroups = ARRAY_SIZE(cam0_grps),
+};
+
+/* pad multiplexing for cam1 device */
+static const unsigned cam1_pins[] = { 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
+};
+static struct spear_muxreg cam1_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = CAM1_MASK,
+               .val = CAM1_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = VIP_AND_CAM1_REG1_MASK,
+               .val = VIP_AND_CAM1_REG1_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_3,
+               .mask = VIP_AND_CAM1_REG2_MASK,
+               .val = VIP_AND_CAM1_REG2_MASK,
+       },
+};
+
+static struct spear_modemux cam1_modemux[] = {
+       {
+               .muxregs = cam1_muxreg,
+               .nmuxregs = ARRAY_SIZE(cam1_muxreg),
+       },
+};
+
+static struct spear_pingroup cam1_pingroup = {
+       .name = "cam1_grp",
+       .pins = cam1_pins,
+       .npins = ARRAY_SIZE(cam1_pins),
+       .modemuxs = cam1_modemux,
+       .nmodemuxs = ARRAY_SIZE(cam1_modemux),
+};
+
+static const char *const cam1_grps[] = { "cam1_grp" };
+static struct spear_function cam1_function = {
+       .name = "cam1",
+       .groups = cam1_grps,
+       .ngroups = ARRAY_SIZE(cam1_grps),
+};
+
+/* pad multiplexing for cam2 device */
+static const unsigned cam2_pins[] = { 39, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53
+};
+static struct spear_muxreg cam2_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = CAM2_MASK,
+               .val = CAM2_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = VIP_AND_CAM2_REG1_MASK,
+               .val = VIP_AND_CAM2_REG1_MASK,
+       },
+};
+
+static struct spear_modemux cam2_modemux[] = {
+       {
+               .muxregs = cam2_muxreg,
+               .nmuxregs = ARRAY_SIZE(cam2_muxreg),
+       },
+};
+
+static struct spear_pingroup cam2_pingroup = {
+       .name = "cam2_grp",
+       .pins = cam2_pins,
+       .npins = ARRAY_SIZE(cam2_pins),
+       .modemuxs = cam2_modemux,
+       .nmodemuxs = ARRAY_SIZE(cam2_modemux),
+};
+
+static const char *const cam2_grps[] = { "cam2_grp" };
+static struct spear_function cam2_function = {
+       .name = "cam2",
+       .groups = cam2_grps,
+       .ngroups = ARRAY_SIZE(cam2_grps),
+};
+
+/* pad multiplexing for cam3 device */
+static const unsigned cam3_pins[] = { 20, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34
+};
+static struct spear_muxreg cam3_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = CAM3_MASK,
+               .val = CAM3_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = VIP_AND_CAM3_REG0_MASK,
+               .val = VIP_AND_CAM3_REG0_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = VIP_AND_CAM3_REG1_MASK,
+               .val = VIP_AND_CAM3_REG1_MASK,
+       },
+};
+
+static struct spear_modemux cam3_modemux[] = {
+       {
+               .muxregs = cam3_muxreg,
+               .nmuxregs = ARRAY_SIZE(cam3_muxreg),
+       },
+};
+
+static struct spear_pingroup cam3_pingroup = {
+       .name = "cam3_grp",
+       .pins = cam3_pins,
+       .npins = ARRAY_SIZE(cam3_pins),
+       .modemuxs = cam3_modemux,
+       .nmodemuxs = ARRAY_SIZE(cam3_modemux),
+};
+
+static const char *const cam3_grps[] = { "cam3_grp" };
+static struct spear_function cam3_function = {
+       .name = "cam3",
+       .groups = cam3_grps,
+       .ngroups = ARRAY_SIZE(cam3_grps),
+};
+
+/* pad multiplexing for smi device */
+/* Dedicated pads; only the function-enable bits need to be set. */
+static const unsigned smi_pins[] = { 76, 77, 78, 79, 84 };
+static struct spear_muxreg smi_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_3,
+               .mask = SMI_REG2_MASK,
+               .val = SMI_REG2_MASK,
+       },
+};
+
+static struct spear_modemux smi_modemux[] = {
+       {
+               .muxregs = smi_muxreg,
+               .nmuxregs = ARRAY_SIZE(smi_muxreg),
+       },
+};
+
+static struct spear_pingroup smi_pingroup = {
+       .name = "smi_grp",
+       .pins = smi_pins,
+       .npins = ARRAY_SIZE(smi_pins),
+       .modemuxs = smi_modemux,
+       .nmodemuxs = ARRAY_SIZE(smi_modemux),
+};
+
+static const char *const smi_grps[] = { "smi_grp" };
+static struct spear_function smi_function = {
+       .name = "smi",
+       .groups = smi_grps,
+       .ngroups = ARRAY_SIZE(smi_grps),
+};
+
+/* pad multiplexing for ssp0 device */
+/* Core SSP0 signals; extra chip selects are separate optional groups. */
+static const unsigned ssp0_pins[] = { 80, 81, 82, 83 };
+static struct spear_muxreg ssp0_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_3,
+               .mask = SSP0_REG2_MASK,
+               .val = SSP0_REG2_MASK,
+       },
+};
+
+static struct spear_modemux ssp0_modemux[] = {
+       {
+               .muxregs = ssp0_muxreg,
+               .nmuxregs = ARRAY_SIZE(ssp0_muxreg),
+       },
+};
+
+static struct spear_pingroup ssp0_pingroup = {
+       .name = "ssp0_grp",
+       .pins = ssp0_pins,
+       .npins = ARRAY_SIZE(ssp0_pins),
+       .modemuxs = ssp0_modemux,
+       .nmodemuxs = ARRAY_SIZE(ssp0_modemux),
+};
+
+/* pad multiplexing for ssp0_cs1 device */
+/* Pad 24 is shared with pwm0; SSP0_CS1_MASK selects the chip select here. */
+static const unsigned ssp0_cs1_pins[] = { 24 };
+static struct spear_muxreg ssp0_cs1_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = SSP0_CS1_MASK,
+               .val = SSP0_CS1_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PWM0_AND_SSP0_CS1_REG0_MASK,
+               .val = PWM0_AND_SSP0_CS1_REG0_MASK,
+       },
+};
+
+static struct spear_modemux ssp0_cs1_modemux[] = {
+       {
+               .muxregs = ssp0_cs1_muxreg,
+               .nmuxregs = ARRAY_SIZE(ssp0_cs1_muxreg),
+       },
+};
+
+static struct spear_pingroup ssp0_cs1_pingroup = {
+       .name = "ssp0_cs1_grp",
+       .pins = ssp0_cs1_pins,
+       .npins = ARRAY_SIZE(ssp0_cs1_pins),
+       .modemuxs = ssp0_cs1_modemux,
+       .nmodemuxs = ARRAY_SIZE(ssp0_cs1_modemux),
+};
+
+/* pad multiplexing for ssp0_cs2 device */
+/* Pad 85 is shared with the touchscreen (TS) interface. */
+static const unsigned ssp0_cs2_pins[] = { 85 };
+static struct spear_muxreg ssp0_cs2_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = SSP0_CS2_MASK,
+               .val = SSP0_CS2_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_3,
+               .mask = TS_AND_SSP0_CS2_REG2_MASK,
+               .val = TS_AND_SSP0_CS2_REG2_MASK,
+       },
+};
+
+static struct spear_modemux ssp0_cs2_modemux[] = {
+       {
+               .muxregs = ssp0_cs2_muxreg,
+               .nmuxregs = ARRAY_SIZE(ssp0_cs2_muxreg),
+       },
+};
+
+static struct spear_pingroup ssp0_cs2_pingroup = {
+       .name = "ssp0_cs2_grp",
+       .pins = ssp0_cs2_pins,
+       .npins = ARRAY_SIZE(ssp0_cs2_pins),
+       .modemuxs = ssp0_cs2_modemux,
+       .nmodemuxs = ARRAY_SIZE(ssp0_cs2_modemux),
+};
+
+/* pad multiplexing for ssp0_cs3 device */
+static const unsigned ssp0_cs3_pins[] = { 132 };
+static struct spear_muxreg ssp0_cs3_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_5,
+               .mask = SSP0_CS3_REG4_MASK,
+               .val = SSP0_CS3_REG4_MASK,
+       },
+};
+
+static struct spear_modemux ssp0_cs3_modemux[] = {
+       {
+               .muxregs = ssp0_cs3_muxreg,
+               .nmuxregs = ARRAY_SIZE(ssp0_cs3_muxreg),
+       },
+};
+
+static struct spear_pingroup ssp0_cs3_pingroup = {
+       .name = "ssp0_cs3_grp",
+       .pins = ssp0_cs3_pins,
+       .npins = ARRAY_SIZE(ssp0_cs3_pins),
+       .modemuxs = ssp0_cs3_modemux,
+       .nmodemuxs = ARRAY_SIZE(ssp0_cs3_modemux),
+};
+
+static const char *const ssp0_grps[] = { "ssp0_grp", "ssp0_cs1_grp",
+       "ssp0_cs2_grp", "ssp0_cs3_grp" };
+static struct spear_function ssp0_function = {
+       .name = "ssp0",
+       .groups = ssp0_grps,
+       .ngroups = ARRAY_SIZE(ssp0_grps),
+};
+
+/* pad multiplexing for uart0 device */
+/* Basic TX/RX pair; modem-control lines live in the uart0_enh group. */
+static const unsigned uart0_pins[] = { 86, 87 };
+static struct spear_muxreg uart0_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_3,
+               .mask = UART0_REG2_MASK,
+               .val = UART0_REG2_MASK,
+       },
+};
+
+static struct spear_modemux uart0_modemux[] = {
+       {
+               .muxregs = uart0_modemux == NULL ? NULL : uart0_muxreg,
+               .nmuxregs = ARRAY_SIZE(uart0_muxreg),
+       },
+};
+
+static struct spear_pingroup uart0_pingroup = {
+       .name = "uart0_grp",
+       .pins = uart0_pins,
+       .npins = ARRAY_SIZE(uart0_pins),
+       .modemuxs = uart0_modemux,
+       .nmodemuxs = ARRAY_SIZE(uart0_modemux),
+};
+
+/* pad multiplexing for uart0_enh device */
+/* Pads 11-16 are shared with the GPT; GPT_MASK is cleared to take them. */
+static const unsigned uart0_enh_pins[] = { 11, 12, 13, 14, 15, 16 };
+static struct spear_muxreg uart0_enh_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = GPT_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = UART0_ENH_AND_GPT_REG0_MASK,
+               .val = UART0_ENH_AND_GPT_REG0_MASK,
+       },
+};
+
+static struct spear_modemux uart0_enh_modemux[] = {
+       {
+               .muxregs = uart0_enh_muxreg,
+               .nmuxregs = ARRAY_SIZE(uart0_enh_muxreg),
+       },
+};
+
+static struct spear_pingroup uart0_enh_pingroup = {
+       .name = "uart0_enh_grp",
+       .pins = uart0_enh_pins,
+       .npins = ARRAY_SIZE(uart0_enh_pins),
+       .modemuxs = uart0_enh_modemux,
+       .nmodemuxs = ARRAY_SIZE(uart0_enh_modemux),
+};
+
+static const char *const uart0_grps[] = { "uart0_grp", "uart0_enh_grp" };
+static struct spear_function uart0_function = {
+       .name = "uart0",
+       .groups = uart0_grps,
+       .ngroups = ARRAY_SIZE(uart0_grps),
+};
+
+/* pad multiplexing for uart1 device */
+/* Dedicated TX/RX pads; only the function-enable bit needs to be set. */
+static const unsigned uart1_pins[] = { 88, 89 };
+static struct spear_muxreg uart1_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_3,
+               .mask = UART1_REG2_MASK,
+               .val = UART1_REG2_MASK,
+       },
+};
+
+static struct spear_modemux uart1_modemux[] = {
+       {
+               .muxregs = uart1_muxreg,
+               .nmuxregs = ARRAY_SIZE(uart1_muxreg),
+       },
+};
+
+static struct spear_pingroup uart1_pingroup = {
+       .name = "uart1_grp",
+       .pins = uart1_pins,
+       .npins = ARRAY_SIZE(uart1_pins),
+       .modemuxs = uart1_modemux,
+       .nmodemuxs = ARRAY_SIZE(uart1_modemux),
+};
+
+static const char *const uart1_grps[] = { "uart1_grp" };
+static struct spear_function uart1_function = {
+       .name = "uart1",
+       .groups = uart1_grps,
+       .ngroups = ARRAY_SIZE(uart1_grps),
+};
+
+/* pad multiplexing for i2s_in device */
+/* The input pads straddle two function-enable registers. */
+static const unsigned i2s_in_pins[] = { 90, 91, 92, 93, 94, 99 };
+static struct spear_muxreg i2s_in_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_3,
+               .mask = I2S_IN_REG2_MASK,
+               .val = I2S_IN_REG2_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_4,
+               .mask = I2S_IN_REG3_MASK,
+               .val = I2S_IN_REG3_MASK,
+       },
+};
+
+static struct spear_modemux i2s_in_modemux[] = {
+       {
+               .muxregs = i2s_in_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2s_in_muxreg),
+       },
+};
+
+static struct spear_pingroup i2s_in_pingroup = {
+       .name = "i2s_in_grp",
+       .pins = i2s_in_pins,
+       .npins = ARRAY_SIZE(i2s_in_pins),
+       .modemuxs = i2s_in_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2s_in_modemux),
+};
+
+/* pad multiplexing for i2s_out device */
+static const unsigned i2s_out_pins[] = { 95, 96, 97, 98, 100, 101, 102, 103 };
+static struct spear_muxreg i2s_out_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_4,
+               .mask = I2S_OUT_REG3_MASK,
+               .val = I2S_OUT_REG3_MASK,
+       },
+};
+
+static struct spear_modemux i2s_out_modemux[] = {
+       {
+               .muxregs = i2s_out_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2s_out_muxreg),
+       },
+};
+
+static struct spear_pingroup i2s_out_pingroup = {
+       .name = "i2s_out_grp",
+       .pins = i2s_out_pins,
+       .npins = ARRAY_SIZE(i2s_out_pins),
+       .modemuxs = i2s_out_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2s_out_modemux),
+};
+
+static const char *const i2s_grps[] = { "i2s_in_grp", "i2s_out_grp" };
+static struct spear_function i2s_function = {
+       .name = "i2s",
+       .groups = i2s_grps,
+       .ngroups = ARRAY_SIZE(i2s_grps),
+};
+
+/* pad multiplexing for gmac device */
+/*
+ * gmii/rgmii/rmii all use the same gmac pads and the same pad-function
+ * enables (GMAC_MUXREG); they differ only in the PHY interface selection
+ * written to GMAC_CLK_CFG.
+ */
+static const unsigned gmac_pins[] = { 104, 105, 106, 107, 108, 109, 110, 111,
+       112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
+       126, 127, 128, 129, 130, 131 };
+/* Common pad-function enables shared by every GMAC PHY-interface group. */
+#define GMAC_MUXREG                            \
+       {                                       \
+               .reg = PAD_FUNCTION_EN_4,       \
+               .mask = GMAC_REG3_MASK,         \
+               .val = GMAC_REG3_MASK,          \
+       }, {                                    \
+               .reg = PAD_FUNCTION_EN_5,       \
+               .mask = GMAC_REG4_MASK,         \
+               .val = GMAC_REG4_MASK,          \
+       }
+
+/* pad multiplexing for gmii device */
+static struct spear_muxreg gmii_muxreg[] = {
+       GMAC_MUXREG,
+       {
+               .reg = GMAC_CLK_CFG,
+               .mask = GMAC_PHY_IF_SEL_MASK,
+               .val = GMAC_PHY_IF_GMII_VAL,
+       },
+};
+
+static struct spear_modemux gmii_modemux[] = {
+       {
+               .muxregs = gmii_muxreg,
+               .nmuxregs = ARRAY_SIZE(gmii_muxreg),
+       },
+};
+
+static struct spear_pingroup gmii_pingroup = {
+       .name = "gmii_grp",
+       .pins = gmac_pins,
+       .npins = ARRAY_SIZE(gmac_pins),
+       .modemuxs = gmii_modemux,
+       .nmodemuxs = ARRAY_SIZE(gmii_modemux),
+};
+
+/* pad multiplexing for rgmii device */
+static struct spear_muxreg rgmii_muxreg[] = {
+       GMAC_MUXREG,
+       {
+               .reg = GMAC_CLK_CFG,
+               .mask = GMAC_PHY_IF_SEL_MASK,
+               .val = GMAC_PHY_IF_RGMII_VAL,
+       },
+};
+
+static struct spear_modemux rgmii_modemux[] = {
+       {
+               .muxregs = rgmii_muxreg,
+               .nmuxregs = ARRAY_SIZE(rgmii_muxreg),
+       },
+};
+
+static struct spear_pingroup rgmii_pingroup = {
+       .name = "rgmii_grp",
+       .pins = gmac_pins,
+       .npins = ARRAY_SIZE(gmac_pins),
+       .modemuxs = rgmii_modemux,
+       .nmodemuxs = ARRAY_SIZE(rgmii_modemux),
+};
+
+/* pad multiplexing for rmii device */
+static struct spear_muxreg rmii_muxreg[] = {
+       GMAC_MUXREG,
+       {
+               .reg = GMAC_CLK_CFG,
+               .mask = GMAC_PHY_IF_SEL_MASK,
+               .val = GMAC_PHY_IF_RMII_VAL,
+       },
+};
+
+static struct spear_modemux rmii_modemux[] = {
+       {
+               .muxregs = rmii_muxreg,
+               .nmuxregs = ARRAY_SIZE(rmii_muxreg),
+       },
+};
+
+static struct spear_pingroup rmii_pingroup = {
+       .name = "rmii_grp",
+       .pins = gmac_pins,
+       .npins = ARRAY_SIZE(gmac_pins),
+       .modemuxs = rmii_modemux,
+       .nmodemuxs = ARRAY_SIZE(rmii_modemux),
+};
+
+/* pad multiplexing for sgmii device */
+static struct spear_muxreg sgmii_muxreg[] = {
+       GMAC_MUXREG,
+       {
+               .reg = GMAC_CLK_CFG,
+               .mask = GMAC_PHY_IF_SEL_MASK,
+               .val = GMAC_PHY_IF_SGMII_VAL,
+       },
+};
+
+static struct spear_modemux sgmii_modemux[] = {
+       {
+               .muxregs = sgmii_muxreg,
+               .nmuxregs = ARRAY_SIZE(sgmii_muxreg),
+       },
+};
+
+static struct spear_pingroup sgmii_pingroup = {
+       .name = "sgmii_grp",
+       .pins = gmac_pins,
+       .npins = ARRAY_SIZE(gmac_pins),
+       .modemuxs = sgmii_modemux,
+       .nmodemuxs = ARRAY_SIZE(sgmii_modemux),
+};
+
+static const char *const gmac_grps[] = { "gmii_grp", "rgmii_grp", "rmii_grp",
+       "sgmii_grp" };
+static struct spear_function gmac_function = {
+       .name = "gmac",
+       .groups = gmac_grps,
+       .ngroups = ARRAY_SIZE(gmac_grps),
+};
+
+/* pad multiplexing for i2c0 device */
+static const unsigned i2c0_pins[] = { 133, 134 };
+static struct spear_muxreg i2c0_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_5,
+               .mask = I2C0_REG4_MASK,
+               .val = I2C0_REG4_MASK,
+       },
+};
+
+static struct spear_modemux i2c0_modemux[] = {
+       {
+               .muxregs = i2c0_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2c0_muxreg),
+       },
+};
+
+static struct spear_pingroup i2c0_pingroup = {
+       .name = "i2c0_grp",
+       .pins = i2c0_pins,
+       .npins = ARRAY_SIZE(i2c0_pins),
+       .modemuxs = i2c0_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2c0_modemux),
+};
+
+static const char *const i2c0_grps[] = { "i2c0_grp" };
+static struct spear_function i2c0_function = {
+       .name = "i2c0",
+       .groups = i2c0_grps,
+       .ngroups = ARRAY_SIZE(i2c0_grps),
+};
+
+/* pad multiplexing for i2c1 device */
+static const unsigned i2c1_pins[] = { 18, 23 };
+static struct spear_muxreg i2c1_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = I2C1_REG0_MASK,
+               .val = I2C1_REG0_MASK,
+       },
+};
+
+static struct spear_modemux i2c1_modemux[] = {
+       {
+               .muxregs = i2c1_muxreg,
+               .nmuxregs = ARRAY_SIZE(i2c1_muxreg),
+       },
+};
+
+static struct spear_pingroup i2c1_pingroup = {
+       .name = "i2c1_grp",
+       .pins = i2c1_pins,
+       .npins = ARRAY_SIZE(i2c1_pins),
+       .modemuxs = i2c1_modemux,
+       .nmodemuxs = ARRAY_SIZE(i2c1_modemux),
+};
+
+static const char *const i2c1_grps[] = { "i2c1_grp" };
+static struct spear_function i2c1_function = {
+       .name = "i2c1",
+       .groups = i2c1_grps,
+       .ngroups = ARRAY_SIZE(i2c1_grps),
+};
+
+/* pad multiplexing for cec0 device */
+static const unsigned cec0_pins[] = { 135 };
+static struct spear_muxreg cec0_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_5,
+               .mask = CEC0_REG4_MASK,
+               .val = CEC0_REG4_MASK,
+       },
+};
+
+static struct spear_modemux cec0_modemux[] = {
+       {
+               .muxregs = cec0_muxreg,
+               .nmuxregs = ARRAY_SIZE(cec0_muxreg),
+       },
+};
+
+static struct spear_pingroup cec0_pingroup = {
+       .name = "cec0_grp",
+       .pins = cec0_pins,
+       .npins = ARRAY_SIZE(cec0_pins),
+       .modemuxs = cec0_modemux,
+       .nmodemuxs = ARRAY_SIZE(cec0_modemux),
+};
+
+static const char *const cec0_grps[] = { "cec0_grp" };
+static struct spear_function cec0_function = {
+       .name = "cec0",
+       .groups = cec0_grps,
+       .ngroups = ARRAY_SIZE(cec0_grps),
+};
+
+/* pad multiplexing for cec1 device */
+static const unsigned cec1_pins[] = { 136 };
+static struct spear_muxreg cec1_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_5,
+               .mask = CEC1_REG4_MASK,
+               .val = CEC1_REG4_MASK,
+       },
+};
+
+static struct spear_modemux cec1_modemux[] = {
+       {
+               .muxregs = cec1_muxreg,
+               .nmuxregs = ARRAY_SIZE(cec1_muxreg),
+       },
+};
+
+static struct spear_pingroup cec1_pingroup = {
+       .name = "cec1_grp",
+       .pins = cec1_pins,
+       .npins = ARRAY_SIZE(cec1_pins),
+       .modemuxs = cec1_modemux,
+       .nmodemuxs = ARRAY_SIZE(cec1_modemux),
+};
+
+static const char *const cec1_grps[] = { "cec1_grp" };
+static struct spear_function cec1_function = {
+       .name = "cec1",
+       .groups = cec1_grps,
+       .ngroups = ARRAY_SIZE(cec1_grps),
+};
+
+/* pad multiplexing for mcif devices */
+static const unsigned mcif_pins[] = { 193, 194, 195, 196, 197, 198, 199, 200,
+       201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+       215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+       229, 230, 231, 232, 237 };
+#define MCIF_MUXREG                                                    \
+       {                                                               \
+               .reg = PAD_SHARED_IP_EN_1,                              \
+               .mask = MCIF_MASK,                                      \
+               .val = MCIF_MASK,                                       \
+       }, {                                                            \
+               .reg = PAD_FUNCTION_EN_7,                               \
+               .mask = FSMC_PNOR_AND_MCIF_REG6_MASK | MCIF_REG6_MASK,  \
+               .val = FSMC_PNOR_AND_MCIF_REG6_MASK | MCIF_REG6_MASK,   \
+       }, {                                                            \
+               .reg = PAD_FUNCTION_EN_8,                               \
+               .mask = MCIF_REG7_MASK,                                 \
+               .val = MCIF_REG7_MASK,                                  \
+       }
+
+/* Pad multiplexing for sdhci device */
+static struct spear_muxreg sdhci_muxreg[] = {
+       MCIF_MUXREG,
+       {
+               .reg = PERIP_CFG,
+               .mask = MCIF_SEL_MASK,
+               .val = MCIF_SEL_SD,
+       },
+};
+
+static struct spear_modemux sdhci_modemux[] = {
+       {
+               .muxregs = sdhci_muxreg,
+               .nmuxregs = ARRAY_SIZE(sdhci_muxreg),
+       },
+};
+
+static struct spear_pingroup sdhci_pingroup = {
+       .name = "sdhci_grp",
+       .pins = mcif_pins,
+       .npins = ARRAY_SIZE(mcif_pins),
+       .modemuxs = sdhci_modemux,
+       .nmodemuxs = ARRAY_SIZE(sdhci_modemux),
+};
+
+static const char *const sdhci_grps[] = { "sdhci_grp" };
+static struct spear_function sdhci_function = {
+       .name = "sdhci",
+       .groups = sdhci_grps,
+       .ngroups = ARRAY_SIZE(sdhci_grps),
+};
+
+/* Pad multiplexing for cf device */
+static struct spear_muxreg cf_muxreg[] = {
+       MCIF_MUXREG,
+       {
+               .reg = PERIP_CFG,
+               .mask = MCIF_SEL_MASK,
+               .val = MCIF_SEL_CF,
+       },
+};
+
+static struct spear_modemux cf_modemux[] = {
+       {
+               .muxregs = cf_muxreg,
+               .nmuxregs = ARRAY_SIZE(cf_muxreg),
+       },
+};
+
+static struct spear_pingroup cf_pingroup = {
+       .name = "cf_grp",
+       .pins = mcif_pins,
+       .npins = ARRAY_SIZE(mcif_pins),
+       .modemuxs = cf_modemux,
+       .nmodemuxs = ARRAY_SIZE(cf_modemux),
+};
+
+static const char *const cf_grps[] = { "cf_grp" };
+static struct spear_function cf_function = {
+       .name = "cf",
+       .groups = cf_grps,
+       .ngroups = ARRAY_SIZE(cf_grps),
+};
+
+/* Pad multiplexing for xd device */
+static struct spear_muxreg xd_muxreg[] = {
+       MCIF_MUXREG,
+       {
+               .reg = PERIP_CFG,
+               .mask = MCIF_SEL_MASK,
+               .val = MCIF_SEL_XD,
+       },
+};
+
+static struct spear_modemux xd_modemux[] = {
+       {
+               .muxregs = xd_muxreg,
+               .nmuxregs = ARRAY_SIZE(xd_muxreg),
+       },
+};
+
+static struct spear_pingroup xd_pingroup = {
+       .name = "xd_grp",
+       .pins = mcif_pins,
+       .npins = ARRAY_SIZE(mcif_pins),
+       .modemuxs = xd_modemux,
+       .nmodemuxs = ARRAY_SIZE(xd_modemux),
+};
+
+static const char *const xd_grps[] = { "xd_grp" };
+static struct spear_function xd_function = {
+       .name = "xd",
+       .groups = xd_grps,
+       .ngroups = ARRAY_SIZE(xd_grps),
+};
+
+/* pad multiplexing for clcd device */
+static const unsigned clcd_pins[] = { 138, 139, 140, 141, 142, 143, 144, 145,
+       146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+       160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173,
+       174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+       188, 189, 190, 191 };
+static struct spear_muxreg clcd_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = ARM_TRACE_MASK | MIPHY_DBG_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_5,
+               .mask = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK,
+               .val = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_6,
+               .mask = CLCD_AND_ARM_TRACE_REG5_MASK,
+               .val = CLCD_AND_ARM_TRACE_REG5_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_7,
+               .mask = CLCD_AND_ARM_TRACE_REG6_MASK,
+               .val = CLCD_AND_ARM_TRACE_REG6_MASK,
+       },
+};
+
+static struct spear_modemux clcd_modemux[] = {
+       {
+               .muxregs = clcd_muxreg,
+               .nmuxregs = ARRAY_SIZE(clcd_muxreg),
+       },
+};
+
+static struct spear_pingroup clcd_pingroup = {
+       .name = "clcd_grp",
+       .pins = clcd_pins,
+       .npins = ARRAY_SIZE(clcd_pins),
+       .modemuxs = clcd_modemux,
+       .nmodemuxs = ARRAY_SIZE(clcd_modemux),
+};
+
+static const char *const clcd_grps[] = { "clcd_grp" };
+static struct spear_function clcd_function = {
+       .name = "clcd",
+       .groups = clcd_grps,
+       .ngroups = ARRAY_SIZE(clcd_grps),
+};
+
+/* pad multiplexing for arm_trace device */
+static const unsigned arm_trace_pins[] = { 158, 159, 160, 161, 162, 163, 164,
+       165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+       179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+       193, 194, 195, 196, 197, 198, 199, 200 };
+static struct spear_muxreg arm_trace_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = ARM_TRACE_MASK,
+               .val = ARM_TRACE_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_5,
+               .mask = CLCD_AND_ARM_TRACE_REG4_MASK,
+               .val = CLCD_AND_ARM_TRACE_REG4_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_6,
+               .mask = CLCD_AND_ARM_TRACE_REG5_MASK,
+               .val = CLCD_AND_ARM_TRACE_REG5_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_7,
+               .mask = CLCD_AND_ARM_TRACE_REG6_MASK,
+               .val = CLCD_AND_ARM_TRACE_REG6_MASK,
+       },
+};
+
+static struct spear_modemux arm_trace_modemux[] = {
+       {
+               .muxregs = arm_trace_muxreg,
+               .nmuxregs = ARRAY_SIZE(arm_trace_muxreg),
+       },
+};
+
+static struct spear_pingroup arm_trace_pingroup = {
+       .name = "arm_trace_grp",
+       .pins = arm_trace_pins,
+       .npins = ARRAY_SIZE(arm_trace_pins),
+       .modemuxs = arm_trace_modemux,
+       .nmodemuxs = ARRAY_SIZE(arm_trace_modemux),
+};
+
+static const char *const arm_trace_grps[] = { "arm_trace_grp" };
+static struct spear_function arm_trace_function = {
+       .name = "arm_trace",
+       .groups = arm_trace_grps,
+       .ngroups = ARRAY_SIZE(arm_trace_grps),
+};
+
+/* pad multiplexing for miphy_dbg device */
+static const unsigned miphy_dbg_pins[] = { 96, 97, 98, 99, 100, 101, 102, 103,
+       132, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+       148, 149, 150, 151, 152, 153, 154, 155, 156, 157 };
+static struct spear_muxreg miphy_dbg_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = MIPHY_DBG_MASK,
+               .val = MIPHY_DBG_MASK,
+       }, {
+               .reg = PAD_FUNCTION_EN_5,
+               .mask = DEVS_GRP_AND_MIPHY_DBG_REG4_MASK,
+               .val = DEVS_GRP_AND_MIPHY_DBG_REG4_MASK,
+       },
+};
+
+static struct spear_modemux miphy_dbg_modemux[] = {
+       {
+               .muxregs = miphy_dbg_muxreg,
+               .nmuxregs = ARRAY_SIZE(miphy_dbg_muxreg),
+       },
+};
+
+static struct spear_pingroup miphy_dbg_pingroup = {
+       .name = "miphy_dbg_grp",
+       .pins = miphy_dbg_pins,
+       .npins = ARRAY_SIZE(miphy_dbg_pins),
+       .modemuxs = miphy_dbg_modemux,
+       .nmodemuxs = ARRAY_SIZE(miphy_dbg_modemux),
+};
+
+static const char *const miphy_dbg_grps[] = { "miphy_dbg_grp" };
+static struct spear_function miphy_dbg_function = {
+       .name = "miphy_dbg",
+       .groups = miphy_dbg_grps,
+       .ngroups = ARRAY_SIZE(miphy_dbg_grps),
+};
+
+/* pad multiplexing for pcie device */
+static const unsigned pcie_pins[] = { 250 };
+static struct spear_muxreg pcie_muxreg[] = {
+       {
+               .reg = PCIE_SATA_CFG,
+               .mask = SATA_PCIE_CFG_MASK,
+               .val = PCIE_CFG_VAL,
+       },
+};
+
+static struct spear_modemux pcie_modemux[] = {
+       {
+               .muxregs = pcie_muxreg,
+               .nmuxregs = ARRAY_SIZE(pcie_muxreg),
+       },
+};
+
+static struct spear_pingroup pcie_pingroup = {
+       .name = "pcie_grp",
+       .pins = pcie_pins,
+       .npins = ARRAY_SIZE(pcie_pins),
+       .modemuxs = pcie_modemux,
+       .nmodemuxs = ARRAY_SIZE(pcie_modemux),
+};
+
+static const char *const pcie_grps[] = { "pcie_grp" };
+static struct spear_function pcie_function = {
+       .name = "pcie",
+       .groups = pcie_grps,
+       .ngroups = ARRAY_SIZE(pcie_grps),
+};
+
+/* pad multiplexing for sata device */
+static const unsigned sata_pins[] = { 250 };
+static struct spear_muxreg sata_muxreg[] = {
+       {
+               .reg = PCIE_SATA_CFG,
+               .mask = SATA_PCIE_CFG_MASK,
+               .val = SATA_CFG_VAL,
+       },
+};
+
+static struct spear_modemux sata_modemux[] = {
+       {
+               .muxregs = sata_muxreg,
+               .nmuxregs = ARRAY_SIZE(sata_muxreg),
+       },
+};
+
+static struct spear_pingroup sata_pingroup = {
+       .name = "sata_grp",
+       .pins = sata_pins,
+       .npins = ARRAY_SIZE(sata_pins),
+       .modemuxs = sata_modemux,
+       .nmodemuxs = ARRAY_SIZE(sata_modemux),
+};
+
+static const char *const sata_grps[] = { "sata_grp" };
+static struct spear_function sata_function = {
+       .name = "sata",
+       .groups = sata_grps,
+       .ngroups = ARRAY_SIZE(sata_grps),
+};
+
+/* pingroups */
+static struct spear_pingroup *spear1340_pingroups[] = {
+       &pads_as_gpio_pingroup,
+       &fsmc_8bit_pingroup,
+       &fsmc_16bit_pingroup,
+       &fsmc_pnor_pingroup,
+       &keyboard_row_col_pingroup,
+       &keyboard_col5_pingroup,
+       &spdif_in_pingroup,
+       &spdif_out_pingroup,
+       &gpt_0_1_pingroup,
+       &pwm0_pingroup,
+       &pwm1_pingroup,
+       &pwm2_pingroup,
+       &pwm3_pingroup,
+       &vip_mux_pingroup,
+       &vip_mux_cam0_pingroup,
+       &vip_mux_cam1_pingroup,
+       &vip_mux_cam2_pingroup,
+       &vip_mux_cam3_pingroup,
+       &cam0_pingroup,
+       &cam1_pingroup,
+       &cam2_pingroup,
+       &cam3_pingroup,
+       &smi_pingroup,
+       &ssp0_pingroup,
+       &ssp0_cs1_pingroup,
+       &ssp0_cs2_pingroup,
+       &ssp0_cs3_pingroup,
+       &uart0_pingroup,
+       &uart0_enh_pingroup,
+       &uart1_pingroup,
+       &i2s_in_pingroup,
+       &i2s_out_pingroup,
+       &gmii_pingroup,
+       &rgmii_pingroup,
+       &rmii_pingroup,
+       &sgmii_pingroup,
+       &i2c0_pingroup,
+       &i2c1_pingroup,
+       &cec0_pingroup,
+       &cec1_pingroup,
+       &sdhci_pingroup,
+       &cf_pingroup,
+       &xd_pingroup,
+       &clcd_pingroup,
+       &arm_trace_pingroup,
+       &miphy_dbg_pingroup,
+       &pcie_pingroup,
+       &sata_pingroup,
+};
+
+/* functions */
+static struct spear_function *spear1340_functions[] = {
+       &pads_as_gpio_function,
+       &fsmc_function,
+       &keyboard_function,
+       &spdif_in_function,
+       &spdif_out_function,
+       &gpt_0_1_function,
+       &pwm_function,
+       &vip_function,
+       &cam0_function,
+       &cam1_function,
+       &cam2_function,
+       &cam3_function,
+       &smi_function,
+       &ssp0_function,
+       &uart0_function,
+       &uart1_function,
+       &i2s_function,
+       &gmac_function,
+       &i2c0_function,
+       &i2c1_function,
+       &cec0_function,
+       &cec1_function,
+       &sdhci_function,
+       &cf_function,
+       &xd_function,
+       &clcd_function,
+       &arm_trace_function,
+       &miphy_dbg_function,
+       &pcie_function,
+       &sata_function,
+};
+
+static struct spear_pinctrl_machdata spear1340_machdata = {
+       .pins = spear1340_pins,
+       .npins = ARRAY_SIZE(spear1340_pins),
+       .groups = spear1340_pingroups,
+       .ngroups = ARRAY_SIZE(spear1340_pingroups),
+       .functions = spear1340_functions,
+       .nfunctions = ARRAY_SIZE(spear1340_functions),
+       .modes_supported = false,
+};
+
+static struct of_device_id spear1340_pinctrl_of_match[] __devinitdata = {
+       {
+               .compatible = "st,spear1340-pinmux",
+       },
+       {},
+};
+
+static int __devinit spear1340_pinctrl_probe(struct platform_device *pdev)
+{
+       return spear_pinctrl_probe(pdev, &spear1340_machdata);
+}
+
+static int __devexit spear1340_pinctrl_remove(struct platform_device *pdev)
+{
+       return spear_pinctrl_remove(pdev);
+}
+
+static struct platform_driver spear1340_pinctrl_driver = {
+       .driver = {
+               .name = DRIVER_NAME,
+               .owner = THIS_MODULE,
+               .of_match_table = spear1340_pinctrl_of_match,
+       },
+       .probe = spear1340_pinctrl_probe,
+       .remove = __devexit_p(spear1340_pinctrl_remove),
+};
+
+static int __init spear1340_pinctrl_init(void)
+{
+       return platform_driver_register(&spear1340_pinctrl_driver);
+}
+arch_initcall(spear1340_pinctrl_init);
+
+static void __exit spear1340_pinctrl_exit(void)
+{
+       platform_driver_unregister(&spear1340_pinctrl_driver);
+}
+module_exit(spear1340_pinctrl_exit);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
index 832049a8b1c96ec8388b7c3e4ef4c7907b06f739..91c883bc46a6b1e0ab40f6446a736936210a3c8b 100644 (file)
 
 /* pins */
 static const struct pinctrl_pin_desc spear3xx_pins[] = {
-       PINCTRL_PIN(0, "PLGPIO0"),
-       PINCTRL_PIN(1, "PLGPIO1"),
-       PINCTRL_PIN(2, "PLGPIO2"),
-       PINCTRL_PIN(3, "PLGPIO3"),
-       PINCTRL_PIN(4, "PLGPIO4"),
-       PINCTRL_PIN(5, "PLGPIO5"),
-       PINCTRL_PIN(6, "PLGPIO6"),
-       PINCTRL_PIN(7, "PLGPIO7"),
-       PINCTRL_PIN(8, "PLGPIO8"),
-       PINCTRL_PIN(9, "PLGPIO9"),
-       PINCTRL_PIN(10, "PLGPIO10"),
-       PINCTRL_PIN(11, "PLGPIO11"),
-       PINCTRL_PIN(12, "PLGPIO12"),
-       PINCTRL_PIN(13, "PLGPIO13"),
-       PINCTRL_PIN(14, "PLGPIO14"),
-       PINCTRL_PIN(15, "PLGPIO15"),
-       PINCTRL_PIN(16, "PLGPIO16"),
-       PINCTRL_PIN(17, "PLGPIO17"),
-       PINCTRL_PIN(18, "PLGPIO18"),
-       PINCTRL_PIN(19, "PLGPIO19"),
-       PINCTRL_PIN(20, "PLGPIO20"),
-       PINCTRL_PIN(21, "PLGPIO21"),
-       PINCTRL_PIN(22, "PLGPIO22"),
-       PINCTRL_PIN(23, "PLGPIO23"),
-       PINCTRL_PIN(24, "PLGPIO24"),
-       PINCTRL_PIN(25, "PLGPIO25"),
-       PINCTRL_PIN(26, "PLGPIO26"),
-       PINCTRL_PIN(27, "PLGPIO27"),
-       PINCTRL_PIN(28, "PLGPIO28"),
-       PINCTRL_PIN(29, "PLGPIO29"),
-       PINCTRL_PIN(30, "PLGPIO30"),
-       PINCTRL_PIN(31, "PLGPIO31"),
-       PINCTRL_PIN(32, "PLGPIO32"),
-       PINCTRL_PIN(33, "PLGPIO33"),
-       PINCTRL_PIN(34, "PLGPIO34"),
-       PINCTRL_PIN(35, "PLGPIO35"),
-       PINCTRL_PIN(36, "PLGPIO36"),
-       PINCTRL_PIN(37, "PLGPIO37"),
-       PINCTRL_PIN(38, "PLGPIO38"),
-       PINCTRL_PIN(39, "PLGPIO39"),
-       PINCTRL_PIN(40, "PLGPIO40"),
-       PINCTRL_PIN(41, "PLGPIO41"),
-       PINCTRL_PIN(42, "PLGPIO42"),
-       PINCTRL_PIN(43, "PLGPIO43"),
-       PINCTRL_PIN(44, "PLGPIO44"),
-       PINCTRL_PIN(45, "PLGPIO45"),
-       PINCTRL_PIN(46, "PLGPIO46"),
-       PINCTRL_PIN(47, "PLGPIO47"),
-       PINCTRL_PIN(48, "PLGPIO48"),
-       PINCTRL_PIN(49, "PLGPIO49"),
-       PINCTRL_PIN(50, "PLGPIO50"),
-       PINCTRL_PIN(51, "PLGPIO51"),
-       PINCTRL_PIN(52, "PLGPIO52"),
-       PINCTRL_PIN(53, "PLGPIO53"),
-       PINCTRL_PIN(54, "PLGPIO54"),
-       PINCTRL_PIN(55, "PLGPIO55"),
-       PINCTRL_PIN(56, "PLGPIO56"),
-       PINCTRL_PIN(57, "PLGPIO57"),
-       PINCTRL_PIN(58, "PLGPIO58"),
-       PINCTRL_PIN(59, "PLGPIO59"),
-       PINCTRL_PIN(60, "PLGPIO60"),
-       PINCTRL_PIN(61, "PLGPIO61"),
-       PINCTRL_PIN(62, "PLGPIO62"),
-       PINCTRL_PIN(63, "PLGPIO63"),
-       PINCTRL_PIN(64, "PLGPIO64"),
-       PINCTRL_PIN(65, "PLGPIO65"),
-       PINCTRL_PIN(66, "PLGPIO66"),
-       PINCTRL_PIN(67, "PLGPIO67"),
-       PINCTRL_PIN(68, "PLGPIO68"),
-       PINCTRL_PIN(69, "PLGPIO69"),
-       PINCTRL_PIN(70, "PLGPIO70"),
-       PINCTRL_PIN(71, "PLGPIO71"),
-       PINCTRL_PIN(72, "PLGPIO72"),
-       PINCTRL_PIN(73, "PLGPIO73"),
-       PINCTRL_PIN(74, "PLGPIO74"),
-       PINCTRL_PIN(75, "PLGPIO75"),
-       PINCTRL_PIN(76, "PLGPIO76"),
-       PINCTRL_PIN(77, "PLGPIO77"),
-       PINCTRL_PIN(78, "PLGPIO78"),
-       PINCTRL_PIN(79, "PLGPIO79"),
-       PINCTRL_PIN(80, "PLGPIO80"),
-       PINCTRL_PIN(81, "PLGPIO81"),
-       PINCTRL_PIN(82, "PLGPIO82"),
-       PINCTRL_PIN(83, "PLGPIO83"),
-       PINCTRL_PIN(84, "PLGPIO84"),
-       PINCTRL_PIN(85, "PLGPIO85"),
-       PINCTRL_PIN(86, "PLGPIO86"),
-       PINCTRL_PIN(87, "PLGPIO87"),
-       PINCTRL_PIN(88, "PLGPIO88"),
-       PINCTRL_PIN(89, "PLGPIO89"),
-       PINCTRL_PIN(90, "PLGPIO90"),
-       PINCTRL_PIN(91, "PLGPIO91"),
-       PINCTRL_PIN(92, "PLGPIO92"),
-       PINCTRL_PIN(93, "PLGPIO93"),
-       PINCTRL_PIN(94, "PLGPIO94"),
-       PINCTRL_PIN(95, "PLGPIO95"),
-       PINCTRL_PIN(96, "PLGPIO96"),
-       PINCTRL_PIN(97, "PLGPIO97"),
-       PINCTRL_PIN(98, "PLGPIO98"),
-       PINCTRL_PIN(99, "PLGPIO99"),
-       PINCTRL_PIN(100, "PLGPIO100"),
-       PINCTRL_PIN(101, "PLGPIO101"),
+       SPEAR_PIN_0_TO_101,
 };
 
 /* firda_pins */
index c1a3fd8e12438bb0d4a995f107981edfe7c68a89..ce875dc365e5f2ed0fa61dda0162fd1bb5be3cd1 100644 (file)
@@ -523,6 +523,30 @@ static const struct dmi_system_id video_vendor_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"),
                },
        },
+       {
+               .callback = video_set_backlight_video_vendor,
+               .ident = "Acer Extensa 5235",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"),
+               },
+       },
+       {
+               .callback = video_set_backlight_video_vendor,
+               .ident = "Acer TravelMate 5760",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"),
+               },
+       },
+       {
+               .callback = video_set_backlight_video_vendor,
+               .ident = "Acer Aspire 5750",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
+               },
+       },
        {}
 };
 
index 8a582bdfdc76e3d20a9bef63e1458035999776b3..694a15a56230668c4eee12c7f5e7bc421a09cb0a 100644 (file)
@@ -87,6 +87,9 @@ static int gmux_update_status(struct backlight_device *bd)
        struct apple_gmux_data *gmux_data = bl_get_data(bd);
        u32 brightness = bd->props.brightness;
 
+       if (bd->props.state & BL_CORE_SUSPENDED)
+               return 0;
+
        /*
         * Older gmux versions require writing out lower bytes first then
         * setting the upper byte to 0 to flush the values. Newer versions
@@ -102,6 +105,7 @@ static int gmux_update_status(struct backlight_device *bd)
 }
 
 static const struct backlight_ops gmux_bl_ops = {
+       .options = BL_CORE_SUSPENDRESUME,
        .get_brightness = gmux_get_brightness,
        .update_status = gmux_update_status,
 };
index e6c08ee8d46c0acb5d1652b2627bc65b5f2d4922..5f78aac9b163bf21c957f0beccbdff80d095228a 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/err.h>
 #include <linux/dmi.h>
 #include <linux/io.h>
-#include <linux/rfkill.h>
 #include <linux/power_supply.h>
 #include <linux/acpi.h>
 #include <linux/mm.h>
@@ -90,11 +89,8 @@ static struct platform_driver platform_driver = {
 
 static struct platform_device *platform_device;
 static struct backlight_device *dell_backlight_device;
-static struct rfkill *wifi_rfkill;
-static struct rfkill *bluetooth_rfkill;
-static struct rfkill *wwan_rfkill;
 
-static const struct dmi_system_id __initdata dell_device_table[] = {
+static const struct dmi_system_id dell_device_table[] __initconst = {
        {
                .ident = "Dell laptop",
                .matches = {
@@ -119,96 +115,94 @@ static const struct dmi_system_id __initdata dell_device_table[] = {
 };
 MODULE_DEVICE_TABLE(dmi, dell_device_table);
 
-static struct dmi_system_id __devinitdata dell_blacklist[] = {
-       /* Supported by compal-laptop */
-       {
-               .ident = "Dell Mini 9",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 910"),
-               },
-       },
+static struct dmi_system_id __devinitdata dell_quirks[] = {
        {
-               .ident = "Dell Mini 10",
+               .callback = dmi_matched,
+               .ident = "Dell Vostro V130",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1010"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"),
                },
+               .driver_data = &quirk_dell_vostro_v130,
        },
        {
-               .ident = "Dell Mini 10v",
+               .callback = dmi_matched,
+               .ident = "Dell Vostro V131",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1011"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
                },
+               .driver_data = &quirk_dell_vostro_v130,
        },
        {
-               .ident = "Dell Mini 1012",
+               .callback = dmi_matched,
+               .ident = "Dell Vostro 3350",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
                },
+               .driver_data = &quirk_dell_vostro_v130,
        },
        {
-               .ident = "Dell Inspiron 11z",
+               .callback = dmi_matched,
+               .ident = "Dell Vostro 3555",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1110"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"),
                },
+               .driver_data = &quirk_dell_vostro_v130,
        },
        {
-               .ident = "Dell Mini 12",
+               .callback = dmi_matched,
+               .ident = "Dell Inspiron N311z",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1210"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"),
                },
+               .driver_data = &quirk_dell_vostro_v130,
        },
-       {}
-};
-
-static struct dmi_system_id __devinitdata dell_quirks[] = {
        {
                .callback = dmi_matched,
-               .ident = "Dell Vostro V130",
+               .ident = "Dell Inspiron M5110",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
                },
                .driver_data = &quirk_dell_vostro_v130,
        },
        {
                .callback = dmi_matched,
-               .ident = "Dell Vostro V131",
+               .ident = "Dell Vostro 3360",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
                },
                .driver_data = &quirk_dell_vostro_v130,
        },
        {
                .callback = dmi_matched,
-               .ident = "Dell Vostro 3555",
+               .ident = "Dell Vostro 3460",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3460"),
                },
                .driver_data = &quirk_dell_vostro_v130,
        },
        {
                .callback = dmi_matched,
-               .ident = "Dell Inspiron N311z",
+               .ident = "Dell Vostro 3560",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3560"),
                },
                .driver_data = &quirk_dell_vostro_v130,
        },
        {
                .callback = dmi_matched,
-               .ident = "Dell Inspiron M5110",
+               .ident = "Dell Vostro 3450",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Dell System Vostro 3450"),
                },
                .driver_data = &quirk_dell_vostro_v130,
        },
@@ -305,94 +299,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
        return buffer;
 }
 
-/* Derived from information in DellWirelessCtl.cpp:
-   Class 17, select 11 is radio control. It returns an array of 32-bit values.
-
-   Input byte 0 = 0: Wireless information
-
-   result[0]: return code
-   result[1]:
-     Bit 0:      Hardware switch supported
-     Bit 1:      Wifi locator supported
-     Bit 2:      Wifi is supported
-     Bit 3:      Bluetooth is supported
-     Bit 4:      WWAN is supported
-     Bit 5:      Wireless keyboard supported
-     Bits 6-7:   Reserved
-     Bit 8:      Wifi is installed
-     Bit 9:      Bluetooth is installed
-     Bit 10:     WWAN is installed
-     Bits 11-15: Reserved
-     Bit 16:     Hardware switch is on
-     Bit 17:     Wifi is blocked
-     Bit 18:     Bluetooth is blocked
-     Bit 19:     WWAN is blocked
-     Bits 20-31: Reserved
-   result[2]: NVRAM size in bytes
-   result[3]: NVRAM format version number
-
-   Input byte 0 = 2: Wireless switch configuration
-   result[0]: return code
-   result[1]:
-     Bit 0:      Wifi controlled by switch
-     Bit 1:      Bluetooth controlled by switch
-     Bit 2:      WWAN controlled by switch
-     Bits 3-6:   Reserved
-     Bit 7:      Wireless switch config locked
-     Bit 8:      Wifi locator enabled
-     Bits 9-14:  Reserved
-     Bit 15:     Wifi locator setting locked
-     Bits 16-31: Reserved
-*/
-
-static int dell_rfkill_set(void *data, bool blocked)
-{
-       int disable = blocked ? 1 : 0;
-       unsigned long radio = (unsigned long)data;
-       int hwswitch_bit = (unsigned long)data - 1;
-       int ret = 0;
-
-       get_buffer();
-       dell_send_request(buffer, 17, 11);
-
-       /* If the hardware switch controls this radio, and the hardware
-          switch is disabled, don't allow changing the software state */
-       if ((hwswitch_state & BIT(hwswitch_bit)) &&
-           !(buffer->output[1] & BIT(16))) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       buffer->input[0] = (1 | (radio<<8) | (disable << 16));
-       dell_send_request(buffer, 17, 11);
-
-out:
-       release_buffer();
-       return ret;
-}
-
-static void dell_rfkill_query(struct rfkill *rfkill, void *data)
-{
-       int status;
-       int bit = (unsigned long)data + 16;
-       int hwswitch_bit = (unsigned long)data - 1;
-
-       get_buffer();
-       dell_send_request(buffer, 17, 11);
-       status = buffer->output[1];
-       release_buffer();
-
-       rfkill_set_sw_state(rfkill, !!(status & BIT(bit)));
-
-       if (hwswitch_state & (BIT(hwswitch_bit)))
-               rfkill_set_hw_state(rfkill, !(status & BIT(16)));
-}
-
-static const struct rfkill_ops dell_rfkill_ops = {
-       .set_block = dell_rfkill_set,
-       .query = dell_rfkill_query,
-};
-
 static struct dentry *dell_laptop_dir;
 
 static int dell_debugfs_show(struct seq_file *s, void *data)
@@ -462,108 +368,6 @@ static const struct file_operations dell_debugfs_fops = {
        .release = single_release,
 };
 
-static void dell_update_rfkill(struct work_struct *ignored)
-{
-       if (wifi_rfkill)
-               dell_rfkill_query(wifi_rfkill, (void *)1);
-       if (bluetooth_rfkill)
-               dell_rfkill_query(bluetooth_rfkill, (void *)2);
-       if (wwan_rfkill)
-               dell_rfkill_query(wwan_rfkill, (void *)3);
-}
-static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
-
-
-static int __init dell_setup_rfkill(void)
-{
-       int status;
-       int ret;
-
-       if (dmi_check_system(dell_blacklist)) {
-               pr_info("Blacklisted hardware detected - not enabling rfkill\n");
-               return 0;
-       }
-
-       get_buffer();
-       dell_send_request(buffer, 17, 11);
-       status = buffer->output[1];
-       buffer->input[0] = 0x2;
-       dell_send_request(buffer, 17, 11);
-       hwswitch_state = buffer->output[1];
-       release_buffer();
-
-       if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
-               wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
-                                          RFKILL_TYPE_WLAN,
-                                          &dell_rfkill_ops, (void *) 1);
-               if (!wifi_rfkill) {
-                       ret = -ENOMEM;
-                       goto err_wifi;
-               }
-               ret = rfkill_register(wifi_rfkill);
-               if (ret)
-                       goto err_wifi;
-       }
-
-       if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
-               bluetooth_rfkill = rfkill_alloc("dell-bluetooth",
-                                               &platform_device->dev,
-                                               RFKILL_TYPE_BLUETOOTH,
-                                               &dell_rfkill_ops, (void *) 2);
-               if (!bluetooth_rfkill) {
-                       ret = -ENOMEM;
-                       goto err_bluetooth;
-               }
-               ret = rfkill_register(bluetooth_rfkill);
-               if (ret)
-                       goto err_bluetooth;
-       }
-
-       if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
-               wwan_rfkill = rfkill_alloc("dell-wwan",
-                                          &platform_device->dev,
-                                          RFKILL_TYPE_WWAN,
-                                          &dell_rfkill_ops, (void *) 3);
-               if (!wwan_rfkill) {
-                       ret = -ENOMEM;
-                       goto err_wwan;
-               }
-               ret = rfkill_register(wwan_rfkill);
-               if (ret)
-                       goto err_wwan;
-       }
-
-       return 0;
-err_wwan:
-       rfkill_destroy(wwan_rfkill);
-       if (bluetooth_rfkill)
-               rfkill_unregister(bluetooth_rfkill);
-err_bluetooth:
-       rfkill_destroy(bluetooth_rfkill);
-       if (wifi_rfkill)
-               rfkill_unregister(wifi_rfkill);
-err_wifi:
-       rfkill_destroy(wifi_rfkill);
-
-       return ret;
-}
-
-static void dell_cleanup_rfkill(void)
-{
-       if (wifi_rfkill) {
-               rfkill_unregister(wifi_rfkill);
-               rfkill_destroy(wifi_rfkill);
-       }
-       if (bluetooth_rfkill) {
-               rfkill_unregister(bluetooth_rfkill);
-               rfkill_destroy(bluetooth_rfkill);
-       }
-       if (wwan_rfkill) {
-               rfkill_unregister(wwan_rfkill);
-               rfkill_destroy(wwan_rfkill);
-       }
-}
-
 static int dell_send_intensity(struct backlight_device *bd)
 {
        int ret = 0;
@@ -655,30 +459,6 @@ static void touchpad_led_exit(void)
        led_classdev_unregister(&touchpad_led);
 }
 
-static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
-                             struct serio *port)
-{
-       static bool extended;
-
-       if (str & 0x20)
-               return false;
-
-       if (unlikely(data == 0xe0)) {
-               extended = true;
-               return false;
-       } else if (unlikely(extended)) {
-               switch (data) {
-               case 0x8:
-                       schedule_delayed_work(&dell_rfkill_work,
-                                             round_jiffies_relative(HZ));
-                       break;
-               }
-               extended = false;
-       }
-
-       return false;
-}
-
 static int __init dell_init(void)
 {
        int max_intensity = 0;
@@ -720,26 +500,10 @@ static int __init dell_init(void)
                goto fail_buffer;
        buffer = page_address(bufferpage);
 
-       ret = dell_setup_rfkill();
-
-       if (ret) {
-               pr_warn("Unable to setup rfkill\n");
-               goto fail_rfkill;
-       }
-
-       ret = i8042_install_filter(dell_laptop_i8042_filter);
-       if (ret) {
-               pr_warn("Unable to install key filter\n");
-               goto fail_filter;
-       }
-
        if (quirks && quirks->touchpad_led)
                touchpad_led_init(&platform_device->dev);
 
        dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
-       if (dell_laptop_dir != NULL)
-               debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
-                                   &dell_debugfs_fops);
 
 #ifdef CONFIG_ACPI
        /* In the event of an ACPI backlight being available, don't
@@ -782,11 +546,6 @@ static int __init dell_init(void)
        return 0;
 
 fail_backlight:
-       i8042_remove_filter(dell_laptop_i8042_filter);
-       cancel_delayed_work_sync(&dell_rfkill_work);
-fail_filter:
-       dell_cleanup_rfkill();
-fail_rfkill:
        free_page((unsigned long)bufferpage);
 fail_buffer:
        platform_device_del(platform_device);
@@ -804,10 +563,7 @@ static void __exit dell_exit(void)
        debugfs_remove_recursive(dell_laptop_dir);
        if (quirks && quirks->touchpad_led)
                touchpad_led_exit();
-       i8042_remove_filter(dell_laptop_i8042_filter);
-       cancel_delayed_work_sync(&dell_rfkill_work);
        backlight_device_unregister(dell_backlight_device);
-       dell_cleanup_rfkill();
        if (platform_device) {
                platform_device_unregister(platform_device);
                platform_driver_unregister(&platform_driver);
index 580d80a73c3adab888ba7fb07adb323c4c48883a..da267eae8ba85bc151fef55a0255a9d850429141 100644 (file)
@@ -16,6 +16,8 @@
  * 59 Temple Place Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -34,7 +36,8 @@
 #define ACPI_FUJITSU_CLASS "fujitsu"
 
 #define INVERT_TABLET_MODE_BIT      0x01
-#define FORCE_TABLET_MODE_IF_UNDOCK 0x02
+#define INVERT_DOCK_STATE_BIT       0x02
+#define FORCE_TABLET_MODE_IF_UNDOCK 0x04
 
 #define KEYMAP_LEN 16
 
@@ -161,6 +164,8 @@ static void fujitsu_send_state(void)
        state = fujitsu_read_register(0xdd);
 
        dock = state & 0x02;
+       if (fujitsu.config.quirks & INVERT_DOCK_STATE_BIT)
+               dock = !dock;
 
        if ((fujitsu.config.quirks & FORCE_TABLET_MODE_IF_UNDOCK) && (!dock)) {
                tablet_mode = 1;
@@ -221,9 +226,6 @@ static int __devinit input_fujitsu_setup(struct device *parent,
        input_set_capability(idev, EV_SW, SW_DOCK);
        input_set_capability(idev, EV_SW, SW_TABLET_MODE);
 
-       input_set_capability(idev, EV_SW, SW_DOCK);
-       input_set_capability(idev, EV_SW, SW_TABLET_MODE);
-
        error = input_register_device(idev);
        if (error) {
                input_free_device(idev);
@@ -275,25 +277,31 @@ static irqreturn_t fujitsu_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int __devinit fujitsu_dmi_default(const struct dmi_system_id *dmi)
+static void __devinit fujitsu_dmi_common(const struct dmi_system_id *dmi)
 {
-       printk(KERN_INFO MODULENAME ": %s\n", dmi->ident);
+       pr_info("%s\n", dmi->ident);
        memcpy(fujitsu.config.keymap, dmi->driver_data,
                        sizeof(fujitsu.config.keymap));
+}
+
+static int __devinit fujitsu_dmi_lifebook(const struct dmi_system_id *dmi)
+{
+       fujitsu_dmi_common(dmi);
+       fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
        return 1;
 }
 
 static int __devinit fujitsu_dmi_stylistic(const struct dmi_system_id *dmi)
 {
-       fujitsu_dmi_default(dmi);
+       fujitsu_dmi_common(dmi);
        fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK;
-       fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
+       fujitsu.config.quirks |= INVERT_DOCK_STATE_BIT;
        return 1;
 }
 
 static struct dmi_system_id dmi_ids[] __initconst = {
        {
-               .callback = fujitsu_dmi_default,
+               .callback = fujitsu_dmi_lifebook,
                .ident = "Fujitsu Siemens P/T Series",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -302,7 +310,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
                .driver_data = keymap_Lifebook_Tseries
        },
        {
-               .callback = fujitsu_dmi_default,
+               .callback = fujitsu_dmi_lifebook,
                .ident = "Fujitsu Lifebook T Series",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -320,7 +328,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
                .driver_data = keymap_Stylistic_Tseries
        },
        {
-               .callback = fujitsu_dmi_default,
+               .callback = fujitsu_dmi_lifebook,
                .ident = "Fujitsu LifeBook U810",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -347,7 +355,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
                .driver_data = keymap_Stylistic_ST5xxx
        },
        {
-               .callback = fujitsu_dmi_default,
+               .callback = fujitsu_dmi_lifebook,
                .ident = "Unknown (using defaults)",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, ""),
@@ -473,6 +481,6 @@ module_exit(fujitsu_module_exit);
 MODULE_AUTHOR("Robert Gerlach <khnz@gmx.de>");
 MODULE_DESCRIPTION("Fujitsu tablet pc extras driver");
 MODULE_LICENSE("GPL");
-MODULE_VERSION("2.4");
+MODULE_VERSION("2.5");
 
 MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
index 7387f97a294194b607115096510f2119d76902d0..24a3ae065f1b9ffce58f6c6aaa595f2384c11bac 100644 (file)
@@ -2,7 +2,7 @@
  * hdaps.c - driver for IBM's Hard Drive Active Protection System
  *
  * Copyright (C) 2005 Robert Love <rml@novell.com>
- * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
+ * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net>
  *
  * The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads
  * starting with the R40, T41, and X40.  It provides a basic two-axis
index e2faa3cbb792e3f154d83db97ca0f9f45ed44402..387183a2d6ddd4405c8430137e8da3c4595b5407 100644 (file)
@@ -634,6 +634,8 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
                                           RFKILL_TYPE_WLAN,
                                           &hp_wmi_rfkill_ops,
                                           (void *) HPWMI_WIFI);
+               if (!wifi_rfkill)
+                       return -ENOMEM;
                rfkill_init_sw_state(wifi_rfkill,
                                     hp_wmi_get_sw_state(HPWMI_WIFI));
                rfkill_set_hw_state(wifi_rfkill,
@@ -648,6 +650,10 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
                                                RFKILL_TYPE_BLUETOOTH,
                                                &hp_wmi_rfkill_ops,
                                                (void *) HPWMI_BLUETOOTH);
+               if (!bluetooth_rfkill) {
+                       err = -ENOMEM;
+                       goto register_wifi_error;
+               }
                rfkill_init_sw_state(bluetooth_rfkill,
                                     hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
                rfkill_set_hw_state(bluetooth_rfkill,
@@ -662,6 +668,10 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
                                           RFKILL_TYPE_WWAN,
                                           &hp_wmi_rfkill_ops,
                                           (void *) HPWMI_WWAN);
+               if (!wwan_rfkill) {
+                       err = -ENOMEM;
+                       goto register_bluetooth_error;
+               }
                rfkill_init_sw_state(wwan_rfkill,
                                     hp_wmi_get_sw_state(HPWMI_WWAN));
                rfkill_set_hw_state(wwan_rfkill,
index ac902f7a9baad76cbe4fb5982285044fe81a97e5..4f20f8dd3d7cd054d2b3ef4c72cda6c75e265094 100644 (file)
@@ -194,7 +194,6 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
 /*
  * debugfs
  */
-#define DEBUGFS_EVENT_LEN (4096)
 static int debugfs_status_show(struct seq_file *s, void *data)
 {
        unsigned long value;
@@ -315,7 +314,7 @@ static int __devinit ideapad_debugfs_init(struct ideapad_private *priv)
        node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL,
                                   &debugfs_status_fops);
        if (!node) {
-               pr_err("failed to create event in debugfs");
+               pr_err("failed to create status in debugfs");
                goto errout;
        }
 
@@ -785,6 +784,10 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
                        case 9:
                                ideapad_sync_rfk_state(priv);
                                break;
+                       case 13:
+                       case 6:
+                               ideapad_input_report(priv, vpc_bit);
+                               break;
                        case 4:
                                ideapad_backlight_notify_brightness(priv);
                                break;
@@ -795,7 +798,7 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
                                ideapad_backlight_notify_power(priv);
                                break;
                        default:
-                               ideapad_input_report(priv, vpc_bit);
+                               pr_info("Unknown event: %lu\n", vpc_bit);
                        }
                }
        }
index 8a51795aa02a07bbc0d60b6267686ca4f54dbe9b..210d4ae547c201eafd438ca896357f10f22a3502 100644 (file)
@@ -141,6 +141,27 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
                 "(default: 0)");
 
 static void sony_nc_kbd_backlight_resume(void);
+static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
+               unsigned int handle);
+static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd);
+
+static int sony_nc_battery_care_setup(struct platform_device *pd,
+               unsigned int handle);
+static void sony_nc_battery_care_cleanup(struct platform_device *pd);
+
+static int sony_nc_thermal_setup(struct platform_device *pd);
+static void sony_nc_thermal_cleanup(struct platform_device *pd);
+static void sony_nc_thermal_resume(void);
+
+static int sony_nc_lid_resume_setup(struct platform_device *pd);
+static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
+
+static int sony_nc_highspeed_charging_setup(struct platform_device *pd);
+static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd);
+
+static int sony_nc_touchpad_setup(struct platform_device *pd,
+                                 unsigned int handle);
+static void sony_nc_touchpad_cleanup(struct platform_device *pd);
 
 enum sony_nc_rfkill {
        SONY_WIFI,
@@ -153,6 +174,9 @@ enum sony_nc_rfkill {
 static int sony_rfkill_handle;
 static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL];
 static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900};
+static int sony_nc_rfkill_setup(struct acpi_device *device,
+               unsigned int handle);
+static void sony_nc_rfkill_cleanup(void);
 static void sony_nc_rfkill_update(void);
 
 /*********** Input Devices ***********/
@@ -691,59 +715,97 @@ static struct acpi_device *sony_nc_acpi_device = NULL;
 
 /*
  * acpi_evaluate_object wrappers
+ * all useful calls into SNC methods take one or zero parameters and return
+ * integers or arrays.
  */
-static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
+static union acpi_object *__call_snc_method(acpi_handle handle, char *method,
+               u64 *value)
 {
-       struct acpi_buffer output;
-       union acpi_object out_obj;
+       union acpi_object *result = NULL;
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
        acpi_status status;
 
-       output.length = sizeof(out_obj);
-       output.pointer = &out_obj;
+       if (value) {
+               struct acpi_object_list params;
+               union acpi_object in;
+               in.type = ACPI_TYPE_INTEGER;
+               in.integer.value = *value;
+               params.count = 1;
+               params.pointer = &in;
+               status = acpi_evaluate_object(handle, method, &params, &output);
+               dprintk("__call_snc_method: [%s:0x%.8x%.8x]\n", method,
+                               (unsigned int)(*value >> 32),
+                               (unsigned int)*value & 0xffffffff);
+       } else {
+               status = acpi_evaluate_object(handle, method, NULL, &output);
+               dprintk("__call_snc_method: [%s]\n", method);
+       }
 
-       status = acpi_evaluate_object(handle, name, NULL, &output);
-       if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) {
-               *result = out_obj.integer.value;
-               return 0;
+       if (ACPI_FAILURE(status)) {
+               pr_err("Failed to evaluate [%s]\n", method);
+               return NULL;
        }
 
-       pr_warn("acpi_callreadfunc failed\n");
+       result = (union acpi_object *) output.pointer;
+       if (!result)
+               dprintk("No return object [%s]\n", method);
 
-       return -1;
+       return result;
 }
 
-static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
-                           int *result)
+static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
+               int *result)
 {
-       struct acpi_object_list params;
-       union acpi_object in_obj;
-       struct acpi_buffer output;
-       union acpi_object out_obj;
-       acpi_status status;
-
-       params.count = 1;
-       params.pointer = &in_obj;
-       in_obj.type = ACPI_TYPE_INTEGER;
-       in_obj.integer.value = value;
+       union acpi_object *object = NULL;
+       if (value) {
+               u64 v = *value;
+               object = __call_snc_method(handle, name, &v);
+       } else
+               object = __call_snc_method(handle, name, NULL);
 
-       output.length = sizeof(out_obj);
-       output.pointer = &out_obj;
+       if (!object)
+               return -EINVAL;
 
-       status = acpi_evaluate_object(handle, name, &params, &output);
-       if (status == AE_OK) {
-               if (result != NULL) {
-                       if (out_obj.type != ACPI_TYPE_INTEGER) {
-                               pr_warn("acpi_evaluate_object bad return type\n");
-                               return -1;
-                       }
-                       *result = out_obj.integer.value;
-               }
-               return 0;
+       if (object->type != ACPI_TYPE_INTEGER) {
+               pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
+                               ACPI_TYPE_INTEGER, object->type);
+               kfree(object);
+               return -EINVAL;
        }
 
-       pr_warn("acpi_evaluate_object failed\n");
+       if (result)
+               *result = object->integer.value;
+
+       kfree(object);
+       return 0;
+}
+
+#define MIN(a, b)      (a > b ? b : a)
+static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
+               void *buffer, size_t buflen)
+{
+       size_t len = len;
+       union acpi_object *object = __call_snc_method(handle, name, value);
+
+       if (!object)
+               return -EINVAL;
+
+       if (object->type == ACPI_TYPE_BUFFER)
+               len = MIN(buflen, object->buffer.length);
+
+       else if (object->type == ACPI_TYPE_INTEGER)
+               len = MIN(buflen, sizeof(object->integer.value));
+
+       else {
+               pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
+                               ACPI_TYPE_BUFFER, object->type);
+               kfree(object);
+               return -EINVAL;
+       }
 
-       return -1;
+       memcpy(buffer, object->buffer.pointer, len);
+       kfree(object);
+       return 0;
 }
 
 struct sony_nc_handles {
@@ -770,16 +832,17 @@ static ssize_t sony_nc_handles_show(struct device *dev,
 
 static int sony_nc_handles_setup(struct platform_device *pd)
 {
-       int i;
-       int result;
+       int i, r, result, arg;
 
        handles = kzalloc(sizeof(*handles), GFP_KERNEL);
        if (!handles)
                return -ENOMEM;
 
        for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
-               if (!acpi_callsetfunc(sony_nc_acpi_handle,
-                                       "SN00", i + 0x20, &result)) {
+               arg = i + 0x20;
+               r = sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg,
+                                       &result);
+               if (!r) {
                        dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n",
                                        result, i);
                        handles->cap[i] = result;
@@ -819,8 +882,8 @@ static int sony_find_snc_handle(int handle)
        int i;
 
        /* not initialized yet, return early */
-       if (!handles)
-               return -1;
+       if (!handles || !handle)
+               return -EINVAL;
 
        for (i = 0; i < 0x10; i++) {
                if (handles->cap[i] == handle) {
@@ -830,21 +893,20 @@ static int sony_find_snc_handle(int handle)
                }
        }
        dprintk("handle 0x%.4x not found\n", handle);
-       return -1;
+       return -EINVAL;
 }
 
 static int sony_call_snc_handle(int handle, int argument, int *result)
 {
-       int ret = 0;
+       int arg, ret = 0;
        int offset = sony_find_snc_handle(handle);
 
        if (offset < 0)
-               return -1;
+               return offset;
 
-       ret = acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument,
-                       result);
-       dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", offset | argument,
-                       *result);
+       arg = offset | argument;
+       ret = sony_nc_int_call(sony_nc_acpi_handle, "SN07", &arg, result);
+       dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", arg, *result);
        return ret;
 }
 
@@ -889,14 +951,16 @@ static int boolean_validate(const int direction, const int value)
 static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr,
                              char *buffer)
 {
-       int value;
+       int value, ret = 0;
        struct sony_nc_value *item =
            container_of(attr, struct sony_nc_value, devattr);
 
        if (!*item->acpiget)
                return -EIO;
 
-       if (acpi_callgetfunc(sony_nc_acpi_handle, *item->acpiget, &value) < 0)
+       ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiget, NULL,
+                               &value);
+       if (ret < 0)
                return -EIO;
 
        if (item->validate)
@@ -909,7 +973,8 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buffer, size_t count)
 {
-       int value;
+       unsigned long value = 0;
+       int ret = 0;
        struct sony_nc_value *item =
            container_of(attr, struct sony_nc_value, devattr);
 
@@ -919,7 +984,8 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
        if (count > 31)
                return -EINVAL;
 
-       value = simple_strtoul(buffer, NULL, 10);
+       if (kstrtoul(buffer, 10, &value))
+               return -EINVAL;
 
        if (item->validate)
                value = item->validate(SNC_VALIDATE_IN, value);
@@ -927,8 +993,11 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
        if (value < 0)
                return value;
 
-       if (acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, value, NULL) < 0)
+       ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
+                       (int *)&value, NULL);
+       if (ret < 0)
                return -EIO;
+
        item->value = value;
        item->valid = 1;
        return count;
@@ -948,15 +1017,15 @@ struct sony_backlight_props sony_bl_props;
 
 static int sony_backlight_update_status(struct backlight_device *bd)
 {
-       return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
-                               bd->props.brightness + 1, NULL);
+       int arg = bd->props.brightness + 1;
+       return sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &arg, NULL);
 }
 
 static int sony_backlight_get_brightness(struct backlight_device *bd)
 {
        int value;
 
-       if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value))
+       if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL, &value))
                return 0;
        /* brightness levels are 1-based, while backlight ones are 0-based */
        return value - 1;
@@ -1024,10 +1093,14 @@ static struct sony_nc_event sony_100_events[] = {
        { 0x06, SONYPI_EVENT_FNKEY_RELEASED },
        { 0x87, SONYPI_EVENT_FNKEY_F7 },
        { 0x07, SONYPI_EVENT_FNKEY_RELEASED },
+       { 0x88, SONYPI_EVENT_FNKEY_F8 },
+       { 0x08, SONYPI_EVENT_FNKEY_RELEASED },
        { 0x89, SONYPI_EVENT_FNKEY_F9 },
        { 0x09, SONYPI_EVENT_FNKEY_RELEASED },
        { 0x8A, SONYPI_EVENT_FNKEY_F10 },
        { 0x0A, SONYPI_EVENT_FNKEY_RELEASED },
+       { 0x8B, SONYPI_EVENT_FNKEY_F11 },
+       { 0x0B, SONYPI_EVENT_FNKEY_RELEASED },
        { 0x8C, SONYPI_EVENT_FNKEY_F12 },
        { 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
        { 0x9d, SONYPI_EVENT_ZOOM_PRESSED },
@@ -1063,63 +1136,116 @@ static struct sony_nc_event sony_127_events[] = {
        { 0, 0 },
 };
 
+static int sony_nc_hotkeys_decode(u32 event, unsigned int handle)
+{
+       int ret = -EINVAL;
+       unsigned int result = 0;
+       struct sony_nc_event *key_event;
+
+       if (sony_call_snc_handle(handle, 0x200, &result)) {
+               dprintk("Unable to decode event 0x%.2x 0x%.2x\n", handle,
+                               event);
+               return -EINVAL;
+       }
+
+       result &= 0xFF;
+
+       if (handle == 0x0100)
+               key_event = sony_100_events;
+       else
+               key_event = sony_127_events;
+
+       for (; key_event->data; key_event++) {
+               if (key_event->data == result) {
+                       ret = key_event->event;
+                       break;
+               }
+       }
+
+       if (!key_event->data)
+               pr_info("Unknown hotkey 0x%.2x/0x%.2x (handle 0x%.2x)\n",
+                               event, result, handle);
+
+       return ret;
+}
+
 /*
  * ACPI callbacks
  */
 static void sony_nc_notify(struct acpi_device *device, u32 event)
 {
-       u32 ev = event;
+       u32 real_ev = event;
+       u8 ev_type = 0;
+       dprintk("sony_nc_notify, event: 0x%.2x\n", event);
+
+       if (event >= 0x90) {
+               unsigned int result = 0;
+               unsigned int arg = 0;
+               unsigned int handle = 0;
+               unsigned int offset = event - 0x90;
+
+               if (offset >= ARRAY_SIZE(handles->cap)) {
+                       pr_err("Event 0x%x outside of capabilities list\n",
+                                       event);
+                       return;
+               }
+               handle = handles->cap[offset];
+
+               /* list of handles known for generating events */
+               switch (handle) {
+               /* hotkey event */
+               case 0x0100:
+               case 0x0127:
+                       ev_type = 1;
+                       real_ev = sony_nc_hotkeys_decode(event, handle);
+
+                       if (real_ev > 0)
+                               sony_laptop_report_input_event(real_ev);
+                       else
+                               /* restore the original event for reporting */
+                               real_ev = event;
 
-       if (ev >= 0x90) {
-               /* New-style event */
-               int result;
-               int key_handle = 0;
-               ev -= 0x90;
-
-               if (sony_find_snc_handle(0x100) == ev)
-                       key_handle = 0x100;
-               if (sony_find_snc_handle(0x127) == ev)
-                       key_handle = 0x127;
-
-               if (key_handle) {
-                       struct sony_nc_event *key_event;
-
-                       if (sony_call_snc_handle(key_handle, 0x200, &result)) {
-                               dprintk("sony_nc_notify, unable to decode"
-                                       " event 0x%.2x 0x%.2x\n", key_handle,
-                                       ev);
-                               /* restore the original event */
-                               ev = event;
-                       } else {
-                               ev = result & 0xFF;
-
-                               if (key_handle == 0x100)
-                                       key_event = sony_100_events;
-                               else
-                                       key_event = sony_127_events;
-
-                               for (; key_event->data; key_event++) {
-                                       if (key_event->data == ev) {
-                                               ev = key_event->event;
-                                               break;
-                                       }
-                               }
+                       break;
 
-                               if (!key_event->data)
-                                       pr_info("Unknown event: 0x%x 0x%x\n",
-                                               key_handle, ev);
-                               else
-                                       sony_laptop_report_input_event(ev);
-                       }
-               } else if (sony_find_snc_handle(sony_rfkill_handle) == ev) {
-                       sony_nc_rfkill_update();
-                       return;
+               /* wlan switch */
+               case 0x0124:
+               case 0x0135:
+                       /* events on this handle are reported when the
+                        * switch changes position or for battery
+                        * events. We'll notify both of them but only
+                        * update the rfkill device status when the
+                        * switch is moved.
+                        */
+                       ev_type = 2;
+                       sony_call_snc_handle(handle, 0x0100, &result);
+                       real_ev = result & 0x03;
+
+                       /* hw switch event */
+                       if (real_ev == 1)
+                               sony_nc_rfkill_update();
+
+                       break;
+
+               default:
+                       dprintk("Unknown event 0x%x for handle 0x%x\n",
+                                       event, handle);
+                       break;
                }
-       } else
-               sony_laptop_report_input_event(ev);
 
-       dprintk("sony_nc_notify, event: 0x%.2x\n", ev);
-       acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev);
+               /* clear the event (and the event reason when present) */
+               arg = 1 << offset;
+               sony_nc_int_call(sony_nc_acpi_handle, "SN05", &arg, &result);
+
+       } else {
+               /* old style event */
+               ev_type = 1;
+               sony_laptop_report_input_event(real_ev);
+       }
+
+       acpi_bus_generate_proc_event(sony_nc_acpi_device, ev_type, real_ev);
+
+       acpi_bus_generate_netlink_event(sony_nc_acpi_device->pnp.device_class,
+                       dev_name(&sony_nc_acpi_device->dev), ev_type, real_ev);
 }
 
 static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
@@ -1140,20 +1266,190 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
 /*
  * ACPI device
  */
-static int sony_nc_function_setup(struct acpi_device *device)
+static void sony_nc_function_setup(struct acpi_device *device,
+               struct platform_device *pf_device)
 {
-       int result;
+       unsigned int i, result, bitmask, arg;
+
+       if (!handles)
+               return;
+
+       /* setup found handles here */
+       for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+               unsigned int handle = handles->cap[i];
+
+               if (!handle)
+                       continue;
+
+               dprintk("setting up handle 0x%.4x\n", handle);
+
+               switch (handle) {
+               case 0x0100:
+               case 0x0101:
+               case 0x0127:
+                       /* setup hotkeys */
+                       sony_call_snc_handle(handle, 0, &result);
+                       break;
+               case 0x0102:
+                       /* setup hotkeys */
+                       sony_call_snc_handle(handle, 0x100, &result);
+                       break;
+               case 0x0105:
+               case 0x0148:
+                       /* touchpad enable/disable */
+                       result = sony_nc_touchpad_setup(pf_device, handle);
+                       if (result)
+                               pr_err("couldn't set up touchpad control function (%d)\n",
+                                               result);
+                       break;
+               case 0x0115:
+               case 0x0136:
+               case 0x013f:
+                       result = sony_nc_battery_care_setup(pf_device, handle);
+                       if (result)
+                               pr_err("couldn't set up battery care function (%d)\n",
+                                               result);
+                       break;
+               case 0x0119:
+                       result = sony_nc_lid_resume_setup(pf_device);
+                       if (result)
+                               pr_err("couldn't set up lid resume function (%d)\n",
+                                               result);
+                       break;
+               case 0x0122:
+                       result = sony_nc_thermal_setup(pf_device);
+                       if (result)
+                               pr_err("couldn't set up thermal profile function (%d)\n",
+                                               result);
+                       break;
+               case 0x0131:
+                       result = sony_nc_highspeed_charging_setup(pf_device);
+                       if (result)
+                               pr_err("couldn't set up high speed charging function (%d)\n",
+                                      result);
+                       break;
+               case 0x0124:
+               case 0x0135:
+                       result = sony_nc_rfkill_setup(device, handle);
+                       if (result)
+                               pr_err("couldn't set up rfkill support (%d)\n",
+                                               result);
+                       break;
+               case 0x0137:
+               case 0x0143:
+                       result = sony_nc_kbd_backlight_setup(pf_device, handle);
+                       if (result)
+                               pr_err("couldn't set up keyboard backlight function (%d)\n",
+                                               result);
+                       break;
+               default:
+                       continue;
+               }
+       }
 
        /* Enable all events */
-       acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0xffff, &result);
+       arg = 0x10;
+       if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
+               sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
+                               &result);
+}
 
-       /* Setup hotkeys */
-       sony_call_snc_handle(0x0100, 0, &result);
-       sony_call_snc_handle(0x0101, 0, &result);
-       sony_call_snc_handle(0x0102, 0x100, &result);
-       sony_call_snc_handle(0x0127, 0, &result);
+static void sony_nc_function_cleanup(struct platform_device *pd)
+{
+       unsigned int i, result, bitmask, handle;
 
-       return 0;
+       /* get enabled events and disable them */
+       sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask);
+       sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result);
+
+       /* cleanup handles here */
+       for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+
+               handle = handles->cap[i];
+
+               if (!handle)
+                       continue;
+
+               switch (handle) {
+               case 0x0105:
+               case 0x0148:
+                       sony_nc_touchpad_cleanup(pd);
+                       break;
+               case 0x0115:
+               case 0x0136:
+               case 0x013f:
+                       sony_nc_battery_care_cleanup(pd);
+                       break;
+               case 0x0119:
+                       sony_nc_lid_resume_cleanup(pd);
+                       break;
+               case 0x0122:
+                       sony_nc_thermal_cleanup(pd);
+                       break;
+               case 0x0131:
+                       sony_nc_highspeed_charging_cleanup(pd);
+                       break;
+               case 0x0124:
+               case 0x0135:
+                       sony_nc_rfkill_cleanup();
+                       break;
+               case 0x0137:
+               case 0x0143:
+                       sony_nc_kbd_backlight_cleanup(pd);
+                       break;
+               default:
+                       continue;
+               }
+       }
+
+       /* finally cleanup the handles list */
+       sony_nc_handles_cleanup(pd);
+}
+
+static void sony_nc_function_resume(void)
+{
+       unsigned int i, result, bitmask, arg;
+
+       dprintk("Resuming SNC device\n");
+
+       for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+               unsigned int handle = handles->cap[i];
+
+               if (!handle)
+                       continue;
+
+               switch (handle) {
+               case 0x0100:
+               case 0x0101:
+               case 0x0127:
+                       /* re-enable hotkeys */
+                       sony_call_snc_handle(handle, 0, &result);
+                       break;
+               case 0x0102:
+                       /* re-enable hotkeys */
+                       sony_call_snc_handle(handle, 0x100, &result);
+                       break;
+               case 0x0122:
+                       sony_nc_thermal_resume();
+                       break;
+               case 0x0124:
+               case 0x0135:
+                       sony_nc_rfkill_update();
+                       break;
+               case 0x0137:
+               case 0x0143:
+                       sony_nc_kbd_backlight_resume();
+                       break;
+               default:
+                       continue;
+               }
+       }
+
+       /* Enable all events */
+       arg = 0x10;
+       if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
+               sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
+                               &result);
 }
 
 static int sony_nc_resume(struct acpi_device *device)
@@ -1166,8 +1462,8 @@ static int sony_nc_resume(struct acpi_device *device)
 
                if (!item->valid)
                        continue;
-               ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
-                                      item->value, NULL);
+               ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
+                                      &item->value, NULL);
                if (ret < 0) {
                        pr_err("%s: %d\n", __func__, ret);
                        break;
@@ -1176,21 +1472,14 @@ static int sony_nc_resume(struct acpi_device *device)
 
        if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
                                         &handle))) {
-               if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
+               int arg = 1;
+               if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
                        dprintk("ECON Method failed\n");
        }
 
        if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
-                                        &handle))) {
-               dprintk("Doing SNC setup\n");
-               sony_nc_function_setup(device);
-       }
-
-       /* re-read rfkill state */
-       sony_nc_rfkill_update();
-
-       /* restore kbd backlight states */
-       sony_nc_kbd_backlight_resume();
+                                        &handle)))
+               sony_nc_function_resume();
 
        return 0;
 }
@@ -1213,7 +1502,7 @@ static int sony_nc_rfkill_set(void *data, bool blocked)
        int argument = sony_rfkill_address[(long) data] + 0x100;
 
        if (!blocked)
-               argument |= 0xff0000;
+               argument |= 0x030000;
 
        return sony_call_snc_handle(sony_rfkill_handle, argument, &result);
 }
@@ -1230,7 +1519,7 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
        enum rfkill_type type;
        const char *name;
        int result;
-       bool hwblock;
+       bool hwblock, swblock;
 
        switch (nc_type) {
        case SONY_WIFI:
@@ -1258,8 +1547,21 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
        if (!rfk)
                return -ENOMEM;
 
-       sony_call_snc_handle(sony_rfkill_handle, 0x200, &result);
+       if (sony_call_snc_handle(sony_rfkill_handle, 0x200, &result) < 0) {
+               rfkill_destroy(rfk);
+               return -1;
+       }
        hwblock = !(result & 0x1);
+
+       if (sony_call_snc_handle(sony_rfkill_handle,
+                               sony_rfkill_address[nc_type],
+                               &result) < 0) {
+               rfkill_destroy(rfk);
+               return -1;
+       }
+       swblock = !(result & 0x2);
+
+       rfkill_init_sw_state(rfk, swblock);
        rfkill_set_hw_state(rfk, hwblock);
 
        err = rfkill_register(rfk);
@@ -1295,101 +1597,79 @@ static void sony_nc_rfkill_update(void)
 
                sony_call_snc_handle(sony_rfkill_handle, argument, &result);
                rfkill_set_states(sony_rfkill_devices[i],
-                                 !(result & 0xf), false);
+                                 !(result & 0x2), false);
        }
 }
 
-static void sony_nc_rfkill_setup(struct acpi_device *device)
+static int sony_nc_rfkill_setup(struct acpi_device *device,
+               unsigned int handle)
 {
-       int offset;
-       u8 dev_code, i;
-       acpi_status status;
-       struct acpi_object_list params;
-       union acpi_object in_obj;
-       union acpi_object *device_enum;
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
-       offset = sony_find_snc_handle(0x124);
-       if (offset == -1) {
-               offset = sony_find_snc_handle(0x135);
-               if (offset == -1)
-                       return;
-               else
-                       sony_rfkill_handle = 0x135;
-       } else
-               sony_rfkill_handle = 0x124;
-       dprintk("Found rkfill handle: 0x%.4x\n", sony_rfkill_handle);
-
-       /* need to read the whole buffer returned by the acpi call to SN06
-        * here otherwise we may miss some features
-        */
-       params.count = 1;
-       params.pointer = &in_obj;
-       in_obj.type = ACPI_TYPE_INTEGER;
-       in_obj.integer.value = offset;
-       status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
-                       &buffer);
-       if (ACPI_FAILURE(status)) {
-               dprintk("Radio device enumeration failed\n");
-               return;
-       }
-
-       device_enum = (union acpi_object *) buffer.pointer;
-       if (!device_enum) {
-               pr_err("No SN06 return object\n");
-               goto out_no_enum;
-       }
-       if (device_enum->type != ACPI_TYPE_BUFFER) {
-               pr_err("Invalid SN06 return object 0x%.2x\n",
-                      device_enum->type);
-               goto out_no_enum;
-       }
+       u64 offset;
+       int i;
+       unsigned char buffer[32] = { 0 };
 
-       /* the buffer is filled with magic numbers describing the devices
-        * available, 0xff terminates the enumeration
+       offset = sony_find_snc_handle(handle);
+       sony_rfkill_handle = handle;
+
+       i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
+                       32);
+       if (i < 0)
+               return i;
+
+       /* The buffer is filled with magic numbers describing the devices
+        * available, 0xff terminates the enumeration.
+        * Known codes:
+        *      0x00 WLAN
+        *      0x10 BLUETOOTH
+        *      0x20 WWAN GPRS-EDGE
+        *      0x21 WWAN HSDPA
+        *      0x22 WWAN EV-DO
+        *      0x23 WWAN GPS
+        *      0x25 Gobi WWAN no GPS
+        *      0x26 Gobi WWAN + GPS
+        *      0x28 Gobi WWAN no GPS
+        *      0x29 Gobi WWAN + GPS
+        *      0x30 WIMAX
+        *      0x50 Gobi WWAN no GPS
+        *      0x51 Gobi WWAN + GPS
+        *      0x70 no SIM card slot
+        *      0x71 SIM card slot
         */
-       for (i = 0; i < device_enum->buffer.length; i++) {
+       for (i = 0; i < ARRAY_SIZE(buffer); i++) {
 
-               dev_code = *(device_enum->buffer.pointer + i);
-               if (dev_code == 0xff)
+               if (buffer[i] == 0xff)
                        break;
 
-               dprintk("Radio devices, looking at 0x%.2x\n", dev_code);
+               dprintk("Radio devices, found 0x%.2x\n", buffer[i]);
 
-               if (dev_code == 0 && !sony_rfkill_devices[SONY_WIFI])
+               if (buffer[i] == 0 && !sony_rfkill_devices[SONY_WIFI])
                        sony_nc_setup_rfkill(device, SONY_WIFI);
 
-               if (dev_code == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
+               if (buffer[i] == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
                        sony_nc_setup_rfkill(device, SONY_BLUETOOTH);
 
-               if ((0xf0 & dev_code) == 0x20 &&
+               if (((0xf0 & buffer[i]) == 0x20 ||
+                                       (0xf0 & buffer[i]) == 0x50) &&
                                !sony_rfkill_devices[SONY_WWAN])
                        sony_nc_setup_rfkill(device, SONY_WWAN);
 
-               if (dev_code == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
+               if (buffer[i] == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
                        sony_nc_setup_rfkill(device, SONY_WIMAX);
        }
-
-out_no_enum:
-       kfree(buffer.pointer);
-       return;
+       return 0;
 }
 
 /* Keyboard backlight feature */
-#define KBDBL_HANDLER  0x137
-#define KBDBL_PRESENT  0xB00
-#define        SET_MODE        0xC00
-#define SET_STATE      0xD00
-#define SET_TIMEOUT    0xE00
-
 struct kbd_backlight {
-       int mode;
-       int timeout;
+       unsigned int handle;
+       unsigned int base;
+       unsigned int mode;
+       unsigned int timeout;
        struct device_attribute mode_attr;
        struct device_attribute timeout_attr;
 };
 
-static struct kbd_backlight *kbdbl_handle;
+static struct kbd_backlight *kbdbl_ctl;
 
 static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
 {
@@ -1398,15 +1678,15 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
        if (value > 1)
                return -EINVAL;
 
-       if (sony_call_snc_handle(KBDBL_HANDLER,
-                               (value << 0x10) | SET_MODE, &result))
+       if (sony_call_snc_handle(kbdbl_ctl->handle,
+                               (value << 0x10) | (kbdbl_ctl->base), &result))
                return -EIO;
 
        /* Try to turn the light on/off immediately */
-       sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE,
-                       &result);
+       sony_call_snc_handle(kbdbl_ctl->handle,
+                       (value << 0x10) | (kbdbl_ctl->base + 0x100), &result);
 
-       kbdbl_handle->mode = value;
+       kbdbl_ctl->mode = value;
 
        return 0;
 }
@@ -1421,7 +1701,7 @@ static ssize_t sony_nc_kbd_backlight_mode_store(struct device *dev,
        if (count > 31)
                return -EINVAL;
 
-       if (strict_strtoul(buffer, 10, &value))
+       if (kstrtoul(buffer, 10, &value))
                return -EINVAL;
 
        ret = __sony_nc_kbd_backlight_mode_set(value);
@@ -1435,7 +1715,7 @@ static ssize_t sony_nc_kbd_backlight_mode_show(struct device *dev,
                struct device_attribute *attr, char *buffer)
 {
        ssize_t count = 0;
-       count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->mode);
+       count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->mode);
        return count;
 }
 
@@ -1446,11 +1726,11 @@ static int __sony_nc_kbd_backlight_timeout_set(u8 value)
        if (value > 3)
                return -EINVAL;
 
-       if (sony_call_snc_handle(KBDBL_HANDLER,
-                               (value << 0x10) | SET_TIMEOUT, &result))
+       if (sony_call_snc_handle(kbdbl_ctl->handle, (value << 0x10) |
+                               (kbdbl_ctl->base + 0x200), &result))
                return -EIO;
 
-       kbdbl_handle->timeout = value;
+       kbdbl_ctl->timeout = value;
 
        return 0;
 }
@@ -1465,7 +1745,7 @@ static ssize_t sony_nc_kbd_backlight_timeout_store(struct device *dev,
        if (count > 31)
                return -EINVAL;
 
-       if (strict_strtoul(buffer, 10, &value))
+       if (kstrtoul(buffer, 10, &value))
                return -EINVAL;
 
        ret = __sony_nc_kbd_backlight_timeout_set(value);
@@ -1479,39 +1759,58 @@ static ssize_t sony_nc_kbd_backlight_timeout_show(struct device *dev,
                struct device_attribute *attr, char *buffer)
 {
        ssize_t count = 0;
-       count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->timeout);
+       count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->timeout);
        return count;
 }
 
-static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
+static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
+               unsigned int handle)
 {
        int result;
+       int ret = 0;
 
-       if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result))
-               return 0;
-       if (!(result & 0x02))
+       /* verify the kbd backlight presence, these handles are not used for
+        * keyboard backlight only
+        */
+       ret = sony_call_snc_handle(handle, handle == 0x0137 ? 0x0B00 : 0x0100,
+                       &result);
+       if (ret)
+               return ret;
+
+       if ((handle == 0x0137 && !(result & 0x02)) ||
+                       !(result & 0x01)) {
+               dprintk("no backlight keyboard found\n");
                return 0;
+       }
 
-       kbdbl_handle = kzalloc(sizeof(*kbdbl_handle), GFP_KERNEL);
-       if (!kbdbl_handle)
+       kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
+       if (!kbdbl_ctl)
                return -ENOMEM;
 
-       sysfs_attr_init(&kbdbl_handle->mode_attr.attr);
-       kbdbl_handle->mode_attr.attr.name = "kbd_backlight";
-       kbdbl_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
-       kbdbl_handle->mode_attr.show = sony_nc_kbd_backlight_mode_show;
-       kbdbl_handle->mode_attr.store = sony_nc_kbd_backlight_mode_store;
+       kbdbl_ctl->handle = handle;
+       if (handle == 0x0137)
+               kbdbl_ctl->base = 0x0C00;
+       else
+               kbdbl_ctl->base = 0x4000;
+
+       sysfs_attr_init(&kbdbl_ctl->mode_attr.attr);
+       kbdbl_ctl->mode_attr.attr.name = "kbd_backlight";
+       kbdbl_ctl->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
+       kbdbl_ctl->mode_attr.show = sony_nc_kbd_backlight_mode_show;
+       kbdbl_ctl->mode_attr.store = sony_nc_kbd_backlight_mode_store;
 
-       sysfs_attr_init(&kbdbl_handle->timeout_attr.attr);
-       kbdbl_handle->timeout_attr.attr.name = "kbd_backlight_timeout";
-       kbdbl_handle->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
-       kbdbl_handle->timeout_attr.show = sony_nc_kbd_backlight_timeout_show;
-       kbdbl_handle->timeout_attr.store = sony_nc_kbd_backlight_timeout_store;
+       sysfs_attr_init(&kbdbl_ctl->timeout_attr.attr);
+       kbdbl_ctl->timeout_attr.attr.name = "kbd_backlight_timeout";
+       kbdbl_ctl->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
+       kbdbl_ctl->timeout_attr.show = sony_nc_kbd_backlight_timeout_show;
+       kbdbl_ctl->timeout_attr.store = sony_nc_kbd_backlight_timeout_store;
 
-       if (device_create_file(&pd->dev, &kbdbl_handle->mode_attr))
+       ret = device_create_file(&pd->dev, &kbdbl_ctl->mode_attr);
+       if (ret)
                goto outkzalloc;
 
-       if (device_create_file(&pd->dev, &kbdbl_handle->timeout_attr))
+       ret = device_create_file(&pd->dev, &kbdbl_ctl->timeout_attr);
+       if (ret)
                goto outmode;
 
        __sony_nc_kbd_backlight_mode_set(kbd_backlight);
@@ -1520,131 +1819,716 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
        return 0;
 
 outmode:
-       device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
+       device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
 outkzalloc:
-       kfree(kbdbl_handle);
-       kbdbl_handle = NULL;
-       return -1;
+       kfree(kbdbl_ctl);
+       kbdbl_ctl = NULL;
+       return ret;
 }
 
-static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
+static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
 {
-       if (kbdbl_handle) {
+       if (kbdbl_ctl) {
                int result;
 
-               device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
-               device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr);
+               device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
+               device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
 
                /* restore the default hw behaviour */
-               sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result);
-               sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result);
+               sony_call_snc_handle(kbdbl_ctl->handle,
+                               kbdbl_ctl->base | 0x10000, &result);
+               sony_call_snc_handle(kbdbl_ctl->handle,
+                               kbdbl_ctl->base + 0x200, &result);
 
-               kfree(kbdbl_handle);
+               kfree(kbdbl_ctl);
+               kbdbl_ctl = NULL;
        }
-       return 0;
 }
 
 static void sony_nc_kbd_backlight_resume(void)
 {
        int ignore = 0;
 
-       if (!kbdbl_handle)
+       if (!kbdbl_ctl)
                return;
 
-       if (kbdbl_handle->mode == 0)
-               sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore);
-
-       if (kbdbl_handle->timeout != 0)
-               sony_call_snc_handle(KBDBL_HANDLER,
-                               (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT,
+       if (kbdbl_ctl->mode == 0)
+               sony_call_snc_handle(kbdbl_ctl->handle, kbdbl_ctl->base,
                                &ignore);
+
+       if (kbdbl_ctl->timeout != 0)
+               sony_call_snc_handle(kbdbl_ctl->handle,
+                               (kbdbl_ctl->base + 0x200) |
+                               (kbdbl_ctl->timeout << 0x10), &ignore);
 }
 
-static void sony_nc_backlight_ng_read_limits(int handle,
-               struct sony_backlight_props *props)
+struct battery_care_control {
+       struct device_attribute attrs[2];
+       unsigned int handle;
+};
+static struct battery_care_control *bcare_ctl;
+
+static ssize_t sony_nc_battery_care_limit_store(struct device *dev,
+               struct device_attribute *attr,
+               const char *buffer, size_t count)
 {
-       int offset;
-       acpi_status status;
-       u8 brlvl, i;
-       u8 min = 0xff, max = 0x00;
-       struct acpi_object_list params;
-       union acpi_object in_obj;
-       union acpi_object *lvl_enum;
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       unsigned int result, cmd;
+       unsigned long value;
 
-       props->handle = handle;
-       props->offset = 0;
-       props->maxlvl = 0xff;
+       if (count > 31)
+               return -EINVAL;
 
-       offset = sony_find_snc_handle(handle);
-       if (offset < 0)
-               return;
+       if (kstrtoul(buffer, 10, &value))
+               return -EINVAL;
 
-       /* try to read the boundaries from ACPI tables, if we fail the above
-        * defaults should be reasonable
+       /*  limit values (2 bits):
+        *  00 - none
+        *  01 - 80%
+        *  10 - 50%
+        *  11 - 100%
+        *
+        *  bit 0: 0 disable BCL, 1 enable BCL
+        *  bit 1: 1 tell to store the battery limit (see bits 6,7) too
+        *  bits 2,3: reserved
+        *  bits 4,5: store the limit into the EC
+        *  bits 6,7: store the limit into the battery
         */
-       params.count = 1;
-       params.pointer = &in_obj;
-       in_obj.type = ACPI_TYPE_INTEGER;
-       in_obj.integer.value = offset;
-       status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
-                       &buffer);
-       if (ACPI_FAILURE(status))
-               return;
-
-       lvl_enum = (union acpi_object *) buffer.pointer;
-       if (!lvl_enum) {
-               pr_err("No SN06 return object.");
-               return;
-       }
-       if (lvl_enum->type != ACPI_TYPE_BUFFER) {
-               pr_err("Invalid SN06 return object 0x%.2x\n",
-                      lvl_enum->type);
-               goto out_invalid;
-       }
 
-       /* the buffer lists brightness levels available, brightness levels are
-        * from 0 to 8 in the array, other values are used by ALS control.
+       /*
+        * handle 0x0115 should allow storing on battery too;
+        * handle 0x0136 same as 0x0115 + health status;
+        * handle 0x013f, same as 0x0136 but no storing on the battery
+        *
+        * Store only inside the EC for now, regardless the handle number
         */
-       for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) {
+       if (value == 0)
+               /* disable limits */
+               cmd = 0x0;
 
-               brlvl = *(lvl_enum->buffer.pointer + i);
-               dprintk("Brightness level: %d\n", brlvl);
+       else if (value <= 50)
+               cmd = 0x21;
 
-               if (!brlvl)
-                       break;
+       else if (value <= 80)
+               cmd = 0x11;
 
-               if (brlvl > max)
-                       max = brlvl;
-               if (brlvl < min)
-                       min = brlvl;
-       }
-       props->offset = min;
-       props->maxlvl = max;
-       dprintk("Brightness levels: min=%d max=%d\n", props->offset,
-                       props->maxlvl);
+       else if (value <= 100)
+               cmd = 0x31;
+
+       else
+               return -EINVAL;
+
+       if (sony_call_snc_handle(bcare_ctl->handle, (cmd << 0x10) | 0x0100,
+                               &result))
+               return -EIO;
 
-out_invalid:
-       kfree(buffer.pointer);
-       return;
+       return count;
 }
 
-static void sony_nc_backlight_setup(void)
+static ssize_t sony_nc_battery_care_limit_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
 {
-       acpi_handle unused;
-       int max_brightness = 0;
-       const struct backlight_ops *ops = NULL;
-       struct backlight_properties props;
+       unsigned int result, status;
 
-       if (sony_find_snc_handle(0x12f) != -1) {
-               ops = &sony_backlight_ng_ops;
-               sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
-               max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+       if (sony_call_snc_handle(bcare_ctl->handle, 0x0000, &result))
+               return -EIO;
 
-       } else if (sony_find_snc_handle(0x137) != -1) {
-               ops = &sony_backlight_ng_ops;
-               sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
-               max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+       status = (result & 0x01) ? ((result & 0x30) >> 0x04) : 0;
+       switch (status) {
+       case 1:
+               status = 80;
+               break;
+       case 2:
+               status = 50;
+               break;
+       case 3:
+               status = 100;
+               break;
+       default:
+               status = 0;
+               break;
+       }
+
+       return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+}
+
+static ssize_t sony_nc_battery_care_health_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+{
+       ssize_t count = 0;
+       unsigned int health;
+
+       if (sony_call_snc_handle(bcare_ctl->handle, 0x0200, &health))
+               return -EIO;
+
+       count = snprintf(buffer, PAGE_SIZE, "%d\n", health & 0xff);
+
+       return count;
+}
+
+static int sony_nc_battery_care_setup(struct platform_device *pd,
+               unsigned int handle)
+{
+       int ret = 0;
+
+       bcare_ctl = kzalloc(sizeof(struct battery_care_control), GFP_KERNEL);
+       if (!bcare_ctl)
+               return -ENOMEM;
+
+       bcare_ctl->handle = handle;
+
+       sysfs_attr_init(&bcare_ctl->attrs[0].attr);
+       bcare_ctl->attrs[0].attr.name = "battery_care_limiter";
+       bcare_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
+       bcare_ctl->attrs[0].show = sony_nc_battery_care_limit_show;
+       bcare_ctl->attrs[0].store = sony_nc_battery_care_limit_store;
+
+       ret = device_create_file(&pd->dev, &bcare_ctl->attrs[0]);
+       if (ret)
+               goto outkzalloc;
+
+       /* 0x0115 is for models with no health reporting capability */
+       if (handle == 0x0115)
+               return 0;
+
+       sysfs_attr_init(&bcare_ctl->attrs[1].attr);
+       bcare_ctl->attrs[1].attr.name = "battery_care_health";
+       bcare_ctl->attrs[1].attr.mode = S_IRUGO;
+       bcare_ctl->attrs[1].show = sony_nc_battery_care_health_show;
+
+       ret = device_create_file(&pd->dev, &bcare_ctl->attrs[1]);
+       if (ret)
+               goto outlimiter;
+
+       return 0;
+
+outlimiter:
+       device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
+
+outkzalloc:
+       kfree(bcare_ctl);
+       bcare_ctl = NULL;
+
+       return ret;
+}
+
+static void sony_nc_battery_care_cleanup(struct platform_device *pd)
+{
+       if (bcare_ctl) {
+               device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
+               if (bcare_ctl->handle != 0x0115)
+                       device_remove_file(&pd->dev, &bcare_ctl->attrs[1]);
+
+               kfree(bcare_ctl);
+               bcare_ctl = NULL;
+       }
+}
+
+struct snc_thermal_ctrl {
+       unsigned int mode;
+       unsigned int profiles;
+       struct device_attribute mode_attr;
+       struct device_attribute profiles_attr;
+};
+static struct snc_thermal_ctrl *th_handle;
+
+#define THM_PROFILE_MAX 3
+static const char * const snc_thermal_profiles[] = {
+       "balanced",
+       "silent",
+       "performance"
+};
+
+/* Write a new thermal profile to the EC (handle 0x0122, op 0x0200) and
+ * cache it in th_handle->mode.  Returns 0 on success, -EINVAL for a
+ * profile this model does not support, -EIO on EC failure.
+ */
+static int sony_nc_thermal_mode_set(unsigned short mode)
+{
+       unsigned int result;
+
+       /* the thermal profile seems to be a two bit bitmask:
+        * lsb -> silent
+        * msb -> performance
+        * no bit set is the normal operation and is always valid
+        * Some vaio models only have "balanced" and "performance"
+        */
+       if ((mode && !(th_handle->profiles & mode)) || mode >= THM_PROFILE_MAX)
+               return -EINVAL;
+
+       if (sony_call_snc_handle(0x0122, mode << 0x10 | 0x0200, &result))
+               return -EIO;
+
+       th_handle->mode = mode;
+
+       return 0;
+}
+
+/* Read the currently active thermal profile from the EC.
+ * Returns the profile value (low byte of the EC reply) or -EIO.
+ */
+static int sony_nc_thermal_mode_get(void)
+{
+       unsigned int result;
+
+       if (sony_call_snc_handle(0x0122, 0x0100, &result))
+               return -EIO;
+
+       return result & 0xff;
+}
+
+/* sysfs "thermal_profiles" show handler: space-separated list of the
+ * profiles this model supports; "balanced" (cnt == 0) is always listed.
+ */
+static ssize_t sony_nc_thermal_profiles_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+{
+       short cnt;
+       size_t idx = 0;
+
+       for (cnt = 0; cnt < THM_PROFILE_MAX; cnt++) {
+               /* NOTE(review): cnt is used both as array index and as
+                * profile bitmask (1 = silent, 2 = performance); the two
+                * happen to coincide for the three defined profiles */
+               if (!cnt || (th_handle->profiles & cnt))
+                       idx += snprintf(buffer + idx, PAGE_SIZE - idx, "%s ",
+                                       snc_thermal_profiles[cnt]);
+       }
+       idx += snprintf(buffer + idx, PAGE_SIZE - idx, "\n");
+
+       return idx;
+}
+
+/* sysfs "thermal_control" store handler: accept a profile name (with an
+ * optional trailing newline) and apply it via sony_nc_thermal_mode_set().
+ */
+static ssize_t sony_nc_thermal_mode_store(struct device *dev,
+               struct device_attribute *attr,
+               const char *buffer, size_t count)
+{
+       unsigned short cmd;
+       size_t len = count;
+       int ret;
+
+       if (count == 0)
+               return -EINVAL;
+
+       /* skip the newline if present */
+       if (buffer[len - 1] == '\n')
+               len--;
+
+       /* no match leaves cmd == THM_PROFILE_MAX, which mode_set rejects.
+        * NOTE(review): strncmp() with the user-supplied length accepts
+        * any prefix of a profile name (e.g. "sil") — confirm intended.
+        */
+       for (cmd = 0; cmd < THM_PROFILE_MAX; cmd++)
+               if (strncmp(buffer, snc_thermal_profiles[cmd], len) == 0)
+                       break;
+
+       /* propagate the real error code; returning -EIO here used to mask
+        * the -EINVAL that mode_set reports for an unsupported profile */
+       ret = sony_nc_thermal_mode_set(cmd);
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+/* sysfs "thermal_control" show handler: print the active profile name. */
+static ssize_t sony_nc_thermal_mode_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+{
+       ssize_t count = 0;
+       /* must be signed: sony_nc_thermal_mode_get() returns -EIO on
+        * failure, and with the previous 'unsigned int' the check below
+        * was always false, so an error value would have been used to
+        * index snc_thermal_profiles[] out of bounds.
+        */
+       int mode = sony_nc_thermal_mode_get();
+
+       if (mode < 0)
+               return mode;
+
+       count = snprintf(buffer, PAGE_SIZE, "%s\n", snc_thermal_profiles[mode]);
+
+       return count;
+}
+
+/* Probe the thermal-profile capability (handle 0x0122) and register the
+ * "thermal_profiles" (read-only) and "thermal_control" (read-write)
+ * sysfs attributes.  Returns 0 or a negative errno; on failure all
+ * partially-created state is torn down.
+ */
+static int sony_nc_thermal_setup(struct platform_device *pd)
+{
+       int ret = 0;
+       th_handle = kzalloc(sizeof(struct snc_thermal_ctrl), GFP_KERNEL);
+       if (!th_handle)
+               return -ENOMEM;
+
+       ret = sony_call_snc_handle(0x0122, 0x0000, &th_handle->profiles);
+       if (ret) {
+               /* message fixed: was "couldn't to read ..." */
+               pr_warn("couldn't read the thermal profiles\n");
+               goto outkzalloc;
+       }
+
+       ret = sony_nc_thermal_mode_get();
+       if (ret < 0) {
+               /* message fixed: grammar and missing newline */
+               pr_warn("couldn't read the current thermal profile\n");
+               goto outkzalloc;
+       }
+       th_handle->mode = ret;
+
+       sysfs_attr_init(&th_handle->profiles_attr.attr);
+       th_handle->profiles_attr.attr.name = "thermal_profiles";
+       th_handle->profiles_attr.attr.mode = S_IRUGO;
+       th_handle->profiles_attr.show = sony_nc_thermal_profiles_show;
+
+       sysfs_attr_init(&th_handle->mode_attr.attr);
+       th_handle->mode_attr.attr.name = "thermal_control";
+       th_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
+       th_handle->mode_attr.show = sony_nc_thermal_mode_show;
+       th_handle->mode_attr.store = sony_nc_thermal_mode_store;
+
+       ret = device_create_file(&pd->dev, &th_handle->profiles_attr);
+       if (ret)
+               goto outkzalloc;
+
+       ret = device_create_file(&pd->dev, &th_handle->mode_attr);
+       if (ret)
+               goto outprofiles;
+
+       return 0;
+
+outprofiles:
+       device_remove_file(&pd->dev, &th_handle->profiles_attr);
+outkzalloc:
+       kfree(th_handle);
+       th_handle = NULL;
+       return ret;
+}
+
+/* Remove both thermal sysfs attributes and free the control structure. */
+static void sony_nc_thermal_cleanup(struct platform_device *pd)
+{
+       if (th_handle) {
+               device_remove_file(&pd->dev, &th_handle->profiles_attr);
+               device_remove_file(&pd->dev, &th_handle->mode_attr);
+               kfree(th_handle);
+               th_handle = NULL;
+       }
+}
+
+/* Re-apply the cached thermal profile after resume if the EC lost it.
+ * 'status' must be signed: sony_nc_thermal_mode_get() returns -EIO on
+ * failure, which the previous 'unsigned int' silently converted into a
+ * large bogus profile value.  A negative status never matches the
+ * cached mode, so a failed read still forces a restore attempt (same
+ * effective behavior as before, but with the intent now explicit).
+ */
+static void sony_nc_thermal_resume(void)
+{
+       int status = sony_nc_thermal_mode_get();
+
+       if (status != th_handle->mode)
+               sony_nc_thermal_mode_set(th_handle->mode);
+}
+
+/* resume on LID open */
+struct snc_lid_resume_control {
+       struct device_attribute attrs[3];       /* lid_resume_S3/S4/S5 */
+       unsigned int status;            /* cached S3/S4/S5 enable bitmask */
+};
+static struct snc_lid_resume_control *lid_ctl;
+
+/* sysfs store handler shared by lid_resume_S3/S4/S5: accept "0"/"1",
+ * update the corresponding bit in the wake-on-lid-open bitmask and
+ * write the whole mask back to the EC (handle 0x0119, op 0x0100).
+ */
+static ssize_t sony_nc_lid_resume_store(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buffer, size_t count)
+{
+       unsigned int result, pos;
+       unsigned long value;
+       if (count > 31)
+               return -EINVAL;
+
+       if (kstrtoul(buffer, 10, &value) || value > 1)
+               return -EINVAL;
+
+       /* the value we have to write to SNC is a bitmask:
+        * +--------------+
+        * | S3 | S4 | S5 |
+        * +--------------+
+        *   2    1    0
+        */
+       if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
+               pos = 2;
+       else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
+               pos = 1;
+       else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
+               pos = 0;
+       else
+               return -EINVAL;
+
+       if (value)
+               value = lid_ctl->status | (1 << pos);
+       else
+               value = lid_ctl->status & ~(1 << pos);
+
+       if (sony_call_snc_handle(0x0119, value << 0x10 | 0x0100, &result))
+               return -EIO;
+
+       /* only cache the new mask once the EC accepted it */
+       lid_ctl->status = value;
+
+       return count;
+}
+
+/* sysfs show handler shared by lid_resume_S3/S4/S5: print the cached
+ * bit for the sleep state the attribute name refers to
+ * (bit 2 = S3, bit 1 = S4, bit 0 = S5).
+ */
+static ssize_t sony_nc_lid_resume_show(struct device *dev,
+                                      struct device_attribute *attr, char *buffer)
+{
+       unsigned int pos;
+
+       if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
+               pos = 2;
+       else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
+               pos = 1;
+       else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
+               pos = 0;
+       else
+               return -EINVAL;
+
+       return snprintf(buffer, PAGE_SIZE, "%d\n",
+                       (lid_ctl->status >> pos) & 0x01);
+}
+
+/* Probe the resume-on-lid-open capability (handle 0x0119) and register
+ * the lid_resume_S3/S4/S5 sysfs attributes.  Returns 0 or a negative
+ * errno; on failure everything created so far is removed.
+ */
+static int sony_nc_lid_resume_setup(struct platform_device *pd)
+{
+       unsigned int result;
+       int ret;
+       int i;
+
+       if (sony_call_snc_handle(0x0119, 0x0000, &result))
+               return -EIO;
+
+       lid_ctl = kzalloc(sizeof(struct snc_lid_resume_control), GFP_KERNEL);
+       if (!lid_ctl)
+               return -ENOMEM;
+
+       /* low three bits hold the S3/S4/S5 wake enables */
+       lid_ctl->status = result & 0x7;
+
+       sysfs_attr_init(&lid_ctl->attrs[0].attr);
+       lid_ctl->attrs[0].attr.name = "lid_resume_S3";
+       lid_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
+       lid_ctl->attrs[0].show = sony_nc_lid_resume_show;
+       lid_ctl->attrs[0].store = sony_nc_lid_resume_store;
+
+       sysfs_attr_init(&lid_ctl->attrs[1].attr);
+       lid_ctl->attrs[1].attr.name = "lid_resume_S4";
+       lid_ctl->attrs[1].attr.mode = S_IRUGO | S_IWUSR;
+       lid_ctl->attrs[1].show = sony_nc_lid_resume_show;
+       lid_ctl->attrs[1].store = sony_nc_lid_resume_store;
+
+       sysfs_attr_init(&lid_ctl->attrs[2].attr);
+       lid_ctl->attrs[2].attr.name = "lid_resume_S5";
+       lid_ctl->attrs[2].attr.mode = S_IRUGO | S_IWUSR;
+       lid_ctl->attrs[2].show = sony_nc_lid_resume_show;
+       lid_ctl->attrs[2].store = sony_nc_lid_resume_store;
+
+       /* keep the negative errno in a signed variable instead of
+        * round-tripping it through the unsigned 'result' */
+       for (i = 0; i < 3; i++) {
+               ret = device_create_file(&pd->dev, &lid_ctl->attrs[i]);
+               if (ret)
+                       goto liderror;
+       }
+
+       return 0;
+
+liderror:
+       /* attrs[i] itself was never created: unwind attrs[0]..attrs[i-1].
+        * The previous "for (; i > 0; i--)" removed the never-created
+        * attrs[i] and leaked attrs[0].
+        */
+       for (i--; i >= 0; i--)
+               device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
+
+       kfree(lid_ctl);
+       lid_ctl = NULL;
+
+       return ret;
+}
+
+/* Remove all three lid_resume_* attributes and free the control data. */
+static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
+{
+       int i;
+
+       if (lid_ctl) {
+               for (i = 0; i < 3; i++)
+                       device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
+
+               kfree(lid_ctl);
+               lid_ctl = NULL;
+       }
+}
+
+/* High speed charging function */
+/* single sysfs attribute, allocated on demand by the setup routine */
+static struct device_attribute *hsc_handle;
+
+/* sysfs store handler: accept "0"/"1" and write the high-speed-charging
+ * enable flag to the EC (handle 0x0131, op 0x0200).
+ */
+static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
+               struct device_attribute *attr,
+               const char *buffer, size_t count)
+{
+       unsigned int result;
+       unsigned long value;
+
+       if (count > 31)
+               return -EINVAL;
+
+       if (kstrtoul(buffer, 10, &value) || value > 1)
+               return -EINVAL;
+
+       if (sony_call_snc_handle(0x0131, value << 0x10 | 0x0200, &result))
+               return -EIO;
+
+       return count;
+}
+
+/* sysfs show handler: read the current high-speed-charging state
+ * (handle 0x0131, op 0x0100) and print its low bit.
+ */
+static ssize_t sony_nc_highspeed_charging_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+{
+       unsigned int result;
+
+       if (sony_call_snc_handle(0x0131, 0x0100, &result))
+               return -EIO;
+
+       return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
+}
+
+/* Probe the high-speed-charging capability (handle 0x0131) and, when the
+ * hardware really implements it, register the sysfs attribute.  Returns
+ * 0 (also when the feature is simply absent) or a negative errno.
+ */
+static int sony_nc_highspeed_charging_setup(struct platform_device *pd)
+{
+       unsigned int result;
+       int ret;
+
+       if (sony_call_snc_handle(0x0131, 0x0000, &result) || !(result & 0x01)) {
+               /* some models advertise the handle but have no implementation
+                * for it
+                */
+               pr_info("No High Speed Charging capability found\n");
+               return 0;
+       }
+
+       hsc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+       if (!hsc_handle)
+               return -ENOMEM;
+
+       sysfs_attr_init(&hsc_handle->attr);
+       hsc_handle->attr.name = "battery_highspeed_charging";
+       hsc_handle->attr.mode = S_IRUGO | S_IWUSR;
+       hsc_handle->show = sony_nc_highspeed_charging_show;
+       hsc_handle->store = sony_nc_highspeed_charging_store;
+
+       /* keep the (negative) errno in a signed variable instead of
+        * round-tripping it through the unsigned 'result' */
+       ret = device_create_file(&pd->dev, hsc_handle);
+       if (ret) {
+               kfree(hsc_handle);
+               hsc_handle = NULL;
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Remove the high-speed-charging attribute (if created) and free it. */
+static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
+{
+       if (hsc_handle) {
+               device_remove_file(&pd->dev, hsc_handle);
+               kfree(hsc_handle);
+               hsc_handle = NULL;
+       }
+}
+
+/* Touchpad enable/disable */
+struct touchpad_control {
+       struct device_attribute attr;
+       int handle;             /* SNC handle used for this model */
+};
+static struct touchpad_control *tp_ctl;
+
+/* sysfs store handler: accept "0"/"1" and toggle the touchpad.  Note
+ * the polarity inversion between sysfs and the EC (see comment below).
+ */
+static ssize_t sony_nc_touchpad_store(struct device *dev,
+               struct device_attribute *attr, const char *buffer, size_t count)
+{
+       unsigned int result;
+       unsigned long value;
+
+       if (count > 31)
+               return -EINVAL;
+
+       if (kstrtoul(buffer, 10, &value) || value > 1)
+               return -EINVAL;
+
+       /* sysfs: 0 disabled, 1 enabled
+        * EC: 0 enabled, 1 disabled
+        */
+       if (sony_call_snc_handle(tp_ctl->handle,
+                               (!value << 0x10) | 0x100, &result))
+               return -EIO;
+
+       return count;
+}
+
+/* sysfs show handler: print 1 when the touchpad is enabled, 0 when
+ * disabled (the EC stores the inverted value).
+ */
+static ssize_t sony_nc_touchpad_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+{
+       unsigned int result;
+
+       /* -EIO for an EC communication failure, consistent with the other
+        * SNC show handlers (this one used to return -EINVAL) */
+       if (sony_call_snc_handle(tp_ctl->handle, 0x000, &result))
+               return -EIO;
+
+       return snprintf(buffer, PAGE_SIZE, "%d\n", !(result & 0x01));
+}
+
+/* Register the "touchpad" sysfs attribute for the given SNC handle.
+ * Returns 0 or a negative errno; on failure the control data is freed.
+ */
+static int sony_nc_touchpad_setup(struct platform_device *pd,
+               unsigned int handle)
+{
+       int ret = 0;
+
+       tp_ctl = kzalloc(sizeof(struct touchpad_control), GFP_KERNEL);
+       if (!tp_ctl)
+               return -ENOMEM;
+
+       tp_ctl->handle = handle;
+
+       sysfs_attr_init(&tp_ctl->attr.attr);
+       tp_ctl->attr.attr.name = "touchpad";
+       tp_ctl->attr.attr.mode = S_IRUGO | S_IWUSR;
+       tp_ctl->attr.show = sony_nc_touchpad_show;
+       tp_ctl->attr.store = sony_nc_touchpad_store;
+
+       ret = device_create_file(&pd->dev, &tp_ctl->attr);
+       if (ret) {
+               kfree(tp_ctl);
+               tp_ctl = NULL;
+       }
+
+       return ret;
+}
+
+/* Remove the "touchpad" attribute (if created) and free its data. */
+static void sony_nc_touchpad_cleanup(struct platform_device *pd)
+{
+       if (tp_ctl) {
+               device_remove_file(&pd->dev, &tp_ctl->attr);
+               kfree(tp_ctl);
+               tp_ctl = NULL;
+       }
+}
+
+/* Read the brightness limits for the "new generation" backlight handle
+ * from the ACPI tables; props is pre-filled with safe defaults that are
+ * kept whenever any lookup fails.
+ */
+static void sony_nc_backlight_ng_read_limits(int handle,
+               struct sony_backlight_props *props)
+{
+       u64 offset;
+       int sc_offset;
+       int i;
+       u8 min = 0xff, max = 0x00;
+       unsigned char buffer[32] = { 0 };
+
+       props->handle = handle;
+       props->offset = 0;
+       props->maxlvl = 0xff;
+
+       /* sony_find_snc_handle() reports failure with a negative value;
+        * the previous "u64 offset; if (offset < 0)" test could never be
+        * true on the unsigned type, so the error went undetected.  Check
+        * in a signed variable first, then widen for the buffer call.
+        */
+       sc_offset = sony_find_snc_handle(handle);
+       if (sc_offset < 0)
+               return;
+       offset = sc_offset;
+
+       /* try to read the boundaries from ACPI tables, if we fail the above
+        * defaults should be reasonable
+        */
+       i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
+                       32);
+       if (i < 0)
+               return;
+
+       /* the buffer lists brightness levels available, brightness levels are
+        * from position 0 to 8 in the array, other values are used by ALS
+        * control.
+        */
+       for (i = 0; i < 9 && i < ARRAY_SIZE(buffer); i++) {
+
+               dprintk("Brightness level: %d\n", buffer[i]);
+
+               if (!buffer[i])
+                       break;
+
+               if (buffer[i] > max)
+                       max = buffer[i];
+               if (buffer[i] < min)
+                       min = buffer[i];
+       }
+       props->offset = min;
+       props->maxlvl = max;
+       dprintk("Brightness levels: min=%d max=%d\n", props->offset,
+                       props->maxlvl);
+}
+
+static void sony_nc_backlight_setup(void)
+{
+       acpi_handle unused;
+       int max_brightness = 0;
+       const struct backlight_ops *ops = NULL;
+       struct backlight_properties props;
+
+       if (sony_find_snc_handle(0x12f) != -1) {
+               ops = &sony_backlight_ng_ops;
+               sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
+               max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
+       } else if (sony_find_snc_handle(0x137) != -1) {
+               ops = &sony_backlight_ng_ops;
+               sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
+               max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
 
        } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
                                                &unused))) {
@@ -1715,28 +2599,25 @@ static int sony_nc_add(struct acpi_device *device)
 
        if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
                                         &handle))) {
-               if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
+               int arg = 1;
+               if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
                        dprintk("ECON Method failed\n");
        }
 
        if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
                                         &handle))) {
                dprintk("Doing SNC setup\n");
+               /* retrieve the available handles */
                result = sony_nc_handles_setup(sony_pf_device);
-               if (result)
-                       goto outpresent;
-               result = sony_nc_kbd_backlight_setup(sony_pf_device);
-               if (result)
-                       goto outsnc;
-               sony_nc_function_setup(device);
-               sony_nc_rfkill_setup(device);
+               if (!result)
+                       sony_nc_function_setup(device, sony_pf_device);
        }
 
        /* setup input devices and helper fifo */
        result = sony_laptop_setup_input(device);
        if (result) {
                pr_err("Unable to create input devices\n");
-               goto outkbdbacklight;
+               goto outsnc;
        }
 
        if (acpi_video_backlight_support()) {
@@ -1794,10 +2675,8 @@ static int sony_nc_add(struct acpi_device *device)
 
        sony_laptop_remove_input();
 
-      outkbdbacklight:
-       sony_nc_kbd_backlight_cleanup(sony_pf_device);
-
       outsnc:
+       sony_nc_function_cleanup(sony_pf_device);
        sony_nc_handles_cleanup(sony_pf_device);
 
       outpresent:
@@ -1820,11 +2699,10 @@ static int sony_nc_remove(struct acpi_device *device, int type)
                device_remove_file(&sony_pf_device->dev, &item->devattr);
        }
 
-       sony_nc_kbd_backlight_cleanup(sony_pf_device);
+       sony_nc_function_cleanup(sony_pf_device);
        sony_nc_handles_cleanup(sony_pf_device);
        sony_pf_remove();
        sony_laptop_remove_input();
-       sony_nc_rfkill_cleanup();
        dprintk(SONY_NC_DRIVER_NAME " removed.\n");
 
        return 0;
@@ -2437,7 +3315,9 @@ static ssize_t sony_pic_wwanpower_store(struct device *dev,
        if (count > 31)
                return -EINVAL;
 
-       value = simple_strtoul(buffer, NULL, 10);
+       if (kstrtoul(buffer, 10, &value))
+               return -EINVAL;
+
        mutex_lock(&spic_dev.lock);
        __sony_pic_set_wwanpower(value);
        mutex_unlock(&spic_dev.lock);
@@ -2474,7 +3354,9 @@ static ssize_t sony_pic_bluetoothpower_store(struct device *dev,
        if (count > 31)
                return -EINVAL;
 
-       value = simple_strtoul(buffer, NULL, 10);
+       if (kstrtoul(buffer, 10, &value))
+               return -EINVAL;
+
        mutex_lock(&spic_dev.lock);
        __sony_pic_set_bluetoothpower(value);
        mutex_unlock(&spic_dev.lock);
@@ -2513,7 +3395,9 @@ static ssize_t sony_pic_fanspeed_store(struct device *dev,
        if (count > 31)
                return -EINVAL;
 
-       value = simple_strtoul(buffer, NULL, 10);
+       if (kstrtoul(buffer, 10, &value))
+               return -EINVAL;
+
        if (sony_pic_set_fanspeed(value))
                return -EIO;
 
@@ -2671,7 +3555,8 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
                        ret = -EIO;
                        break;
                }
-               if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) {
+               if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL,
+                                       &value)) {
                        ret = -EIO;
                        break;
                }
@@ -2688,8 +3573,9 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
                        ret = -EFAULT;
                        break;
                }
-               if (acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
-                               (val8 >> 5) + 1, NULL)) {
+               value = (val8 >> 5) + 1;
+               if (sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &value,
+                                       NULL)) {
                        ret = -EIO;
                        break;
                }
index d68c0002f4a29c451116422b4693ab2e330f7160..8b5610d884186b63b693b6b7aee2ceb705fc47ee 100644 (file)
@@ -3402,7 +3402,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
        /* Do not issue duplicate brightness change events to
         * userspace. tpacpi_detect_brightness_capabilities() must have
         * been called before this point  */
-       if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
+       if (acpi_video_backlight_support()) {
                pr_info("This ThinkPad has standard ACPI backlight "
                        "brightness control, supported by the ACPI "
                        "video driver\n");
index ee79ce64d9dfbdb8a99061f59232325b8c57f6d3..dab10f6edcd43186e2af9a397d909a0ce607694f 100644 (file)
@@ -95,6 +95,7 @@ MODULE_LICENSE("GPL");
 
 /* registers */
 #define HCI_FAN                                0x0004
+#define HCI_TR_BACKLIGHT               0x0005
 #define HCI_SYSTEM_EVENT               0x0016
 #define HCI_VIDEO_OUT                  0x001c
 #define HCI_HOTKEY_EVENT               0x001e
@@ -134,6 +135,7 @@ struct toshiba_acpi_dev {
        unsigned int system_event_supported:1;
        unsigned int ntfy_supported:1;
        unsigned int info_supported:1;
+       unsigned int tr_backlight_supported:1;
 
        struct mutex mutex;
 };
@@ -478,34 +480,70 @@ static const struct rfkill_ops toshiba_rfk_ops = {
        .poll = bt_rfkill_poll,
 };
 
+/* Read the transflective backlight state via HCI; *enabled is true when
+ * the backlight is on (the register stores the inverted value).
+ * Returns 0 on success or -EIO.
+ */
+static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
+{
+       u32 hci_result;
+       u32 status;
+
+       hci_read1(dev, HCI_TR_BACKLIGHT, &status, &hci_result);
+       *enabled = !status;
+       return hci_result == HCI_SUCCESS ? 0 : -EIO;
+}
+
+/* Write the transflective backlight state via HCI (value is inverted
+ * before writing, mirroring get_tr_backlight_status()).
+ * Returns 0 on success or -EIO.
+ */
+static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
+{
+       u32 hci_result;
+       u32 value = !enable;
+
+       hci_write1(dev, HCI_TR_BACKLIGHT, value, &hci_result);
+       return hci_result == HCI_SUCCESS ? 0 : -EIO;
+}
+
 static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
 
-static int get_lcd(struct backlight_device *bd)
+static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
 {
-       struct toshiba_acpi_dev *dev = bl_get_data(bd);
        u32 hci_result;
        u32 value;
+       int brightness = 0;
+
+       if (dev->tr_backlight_supported) {
+               bool enabled;
+               int ret = get_tr_backlight_status(dev, &enabled);
+               if (ret)
+                       return ret;
+               if (enabled)
+                       return 0;
+               brightness++;
+       }
 
        hci_read1(dev, HCI_LCD_BRIGHTNESS, &value, &hci_result);
        if (hci_result == HCI_SUCCESS)
-               return (value >> HCI_LCD_BRIGHTNESS_SHIFT);
+               return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
 
        return -EIO;
 }
 
+/* backlight_ops.get_brightness hook: thin wrapper that recovers the
+ * driver data from the backlight device.
+ */
+static int get_lcd_brightness(struct backlight_device *bd)
+{
+       struct toshiba_acpi_dev *dev = bl_get_data(bd);
+       return __get_lcd_brightness(dev);
+}
+
 static int lcd_proc_show(struct seq_file *m, void *v)
 {
        struct toshiba_acpi_dev *dev = m->private;
        int value;
+       int levels;
 
        if (!dev->backlight_dev)
                return -ENODEV;
 
-       value = get_lcd(dev->backlight_dev);
+       levels = dev->backlight_dev->props.max_brightness + 1;
+       value = get_lcd_brightness(dev->backlight_dev);
        if (value >= 0) {
                seq_printf(m, "brightness:              %d\n", value);
-               seq_printf(m, "brightness_levels:       %d\n",
-                            HCI_LCD_BRIGHTNESS_LEVELS);
+               seq_printf(m, "brightness_levels:       %d\n", levels);
                return 0;
        }
 
@@ -518,10 +556,19 @@ static int lcd_proc_open(struct inode *inode, struct file *file)
        return single_open(file, lcd_proc_show, PDE(inode)->data);
 }
 
-static int set_lcd(struct toshiba_acpi_dev *dev, int value)
+static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
 {
        u32 hci_result;
 
+       if (dev->tr_backlight_supported) {
+               bool enable = !value;
+               int ret = set_tr_backlight_status(dev, enable);
+               if (ret)
+                       return ret;
+               if (value)
+                       value--;
+       }
+
        value = value << HCI_LCD_BRIGHTNESS_SHIFT;
        hci_write1(dev, HCI_LCD_BRIGHTNESS, value, &hci_result);
        return hci_result == HCI_SUCCESS ? 0 : -EIO;
@@ -530,7 +577,7 @@ static int set_lcd(struct toshiba_acpi_dev *dev, int value)
 static int set_lcd_status(struct backlight_device *bd)
 {
        struct toshiba_acpi_dev *dev = bl_get_data(bd);
-       return set_lcd(dev, bd->props.brightness);
+       return set_lcd_brightness(dev, bd->props.brightness);
 }
 
 static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
@@ -541,6 +588,7 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
        size_t len;
        int value;
        int ret;
+       int levels = dev->backlight_dev->props.max_brightness + 1;
 
        len = min(count, sizeof(cmd) - 1);
        if (copy_from_user(cmd, buf, len))
@@ -548,8 +596,8 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
        cmd[len] = '\0';
 
        if (sscanf(cmd, " brightness : %i", &value) == 1 &&
-           value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) {
-               ret = set_lcd(dev, value);
+           value >= 0 && value < levels) {
+               ret = set_lcd_brightness(dev, value);
                if (ret == 0)
                        ret = count;
        } else {
@@ -860,8 +908,9 @@ static void remove_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
 }
 
 static const struct backlight_ops toshiba_backlight_data = {
-        .get_brightness = get_lcd,
-        .update_status  = set_lcd_status,
+       .options = BL_CORE_SUSPENDRESUME,
+       .get_brightness = get_lcd_brightness,
+       .update_status  = set_lcd_status,
 };
 
 static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
@@ -1020,6 +1069,56 @@ static int __devinit toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
        return error;
 }
 
+/* Register the backlight device, but only when the firmware supports
+ * both reading and writing the brightness; read-only or absent methods
+ * are skipped (return 0) rather than treated as errors.
+ */
+static int __devinit toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
+{
+       struct backlight_properties props;
+       int brightness;
+       int ret;
+       bool enabled;
+
+       /*
+        * Some machines don't support the backlight methods at all, and
+        * others support it read-only. Either of these is pretty useless,
+        * so only register the backlight device if the backlight method
+        * supports both reads and writes.
+        */
+       brightness = __get_lcd_brightness(dev);
+       if (brightness < 0)
+               return 0;
+       /* write back the value we just read to probe for write support */
+       ret = set_lcd_brightness(dev, brightness);
+       if (ret) {
+               pr_debug("Backlight method is read-only, disabling backlight support\n");
+               return 0;
+       }
+
+       /* Determine whether or not BIOS supports transflective backlight */
+       ret = get_tr_backlight_status(dev, &enabled);
+       dev->tr_backlight_supported = !ret;
+
+       memset(&props, 0, sizeof(props));
+       props.type = BACKLIGHT_PLATFORM;
+       props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
+
+       /* adding an extra level and having 0 change to transflective mode */
+       if (dev->tr_backlight_supported)
+               props.max_brightness++;
+
+       dev->backlight_dev = backlight_device_register("toshiba",
+                                                      &dev->acpi_dev->dev,
+                                                      dev,
+                                                      &toshiba_backlight_data,
+                                                      &props);
+       if (IS_ERR(dev->backlight_dev)) {
+               ret = PTR_ERR(dev->backlight_dev);
+               pr_err("Could not register toshiba backlight device\n");
+               dev->backlight_dev = NULL;
+               return ret;
+       }
+
+       dev->backlight_dev->props.brightness = brightness;
+       return 0;
+}
+
 static int toshiba_acpi_remove(struct acpi_device *acpi_dev, int type)
 {
        struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
@@ -1078,7 +1177,6 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
        u32 dummy;
        bool bt_present;
        int ret = 0;
-       struct backlight_properties props;
 
        if (toshiba_acpi)
                return -EBUSY;
@@ -1104,21 +1202,9 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
 
        mutex_init(&dev->mutex);
 
-       props.type = BACKLIGHT_PLATFORM;
-       props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
-       dev->backlight_dev = backlight_device_register("toshiba",
-                                                      &acpi_dev->dev,
-                                                      dev,
-                                                      &toshiba_backlight_data,
-                                                      &props);
-       if (IS_ERR(dev->backlight_dev)) {
-               ret = PTR_ERR(dev->backlight_dev);
-
-               pr_err("Could not register toshiba backlight device\n");
-               dev->backlight_dev = NULL;
+       ret = toshiba_acpi_setup_backlight(dev);
+       if (ret)
                goto error;
-       }
-       dev->backlight_dev->props.brightness = get_lcd(dev->backlight_dev);
 
        /* Register rfkill switch for Bluetooth */
        if (hci_get_bt_present(dev, &bt_present) == HCI_SUCCESS && bt_present) {
index 41781ed8301c737018494bb30a19677640cc9018..b57ad8641480424b660f4d447acebdcf9ec9a2b4 100644 (file)
 
 #include <asm/olpc.h>
 
+static bool card_blocked;
+
 static int rfkill_set_block(void *data, bool blocked)
 {
        unsigned char cmd;
+       int r;
+
+       if (blocked == card_blocked)
+               return 0;
+
        if (blocked)
                cmd = EC_WLAN_ENTER_RESET;
        else
                cmd = EC_WLAN_LEAVE_RESET;
 
-       return olpc_ec_cmd(cmd, NULL, 0, NULL, 0);
+       r = olpc_ec_cmd(cmd, NULL, 0, NULL, 0);
+       if (r == 0)
+               card_blocked = blocked;
+
+       return r;
 }
 
 static const struct rfkill_ops rfkill_ops = {
index 99dc29f2f2f2ba84b16430a51548817b094b9c3a..e3a3b4956f08408741fe5856a562b8defe7294a1 100644 (file)
@@ -1,5 +1,5 @@
 menuconfig POWER_SUPPLY
-       tristate "Power supply class support"
+       bool "Power supply class support"
        help
          Say Y here to enable power supply class support. This allows
          power supply (batteries, AC, USB) monitoring by userspace
@@ -77,7 +77,7 @@ config BATTERY_DS2780
          Say Y here to enable support for batteries with ds2780 chip.
 
 config BATTERY_DS2781
-       tristate "2781 battery driver"
+       tristate "DS2781 battery driver"
        depends on HAS_IOMEM
        select W1
        select W1_SLAVE_DS2781
@@ -181,14 +181,15 @@ config BATTERY_MAX17040
          to operate with a single lithium cell
 
 config BATTERY_MAX17042
-       tristate "Maxim MAX17042/8997/8966 Fuel Gauge"
+       tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
        depends on I2C
        help
          MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries
          in handheld and portable equipment. The MAX17042 is configured
          to operate with a single lithium cell. MAX8997 and MAX8966 are
          multi-function devices that include fuel gauages that are compatible
-         with MAX17042.
+         with MAX17042. This driver also supports max17047/50 chips which are
+         improved version of max17042.
 
 config BATTERY_Z2
        tristate "Z2 battery driver"
@@ -291,6 +292,7 @@ config CHARGER_MAX8998
 config CHARGER_SMB347
        tristate "Summit Microelectronics SMB347 Battery Charger"
        depends on I2C
+       select REGMAP_I2C
        help
          Say Y to include support for Summit Microelectronics SMB347
          Battery Charger.
index d8bb99394ac01c1e6ebb289698dea0f8d3426cd8..bba3ccac72fe731a6807e211af9171a3204ce8c8 100644 (file)
@@ -964,10 +964,15 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
 {
        int irq, i, ret = 0;
        u8 val;
-       struct abx500_bm_plat_data *plat_data;
+       struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+       struct ab8500_btemp *di;
+
+       if (!plat_data) {
+               dev_err(&pdev->dev, "No platform data\n");
+               return -EINVAL;
+       }
 
-       struct ab8500_btemp *di =
-               kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL);
+       di = kzalloc(sizeof(*di), GFP_KERNEL);
        if (!di)
                return -ENOMEM;
 
@@ -977,7 +982,6 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
        di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
 
        /* get btemp specific platform data */
-       plat_data = pdev->dev.platform_data;
        di->pdata = plat_data->btemp;
        if (!di->pdata) {
                dev_err(di->dev, "no btemp platform data supplied\n");
index e2b4accbec8815782ad1bf659b90e40196d31b63..d2303d0b7c755669f7ab48b968e062474241997b 100644 (file)
@@ -2534,10 +2534,15 @@ static int __devexit ab8500_charger_remove(struct platform_device *pdev)
 static int __devinit ab8500_charger_probe(struct platform_device *pdev)
 {
        int irq, i, charger_status, ret = 0;
-       struct abx500_bm_plat_data *plat_data;
+       struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+       struct ab8500_charger *di;
 
-       struct ab8500_charger *di =
-               kzalloc(sizeof(struct ab8500_charger), GFP_KERNEL);
+       if (!plat_data) {
+               dev_err(&pdev->dev, "No platform data\n");
+               return -EINVAL;
+       }
+
+       di = kzalloc(sizeof(*di), GFP_KERNEL);
        if (!di)
                return -ENOMEM;
 
@@ -2550,9 +2555,7 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
        spin_lock_init(&di->usb_state.usb_lock);
 
        /* get charger specific platform data */
-       plat_data = pdev->dev.platform_data;
        di->pdata = plat_data->charger;
-
        if (!di->pdata) {
                dev_err(di->dev, "no charger platform data supplied\n");
                ret = -EINVAL;
index c22f2f05657e28d249d619d9a01aaab8a3095bb6..bf022255994c86b3d3486e27e1edcc905c7246f4 100644 (file)
@@ -2446,10 +2446,15 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
 {
        int i, irq;
        int ret = 0;
-       struct abx500_bm_plat_data *plat_data;
+       struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+       struct ab8500_fg *di;
+
+       if (!plat_data) {
+               dev_err(&pdev->dev, "No platform data\n");
+               return -EINVAL;
+       }
 
-       struct ab8500_fg *di =
-               kzalloc(sizeof(struct ab8500_fg), GFP_KERNEL);
+       di = kzalloc(sizeof(*di), GFP_KERNEL);
        if (!di)
                return -ENOMEM;
 
@@ -2461,7 +2466,6 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
        di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
 
        /* get fg specific platform data */
-       plat_data = pdev->dev.platform_data;
        di->pdata = plat_data->fg;
        if (!di->pdata) {
                dev_err(di->dev, "no fg platform data supplied\n");
index 9eca9f1ff0eae2e5b381e503f035ceb446340e66..86935ec1895431aac77c47de1860711ae26e21eb 100644 (file)
 #include <linux/power/charger-manager.h>
 #include <linux/regulator/consumer.h>
 
+static const char * const default_event_names[] = {
+       [CM_EVENT_UNKNOWN] = "Unknown",
+       [CM_EVENT_BATT_FULL] = "Battery Full",
+       [CM_EVENT_BATT_IN] = "Battery Inserted",
+       [CM_EVENT_BATT_OUT] = "Battery Pulled Out",
+       [CM_EVENT_EXT_PWR_IN_OUT] = "External Power Attach/Detach",
+       [CM_EVENT_CHG_START_STOP] = "Charging Start/Stop",
+       [CM_EVENT_OTHERS] = "Other battery events"
+};
+
 /*
  * Regard CM_JIFFIES_SMALL jiffies is small enough to ignore for
  * delayed works so that we can run delayed works with CM_JIFFIES_SMALL
@@ -57,6 +67,12 @@ static bool cm_suspended;
 static bool cm_rtc_set;
 static unsigned long cm_suspend_duration_ms;
 
+/* About normal (not suspended) monitoring */
+static unsigned long polling_jiffy = ULONG_MAX; /* ULONG_MAX: no polling */
+static unsigned long next_polling; /* Next appointed polling time */
+static struct workqueue_struct *cm_wq; /* init at driver add */
+static struct delayed_work cm_monitor_work; /* init at driver add */
+
 /* Global charger-manager description */
 static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
 
@@ -71,6 +87,11 @@ static bool is_batt_present(struct charger_manager *cm)
        int i, ret;
 
        switch (cm->desc->battery_present) {
+       case CM_BATTERY_PRESENT:
+               present = true;
+               break;
+       case CM_NO_BATTERY:
+               break;
        case CM_FUEL_GAUGE:
                ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
                                POWER_SUPPLY_PROP_PRESENT, &val);
@@ -278,6 +299,26 @@ static int try_charger_enable(struct charger_manager *cm, bool enable)
        return err;
 }
 
+/**
+ * try_charger_restart - Restart charging.
+ * @cm: the Charger Manager representing the battery.
+ *
+ * Restart charging by turning off and on the charger.
+ */
+static int try_charger_restart(struct charger_manager *cm)
+{
+       int err;
+
+       if (cm->emergency_stop)
+               return -EAGAIN;
+
+       err = try_charger_enable(cm, false);
+       if (err)
+               return err;
+
+       return try_charger_enable(cm, true);
+}
+
 /**
  * uevent_notify - Let users know something has changed.
  * @cm: the Charger Manager representing the battery.
@@ -333,6 +374,46 @@ static void uevent_notify(struct charger_manager *cm, const char *event)
        dev_info(cm->dev, event);
 }
 
+/**
+ * fullbatt_vchk - Check voltage drop some times after "FULL" event.
+ * @work: the work_struct appointing the function
+ *
+ * If a user has designated "fullbatt_vchkdrop_ms/uV" values with
+ * charger_desc, Charger Manager checks voltage drop after the battery
+ * "FULL" event. It checks whether the voltage has dropped more than
+ * fullbatt_vchkdrop_uV by calling this function after fullbatt_vchkdrop_ms.
+ */
+static void fullbatt_vchk(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct charger_manager *cm = container_of(dwork,
+                       struct charger_manager, fullbatt_vchk_work);
+       struct charger_desc *desc = cm->desc;
+       int batt_uV, err, diff;
+
+       /* remove the appointment for fullbatt_vchk */
+       cm->fullbatt_vchk_jiffies_at = 0;
+
+       if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
+               return;
+
+       err = get_batt_uV(cm, &batt_uV);
+       if (err) {
+               dev_err(cm->dev, "%s: get_batt_uV error(%d).\n", __func__, err);
+               return;
+       }
+
+       diff = cm->fullbatt_vchk_uV;
+       diff -= batt_uV;
+
+       dev_dbg(cm->dev, "VBATT dropped %duV after full-batt.\n", diff);
+
+       if (diff > desc->fullbatt_vchkdrop_uV) {
+               try_charger_restart(cm);
+               uevent_notify(cm, "Recharge");
+       }
+}
+
 /**
  * _cm_monitor - Monitor the temperature and return true for exceptions.
  * @cm: the Charger Manager representing the battery.
@@ -392,6 +473,131 @@ static bool cm_monitor(void)
        return stop;
 }
 
+/**
+ * _setup_polling - Setup the next instance of polling.
+ * @work: work_struct of the function _setup_polling.
+ */
+static void _setup_polling(struct work_struct *work)
+{
+       unsigned long min = ULONG_MAX;
+       struct charger_manager *cm;
+       bool keep_polling = false;
+       unsigned long _next_polling;
+
+       mutex_lock(&cm_list_mtx);
+
+       list_for_each_entry(cm, &cm_list, entry) {
+               if (is_polling_required(cm) && cm->desc->polling_interval_ms) {
+                       keep_polling = true;
+
+                       if (min > cm->desc->polling_interval_ms)
+                               min = cm->desc->polling_interval_ms;
+               }
+       }
+
+       polling_jiffy = msecs_to_jiffies(min);
+       if (polling_jiffy <= CM_JIFFIES_SMALL)
+               polling_jiffy = CM_JIFFIES_SMALL + 1;
+
+       if (!keep_polling)
+               polling_jiffy = ULONG_MAX;
+       if (polling_jiffy == ULONG_MAX)
+               goto out;
+
+       WARN(cm_wq == NULL, "charger-manager: workqueue not initialized"
+                           ". try it later. %s\n", __func__);
+
+       _next_polling = jiffies + polling_jiffy;
+
+       if (!delayed_work_pending(&cm_monitor_work) ||
+           (delayed_work_pending(&cm_monitor_work) &&
+            time_after(next_polling, _next_polling))) {
+               cancel_delayed_work_sync(&cm_monitor_work);
+               next_polling = jiffies + polling_jiffy;
+               queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
+       }
+
+out:
+       mutex_unlock(&cm_list_mtx);
+}
+static DECLARE_WORK(setup_polling, _setup_polling);
+
+/**
+ * cm_monitor_poller - The Monitor / Poller.
+ * @work: work_struct of the function cm_monitor_poller
+ *
+ * During non-suspended state, cm_monitor_poller is used to poll and monitor
+ * the batteries.
+ */
+static void cm_monitor_poller(struct work_struct *work)
+{
+       cm_monitor();
+       schedule_work(&setup_polling);
+}
+
+/**
+ * fullbatt_handler - Event handler for CM_EVENT_BATT_FULL
+ * @cm: the Charger Manager representing the battery.
+ */
+static void fullbatt_handler(struct charger_manager *cm)
+{
+       struct charger_desc *desc = cm->desc;
+
+       if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
+               goto out;
+
+       if (cm_suspended)
+               device_set_wakeup_capable(cm->dev, true);
+
+       if (delayed_work_pending(&cm->fullbatt_vchk_work))
+               cancel_delayed_work(&cm->fullbatt_vchk_work);
+       queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+                          msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
+       cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
+                                      desc->fullbatt_vchkdrop_ms);
+
+       if (cm->fullbatt_vchk_jiffies_at == 0)
+               cm->fullbatt_vchk_jiffies_at = 1;
+
+out:
+       dev_info(cm->dev, "EVENT_HANDLE: Battery Fully Charged.\n");
+       uevent_notify(cm, default_event_names[CM_EVENT_BATT_FULL]);
+}
+
+/**
+ * battout_handler - Event handler for CM_EVENT_BATT_OUT
+ * @cm: the Charger Manager representing the battery.
+ */
+static void battout_handler(struct charger_manager *cm)
+{
+       if (cm_suspended)
+               device_set_wakeup_capable(cm->dev, true);
+
+       if (!is_batt_present(cm)) {
+               dev_emerg(cm->dev, "Battery Pulled Out!\n");
+               uevent_notify(cm, default_event_names[CM_EVENT_BATT_OUT]);
+       } else {
+               uevent_notify(cm, "Battery Reinserted?");
+       }
+}
+
+/**
+ * misc_event_handler - Handler for other events
+ * @cm: the Charger Manager representing the battery.
+ * @type: the type of charger event
+ */
+static void misc_event_handler(struct charger_manager *cm,
+                       enum cm_event_types type)
+{
+       if (cm_suspended)
+               device_set_wakeup_capable(cm->dev, true);
+
+       if (!delayed_work_pending(&cm_monitor_work) &&
+           is_polling_required(cm) && cm->desc->polling_interval_ms)
+               schedule_work(&setup_polling);
+       uevent_notify(cm, default_event_names[type]);
+}
+
 static int charger_get_property(struct power_supply *psy,
                enum power_supply_property psp,
                union power_supply_propval *val)
@@ -613,6 +819,21 @@ static bool cm_setup_timer(void)
        mutex_lock(&cm_list_mtx);
 
        list_for_each_entry(cm, &cm_list, entry) {
+               unsigned int fbchk_ms = 0;
+
+               /* fullbatt_vchk is required. setup timer for that */
+               if (cm->fullbatt_vchk_jiffies_at) {
+                       fbchk_ms = jiffies_to_msecs(cm->fullbatt_vchk_jiffies_at
+                                                   - jiffies);
+                       if (time_is_before_eq_jiffies(
+                               cm->fullbatt_vchk_jiffies_at) ||
+                               msecs_to_jiffies(fbchk_ms) < CM_JIFFIES_SMALL) {
+                               fullbatt_vchk(&cm->fullbatt_vchk_work.work);
+                               fbchk_ms = 0;
+                       }
+               }
+               CM_MIN_VALID(wakeup_ms, fbchk_ms);
+
                /* Skip if polling is not required for this CM */
                if (!is_polling_required(cm) && !cm->emergency_stop)
                        continue;
@@ -672,6 +893,23 @@ static bool cm_setup_timer(void)
        return false;
 }
 
+static void _cm_fbchk_in_suspend(struct charger_manager *cm)
+{
+       unsigned long jiffy_now = jiffies;
+
+       if (!cm->fullbatt_vchk_jiffies_at)
+               return;
+
+       if (g_desc && g_desc->assume_timer_stops_in_suspend)
+               jiffy_now += msecs_to_jiffies(cm_suspend_duration_ms);
+
+       /* Execute now if it's going to be executed not too long after */
+       jiffy_now += CM_JIFFIES_SMALL;
+
+       if (time_after_eq(jiffy_now, cm->fullbatt_vchk_jiffies_at))
+               fullbatt_vchk(&cm->fullbatt_vchk_work.work);
+}
+
 /**
  * cm_suspend_again - Determine whether suspend again or not
  *
@@ -693,6 +931,8 @@ bool cm_suspend_again(void)
        ret = true;
        mutex_lock(&cm_list_mtx);
        list_for_each_entry(cm, &cm_list, entry) {
+               _cm_fbchk_in_suspend(cm);
+
                if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) ||
                    cm->status_save_batt != is_batt_present(cm)) {
                        ret = false;
@@ -796,6 +1036,21 @@ static int charger_manager_probe(struct platform_device *pdev)
        memcpy(cm->desc, desc, sizeof(struct charger_desc));
        cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */
 
+       /*
+        * The following two do not need to be errors.
+        * Users may intentionally ignore those two features.
+        */
+       if (desc->fullbatt_uV == 0) {
+               dev_info(&pdev->dev, "Ignoring full-battery voltage threshold"
+                                       " as it is not supplied.");
+       }
+       if (!desc->fullbatt_vchkdrop_ms || !desc->fullbatt_vchkdrop_uV) {
+               dev_info(&pdev->dev, "Disabling full-battery voltage drop "
+                               "checking mechanism as it is not supplied.");
+               desc->fullbatt_vchkdrop_ms = 0;
+               desc->fullbatt_vchkdrop_uV = 0;
+       }
+
        if (!desc->charger_regulators || desc->num_charger_regulators < 1) {
                ret = -EINVAL;
                dev_err(&pdev->dev, "charger_regulators undefined.\n");
@@ -903,6 +1158,8 @@ static int charger_manager_probe(struct platform_device *pdev)
                cm->charger_psy.num_properties++;
        }
 
+       INIT_DELAYED_WORK(&cm->fullbatt_vchk_work, fullbatt_vchk);
+
        ret = power_supply_register(NULL, &cm->charger_psy);
        if (ret) {
                dev_err(&pdev->dev, "Cannot register charger-manager with"
@@ -928,6 +1185,15 @@ static int charger_manager_probe(struct platform_device *pdev)
        list_add(&cm->entry, &cm_list);
        mutex_unlock(&cm_list_mtx);
 
+       /*
+        * Charger-manager is capable of waking up the system from sleep
+        * when an event happens through cm_notify_event()
+        */
+       device_init_wakeup(&pdev->dev, true);
+       device_set_wakeup_capable(&pdev->dev, false);
+
+       schedule_work(&setup_polling);
+
        return 0;
 
 err_chg_enable:
@@ -958,9 +1224,17 @@ static int __devexit charger_manager_remove(struct platform_device *pdev)
        list_del(&cm->entry);
        mutex_unlock(&cm_list_mtx);
 
+       if (work_pending(&setup_polling))
+               cancel_work_sync(&setup_polling);
+       if (delayed_work_pending(&cm_monitor_work))
+               cancel_delayed_work_sync(&cm_monitor_work);
+
        regulator_bulk_free(desc->num_charger_regulators,
                            desc->charger_regulators);
        power_supply_unregister(&cm->charger_psy);
+
+       try_charger_enable(cm, false);
+
        kfree(cm->charger_psy.properties);
        kfree(cm->charger_stat);
        kfree(cm->desc);
@@ -975,6 +1249,18 @@ static const struct platform_device_id charger_manager_id[] = {
 };
 MODULE_DEVICE_TABLE(platform, charger_manager_id);
 
+static int cm_suspend_noirq(struct device *dev)
+{
+       int ret = 0;
+
+       if (device_may_wakeup(dev)) {
+               device_set_wakeup_capable(dev, false);
+               ret = -EAGAIN;
+       }
+
+       return ret;
+}
+
 static int cm_suspend_prepare(struct device *dev)
 {
        struct charger_manager *cm = dev_get_drvdata(dev);
@@ -1000,6 +1286,8 @@ static int cm_suspend_prepare(struct device *dev)
                cm_suspended = true;
        }
 
+       if (delayed_work_pending(&cm->fullbatt_vchk_work))
+               cancel_delayed_work(&cm->fullbatt_vchk_work);
        cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
        cm->status_save_batt = is_batt_present(cm);
 
@@ -1027,11 +1315,40 @@ static void cm_suspend_complete(struct device *dev)
                cm_rtc_set = false;
        }
 
+       /* Re-enqueue delayed work (fullbatt_vchk_work) */
+       if (cm->fullbatt_vchk_jiffies_at) {
+               unsigned long delay = 0;
+               unsigned long now = jiffies + CM_JIFFIES_SMALL;
+
+               if (time_after_eq(now, cm->fullbatt_vchk_jiffies_at)) {
+                       delay = (unsigned long)((long)now
+                               - (long)(cm->fullbatt_vchk_jiffies_at));
+                       delay = jiffies_to_msecs(delay);
+               } else {
+                       delay = 0;
+               }
+
+               /*
+                * Account for cm_suspend_duration_ms if
+                * assume_timer_stops_in_suspend is active
+                */
+               if (g_desc && g_desc->assume_timer_stops_in_suspend) {
+                       if (delay > cm_suspend_duration_ms)
+                               delay -= cm_suspend_duration_ms;
+                       else
+                               delay = 0;
+               }
+
+               queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+                                  msecs_to_jiffies(delay));
+       }
+       device_set_wakeup_capable(cm->dev, false);
        uevent_notify(cm, NULL);
 }
 
 static const struct dev_pm_ops charger_manager_pm = {
        .prepare        = cm_suspend_prepare,
+       .suspend_noirq  = cm_suspend_noirq,
        .complete       = cm_suspend_complete,
 };
 
@@ -1048,16 +1365,91 @@ static struct platform_driver charger_manager_driver = {
 
 static int __init charger_manager_init(void)
 {
+       cm_wq = create_freezable_workqueue("charger_manager");
+       INIT_DELAYED_WORK(&cm_monitor_work, cm_monitor_poller);
+
        return platform_driver_register(&charger_manager_driver);
 }
 late_initcall(charger_manager_init);
 
 static void __exit charger_manager_cleanup(void)
 {
+       destroy_workqueue(cm_wq);
+       cm_wq = NULL;
+
        platform_driver_unregister(&charger_manager_driver);
 }
 module_exit(charger_manager_cleanup);
 
+/**
+ * find_power_supply - find the associated power_supply of charger
+ * @cm: the Charger Manager representing the battery
+ * @psy: pointer to instance of charger's power_supply
+ */
+static bool find_power_supply(struct charger_manager *cm,
+                       struct power_supply *psy)
+{
+       int i;
+       bool found = false;
+
+       for (i = 0; cm->charger_stat[i]; i++) {
+               if (psy == cm->charger_stat[i]) {
+                       found = true;
+                       break;
+               }
+       }
+
+       return found;
+}
+
+/**
+ * cm_notify_event - charger driver notify Charger Manager of charger event
+ * @psy: pointer to instance of charger's power_supply
+ * @type: type of charger event
+ * @msg: optional message passed to uevent_notify function
+ */
+void cm_notify_event(struct power_supply *psy, enum cm_event_types type,
+                    char *msg)
+{
+       struct charger_manager *cm;
+       bool found_power_supply = false;
+
+       if (psy == NULL)
+               return;
+
+       mutex_lock(&cm_list_mtx);
+       list_for_each_entry(cm, &cm_list, entry) {
+               found_power_supply = find_power_supply(cm, psy);
+               if (found_power_supply)
+                       break;
+       }
+       mutex_unlock(&cm_list_mtx);
+
+       if (!found_power_supply)
+               return;
+
+       switch (type) {
+       case CM_EVENT_BATT_FULL:
+               fullbatt_handler(cm);
+               break;
+       case CM_EVENT_BATT_OUT:
+               battout_handler(cm);
+               break;
+       case CM_EVENT_BATT_IN:
+       case CM_EVENT_EXT_PWR_IN_OUT ... CM_EVENT_CHG_START_STOP:
+               misc_event_handler(cm, type);
+               break;
+       case CM_EVENT_UNKNOWN:
+       case CM_EVENT_OTHERS:
+               uevent_notify(cm, msg ? msg : default_event_names[type]);
+               break;
+       default:
+               dev_err(cm->dev, "%s type not specified.\n", __func__);
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(cm_notify_event);
+
 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
 MODULE_DESCRIPTION("Charger Manager");
 MODULE_LICENSE("GPL");
index ca0d653d0a7a2c3ac0d4c422f66317c3993dc49b..975684a40f1519ad33e5f630ecfa82020d71d960 100644 (file)
@@ -643,9 +643,7 @@ static ssize_t ds2781_read_param_eeprom_bin(struct file *filp,
        struct power_supply *psy = to_power_supply(dev);
        struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
 
-       count = min_t(loff_t, count,
-               DS2781_EEPROM_BLOCK1_END -
-               DS2781_EEPROM_BLOCK1_START + 1 - off);
+       count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
 
        return ds2781_read_block(dev_info, buf,
                                DS2781_EEPROM_BLOCK1_START + off, count);
@@ -661,9 +659,7 @@ static ssize_t ds2781_write_param_eeprom_bin(struct file *filp,
        struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
        int ret;
 
-       count = min_t(loff_t, count,
-               DS2781_EEPROM_BLOCK1_END -
-               DS2781_EEPROM_BLOCK1_START + 1 - off);
+       count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
 
        ret = ds2781_write(dev_info, buf,
                                DS2781_EEPROM_BLOCK1_START + off, count);
@@ -682,7 +678,7 @@ static struct bin_attribute ds2781_param_eeprom_bin_attr = {
                .name = "param_eeprom",
                .mode = S_IRUGO | S_IWUSR,
        },
-       .size = DS2781_EEPROM_BLOCK1_END - DS2781_EEPROM_BLOCK1_START + 1,
+       .size = DS2781_PARAM_EEPROM_SIZE,
        .read = ds2781_read_param_eeprom_bin,
        .write = ds2781_write_param_eeprom_bin,
 };
@@ -696,9 +692,7 @@ static ssize_t ds2781_read_user_eeprom_bin(struct file *filp,
        struct power_supply *psy = to_power_supply(dev);
        struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
 
-       count = min_t(loff_t, count,
-               DS2781_EEPROM_BLOCK0_END -
-               DS2781_EEPROM_BLOCK0_START + 1 - off);
+       count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
 
        return ds2781_read_block(dev_info, buf,
                                DS2781_EEPROM_BLOCK0_START + off, count);
@@ -715,9 +709,7 @@ static ssize_t ds2781_write_user_eeprom_bin(struct file *filp,
        struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
        int ret;
 
-       count = min_t(loff_t, count,
-               DS2781_EEPROM_BLOCK0_END -
-               DS2781_EEPROM_BLOCK0_START + 1 - off);
+       count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
 
        ret = ds2781_write(dev_info, buf,
                                DS2781_EEPROM_BLOCK0_START + off, count);
@@ -736,7 +728,7 @@ static struct bin_attribute ds2781_user_eeprom_bin_attr = {
                .name = "user_eeprom",
                .mode = S_IRUGO | S_IWUSR,
        },
-       .size = DS2781_EEPROM_BLOCK0_END - DS2781_EEPROM_BLOCK0_START + 1,
+       .size = DS2781_USER_EEPROM_SIZE,
        .read = ds2781_read_user_eeprom_bin,
        .write = ds2781_write_user_eeprom_bin,
 };
index 39eb50f35f09fd777445a53202fb30823d9f9438..e5ccd29797732d8dae6992a558eabd896fe6a9f0 100644 (file)
@@ -474,13 +474,13 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
 fail2:
        power_supply_unregister(&isp->psy);
 fail1:
+       isp1704_charger_set_power(isp, 0);
        usb_put_transceiver(isp->phy);
 fail0:
        kfree(isp);
 
        dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
 
-       isp1704_charger_set_power(isp, 0);
        return ret;
 }
 
index 04620c2cb388f3f5f2411f6a2b8191c5b36b459f..140788b309f84fe26056e77636eff26b954d8cb8 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/i2c.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/pm.h>
 #include <linux/mod_devicetable.h>
 #include <linux/power_supply.h>
 #include <linux/power/max17042_battery.h>
 #define dP_ACC_100     0x1900
 #define dP_ACC_200     0x3200
 
+#define MAX17042_IC_VERSION    0x0092
+#define MAX17047_IC_VERSION    0x00AC  /* same for max17050 */
+
 struct max17042_chip {
        struct i2c_client *client;
        struct power_supply battery;
+       enum max170xx_chip_type chip_type;
        struct max17042_platform_data *pdata;
        struct work_struct work;
        int    init_complete;
@@ -105,6 +110,7 @@ static enum power_supply_property max17042_battery_props[] = {
        POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
        POWER_SUPPLY_PROP_VOLTAGE_NOW,
        POWER_SUPPLY_PROP_VOLTAGE_AVG,
+       POWER_SUPPLY_PROP_VOLTAGE_OCV,
        POWER_SUPPLY_PROP_CAPACITY,
        POWER_SUPPLY_PROP_CHARGE_FULL,
        POWER_SUPPLY_PROP_TEMP,
@@ -150,7 +156,10 @@ static int max17042_get_property(struct power_supply *psy,
                val->intval *= 20000; /* Units of LSB = 20mV */
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
-               ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+               if (chip->chip_type == MAX17042)
+                       ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+               else
+                       ret = max17042_read_reg(chip->client, MAX17047_V_empty);
                if (ret < 0)
                        return ret;
 
@@ -169,6 +178,13 @@ static int max17042_get_property(struct power_supply *psy,
                if (ret < 0)
                        return ret;
 
+               val->intval = ret * 625 / 8;
+               break;
+       case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+               ret = max17042_read_reg(chip->client, MAX17042_OCVInternal);
+               if (ret < 0)
+                       return ret;
+
                val->intval = ret * 625 / 8;
                break;
        case POWER_SUPPLY_PROP_CAPACITY:
@@ -325,11 +341,10 @@ static inline int max17042_model_data_compare(struct max17042_chip *chip,
 static int max17042_init_model(struct max17042_chip *chip)
 {
        int ret;
-       int table_size =
-               sizeof(chip->pdata->config_data->cell_char_tbl)/sizeof(u16);
+       int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
        u16 *temp_data;
 
-       temp_data = kzalloc(table_size, GFP_KERNEL);
+       temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
        if (!temp_data)
                return -ENOMEM;
 
@@ -354,12 +369,11 @@ static int max17042_init_model(struct max17042_chip *chip)
 static int max17042_verify_model_lock(struct max17042_chip *chip)
 {
        int i;
-       int table_size =
-               sizeof(chip->pdata->config_data->cell_char_tbl);
+       int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
        u16 *temp_data;
        int ret = 0;
 
-       temp_data = kzalloc(table_size, GFP_KERNEL);
+       temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
        if (!temp_data)
                return -ENOMEM;
 
@@ -382,6 +396,9 @@ static void max17042_write_config_regs(struct max17042_chip *chip)
        max17042_write_reg(chip->client, MAX17042_FilterCFG,
                        config->filter_cfg);
        max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg);
+       if (chip->chip_type == MAX17047)
+               max17042_write_reg(chip->client, MAX17047_FullSOCThr,
+                                               config->full_soc_thresh);
 }
 
 static void  max17042_write_custom_regs(struct max17042_chip *chip)
@@ -392,12 +409,23 @@ static void  max17042_write_custom_regs(struct max17042_chip *chip)
                                config->rcomp0);
        max17042_write_verify_reg(chip->client, MAX17042_TempCo,
                                config->tcompc0);
-       max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
-                       config->empty_tempco);
-       max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
-                               config->kempty0);
        max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
                                config->ichgt_term);
+       if (chip->chip_type == MAX17042) {
+               max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
+                                       config->empty_tempco);
+               max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
+                                       config->kempty0);
+       } else {
+               max17042_write_verify_reg(chip->client, MAX17047_QRTbl00,
+                                               config->qrtbl00);
+               max17042_write_verify_reg(chip->client, MAX17047_QRTbl10,
+                                               config->qrtbl10);
+               max17042_write_verify_reg(chip->client, MAX17047_QRTbl20,
+                                               config->qrtbl20);
+               max17042_write_verify_reg(chip->client, MAX17047_QRTbl30,
+                                               config->qrtbl30);
+       }
 }
 
 static void max17042_update_capacity_regs(struct max17042_chip *chip)
@@ -453,6 +481,8 @@ static void max17042_load_new_capacity_params(struct max17042_chip *chip)
                        config->design_cap);
        max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
                        config->fullcapnom);
+       /* Update SOC register with new SOC */
+       max17042_write_reg(chip->client, MAX17042_RepSOC, vfSoc);
 }
 
 /*
@@ -489,20 +519,28 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
 
        max17042_override_por(client, MAX17042_FullCAP, config->fullcap);
        max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom);
-       max17042_override_por(client, MAX17042_SOC_empty, config->socempty);
+       if (chip->chip_type == MAX17042)
+               max17042_override_por(client, MAX17042_SOC_empty,
+                                               config->socempty);
        max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty);
        max17042_override_por(client, MAX17042_dQacc, config->dqacc);
        max17042_override_por(client, MAX17042_dPacc, config->dpacc);
 
-       max17042_override_por(client, MAX17042_V_empty, config->vempty);
+       if (chip->chip_type == MAX17042)
+               max17042_override_por(client, MAX17042_V_empty, config->vempty);
+       else
+               max17042_override_por(client, MAX17047_V_empty, config->vempty);
        max17042_override_por(client, MAX17042_TempNom, config->temp_nom);
        max17042_override_por(client, MAX17042_TempLim, config->temp_lim);
        max17042_override_por(client, MAX17042_FCTC, config->fctc);
        max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0);
        max17042_override_por(client, MAX17042_TempCo, config->tcompc0);
-       max17042_override_por(client, MAX17042_EmptyTempCo,
-                       config->empty_tempco);
-       max17042_override_por(client, MAX17042_K_empty0, config->kempty0);
+       if (chip->chip_type) {
+               max17042_override_por(client, MAX17042_EmptyTempCo,
+                                       config->empty_tempco);
+               max17042_override_por(client, MAX17042_K_empty0,
+                                       config->kempty0);
+       }
 }
 
 static int max17042_init_chip(struct max17042_chip *chip)
@@ -659,7 +697,19 @@ static int __devinit max17042_probe(struct i2c_client *client,
 
        i2c_set_clientdata(client, chip);
 
-       chip->battery.name              = "max17042_battery";
+       ret = max17042_read_reg(chip->client, MAX17042_DevName);
+       if (ret == MAX17042_IC_VERSION) {
+               dev_dbg(&client->dev, "chip type max17042 detected\n");
+               chip->chip_type = MAX17042;
+       } else if (ret == MAX17047_IC_VERSION) {
+               dev_dbg(&client->dev, "chip type max17047/50 detected\n");
+               chip->chip_type = MAX17047;
+       } else {
+               dev_err(&client->dev, "device version mismatch: %x\n", ret);
+               return -EIO;
+       }
+
+       chip->battery.name              = "max170xx_battery";
        chip->battery.type              = POWER_SUPPLY_TYPE_BATTERY;
        chip->battery.get_property      = max17042_get_property;
        chip->battery.properties        = max17042_battery_props;
@@ -683,6 +733,12 @@ static int __devinit max17042_probe(struct i2c_client *client,
                max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
        }
 
+       ret = power_supply_register(&client->dev, &chip->battery);
+       if (ret) {
+               dev_err(&client->dev, "failed: power supply register\n");
+               return ret;
+       }
+
        if (client->irq) {
                ret = request_threaded_irq(client->irq, NULL,
                                                max17042_thread_handler,
@@ -693,13 +749,14 @@ static int __devinit max17042_probe(struct i2c_client *client,
                        reg |= CONFIG_ALRT_BIT_ENBL;
                        max17042_write_reg(client, MAX17042_CONFIG, reg);
                        max17042_set_soc_threshold(chip, 1);
-               } else
+               } else {
+                       client->irq = 0;
                        dev_err(&client->dev, "%s(): cannot get IRQ\n",
                                __func__);
+               }
        }
 
        reg = max17042_read_reg(chip->client, MAX17042_STATUS);
-
        if (reg & STATUS_POR_BIT) {
                INIT_WORK(&chip->work, max17042_init_worker);
                schedule_work(&chip->work);
@@ -707,23 +764,65 @@ static int __devinit max17042_probe(struct i2c_client *client,
                chip->init_complete = 1;
        }
 
-       ret = power_supply_register(&client->dev, &chip->battery);
-       if (ret)
-               dev_err(&client->dev, "failed: power supply register\n");
-       return ret;
+       return 0;
 }
 
 static int __devexit max17042_remove(struct i2c_client *client)
 {
        struct max17042_chip *chip = i2c_get_clientdata(client);
 
+       if (client->irq)
+               free_irq(client->irq, chip);
        power_supply_unregister(&chip->battery);
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int max17042_suspend(struct device *dev)
+{
+       struct max17042_chip *chip = dev_get_drvdata(dev);
+
+       /*
+        * disable the irq and enable irq_wake
+        * capability to the interrupt line.
+        */
+       if (chip->client->irq) {
+               disable_irq(chip->client->irq);
+               enable_irq_wake(chip->client->irq);
+       }
+
+       return 0;
+}
+
+static int max17042_resume(struct device *dev)
+{
+       struct max17042_chip *chip = dev_get_drvdata(dev);
+
+       if (chip->client->irq) {
+               disable_irq_wake(chip->client->irq);
+               enable_irq(chip->client->irq);
+               /* re-program the SOC thresholds to 1% change */
+               max17042_set_soc_threshold(chip, 1);
+       }
+
+       return 0;
+}
+
+static const struct dev_pm_ops max17042_pm_ops = {
+       .suspend        = max17042_suspend,
+       .resume         = max17042_resume,
+};
+
+#define MAX17042_PM_OPS (&max17042_pm_ops)
+#else
+#define MAX17042_PM_OPS NULL
+#endif
+
 #ifdef CONFIG_OF
 static const struct of_device_id max17042_dt_match[] = {
        { .compatible = "maxim,max17042" },
+       { .compatible = "maxim,max17047" },
+       { .compatible = "maxim,max17050" },
        { },
 };
 MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -731,6 +830,8 @@ MODULE_DEVICE_TABLE(of, max17042_dt_match);
 
 static const struct i2c_device_id max17042_id[] = {
        { "max17042", 0 },
+       { "max17047", 1 },
+       { "max17050", 2 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, max17042_id);
@@ -739,6 +840,7 @@ static struct i2c_driver max17042_i2c_driver = {
        .driver = {
                .name   = "max17042",
                .of_match_table = of_match_ptr(max17042_dt_match),
+               .pm     = MAX17042_PM_OPS,
        },
        .probe          = max17042_probe,
        .remove         = __devexit_p(max17042_remove),
index 4368e7d61316bb37c9a3565ca1c0400964aa6781..4150747f9186f8dfaff9faf81bf03a7ade8cb9fa 100644 (file)
@@ -146,6 +146,7 @@ static struct device_attribute power_supply_attrs[] = {
        POWER_SUPPLY_ATTR(voltage_min_design),
        POWER_SUPPLY_ATTR(voltage_now),
        POWER_SUPPLY_ATTR(voltage_avg),
+       POWER_SUPPLY_ATTR(voltage_ocv),
        POWER_SUPPLY_ATTR(current_max),
        POWER_SUPPLY_ATTR(current_now),
        POWER_SUPPLY_ATTR(current_avg),
index 06b659d9179009e032bd7aaf2953aa8bf82c84fb..a5b6849d4123b51b60d2431bbcd72cc9f13c86f4 100644 (file)
@@ -89,7 +89,7 @@ static const struct chip_data {
        [REG_CURRENT] =
                SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767),
        [REG_CAPACITY] =
-               SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100),
+               SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0D, 0, 100),
        [REG_REMAINING_CAPACITY] =
                SBS_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
        [REG_REMAINING_CAPACITY_CHARGE] =
index ce1694d1a36584b3e910adde4dd0ff5800fc4c72..f8eedd8a676fc68ad21f45b8bfa4ddec55add723 100644 (file)
@@ -11,7 +11,7 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/debugfs.h>
+#include <linux/err.h>
 #include <linux/gpio.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -21,7 +21,7 @@
 #include <linux/mutex.h>
 #include <linux/power_supply.h>
 #include <linux/power/smb347-charger.h>
-#include <linux/seq_file.h>
+#include <linux/regmap.h>
 
 /*
  * Configuration registers. These are mirrored to volatile RAM and can be
@@ -39,6 +39,7 @@
 #define CFG_CURRENT_LIMIT_DC_SHIFT             4
 #define CFG_CURRENT_LIMIT_USB_MASK             0x0f
 #define CFG_FLOAT_VOLTAGE                      0x03
+#define CFG_FLOAT_VOLTAGE_FLOAT_MASK           0x3f
 #define CFG_FLOAT_VOLTAGE_THRESHOLD_MASK       0xc0
 #define CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT      6
 #define CFG_STAT                               0x05
 #define STAT_C_CHARGER_ERROR                   BIT(6)
 #define STAT_E                                 0x3f
 
+#define SMB347_MAX_REGISTER                    0x3f
+
 /**
  * struct smb347_charger - smb347 charger instance
  * @lock: protects concurrent access to online variables
- * @client: pointer to i2c client
+ * @dev: pointer to device
+ * @regmap: pointer to driver regmap
  * @mains: power_supply instance for AC/DC power
  * @usb: power_supply instance for USB power
  * @battery: power_supply instance for battery
  * @mains_online: is AC/DC input connected
  * @usb_online: is USB input connected
  * @charging_enabled: is charging enabled
- * @dentry: for debugfs
  * @pdata: pointer to platform data
  */
 struct smb347_charger {
        struct mutex            lock;
-       struct i2c_client       *client;
+       struct device           *dev;
+       struct regmap           *regmap;
        struct power_supply     mains;
        struct power_supply     usb;
        struct power_supply     battery;
        bool                    mains_online;
        bool                    usb_online;
        bool                    charging_enabled;
-       struct dentry           *dentry;
        const struct smb347_charger_platform_data *pdata;
 };
 
@@ -193,14 +196,6 @@ static const unsigned int ccc_tbl[] = {
        1200000,
 };
 
-/* Convert register value to current using lookup table */
-static int hw_to_current(const unsigned int *tbl, size_t size, unsigned int val)
-{
-       if (val >= size)
-               return -EINVAL;
-       return tbl[val];
-}
-
 /* Convert current to register value using lookup table */
 static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
 {
@@ -212,43 +207,22 @@ static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
        return i > 0 ? i - 1 : -EINVAL;
 }
 
-static int smb347_read(struct smb347_charger *smb, u8 reg)
-{
-       int ret;
-
-       ret = i2c_smbus_read_byte_data(smb->client, reg);
-       if (ret < 0)
-               dev_warn(&smb->client->dev, "failed to read reg 0x%x: %d\n",
-                        reg, ret);
-       return ret;
-}
-
-static int smb347_write(struct smb347_charger *smb, u8 reg, u8 val)
-{
-       int ret;
-
-       ret = i2c_smbus_write_byte_data(smb->client, reg, val);
-       if (ret < 0)
-               dev_warn(&smb->client->dev, "failed to write reg 0x%x: %d\n",
-                        reg, ret);
-       return ret;
-}
-
 /**
- * smb347_update_status - updates the charging status
+ * smb347_update_ps_status - refreshes the power source status
  * @smb: pointer to smb347 charger instance
  *
- * Function checks status of the charging and updates internal state
- * accordingly. Returns %0 if there is no change in status, %1 if the
- * status has changed and negative errno in case of failure.
+ * Function checks whether any power source is connected to the charger and
+ * updates internal state accordingly. If there is a change to previous state
+ * function returns %1, otherwise %0 and negative errno in case of error.
  */
-static int smb347_update_status(struct smb347_charger *smb)
+static int smb347_update_ps_status(struct smb347_charger *smb)
 {
        bool usb = false;
        bool dc = false;
+       unsigned int val;
        int ret;
 
-       ret = smb347_read(smb, IRQSTAT_E);
+       ret = regmap_read(smb->regmap, IRQSTAT_E, &val);
        if (ret < 0)
                return ret;
 
@@ -257,9 +231,9 @@ static int smb347_update_status(struct smb347_charger *smb)
         * platform data _and_ whether corresponding undervoltage is set.
         */
        if (smb->pdata->use_mains)
-               dc = !(ret & IRQSTAT_E_DCIN_UV_STAT);
+               dc = !(val & IRQSTAT_E_DCIN_UV_STAT);
        if (smb->pdata->use_usb)
-               usb = !(ret & IRQSTAT_E_USBIN_UV_STAT);
+               usb = !(val & IRQSTAT_E_USBIN_UV_STAT);
 
        mutex_lock(&smb->lock);
        ret = smb->mains_online != dc || smb->usb_online != usb;
@@ -271,15 +245,15 @@ static int smb347_update_status(struct smb347_charger *smb)
 }
 
 /*
- * smb347_is_online - returns whether input power source is connected
+ * smb347_is_ps_online - returns whether input power source is connected
  * @smb: pointer to smb347 charger instance
  *
  * Returns %true if input power source is connected. Note that this is
  * dependent on what platform has configured for usable power sources. For
- * example if USB is disabled, this will return %false even if the USB
- * cable is connected.
+ * example if USB is disabled, this will return %false even if the USB cable
+ * is connected.
  */
-static bool smb347_is_online(struct smb347_charger *smb)
+static bool smb347_is_ps_online(struct smb347_charger *smb)
 {
        bool ret;
 
@@ -299,16 +273,17 @@ static bool smb347_is_online(struct smb347_charger *smb)
  */
 static int smb347_charging_status(struct smb347_charger *smb)
 {
+       unsigned int val;
        int ret;
 
-       if (!smb347_is_online(smb))
+       if (!smb347_is_ps_online(smb))
                return 0;
 
-       ret = smb347_read(smb, STAT_C);
+       ret = regmap_read(smb->regmap, STAT_C, &val);
        if (ret < 0)
                return 0;
 
-       return (ret & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
+       return (val & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
 }
 
 static int smb347_charging_set(struct smb347_charger *smb, bool enable)
@@ -316,27 +291,17 @@ static int smb347_charging_set(struct smb347_charger *smb, bool enable)
        int ret = 0;
 
        if (smb->pdata->enable_control != SMB347_CHG_ENABLE_SW) {
-               dev_dbg(&smb->client->dev,
-                       "charging enable/disable in SW disabled\n");
+               dev_dbg(smb->dev, "charging enable/disable in SW disabled\n");
                return 0;
        }
 
        mutex_lock(&smb->lock);
        if (smb->charging_enabled != enable) {
-               ret = smb347_read(smb, CMD_A);
-               if (ret < 0)
-                       goto out;
-
-               smb->charging_enabled = enable;
-
-               if (enable)
-                       ret |= CMD_A_CHG_ENABLED;
-               else
-                       ret &= ~CMD_A_CHG_ENABLED;
-
-               ret = smb347_write(smb, CMD_A, ret);
+               ret = regmap_update_bits(smb->regmap, CMD_A, CMD_A_CHG_ENABLED,
+                                        enable ? CMD_A_CHG_ENABLED : 0);
+               if (!ret)
+                       smb->charging_enabled = enable;
        }
-out:
        mutex_unlock(&smb->lock);
        return ret;
 }
@@ -351,7 +316,7 @@ static inline int smb347_charging_disable(struct smb347_charger *smb)
        return smb347_charging_set(smb, false);
 }
 
-static int smb347_update_online(struct smb347_charger *smb)
+static int smb347_start_stop_charging(struct smb347_charger *smb)
 {
        int ret;
 
@@ -360,16 +325,14 @@ static int smb347_update_online(struct smb347_charger *smb)
         * disable or enable the charging. We do it manually because it
         * depends on how the platform has configured the valid inputs.
         */
-       if (smb347_is_online(smb)) {
+       if (smb347_is_ps_online(smb)) {
                ret = smb347_charging_enable(smb);
                if (ret < 0)
-                       dev_err(&smb->client->dev,
-                               "failed to enable charging\n");
+                       dev_err(smb->dev, "failed to enable charging\n");
        } else {
                ret = smb347_charging_disable(smb);
                if (ret < 0)
-                       dev_err(&smb->client->dev,
-                               "failed to disable charging\n");
+                       dev_err(smb->dev, "failed to disable charging\n");
        }
 
        return ret;
@@ -377,112 +340,120 @@ static int smb347_update_online(struct smb347_charger *smb)
 
 static int smb347_set_charge_current(struct smb347_charger *smb)
 {
-       int ret, val;
-
-       ret = smb347_read(smb, CFG_CHARGE_CURRENT);
-       if (ret < 0)
-               return ret;
+       int ret;
 
        if (smb->pdata->max_charge_current) {
-               val = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
+               ret = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
                                    smb->pdata->max_charge_current);
-               if (val < 0)
-                       return val;
+               if (ret < 0)
+                       return ret;
 
-               ret &= ~CFG_CHARGE_CURRENT_FCC_MASK;
-               ret |= val << CFG_CHARGE_CURRENT_FCC_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+                                        CFG_CHARGE_CURRENT_FCC_MASK,
+                                        ret << CFG_CHARGE_CURRENT_FCC_SHIFT);
+               if (ret < 0)
+                       return ret;
        }
 
        if (smb->pdata->pre_charge_current) {
-               val = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
+               ret = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
                                    smb->pdata->pre_charge_current);
-               if (val < 0)
-                       return val;
+               if (ret < 0)
+                       return ret;
 
-               ret &= ~CFG_CHARGE_CURRENT_PCC_MASK;
-               ret |= val << CFG_CHARGE_CURRENT_PCC_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+                                        CFG_CHARGE_CURRENT_PCC_MASK,
+                                        ret << CFG_CHARGE_CURRENT_PCC_SHIFT);
+               if (ret < 0)
+                       return ret;
        }
 
        if (smb->pdata->termination_current) {
-               val = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
+               ret = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
                                    smb->pdata->termination_current);
-               if (val < 0)
-                       return val;
+               if (ret < 0)
+                       return ret;
 
-               ret &= ~CFG_CHARGE_CURRENT_TC_MASK;
-               ret |= val;
+               ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+                                        CFG_CHARGE_CURRENT_TC_MASK, ret);
+               if (ret < 0)
+                       return ret;
        }
 
-       return smb347_write(smb, CFG_CHARGE_CURRENT, ret);
+       return 0;
 }
 
 static int smb347_set_current_limits(struct smb347_charger *smb)
 {
-       int ret, val;
-
-       ret = smb347_read(smb, CFG_CURRENT_LIMIT);
-       if (ret < 0)
-               return ret;
+       int ret;
 
        if (smb->pdata->mains_current_limit) {
-               val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+               ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
                                    smb->pdata->mains_current_limit);
-               if (val < 0)
-                       return val;
+               if (ret < 0)
+                       return ret;
 
-               ret &= ~CFG_CURRENT_LIMIT_DC_MASK;
-               ret |= val << CFG_CURRENT_LIMIT_DC_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
+                                        CFG_CURRENT_LIMIT_DC_MASK,
+                                        ret << CFG_CURRENT_LIMIT_DC_SHIFT);
+               if (ret < 0)
+                       return ret;
        }
 
        if (smb->pdata->usb_hc_current_limit) {
-               val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+               ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
                                    smb->pdata->usb_hc_current_limit);
-               if (val < 0)
-                       return val;
+               if (ret < 0)
+                       return ret;
 
-               ret &= ~CFG_CURRENT_LIMIT_USB_MASK;
-               ret |= val;
+               ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
+                                        CFG_CURRENT_LIMIT_USB_MASK, ret);
+               if (ret < 0)
+                       return ret;
        }
 
-       return smb347_write(smb, CFG_CURRENT_LIMIT, ret);
+       return 0;
 }
 
 static int smb347_set_voltage_limits(struct smb347_charger *smb)
 {
-       int ret, val;
-
-       ret = smb347_read(smb, CFG_FLOAT_VOLTAGE);
-       if (ret < 0)
-               return ret;
+       int ret;
 
        if (smb->pdata->pre_to_fast_voltage) {
-               val = smb->pdata->pre_to_fast_voltage;
+               ret = smb->pdata->pre_to_fast_voltage;
 
                /* uV */
-               val = clamp_val(val, 2400000, 3000000) - 2400000;
-               val /= 200000;
+               ret = clamp_val(ret, 2400000, 3000000) - 2400000;
+               ret /= 200000;
 
-               ret &= ~CFG_FLOAT_VOLTAGE_THRESHOLD_MASK;
-               ret |= val << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
+                               CFG_FLOAT_VOLTAGE_THRESHOLD_MASK,
+                               ret << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT);
+               if (ret < 0)
+                       return ret;
        }
 
        if (smb->pdata->max_charge_voltage) {
-               val = smb->pdata->max_charge_voltage;
+               ret = smb->pdata->max_charge_voltage;
 
                /* uV */
-               val = clamp_val(val, 3500000, 4500000) - 3500000;
-               val /= 20000;
+               ret = clamp_val(ret, 3500000, 4500000) - 3500000;
+               ret /= 20000;
 
-               ret |= val;
+               ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
+                                        CFG_FLOAT_VOLTAGE_FLOAT_MASK, ret);
+               if (ret < 0)
+                       return ret;
        }
 
-       return smb347_write(smb, CFG_FLOAT_VOLTAGE, ret);
+       return 0;
 }
 
 static int smb347_set_temp_limits(struct smb347_charger *smb)
 {
        bool enable_therm_monitor = false;
-       int ret, val;
+       int ret = 0;
+       int val;
 
        if (smb->pdata->chip_temp_threshold) {
                val = smb->pdata->chip_temp_threshold;
@@ -491,22 +462,13 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
                val = clamp_val(val, 100, 130) - 100;
                val /= 10;
 
-               ret = smb347_read(smb, CFG_OTG);
-               if (ret < 0)
-                       return ret;
-
-               ret &= ~CFG_OTG_TEMP_THRESHOLD_MASK;
-               ret |= val << CFG_OTG_TEMP_THRESHOLD_SHIFT;
-
-               ret = smb347_write(smb, CFG_OTG, ret);
+               ret = regmap_update_bits(smb->regmap, CFG_OTG,
+                                        CFG_OTG_TEMP_THRESHOLD_MASK,
+                                        val << CFG_OTG_TEMP_THRESHOLD_SHIFT);
                if (ret < 0)
                        return ret;
        }
 
-       ret = smb347_read(smb, CFG_TEMP_LIMIT);
-       if (ret < 0)
-               return ret;
-
        if (smb->pdata->soft_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
                val = smb->pdata->soft_cold_temp_limit;
 
@@ -515,8 +477,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
                /* this goes from higher to lower so invert the value */
                val = ~val & 0x3;
 
-               ret &= ~CFG_TEMP_LIMIT_SOFT_COLD_MASK;
-               ret |= val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+                                        CFG_TEMP_LIMIT_SOFT_COLD_MASK,
+                                        val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT);
+               if (ret < 0)
+                       return ret;
 
                enable_therm_monitor = true;
        }
@@ -527,8 +492,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
                val = clamp_val(val, 40, 55) - 40;
                val /= 5;
 
-               ret &= ~CFG_TEMP_LIMIT_SOFT_HOT_MASK;
-               ret |= val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+                                        CFG_TEMP_LIMIT_SOFT_HOT_MASK,
+                                        val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT);
+               if (ret < 0)
+                       return ret;
 
                enable_therm_monitor = true;
        }
@@ -541,8 +509,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
                /* this goes from higher to lower so invert the value */
                val = ~val & 0x3;
 
-               ret &= ~CFG_TEMP_LIMIT_HARD_COLD_MASK;
-               ret |= val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+                                        CFG_TEMP_LIMIT_HARD_COLD_MASK,
+                                        val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT);
+               if (ret < 0)
+                       return ret;
 
                enable_therm_monitor = true;
        }
@@ -553,16 +524,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
                val = clamp_val(val, 50, 65) - 50;
                val /= 5;
 
-               ret &= ~CFG_TEMP_LIMIT_HARD_HOT_MASK;
-               ret |= val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT;
+               ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+                                        CFG_TEMP_LIMIT_HARD_HOT_MASK,
+                                        val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT);
+               if (ret < 0)
+                       return ret;
 
                enable_therm_monitor = true;
        }
 
-       ret = smb347_write(smb, CFG_TEMP_LIMIT, ret);
-       if (ret < 0)
-               return ret;
-
        /*
         * If any of the temperature limits are set, we also enable the
         * thermistor monitoring.
@@ -574,25 +544,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
         * depending on the configuration.
         */
        if (enable_therm_monitor) {
-               ret = smb347_read(smb, CFG_THERM);
-               if (ret < 0)
-                       return ret;
-
-               ret &= ~CFG_THERM_MONITOR_DISABLED;
-
-               ret = smb347_write(smb, CFG_THERM, ret);
+               ret = regmap_update_bits(smb->regmap, CFG_THERM,
+                                        CFG_THERM_MONITOR_DISABLED, 0);
                if (ret < 0)
                        return ret;
        }
 
        if (smb->pdata->suspend_on_hard_temp_limit) {
-               ret = smb347_read(smb, CFG_SYSOK);
-               if (ret < 0)
-                       return ret;
-
-               ret &= ~CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED;
-
-               ret = smb347_write(smb, CFG_SYSOK, ret);
+               ret = regmap_update_bits(smb->regmap, CFG_SYSOK,
+                                CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED, 0);
                if (ret < 0)
                        return ret;
        }
@@ -601,17 +561,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
            SMB347_SOFT_TEMP_COMPENSATE_DEFAULT) {
                val = smb->pdata->soft_temp_limit_compensation & 0x3;
 
-               ret = smb347_read(smb, CFG_THERM);
+               ret = regmap_update_bits(smb->regmap, CFG_THERM,
+                                CFG_THERM_SOFT_HOT_COMPENSATION_MASK,
+                                val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT);
                if (ret < 0)
                        return ret;
 
-               ret &= ~CFG_THERM_SOFT_HOT_COMPENSATION_MASK;
-               ret |= val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT;
-
-               ret &= ~CFG_THERM_SOFT_COLD_COMPENSATION_MASK;
-               ret |= val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT;
-
-               ret = smb347_write(smb, CFG_THERM, ret);
+               ret = regmap_update_bits(smb->regmap, CFG_THERM,
+                                CFG_THERM_SOFT_COLD_COMPENSATION_MASK,
+                                val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT);
                if (ret < 0)
                        return ret;
        }
@@ -622,14 +580,9 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
                if (val < 0)
                        return val;
 
-               ret = smb347_read(smb, CFG_OTG);
-               if (ret < 0)
-                       return ret;
-
-               ret &= ~CFG_OTG_CC_COMPENSATION_MASK;
-               ret |= (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT;
-
-               ret = smb347_write(smb, CFG_OTG, ret);
+               ret = regmap_update_bits(smb->regmap, CFG_OTG,
+                               CFG_OTG_CC_COMPENSATION_MASK,
+                               (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT);
                if (ret < 0)
                        return ret;
        }
@@ -648,22 +601,13 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
  */
 static int smb347_set_writable(struct smb347_charger *smb, bool writable)
 {
-       int ret;
-
-       ret = smb347_read(smb, CMD_A);
-       if (ret < 0)
-               return ret;
-
-       if (writable)
-               ret |= CMD_A_ALLOW_WRITE;
-       else
-               ret &= ~CMD_A_ALLOW_WRITE;
-
-       return smb347_write(smb, CMD_A, ret);
+       return regmap_update_bits(smb->regmap, CMD_A, CMD_A_ALLOW_WRITE,
+                                 writable ? CMD_A_ALLOW_WRITE : 0);
 }
 
 static int smb347_hw_init(struct smb347_charger *smb)
 {
+       unsigned int val;
        int ret;
 
        ret = smb347_set_writable(smb, true);
@@ -692,34 +636,19 @@ static int smb347_hw_init(struct smb347_charger *smb)
 
        /* If USB charging is disabled we put the USB in suspend mode */
        if (!smb->pdata->use_usb) {
-               ret = smb347_read(smb, CMD_A);
-               if (ret < 0)
-                       goto fail;
-
-               ret |= CMD_A_SUSPEND_ENABLED;
-
-               ret = smb347_write(smb, CMD_A, ret);
+               ret = regmap_update_bits(smb->regmap, CMD_A,
+                                        CMD_A_SUSPEND_ENABLED,
+                                        CMD_A_SUSPEND_ENABLED);
                if (ret < 0)
                        goto fail;
        }
 
-       ret = smb347_read(smb, CFG_OTHER);
-       if (ret < 0)
-               goto fail;
-
        /*
         * If configured by platform data, we enable hardware Auto-OTG
         * support for driving VBUS. Otherwise we disable it.
         */
-       ret &= ~CFG_OTHER_RID_MASK;
-       if (smb->pdata->use_usb_otg)
-               ret |= CFG_OTHER_RID_ENABLED_AUTO_OTG;
-
-       ret = smb347_write(smb, CFG_OTHER, ret);
-       if (ret < 0)
-               goto fail;
-
-       ret = smb347_read(smb, CFG_PIN);
+       ret = regmap_update_bits(smb->regmap, CFG_OTHER, CFG_OTHER_RID_MASK,
+               smb->pdata->use_usb_otg ? CFG_OTHER_RID_ENABLED_AUTO_OTG : 0);
        if (ret < 0)
                goto fail;
 
@@ -728,32 +657,33 @@ static int smb347_hw_init(struct smb347_charger *smb)
         * command register unless pin control is specified in the platform
         * data.
         */
-       ret &= ~CFG_PIN_EN_CTRL_MASK;
-
        switch (smb->pdata->enable_control) {
-       case SMB347_CHG_ENABLE_SW:
-               /* Do nothing, 0 means i2c control */
-               break;
        case SMB347_CHG_ENABLE_PIN_ACTIVE_LOW:
-               ret |= CFG_PIN_EN_CTRL_ACTIVE_LOW;
+               val = CFG_PIN_EN_CTRL_ACTIVE_LOW;
                break;
        case SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH:
-               ret |= CFG_PIN_EN_CTRL_ACTIVE_HIGH;
+               val = CFG_PIN_EN_CTRL_ACTIVE_HIGH;
+               break;
+       default:
+               val = 0;
                break;
        }
 
-       /* Disable Automatic Power Source Detection (APSD) interrupt. */
-       ret &= ~CFG_PIN_EN_APSD_IRQ;
+       ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CTRL_MASK,
+                                val);
+       if (ret < 0)
+               goto fail;
 
-       ret = smb347_write(smb, CFG_PIN, ret);
+       /* Disable Automatic Power Source Detection (APSD) interrupt. */
+       ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_APSD_IRQ, 0);
        if (ret < 0)
                goto fail;
 
-       ret = smb347_update_status(smb);
+       ret = smb347_update_ps_status(smb);
        if (ret < 0)
                goto fail;
 
-       ret = smb347_update_online(smb);
+       ret = smb347_start_stop_charging(smb);
 
 fail:
        smb347_set_writable(smb, false);
@@ -763,24 +693,25 @@ fail:
 static irqreturn_t smb347_interrupt(int irq, void *data)
 {
        struct smb347_charger *smb = data;
-       int stat_c, irqstat_e, irqstat_c;
-       irqreturn_t ret = IRQ_NONE;
+       unsigned int stat_c, irqstat_e, irqstat_c;
+       bool handled = false;
+       int ret;
 
-       stat_c = smb347_read(smb, STAT_C);
-       if (stat_c < 0) {
-               dev_warn(&smb->client->dev, "reading STAT_C failed\n");
+       ret = regmap_read(smb->regmap, STAT_C, &stat_c);
+       if (ret < 0) {
+               dev_warn(smb->dev, "reading STAT_C failed\n");
                return IRQ_NONE;
        }
 
-       irqstat_c = smb347_read(smb, IRQSTAT_C);
-       if (irqstat_c < 0) {
-               dev_warn(&smb->client->dev, "reading IRQSTAT_C failed\n");
+       ret = regmap_read(smb->regmap, IRQSTAT_C, &irqstat_c);
+       if (ret < 0) {
+               dev_warn(smb->dev, "reading IRQSTAT_C failed\n");
                return IRQ_NONE;
        }
 
-       irqstat_e = smb347_read(smb, IRQSTAT_E);
-       if (irqstat_e < 0) {
-               dev_warn(&smb->client->dev, "reading IRQSTAT_E failed\n");
+       ret = regmap_read(smb->regmap, IRQSTAT_E, &irqstat_e);
+       if (ret < 0) {
+               dev_warn(smb->dev, "reading IRQSTAT_E failed\n");
                return IRQ_NONE;
        }
 
@@ -789,13 +720,11 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
         * disable charging.
         */
        if (stat_c & STAT_C_CHARGER_ERROR) {
-               dev_err(&smb->client->dev,
-                       "error in charger, disabling charging\n");
+               dev_err(smb->dev, "error in charger, disabling charging\n");
 
                smb347_charging_disable(smb);
                power_supply_changed(&smb->battery);
-
-               ret = IRQ_HANDLED;
+               handled = true;
        }
 
        /*
@@ -806,7 +735,7 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
        if (irqstat_c & (IRQSTAT_C_TERMINATION_IRQ | IRQSTAT_C_TAPER_IRQ)) {
                if (irqstat_c & IRQSTAT_C_TERMINATION_STAT)
                        power_supply_changed(&smb->battery);
-               ret = IRQ_HANDLED;
+               handled = true;
        }
 
        /*
@@ -814,15 +743,17 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
         * was connected or disconnected.
         */
        if (irqstat_e & (IRQSTAT_E_USBIN_UV_IRQ | IRQSTAT_E_DCIN_UV_IRQ)) {
-               if (smb347_update_status(smb) > 0) {
-                       smb347_update_online(smb);
-                       power_supply_changed(&smb->mains);
-                       power_supply_changed(&smb->usb);
+               if (smb347_update_ps_status(smb) > 0) {
+                       smb347_start_stop_charging(smb);
+                       if (smb->pdata->use_mains)
+                               power_supply_changed(&smb->mains);
+                       if (smb->pdata->use_usb)
+                               power_supply_changed(&smb->usb);
                }
-               ret = IRQ_HANDLED;
+               handled = true;
        }
 
-       return ret;
+       return handled ? IRQ_HANDLED : IRQ_NONE;
 }
 
 static int smb347_irq_set(struct smb347_charger *smb, bool enable)
@@ -839,41 +770,18 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
         *      - termination current reached
         *      - charger error
         */
-       if (enable) {
-               ret = smb347_write(smb, CFG_FAULT_IRQ, CFG_FAULT_IRQ_DCIN_UV);
-               if (ret < 0)
-                       goto fail;
-
-               ret = smb347_write(smb, CFG_STATUS_IRQ,
-                                  CFG_STATUS_IRQ_TERMINATION_OR_TAPER);
-               if (ret < 0)
-                       goto fail;
-
-               ret = smb347_read(smb, CFG_PIN);
-               if (ret < 0)
-                       goto fail;
-
-               ret |= CFG_PIN_EN_CHARGER_ERROR;
-
-               ret = smb347_write(smb, CFG_PIN, ret);
-       } else {
-               ret = smb347_write(smb, CFG_FAULT_IRQ, 0);
-               if (ret < 0)
-                       goto fail;
-
-               ret = smb347_write(smb, CFG_STATUS_IRQ, 0);
-               if (ret < 0)
-                       goto fail;
-
-               ret = smb347_read(smb, CFG_PIN);
-               if (ret < 0)
-                       goto fail;
-
-               ret &= ~CFG_PIN_EN_CHARGER_ERROR;
+       ret = regmap_update_bits(smb->regmap, CFG_FAULT_IRQ, 0xff,
+                                enable ? CFG_FAULT_IRQ_DCIN_UV : 0);
+       if (ret < 0)
+               goto fail;
 
-               ret = smb347_write(smb, CFG_PIN, ret);
-       }
+       ret = regmap_update_bits(smb->regmap, CFG_STATUS_IRQ, 0xff,
+                       enable ? CFG_STATUS_IRQ_TERMINATION_OR_TAPER : 0);
+       if (ret < 0)
+               goto fail;
 
+       ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CHARGER_ERROR,
+                                enable ? CFG_PIN_EN_CHARGER_ERROR : 0);
 fail:
        smb347_set_writable(smb, false);
        return ret;
@@ -889,18 +797,18 @@ static inline int smb347_irq_disable(struct smb347_charger *smb)
        return smb347_irq_set(smb, false);
 }
 
-static int smb347_irq_init(struct smb347_charger *smb)
+static int smb347_irq_init(struct smb347_charger *smb,
+                          struct i2c_client *client)
 {
        const struct smb347_charger_platform_data *pdata = smb->pdata;
        int ret, irq = gpio_to_irq(pdata->irq_gpio);
 
-       ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, smb->client->name);
+       ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
        if (ret < 0)
                goto fail;
 
        ret = request_threaded_irq(irq, NULL, smb347_interrupt,
-                                  IRQF_TRIGGER_FALLING, smb->client->name,
-                                  smb);
+                                  IRQF_TRIGGER_FALLING, client->name, smb);
        if (ret < 0)
                goto fail_gpio;
 
@@ -912,23 +820,14 @@ static int smb347_irq_init(struct smb347_charger *smb)
         * Configure the STAT output to be suitable for interrupts: disable
         * all other output (except interrupts) and make it active low.
         */
-       ret = smb347_read(smb, CFG_STAT);
-       if (ret < 0)
-               goto fail_readonly;
-
-       ret &= ~CFG_STAT_ACTIVE_HIGH;
-       ret |= CFG_STAT_DISABLED;
-
-       ret = smb347_write(smb, CFG_STAT, ret);
-       if (ret < 0)
-               goto fail_readonly;
-
-       ret = smb347_irq_enable(smb);
+       ret = regmap_update_bits(smb->regmap, CFG_STAT,
+                                CFG_STAT_ACTIVE_HIGH | CFG_STAT_DISABLED,
+                                CFG_STAT_DISABLED);
        if (ret < 0)
                goto fail_readonly;
 
        smb347_set_writable(smb, false);
-       smb->client->irq = irq;
+       client->irq = irq;
        return 0;
 
 fail_readonly:
@@ -938,7 +837,7 @@ fail_irq:
 fail_gpio:
        gpio_free(pdata->irq_gpio);
 fail:
-       smb->client->irq = 0;
+       client->irq = 0;
        return ret;
 }
 
@@ -987,13 +886,13 @@ static int smb347_battery_get_property(struct power_supply *psy,
        const struct smb347_charger_platform_data *pdata = smb->pdata;
        int ret;
 
-       ret = smb347_update_status(smb);
+       ret = smb347_update_ps_status(smb);
        if (ret < 0)
                return ret;
 
        switch (prop) {
        case POWER_SUPPLY_PROP_STATUS:
-               if (!smb347_is_online(smb)) {
+               if (!smb347_is_ps_online(smb)) {
                        val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
                        break;
                }
@@ -1004,7 +903,7 @@ static int smb347_battery_get_property(struct power_supply *psy,
                break;
 
        case POWER_SUPPLY_PROP_CHARGE_TYPE:
-               if (!smb347_is_online(smb))
+               if (!smb347_is_ps_online(smb))
                        return -ENODATA;
 
                /*
@@ -1036,44 +935,6 @@ static int smb347_battery_get_property(struct power_supply *psy,
                val->intval = pdata->battery_info.voltage_max_design;
                break;
 
-       case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-               if (!smb347_is_online(smb))
-                       return -ENODATA;
-               ret = smb347_read(smb, STAT_A);
-               if (ret < 0)
-                       return ret;
-
-               ret &= STAT_A_FLOAT_VOLTAGE_MASK;
-               if (ret > 0x3d)
-                       ret = 0x3d;
-
-               val->intval = 3500000 + ret * 20000;
-               break;
-
-       case POWER_SUPPLY_PROP_CURRENT_NOW:
-               if (!smb347_is_online(smb))
-                       return -ENODATA;
-
-               ret = smb347_read(smb, STAT_B);
-               if (ret < 0)
-                       return ret;
-
-               /*
-                * The current value is composition of FCC and PCC values
-                * and we can detect which table to use from bit 5.
-                */
-               if (ret & 0x20) {
-                       val->intval = hw_to_current(fcc_tbl,
-                                                   ARRAY_SIZE(fcc_tbl),
-                                                   ret & 7);
-               } else {
-                       ret >>= 3;
-                       val->intval = hw_to_current(pcc_tbl,
-                                                   ARRAY_SIZE(pcc_tbl),
-                                                   ret & 7);
-               }
-               break;
-
        case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
                val->intval = pdata->battery_info.charge_full_design;
                break;
@@ -1095,64 +956,58 @@ static enum power_supply_property smb347_battery_properties[] = {
        POWER_SUPPLY_PROP_TECHNOLOGY,
        POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
        POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
-       POWER_SUPPLY_PROP_VOLTAGE_NOW,
-       POWER_SUPPLY_PROP_CURRENT_NOW,
        POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
        POWER_SUPPLY_PROP_MODEL_NAME,
 };
 
-static int smb347_debugfs_show(struct seq_file *s, void *data)
+static bool smb347_volatile_reg(struct device *dev, unsigned int reg)
 {
-       struct smb347_charger *smb = s->private;
-       int ret;
-       u8 reg;
-
-       seq_printf(s, "Control registers:\n");
-       seq_printf(s, "==================\n");
-       for (reg = CFG_CHARGE_CURRENT; reg <= CFG_ADDRESS; reg++) {
-               ret = smb347_read(smb, reg);
-               seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
-       }
-       seq_printf(s, "\n");
-
-       seq_printf(s, "Command registers:\n");
-       seq_printf(s, "==================\n");
-       ret = smb347_read(smb, CMD_A);
-       seq_printf(s, "0x%02x:\t0x%02x\n", CMD_A, ret);
-       ret = smb347_read(smb, CMD_B);
-       seq_printf(s, "0x%02x:\t0x%02x\n", CMD_B, ret);
-       ret = smb347_read(smb, CMD_C);
-       seq_printf(s, "0x%02x:\t0x%02x\n", CMD_C, ret);
-       seq_printf(s, "\n");
-
-       seq_printf(s, "Interrupt status registers:\n");
-       seq_printf(s, "===========================\n");
-       for (reg = IRQSTAT_A; reg <= IRQSTAT_F; reg++) {
-               ret = smb347_read(smb, reg);
-               seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
-       }
-       seq_printf(s, "\n");
-
-       seq_printf(s, "Status registers:\n");
-       seq_printf(s, "=================\n");
-       for (reg = STAT_A; reg <= STAT_E; reg++) {
-               ret = smb347_read(smb, reg);
-               seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
+       switch (reg) {
+       case IRQSTAT_A:
+       case IRQSTAT_C:
+       case IRQSTAT_E:
+       case IRQSTAT_F:
+       case STAT_A:
+       case STAT_B:
+       case STAT_C:
+       case STAT_E:
+               return true;
        }
 
-       return 0;
+       return false;
 }
 
-static int smb347_debugfs_open(struct inode *inode, struct file *file)
+static bool smb347_readable_reg(struct device *dev, unsigned int reg)
 {
-       return single_open(file, smb347_debugfs_show, inode->i_private);
+       switch (reg) {
+       case CFG_CHARGE_CURRENT:
+       case CFG_CURRENT_LIMIT:
+       case CFG_FLOAT_VOLTAGE:
+       case CFG_STAT:
+       case CFG_PIN:
+       case CFG_THERM:
+       case CFG_SYSOK:
+       case CFG_OTHER:
+       case CFG_OTG:
+       case CFG_TEMP_LIMIT:
+       case CFG_FAULT_IRQ:
+       case CFG_STATUS_IRQ:
+       case CFG_ADDRESS:
+       case CMD_A:
+       case CMD_B:
+       case CMD_C:
+               return true;
+       }
+
+       return smb347_volatile_reg(dev, reg);
 }
 
-static const struct file_operations smb347_debugfs_fops = {
-       .open           = smb347_debugfs_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
+static const struct regmap_config smb347_regmap = {
+       .reg_bits       = 8,
+       .val_bits       = 8,
+       .max_register   = SMB347_MAX_REGISTER,
+       .volatile_reg   = smb347_volatile_reg,
+       .readable_reg   = smb347_readable_reg,
 };
 
 static int smb347_probe(struct i2c_client *client,
@@ -1178,28 +1033,45 @@ static int smb347_probe(struct i2c_client *client,
        i2c_set_clientdata(client, smb);
 
        mutex_init(&smb->lock);
-       smb->client = client;
+       smb->dev = &client->dev;
        smb->pdata = pdata;
 
+       smb->regmap = devm_regmap_init_i2c(client, &smb347_regmap);
+       if (IS_ERR(smb->regmap))
+               return PTR_ERR(smb->regmap);
+
        ret = smb347_hw_init(smb);
        if (ret < 0)
                return ret;
 
-       smb->mains.name = "smb347-mains";
-       smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
-       smb->mains.get_property = smb347_mains_get_property;
-       smb->mains.properties = smb347_mains_properties;
-       smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
-       smb->mains.supplied_to = battery;
-       smb->mains.num_supplicants = ARRAY_SIZE(battery);
-
-       smb->usb.name = "smb347-usb";
-       smb->usb.type = POWER_SUPPLY_TYPE_USB;
-       smb->usb.get_property = smb347_usb_get_property;
-       smb->usb.properties = smb347_usb_properties;
-       smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
-       smb->usb.supplied_to = battery;
-       smb->usb.num_supplicants = ARRAY_SIZE(battery);
+       if (smb->pdata->use_mains) {
+               smb->mains.name = "smb347-mains";
+               smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
+               smb->mains.get_property = smb347_mains_get_property;
+               smb->mains.properties = smb347_mains_properties;
+               smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
+               smb->mains.supplied_to = battery;
+               smb->mains.num_supplicants = ARRAY_SIZE(battery);
+               ret = power_supply_register(dev, &smb->mains);
+               if (ret < 0)
+                       return ret;
+       }
+
+       if (smb->pdata->use_usb) {
+               smb->usb.name = "smb347-usb";
+               smb->usb.type = POWER_SUPPLY_TYPE_USB;
+               smb->usb.get_property = smb347_usb_get_property;
+               smb->usb.properties = smb347_usb_properties;
+               smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
+               smb->usb.supplied_to = battery;
+               smb->usb.num_supplicants = ARRAY_SIZE(battery);
+               ret = power_supply_register(dev, &smb->usb);
+               if (ret < 0) {
+                       if (smb->pdata->use_mains)
+                               power_supply_unregister(&smb->mains);
+                       return ret;
+               }
+       }
 
        smb->battery.name = "smb347-battery";
        smb->battery.type = POWER_SUPPLY_TYPE_BATTERY;
@@ -1207,20 +1079,13 @@ static int smb347_probe(struct i2c_client *client,
        smb->battery.properties = smb347_battery_properties;
        smb->battery.num_properties = ARRAY_SIZE(smb347_battery_properties);
 
-       ret = power_supply_register(dev, &smb->mains);
-       if (ret < 0)
-               return ret;
-
-       ret = power_supply_register(dev, &smb->usb);
-       if (ret < 0) {
-               power_supply_unregister(&smb->mains);
-               return ret;
-       }
 
        ret = power_supply_register(dev, &smb->battery);
        if (ret < 0) {
-               power_supply_unregister(&smb->usb);
-               power_supply_unregister(&smb->mains);
+               if (smb->pdata->use_usb)
+                       power_supply_unregister(&smb->usb);
+               if (smb->pdata->use_mains)
+                       power_supply_unregister(&smb->mains);
                return ret;
        }
 
@@ -1229,15 +1094,15 @@ static int smb347_probe(struct i2c_client *client,
         * interrupt support here.
         */
        if (pdata->irq_gpio >= 0) {
-               ret = smb347_irq_init(smb);
+               ret = smb347_irq_init(smb, client);
                if (ret < 0) {
                        dev_warn(dev, "failed to initialize IRQ: %d\n", ret);
                        dev_warn(dev, "disabling IRQ support\n");
+               } else {
+                       smb347_irq_enable(smb);
                }
        }
 
-       smb->dentry = debugfs_create_file("smb347-regs", S_IRUSR, NULL, smb,
-                                         &smb347_debugfs_fops);
        return 0;
 }
 
@@ -1245,9 +1110,6 @@ static int smb347_remove(struct i2c_client *client)
 {
        struct smb347_charger *smb = i2c_get_clientdata(client);
 
-       if (!IS_ERR_OR_NULL(smb->dentry))
-               debugfs_remove(smb->dentry);
-
        if (client->irq) {
                smb347_irq_disable(smb);
                free_irq(client->irq, smb);
@@ -1255,8 +1117,10 @@ static int smb347_remove(struct i2c_client *client)
        }
 
        power_supply_unregister(&smb->battery);
-       power_supply_unregister(&smb->usb);
-       power_supply_unregister(&smb->mains);
+       if (smb->pdata->use_usb)
+               power_supply_unregister(&smb->usb);
+       if (smb->pdata->use_mains)
+               power_supply_unregister(&smb->mains);
        return 0;
 }
 
@@ -1275,17 +1139,7 @@ static struct i2c_driver smb347_driver = {
        .id_table     = smb347_id,
 };
 
-static int __init smb347_init(void)
-{
-       return i2c_add_driver(&smb347_driver);
-}
-module_init(smb347_init);
-
-static void __exit smb347_exit(void)
-{
-       i2c_del_driver(&smb347_driver);
-}
-module_exit(smb347_exit);
+module_i2c_driver(smb347_driver);
 
 MODULE_AUTHOR("Bruce E. Robertson <bruce.e.robertson@intel.com>");
 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
index 987332b71d8de3ddfbfab813800cac880e48e53d..fc1ad9551182602f8d485d5e18a2f56e0b057908 100644 (file)
@@ -565,7 +565,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
                            goto err_usb;
        }
 
-       irq = platform_get_irq_byname(pdev, "SYSLO");
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
        ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
                                   IRQF_TRIGGER_RISING, "System power low",
                                   power);
@@ -575,7 +575,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
                goto err_battery;
        }
 
-       irq = platform_get_irq_byname(pdev, "PWR SRC");
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
        ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
                                   IRQF_TRIGGER_RISING, "Power source",
                                   power);
@@ -586,7 +586,9 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
        }
 
        for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
-               irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
+               irq = wm831x_irq(wm831x,
+                                platform_get_irq_byname(pdev,
+                                                        wm831x_bat_irqs[i]));
                ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
                                           IRQF_TRIGGER_RISING,
                                           wm831x_bat_irqs[i],
@@ -606,10 +608,10 @@ err_bat_irq:
                irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
                free_irq(irq, power);
        }
-       irq = platform_get_irq_byname(pdev, "PWR SRC");
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
        free_irq(irq, power);
 err_syslo:
-       irq = platform_get_irq_byname(pdev, "SYSLO");
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
        free_irq(irq, power);
 err_battery:
        if (power->have_battery)
@@ -626,17 +628,20 @@ err_kmalloc:
 static __devexit int wm831x_power_remove(struct platform_device *pdev)
 {
        struct wm831x_power *wm831x_power = platform_get_drvdata(pdev);
+       struct wm831x *wm831x = wm831x_power->wm831x;
        int irq, i;
 
        for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
-               irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
+               irq = wm831x_irq(wm831x,
+                                platform_get_irq_byname(pdev,
+                                                        wm831x_bat_irqs[i]));
                free_irq(irq, wm831x_power);
        }
 
-       irq = platform_get_irq_byname(pdev, "PWR SRC");
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
        free_irq(irq, wm831x_power);
 
-       irq = platform_get_irq_byname(pdev, "SYSLO");
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
        free_irq(irq, wm831x_power);
 
        if (wm831x_power->have_battery)
index bc871923879303f5771a27c83f7c148850682ddd..6194d35ebb9740c0af7f999dcb7b73aec75d91ca 100644 (file)
@@ -22,6 +22,20 @@ config RAPIDIO_ENABLE_RX_TX_PORTS
          ports for Input/Output direction to allow other traffic
          than Maintenance transfers.
 
+config RAPIDIO_DMA_ENGINE
+       bool "DMA Engine support for RapidIO"
+       depends on RAPIDIO
+       select DMADEVICES
+       select DMA_ENGINE
+       help
+         Say Y here if you want to use the DMA Engine framework for RapidIO
+         data transfers to/from target RIO devices. RapidIO uses NREAD and
+         NWRITE (NWRITE_R, SWRITE) requests to transfer data between local
+         memory and memory on a remote target device. You need a DMA
+         controller capable of performing data transfers to/from RapidIO.
+
+         If you are unsure about this, say Y here.
+
 config RAPIDIO_DEBUG
        bool "RapidIO subsystem debug messages"
        depends on RAPIDIO
index 3b7b4e2dff7c8a07dd159c85037302c16678b8bd..7b62860f34f805842ab9fbe28ea35a6909f029b3 100644 (file)
@@ -3,3 +3,6 @@
 #
 
 obj-$(CONFIG_RAPIDIO_TSI721)   += tsi721.o
+ifeq ($(CONFIG_RAPIDIO_DMA_ENGINE),y)
+obj-$(CONFIG_RAPIDIO_TSI721)   += tsi721_dma.o
+endif
index 30d2072f480b72947c74401d5522fbb9d697d313..722246cf20ab2ed592a6a5ed5435b0f1320a5a44 100644 (file)
@@ -108,6 +108,7 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
                        u16 destid, u8 hopcount, u32 offset, int len,
                        u32 *data, int do_wr)
 {
+       void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
        struct tsi721_dma_desc *bd_ptr;
        u32 rd_count, swr_ptr, ch_stat;
        int i, err = 0;
@@ -116,10 +117,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
        if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
                return -EINVAL;
 
-       bd_ptr = priv->bdma[TSI721_DMACH_MAINT].bd_base;
+       bd_ptr = priv->mdma.bd_base;
 
-       rd_count = ioread32(
-                       priv->regs + TSI721_DMAC_DRDCNT(TSI721_DMACH_MAINT));
+       rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
 
        /* Initialize DMA descriptor */
        bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
@@ -134,19 +134,18 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
        mb();
 
        /* Start DMA operation */
-       iowrite32(rd_count + 2,
-               priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
-       ioread32(priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+       iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT);
+       ioread32(regs + TSI721_DMAC_DWRCNT);
        i = 0;
 
        /* Wait until DMA transfer is finished */
-       while ((ch_stat = ioread32(priv->regs +
-               TSI721_DMAC_STS(TSI721_DMACH_MAINT))) & TSI721_DMAC_STS_RUN) {
+       while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
+                                                       & TSI721_DMAC_STS_RUN) {
                udelay(1);
                if (++i >= 5000000) {
                        dev_dbg(&priv->pdev->dev,
                                "%s : DMA[%d] read timeout ch_status=%x\n",
-                               __func__, TSI721_DMACH_MAINT, ch_stat);
+                               __func__, priv->mdma.ch_id, ch_stat);
                        if (!do_wr)
                                *data = 0xffffffff;
                        err = -EIO;
@@ -162,13 +161,10 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
                        __func__, ch_stat);
                dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
                        do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
-               iowrite32(TSI721_DMAC_INT_ALL,
-                       priv->regs + TSI721_DMAC_INT(TSI721_DMACH_MAINT));
-               iowrite32(TSI721_DMAC_CTL_INIT,
-                       priv->regs + TSI721_DMAC_CTL(TSI721_DMACH_MAINT));
+               iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
+               iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
                udelay(10);
-               iowrite32(0, priv->regs +
-                               TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+               iowrite32(0, regs + TSI721_DMAC_DWRCNT);
                udelay(1);
                if (!do_wr)
                        *data = 0xffffffff;
@@ -184,8 +180,8 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
         * NOTE: Skipping check and clear FIFO entries because we are waiting
         * for transfer to be completed.
         */
-       swr_ptr = ioread32(priv->regs + TSI721_DMAC_DSWP(TSI721_DMACH_MAINT));
-       iowrite32(swr_ptr, priv->regs + TSI721_DMAC_DSRP(TSI721_DMACH_MAINT));
+       swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
+       iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
 err_out:
 
        return err;
@@ -541,6 +537,22 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
                        tsi721_pw_handler(mport);
        }
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+       if (dev_int & TSI721_DEV_INT_BDMA_CH) {
+               int ch;
+
+               if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
+                       dev_dbg(&priv->pdev->dev,
+                               "IRQ from DMA channel 0x%08x\n", dev_ch_int);
+
+                       for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
+                               if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
+                                       continue;
+                               tsi721_bdma_handler(&priv->bdma[ch]);
+                       }
+               }
+       }
+#endif
        return IRQ_HANDLED;
 }
 
@@ -553,18 +565,26 @@ static void tsi721_interrupts_init(struct tsi721_device *priv)
                priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
        iowrite32(TSI721_SR_CHINT_IDBQRCV,
                priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
-       iowrite32(TSI721_INT_SR2PC_CHAN(IDB_QUEUE),
-               priv->regs + TSI721_DEV_CHAN_INTE);
 
        /* Enable SRIO MAC interrupts */
        iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
                priv->regs + TSI721_RIO_EM_DEV_INT_EN);
 
+       /* Enable interrupts from channels in use */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+       intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
+               (TSI721_INT_BDMA_CHAN_M &
+                ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
+#else
+       intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
+#endif
+       iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE);
+
        if (priv->flags & TSI721_USING_MSIX)
                intr = TSI721_DEV_INT_SRIO;
        else
                intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
-                       TSI721_DEV_INT_SMSG_CH;
+                       TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
 
        iowrite32(intr, priv->regs + TSI721_DEV_INTE);
        ioread32(priv->regs + TSI721_DEV_INTE);
@@ -715,12 +735,29 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
                                        TSI721_MSIX_OMSG_INT(i);
        }
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+       /*
+        * Initialize MSI-X entries for Block DMA Engine:
+        * this driver supports XXX DMA channels
+        * (one is reserved for SRIO maintenance transactions)
+        */
+       for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+               entries[TSI721_VECT_DMA0_DONE + i].entry =
+                                       TSI721_MSIX_DMACH_DONE(i);
+               entries[TSI721_VECT_DMA0_INT + i].entry =
+                                       TSI721_MSIX_DMACH_INT(i);
+       }
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
        err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
        if (err) {
                if (err > 0)
                        dev_info(&priv->pdev->dev,
                                 "Only %d MSI-X vectors available, "
                                 "not using MSI-X\n", err);
+               else
+                       dev_err(&priv->pdev->dev,
+                               "Failed to enable MSI-X (err=%d)\n", err);
                return err;
        }
 
@@ -760,6 +797,22 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
                         i, pci_name(priv->pdev));
        }
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+       for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+               priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
+                               entries[TSI721_VECT_DMA0_DONE + i].vector;
+               snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
+                        IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
+                        i, pci_name(priv->pdev));
+
+               priv->msix[TSI721_VECT_DMA0_INT + i].vector =
+                               entries[TSI721_VECT_DMA0_INT + i].vector;
+               snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
+                        IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
+                        i, pci_name(priv->pdev));
+       }
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
        return 0;
 }
 #endif /* CONFIG_PCI_MSI */
@@ -888,20 +941,34 @@ static void tsi721_doorbell_free(struct tsi721_device *priv)
        priv->idb_base = NULL;
 }
 
-static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
+/**
+ * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
+ * @priv: pointer to tsi721 private data
+ *
+ * Initialize BDMA channel allocated for RapidIO maintenance read/write
+ * request generation
+ * Returns %0 on success or %-ENOMEM on failure.
+ */
+static int tsi721_bdma_maint_init(struct tsi721_device *priv)
 {
        struct tsi721_dma_desc *bd_ptr;
        u64             *sts_ptr;
        dma_addr_t      bd_phys, sts_phys;
        int             sts_size;
-       int             bd_num = priv->bdma[chnum].bd_num;
+       int             bd_num = 2;
+       void __iomem    *regs;
 
-       dev_dbg(&priv->pdev->dev, "Init Block DMA Engine, CH%d\n", chnum);
+       dev_dbg(&priv->pdev->dev,
+               "Init Block DMA Engine for Maintenance requests, CH%d\n",
+               TSI721_DMACH_MAINT);
 
        /*
         * Initialize DMA channel for maintenance requests
         */
 
+       priv->mdma.ch_id = TSI721_DMACH_MAINT;
+       regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
+
        /* Allocate space for DMA descriptors */
        bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
                                        bd_num * sizeof(struct tsi721_dma_desc),
@@ -909,8 +976,9 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        if (!bd_ptr)
                return -ENOMEM;
 
-       priv->bdma[chnum].bd_phys = bd_phys;
-       priv->bdma[chnum].bd_base = bd_ptr;
+       priv->mdma.bd_num = bd_num;
+       priv->mdma.bd_phys = bd_phys;
+       priv->mdma.bd_base = bd_ptr;
 
        dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
                bd_ptr, (unsigned long long)bd_phys);
@@ -927,13 +995,13 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
                dma_free_coherent(&priv->pdev->dev,
                                  bd_num * sizeof(struct tsi721_dma_desc),
                                  bd_ptr, bd_phys);
-               priv->bdma[chnum].bd_base = NULL;
+               priv->mdma.bd_base = NULL;
                return -ENOMEM;
        }
 
-       priv->bdma[chnum].sts_phys = sts_phys;
-       priv->bdma[chnum].sts_base = sts_ptr;
-       priv->bdma[chnum].sts_size = sts_size;
+       priv->mdma.sts_phys = sts_phys;
+       priv->mdma.sts_base = sts_ptr;
+       priv->mdma.sts_size = sts_size;
 
        dev_dbg(&priv->pdev->dev,
                "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
@@ -946,83 +1014,61 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
 
        /* Setup DMA descriptor pointers */
-       iowrite32(((u64)bd_phys >> 32),
-               priv->regs + TSI721_DMAC_DPTRH(chnum));
+       iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH);
        iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
-               priv->regs + TSI721_DMAC_DPTRL(chnum));
+               regs + TSI721_DMAC_DPTRL);
 
        /* Setup descriptor status FIFO */
-       iowrite32(((u64)sts_phys >> 32),
-               priv->regs + TSI721_DMAC_DSBH(chnum));
+       iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
        iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
-               priv->regs + TSI721_DMAC_DSBL(chnum));
+               regs + TSI721_DMAC_DSBL);
        iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
-               priv->regs + TSI721_DMAC_DSSZ(chnum));
+               regs + TSI721_DMAC_DSSZ);
 
        /* Clear interrupt bits */
-       iowrite32(TSI721_DMAC_INT_ALL,
-               priv->regs + TSI721_DMAC_INT(chnum));
+       iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
 
-       ioread32(priv->regs + TSI721_DMAC_INT(chnum));
+       ioread32(regs + TSI721_DMAC_INT);
 
        /* Toggle DMA channel initialization */
-       iowrite32(TSI721_DMAC_CTL_INIT, priv->regs + TSI721_DMAC_CTL(chnum));
-       ioread32(priv->regs + TSI721_DMAC_CTL(chnum));
+       iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
+       ioread32(regs + TSI721_DMAC_CTL);
        udelay(10);
 
        return 0;
 }
 
-static int tsi721_bdma_ch_free(struct tsi721_device *priv, int chnum)
+static int tsi721_bdma_maint_free(struct tsi721_device *priv)
 {
        u32 ch_stat;
+       struct tsi721_bdma_maint *mdma = &priv->mdma;
+       void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);
 
-       if (priv->bdma[chnum].bd_base == NULL)
+       if (mdma->bd_base == NULL)
                return 0;
 
        /* Check if DMA channel still running */
-       ch_stat = ioread32(priv->regs + TSI721_DMAC_STS(chnum));
+       ch_stat = ioread32(regs + TSI721_DMAC_STS);
        if (ch_stat & TSI721_DMAC_STS_RUN)
                return -EFAULT;
 
        /* Put DMA channel into init state */
-       iowrite32(TSI721_DMAC_CTL_INIT,
-               priv->regs + TSI721_DMAC_CTL(chnum));
+       iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
 
        /* Free space allocated for DMA descriptors */
        dma_free_coherent(&priv->pdev->dev,
-               priv->bdma[chnum].bd_num * sizeof(struct tsi721_dma_desc),
-               priv->bdma[chnum].bd_base, priv->bdma[chnum].bd_phys);
-       priv->bdma[chnum].bd_base = NULL;
+               mdma->bd_num * sizeof(struct tsi721_dma_desc),
+               mdma->bd_base, mdma->bd_phys);
+       mdma->bd_base = NULL;
 
        /* Free space allocated for status FIFO */
        dma_free_coherent(&priv->pdev->dev,
-               priv->bdma[chnum].sts_size * sizeof(struct tsi721_dma_sts),
-               priv->bdma[chnum].sts_base, priv->bdma[chnum].sts_phys);
-       priv->bdma[chnum].sts_base = NULL;
-       return 0;
-}
-
-static int tsi721_bdma_init(struct tsi721_device *priv)
-{
-       /* Initialize BDMA channel allocated for RapidIO maintenance read/write
-        * request generation
-        */
-       priv->bdma[TSI721_DMACH_MAINT].bd_num = 2;
-       if (tsi721_bdma_ch_init(priv, TSI721_DMACH_MAINT)) {
-               dev_err(&priv->pdev->dev, "Unable to initialize maintenance DMA"
-                       " channel %d, aborting\n", TSI721_DMACH_MAINT);
-               return -ENOMEM;
-       }
-
+               mdma->sts_size * sizeof(struct tsi721_dma_sts),
+               mdma->sts_base, mdma->sts_phys);
+       mdma->sts_base = NULL;
        return 0;
 }
 
-static void tsi721_bdma_free(struct tsi721_device *priv)
-{
-       tsi721_bdma_ch_free(priv, TSI721_DMACH_MAINT);
-}
-
 /* Enable Inbound Messaging Interrupts */
 static void
 tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
@@ -2035,7 +2081,8 @@ static void tsi721_disable_ints(struct tsi721_device *priv)
 
        /* Disable all BDMA Channel interrupts */
        for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
-               iowrite32(0, priv->regs + TSI721_DMAC_INTE(ch));
+               iowrite32(0,
+                       priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);
 
        /* Disable all general BDMA interrupts */
        iowrite32(0, priv->regs + TSI721_BDMA_INTE);
@@ -2104,6 +2151,7 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
        mport->phy_type = RIO_PHY_SERIAL;
        mport->priv = (void *)priv;
        mport->phys_efptr = 0x100;
+       priv->mport = mport;
 
        INIT_LIST_HEAD(&mport->dbells);
 
@@ -2129,17 +2177,21 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
        if (!err) {
                tsi721_interrupts_init(priv);
                ops->pwenable = tsi721_pw_enable;
-       } else
+       } else {
                dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
                        "vector %02X err=0x%x\n", pdev->irq, err);
+               goto err_exit;
+       }
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+       tsi721_register_dma(priv);
+#endif
        /* Enable SRIO link */
        iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
                  TSI721_DEVCTL_SRBOOT_CMPL,
                  priv->regs + TSI721_DEVCTL);
 
        rio_register_mport(mport);
-       priv->mport = mport;
 
        if (mport->host_deviceid >= 0)
                iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
@@ -2149,6 +2201,11 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
                iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
 
        return 0;
+
+err_exit:
+       kfree(mport);
+       kfree(ops);
+       return err;
 }
 
 static int __devinit tsi721_probe(struct pci_dev *pdev,
@@ -2294,7 +2351,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
        tsi721_init_pc2sr_mapping(priv);
        tsi721_init_sr2pc_mapping(priv);
 
-       if (tsi721_bdma_init(priv)) {
+       if (tsi721_bdma_maint_init(priv)) {
                dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
                err = -ENOMEM;
                goto err_unmap_bars;
@@ -2319,7 +2376,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
 err_free_consistent:
        tsi721_doorbell_free(priv);
 err_free_bdma:
-       tsi721_bdma_free(priv);
+       tsi721_bdma_maint_free(priv);
 err_unmap_bars:
        if (priv->regs)
                iounmap(priv->regs);
index 1c226b31af13fc9e196d221a4e3557f91ae9ae6c..59de9d7be3460a08250bcd197dc858bed0af6dee 100644 (file)
 #define TSI721_DEV_INTE                0x29840
 #define TSI721_DEV_INT         0x29844
 #define TSI721_DEV_INTSET      0x29848
+#define TSI721_DEV_INT_BDMA_CH 0x00002000
+#define TSI721_DEV_INT_BDMA_NCH        0x00001000
 #define TSI721_DEV_INT_SMSG_CH 0x00000800
 #define TSI721_DEV_INT_SMSG_NCH        0x00000400
 #define TSI721_DEV_INT_SR2PC_CH        0x00000200
 #define TSI721_INT_IMSG_CHAN(x)        (1 << (16 + (x)))
 #define TSI721_INT_OMSG_CHAN_M 0x0000ff00
 #define TSI721_INT_OMSG_CHAN(x)        (1 << (8 + (x)))
+#define TSI721_INT_BDMA_CHAN_M 0x000000ff
+#define TSI721_INT_BDMA_CHAN(x)        (1 << (x))
 
 /*
  * PC2SR block registers
  *   x = 0..7
  */
 
-#define TSI721_DMAC_DWRCNT(x)  (0x51000 + (x) * 0x1000)
-#define TSI721_DMAC_DRDCNT(x)  (0x51004 + (x) * 0x1000)
+#define TSI721_DMAC_BASE(x)    (0x51000 + (x) * 0x1000)
 
-#define TSI721_DMAC_CTL(x)     (0x51008 + (x) * 0x1000)
+#define TSI721_DMAC_DWRCNT     0x000
+#define TSI721_DMAC_DRDCNT     0x004
+
+#define TSI721_DMAC_CTL                0x008
 #define TSI721_DMAC_CTL_SUSP   0x00000002
 #define TSI721_DMAC_CTL_INIT   0x00000001
 
-#define TSI721_DMAC_INT(x)     (0x5100c + (x) * 0x1000)
+#define TSI721_DMAC_INT                0x00c
 #define TSI721_DMAC_INT_STFULL 0x00000010
 #define TSI721_DMAC_INT_DONE   0x00000008
 #define TSI721_DMAC_INT_SUSP   0x00000004
 #define TSI721_DMAC_INT_IOFDONE        0x00000001
 #define TSI721_DMAC_INT_ALL    0x0000001f
 
-#define TSI721_DMAC_INTSET(x)  (0x51010 + (x) * 0x1000)
+#define TSI721_DMAC_INTSET     0x010
 
-#define TSI721_DMAC_STS(x)     (0x51014 + (x) * 0x1000)
+#define TSI721_DMAC_STS                0x014
 #define TSI721_DMAC_STS_ABORT  0x00400000
 #define TSI721_DMAC_STS_RUN    0x00200000
 #define TSI721_DMAC_STS_CS     0x001f0000
 
-#define TSI721_DMAC_INTE(x)    (0x51018 + (x) * 0x1000)
+#define TSI721_DMAC_INTE       0x018
 
-#define TSI721_DMAC_DPTRL(x)   (0x51024 + (x) * 0x1000)
+#define TSI721_DMAC_DPTRL      0x024
 #define TSI721_DMAC_DPTRL_MASK 0xffffffe0
 
-#define TSI721_DMAC_DPTRH(x)   (0x51028 + (x) * 0x1000)
+#define TSI721_DMAC_DPTRH      0x028
 
-#define TSI721_DMAC_DSBL(x)    (0x5102c + (x) * 0x1000)
+#define TSI721_DMAC_DSBL       0x02c
 #define TSI721_DMAC_DSBL_MASK  0xffffffc0
 
-#define TSI721_DMAC_DSBH(x)    (0x51030 + (x) * 0x1000)
+#define TSI721_DMAC_DSBH       0x030
 
-#define TSI721_DMAC_DSSZ(x)    (0x51034 + (x) * 0x1000)
+#define TSI721_DMAC_DSSZ       0x034
 #define TSI721_DMAC_DSSZ_SIZE_M        0x0000000f
 #define TSI721_DMAC_DSSZ_SIZE(size)    (__fls(size) - 4)
 
-
-#define TSI721_DMAC_DSRP(x)    (0x51038 + (x) * 0x1000)
+#define TSI721_DMAC_DSRP       0x038
 #define TSI721_DMAC_DSRP_MASK  0x0007ffff
 
-#define TSI721_DMAC_DSWP(x)    (0x5103c + (x) * 0x1000)
+#define TSI721_DMAC_DSWP       0x03c
 #define TSI721_DMAC_DSWP_MASK  0x0007ffff
 
 #define TSI721_BDMA_INTE       0x5f000
@@ -612,6 +617,8 @@ enum dma_rtype {
 #define TSI721_DMACH_MAINT     0       /* DMA channel for maint requests */
 #define TSI721_DMACH_MAINT_NBD 32      /* Number of BDs for maint requests */
 
+#define TSI721_DMACH_DMA       1       /* DMA channel for data transfers */
+
 #define MSG_DMA_ENTRY_INX_TO_SIZE(x)   ((0x10 << (x)) & 0xFFFF0)
 
 enum tsi721_smsg_int_flag {
@@ -626,7 +633,48 @@ enum tsi721_smsg_int_flag {
 
 /* Structures */
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+struct tsi721_tx_desc {
+       struct dma_async_tx_descriptor  txd;
+       struct tsi721_dma_desc          *hw_desc;
+       u16                             destid;
+       /* low 64-bits of 66-bit RIO address */
+       u64                             rio_addr;
+       /* upper 2-bits of 66-bit RIO address */
+       u8                              rio_addr_u;
+       bool                            interrupt;
+       struct list_head                desc_node;
+       struct list_head                tx_list;
+};
+
 struct tsi721_bdma_chan {
+       int             id;
+       void __iomem    *regs;
+       int             bd_num;         /* number of buffer descriptors */
+       void            *bd_base;       /* start of DMA descriptors */
+       dma_addr_t      bd_phys;
+       void            *sts_base;      /* start of DMA BD status FIFO */
+       dma_addr_t      sts_phys;
+       int             sts_size;
+       u32             sts_rdptr;
+       u32             wr_count;
+       u32             wr_count_next;
+
+       struct dma_chan         dchan;
+       struct tsi721_tx_desc   *tx_desc;
+       spinlock_t              lock;
+       struct list_head        active_list;
+       struct list_head        queue;
+       struct list_head        free_list;
+       dma_cookie_t            completed_cookie;
+       struct tasklet_struct   tasklet;
+};
+
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
+struct tsi721_bdma_maint {
+       int             ch_id;          /* BDMA channel number */
        int             bd_num;         /* number of buffer descriptors */
        void            *bd_base;       /* start of DMA descriptors */
        dma_addr_t      bd_phys;
@@ -721,6 +769,24 @@ enum tsi721_msix_vect {
        TSI721_VECT_IMB1_INT,
        TSI721_VECT_IMB2_INT,
        TSI721_VECT_IMB3_INT,
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+       TSI721_VECT_DMA0_DONE,
+       TSI721_VECT_DMA1_DONE,
+       TSI721_VECT_DMA2_DONE,
+       TSI721_VECT_DMA3_DONE,
+       TSI721_VECT_DMA4_DONE,
+       TSI721_VECT_DMA5_DONE,
+       TSI721_VECT_DMA6_DONE,
+       TSI721_VECT_DMA7_DONE,
+       TSI721_VECT_DMA0_INT,
+       TSI721_VECT_DMA1_INT,
+       TSI721_VECT_DMA2_INT,
+       TSI721_VECT_DMA3_INT,
+       TSI721_VECT_DMA4_INT,
+       TSI721_VECT_DMA5_INT,
+       TSI721_VECT_DMA6_INT,
+       TSI721_VECT_DMA7_INT,
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
        TSI721_VECT_MAX
 };
 
@@ -754,7 +820,11 @@ struct tsi721_device {
        u32             pw_discard_count;
 
        /* BDMA Engine */
+       struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
        struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM];
+#endif
 
        /* Inbound Messaging */
        int             imsg_init[TSI721_IMSG_CHNUM];
@@ -765,4 +835,9 @@ struct tsi721_device {
        struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM];
 };
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan);
+extern int __devinit tsi721_register_dma(struct tsi721_device *priv);
+#endif
+
 #endif
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
new file mode 100644 (file)
index 0000000..92e06a5
--- /dev/null
@@ -0,0 +1,823 @@
+/*
+ * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
+ *
+ * Copyright 2011 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/delay.h>
+
+#include "tsi721.h"
+
+static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
+{
+       return container_of(chan, struct tsi721_bdma_chan, dchan);
+}
+
+static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
+{
+       return container_of(ddev, struct rio_mport, dma)->priv;
+}
+
+static inline
+struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
+{
+       return container_of(txd, struct tsi721_tx_desc, txd);
+}
+
+static inline
+struct tsi721_tx_desc *tsi721_dma_first_active(
+                               struct tsi721_bdma_chan *bdma_chan)
+{
+       return list_first_entry(&bdma_chan->active_list,
+                               struct tsi721_tx_desc, desc_node);
+}
+
+static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
+{
+       struct tsi721_dma_desc *bd_ptr;
+       struct device *dev = bdma_chan->dchan.device->dev;
+       u64             *sts_ptr;
+       dma_addr_t      bd_phys;
+       dma_addr_t      sts_phys;
+       int             sts_size;
+       int             bd_num = bdma_chan->bd_num;
+
+       dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);
+
+       /* Allocate space for DMA descriptors */
+       bd_ptr = dma_zalloc_coherent(dev,
+                               bd_num * sizeof(struct tsi721_dma_desc),
+                               &bd_phys, GFP_KERNEL);
+       if (!bd_ptr)
+               return -ENOMEM;
+
+       bdma_chan->bd_phys = bd_phys;
+       bdma_chan->bd_base = bd_ptr;
+
+       dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
+               bd_ptr, (unsigned long long)bd_phys);
+
+       /* Allocate space for descriptor status FIFO */
+       sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
+                                       bd_num : TSI721_DMA_MINSTSSZ;
+       sts_size = roundup_pow_of_two(sts_size);
+       sts_ptr = dma_zalloc_coherent(dev,
+                                    sts_size * sizeof(struct tsi721_dma_sts),
+                                    &sts_phys, GFP_KERNEL);
+       if (!sts_ptr) {
+               /* Free space allocated for DMA descriptors */
+               dma_free_coherent(dev,
+                                 bd_num * sizeof(struct tsi721_dma_desc),
+                                 bd_ptr, bd_phys);
+               bdma_chan->bd_base = NULL;
+               return -ENOMEM;
+       }
+
+       bdma_chan->sts_phys = sts_phys;
+       bdma_chan->sts_base = sts_ptr;
+       bdma_chan->sts_size = sts_size;
+
+       dev_dbg(dev,
+               "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
+               sts_ptr, (unsigned long long)sts_phys, sts_size);
+
+       /* Initialize DMA descriptors ring */
+       bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
+       bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
+                                                TSI721_DMAC_DPTRL_MASK);
+       bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
+
+       /* Setup DMA descriptor pointers */
+       iowrite32(((u64)bd_phys >> 32),
+               bdma_chan->regs + TSI721_DMAC_DPTRH);
+       iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
+               bdma_chan->regs + TSI721_DMAC_DPTRL);
+
+       /* Setup descriptor status FIFO */
+       iowrite32(((u64)sts_phys >> 32),
+               bdma_chan->regs + TSI721_DMAC_DSBH);
+       iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
+               bdma_chan->regs + TSI721_DMAC_DSBL);
+       iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
+               bdma_chan->regs + TSI721_DMAC_DSSZ);
+
+       /* Clear interrupt bits */
+       iowrite32(TSI721_DMAC_INT_ALL,
+               bdma_chan->regs + TSI721_DMAC_INT);
+
+       ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+
+       /* Toggle DMA channel initialization */
+       iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
+       ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
+       bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
+       bdma_chan->sts_rdptr = 0;
+       udelay(10);
+
+       return 0;
+}
+
+static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
+{
+       u32 ch_stat;
+
+       if (bdma_chan->bd_base == NULL)
+               return 0;
+
+       /* Check if DMA channel still running */
+       ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+       if (ch_stat & TSI721_DMAC_STS_RUN)
+               return -EFAULT;
+
+       /* Put DMA channel into init state */
+       iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
+
+       /* Free space allocated for DMA descriptors */
+       dma_free_coherent(bdma_chan->dchan.device->dev,
+               bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
+               bdma_chan->bd_base, bdma_chan->bd_phys);
+       bdma_chan->bd_base = NULL;
+
+       /* Free space allocated for status FIFO */
+       dma_free_coherent(bdma_chan->dchan.device->dev,
+               bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
+               bdma_chan->sts_base, bdma_chan->sts_phys);
+       bdma_chan->sts_base = NULL;
+       return 0;
+}
+
+static void
+tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
+{
+       if (enable) {
+               /* Clear pending BDMA channel interrupts */
+               iowrite32(TSI721_DMAC_INT_ALL,
+                       bdma_chan->regs + TSI721_DMAC_INT);
+               ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+               /* Enable BDMA channel interrupts */
+               iowrite32(TSI721_DMAC_INT_ALL,
+                       bdma_chan->regs + TSI721_DMAC_INTE);
+       } else {
+               /* Disable BDMA channel interrupts */
+               iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+               /* Clear pending BDMA channel interrupts */
+               iowrite32(TSI721_DMAC_INT_ALL,
+                       bdma_chan->regs + TSI721_DMAC_INT);
+       }
+
+}
+
+static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
+{
+       u32 sts;
+
+       sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+       return ((sts & TSI721_DMAC_STS_RUN) == 0);
+}
+
+void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
+{
+       /* Disable BDMA channel interrupts */
+       iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+
+       tasklet_schedule(&bdma_chan->tasklet);
+}
+
+#ifdef CONFIG_PCI_MSI
+/**
+ * tsi721_omsg_msix - MSI-X interrupt handler for BDMA channels
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
+ *
+ * Handles BDMA channel interrupts signaled using MSI-X.
+ */
+static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
+{
+       struct tsi721_bdma_chan *bdma_chan = ptr;
+
+       tsi721_bdma_handler(bdma_chan);
+       return IRQ_HANDLED;
+}
+#endif /* CONFIG_PCI_MSI */
+
+/* Must be called with the spinlock held */
+static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
+{
+       if (!tsi721_dma_is_idle(bdma_chan)) {
+               dev_err(bdma_chan->dchan.device->dev,
+                       "BUG: Attempt to start non-idle channel\n");
+               return;
+       }
+
+       if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
+               dev_err(bdma_chan->dchan.device->dev,
+                       "BUG: Attempt to start DMA with no BDs ready\n");
+               return;
+       }
+
+       dev_dbg(bdma_chan->dchan.device->dev,
+               "tx_chan: %p, chan: %d, regs: %p\n",
+               bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);
+
+       iowrite32(bdma_chan->wr_count_next,
+               bdma_chan->regs + TSI721_DMAC_DWRCNT);
+       ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);
+
+       bdma_chan->wr_count = bdma_chan->wr_count_next;
+}
+
+static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
+                           struct tsi721_tx_desc *desc)
+{
+       dev_dbg(bdma_chan->dchan.device->dev,
+               "Put desc: %p into free list\n", desc);
+
+       if (desc) {
+               spin_lock_bh(&bdma_chan->lock);
+               list_splice_init(&desc->tx_list, &bdma_chan->free_list);
+               list_add(&desc->desc_node, &bdma_chan->free_list);
+               bdma_chan->wr_count_next = bdma_chan->wr_count;
+               spin_unlock_bh(&bdma_chan->lock);
+       }
+}
+
+static
+struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
+{
+       struct tsi721_tx_desc *tx_desc, *_tx_desc;
+       struct tsi721_tx_desc *ret = NULL;
+       int i;
+
+       spin_lock_bh(&bdma_chan->lock);
+       list_for_each_entry_safe(tx_desc, _tx_desc,
+                                &bdma_chan->free_list, desc_node) {
+               if (async_tx_test_ack(&tx_desc->txd)) {
+                       list_del(&tx_desc->desc_node);
+                       ret = tx_desc;
+                       break;
+               }
+               dev_dbg(bdma_chan->dchan.device->dev,
+                       "desc %p not ACKed\n", tx_desc);
+       }
+
+       i = bdma_chan->wr_count_next % bdma_chan->bd_num;
+       if (i == bdma_chan->bd_num - 1) {
+               i = 0;
+               bdma_chan->wr_count_next++; /* skip link descriptor */
+       }
+
+       bdma_chan->wr_count_next++;
+       tx_desc->txd.phys = bdma_chan->bd_phys +
+                               i * sizeof(struct tsi721_dma_desc);
+       tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
+
+       spin_unlock_bh(&bdma_chan->lock);
+
+       return ret;
+}
+
+static int
+tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
+       struct tsi721_tx_desc *desc, struct scatterlist *sg,
+       enum dma_rtype rtype, u32 sys_size)
+{
+       struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
+       u64 rio_addr;
+
+       if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
+               dev_err(bdma_chan->dchan.device->dev,
+                       "SG element is too large\n");
+               return -EINVAL;
+       }
+
+       dev_dbg(bdma_chan->dchan.device->dev,
+               "desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
+               (u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
+               sg_dma_len(sg));
+
+       dev_dbg(bdma_chan->dchan.device->dev,
+               "bd_ptr = %p did=%d raddr=0x%llx\n",
+               bd_ptr, desc->destid, desc->rio_addr);
+
+       /* Initialize DMA descriptor */
+       bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
+                                       (rtype << 19) | desc->destid);
+       if (desc->interrupt)
+               bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
+       bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
+                                       (sys_size << 26) | sg_dma_len(sg));
+       rio_addr = (desc->rio_addr >> 2) |
+                               ((u64)(desc->rio_addr_u & 0x3) << 62);
+       bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
+       bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
+       bd_ptr->t1.bufptr_lo = cpu_to_le32(
+                                       (u64)sg_dma_address(sg) & 0xffffffff);
+       bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
+       bd_ptr->t1.s_dist = 0;
+       bd_ptr->t1.s_size = 0;
+
+       return 0;
+}
+
+static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
+                                     struct tsi721_tx_desc *desc)
+{
+       struct dma_async_tx_descriptor *txd = &desc->txd;
+       dma_async_tx_callback callback = txd->callback;
+       void *param = txd->callback_param;
+
+       list_splice_init(&desc->tx_list, &bdma_chan->free_list);
+       list_move(&desc->desc_node, &bdma_chan->free_list);
+       bdma_chan->completed_cookie = txd->cookie;
+
+       if (callback)
+               callback(param);
+}
+
+static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
+{
+       struct tsi721_tx_desc *desc, *_d;
+       LIST_HEAD(list);
+
+       BUG_ON(!tsi721_dma_is_idle(bdma_chan));
+
+       if (!list_empty(&bdma_chan->queue))
+               tsi721_start_dma(bdma_chan);
+
+       list_splice_init(&bdma_chan->active_list, &list);
+       list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);
+
+       list_for_each_entry_safe(desc, _d, &list, desc_node)
+               tsi721_dma_chain_complete(bdma_chan, desc);
+}
+
+static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
+{
+       u32 srd_ptr;
+       u64 *sts_ptr;
+       int i, j;
+
+       /* Check and clear descriptor status FIFO entries */
+       srd_ptr = bdma_chan->sts_rdptr;
+       sts_ptr = bdma_chan->sts_base;
+       j = srd_ptr * 8;
+       while (sts_ptr[j]) {
+               for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
+                       sts_ptr[j] = 0;
+
+               ++srd_ptr;
+               srd_ptr %= bdma_chan->sts_size;
+               j = srd_ptr * 8;
+       }
+
+       iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
+       bdma_chan->sts_rdptr = srd_ptr;
+}
+
+static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
+{
+       if (list_empty(&bdma_chan->active_list) ||
+               list_is_singular(&bdma_chan->active_list)) {
+               dev_dbg(bdma_chan->dchan.device->dev,
+                       "%s: Active_list empty\n", __func__);
+               tsi721_dma_complete_all(bdma_chan);
+       } else {
+               dev_dbg(bdma_chan->dchan.device->dev,
+                       "%s: Active_list NOT empty\n", __func__);
+               tsi721_dma_chain_complete(bdma_chan,
+                                       tsi721_dma_first_active(bdma_chan));
+               tsi721_start_dma(bdma_chan);
+       }
+}
+
+static void tsi721_dma_tasklet(unsigned long data)
+{
+       struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
+       u32 dmac_int, dmac_sts;
+
+       dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+       dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
+               __func__, bdma_chan->id, dmac_int);
+       /* Clear channel interrupts */
+       iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);
+
+       if (dmac_int & TSI721_DMAC_INT_ERR) {
+               dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+               dev_err(bdma_chan->dchan.device->dev,
+                       "%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
+                       __func__, bdma_chan->id, dmac_sts);
+       }
+
+       if (dmac_int & TSI721_DMAC_INT_STFULL) {
+               dev_err(bdma_chan->dchan.device->dev,
+                       "%s: DMAC%d descriptor status FIFO is full\n",
+                       __func__, bdma_chan->id);
+       }
+
+       if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
+               tsi721_clr_stat(bdma_chan);
+               spin_lock(&bdma_chan->lock);
+               tsi721_advance_work(bdma_chan);
+               spin_unlock(&bdma_chan->lock);
+       }
+
+       /* Re-Enable BDMA channel interrupts */
+       iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
+}
+
+static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+       struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
+       struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
+       dma_cookie_t cookie;
+
+       spin_lock_bh(&bdma_chan->lock);
+
+       cookie = txd->chan->cookie;
+       if (++cookie < 0)
+               cookie = 1;
+       txd->chan->cookie = cookie;
+       txd->cookie = cookie;
+
+       if (list_empty(&bdma_chan->active_list)) {
+               list_add_tail(&desc->desc_node, &bdma_chan->active_list);
+               tsi721_start_dma(bdma_chan);
+       } else {
+               list_add_tail(&desc->desc_node, &bdma_chan->queue);
+       }
+
+       spin_unlock_bh(&bdma_chan->lock);
+       return cookie;
+}
+
+/*
+ * tsi721_alloc_chan_resources - DMA_ENGINE callback: set up a BDMA channel.
+ *
+ * Initializes the hardware buffer-descriptor ring, allocates the matching
+ * array of logical TX descriptors, requests the per-channel MSI-X vectors
+ * when running in MSI-X mode, then enables the channel tasklet and
+ * interrupts.
+ *
+ * Returns the number of usable logical descriptors (bd_num - 1) on success
+ * or a negative errno on failure.
+ */
+static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
+{
+       struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+#ifdef CONFIG_PCI_MSI
+       struct tsi721_device *priv = to_tsi721(dchan->device);
+#endif
+       struct tsi721_tx_desc *desc = NULL;
+       LIST_HEAD(tmp_list);
+       int i;
+       int rc;
+
+       /* Channel already initialized - nothing more to do */
+       if (bdma_chan->bd_base)
+               return bdma_chan->bd_num - 1;
+
+       /* Initialize BDMA channel */
+       if (tsi721_bdma_ch_init(bdma_chan)) {
+               dev_err(dchan->device->dev, "Unable to initialize data DMA"
+                       " channel %d, aborting\n", bdma_chan->id);
+               return -ENOMEM;
+       }
+
+       /* Allocate matching number of logical descriptors */
+       desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
+                       GFP_KERNEL);
+       if (!desc) {
+               dev_err(dchan->device->dev,
+                       "Failed to allocate logical descriptors\n");
+               rc = -ENOMEM;
+               goto err_out;
+       }
+
+       bdma_chan->tx_desc = desc;
+
+       /* Wire each logical descriptor to this channel and collect them on
+        * a temporary list so the channel lock is held only for the splice.
+        */
+       for (i = 0; i < bdma_chan->bd_num - 1; i++) {
+               dma_async_tx_descriptor_init(&desc[i].txd, dchan);
+               desc[i].txd.tx_submit = tsi721_tx_submit;
+               desc[i].txd.flags = DMA_CTRL_ACK;
+               INIT_LIST_HEAD(&desc[i].tx_list);
+               list_add_tail(&desc[i].desc_node, &tmp_list);
+       }
+
+       spin_lock_bh(&bdma_chan->lock);
+       list_splice(&tmp_list, &bdma_chan->free_list);
+       bdma_chan->completed_cookie = dchan->cookie = 1;
+       spin_unlock_bh(&bdma_chan->lock);
+
+#ifdef CONFIG_PCI_MSI
+       if (priv->flags & TSI721_USING_MSIX) {
+               /* Request interrupt service if we are in MSI-X mode */
+               rc = request_irq(
+                       priv->msix[TSI721_VECT_DMA0_DONE +
+                                  bdma_chan->id].vector,
+                       tsi721_bdma_msix, 0,
+                       priv->msix[TSI721_VECT_DMA0_DONE +
+                                  bdma_chan->id].irq_name,
+                       (void *)bdma_chan);
+
+               if (rc) {
+                       dev_dbg(dchan->device->dev,
+                               "Unable to allocate MSI-X interrupt for "
+                               "BDMA%d-DONE\n", bdma_chan->id);
+                       goto err_out;
+               }
+
+               rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
+                                           bdma_chan->id].vector,
+                               tsi721_bdma_msix, 0,
+                               priv->msix[TSI721_VECT_DMA0_INT +
+                                          bdma_chan->id].irq_name,
+                               (void *)bdma_chan);
+
+               if (rc) {
+                       dev_dbg(dchan->device->dev,
+                               "Unable to allocate MSI-X interrupt for "
+                               "BDMA%d-INT\n", bdma_chan->id);
+                       /* Undo the already-acquired DONE vector */
+                       free_irq(
+                               priv->msix[TSI721_VECT_DMA0_DONE +
+                                          bdma_chan->id].vector,
+                               (void *)bdma_chan);
+                       rc = -EIO;
+                       goto err_out;
+               }
+       }
+#endif /* CONFIG_PCI_MSI */
+
+       tasklet_enable(&bdma_chan->tasklet);
+       tsi721_bdma_interrupt_enable(bdma_chan, 1);
+
+       return bdma_chan->bd_num - 1;
+
+err_out:
+       /* NOTE(review): bdma_chan->tx_desc is left pointing at the freed
+        * descriptor array here - confirm no path dereferences it after a
+        * failed allocation.
+        */
+       kfree(desc);
+       tsi721_bdma_ch_free(bdma_chan);
+       return rc;
+}
+
+/*
+ * tsi721_free_chan_resources - DMA_ENGINE callback: tear down a BDMA channel.
+ *
+ * Counterpart of tsi721_alloc_chan_resources(): disables the tasklet and
+ * channel interrupts, releases the MSI-X vectors (when in use), then frees
+ * the hardware descriptor ring and the logical descriptor array.  Must only
+ * be called with no transactions outstanding - enforced by the BUG_ON
+ * checks on the active and pending lists.
+ */
+static void tsi721_free_chan_resources(struct dma_chan *dchan)
+{
+       struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+#ifdef CONFIG_PCI_MSI
+       struct tsi721_device *priv = to_tsi721(dchan->device);
+#endif
+       LIST_HEAD(list);
+
+       dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+       /* Channel was never initialized - nothing to release */
+       if (bdma_chan->bd_base == NULL)
+               return;
+
+       BUG_ON(!list_empty(&bdma_chan->active_list));
+       BUG_ON(!list_empty(&bdma_chan->queue));
+
+       tasklet_disable(&bdma_chan->tasklet);
+
+       spin_lock_bh(&bdma_chan->lock);
+       list_splice_init(&bdma_chan->free_list, &list);
+       spin_unlock_bh(&bdma_chan->lock);
+
+       tsi721_bdma_interrupt_enable(bdma_chan, 0);
+
+#ifdef CONFIG_PCI_MSI
+       if (priv->flags & TSI721_USING_MSIX) {
+               free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+                                   bdma_chan->id].vector, (void *)bdma_chan);
+               free_irq(priv->msix[TSI721_VECT_DMA0_INT +
+                                   bdma_chan->id].vector, (void *)bdma_chan);
+       }
+#endif /* CONFIG_PCI_MSI */
+
+       tsi721_bdma_ch_free(bdma_chan);
+       kfree(bdma_chan->tx_desc);
+}
+
+/*
+ * tsi721_tx_status - DMA_ENGINE callback: report completion status of a
+ * cookie.
+ *
+ * Snapshots the last-completed and last-used cookies under the channel
+ * lock, then lets dma_async_is_complete() classify @cookie against them.
+ * The residue reported via dma_set_tx_state() is always 0.
+ */
+static
+enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
+                                struct dma_tx_state *txstate)
+{
+       struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+       dma_cookie_t            last_used;
+       dma_cookie_t            last_completed;
+       int                     ret;
+
+       spin_lock_bh(&bdma_chan->lock);
+       last_completed = bdma_chan->completed_cookie;
+       last_used = dchan->cookie;
+       spin_unlock_bh(&bdma_chan->lock);
+
+       ret = dma_async_is_complete(cookie, last_completed, last_used);
+
+       dma_set_tx_state(txstate, last_completed, last_used, 0);
+
+       dev_dbg(dchan->device->dev,
+               "%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
+               __func__, ret, last_completed, last_used);
+
+       return ret;
+}
+
+/*
+ * tsi721_issue_pending - DMA_ENGINE callback: kick processing of queued
+ * descriptors.
+ *
+ * Only advances work when the hardware channel is idle; a busy channel is
+ * left alone (pending descriptors are presumably picked up on completion -
+ * see the interrupt/tasklet path).
+ */
+static void tsi721_issue_pending(struct dma_chan *dchan)
+{
+       struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+
+       dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+       if (tsi721_dma_is_idle(bdma_chan)) {
+               spin_lock_bh(&bdma_chan->lock);
+               tsi721_advance_work(bdma_chan);
+               spin_unlock_bh(&bdma_chan->lock);
+       } else
+               dev_dbg(dchan->device->dev,
+                       "%s: DMA channel still busy\n", __func__);
+}
+
+/*
+ * tsi721_prep_rio_sg - prepare a RapidIO scatter-gather transaction.
+ * @dchan: DMA channel
+ * @sgl: scatter-gather list describing the local memory buffers
+ * @sg_len: number of entries in @sgl
+ * @dir: DMA_DEV_TO_MEM maps to NREAD; DMA_MEM_TO_DEV maps to an NWRITE
+ *       variant selected by the rio_dma_ext write type
+ * @flags: dmaengine descriptor flags
+ * @tinfo: RIO-specific target info (struct rio_dma_ext *)
+ *
+ * Builds one logical descriptor per SG entry; descriptors after the first
+ * are chained onto the first one's tx_list.  Returns the first descriptor's
+ * dma_async_tx_descriptor, or NULL on any failure.
+ */
+static
+struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
+                       struct scatterlist *sgl, unsigned int sg_len,
+                       enum dma_transfer_direction dir, unsigned long flags,
+                       void *tinfo)
+{
+       struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+       struct tsi721_tx_desc *desc = NULL;
+       struct tsi721_tx_desc *first = NULL;
+       struct scatterlist *sg;
+       struct rio_dma_ext *rext = tinfo;
+       u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
+       unsigned int i;
+       u32 sys_size = dma_to_mport(dchan->device)->sys_size;
+       enum dma_rtype rtype;
+
+       if (!sgl || !sg_len) {
+               dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
+               return NULL;
+       }
+
+       /* Translate dmaengine direction + RIO write type into the RIO
+        * request type used by the hardware descriptor.
+        */
+       if (dir == DMA_DEV_TO_MEM)
+               rtype = NREAD;
+       else if (dir == DMA_MEM_TO_DEV) {
+               switch (rext->wr_type) {
+               case RDW_ALL_NWRITE:
+                       rtype = ALL_NWRITE;
+                       break;
+               case RDW_ALL_NWRITE_R:
+                       rtype = ALL_NWRITE_R;
+                       break;
+               case RDW_LAST_NWRITE_R:
+               default:
+                       rtype = LAST_NWRITE_R;
+                       break;
+               }
+       } else {
+               dev_err(dchan->device->dev,
+                       "%s: Unsupported DMA direction option\n", __func__);
+               return NULL;
+       }
+
+       for_each_sg(sgl, sg, sg_len, i) {
+               int err;
+
+               dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
+               desc = tsi721_desc_get(bdma_chan);
+               if (!desc) {
+                       dev_err(dchan->device->dev,
+                               "Not enough descriptors available\n");
+                       goto err_desc_get;
+               }
+
+               /* Request a completion interrupt only on the final SG entry,
+                * and only when the client asked for one.
+                */
+               if (sg_is_last(sg))
+                       desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
+               else
+                       desc->interrupt = false;
+
+               desc->destid = rext->destid;
+               desc->rio_addr = rio_addr;
+               desc->rio_addr_u = 0;
+
+               err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
+               if (err) {
+                       dev_err(dchan->device->dev,
+                               "Failed to build desc: %d\n", err);
+                       goto err_desc_get;
+               }
+
+               /* Advance the RIO target address past this segment */
+               rio_addr += sg_dma_len(sg);
+
+               if (!first)
+                       first = desc;
+               else
+                       list_add_tail(&desc->desc_node, &first->tx_list);
+       }
+
+       /* NOTE(review): the busy cookie is set on the first descriptor while
+        * the flags are stored on the last one built - confirm this split is
+        * intentional.
+        */
+       first->txd.cookie = -EBUSY;
+       desc->txd.flags = flags;
+
+       return &first->txd;
+
+err_desc_get:
+       /* assumes tsi721_desc_put() tolerates first == NULL when the very
+        * first allocation fails - TODO confirm
+        */
+       tsi721_desc_put(bdma_chan, first);
+       return NULL;
+}
+
+/*
+ * tsi721_device_control - DMA_ENGINE callback: channel control operations.
+ *
+ * Only DMA_TERMINATE_ALL is supported: the current transfer is suspended
+ * via the DMAC control register and every descriptor on the active and
+ * pending lists is retired through tsi721_dma_chain_complete().
+ *
+ * Returns 0 on success or -ENXIO for any other command.
+ */
+static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
+                            unsigned long arg)
+{
+       struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+       struct tsi721_tx_desc *desc, *_d;
+       LIST_HEAD(list);
+
+       dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+       if (cmd != DMA_TERMINATE_ALL)
+               return -ENXIO;
+
+       spin_lock_bh(&bdma_chan->lock);
+
+       /* make sure to stop the transfer */
+       iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);
+
+       /* Collect everything outstanding and retire it */
+       list_splice_init(&bdma_chan->active_list, &list);
+       list_splice_init(&bdma_chan->queue, &list);
+
+       list_for_each_entry_safe(desc, _d, &list, desc_node)
+               tsi721_dma_chain_complete(bdma_chan, desc);
+
+       spin_unlock_bh(&bdma_chan->lock);
+
+       return 0;
+}
+
+/*
+ * tsi721_register_dma - register Tsi721 BDMA channels with the DMA engine
+ * framework.
+ * @priv: Tsi721 device private structure
+ *
+ * Initializes every BDMA channel except the one reserved for maintenance
+ * transactions (TSI721_DMACH_MAINT), installs the dmaengine callbacks on
+ * the mport's dma_device and registers it.
+ *
+ * Returns 0 on success or the error from dma_async_device_register().
+ */
+int __devinit tsi721_register_dma(struct tsi721_device *priv)
+{
+       int i;
+       int nr_channels = TSI721_DMA_MAXCH;
+       int err;
+       struct rio_mport *mport = priv->mport;
+
+       mport->dma.dev = &priv->pdev->dev;
+       mport->dma.chancnt = nr_channels;
+
+       INIT_LIST_HEAD(&mport->dma.channels);
+
+       for (i = 0; i < nr_channels; i++) {
+               struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
+
+               /* Skip the channel reserved for maintenance transactions */
+               if (i == TSI721_DMACH_MAINT)
+                       continue;
+
+               /* 64 hardware buffer descriptors per channel */
+               bdma_chan->bd_num = 64;
+               bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
+
+               bdma_chan->dchan.device = &mport->dma;
+               bdma_chan->dchan.cookie = 1;
+               bdma_chan->dchan.chan_id = i;
+               bdma_chan->id = i;
+
+               spin_lock_init(&bdma_chan->lock);
+
+               INIT_LIST_HEAD(&bdma_chan->active_list);
+               INIT_LIST_HEAD(&bdma_chan->queue);
+               INIT_LIST_HEAD(&bdma_chan->free_list);
+
+               /* Tasklet stays disabled until alloc_chan_resources() */
+               tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
+                            (unsigned long)bdma_chan);
+               tasklet_disable(&bdma_chan->tasklet);
+               list_add_tail(&bdma_chan->dchan.device_node,
+                             &mport->dma.channels);
+       }
+
+       dma_cap_zero(mport->dma.cap_mask);
+       dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
+       dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);
+
+       mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
+       mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
+       mport->dma.device_tx_status = tsi721_tx_status;
+       mport->dma.device_issue_pending = tsi721_issue_pending;
+       mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
+       mport->dma.device_control = tsi721_device_control;
+
+       err = dma_async_device_register(&mport->dma);
+       if (err)
+               dev_err(&priv->pdev->dev, "Failed to register DMA device\n");
+
+       return err;
+}
index 86c9a091a2ffdbfb3cac0868f17824ee9f4e5b6d..c40665a4fa3347a8b9bdb1edd12c4b48eacb5d43 100644 (file)
@@ -1121,6 +1121,87 @@ int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
        return 0;
 }
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+/*
+ * rio_chan_filter - dma_request_channel() filter callback.
+ * @chan: candidate DMA channel
+ * @arg: RIO device (struct rio_dev *) the channel must be able to reach
+ *
+ * Accepts the channel only when its dma_device is the one embedded in the
+ * mport hosting @arg's network.
+ */
+static bool rio_chan_filter(struct dma_chan *chan, void *arg)
+{
+       struct rio_dev *rdev = arg;
+
+       /* Check that DMA device belongs to the right MPORT */
+       return (rdev->net->hport ==
+               container_of(chan->device, struct rio_mport, dma));
+}
+
+/**
+ * rio_request_dma - request RapidIO capable DMA channel that supports
+ *   specified target RapidIO device.
+ * @rdev: RIO device control structure
+ *
+ * Searches for a DMA_SLAVE capable channel using rio_chan_filter() to match
+ * the channel's mport against @rdev's host port.
+ *
+ * Returns pointer to allocated DMA channel or NULL if failed.
+ */
+struct dma_chan *rio_request_dma(struct rio_dev *rdev)
+{
+       dma_cap_mask_t mask;
+       struct dma_chan *dchan;
+
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
+       dchan = dma_request_channel(mask, rio_chan_filter, rdev);
+
+       return dchan;
+}
+EXPORT_SYMBOL_GPL(rio_request_dma);
+
+/**
+ * rio_release_dma - release specified DMA channel
+ * @dchan: DMA channel to release
+ *
+ * Thin wrapper around dma_release_channel() provided as the counterpart of
+ * rio_request_dma().
+ */
+void rio_release_dma(struct dma_chan *dchan)
+{
+       dma_release_channel(dchan);
+}
+EXPORT_SYMBOL_GPL(rio_release_dma);
+
+/**
+ * rio_dma_prep_slave_sg - RapidIO specific wrapper
+ *   for device_prep_slave_sg callback defined by DMAENGINE.
+ * @rdev: RIO device control structure
+ * @dchan: DMA channel to configure
+ * @data: RIO specific data descriptor
+ * @direction: DMA data transfer direction (TO or FROM the device)
+ * @flags: dmaengine defined flags
+ *
+ * Initializes RapidIO capable DMA channel for the specified data transfer.
+ * Uses DMA channel private extension to pass information related to remote
+ * target RIO device.
+ * Returns pointer to DMA transaction descriptor or NULL if failed.
+ */
+struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
+       struct dma_chan *dchan, struct rio_dma_data *data,
+       enum dma_transfer_direction direction, unsigned long flags)
+{
+       struct dma_async_tx_descriptor *txd = NULL;
+       struct rio_dma_ext rio_ext;
+
+       /* The channel driver must implement prep_slave_sg (mapped to a
+        * RIO-aware prep routine, e.g. tsi721_prep_rio_sg).
+        */
+       if (dchan->device->device_prep_slave_sg == NULL) {
+               pr_err("%s: prep_rio_sg == NULL\n", __func__);
+               return NULL;
+       }
+
+       /* Pack RIO target parameters into the channel-private extension */
+       rio_ext.destid = rdev->destid;
+       rio_ext.rio_addr_u = data->rio_addr_u;
+       rio_ext.rio_addr = data->rio_addr;
+       rio_ext.wr_type = data->wr_type;
+
+       txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
+                                       direction, flags, &rio_ext);
+
+       return txd;
+}
+EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
+
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
 static void rio_fixup_device(struct rio_dev *dev)
 {
 }
index 49b2112b0486120c5434f7e2295955202538387f..3660bace123c97adc3f3ce4e1165216655578a3d 100644 (file)
@@ -47,7 +47,7 @@ static int anatop_set_voltage(struct regulator_dev *reg, int min_uV,
                                  int max_uV, unsigned *selector)
 {
        struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
-       u32 val, sel;
+       u32 val, sel, mask;
        int uv;
 
        uv = min_uV;
@@ -71,11 +71,10 @@ static int anatop_set_voltage(struct regulator_dev *reg, int min_uV,
        val = anatop_reg->min_bit_val + sel;
        *selector = sel;
        dev_dbg(&reg->dev, "%s: calculated val %d\n", __func__, val);
-       anatop_set_bits(anatop_reg->mfd,
-                       anatop_reg->control_reg,
-                       anatop_reg->vol_bit_shift,
-                       anatop_reg->vol_bit_width,
-                       val);
+       mask = ((1 << anatop_reg->vol_bit_width) - 1) <<
+               anatop_reg->vol_bit_shift;
+       val <<= anatop_reg->vol_bit_shift;
+       anatop_write_reg(anatop_reg->mfd, anatop_reg->control_reg, val, mask);
 
        return 0;
 }
@@ -88,10 +87,9 @@ static int anatop_get_voltage_sel(struct regulator_dev *reg)
        if (!anatop_reg->control_reg)
                return -ENOTSUPP;
 
-       val = anatop_get_bits(anatop_reg->mfd,
-                             anatop_reg->control_reg,
-                             anatop_reg->vol_bit_shift,
-                             anatop_reg->vol_bit_width);
+       val = anatop_read_reg(anatop_reg->mfd, anatop_reg->control_reg);
+       val = (val & ((1 << anatop_reg->vol_bit_width) - 1)) >>
+               anatop_reg->vol_bit_shift;
 
        return val - anatop_reg->min_bit_val;
 }
index 4e01a423471b7d521921a5cf65a99afa1cd7fdc5..6bf864b4bdf67e8ddd860ed16fe0e7248e65318b 100644 (file)
@@ -331,21 +331,16 @@ struct tps65910_reg {
 
 static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg)
 {
-       u8 val;
+       unsigned int val;
        int err;
 
-       err = pmic->mfd->read(pmic->mfd, reg, 1, &val);
+       err = tps65910_reg_read(pmic->mfd, reg, &val);
        if (err)
                return err;
 
        return val;
 }
 
-static inline int tps65910_write(struct tps65910_reg *pmic, u8 reg, u8 val)
-{
-       return pmic->mfd->write(pmic->mfd, reg, 1, &val);
-}
-
 static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
                                        u8 set_mask, u8 clear_mask)
 {
@@ -362,7 +357,7 @@ static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
 
        data &= ~clear_mask;
        data |= set_mask;
-       err = tps65910_write(pmic, reg, data);
+       err = tps65910_reg_write(pmic->mfd, reg, data);
        if (err)
                dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
 
@@ -371,7 +366,7 @@ out:
        return err;
 }
 
-static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg)
+static int tps65910_reg_read_locked(struct tps65910_reg *pmic, u8 reg)
 {
        int data;
 
@@ -385,13 +380,13 @@ static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg)
        return data;
 }
 
-static int tps65910_reg_write(struct tps65910_reg *pmic, u8 reg, u8 val)
+static int tps65910_reg_write_locked(struct tps65910_reg *pmic, u8 reg, u8 val)
 {
        int err;
 
        mutex_lock(&pmic->mutex);
 
-       err = tps65910_write(pmic, reg, val);
+       err = tps65910_reg_write(pmic->mfd, reg, val);
        if (err < 0)
                dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
 
@@ -490,9 +485,9 @@ static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
                                                        LDO_ST_MODE_BIT);
        case REGULATOR_MODE_IDLE:
                value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT;
-               return tps65910_set_bits(mfd, reg, value);
+               return tps65910_reg_set_bits(mfd, reg, value);
        case REGULATOR_MODE_STANDBY:
-               return tps65910_clear_bits(mfd, reg, LDO_ST_ON_BIT);
+               return tps65910_reg_clear_bits(mfd, reg, LDO_ST_ON_BIT);
        }
 
        return -EINVAL;
@@ -507,7 +502,7 @@ static unsigned int tps65910_get_mode(struct regulator_dev *dev)
        if (reg < 0)
                return reg;
 
-       value = tps65910_reg_read(pmic, reg);
+       value = tps65910_reg_read_locked(pmic, reg);
        if (value < 0)
                return value;
 
@@ -527,28 +522,28 @@ static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
 
        switch (id) {
        case TPS65910_REG_VDD1:
-               opvsel = tps65910_reg_read(pmic, TPS65910_VDD1_OP);
-               mult = tps65910_reg_read(pmic, TPS65910_VDD1);
+               opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_OP);
+               mult = tps65910_reg_read_locked(pmic, TPS65910_VDD1);
                mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
-               srvsel = tps65910_reg_read(pmic, TPS65910_VDD1_SR);
+               srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_SR);
                sr = opvsel & VDD1_OP_CMD_MASK;
                opvsel &= VDD1_OP_SEL_MASK;
                srvsel &= VDD1_SR_SEL_MASK;
                vselmax = 75;
                break;
        case TPS65910_REG_VDD2:
-               opvsel = tps65910_reg_read(pmic, TPS65910_VDD2_OP);
-               mult = tps65910_reg_read(pmic, TPS65910_VDD2);
+               opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_OP);
+               mult = tps65910_reg_read_locked(pmic, TPS65910_VDD2);
                mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT;
-               srvsel = tps65910_reg_read(pmic, TPS65910_VDD2_SR);
+               srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_SR);
                sr = opvsel & VDD2_OP_CMD_MASK;
                opvsel &= VDD2_OP_SEL_MASK;
                srvsel &= VDD2_SR_SEL_MASK;
                vselmax = 75;
                break;
        case TPS65911_REG_VDDCTRL:
-               opvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_OP);
-               srvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_SR);
+               opvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_OP);
+               srvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_SR);
                sr = opvsel & VDDCTRL_OP_CMD_MASK;
                opvsel &= VDDCTRL_OP_SEL_MASK;
                srvsel &= VDDCTRL_SR_SEL_MASK;
@@ -588,7 +583,7 @@ static int tps65910_get_voltage_sel(struct regulator_dev *dev)
        if (reg < 0)
                return reg;
 
-       value = tps65910_reg_read(pmic, reg);
+       value = tps65910_reg_read_locked(pmic, reg);
        if (value < 0)
                return value;
 
@@ -625,7 +620,7 @@ static int tps65911_get_voltage_sel(struct regulator_dev *dev)
 
        reg = pmic->get_ctrl_reg(id);
 
-       value = tps65910_reg_read(pmic, reg);
+       value = tps65910_reg_read_locked(pmic, reg);
 
        switch (id) {
        case TPS65911_REG_LDO1:
@@ -670,7 +665,7 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
                tps65910_modify_bits(pmic, TPS65910_VDD1,
                                (dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
                                                VDD1_VGAIN_SEL_MASK);
-               tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
+               tps65910_reg_write_locked(pmic, TPS65910_VDD1_OP, vsel);
                break;
        case TPS65910_REG_VDD2:
                dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
@@ -681,11 +676,11 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
                tps65910_modify_bits(pmic, TPS65910_VDD2,
                                (dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
                                                VDD1_VGAIN_SEL_MASK);
-               tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel);
+               tps65910_reg_write_locked(pmic, TPS65910_VDD2_OP, vsel);
                break;
        case TPS65911_REG_VDDCTRL:
                vsel = selector + 3;
-               tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel);
+               tps65910_reg_write_locked(pmic, TPS65911_VDDCTRL_OP, vsel);
        }
 
        return 0;
@@ -936,10 +931,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
 
        /* External EN1 control */
        if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1)
-               ret = tps65910_set_bits(mfd,
+               ret = tps65910_reg_set_bits(mfd,
                                TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
        else
-               ret = tps65910_clear_bits(mfd,
+               ret = tps65910_reg_clear_bits(mfd,
                                TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
        if (ret < 0) {
                dev_err(mfd->dev,
@@ -949,10 +944,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
 
        /* External EN2 control */
        if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2)
-               ret = tps65910_set_bits(mfd,
+               ret = tps65910_reg_set_bits(mfd,
                                TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
        else
-               ret = tps65910_clear_bits(mfd,
+               ret = tps65910_reg_clear_bits(mfd,
                                TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
        if (ret < 0) {
                dev_err(mfd->dev,
@@ -964,10 +959,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
        if ((tps65910_chip_id(mfd) == TPS65910) &&
                        (id >= TPS65910_REG_VDIG1)) {
                if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3)
-                       ret = tps65910_set_bits(mfd,
+                       ret = tps65910_reg_set_bits(mfd,
                                TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
                else
-                       ret = tps65910_clear_bits(mfd,
+                       ret = tps65910_reg_clear_bits(mfd,
                                TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
                if (ret < 0) {
                        dev_err(mfd->dev,
@@ -979,10 +974,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
        /* Return if no external control is selected */
        if (!(ext_sleep_config & EXT_SLEEP_CONTROL)) {
                /* Clear all sleep controls */
-               ret = tps65910_clear_bits(mfd,
+               ret = tps65910_reg_clear_bits(mfd,
                        TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
                if (!ret)
-                       ret = tps65910_clear_bits(mfd,
+                       ret = tps65910_reg_clear_bits(mfd,
                                TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
                if (ret < 0)
                        dev_err(mfd->dev,
@@ -1001,32 +996,33 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
                                (tps65910_chip_id(mfd) == TPS65911))) {
                int op_reg_add = pmic->get_ctrl_reg(id) + 1;
                int sr_reg_add = pmic->get_ctrl_reg(id) + 2;
-               int opvsel = tps65910_reg_read(pmic, op_reg_add);
-               int srvsel = tps65910_reg_read(pmic, sr_reg_add);
+               int opvsel = tps65910_reg_read_locked(pmic, op_reg_add);
+               int srvsel = tps65910_reg_read_locked(pmic, sr_reg_add);
                if (opvsel & VDD1_OP_CMD_MASK) {
                        u8 reg_val = srvsel & VDD1_OP_SEL_MASK;
-                       ret = tps65910_reg_write(pmic, op_reg_add, reg_val);
+                       ret = tps65910_reg_write_locked(pmic, op_reg_add,
+                                                       reg_val);
                        if (ret < 0) {
                                dev_err(mfd->dev,
                                        "Error in configuring op register\n");
                                return ret;
                        }
                }
-               ret = tps65910_reg_write(pmic, sr_reg_add, 0);
+               ret = tps65910_reg_write_locked(pmic, sr_reg_add, 0);
                if (ret < 0) {
                        dev_err(mfd->dev, "Error in settting sr register\n");
                        return ret;
                }
        }
 
-       ret = tps65910_clear_bits(mfd,
+       ret = tps65910_reg_clear_bits(mfd,
                        TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
        if (!ret) {
                if (ext_sleep_config & TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP)
-                       ret = tps65910_set_bits(mfd,
+                       ret = tps65910_reg_set_bits(mfd,
                                TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
                else
-                       ret = tps65910_clear_bits(mfd,
+                       ret = tps65910_reg_clear_bits(mfd,
                                TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
        }
        if (ret < 0)
@@ -1177,7 +1173,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, pmic);
 
        /* Give control of all register to control port */
-       tps65910_set_bits(pmic->mfd, TPS65910_DEVCTRL,
+       tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL,
                                DEVCTRL_SR_CTL_I2C_SEL_MASK);
 
        switch(tps65910_chip_id(tps65910)) {
index a885911bb5fce9c3adf4640b2e65fbbe97c7bf9b..099da11e989fde4e9ed793c3cdee9c6452c58a0f 100644 (file)
@@ -535,7 +535,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
                goto err;
        }
 
-       irq = platform_get_irq_byname(pdev, "UV");
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
        ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
                                   IRQF_TRIGGER_RISING, dcdc->name, dcdc);
        if (ret != 0) {
@@ -544,7 +544,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
                goto err_regulator;
        }
 
-       irq = platform_get_irq_byname(pdev, "HC");
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC"));
        ret = request_threaded_irq(irq, NULL, wm831x_dcdc_oc_irq,
                                   IRQF_TRIGGER_RISING, dcdc->name, dcdc);
        if (ret != 0) {
@@ -558,7 +558,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
        return 0;
 
 err_uv:
-       free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+       free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
+                dcdc);
 err_regulator:
        regulator_unregister(dcdc->regulator);
 err:
@@ -570,11 +571,14 @@ err:
 static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
 {
        struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
+       struct wm831x *wm831x = dcdc->wm831x;
 
        platform_set_drvdata(pdev, NULL);
 
-       free_irq(platform_get_irq_byname(pdev, "HC"), dcdc);
-       free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+       free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC")),
+                           dcdc);
+       free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
+                           dcdc);
        regulator_unregister(dcdc->regulator);
        if (dcdc->dvs_gpio)
                gpio_free(dcdc->dvs_gpio);
@@ -726,7 +730,7 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
                goto err;
        }
 
-       irq = platform_get_irq_byname(pdev, "UV");
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
        ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
                                   IRQF_TRIGGER_RISING, dcdc->name, dcdc);
        if (ret != 0) {
@@ -751,7 +755,8 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, NULL);
 
-       free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+       free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
+                           dcdc);
        regulator_unregister(dcdc->regulator);
 
        return 0;
@@ -859,7 +864,7 @@ static __devinit int wm831x_boostp_probe(struct platform_device *pdev)
                goto err;
        }
 
-       irq = platform_get_irq_byname(pdev, "UV");
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
        ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
                                   IRQF_TRIGGER_RISING, dcdc->name,
                                   dcdc);
@@ -885,7 +890,8 @@ static __devexit int wm831x_boostp_remove(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, NULL);
 
-       free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+       free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
+                dcdc);
        regulator_unregister(dcdc->regulator);
 
        return 0;
index b50ab778b098274965667a0408ead22253efe8d2..0d207c297714ed2c7fff51e1e3c06de18c29d361 100644 (file)
@@ -202,7 +202,7 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
                goto err;
        }
 
-       irq = platform_get_irq(pdev, 0);
+       irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
        ret = request_threaded_irq(irq, NULL, wm831x_isink_irq,
                                   IRQF_TRIGGER_RISING, isink->name, isink);
        if (ret != 0) {
@@ -227,7 +227,7 @@ static __devexit int wm831x_isink_remove(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, NULL);
 
-       free_irq(platform_get_irq(pdev, 0), isink);
+       free_irq(wm831x_irq(isink->wm831x, platform_get_irq(pdev, 0)), isink);
 
        regulator_unregister(isink->regulator);
 
index aa1f8b3fbe16c7a6e1683b537db9721e77c56b0d..a9a28d8ac18591d4c7126364dbd40197024191f8 100644 (file)
@@ -321,7 +321,7 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
                goto err;
        }
 
-       irq = platform_get_irq_byname(pdev, "UV");
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
        ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
                                   IRQF_TRIGGER_RISING, ldo->name,
                                   ldo);
@@ -347,7 +347,8 @@ static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, NULL);
 
-       free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
+       free_irq(wm831x_irq(ldo->wm831x,
+                           platform_get_irq_byname(pdev, "UV")), ldo);
        regulator_unregister(ldo->regulator);
 
        return 0;
@@ -582,7 +583,7 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
                goto err;
        }
 
-       irq = platform_get_irq_byname(pdev, "UV");
+       irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
        ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
                                   IRQF_TRIGGER_RISING, ldo->name, ldo);
        if (ret != 0) {
@@ -605,7 +606,8 @@ static __devexit int wm831x_aldo_remove(struct platform_device *pdev)
 {
        struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
 
-       free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
+       free_irq(wm831x_irq(ldo->wm831x, platform_get_irq_byname(pdev, "UV")),
+                ldo);
        regulator_unregister(ldo->regulator);
 
        return 0;
index d6f8adaa26efb1fd0916253f2daf3f420e4ff486..8ea7bccc71007fd94132cf7c65da5f5ed5095a12 100644 (file)
@@ -78,7 +78,7 @@ typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int avail);
  * the recovery of the remote processor.
  */
 static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
-               unsigned long iova, int flags)
+               unsigned long iova, int flags, void *token)
 {
        dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);
 
@@ -117,7 +117,7 @@ static int rproc_enable_iommu(struct rproc *rproc)
                return -ENOMEM;
        }
 
-       iommu_set_fault_handler(domain, rproc_iommu_fault);
+       iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);
 
        ret = iommu_attach_device(domain, dev);
        if (ret) {
index 4161bfe462cd5f958c1f1d790bd9ef795df8b530..08cbdb900a18a290a7065bc73850015f8278ca10 100644 (file)
@@ -620,27 +620,6 @@ config RTC_DRV_MSM6242
          This driver can also be built as a module. If so, the module
          will be called rtc-msm6242.
 
-config RTC_DRV_IMXDI
-       tristate "Freescale IMX DryIce Real Time Clock"
-       depends on ARCH_MX25
-       depends on RTC_CLASS
-       help
-          Support for Freescale IMX DryIce RTC
-
-          This driver can also be built as a module, if so, the module
-          will be called "rtc-imxdi".
-
-config RTC_MXC
-       tristate "Freescale MXC Real Time Clock"
-       depends on ARCH_MXC
-       depends on RTC_CLASS
-       help
-          If you say yes here you get support for the Freescale MXC
-          RTC module.
-
-          This driver can also be built as a module, if so, the module
-          will be called "rtc-mxc".
-
 config RTC_DRV_BQ4802
        tristate "TI BQ4802"
        help
@@ -738,6 +717,16 @@ config RTC_DRV_DAVINCI
          This driver can also be built as a module. If so, the module
          will be called rtc-davinci.
 
+config RTC_DRV_IMXDI
+       tristate "Freescale IMX DryIce Real Time Clock"
+       depends on SOC_IMX25
+       depends on RTC_CLASS
+       help
+          Support for Freescale IMX DryIce RTC
+
+          This driver can also be built as a module, if so, the module
+          will be called "rtc-imxdi".
+
 config RTC_DRV_OMAP
        tristate "TI OMAP1"
        depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
@@ -1087,4 +1076,15 @@ config RTC_DRV_LOONGSON1
          This driver can also be built as a module. If so, the module
          will be called rtc-ls1x.
 
+config RTC_DRV_MXC
+       tristate "Freescale MXC Real Time Clock"
+       depends on ARCH_MXC
+       depends on RTC_CLASS
+       help
+          If you say yes here you get support for the Freescale MXC
+          RTC module.
+
+          This driver can also be built as a module, if so, the module
+          will be called "rtc-mxc".
+
 endif # RTC_CLASS
index 727ae7786e6c3806face77564b82e76c52a5b01f..2973921c30d84d70eafea434dd3e678e4c54c3c1 100644 (file)
@@ -61,7 +61,7 @@ obj-$(CONFIG_RTC_DRV_M41T94)  += rtc-m41t94.o
 obj-$(CONFIG_RTC_DRV_M48T35)   += rtc-m48t35.o
 obj-$(CONFIG_RTC_DRV_M48T59)   += rtc-m48t59.o
 obj-$(CONFIG_RTC_DRV_M48T86)   += rtc-m48t86.o
-obj-$(CONFIG_RTC_MXC)          += rtc-mxc.o
+obj-$(CONFIG_RTC_DRV_MXC)      += rtc-mxc.o
 obj-$(CONFIG_RTC_DRV_MAX6900)  += rtc-max6900.o
 obj-$(CONFIG_RTC_DRV_MAX8925)  += rtc-max8925.o
 obj-$(CONFIG_RTC_DRV_MAX8998)  += rtc-max8998.o
index 7d5f56edb8efc30e8beb70cbad177a0cca32dc35..4267789ca9959413e90df5ea053154e07481d3ce 100644 (file)
@@ -910,14 +910,17 @@ static inline int cmos_poweroff(struct device *dev)
 
 static u32 rtc_handler(void *context)
 {
+       struct device *dev = context;
+
+       pm_wakeup_event(dev, 0);
        acpi_clear_event(ACPI_EVENT_RTC);
        acpi_disable_event(ACPI_EVENT_RTC, 0);
        return ACPI_INTERRUPT_HANDLED;
 }
 
-static inline void rtc_wake_setup(void)
+static inline void rtc_wake_setup(struct device *dev)
 {
-       acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
+       acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev);
        /*
         * After the RTC handler is installed, the Fixed_RTC event should
         * be disabled. Only when the RTC alarm is set will it be enabled.
@@ -950,7 +953,7 @@ cmos_wake_setup(struct device *dev)
        if (acpi_disabled)
                return;
 
-       rtc_wake_setup();
+       rtc_wake_setup(dev);
        acpi_rtc_info.wake_on = rtc_wake_on;
        acpi_rtc_info.wake_off = rtc_wake_off;
 
index c293d0cdb10483502784653f8617d0f0ecb55562..836710ce750e703ae06f8761aca9d00b64bd84f9 100644 (file)
@@ -17,8 +17,7 @@
 #include <linux/string.h>
 #include <linux/rtc.h>
 #include <linux/bcd.h>
-
-
+#include <linux/rtc/ds1307.h>
 
 /*
  * We can't determine type by probing, but if we expect pre-Linux code
@@ -92,7 +91,8 @@ enum ds_type {
 #      define DS1337_BIT_A2I           0x02
 #      define DS1337_BIT_A1I           0x01
 #define DS1339_REG_ALARM1_SECS 0x07
-#define DS1339_REG_TRICKLE     0x10
+
+#define DS13XX_TRICKLE_CHARGER_MAGIC   0xa0
 
 #define RX8025_REG_CTRL1       0x0e
 #      define RX8025_BIT_2412          0x20
@@ -124,6 +124,7 @@ struct chip_desc {
        unsigned                alarm:1;
        u16                     nvram_offset;
        u16                     nvram_size;
+       u16                     trickle_charger_reg;
 };
 
 static const struct chip_desc chips[last_ds_type] = {
@@ -140,6 +141,13 @@ static const struct chip_desc chips[last_ds_type] = {
        },
        [ds_1339] = {
                .alarm          = 1,
+               .trickle_charger_reg = 0x10,
+       },
+       [ds_1340] = {
+               .trickle_charger_reg = 0x08,
+       },
+       [ds_1388] = {
+               .trickle_charger_reg = 0x0a,
        },
        [ds_3231] = {
                .alarm          = 1,
@@ -619,6 +627,7 @@ static int __devinit ds1307_probe(struct i2c_client *client,
        struct i2c_adapter      *adapter = to_i2c_adapter(client->dev.parent);
        int                     want_irq = false;
        unsigned char           *buf;
+       struct ds1307_platform_data *pdata = client->dev.platform_data;
        static const int        bbsqi_bitpos[] = {
                [ds_1337] = 0,
                [ds_1339] = DS1339_BIT_BBSQI,
@@ -637,7 +646,10 @@ static int __devinit ds1307_probe(struct i2c_client *client,
 
        ds1307->client  = client;
        ds1307->type    = id->driver_data;
-       ds1307->offset  = 0;
+
+       if (pdata && pdata->trickle_charger_setup && chip->trickle_charger_reg)
+               i2c_smbus_write_byte_data(client, chip->trickle_charger_reg,
+                       DS13XX_TRICKLE_CHARGER_MAGIC | pdata->trickle_charger_setup);
 
        buf = ds1307->regs;
        if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
index 14a42a1edc66d55be04a79c67aa622db7b10b38d..9602278ff988df3affbb58b03d88dd7908508447 100644 (file)
@@ -127,7 +127,7 @@ static const struct attribute_group ep93xx_rtc_sysfs_files = {
        .attrs  = ep93xx_rtc_attrs,
 };
 
-static int __init ep93xx_rtc_probe(struct platform_device *pdev)
+static int __devinit ep93xx_rtc_probe(struct platform_device *pdev)
 {
        struct ep93xx_rtc *ep93xx_rtc;
        struct resource *res;
@@ -174,7 +174,7 @@ exit:
        return err;
 }
 
-static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
+static int __devexit ep93xx_rtc_remove(struct platform_device *pdev)
 {
        struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
 
@@ -186,31 +186,19 @@ static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
        return 0;
 }
 
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:ep93xx-rtc");
-
 static struct platform_driver ep93xx_rtc_driver = {
        .driver         = {
                .name   = "ep93xx-rtc",
                .owner  = THIS_MODULE,
        },
-       .remove         = __exit_p(ep93xx_rtc_remove),
+       .probe          = ep93xx_rtc_probe,
+       .remove         = __devexit_p(ep93xx_rtc_remove),
 };
 
-static int __init ep93xx_rtc_init(void)
-{
-        return platform_driver_probe(&ep93xx_rtc_driver, ep93xx_rtc_probe);
-}
-
-static void __exit ep93xx_rtc_exit(void)
-{
-       platform_driver_unregister(&ep93xx_rtc_driver);
-}
+module_platform_driver(ep93xx_rtc_driver);
 
 MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
 MODULE_DESCRIPTION("EP93XX RTC driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
-
-module_init(ep93xx_rtc_init);
-module_exit(ep93xx_rtc_exit);
+MODULE_ALIAS("platform:ep93xx-rtc");
index d93a9608b1f0dfec14db5c3e5e3e6e941382b618..891cd6c61d0ae76112ccc0f6b6da3876d21569ad 100644 (file)
@@ -405,7 +405,7 @@ static int dryice_rtc_probe(struct platform_device *pdev)
        imxdi->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(imxdi->clk))
                return PTR_ERR(imxdi->clk);
-       clk_enable(imxdi->clk);
+       clk_prepare_enable(imxdi->clk);
 
        /*
         * Initialize dryice hardware
@@ -470,7 +470,7 @@ static int dryice_rtc_probe(struct platform_device *pdev)
        return 0;
 
 err:
-       clk_disable(imxdi->clk);
+       clk_disable_unprepare(imxdi->clk);
        clk_put(imxdi->clk);
 
        return rc;
@@ -487,7 +487,7 @@ static int __devexit dryice_rtc_remove(struct platform_device *pdev)
 
        rtc_device_unregister(imxdi->rtc);
 
-       clk_disable(imxdi->clk);
+       clk_disable_unprepare(imxdi->clk);
        clk_put(imxdi->clk);
 
        return 0;
index 63c72189c64b38d76d7d1e31721f359f724d983d..d5218553741ff54f871799f51b5969d74eabe85c 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/rtc.h>
 #include <linux/slab.h>
 #include <linux/io.h>
+#include <linux/of.h>
 
 /*
  * Clock and Power control register offsets
@@ -386,13 +387,22 @@ static const struct dev_pm_ops lpc32xx_rtc_pm_ops = {
 #define LPC32XX_RTC_PM_OPS NULL
 #endif
 
+#ifdef CONFIG_OF
+static const struct of_device_id lpc32xx_rtc_match[] = {
+       { .compatible = "nxp,lpc3220-rtc" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_rtc_match);
+#endif
+
 static struct platform_driver lpc32xx_rtc_driver = {
        .probe          = lpc32xx_rtc_probe,
        .remove         = __devexit_p(lpc32xx_rtc_remove),
        .driver = {
                .name   = RTC_NAME,
                .owner  = THIS_MODULE,
-               .pm     = LPC32XX_RTC_PM_OPS
+               .pm     = LPC32XX_RTC_PM_OPS,
+               .of_match_table = of_match_ptr(lpc32xx_rtc_match),
        },
 };
 
index 10f1c29436ec59946123e5f2830f83e44a6c2ebc..efab3d48cb153314e8fdbf600da18eb6289dd5fe 100644 (file)
@@ -48,6 +48,7 @@ static inline int m41t93_set_reg(struct spi_device *spi, u8 addr, u8 data)
 static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
 {
        struct spi_device *spi = to_spi_device(dev);
+       int tmp;
        u8 buf[9] = {0x80};        /* write cmd + 8 data bytes */
        u8 * const data = &buf[1]; /* ptr to first data byte */
 
@@ -62,6 +63,30 @@ static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
                return -EINVAL;
        }
 
+       tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
+       if (tmp < 0)
+               return tmp;
+
+       if (tmp & M41T93_FLAG_OF) {
+               dev_warn(&spi->dev, "OF bit is set, resetting.\n");
+               m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
+
+               tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
+               if (tmp < 0) {
+                       return tmp;
+               } else if (tmp & M41T93_FLAG_OF) {
+                       /* OF cannot be immediately reset: oscillator has to be
+                        * restarted. */
+                       u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
+
+                       dev_warn(&spi->dev,
+                                "OF bit is still set, kickstarting clock.\n");
+                       m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
+                       reset_osc &= ~M41T93_FLAG_ST;
+                       m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
+               }
+       }
+
        data[M41T93_REG_SSEC]           = 0;
        data[M41T93_REG_ST_SEC]         = bin2bcd(tm->tm_sec);
        data[M41T93_REG_MIN]            = bin2bcd(tm->tm_min);
@@ -89,10 +114,7 @@ static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
           1. halt bit (HT) is set: the clock is running but update of readout
              registers has been disabled due to power failure. This is normal
              case after poweron. Time is valid after resetting HT bit.
-          2. oscillator fail bit (OF) is set. Oscillator has be stopped and
-             time is invalid:
-             a) OF can be immeditely reset.
-             b) OF cannot be immediately reset: oscillator has to be restarted.
+          2. oscillator fail bit (OF) is set: time is invalid.
        */
        tmp = spi_w8r8(spi, M41T93_REG_ALM_HOUR_HT);
        if (tmp < 0)
@@ -110,21 +132,7 @@ static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
 
        if (tmp & M41T93_FLAG_OF) {
                ret = -EINVAL;
-               dev_warn(&spi->dev, "OF bit is set, resetting.\n");
-               m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
-
-               tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
-               if (tmp < 0)
-                       return tmp;
-               else if (tmp & M41T93_FLAG_OF) {
-                       u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
-
-                       dev_warn(&spi->dev,
-                                "OF bit is still set, kickstarting clock.\n");
-                       m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
-                       reset_osc &= ~M41T93_FLAG_ST;
-                       m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
-               }
+               dev_warn(&spi->dev, "OF bit is set, write time to restart.\n");
        }
 
        if (tmp & M41T93_FLAG_BL)
index bc0677de1996d93a13e0bf37c080bbd30a49e403..97a3284bb7c60920be7204156fab79b2dabafec6 100644 (file)
@@ -64,6 +64,7 @@ struct pcf8563 {
         * 1970...2069.
         */
        int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
+       int voltage_low; /* incicates if a low_voltage was detected */
 };
 
 /*
@@ -86,9 +87,11 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
                return -EIO;
        }
 
-       if (buf[PCF8563_REG_SC] & PCF8563_SC_LV)
+       if (buf[PCF8563_REG_SC] & PCF8563_SC_LV) {
+               pcf8563->voltage_low = 1;
                dev_info(&client->dev,
                        "low voltage detected, date/time is not reliable.\n");
+       }
 
        dev_dbg(&client->dev,
                "%s: raw data is st1=%02x, st2=%02x, sec=%02x, min=%02x, hr=%02x, "
@@ -173,6 +176,44 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
        return 0;
 }
 
+#ifdef CONFIG_RTC_INTF_DEV
+static int pcf8563_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+       struct pcf8563 *pcf8563 = i2c_get_clientdata(to_i2c_client(dev));
+       struct rtc_time tm;
+
+       switch (cmd) {
+       case RTC_VL_READ:
+               if (pcf8563->voltage_low)
+                       dev_info(dev, "low voltage detected, date/time is not reliable.\n");
+
+               if (copy_to_user((void __user *)arg, &pcf8563->voltage_low,
+                                       sizeof(int)))
+                       return -EFAULT;
+               return 0;
+       case RTC_VL_CLR:
+               /*
+                * Clear the VL bit in the seconds register in case
+                * the time has not been set already (which would
+                * have cleared it). This does not really matter
+                * because of the cached voltage_low value but do it
+                * anyway for consistency.
+                */
+               if (pcf8563_get_datetime(to_i2c_client(dev), &tm))
+                       pcf8563_set_datetime(to_i2c_client(dev), &tm);
+
+               /* Clear the cached value. */
+               pcf8563->voltage_low = 0;
+
+               return 0;
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
+#else
+#define pcf8563_rtc_ioctl NULL
+#endif
+
 static int pcf8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
        return pcf8563_get_datetime(to_i2c_client(dev), tm);
@@ -184,6 +225,7 @@ static int pcf8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
 }
 
 static const struct rtc_class_ops pcf8563_rtc_ops = {
+       .ioctl          = pcf8563_rtc_ioctl,
        .read_time      = pcf8563_rtc_read_time,
        .set_time       = pcf8563_rtc_set_time,
 };
index f027c063fb20312a15ab3ed57d9949b18fb40f65..cc0533994f6e0650bbd7c2bc2a4976cfb947593e 100644 (file)
@@ -220,17 +220,9 @@ static irqreturn_t pl031_interrupt(int irq, void *dev_id)
        unsigned long events = 0;
 
        rtcmis = readl(ldata->base + RTC_MIS);
-       if (rtcmis) {
-               writel(rtcmis, ldata->base + RTC_ICR);
-
-               if (rtcmis & RTC_BIT_AI)
-                       events |= (RTC_AF | RTC_IRQF);
-
-               /* Timer interrupt is only available in ST variants */
-               if ((rtcmis & RTC_BIT_PI) &&
-                       (ldata->hw_designer == AMBA_VENDOR_ST))
-                       events |= (RTC_PF | RTC_IRQF);
-
+       if (rtcmis & RTC_BIT_AI) {
+               writel(RTC_BIT_AI, ldata->base + RTC_ICR);
+               events |= (RTC_AF | RTC_IRQF);
                rtc_update_irq(ldata->rtc, 1, events);
 
                return IRQ_HANDLED;
index 3f3a29752369b092c7b99cca4ff33232f6fa605b..7e6af0b22f17d0e3d54bad2e8981689e82be8e81 100644 (file)
@@ -670,6 +670,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
 #define s3c_rtc_resume  NULL
 #endif
 
+#ifdef CONFIG_OF
 static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
        [TYPE_S3C2410] = { TYPE_S3C2410 },
        [TYPE_S3C2416] = { TYPE_S3C2416 },
@@ -677,7 +678,6 @@ static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
        [TYPE_S3C64XX] = { TYPE_S3C64XX },
 };
 
-#ifdef CONFIG_OF
 static const struct of_device_id s3c_rtc_dt_match[] = {
        {
                .compatible = "samsung,s3c2410-rtc",
index e38da0dc41872070e4d0abe40da9280b6ddf747a..1f76320e545b1cf15d563cc8996b8eed7122f14a 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/rtc.h>
 #include <linux/slab.h>
@@ -519,6 +520,14 @@ static void spear_rtc_shutdown(struct platform_device *pdev)
        clk_disable(config->clk);
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id spear_rtc_id_table[] = {
+       { .compatible = "st,spear600-rtc" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, spear_rtc_id_table);
+#endif
+
 static struct platform_driver spear_rtc_driver = {
        .probe = spear_rtc_probe,
        .remove = __devexit_p(spear_rtc_remove),
@@ -527,6 +536,7 @@ static struct platform_driver spear_rtc_driver = {
        .shutdown = spear_rtc_shutdown,
        .driver = {
                .name = "rtc-spear",
+               .of_match_table = of_match_ptr(spear_rtc_id_table),
        },
 };
 
index 75259fe38602af5952193d7a2a411e6eba661da9..c006025cecc809ac3b00d1abbe59a3f4e3527254 100644 (file)
@@ -309,7 +309,8 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
        struct resource *res;
        int ret;
 
-       info = kzalloc(sizeof(struct tegra_rtc_info), GFP_KERNEL);
+       info = devm_kzalloc(&pdev->dev, sizeof(struct tegra_rtc_info),
+               GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -317,29 +318,18 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
        if (!res) {
                dev_err(&pdev->dev,
                        "Unable to allocate resources for device.\n");
-               ret = -EBUSY;
-               goto err_free_info;
+               return -EBUSY;
        }
 
-       if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
-               dev_err(&pdev->dev,
-                       "Unable to request mem region for device.\n");
-               ret = -EBUSY;
-               goto err_free_info;
+       info->rtc_base = devm_request_and_ioremap(&pdev->dev, res);
+       if (!info->rtc_base) {
+               dev_err(&pdev->dev, "Unable to request mem region and grab IOs for device.\n");
+               return -EBUSY;
        }
 
        info->tegra_rtc_irq = platform_get_irq(pdev, 0);
-       if (info->tegra_rtc_irq <= 0) {
-               ret = -EBUSY;
-               goto err_release_mem_region;
-       }
-
-       info->rtc_base = ioremap_nocache(res->start, resource_size(res));
-       if (!info->rtc_base) {
-               dev_err(&pdev->dev, "Unable to grab IOs for device.\n");
-               ret = -EBUSY;
-               goto err_release_mem_region;
-       }
+       if (info->tegra_rtc_irq <= 0)
+               return -EBUSY;
 
        /* set context info. */
        info->pdev = pdev;
@@ -362,11 +352,12 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
                dev_err(&pdev->dev,
                        "Unable to register device (err=%d).\n",
                        ret);
-               goto err_iounmap;
+               return ret;
        }
 
-       ret = request_irq(info->tegra_rtc_irq, tegra_rtc_irq_handler,
-               IRQF_TRIGGER_HIGH, "rtc alarm", &pdev->dev);
+       ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq,
+                       tegra_rtc_irq_handler, IRQF_TRIGGER_HIGH,
+                       "rtc alarm", &pdev->dev);
        if (ret) {
                dev_err(&pdev->dev,
                        "Unable to request interrupt for device (err=%d).\n",
@@ -380,12 +371,6 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
 
 err_dev_unreg:
        rtc_device_unregister(info->rtc_dev);
-err_iounmap:
-       iounmap(info->rtc_base);
-err_release_mem_region:
-       release_mem_region(res->start, resource_size(res));
-err_free_info:
-       kfree(info);
 
        return ret;
 }
@@ -393,17 +378,8 @@ err_free_info:
 static int __devexit tegra_rtc_remove(struct platform_device *pdev)
 {
        struct tegra_rtc_info *info = platform_get_drvdata(pdev);
-       struct resource *res;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -EBUSY;
 
-       free_irq(info->tegra_rtc_irq, &pdev->dev);
        rtc_device_unregister(info->rtc_dev);
-       iounmap(info->rtc_base);
-       release_mem_region(res->start, resource_size(res));
-       kfree(info);
 
        platform_set_drvdata(pdev, NULL);
 
index 3b6e6a67e765b34e7efd7e5cb82eea63a930d31e..59c6245e0421f21a70548108a2607ebdaa4bbd33 100644 (file)
@@ -396,7 +396,7 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
 {
        struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
        struct wm831x_rtc *wm831x_rtc;
-       int alm_irq = platform_get_irq_byname(pdev, "ALM");
+       int alm_irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "ALM"));
        int ret = 0;
 
        wm831x_rtc = devm_kzalloc(&pdev->dev, sizeof(*wm831x_rtc), GFP_KERNEL);
index 33a6743ddc558c11e9b07e09e230efb40e80da28..c05da00583f06c94f5fbb12bb110c7f5a60875c8 100644 (file)
@@ -10,8 +10,6 @@
 #ifndef DASD_INT_H
 #define DASD_INT_H
 
-#ifdef __KERNEL__
-
 /* we keep old device allocation scheme; IOW, minors are still in 0..255 */
 #define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
 #define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
@@ -791,6 +789,4 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
 #define dasd_eer_enabled(d)    (0)
 #endif /* CONFIG_DASD_ERR */
 
-#endif                         /* __KERNEL__ */
-
 #endif                         /* DASD_H */
index 36506366158daa719c5c66419c0000c233a09d87..766cb7b19b403fedadc2fecee3d9c24cc7bee273 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/memory.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <asm/chpid.h>
 #include <asm/sclp.h>
@@ -38,7 +39,8 @@ struct read_info_sccb {
        u64     facilities;             /* 48-55 */
        u8      _reserved2[84 - 56];    /* 56-83 */
        u8      fac84;                  /* 84 */
-       u8      _reserved3[91 - 85];    /* 85-90 */
+       u8      fac85;                  /* 85 */
+       u8      _reserved3[91 - 86];    /* 86-90 */
        u8      flags;                  /* 91 */
        u8      _reserved4[100 - 92];   /* 92-99 */
        u32     rnsize2;                /* 100-103 */
@@ -51,6 +53,7 @@ static int __initdata early_read_info_sccb_valid;
 
 u64 sclp_facilities;
 static u8 sclp_fac84;
+static u8 sclp_fac85;
 static unsigned long long rzm;
 static unsigned long long rnmax;
 
@@ -112,6 +115,7 @@ void __init sclp_facilities_detect(void)
        sccb = &early_read_info_sccb;
        sclp_facilities = sccb->facilities;
        sclp_fac84 = sccb->fac84;
+       sclp_fac85 = sccb->fac85;
        rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
        rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
        rzm <<= 20;
@@ -127,6 +131,12 @@ unsigned long long sclp_get_rzm(void)
        return rzm;
 }
 
+u8 sclp_get_fac85(void)
+{
+       return sclp_fac85;
+}
+EXPORT_SYMBOL_GPL(sclp_get_fac85);
+
 /*
  * This function will be called after sclp_facilities_detect(), which gets
  * called from early.c code. Therefore the sccb should have valid contents.
index 69e6c50d4cfb25c341d0e115d2ad2d4a3034520a..50f7115990fffc3954cc99f24bdd06d1caa4fa7a 100644 (file)
@@ -211,7 +211,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
        sccb.evbuf.event_qual = EQ_STORE_DATA;
        sccb.evbuf.data_id = DI_FCP_DUMP;
        sccb.evbuf.event_id = 4712;
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
        sccb.evbuf.asa_size = ASA_SIZE_64;
 #else
        sccb.evbuf.asa_size = ASA_SIZE_32;
index 01bb04cd9e7516a567b714fcd1f8a1896096aba9..2a096795b9aa86768ef18561872d948800eb6402 100644 (file)
@@ -571,13 +571,12 @@ free_cmd:
 static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
                               int iscsi_cmd, int size)
 {
-       cmd->va = pci_alloc_consistent(phba->ctrl.pdev, sizeof(size),
-                                      &cmd->dma);
+       cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
        if (!cmd->va) {
                SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
                return -ENOMEM;
        }
-       memset(cmd->va, 0, sizeof(size));
+       memset(cmd->va, 0, size);
        cmd->size = size;
        be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
        return 0;
index 8b6c6bf7837e7e09a0d6fec5d88733b3477a0d77..b83927440171810b4d6442c992c2c01a5db4558d 100644 (file)
@@ -426,6 +426,23 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
                vshost = vport->drv_port.im_port->shost;
                fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
                fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
+               fc_host_supported_classes(vshost) = FC_COS_CLASS3;
+
+               memset(fc_host_supported_fc4s(vshost), 0,
+                       sizeof(fc_host_supported_fc4s(vshost)));
+
+               /* For FCP type 0x08 */
+               if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
+                       fc_host_supported_fc4s(vshost)[2] = 1;
+
+               /* For fibre channel services type 0x20 */
+               fc_host_supported_fc4s(vshost)[7] = 1;
+
+               fc_host_supported_speeds(vshost) =
+                               bfad_im_supported_speeds(&bfad->bfa);
+               fc_host_maxframe_size(vshost) =
+                               bfa_fcport_get_maxfrsize(&bfad->bfa);
+
                fc_vport->dd_data = vport;
                vport->drv_port.im_port->fc_vport = fc_vport;
        } else if (rc == BFA_STATUS_INVALID_WWN)
index 3153923f5b6027f1c16d806e14092e1df5356218..1ac09afe35ee17a6a23916e41ede98cd3f63a2ad 100644 (file)
@@ -987,7 +987,7 @@ done:
        return 0;
 }
 
-static u32
+u32
 bfad_im_supported_speeds(struct bfa_s *bfa)
 {
        struct bfa_ioc_attr_s *ioc_attr;
index 0814367ef101a1c075c0cfd4f5a52bc34dea920d..f6c1023e502a13cd04f19551d4893ec5f9014129 100644 (file)
@@ -37,6 +37,7 @@ int  bfad_im_scsi_host_alloc(struct bfad_s *bfad,
                struct bfad_im_port_s *im_port, struct device *dev);
 void bfad_im_scsi_host_free(struct bfad_s *bfad,
                                struct bfad_im_port_s *im_port);
+u32 bfad_im_supported_speeds(struct bfa_s *bfa);
 
 #define MAX_FCP_TARGET 1024
 #define MAX_FCP_LUN 16384
index a4953ef9e53accf67b5933e3bc1259511f31fa22..0578fa0dc14b73e6c26113d163759e1fe33b735d 100644 (file)
@@ -62,7 +62,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME            "bnx2fc"
-#define BNX2FC_VERSION         "1.0.10"
+#define BNX2FC_VERSION         "1.0.11"
 
 #define PFX                    "bnx2fc: "
 
@@ -228,13 +228,16 @@ struct bnx2fc_interface {
        struct packet_type fip_packet_type;
        struct workqueue_struct *timer_work_queue;
        struct kref kref;
-       struct fcoe_ctlr ctlr;
        u8 vlan_enabled;
        int vlan_id;
        bool enabled;
 };
 
-#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
+#define bnx2fc_from_ctlr(x)                    \
+       ((struct bnx2fc_interface *)((x) + 1))
+
+#define bnx2fc_to_ctlr(x)                                      \
+       ((struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1))
 
 struct bnx2fc_lport {
        struct list_head list;
index ce0ce3e32f336aaf711d6129a781c0aa035120fa..bdbbb13b8534c2318464b1b9a803c554af2b17ac 100644 (file)
@@ -854,7 +854,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
        struct fc_exch *exch = fc_seq_exch(seq);
        struct fc_lport *lport = exch->lp;
        u8 *mac;
-       struct fc_frame_header *fh;
        u8 op;
 
        if (IS_ERR(fp))
@@ -862,13 +861,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
 
        mac = fr_cb(fp)->granted_mac;
        if (is_zero_ether_addr(mac)) {
-               fh = fc_frame_header_get(fp);
-               if (fh->fh_type != FC_TYPE_ELS) {
-                       printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
-                               "fh_type != FC_TYPE_ELS\n");
-                       fc_frame_free(fp);
-                       return;
-               }
                op = fc_frame_payload_op(fp);
                if (lport->vport) {
                        if (op == ELS_LS_RJT) {
@@ -878,12 +870,10 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
                                return;
                        }
                }
-               if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
-                       fc_frame_free(fp);
-                       return;
-               }
+               fcoe_ctlr_recv_flogi(fip, lport, fp);
        }
-       fip->update_mac(lport, mac);
+       if (!is_zero_ether_addr(mac))
+               fip->update_mac(lport, mac);
 done:
        fc_lport_flogi_resp(seq, fp, lport);
 }
@@ -910,7 +900,7 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
 {
        struct fcoe_port *port = lport_priv(lport);
        struct bnx2fc_interface *interface = port->priv;
-       struct fcoe_ctlr *fip = &interface->ctlr;
+       struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
        struct fc_frame_header *fh = fc_frame_header_get(fp);
 
        switch (op) {
index c1c6a92a0b989737c9f8a15b81e8e24cd86b758e..f52f668fd247b5601e27ac9572c99b6832302ca8 100644 (file)
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME                "bnx2fc"
 #define DRV_MODULE_VERSION     BNX2FC_VERSION
-#define DRV_MODULE_RELDATE     "Jan 22, 2011"
+#define DRV_MODULE_RELDATE     "Apr 24, 2012"
 
 
 static char version[] __devinitdata =
@@ -54,6 +54,7 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb;
 static struct libfc_function_template bnx2fc_libfc_fcn_templ;
 static struct scsi_host_template bnx2fc_shost_template;
 static struct fc_function_template bnx2fc_transport_function;
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ;
 static struct fc_function_template bnx2fc_vport_xport_function;
 static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
 static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
@@ -88,6 +89,7 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport);
 static void bnx2fc_stop(struct bnx2fc_interface *interface);
 static int __init bnx2fc_mod_init(void);
 static void __exit bnx2fc_mod_exit(void);
+static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
 
 unsigned int bnx2fc_debug_level;
 module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@@ -118,6 +120,41 @@ static void bnx2fc_get_lesb(struct fc_lport *lport,
        __fcoe_get_lesb(lport, fc_lesb, netdev);
 }
 
+static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+       struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+       struct net_device *netdev = bnx2fc_netdev(fip->lp);
+       struct fcoe_fc_els_lesb *fcoe_lesb;
+       struct fc_els_lesb fc_lesb;
+
+       __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+       fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+       ctlr_dev->lesb.lesb_link_fail =
+               ntohl(fcoe_lesb->lesb_link_fail);
+       ctlr_dev->lesb.lesb_vlink_fail =
+               ntohl(fcoe_lesb->lesb_vlink_fail);
+       ctlr_dev->lesb.lesb_miss_fka =
+               ntohl(fcoe_lesb->lesb_miss_fka);
+       ctlr_dev->lesb.lesb_symb_err =
+               ntohl(fcoe_lesb->lesb_symb_err);
+       ctlr_dev->lesb.lesb_err_block =
+               ntohl(fcoe_lesb->lesb_err_block);
+       ctlr_dev->lesb.lesb_fcs_error =
+               ntohl(fcoe_lesb->lesb_fcs_error);
+}
+EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
+
+static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+       struct fcoe_ctlr_device *ctlr_dev =
+               fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+       struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+       struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+       fcf_dev->vlan_id = fcoe->vlan_id;
+}
+
 static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
 {
        struct fcoe_percpu_s *bg;
@@ -244,6 +281,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
        struct sk_buff          *skb;
        struct fc_frame_header  *fh;
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr        *ctlr;
        struct bnx2fc_hba *hba;
        struct fcoe_port        *port;
        struct fcoe_hdr         *hp;
@@ -256,6 +294,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 
        port = (struct fcoe_port *)lport_priv(lport);
        interface = port->priv;
+       ctlr = bnx2fc_to_ctlr(interface);
        hba = interface->hba;
 
        fh = fc_frame_header_get(fp);
@@ -268,12 +307,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
        }
 
        if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
-               if (!interface->ctlr.sel_fcf) {
+               if (!ctlr->sel_fcf) {
                        BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
                        kfree_skb(skb);
                        return -EINVAL;
                }
-               if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb))
+               if (fcoe_ctlr_els_send(ctlr, lport, skb))
                        return 0;
        }
 
@@ -346,14 +385,14 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
        /* fill up mac and fcoe headers */
        eh = eth_hdr(skb);
        eh->h_proto = htons(ETH_P_FCOE);
-       if (interface->ctlr.map_dest)
+       if (ctlr->map_dest)
                fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
        else
                /* insert GW address */
-               memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN);
+               memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
 
-       if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN))
-               memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN);
+       if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+               memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
        else
                memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
 
@@ -403,6 +442,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
 {
        struct fc_lport *lport;
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
        struct fc_frame_header *fh;
        struct fcoe_rcv_info *fr;
        struct fcoe_percpu_s *bg;
@@ -410,7 +450,8 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
 
        interface = container_of(ptype, struct bnx2fc_interface,
                                 fcoe_packet_type);
-       lport = interface->ctlr.lp;
+       ctlr = bnx2fc_to_ctlr(interface);
+       lport = ctlr->lp;
 
        if (unlikely(lport == NULL)) {
                printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
@@ -758,11 +799,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
 {
        struct bnx2fc_hba *hba;
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_port *port;
        u64 wwnn, wwpn;
 
        port = lport_priv(lport);
        interface = port->priv;
+       ctlr = bnx2fc_to_ctlr(interface);
        hba = interface->hba;
 
        /* require support for get_pauseparam ethtool op. */
@@ -781,13 +824,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
 
        if (!lport->vport) {
                if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
-                       wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+                       wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
                                                 1, 0);
                BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
                fc_set_wwnn(lport, wwnn);
 
                if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
-                       wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+                       wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
                                                 2, 0);
 
                BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
@@ -824,6 +867,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
        struct fc_lport *lport;
        struct fc_lport *vport;
        struct bnx2fc_interface *interface, *tmp;
+       struct fcoe_ctlr *ctlr;
        int wait_for_upload = 0;
        u32 link_possible = 1;
 
@@ -874,7 +918,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
                if (interface->hba != hba)
                        continue;
 
-               lport = interface->ctlr.lp;
+               ctlr = bnx2fc_to_ctlr(interface);
+               lport = ctlr->lp;
                BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
                                interface->netdev->name, event);
 
@@ -889,8 +934,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
                         * on a stale vlan
                         */
                        if (interface->enabled)
-                               fcoe_ctlr_link_up(&interface->ctlr);
-               } else if (fcoe_ctlr_link_down(&interface->ctlr)) {
+                               fcoe_ctlr_link_up(ctlr);
+               } else if (fcoe_ctlr_link_down(ctlr)) {
                        mutex_lock(&lport->lp_mutex);
                        list_for_each_entry(vport, &lport->vports, list)
                                fc_host_port_type(vport->host) =
@@ -995,9 +1040,11 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
                           struct net_device *orig_dev)
 {
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
        interface = container_of(ptype, struct bnx2fc_interface,
                                 fip_packet_type);
-       fcoe_ctlr_recv(&interface->ctlr, skb);
+       ctlr = bnx2fc_to_ctlr(interface);
+       fcoe_ctlr_recv(ctlr, skb);
        return 0;
 }
 
@@ -1155,6 +1202,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
 {
        struct net_device *netdev = interface->netdev;
        struct net_device *physdev = interface->hba->phys_dev;
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
        struct netdev_hw_addr *ha;
        int sel_san_mac = 0;
 
@@ -1169,7 +1217,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
 
                if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
                    (is_valid_ether_addr(ha->addr))) {
-                       memcpy(interface->ctlr.ctl_src_addr, ha->addr,
+                       memcpy(ctlr->ctl_src_addr, ha->addr,
                               ETH_ALEN);
                        sel_san_mac = 1;
                        BNX2FC_MISC_DBG("Found SAN MAC\n");
@@ -1224,19 +1272,23 @@ static void bnx2fc_release_transport(void)
 
 static void bnx2fc_interface_release(struct kref *kref)
 {
+       struct fcoe_ctlr_device *ctlr_dev;
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
        struct net_device *netdev;
 
        interface = container_of(kref, struct bnx2fc_interface, kref);
        BNX2FC_MISC_DBG("Interface is being released\n");
 
+       ctlr = bnx2fc_to_ctlr(interface);
+       ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
        netdev = interface->netdev;
 
        /* tear-down FIP controller */
        if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
-               fcoe_ctlr_destroy(&interface->ctlr);
+               fcoe_ctlr_destroy(ctlr);
 
-       kfree(interface);
+       fcoe_ctlr_device_delete(ctlr_dev);
 
        dev_put(netdev);
        module_put(THIS_MODULE);
@@ -1329,33 +1381,40 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
                                      struct net_device *netdev,
                                      enum fip_state fip_mode)
 {
+       struct fcoe_ctlr_device *ctlr_dev;
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
+       int size;
        int rc = 0;
 
-       interface = kzalloc(sizeof(*interface), GFP_KERNEL);
-       if (!interface) {
+       size = (sizeof(*interface) + sizeof(struct fcoe_ctlr));
+       ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ,
+                                        size);
+       if (!ctlr_dev) {
                printk(KERN_ERR PFX "Unable to allocate interface structure\n");
                return NULL;
        }
+       ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+       interface = fcoe_ctlr_priv(ctlr);
        dev_hold(netdev);
        kref_init(&interface->kref);
        interface->hba = hba;
        interface->netdev = netdev;
 
        /* Initialize FIP */
-       fcoe_ctlr_init(&interface->ctlr, fip_mode);
-       interface->ctlr.send = bnx2fc_fip_send;
-       interface->ctlr.update_mac = bnx2fc_update_src_mac;
-       interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
+       fcoe_ctlr_init(ctlr, fip_mode);
+       ctlr->send = bnx2fc_fip_send;
+       ctlr->update_mac = bnx2fc_update_src_mac;
+       ctlr->get_src_addr = bnx2fc_get_src_mac;
        set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
 
        rc = bnx2fc_interface_setup(interface);
        if (!rc)
                return interface;
 
-       fcoe_ctlr_destroy(&interface->ctlr);
+       fcoe_ctlr_destroy(ctlr);
        dev_put(netdev);
-       kfree(interface);
+       fcoe_ctlr_device_delete(ctlr_dev);
        return NULL;
 }
 
@@ -1373,6 +1432,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
 static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
                                  struct device *parent, int npiv)
 {
+       struct fcoe_ctlr        *ctlr = bnx2fc_to_ctlr(interface);
        struct fc_lport         *lport, *n_port;
        struct fcoe_port        *port;
        struct Scsi_Host        *shost;
@@ -1383,7 +1443,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
 
        blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
        if (!blport) {
-               BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n");
+               BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n");
                return NULL;
        }
 
@@ -1479,7 +1539,8 @@ static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface)
 
 static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
 {
-       struct fc_lport *lport = interface->ctlr.lp;
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+       struct fc_lport *lport = ctlr->lp;
        struct fcoe_port *port = lport_priv(lport);
        struct bnx2fc_hba *hba = interface->hba;
 
@@ -1519,7 +1580,8 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
 
 static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
 {
-       struct fc_lport *lport = interface->ctlr.lp;
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+       struct fc_lport *lport = ctlr->lp;
        struct fcoe_port *port = lport_priv(lport);
 
        bnx2fc_interface_cleanup(interface);
@@ -1543,13 +1605,15 @@ static int bnx2fc_destroy(struct net_device *netdev)
 {
        struct bnx2fc_interface *interface = NULL;
        struct workqueue_struct *timer_work_queue;
+       struct fcoe_ctlr *ctlr;
        int rc = 0;
 
        rtnl_lock();
        mutex_lock(&bnx2fc_dev_lock);
 
        interface = bnx2fc_interface_lookup(netdev);
-       if (!interface || !interface->ctlr.lp) {
+       ctlr = bnx2fc_to_ctlr(interface);
+       if (!interface || !ctlr->lp) {
                rc = -ENODEV;
                printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
                goto netdev_err;
@@ -1646,6 +1710,7 @@ static void bnx2fc_ulp_start(void *handle)
 {
        struct bnx2fc_hba *hba = handle;
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
        struct fc_lport *lport;
 
        mutex_lock(&bnx2fc_dev_lock);
@@ -1657,7 +1722,8 @@ static void bnx2fc_ulp_start(void *handle)
 
        list_for_each_entry(interface, &if_list, list) {
                if (interface->hba == hba) {
-                       lport = interface->ctlr.lp;
+                       ctlr = bnx2fc_to_ctlr(interface);
+                       lport = ctlr->lp;
                        /* Kick off Fabric discovery*/
                        printk(KERN_ERR PFX "ulp_init: start discovery\n");
                        lport->tt.frame_send = bnx2fc_xmit;
@@ -1677,13 +1743,14 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport)
 
 static void bnx2fc_stop(struct bnx2fc_interface *interface)
 {
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
        struct fc_lport *lport;
        struct fc_lport *vport;
 
        if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
                return;
 
-       lport = interface->ctlr.lp;
+       lport = ctlr->lp;
        bnx2fc_port_shutdown(lport);
 
        mutex_lock(&lport->lp_mutex);
@@ -1692,7 +1759,7 @@ static void bnx2fc_stop(struct bnx2fc_interface *interface)
                                        FC_PORTTYPE_UNKNOWN;
        mutex_unlock(&lport->lp_mutex);
        fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
-       fcoe_ctlr_link_down(&interface->ctlr);
+       fcoe_ctlr_link_down(ctlr);
        fcoe_clean_pending_queue(lport);
 }
 
@@ -1804,6 +1871,7 @@ exit:
 
 static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
 {
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
        struct fc_lport *lport;
        int wait_cnt = 0;
 
@@ -1814,18 +1882,18 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
                return;
        }
 
-       lport = interface->ctlr.lp;
+       lport = ctlr->lp;
        BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
 
        if (!bnx2fc_link_ok(lport) && interface->enabled) {
                BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
-               fcoe_ctlr_link_up(&interface->ctlr);
+               fcoe_ctlr_link_up(ctlr);
                fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
                set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
        }
 
        /* wait for the FCF to be selected before issuing FLOGI */
-       while (!interface->ctlr.sel_fcf) {
+       while (!ctlr->sel_fcf) {
                msleep(250);
                /* give up after 3 secs */
                if (++wait_cnt > 12)
@@ -1889,19 +1957,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
 static int bnx2fc_disable(struct net_device *netdev)
 {
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
        int rc = 0;
 
        rtnl_lock();
        mutex_lock(&bnx2fc_dev_lock);
 
        interface = bnx2fc_interface_lookup(netdev);
-       if (!interface || !interface->ctlr.lp) {
+       ctlr = bnx2fc_to_ctlr(interface);
+       if (!interface || !ctlr->lp) {
                rc = -ENODEV;
                printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
        } else {
                interface->enabled = false;
-               fcoe_ctlr_link_down(&interface->ctlr);
-               fcoe_clean_pending_queue(interface->ctlr.lp);
+               fcoe_ctlr_link_down(ctlr);
+               fcoe_clean_pending_queue(ctlr->lp);
        }
 
        mutex_unlock(&bnx2fc_dev_lock);
@@ -1913,17 +1983,19 @@ static int bnx2fc_disable(struct net_device *netdev)
 static int bnx2fc_enable(struct net_device *netdev)
 {
        struct bnx2fc_interface *interface;
+       struct fcoe_ctlr *ctlr;
        int rc = 0;
 
        rtnl_lock();
        mutex_lock(&bnx2fc_dev_lock);
 
        interface = bnx2fc_interface_lookup(netdev);
-       if (!interface || !interface->ctlr.lp) {
+       ctlr = bnx2fc_to_ctlr(interface);
+       if (!interface || !ctlr->lp) {
                rc = -ENODEV;
                printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
-       } else if (!bnx2fc_link_ok(interface->ctlr.lp)) {
-               fcoe_ctlr_link_up(&interface->ctlr);
+       } else if (!bnx2fc_link_ok(ctlr->lp)) {
+               fcoe_ctlr_link_up(ctlr);
                interface->enabled = true;
        }
 
@@ -1944,6 +2016,7 @@ static int bnx2fc_enable(struct net_device *netdev)
  */
 static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
 {
+       struct fcoe_ctlr *ctlr;
        struct bnx2fc_interface *interface;
        struct bnx2fc_hba *hba;
        struct net_device *phys_dev;
@@ -2010,6 +2083,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
                goto ifput_err;
        }
 
+       ctlr = bnx2fc_to_ctlr(interface);
        interface->vlan_id = vlan_id;
        interface->vlan_enabled = 1;
 
@@ -2035,10 +2109,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
        lport->boot_time = jiffies;
 
        /* Make this master N_port */
-       interface->ctlr.lp = lport;
+       ctlr->lp = lport;
 
        if (!bnx2fc_link_ok(lport)) {
-               fcoe_ctlr_link_up(&interface->ctlr);
+               fcoe_ctlr_link_up(ctlr);
                fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
                set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
        }
@@ -2439,6 +2513,19 @@ static void __exit bnx2fc_mod_exit(void)
 module_init(bnx2fc_mod_init);
 module_exit(bnx2fc_mod_exit);
 
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
+       .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+       .get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
+       .get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
+       .get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
+       .get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
+       .get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
+       .get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
+
+       .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+       .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
+};
+
 static struct fc_function_template bnx2fc_transport_function = {
        .show_host_node_name = 1,
        .show_host_port_name = 1,
index afd570962b8c288636fad351671a376ad03992e7..2ca6bfe4ce5e38fe3a7be6b75cd39191c4a6ca2d 100644 (file)
@@ -167,6 +167,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
 {
        struct fc_lport *lport = port->lport;
        struct bnx2fc_interface *interface = port->priv;
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
        struct bnx2fc_hba *hba = interface->hba;
        struct kwqe *kwqe_arr[4];
        struct fcoe_kwqe_conn_offload1 ofld_req1;
@@ -314,13 +315,13 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
        ofld_req4.src_mac_addr_mid[1] =  port->data_src_addr[2];
        ofld_req4.src_mac_addr_hi[0] =  port->data_src_addr[1];
        ofld_req4.src_mac_addr_hi[1] =  port->data_src_addr[0];
-       ofld_req4.dst_mac_addr_lo[0] =  interface->ctlr.dest_addr[5];
+       ofld_req4.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
                                                        /* fcf mac */
-       ofld_req4.dst_mac_addr_lo[1] =  interface->ctlr.dest_addr[4];
-       ofld_req4.dst_mac_addr_mid[0] =  interface->ctlr.dest_addr[3];
-       ofld_req4.dst_mac_addr_mid[1] =  interface->ctlr.dest_addr[2];
-       ofld_req4.dst_mac_addr_hi[0] =  interface->ctlr.dest_addr[1];
-       ofld_req4.dst_mac_addr_hi[1] =  interface->ctlr.dest_addr[0];
+       ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
+       ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+       ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+       ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+       ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
 
        ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
        ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -351,6 +352,7 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
 {
        struct kwqe *kwqe_arr[2];
        struct bnx2fc_interface *interface = port->priv;
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
        struct bnx2fc_hba *hba = interface->hba;
        struct fcoe_kwqe_conn_enable_disable enbl_req;
        struct fc_lport *lport = port->lport;
@@ -374,12 +376,12 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
        enbl_req.src_mac_addr_hi[1] =  port->data_src_addr[0];
        memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
 
-       enbl_req.dst_mac_addr_lo[0] =  interface->ctlr.dest_addr[5];
-       enbl_req.dst_mac_addr_lo[1] =  interface->ctlr.dest_addr[4];
-       enbl_req.dst_mac_addr_mid[0] =  interface->ctlr.dest_addr[3];
-       enbl_req.dst_mac_addr_mid[1] =  interface->ctlr.dest_addr[2];
-       enbl_req.dst_mac_addr_hi[0] =  interface->ctlr.dest_addr[1];
-       enbl_req.dst_mac_addr_hi[1] =  interface->ctlr.dest_addr[0];
+       enbl_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
+       enbl_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
+       enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+       enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+       enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+       enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
 
        port_id = fc_host_port_id(lport->host);
        if (port_id != tgt->sid) {
@@ -419,6 +421,7 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
                                    struct bnx2fc_rport *tgt)
 {
        struct bnx2fc_interface *interface = port->priv;
+       struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
        struct bnx2fc_hba *hba = interface->hba;
        struct fcoe_kwqe_conn_enable_disable disable_req;
        struct kwqe *kwqe_arr[2];
@@ -440,12 +443,12 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
        disable_req.src_mac_addr_hi[0] =  tgt->src_addr[1];
        disable_req.src_mac_addr_hi[1] =  tgt->src_addr[0];
 
-       disable_req.dst_mac_addr_lo[0] =  interface->ctlr.dest_addr[5];
-       disable_req.dst_mac_addr_lo[1] =  interface->ctlr.dest_addr[4];
-       disable_req.dst_mac_addr_mid[0] =  interface->ctlr.dest_addr[3];
-       disable_req.dst_mac_addr_mid[1] =  interface->ctlr.dest_addr[2];
-       disable_req.dst_mac_addr_hi[0] =  interface->ctlr.dest_addr[1];
-       disable_req.dst_mac_addr_hi[1] =  interface->ctlr.dest_addr[0];
+       disable_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
+       disable_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
+       disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+       disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+       disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+       disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
 
        port_id = tgt->sid;
        disable_req.s_id[0] = (port_id & 0x000000FF);
index e897ce975bb8b15f7ae74f56d0028f1b0f6adb05..4f7453b9e41e2486b662d7fc3d8f55b1762fe154 100644 (file)
@@ -810,8 +810,22 @@ retry_tmf:
        spin_lock_bh(&tgt->tgt_lock);
 
        io_req->wait_for_comp = 0;
-       if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
+       if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
                set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
+               if (io_req->on_tmf_queue) {
+                       list_del_init(&io_req->link);
+                       io_req->on_tmf_queue = 0;
+               }
+               io_req->wait_for_comp = 1;
+               bnx2fc_initiate_cleanup(io_req);
+               spin_unlock_bh(&tgt->tgt_lock);
+               rc = wait_for_completion_timeout(&io_req->tm_done,
+                                                BNX2FC_FW_TIMEOUT);
+               spin_lock_bh(&tgt->tgt_lock);
+               io_req->wait_for_comp = 0;
+               if (!rc)
+                       kref_put(&io_req->refcount, bnx2fc_cmd_release);
+       }
 
        spin_unlock_bh(&tgt->tgt_lock);
 
@@ -1089,6 +1103,48 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
        return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
 }
 
+int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
+{
+       struct bnx2fc_rport *tgt = io_req->tgt;
+       struct fc_rport_priv *rdata = tgt->rdata;
+       int logo_issued;
+       int rc = SUCCESS;
+       int wait_cnt = 0;
+
+       BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
+                     tgt->flags);
+       logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
+                                      &tgt->flags);
+       io_req->wait_for_comp = 1;
+       bnx2fc_initiate_cleanup(io_req);
+
+       spin_unlock_bh(&tgt->tgt_lock);
+
+       wait_for_completion(&io_req->tm_done);
+
+       io_req->wait_for_comp = 0;
+       /*
+        * release the reference taken in eh_abort to allow the
+        * target to re-login after flushing IOs
+        */
+        kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+       if (!logo_issued) {
+               clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+               mutex_lock(&lport->disc.disc_mutex);
+               lport->tt.rport_logoff(rdata);
+               mutex_unlock(&lport->disc.disc_mutex);
+               do {
+                       msleep(BNX2FC_RELOGIN_WAIT_TIME);
+                       if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
+                               rc = FAILED;
+                               break;
+                       }
+               } while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
+       }
+       spin_lock_bh(&tgt->tgt_lock);
+       return rc;
+}
 /**
  * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
  *                     SCSI command
@@ -1103,10 +1159,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct bnx2fc_cmd *io_req;
        struct fc_lport *lport;
-       struct fc_rport_priv *rdata;
        struct bnx2fc_rport *tgt;
-       int logo_issued;
-       int wait_cnt = 0;
        int rc = FAILED;
 
 
@@ -1183,58 +1236,31 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
        list_add_tail(&io_req->link, &tgt->io_retire_queue);
 
        init_completion(&io_req->tm_done);
-       io_req->wait_for_comp = 1;
 
-       if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
-               /* Cancel the current timer running on this io_req */
-               if (cancel_delayed_work(&io_req->timeout_work))
-                       kref_put(&io_req->refcount,
-                                bnx2fc_cmd_release); /* drop timer hold */
-               set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
-               rc = bnx2fc_initiate_abts(io_req);
-       } else {
+       if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
                printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
                                "already in abts processing\n", io_req->xid);
                if (cancel_delayed_work(&io_req->timeout_work))
                        kref_put(&io_req->refcount,
                                 bnx2fc_cmd_release); /* drop timer hold */
-               bnx2fc_initiate_cleanup(io_req);
+               rc = bnx2fc_expl_logo(lport, io_req);
+               goto out;
+       }
 
+       /* Cancel the current timer running on this io_req */
+       if (cancel_delayed_work(&io_req->timeout_work))
+               kref_put(&io_req->refcount,
+                        bnx2fc_cmd_release); /* drop timer hold */
+       set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
+       io_req->wait_for_comp = 1;
+       rc = bnx2fc_initiate_abts(io_req);
+       if (rc == FAILED) {
+               bnx2fc_initiate_cleanup(io_req);
                spin_unlock_bh(&tgt->tgt_lock);
-
                wait_for_completion(&io_req->tm_done);
-
                spin_lock_bh(&tgt->tgt_lock);
                io_req->wait_for_comp = 0;
-               rdata = io_req->tgt->rdata;
-               logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
-                                              &tgt->flags);
-               kref_put(&io_req->refcount, bnx2fc_cmd_release);
-               spin_unlock_bh(&tgt->tgt_lock);
-
-               if (!logo_issued) {
-                       BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
-                                     tgt->flags);
-                       mutex_lock(&lport->disc.disc_mutex);
-                       lport->tt.rport_logoff(rdata);
-                       mutex_unlock(&lport->disc.disc_mutex);
-                       do {
-                               msleep(BNX2FC_RELOGIN_WAIT_TIME);
-                               /*
-                                * If session not recovered, let SCSI-ml
-                                * escalate error recovery.
-                                */
-                               if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
-                                       return FAILED;
-                       } while (!test_bit(BNX2FC_FLAG_SESSION_READY,
-                                          &tgt->flags));
-               }
-               return SUCCESS;
-       }
-       if (rc == FAILED) {
-               kref_put(&io_req->refcount, bnx2fc_cmd_release);
-               spin_unlock_bh(&tgt->tgt_lock);
-               return rc;
+               goto done;
        }
        spin_unlock_bh(&tgt->tgt_lock);
 
@@ -1247,7 +1273,8 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
                /* Let the scsi-ml try to recover this command */
                printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
                       io_req->xid);
-               rc = FAILED;
+               rc = bnx2fc_expl_logo(lport, io_req);
+               goto out;
        } else {
                /*
                 * We come here even when there was a race condition
@@ -1259,9 +1286,10 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
                bnx2fc_scsi_done(io_req, DID_ABORT);
                kref_put(&io_req->refcount, bnx2fc_cmd_release);
        }
-
+done:
        /* release the reference taken in eh_abort */
        kref_put(&io_req->refcount, bnx2fc_cmd_release);
+out:
        spin_unlock_bh(&tgt->tgt_lock);
        return rc;
 }
index c1800b5312708a914cf424319dd7376fbb008186..082a25c3117e58cf961c383803e743ac92b7da24 100644 (file)
@@ -185,6 +185,16 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
                BUG_ON(rc);
        }
 
+       list_for_each_safe(list, tmp, &tgt->active_tm_queue) {
+               i++;
+               io_req = (struct bnx2fc_cmd *)list;
+               list_del_init(&io_req->link);
+               io_req->on_tmf_queue = 0;
+               BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
+               if (io_req->wait_for_comp)
+                       complete(&io_req->tm_done);
+       }
+
        list_for_each_safe(list, tmp, &tgt->els_queue) {
                i++;
                io_req = (struct bnx2fc_cmd *)list;
@@ -213,8 +223,17 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
 
                BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
 
-               if (cancel_delayed_work(&io_req->timeout_work))
+               if (cancel_delayed_work(&io_req->timeout_work)) {
+                       if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+                                               &io_req->req_flags)) {
+                               /* Handle eh_abort timeout */
+                               BNX2FC_IO_DBG(io_req, "eh_abort for IO "
+                                             "in retire_q\n");
+                               if (io_req->wait_for_comp)
+                                       complete(&io_req->tm_done);
+                       }
                        kref_put(&io_req->refcount, bnx2fc_cmd_release);
+               }
 
                clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
        }
index f6d37d0271f73bd08538bc3028e388e0e569b9db..aed0f5db36684c67c8a01817cad83fa553f48cb2 100644 (file)
@@ -1,4 +1,4 @@
 obj-$(CONFIG_FCOE) += fcoe.o
 obj-$(CONFIG_LIBFCOE) += libfcoe.o
 
-libfcoe-objs := fcoe_ctlr.o fcoe_transport.o
+libfcoe-objs := fcoe_ctlr.o fcoe_transport.o fcoe_sysfs.o
index 76e3d0b5bfa676212156d800dd295b46cddfc1de..fe30b1b65e1d3ddc879823a79404efdcc60d4982 100644 (file)
@@ -41,6 +41,7 @@
 
 #include <scsi/fc/fc_encaps.h>
 #include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fcoe.h>
 
 #include <scsi/libfc.h>
 #include <scsi/fc_frame.h>
@@ -150,6 +151,21 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
 static int fcoe_vport_disable(struct fc_vport *, bool disable);
 static void fcoe_set_vport_symbolic_name(struct fc_vport *);
 static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
+
+/*
+ * fcoe_sysfs callbacks for this transport.  All six LESB counter getters
+ * share fcoe_ctlr_get_lesb(), which refreshes every cached counter at once.
+ */
+static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
+       .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+       .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
+
+       .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+       .get_fcoe_fcf_vlan_id = fcoe_fcf_get_vlan_id,
+};
 
 static struct libfc_function_template fcoe_libfc_fcn_templ = {
        .frame_send = fcoe_xmit,
@@ -282,7 +298,7 @@ static struct scsi_host_template fcoe_shost_template = {
 static int fcoe_interface_setup(struct fcoe_interface *fcoe,
                                struct net_device *netdev)
 {
-       struct fcoe_ctlr *fip = &fcoe->ctlr;
+       struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
        struct netdev_hw_addr *ha;
        struct net_device *real_dev;
        u8 flogi_maddr[ETH_ALEN];
@@ -366,7 +382,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
                                                    enum fip_state fip_mode)
 {
+       struct fcoe_ctlr_device *ctlr_dev;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
+       int size;
        int err;
 
        if (!try_module_get(THIS_MODULE)) {
@@ -376,27 +395,32 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
                goto out;
        }
 
-       fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
-       if (!fcoe) {
-               FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
+       size = sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface);
+       ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
+                                       size);
+       if (!ctlr_dev) {
+               FCOE_DBG("Failed to add fcoe_ctlr_device\n");
                fcoe = ERR_PTR(-ENOMEM);
                goto out_putmod;
        }
 
+       ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+       fcoe = fcoe_ctlr_priv(ctlr);
+
        dev_hold(netdev);
 
        /*
         * Initialize FIP.
         */
-       fcoe_ctlr_init(&fcoe->ctlr, fip_mode);
-       fcoe->ctlr.send = fcoe_fip_send;
-       fcoe->ctlr.update_mac = fcoe_update_src_mac;
-       fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
+       fcoe_ctlr_init(ctlr, fip_mode);
+       ctlr->send = fcoe_fip_send;
+       ctlr->update_mac = fcoe_update_src_mac;
+       ctlr->get_src_addr = fcoe_get_src_mac;
 
        err = fcoe_interface_setup(fcoe, netdev);
        if (err) {
-               fcoe_ctlr_destroy(&fcoe->ctlr);
-               kfree(fcoe);
+               fcoe_ctlr_destroy(ctlr);
+               fcoe_ctlr_device_delete(ctlr_dev);
                dev_put(netdev);
                fcoe = ERR_PTR(err);
                goto out_putmod;
@@ -419,7 +443,7 @@ out:
 static void fcoe_interface_remove(struct fcoe_interface *fcoe)
 {
        struct net_device *netdev = fcoe->netdev;
-       struct fcoe_ctlr *fip = &fcoe->ctlr;
+       struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
        u8 flogi_maddr[ETH_ALEN];
        const struct net_device_ops *ops;
 
@@ -462,7 +486,8 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
 static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
 {
        struct net_device *netdev = fcoe->netdev;
-       struct fcoe_ctlr *fip = &fcoe->ctlr;
+       struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
+       struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
 
        rtnl_lock();
        if (!fcoe->removed)
@@ -472,8 +497,8 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
        /* Release the self-reference taken during fcoe_interface_create() */
        /* tear-down the FCoE controller */
        fcoe_ctlr_destroy(fip);
-       scsi_host_put(fcoe->ctlr.lp->host);
-       kfree(fcoe);
+       scsi_host_put(fip->lp->host);
+       fcoe_ctlr_device_delete(ctlr_dev);
        dev_put(netdev);
        module_put(THIS_MODULE);
 }
@@ -493,9 +518,11 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
                         struct net_device *orig_dev)
 {
        struct fcoe_interface *fcoe;
+       struct fcoe_ctlr *ctlr;
 
        fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
-       fcoe_ctlr_recv(&fcoe->ctlr, skb);
+       ctlr = fcoe_to_ctlr(fcoe);
+       fcoe_ctlr_recv(ctlr, skb);
        return 0;
 }
 
@@ -645,11 +672,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
        u32 mfs;
        u64 wwnn, wwpn;
        struct fcoe_interface *fcoe;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_port *port;
 
        /* Setup lport private data to point to fcoe softc */
        port = lport_priv(lport);
        fcoe = port->priv;
+       ctlr = fcoe_to_ctlr(fcoe);
 
        /*
         * Determine max frame size based on underlying device and optional
@@ -676,10 +705,10 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
 
        if (!lport->vport) {
                if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
-                       wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
+                       wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0);
                fc_set_wwnn(lport, wwnn);
                if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
-                       wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
+                       wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
                                                 2, 0);
                fc_set_wwpn(lport, wwpn);
        }
@@ -1056,6 +1085,7 @@ static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
 static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
                                       struct device *parent, int npiv)
 {
+       struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
        struct net_device *netdev = fcoe->netdev;
        struct fc_lport *lport, *n_port;
        struct fcoe_port *port;
@@ -1119,7 +1149,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
        }
 
        /* Initialize the library */
-       rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1);
+       rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1);
        if (rc) {
                FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
                                "interface\n");
@@ -1386,6 +1416,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 {
        struct fc_lport *lport;
        struct fcoe_rcv_info *fr;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        struct fc_frame_header *fh;
        struct fcoe_percpu_s *fps;
@@ -1393,7 +1424,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
        unsigned int cpu;
 
        fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
-       lport = fcoe->ctlr.lp;
+       ctlr = fcoe_to_ctlr(fcoe);
+       lport = ctlr->lp;
        if (unlikely(!lport)) {
                FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
                goto err2;
@@ -1409,8 +1441,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 
        eh = eth_hdr(skb);
 
-       if (is_fip_mode(&fcoe->ctlr) &&
-           compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
+       if (is_fip_mode(ctlr) &&
+           compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
                FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
                                eh->h_source);
                goto err;
@@ -1544,6 +1576,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
        unsigned int elen;              /* eth header, may include vlan */
        struct fcoe_port *port = lport_priv(lport);
        struct fcoe_interface *fcoe = port->priv;
+       struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
        u8 sof, eof;
        struct fcoe_hdr *hp;
 
@@ -1559,7 +1592,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
        }
 
        if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
-           fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
+           fcoe_ctlr_els_send(ctlr, lport, skb))
                return 0;
 
        sof = fr_sof(fp);
@@ -1623,12 +1656,12 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
        /* fill up mac and fcoe headers */
        eh = eth_hdr(skb);
        eh->h_proto = htons(ETH_P_FCOE);
-       memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
-       if (fcoe->ctlr.map_dest)
+       memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
+       if (ctlr->map_dest)
                memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
 
-       if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
-               memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
+       if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+               memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
        else
                memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
 
@@ -1677,6 +1710,7 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
 static inline int fcoe_filter_frames(struct fc_lport *lport,
                                     struct fc_frame *fp)
 {
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        struct fc_frame_header *fh;
        struct sk_buff *skb = (struct sk_buff *)fp;
@@ -1698,7 +1732,8 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
                return 0;
 
        fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
-       if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
+       ctlr = fcoe_to_ctlr(fcoe);
+       if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
            ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
                FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
                return -EINVAL;
@@ -1877,6 +1912,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
                                     ulong event, void *ptr)
 {
        struct dcb_app_type *entry = ptr;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        struct net_device *netdev;
        struct fcoe_port *port;
@@ -1894,6 +1930,8 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
        if (!fcoe)
                return NOTIFY_OK;
 
+       ctlr = fcoe_to_ctlr(fcoe);
+
        if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
                prio = ffs(entry->app.priority) - 1;
        else
@@ -1904,10 +1942,10 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
 
        if (entry->app.protocol == ETH_P_FIP ||
            entry->app.protocol == ETH_P_FCOE)
-               fcoe->ctlr.priority = prio;
+               ctlr->priority = prio;
 
        if (entry->app.protocol == ETH_P_FCOE) {
-               port = lport_priv(fcoe->ctlr.lp);
+               port = lport_priv(ctlr->lp);
                port->priority = prio;
        }
 
@@ -1929,6 +1967,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 {
        struct fc_lport *lport = NULL;
        struct net_device *netdev = ptr;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        struct fcoe_port *port;
        struct fcoe_dev_stats *stats;
@@ -1938,7 +1977,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 
        list_for_each_entry(fcoe, &fcoe_hostlist, list) {
                if (fcoe->netdev == netdev) {
-                       lport = fcoe->ctlr.lp;
+                       ctlr = fcoe_to_ctlr(fcoe);
+                       lport = ctlr->lp;
                        break;
                }
        }
@@ -1967,7 +2007,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
                break;
        case NETDEV_UNREGISTER:
                list_del(&fcoe->list);
-               port = lport_priv(fcoe->ctlr.lp);
+               port = lport_priv(ctlr->lp);
                queue_work(fcoe_wq, &port->destroy_work);
                goto out;
                break;
@@ -1982,8 +2022,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
        fcoe_link_speed_update(lport);
 
        if (link_possible && !fcoe_link_ok(lport))
-               fcoe_ctlr_link_up(&fcoe->ctlr);
-       else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
+               fcoe_ctlr_link_up(ctlr);
+       else if (fcoe_ctlr_link_down(ctlr)) {
                stats = per_cpu_ptr(lport->dev_stats, get_cpu());
                stats->LinkFailureCount++;
                put_cpu();
@@ -2003,6 +2043,7 @@ out:
  */
 static int fcoe_disable(struct net_device *netdev)
 {
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        int rc = 0;
 
@@ -2013,8 +2054,9 @@ static int fcoe_disable(struct net_device *netdev)
        rtnl_unlock();
 
        if (fcoe) {
-               fcoe_ctlr_link_down(&fcoe->ctlr);
-               fcoe_clean_pending_queue(fcoe->ctlr.lp);
+               ctlr = fcoe_to_ctlr(fcoe);
+               fcoe_ctlr_link_down(ctlr);
+               fcoe_clean_pending_queue(ctlr->lp);
        } else
                rc = -ENODEV;
 
@@ -2032,6 +2074,7 @@ static int fcoe_disable(struct net_device *netdev)
  */
 static int fcoe_enable(struct net_device *netdev)
 {
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        int rc = 0;
 
@@ -2040,11 +2083,17 @@ static int fcoe_enable(struct net_device *netdev)
        fcoe = fcoe_hostlist_lookup_port(netdev);
        rtnl_unlock();
 
-       if (!fcoe)
+       if (!fcoe) {
                rc = -ENODEV;
-       else if (!fcoe_link_ok(fcoe->ctlr.lp))
-               fcoe_ctlr_link_up(&fcoe->ctlr);
+               goto out;
+       }
+
+       ctlr = fcoe_to_ctlr(fcoe);
+
+       if (!fcoe_link_ok(ctlr->lp))
+               fcoe_ctlr_link_up(ctlr);
 
+out:
        mutex_unlock(&fcoe_config_mutex);
        return rc;
 }
@@ -2059,6 +2108,7 @@ static int fcoe_enable(struct net_device *netdev)
  */
 static int fcoe_destroy(struct net_device *netdev)
 {
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        struct fc_lport *lport;
        struct fcoe_port *port;
@@ -2071,7 +2121,8 @@ static int fcoe_destroy(struct net_device *netdev)
                rc = -ENODEV;
                goto out_nodev;
        }
-       lport = fcoe->ctlr.lp;
+       ctlr = fcoe_to_ctlr(fcoe);
+       lport = ctlr->lp;
        port = lport_priv(lport);
        list_del(&fcoe->list);
        queue_work(fcoe_wq, &port->destroy_work);
@@ -2126,7 +2177,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
        int dcbx;
        u8 fup, up;
        struct net_device *netdev = fcoe->realdev;
-       struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+       struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+       struct fcoe_port *port = lport_priv(ctlr->lp);
        struct dcb_app app = {
                                .priority = 0,
                                .protocol = ETH_P_FCOE
@@ -2149,7 +2201,7 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
                }
 
                port->priority = ffs(up) ? ffs(up) - 1 : 0;
-               fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+               ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
        }
 #endif
 }
@@ -2166,6 +2218,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
 {
        int rc = 0;
+       struct fcoe_ctlr_device *ctlr_dev;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        struct fc_lport *lport;
 
@@ -2184,7 +2238,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
                goto out_nodev;
        }
 
-       lport = fcoe_if_create(fcoe, &netdev->dev, 0);
+       ctlr = fcoe_to_ctlr(fcoe);
+       ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
+       lport = fcoe_if_create(fcoe, &ctlr_dev->dev, 0);
        if (IS_ERR(lport)) {
                printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
                       netdev->name);
@@ -2195,7 +2251,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
        }
 
        /* Make this the "master" N_Port */
-       fcoe->ctlr.lp = lport;
+       ctlr->lp = lport;
 
        /* setup DCB priority attributes. */
        fcoe_dcb_create(fcoe);
@@ -2208,7 +2264,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
        fc_fabric_login(lport);
        if (!fcoe_link_ok(lport)) {
                rtnl_unlock();
-               fcoe_ctlr_link_up(&fcoe->ctlr);
+               fcoe_ctlr_link_up(ctlr);
                mutex_unlock(&fcoe_config_mutex);
                return rc;
        }
@@ -2320,11 +2376,12 @@ static int fcoe_reset(struct Scsi_Host *shost)
        struct fc_lport *lport = shost_priv(shost);
        struct fcoe_port *port = lport_priv(lport);
        struct fcoe_interface *fcoe = port->priv;
+       struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 
-       fcoe_ctlr_link_down(&fcoe->ctlr);
-       fcoe_clean_pending_queue(fcoe->ctlr.lp);
-       if (!fcoe_link_ok(fcoe->ctlr.lp))
-               fcoe_ctlr_link_up(&fcoe->ctlr);
+       fcoe_ctlr_link_down(ctlr);
+       fcoe_clean_pending_queue(ctlr->lp);
+       if (!fcoe_link_ok(ctlr->lp))
+               fcoe_ctlr_link_up(ctlr);
        return 0;
 }
 
@@ -2359,10 +2416,12 @@ fcoe_hostlist_lookup_port(const struct net_device *netdev)
  */
 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
 {
+       struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
 
        fcoe = fcoe_hostlist_lookup_port(netdev);
-       return (fcoe) ? fcoe->ctlr.lp : NULL;
+       ctlr = fcoe_to_ctlr(fcoe);
+       return (fcoe) ? ctlr->lp : NULL;
 }
 
 /**
@@ -2466,6 +2525,7 @@ module_init(fcoe_init);
 static void __exit fcoe_exit(void)
 {
        struct fcoe_interface *fcoe, *tmp;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_port *port;
        unsigned int cpu;
 
@@ -2477,7 +2537,8 @@ static void __exit fcoe_exit(void)
        rtnl_lock();
        list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
                list_del(&fcoe->list);
-               port = lport_priv(fcoe->ctlr.lp);
+               ctlr = fcoe_to_ctlr(fcoe);
+               port = lport_priv(ctlr->lp);
                queue_work(fcoe_wq, &port->destroy_work);
        }
        rtnl_unlock();
@@ -2573,7 +2634,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
 {
        struct fcoe_port *port = lport_priv(lport);
        struct fcoe_interface *fcoe = port->priv;
-       struct fcoe_ctlr *fip = &fcoe->ctlr;
+       struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
        struct fc_frame_header *fh = fc_frame_header_get(fp);
 
        switch (op) {
@@ -2730,6 +2791,40 @@ static void fcoe_get_lesb(struct fc_lport *lport,
        __fcoe_get_lesb(lport, fc_lesb, netdev);
 }
 
+/**
+ * fcoe_ctlr_get_lesb() - Refresh the cached LESB counters of a ctlr device
+ * @ctlr_dev: the fcoe_ctlr_device whose lesb fields are updated
+ *
+ * Fetches the Link Error Status Block for the controller's lport via
+ * __fcoe_get_lesb() and copies each counter into ctlr_dev->lesb,
+ * converting from network (big-endian) to host byte order.
+ */
+static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+       struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+       struct net_device *netdev = fcoe_netdev(fip->lp);
+       struct fcoe_fc_els_lesb *fcoe_lesb;
+       struct fc_els_lesb fc_lesb;
+
+       __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+       /* reinterpret the FC ELS LESB as the FCoE layout before unpacking */
+       fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+       ctlr_dev->lesb.lesb_link_fail =
+               ntohl(fcoe_lesb->lesb_link_fail);
+       ctlr_dev->lesb.lesb_vlink_fail =
+               ntohl(fcoe_lesb->lesb_vlink_fail);
+       ctlr_dev->lesb.lesb_miss_fka =
+               ntohl(fcoe_lesb->lesb_miss_fka);
+       ctlr_dev->lesb.lesb_symb_err =
+               ntohl(fcoe_lesb->lesb_symb_err);
+       ctlr_dev->lesb.lesb_err_block =
+               ntohl(fcoe_lesb->lesb_err_block);
+       ctlr_dev->lesb.lesb_fcs_error =
+               ntohl(fcoe_lesb->lesb_fcs_error);
+}
+
+/**
+ * fcoe_fcf_get_vlan_id() - Fill in the VLAN id of an FCF sysfs device
+ * @fcf_dev: the fcoe_fcf_device being queried
+ *
+ * Walks fcf_dev back to its controller's fcoe_interface and reads the
+ * VLAN id off the underlying net device.
+ * NOTE(review): assumes fcoe->netdev is a VLAN device when this is
+ * called — confirm vlan_dev_vlan_id() behavior for non-VLAN netdevs.
+ */
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+       struct fcoe_ctlr_device *ctlr_dev =
+               fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+       struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+       struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+       fcf_dev->vlan_id = vlan_dev_vlan_id(fcoe->netdev);
+}
+
 /**
  * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
  * @lport: the local port
@@ -2747,7 +2842,8 @@ static void fcoe_set_port_id(struct fc_lport *lport,
 {
        struct fcoe_port *port = lport_priv(lport);
        struct fcoe_interface *fcoe = port->priv;
+       struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 
        if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
-               fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
+               fcoe_ctlr_recv_flogi(ctlr, lport, fp);
 }
index 96ac938d39ccc81bc8acb4f0dc34d3ac4de23bef..a624add4f8ecbae730b01f6daab8b8f88ba1ea4d 100644 (file)
@@ -68,7 +68,6 @@ do {                                                                  \
  * @netdev:          The associated net device
  * @fcoe_packet_type: FCoE packet type
  * @fip_packet_type:  FIP packet type
- * @ctlr:            The FCoE controller (for FIP)
  * @oem:             The offload exchange manager for all local port
  *                   instances associated with this port
  * @removed:         Indicates fcoe interface removed from net device
@@ -80,12 +79,15 @@ struct fcoe_interface {
        struct net_device  *realdev;
        struct packet_type fcoe_packet_type;
        struct packet_type fip_packet_type;
-       struct fcoe_ctlr   ctlr;
        struct fc_exch_mgr *oem;
        u8      removed;
 };
 
-#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
+/*
+ * The fcoe_ctlr is allocated immediately before the fcoe_interface in a
+ * single fcoe_ctlr_device private area (see fcoe_interface_create(), which
+ * sizes the allocation as sizeof(fcoe_ctlr) + sizeof(fcoe_interface)), so
+ * converting between the two is plain pointer arithmetic.
+ */
+#define fcoe_to_ctlr(x)                                                \
+       (struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1)
+
+#define fcoe_from_ctlr(x)                      \
+       ((struct fcoe_interface *)((x) + 1))
 
 /**
  * fcoe_netdev() - Return the net device associated with a local port
index 5a4c7250aa77abd218ea52c65da31fb514abfd30..d68d57241ee68227703ce1880e52c2ce31b4684d 100644 (file)
@@ -160,6 +160,76 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
 }
 EXPORT_SYMBOL(fcoe_ctlr_init);
 
+/**
+ * fcoe_sysfs_fcf_add() - Expose a newly discovered FCF through fcoe_sysfs
+ * @new: the libfcoe FCF to register
+ *
+ * Copies the FCF's identity into a temporary fcoe_fcf_device, registers it
+ * with the controller's sysfs device, links the two objects, and adds the
+ * FCF to fip->fcfs.  Returns 0 on success or -ENOMEM if the sysfs device
+ * could not be added (in which case the FCF is not put on the list).
+ */
+static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
+{
+       struct fcoe_ctlr *fip = new->fip;
+       struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+       struct fcoe_fcf_device temp, *fcf_dev;
+       int rc = 0;
+
+       LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
+                       new->fabric_name, new->fcf_mac);
+
+       mutex_lock(&ctlr_dev->lock);
+
+       /* stage the identity in a stack copy; fcoe_fcf_device_add dedups */
+       temp.fabric_name = new->fabric_name;
+       temp.switch_name = new->switch_name;
+       temp.fc_map = new->fc_map;
+       temp.vfid = new->vfid;
+       memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
+       temp.priority = new->pri;
+       temp.fka_period = new->fka_period;
+       temp.selected = 0; /* default to unselected */
+
+       fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
+       if (unlikely(!fcf_dev)) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       /*
+        * The fcoe_sysfs layer can return a CONNECTED fcf that
+        * has a priv (fcf was never deleted) or a CONNECTED fcf
+        * that doesn't have a priv (fcf was deleted). However,
+        * libfcoe will always delete FCFs before trying to add
+        * them. This is ensured because both recv_adv and
+        * age_fcfs are protected by the fcoe_ctlr's mutex.
+        * This means that we should never get a FCF with a
+        * non-NULL priv pointer.
+        */
+       BUG_ON(fcf_dev->priv);
+
+       /* cross-link the sysfs device and the libfcoe FCF */
+       fcf_dev->priv = new;
+       new->fcf_dev = fcf_dev;
+
+       list_add(&new->list, &fip->fcfs);
+       fip->fcf_count++;
+
+out:
+       mutex_unlock(&ctlr_dev->lock);
+       return rc;
+}
+
+/**
+ * fcoe_sysfs_fcf_del() - Unregister and free an FCF
+ * @new: the libfcoe FCF to remove
+ *
+ * Unlinks the FCF from fip->fcfs, deletes its fcoe_sysfs device under the
+ * ctlr_dev lock, and frees the FCF itself.  May sleep (mutex, sysfs
+ * teardown), so callers must not hold spinlocks — see fcoe_ctlr_age_fcfs(),
+ * which defers deletion to after put_cpu() for exactly that reason.
+ *
+ * NOTE(review): the list_del/fcf_count-- happen outside ctlr_dev->lock;
+ * presumably serialized by the fcoe_ctlr mutex (see the comment in
+ * fcoe_sysfs_fcf_add()) — confirm.
+ */
+static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
+{
+       struct fcoe_ctlr *fip = new->fip;
+       struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+       struct fcoe_fcf_device *fcf_dev;
+
+       list_del(&new->list);
+       fip->fcf_count--;
+
+       mutex_lock(&ctlr_dev->lock);
+
+       fcf_dev = fcoe_fcf_to_fcf_dev(new);
+       WARN_ON(!fcf_dev);
+       new->fcf_dev = NULL;
+       fcoe_fcf_device_delete(fcf_dev);
+       kfree(new);
+
+       mutex_unlock(&ctlr_dev->lock);
+}
+
 /**
  * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller
  * @fip: The FCoE controller whose FCFs are to be reset
@@ -173,10 +243,10 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
 
        fip->sel_fcf = NULL;
        list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
-               list_del(&fcf->list);
-               kfree(fcf);
+               fcoe_sysfs_fcf_del(fcf);
        }
-       fip->fcf_count = 0;
+       WARN_ON(fip->fcf_count);
+
        fip->sel_time = 0;
 }
 
@@ -717,8 +787,11 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
        unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
        unsigned long deadline;
        unsigned long sel_time = 0;
+       struct list_head del_list;
        struct fcoe_dev_stats *stats;
 
+       INIT_LIST_HEAD(&del_list);
+
        stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
 
        list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
@@ -739,10 +812,13 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
                if (time_after_eq(jiffies, deadline)) {
                        if (fip->sel_fcf == fcf)
                                fip->sel_fcf = NULL;
+                       /*
+                        * Move to delete list so we can call
+                        * fcoe_sysfs_fcf_del (which can sleep)
+                        * after the put_cpu().
+                        */
                        list_del(&fcf->list);
-                       WARN_ON(!fip->fcf_count);
-                       fip->fcf_count--;
-                       kfree(fcf);
+                       list_add(&fcf->list, &del_list);
                        stats->VLinkFailureCount++;
                } else {
                        if (time_after(next_timer, deadline))
@@ -753,6 +829,12 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
                }
        }
        put_cpu();
+
+       list_for_each_entry_safe(fcf, next, &del_list, list) {
+               /* Removes fcf from current list */
+               fcoe_sysfs_fcf_del(fcf);
+       }
+
        if (sel_time && !fip->sel_fcf && !fip->sel_time) {
                sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
                fip->sel_time = sel_time;
@@ -903,23 +985,23 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
        struct fcoe_fcf *fcf;
        struct fcoe_fcf new;
-       struct fcoe_fcf *found;
        unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
        int first = 0;
        int mtu_valid;
+       int found = 0;
+       int rc = 0;
 
        if (fcoe_ctlr_parse_adv(fip, skb, &new))
                return;
 
        mutex_lock(&fip->ctlr_mutex);
        first = list_empty(&fip->fcfs);
-       found = NULL;
        list_for_each_entry(fcf, &fip->fcfs, list) {
                if (fcf->switch_name == new.switch_name &&
                    fcf->fabric_name == new.fabric_name &&
                    fcf->fc_map == new.fc_map &&
                    compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
-                       found = fcf;
+                       found = 1;
                        break;
                }
        }
@@ -931,9 +1013,16 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
                if (!fcf)
                        goto out;
 
-               fip->fcf_count++;
                memcpy(fcf, &new, sizeof(new));
-               list_add(&fcf->list, &fip->fcfs);
+               fcf->fip = fip;
+               rc = fcoe_sysfs_fcf_add(fcf);
+               if (rc) {
+                       printk(KERN_ERR "Failed to allocate sysfs instance "
+                              "for FCF, fab %16.16llx mac %pM\n",
+                              new.fabric_name, new.fcf_mac);
+                       kfree(fcf);
+                       goto out;
+               }
        } else {
                /*
                 * Update the FCF's keep-alive descriptor flags.
@@ -954,6 +1043,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
                fcf->fka_period = new.fka_period;
                memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN);
        }
+
        mtu_valid = fcoe_ctlr_mtu_valid(fcf);
        fcf->time = jiffies;
        if (!found)
@@ -996,6 +1086,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
                    time_before(fip->sel_time, fip->timer.expires))
                        mod_timer(&fip->timer, fip->sel_time);
        }
+
 out:
        mutex_unlock(&fip->ctlr_mutex);
 }
@@ -2718,9 +2809,9 @@ unlock:
 
 /**
  * fcoe_libfc_config() - Sets up libfc related properties for local port
- * @lp: The local port to configure libfc for
- * @fip: The FCoE controller in use by the local port
- * @tt: The libfc function template
+ * @lport:    The local port to configure libfc for
+ * @fip:      The FCoE controller in use by the local port
+ * @tt:       The libfc function template
  * @init_fcp: If non-zero, the FCP portion of libfc should be initialized
  *
  * Returns : 0 for success
@@ -2753,3 +2844,43 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
        return 0;
 }
 EXPORT_SYMBOL_GPL(fcoe_libfc_config);
+
+/**
+ * fcoe_fcf_get_selected() - Refresh an fcf device's 'selected' flag
+ * @fcf_dev: The fcoe_sysfs fcf device to update
+ *
+ * Sets fcf_dev->selected to 1 if the backing libfcoe FCF is the
+ * fcoe_ctlr's currently selected FCF, otherwise 0 (including the
+ * case where the fcf device no longer has a backing FCF, i.e. its
+ * priv pointer is NULL).
+ */
+void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
+{
+       struct fcoe_ctlr_device *ctlr_dev = fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+       struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+       struct fcoe_fcf *fcf;
+
+       /* ctlr_mutex protects sel_fcf, ctlr_dev->lock protects priv */
+       mutex_lock(&fip->ctlr_mutex);
+       mutex_lock(&ctlr_dev->lock);
+
+       fcf = fcoe_fcf_device_priv(fcf_dev);
+       if (fcf)
+               fcf_dev->selected = (fcf == fip->sel_fcf) ? 1 : 0;
+       else
+               fcf_dev->selected = 0;
+
+       mutex_unlock(&ctlr_dev->lock);
+       mutex_unlock(&fip->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_fcf_get_selected);
+
+/**
+ * fcoe_ctlr_get_fip_mode() - Map the libfcoe FIP mode into the sysfs mode
+ * @ctlr_dev: The fcoe_sysfs ctlr device to update
+ *
+ * Translates the fcoe_ctlr's FIP_MODE_* value into the corresponding
+ * FIP_CONN_TYPE_* value for the sysfs 'mode' attribute; any other
+ * mode maps to FIP_CONN_TYPE_UNKNOWN.
+ */
+void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
+{
+       struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+       mutex_lock(&ctlr->ctlr_mutex);
+       switch (ctlr->mode) {
+       case FIP_MODE_FABRIC:
+               ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
+               break;
+       case FIP_MODE_VN2VN:
+               ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
+               break;
+       default:
+               ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
+               break;
+       }
+       mutex_unlock(&ctlr->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
new file mode 100644 (file)
index 0000000..2bc1631
--- /dev/null
@@ -0,0 +1,832 @@
+/*
+ * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+
+#include <scsi/fcoe_sysfs.h>
+
+static atomic_t ctlr_num;
+static atomic_t fcf_num;
+
+/*
+ * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
+ * should insulate the loss of a fcf.
+ */
+static unsigned int fcoe_fcf_dev_loss_tmo = 1800;  /* seconds */
+
+module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo,
+                  uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fcf_dev_loss_tmo,
+                "Maximum number of seconds that libfcoe should"
+                " insulate the loss of a fcf. Once this value is"
+                " exceeded, the fcf is removed.");
+
+/*
+ * These are used by the fcoe_*_show_function routines, they
+ * are intentionally placed in the .c file as they're not intended
+ * for use throughout the code.
+ */
+#define fcoe_ctlr_id(x)                                \
+       ((x)->id)
+#define fcoe_ctlr_work_q_name(x)               \
+       ((x)->work_q_name)
+#define fcoe_ctlr_work_q(x)                    \
+       ((x)->work_q)
+#define fcoe_ctlr_devloss_work_q_name(x)       \
+       ((x)->devloss_work_q_name)
+#define fcoe_ctlr_devloss_work_q(x)            \
+       ((x)->devloss_work_q)
+#define fcoe_ctlr_mode(x)                      \
+       ((x)->mode)
+#define fcoe_ctlr_fcf_dev_loss_tmo(x)          \
+       ((x)->fcf_dev_loss_tmo)
+#define fcoe_ctlr_link_fail(x)                 \
+       ((x)->lesb.lesb_link_fail)
+#define fcoe_ctlr_vlink_fail(x)                        \
+       ((x)->lesb.lesb_vlink_fail)
+#define fcoe_ctlr_miss_fka(x)                  \
+       ((x)->lesb.lesb_miss_fka)
+#define fcoe_ctlr_symb_err(x)                  \
+       ((x)->lesb.lesb_symb_err)
+#define fcoe_ctlr_err_block(x)                 \
+       ((x)->lesb.lesb_err_block)
+#define fcoe_ctlr_fcs_error(x)                 \
+       ((x)->lesb.lesb_fcs_error)
+#define fcoe_fcf_state(x)                      \
+       ((x)->state)
+#define fcoe_fcf_fabric_name(x)                        \
+       ((x)->fabric_name)
+#define fcoe_fcf_switch_name(x)                        \
+       ((x)->switch_name)
+#define fcoe_fcf_fc_map(x)                     \
+       ((x)->fc_map)
+#define fcoe_fcf_vfid(x)                       \
+       ((x)->vfid)
+#define fcoe_fcf_mac(x)                                \
+       ((x)->mac)
+#define fcoe_fcf_priority(x)                   \
+       ((x)->priority)
+#define fcoe_fcf_fka_period(x)                 \
+       ((x)->fka_period)
+#define fcoe_fcf_dev_loss_tmo(x)               \
+       ((x)->dev_loss_tmo)
+#define fcoe_fcf_selected(x)                   \
+       ((x)->selected)
+#define fcoe_fcf_vlan_id(x)                    \
+       ((x)->vlan_id)
+
+/*
+ * dev_loss_tmo attribute
+ */
+/**
+ * fcoe_str_to_dev_loss() - Parse a sysfs buffer into a dev_loss_tmo value
+ * @buf: NUL-terminated string from the sysfs store
+ * @val: Output for the parsed value
+ *
+ * Returns 0 on success, or -EINVAL if the string does not parse as an
+ * unsigned integer or the value would overflow the u32 dev_loss_tmo
+ * field. (A "*val < 0" test would be dead code here: *val is an
+ * unsigned long and can never be negative; kstrtoul already rejects
+ * malformed input.)
+ */
+static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
+{
+       int ret;
+
+       ret = kstrtoul(buf, 0, val);
+       if (ret)
+               return -EINVAL;
+       /*
+        * Check for overflow; dev_loss_tmo is u32
+        */
+       if (*val > UINT_MAX)
+               return -EINVAL;
+
+       return 0;
+}
+
+/**
+ * fcoe_fcf_set_dev_loss_tmo() - Set an fcf device's dev_loss_tmo
+ * @fcf: The fcoe_sysfs fcf device to update
+ * @val: New timeout value
+ *
+ * Returns -EBUSY if the fcf is in the UNKNOWN, DISCONNECTED or
+ * DELETED state, -EINVAL if @val would overflow the u32 field,
+ * otherwise stores @val and returns 0.
+ */
+static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf,
+                                    unsigned long val)
+{
+       if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) ||
+           (fcf->state == FCOE_FCF_STATE_DISCONNECTED) ||
+           (fcf->state == FCOE_FCF_STATE_DELETED))
+               return -EBUSY;
+       /*
+        * Check for overflow; dev_loss_tmo is u32
+        */
+       if (val > UINT_MAX)
+               return -EINVAL;
+
+       fcoe_fcf_dev_loss_tmo(fcf) = val;
+       return 0;
+}
+
+#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store) \
+struct device_attribute device_attr_fcoe_##_prefix##_##_name = \
+       __ATTR(_name, _mode, _show, _store)
+
+#define fcoe_ctlr_show_function(field, format_string, sz, cast)        \
+static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
+                                           struct device_attribute *attr, \
+                                           char *buf)                  \
+{                                                                      \
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);               \
+       if (ctlr->f->get_fcoe_ctlr_##field)                             \
+               ctlr->f->get_fcoe_ctlr_##field(ctlr);                   \
+       return snprintf(buf, sz, format_string,                         \
+                       cast fcoe_ctlr_##field(ctlr));                  \
+}
+
+#define fcoe_fcf_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_fcf_device_##field(struct device *dev,        \
+                                          struct device_attribute *attr, \
+                                          char *buf)                   \
+{                                                                      \
+       struct fcoe_fcf_device *fcf = dev_to_fcf(dev);                  \
+       struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);  \
+       if (ctlr->f->get_fcoe_fcf_##field)                              \
+               ctlr->f->get_fcoe_fcf_##field(fcf);                     \
+       return snprintf(buf, sz, format_string,                         \
+                       cast fcoe_fcf_##field(fcf));                    \
+}
+
+#define fcoe_ctlr_private_show_function(field, format_string, sz, cast)        \
+static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
+                                           struct device_attribute *attr, \
+                                           char *buf)                  \
+{                                                                      \
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);               \
+       return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \
+}
+
+#define fcoe_fcf_private_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_fcf_device_##field(struct device *dev,        \
+                                          struct device_attribute *attr, \
+                                          char *buf)                   \
+{                                                              \
+       struct fcoe_fcf_device *fcf = dev_to_fcf(dev);                  \
+       return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \
+}
+
+#define fcoe_ctlr_private_rd_attr(field, format_string, sz)            \
+       fcoe_ctlr_private_show_function(field, format_string, sz, )     \
+       static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,                   \
+                               show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_ctlr_rd_attr(field, format_string, sz)                    \
+       fcoe_ctlr_show_function(field, format_string, sz, )             \
+       static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,                   \
+                               show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_fcf_rd_attr(field, format_string, sz)                     \
+       fcoe_fcf_show_function(field, format_string, sz, )              \
+       static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,                    \
+                               show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_fcf_private_rd_attr(field, format_string, sz)             \
+       fcoe_fcf_private_show_function(field, format_string, sz, )      \
+       static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,                    \
+                               show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast) \
+       fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \
+       static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,                   \
+                               show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast)  \
+       fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \
+       static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,                    \
+                               show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_enum_name_search(title, table_type, table)                        \
+static const char *get_fcoe_##title##_name(enum table_type table_key)  \
+{                                                                      \
+       int i;                                                          \
+       char *name = NULL;                                              \
+                                                                       \
+       for (i = 0; i < ARRAY_SIZE(table); i++) {                       \
+               if (table[i].value == table_key) {                      \
+                       name = table[i].name;                           \
+                       break;                                          \
+               }                                                       \
+       }                                                               \
+       return name;                                                    \
+}
+
+static struct {
+       enum fcf_state value;
+       char           *name;
+} fcf_state_names[] = {
+       { FCOE_FCF_STATE_UNKNOWN,      "Unknown" },
+       { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
+       { FCOE_FCF_STATE_CONNECTED,    "Connected" },
+};
+fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
+#define FCOE_FCF_STATE_MAX_NAMELEN 50
+
+/**
+ * show_fcf_state() - 'state' attribute show handler for an fcf device
+ *
+ * Prints the human-readable name for the fcf's current state from
+ * fcf_state_names, or returns -EINVAL if the state has no entry.
+ */
+static ssize_t show_fcf_state(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+       const char *name;
+       name = get_fcoe_fcf_state_name(fcf->state);
+       if (!name)
+               return -EINVAL;
+       return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name);
+}
+static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
+
+static struct {
+       enum fip_conn_type value;
+       char               *name;
+} fip_conn_type_names[] = {
+       { FIP_CONN_TYPE_UNKNOWN, "Unknown" },
+       { FIP_CONN_TYPE_FABRIC, "Fabric" },
+       { FIP_CONN_TYPE_VN2VN, "VN2VN" },
+};
+fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
+#define FCOE_CTLR_MODE_MAX_NAMELEN 50
+
+/**
+ * show_ctlr_mode() - 'mode' attribute show handler for a ctlr device
+ *
+ * Asks the LLD to refresh ctlr->mode via its get_fcoe_ctlr_mode
+ * callback (when provided), then prints the human-readable name
+ * from fip_conn_type_names, or returns -EINVAL for an unknown mode.
+ */
+static ssize_t show_ctlr_mode(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+       const char *name;
+
+       if (ctlr->f->get_fcoe_ctlr_mode)
+               ctlr->f->get_fcoe_ctlr_mode(ctlr);
+
+       name = get_fcoe_ctlr_mode_name(ctlr->mode);
+       if (!name)
+               return -EINVAL;
+       return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
+                       "%s\n", name);
+}
+static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
+                       show_ctlr_mode, NULL);
+
+/**
+ * store_private_fcoe_ctlr_fcf_dev_loss_tmo() - ctlr fcf_dev_loss_tmo store
+ *
+ * Parses the new timeout, stores it as the ctlr's default for future
+ * fcfs, and pushes it to every fcf currently on the ctlr's list.
+ * Per-fcf failures (e.g. an fcf not in the CONNECTED state returns
+ * -EBUSY) are intentionally ignored; those fcfs keep their old value.
+ */
+static ssize_t
+store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
+                                        struct device_attribute *attr,
+                                        const char *buf, size_t count)
+{
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+       struct fcoe_fcf_device *fcf;
+       unsigned long val;
+       int rc;
+
+       rc = fcoe_str_to_dev_loss(buf, &val);
+       if (rc)
+               return rc;
+
+       fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val;
+       mutex_lock(&ctlr->lock);
+       list_for_each_entry(fcf, &ctlr->fcfs, peers)
+               fcoe_fcf_set_dev_loss_tmo(fcf, val);
+       mutex_unlock(&ctlr->lock);
+       return count;
+}
+fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, );
+static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR,
+                       show_fcoe_ctlr_device_fcf_dev_loss_tmo,
+                       store_private_fcoe_ctlr_fcf_dev_loss_tmo);
+
+/* Link Error Status Block (LESB) */
+fcoe_ctlr_rd_attr(link_fail, "%u\n", 20);
+fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20);
+fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20);
+fcoe_ctlr_rd_attr(symb_err, "%u\n", 20);
+fcoe_ctlr_rd_attr(err_block, "%u\n", 20);
+fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20);
+
+fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
+fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long);
+fcoe_fcf_private_rd_attr(priority, "%u\n", 20);
+fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20);
+fcoe_fcf_private_rd_attr(vfid, "%u\n", 20);
+fcoe_fcf_private_rd_attr(mac, "%pM\n", 20);
+fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20);
+fcoe_fcf_rd_attr(selected, "%u\n", 20);
+fcoe_fcf_rd_attr(vlan_id, "%u\n", 20);
+
+fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, )
+/**
+ * store_fcoe_fcf_dev_loss_tmo() - per-fcf dev_loss_tmo store handler
+ *
+ * Parses the new timeout and applies it to this fcf only. Unlike the
+ * ctlr-level store, errors from fcoe_fcf_set_dev_loss_tmo (-EBUSY for
+ * a non-CONNECTED fcf, -EINVAL for overflow) are returned to the user.
+ */
+static ssize_t
+store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
+                           const char *buf, size_t count)
+{
+       struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+       unsigned long val;
+       int rc;
+
+       rc = fcoe_str_to_dev_loss(buf, &val);
+       if (rc)
+               return rc;
+
+       rc = fcoe_fcf_set_dev_loss_tmo(fcf, val);
+       if (rc)
+               return rc;
+       return count;
+}
+static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR,
+                       show_fcoe_fcf_device_dev_loss_tmo,
+                       store_fcoe_fcf_dev_loss_tmo);
+
+static struct attribute *fcoe_ctlr_lesb_attrs[] = {
+       &device_attr_fcoe_ctlr_link_fail.attr,
+       &device_attr_fcoe_ctlr_vlink_fail.attr,
+       &device_attr_fcoe_ctlr_miss_fka.attr,
+       &device_attr_fcoe_ctlr_symb_err.attr,
+       &device_attr_fcoe_ctlr_err_block.attr,
+       &device_attr_fcoe_ctlr_fcs_error.attr,
+       NULL,
+};
+
+static struct attribute_group fcoe_ctlr_lesb_attr_group = {
+       .name = "lesb",
+       .attrs = fcoe_ctlr_lesb_attrs,
+};
+
+static struct attribute *fcoe_ctlr_attrs[] = {
+       &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+       &device_attr_fcoe_ctlr_mode.attr,
+       NULL,
+};
+
+static struct attribute_group fcoe_ctlr_attr_group = {
+       .attrs = fcoe_ctlr_attrs,
+};
+
+static const struct attribute_group *fcoe_ctlr_attr_groups[] = {
+       &fcoe_ctlr_attr_group,
+       &fcoe_ctlr_lesb_attr_group,
+       NULL,
+};
+
+static struct attribute *fcoe_fcf_attrs[] = {
+       &device_attr_fcoe_fcf_fabric_name.attr,
+       &device_attr_fcoe_fcf_switch_name.attr,
+       &device_attr_fcoe_fcf_dev_loss_tmo.attr,
+       &device_attr_fcoe_fcf_fc_map.attr,
+       &device_attr_fcoe_fcf_vfid.attr,
+       &device_attr_fcoe_fcf_mac.attr,
+       &device_attr_fcoe_fcf_priority.attr,
+       &device_attr_fcoe_fcf_fka_period.attr,
+       &device_attr_fcoe_fcf_state.attr,
+       &device_attr_fcoe_fcf_selected.attr,
+       &device_attr_fcoe_fcf_vlan_id.attr,
+       NULL
+};
+
+static struct attribute_group fcoe_fcf_attr_group = {
+       .attrs = fcoe_fcf_attrs,
+};
+
+static const struct attribute_group *fcoe_fcf_attr_groups[] = {
+       &fcoe_fcf_attr_group,
+       NULL,
+};
+
+struct bus_type fcoe_bus_type;
+
+/**
+ * fcoe_bus_match() - Bus match callback for the fcoe bus
+ *
+ * Matches every device that is on the fcoe bus; there is no
+ * per-driver matching beyond bus membership.
+ */
+static int fcoe_bus_match(struct device *dev,
+                         struct device_driver *drv)
+{
+       if (dev->bus == &fcoe_bus_type)
+               return 1;
+       return 0;
+}
+
+/**
+ * fcoe_ctlr_device_release() - Release the FIP ctlr memory
+ * @dev: Pointer to the FIP ctlr's embedded device
+ *
+ * Called by the driver core when the last FIP ctlr reference is
+ * released; frees the fcoe_ctlr_device (and the LLD private area
+ * allocated with it in fcoe_ctlr_device_add()).
+ */
+static void fcoe_ctlr_device_release(struct device *dev)
+{
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+       kfree(ctlr);
+}
+
+/**
+ * fcoe_fcf_device_release() - Release the FIP fcf memory
+ * @dev: Pointer to the fcf's embedded device
+ *
+ * Called by the driver core when the last FIP fcf reference is
+ * released; frees the fcoe_fcf_device.
+ */
+static void fcoe_fcf_device_release(struct device *dev)
+{
+       struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+       kfree(fcf);
+}
+
+struct device_type fcoe_ctlr_device_type = {
+       .name = "fcoe_ctlr",
+       .groups = fcoe_ctlr_attr_groups,
+       .release = fcoe_ctlr_device_release,
+};
+
+struct device_type fcoe_fcf_device_type = {
+       .name = "fcoe_fcf",
+       .groups = fcoe_fcf_attr_groups,
+       .release = fcoe_fcf_device_release,
+};
+
+struct bus_type fcoe_bus_type = {
+       .name = "fcoe",
+       .match = &fcoe_bus_match,
+};
+
+/**
+ * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue
+ * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed
+ *
+ * Logs an error with a stack dump and returns if the workqueue was
+ * never created (or has already been torn down).
+ */
+void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr)
+{
+       if (!fcoe_ctlr_work_q(ctlr)) {
+               printk(KERN_ERR
+                      "ERROR: FIP Ctlr '%d' attempted to flush work, "
+                      "when no workqueue created.\n", ctlr->id);
+               dump_stack();
+               return;
+       }
+
+       flush_workqueue(fcoe_ctlr_work_q(ctlr));
+}
+
+/**
+ * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue
+ * @ctlr: Pointer to the FIP ctlr who owns the workqueue
+ * @work:   Work to queue for execution
+ *
+ * Return value:
+ *     1 on success / 0 already queued / < 0 for error
+ */
+int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr,
+                              struct work_struct *work)
+{
+       /* misuse: queueing onto a workqueue that was never created */
+       if (unlikely(!fcoe_ctlr_work_q(ctlr))) {
+               printk(KERN_ERR
+                      "ERROR: FIP Ctlr '%d' attempted to queue work, "
+                      "when no workqueue created.\n", ctlr->id);
+               dump_stack();
+
+               return -EINVAL;
+       }
+
+       return queue_work(fcoe_ctlr_work_q(ctlr), work);
+}
+
+/**
+ * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue
+ * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed
+ *
+ * Logs an error with a stack dump and returns if the devloss
+ * workqueue was never created (or has already been torn down).
+ */
+void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr)
+{
+       if (!fcoe_ctlr_devloss_work_q(ctlr)) {
+               printk(KERN_ERR
+                      "ERROR: FIP Ctlr '%d' attempted to flush work, "
+                      "when no workqueue created.\n", ctlr->id);
+               dump_stack();
+               return;
+       }
+
+       flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr));
+}
+
+/**
+ * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue
+ * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
+ * @work:   Work to queue for execution
+ * @delay:  jiffies to delay the work queuing
+ *
+ * Return value:
+ *     1 on success / 0 already queued / < 0 for error
+ */
+int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr,
+                                      struct delayed_work *work,
+                                      unsigned long delay)
+{
+       /* misuse: queueing onto a workqueue that was never created */
+       if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) {
+               printk(KERN_ERR
+                      "ERROR: FIP Ctlr '%d' attempted to queue work, "
+                      "when no workqueue created.\n", ctlr->id);
+               dump_stack();
+
+               return -EINVAL;
+       }
+
+       return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay);
+}
+
+/**
+ * fcoe_fcf_device_match() - Check whether two fcf devices describe the same FCF
+ * @new: Candidate fcf device
+ * @old: Existing fcf device
+ *
+ * Two fcf devices are considered the same FCF when switch name,
+ * fabric name, FC-MAP and MAC address all match. Returns 1 on a
+ * match, 0 otherwise.
+ */
+static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
+                                struct fcoe_fcf_device *old)
+{
+       if (new->switch_name == old->switch_name &&
+           new->fabric_name == old->fabric_name &&
+           new->fc_map == old->fc_map &&
+           compare_ether_addr(new->mac, old->mac) == 0)
+               return 1;
+       return 0;
+}
+
+/**
+ * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs
+ * @parent:    The parent device to which the fcoe_ctlr instance
+ *             should be attached
+ * @f:         The LLD's FCoE sysfs function template pointer
+ * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD
+ *
+ * This routine allocates a FIP ctlr object with some additional memory
+ * for the LLD. The FIP ctlr is initialized, added to sysfs and then
+ * attributes are added to it.
+ *
+ * Returns the new fcoe_ctlr_device, or NULL on any failure (all
+ * partially-created resources are unwound via the goto chain).
+ */
+struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+                                   struct fcoe_sysfs_function_template *f,
+                                   int priv_size)
+{
+       struct fcoe_ctlr_device *ctlr;
+       int error = 0;
+
+       ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size,
+                      GFP_KERNEL);
+       if (!ctlr)
+               goto out;
+
+       /* ids come from a monotonically increasing global counter */
+       ctlr->id = atomic_inc_return(&ctlr_num) - 1;
+       ctlr->f = f;
+       INIT_LIST_HEAD(&ctlr->fcfs);
+       mutex_init(&ctlr->lock);
+       ctlr->dev.parent = parent;
+       ctlr->dev.bus = &fcoe_bus_type;
+       ctlr->dev.type = &fcoe_ctlr_device_type;
+
+       /* seed the per-ctlr default from the module parameter */
+       ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;
+
+       snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
+                "ctlr_wq_%d", ctlr->id);
+       ctlr->work_q = create_singlethread_workqueue(
+               ctlr->work_q_name);
+       if (!ctlr->work_q)
+               goto out_del;
+
+       snprintf(ctlr->devloss_work_q_name,
+                sizeof(ctlr->devloss_work_q_name),
+                "ctlr_dl_wq_%d", ctlr->id);
+       ctlr->devloss_work_q = create_singlethread_workqueue(
+               ctlr->devloss_work_q_name);
+       if (!ctlr->devloss_work_q)
+               goto out_del_q;
+
+       dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
+       error = device_register(&ctlr->dev);
+       if (error)
+               goto out_del_q2;
+
+       return ctlr;
+
+out_del_q2:
+       destroy_workqueue(ctlr->devloss_work_q);
+       ctlr->devloss_work_q = NULL;
+out_del_q:
+       destroy_workqueue(ctlr->work_q);
+       ctlr->work_q = NULL;
+out_del:
+       kfree(ctlr);
+out:
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add);
+
+/**
+ * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs
+ * @ctlr: A pointer to the ctlr to be deleted
+ *
+ * Deletes a FIP ctlr and any fcfs attached
+ * to it. Deleting fcfs will cause their children
+ * to be deleted as well.
+ *
+ * The ctlr is detached from sysfs and its resources
+ * are freed (work q), but the memory is not freed
+ * until its last reference is released.
+ *
+ * This routine expects no locks to be held before
+ * calling.
+ *
+ * TODO: Currently there are no callbacks to clean up LLD data
+ * for a fcoe_fcf_device. LLDs must keep this in mind as they need
+ * to clean up each of their LLD data for all fcoe_fcf_device before
+ * calling fcoe_ctlr_device_delete.
+ */
+void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr)
+{
+       struct fcoe_fcf_device *fcf, *next;
+       /* Remove any attached fcfs */
+       mutex_lock(&ctlr->lock);
+       list_for_each_entry_safe(fcf, next,
+                                &ctlr->fcfs, peers) {
+               list_del(&fcf->peers);
+               /* skip the devloss grace period: delete immediately */
+               fcf->state = FCOE_FCF_STATE_DELETED;
+               fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
+       }
+       mutex_unlock(&ctlr->lock);
+
+       /* wait for all queued fcf deletions to finish */
+       fcoe_ctlr_device_flush_work(ctlr);
+
+       destroy_workqueue(ctlr->devloss_work_q);
+       ctlr->devloss_work_q = NULL;
+       destroy_workqueue(ctlr->work_q);
+       ctlr->work_q = NULL;
+
+       device_unregister(&ctlr->dev);
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete);
+
+/**
+ * fcoe_fcf_device_final_delete() - Final delete routine
+ * @work: The FIP fcf's embedded work struct
+ *
+ * It is expected that the fcf has been removed from
+ * the FIP ctlr's list before calling this routine.
+ */
+static void fcoe_fcf_device_final_delete(struct work_struct *work)
+{
+       struct fcoe_fcf_device *fcf =
+               container_of(work, struct fcoe_fcf_device, delete_work);
+       struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+
+       /*
+        * Cancel any outstanding timers. These should really exist
+        * only when rmmod'ing the LLDD and we're asking for
+        * immediate termination of the rports
+        */
+       /* if the dev_loss timer was already running, flush the devloss
+        * queue so its handler has finished before we unregister */
+       if (!cancel_delayed_work(&fcf->dev_loss_work))
+               fcoe_ctlr_device_flush_devloss(ctlr);
+
+       device_unregister(&fcf->dev);
+}
+
+/**
+ * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires
+ * @work: The FIP fcf's embedded work struct
+ *
+ * Removes the fcf from the FIP ctlr's list of fcfs and
+ * queues the final deletion.
+ */
+static void fip_timeout_deleted_fcf(struct work_struct *work)
+{
+       struct fcoe_fcf_device *fcf =
+               container_of(work, struct fcoe_fcf_device, dev_loss_work.work);
+       struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+
+       mutex_lock(&ctlr->lock);
+
+       /*
+        * If the fcf is deleted or reconnected before the timer
+        * fires the devloss queue will be flushed, but the state will
+        * either be CONNECTED or DELETED. If that is the case we
+        * cancel deleting the fcf.
+        */
+       if (fcf->state != FCOE_FCF_STATE_DISCONNECTED)
+               goto out;
+
+       dev_printk(KERN_ERR, &fcf->dev,
+                  "FIP fcf connection time out: removing fcf\n");
+
+       /* unlink under ctlr->lock, then hand off to the work queue */
+       list_del(&fcf->peers);
+       fcf->state = FCOE_FCF_STATE_DELETED;
+       fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
+
+out:
+       mutex_unlock(&ctlr->lock);
+}
+
+/**
+ * fcoe_fcf_device_delete() - Delete a FIP fcf
+ * @fcf: Pointer to the fcf which is to be deleted
+ *
+ * Queues the FIP fcf on the devloss workqueue
+ *
+ * Expects the ctlr_attrs mutex to be held for fcf
+ * state change.
+ */
+void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf)
+{
+       struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+       int timeout = fcf->dev_loss_tmo;
+
+       if (fcf->state != FCOE_FCF_STATE_CONNECTED)
+               return;
+
+       fcf->state = FCOE_FCF_STATE_DISCONNECTED;
+
+       /*
+        * FCF will only be re-connected by the LLD calling
+        * fcoe_fcf_device_add, and it should be setting up
+        * priv then.
+        */
+       fcf->priv = NULL;
+
+       fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work,
+                                          timeout * HZ);
+}
+EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete);
+
+/**
+ * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system
+ * @ctlr:    The fcoe_ctlr_device that will be the fcoe_fcf_device parent
+ * @new_fcf: A temporary FCF used for lookups on the current list of fcfs
+ *
+ * Expects to be called with the ctlr->lock held
+ */
+struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
+                                           struct fcoe_fcf_device *new_fcf)
+{
+       struct fcoe_fcf_device *fcf;
+       int error = 0;
+
+       list_for_each_entry(fcf, &ctlr->fcfs, peers) {
+               if (fcoe_fcf_device_match(new_fcf, fcf)) {
+                       if (fcf->state == FCOE_FCF_STATE_CONNECTED)
+                               return fcf;
+
+                       fcf->state = FCOE_FCF_STATE_CONNECTED;
+
+                       if (!cancel_delayed_work(&fcf->dev_loss_work))
+                               fcoe_ctlr_device_flush_devloss(ctlr);
+
+                       return fcf;
+               }
+       }
+
+       fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC);
+       if (unlikely(!fcf))
+               goto out;
+
+       INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete);
+       INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf);
+
+       fcf->dev.parent = &ctlr->dev;
+       fcf->dev.bus = &fcoe_bus_type;
+       fcf->dev.type = &fcoe_fcf_device_type;
+       fcf->id = atomic_inc_return(&fcf_num) - 1;
+       fcf->state = FCOE_FCF_STATE_UNKNOWN;
+
+       fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
+
+       dev_set_name(&fcf->dev, "fcf_%d", fcf->id);
+
+       fcf->fabric_name = new_fcf->fabric_name;
+       fcf->switch_name = new_fcf->switch_name;
+       fcf->fc_map = new_fcf->fc_map;
+       fcf->vfid = new_fcf->vfid;
+       memcpy(fcf->mac, new_fcf->mac, ETH_ALEN);
+       fcf->priority = new_fcf->priority;
+       fcf->fka_period = new_fcf->fka_period;
+       fcf->selected = new_fcf->selected;
+
+       error = device_register(&fcf->dev);
+       if (error)
+               goto out_del;
+
+       fcf->state = FCOE_FCF_STATE_CONNECTED;
+       list_add_tail(&fcf->peers, &ctlr->fcfs);
+
+       return fcf;
+
+out_del:
+       kfree(fcf);
+out:
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_fcf_device_add);
+
+int __init fcoe_sysfs_setup(void)
+{
+       int error;
+
+       atomic_set(&ctlr_num, 0);
+       atomic_set(&fcf_num, 0);
+
+       error = bus_register(&fcoe_bus_type);
+       if (error)
+               return error;
+
+       return 0;
+}
+
+void __exit fcoe_sysfs_teardown(void)
+{
+       bus_unregister(&fcoe_bus_type);
+}
index 710e149d41b60b0e1f087acce76b9b074953eb15..b46f43dced78eb6b77337905ff20eeb16d451a0c 100644 (file)
@@ -815,9 +815,17 @@ out_nodev:
  */
 static int __init libfcoe_init(void)
 {
-       fcoe_transport_init();
+       int rc = 0;
 
-       return 0;
+       rc = fcoe_transport_init();
+       if (rc)
+               return rc;
+
+       rc = fcoe_sysfs_setup();
+       if (rc)
+               fcoe_transport_exit();
+
+       return rc;
 }
 module_init(libfcoe_init);
 
@@ -826,6 +834,7 @@ module_init(libfcoe_init);
  */
 static void __exit libfcoe_exit(void)
 {
+       fcoe_sysfs_teardown();
        fcoe_transport_exit();
 }
 module_exit(libfcoe_exit);
index 6208d562890d1cb3cfb3c85845dddf02086c4fb2..317a7fdc3b825064e4a5677f0a64bc5f3e43a8d6 100644 (file)
@@ -25,3 +25,12 @@ config SCSI_QLA_FC
        Firmware images can be retrieved from:
 
                ftp://ftp.qlogic.com/outgoing/linux/firmware/
+
+config TCM_QLA2XXX
+       tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
+       depends on SCSI_QLA_FC && TARGET_CORE
+       select LIBFC
+       select BTREE
+       default n
+       ---help---
+       Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs
index 5df782f4a097f9cd8b32455319f3894a5879cd06..dce7d788cdc9c7999795dac0e9819e81943aa223 100644 (file)
@@ -1,5 +1,6 @@
 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
                qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
-        qla_nx.o
+        qla_nx.o qla_target.o
 
 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
+obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
index 5926f5a87ea8e97b0611d246ea99b96b1f6c7b6c..5ab953029f8d1412ee66057c641771bf69d765cd 100644 (file)
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_target.h"
 
 #include <linux/kthread.h>
 #include <linux/vmalloc.h>
@@ -576,6 +577,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
                scsi_block_requests(vha->host);
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                if (IS_QLA82XX(ha)) {
+                       ha->flags.isp82xx_no_md_cap = 1;
                        qla82xx_idc_lock(ha);
                        qla82xx_set_reset_owner(vha);
                        qla82xx_idc_unlock(ha);
@@ -585,7 +587,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
                scsi_unblock_requests(vha->host);
                break;
        case 0x2025d:
-               if (!IS_QLA81XX(ha))
+               if (!IS_QLA81XX(ha) || !IS_QLA8031(ha))
                        return -EPERM;
 
                ql_log(ql_log_info, vha, 0x706f,
@@ -1105,9 +1107,8 @@ qla2x00_total_isp_aborts_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
-       struct qla_hw_data *ha = vha->hw;
        return snprintf(buf, PAGE_SIZE, "%d\n",
-           ha->qla_stats.total_isp_aborts);
+           vha->qla_stats.total_isp_aborts);
 }
 
 static ssize_t
@@ -1154,7 +1155,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
 
-       if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+       if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
                return snprintf(buf, PAGE_SIZE, "\n");
 
        return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1537,7 +1538,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
        dma_addr_t stats_dma;
        struct fc_host_statistics *pfc_host_stat;
 
-       pfc_host_stat = &ha->fc_host_stat;
+       pfc_host_stat = &vha->fc_host_stat;
        memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
 
        if (test_bit(UNLOADING, &vha->dpc_flags))
@@ -1580,8 +1581,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
                pfc_host_stat->dumped_frames = stats->dumped_frames;
                pfc_host_stat->nos_count = stats->nos_rcvd;
        }
-       pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
-       pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
+       pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
+       pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
 
 done_free:
         dma_pool_free(ha->s_dma_pool, stats, stats_dma);
@@ -1737,6 +1738,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
        fc_host_supported_speeds(vha->host) =
                fc_host_supported_speeds(base_vha->host);
 
+       qlt_vport_create(vha, ha);
        qla24xx_vport_disable(fc_vport, disable);
 
        if (ha->flags.cpu_affinity_enabled) {
@@ -1951,12 +1953,16 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
        fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
        fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
        fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
-       fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+       fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
+                       (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
        fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
        fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
 
        if (IS_CNA_CAPABLE(ha))
                speed = FC_PORTSPEED_10GBIT;
+       else if (IS_QLA2031(ha))
+               speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
+                   FC_PORTSPEED_4GBIT;
        else if (IS_QLA25XX(ha))
                speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
                    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
index bc3cc6d91117ab58b103e889370c89f8402bd6f9..c68883806c54b8092e81ff82f62db06fd8ee822f 100644 (file)
@@ -297,7 +297,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
 
                /* Initialize all required  fields of fcport */
                fcport->vha = vha;
-               fcport->vp_idx = vha->vp_idx;
                fcport->d_id.b.al_pa =
                        bsg_job->request->rqst_data.h_els.port_id[0];
                fcport->d_id.b.area =
@@ -483,7 +482,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
 
        /* Initialize all required  fields of fcport */
        fcport->vha = vha;
-       fcport->vp_idx = vha->vp_idx;
        fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
        fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
        fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
@@ -544,7 +542,7 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
        int rval = 0;
        struct qla_hw_data *ha = vha->hw;
 
-       if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+       if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
                goto done_set_internal;
 
        new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
@@ -586,7 +584,7 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
        uint16_t new_config[4];
        struct qla_hw_data *ha = vha->hw;
 
-       if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+       if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
                goto done_reset_internal;
 
        memset(new_config, 0 , sizeof(new_config));
@@ -710,8 +708,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
        elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
 
        if ((ha->current_topology == ISP_CFG_F ||
-           (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
-           ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
+           ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
            le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
            && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
                elreq.options == EXTERNAL_LOOPBACK) {
@@ -1402,6 +1399,9 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
        if (rval)
                return rval;
 
+       /* Set the isp82xx_no_md_cap not to capture minidump */
+       ha->flags.isp82xx_no_md_cap = 1;
+
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
            ha->optrom_region_size);
index 62324a1d55737b7a127bcc5bab74ebac66ab62aa..fdee5611f3e2afce938c7f7ad559ce5d8eecffe9 100644 (file)
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes     |
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x0120       | 0x4b,0xba,0xfa |
- * | Mailbox commands             |       0x113e       | 0x112c-0x112e  |
+ * | Module Init and Probe        |       0x0122       | 0x4b,0xba,0xfa |
+ * | Mailbox commands             |       0x1140       | 0x111a-0x111b  |
+ * |                              |                    | 0x112c-0x112e  |
  * |                              |                    | 0x113a         |
  * | Device Discovery             |       0x2086       | 0x2020-0x2022  |
  * | Queue Command and IO tracing |       0x3030       | 0x3006,0x3008  |
  * |                              |                    | 0x302d-0x302e  |
- * | DPC Thread                   |       0x401c       |               |
- * | Async Events                 |       0x505d       | 0x502b-0x502f  |
+ * | DPC Thread                   |       0x401c       | 0x4002,0x4013  |
+ * | Async Events                 |       0x505f       | 0x502b-0x502f  |
  * |                              |                    | 0x5047,0x5052  |
- * | Timer Routines               |       0x6011       | 0x600e-0x600f  |
+ * | Timer Routines               |       0x6011       |                |
  * | User Space Interactions      |       0x709f       | 0x7018,0x702e, |
  * |                              |                    | 0x7039,0x7045, |
  * |                              |                    | 0x7073-0x7075, |
  * |                              |                    | 0x708c         |
  * | Task Management              |       0x803c       | 0x8025-0x8026  |
  * |                              |                    | 0x800b,0x8039  |
- * | AER/EEH                      |       0x900f       |               |
+ * | AER/EEH                      |       0x9011       |               |
  * | Virtual Port                 |       0xa007       |               |
- * | ISP82XX Specific             |       0xb054       | 0xb053         |
+ * | ISP82XX Specific             |       0xb054       | 0xb024         |
  * | MultiQ                       |       0xc00c       |               |
  * | Misc                         |       0xd010       |               |
+ * | Target Mode                 |       0xe06f       |                |
+ * | Target Mode Management      |       0xf071       |                |
+ * | Target Mode Task Management  |      0x1000b      |                |
  * ----------------------------------------------------------------------
  */
 
@@ -378,6 +382,54 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
        return (char *)iter_reg + ntohl(fcec->size);
 }
 
+static inline void *
+qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
+       uint32_t **last_chain)
+{
+       struct qla2xxx_mqueue_chain *q;
+       struct qla2xxx_mqueue_header *qh;
+       uint32_t num_queues;
+       int que;
+       struct {
+               int length;
+               void *ring;
+       } aq, *aqp;
+
+       if (!ha->tgt.atio_q_length)
+               return ptr;
+
+       num_queues = 1;
+       aqp = &aq;
+       aqp->length = ha->tgt.atio_q_length;
+       aqp->ring = ha->tgt.atio_ring;
+
+       for (que = 0; que < num_queues; que++) {
+               /* aqp = ha->atio_q_map[que]; */
+               q = ptr;
+               *last_chain = &q->type;
+               q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+               q->chain_size = htonl(
+                   sizeof(struct qla2xxx_mqueue_chain) +
+                   sizeof(struct qla2xxx_mqueue_header) +
+                   (aqp->length * sizeof(request_t)));
+               ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+               /* Add header. */
+               qh = ptr;
+               qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
+               qh->number = htonl(que);
+               qh->size = htonl(aqp->length * sizeof(request_t));
+               ptr += sizeof(struct qla2xxx_mqueue_header);
+
+               /* Add data. */
+               memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
+
+               ptr += aqp->length * sizeof(request_t);
+       }
+
+       return ptr;
+}
+
 static inline void *
 qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 {
@@ -873,6 +925,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
        struct qla24xx_fw_dump *fw;
        uint32_t        ext_mem_cnt;
        void            *nxt;
+       void            *nxt_chain;
+       uint32_t        *last_chain = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
        if (IS_QLA82XX(ha))
@@ -1091,6 +1145,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        qla24xx_copy_eft(ha, nxt);
 
+       nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
+       nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+       if (last_chain) {
+               ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+               *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+       }
+
+       /* Adjust valid length. */
+       ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
 qla24xx_fw_dump_failed_0:
        qla2xxx_dump_post_process(base_vha, rval);
 
@@ -1399,6 +1463,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
        /* Chain entries -- started with MQ. */
        nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
        nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+       nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
        if (last_chain) {
                ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
                *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -1717,6 +1782,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
        /* Chain entries -- started with MQ. */
        nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
        nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+       nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
        if (last_chain) {
                ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
                *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -2218,6 +2284,7 @@ copy_queue:
        /* Chain entries -- started with MQ. */
        nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
        nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+       nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
        if (last_chain) {
                ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
                *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
index 2157bdf1569a87e3771efbe9a8dcc26833ce47ab..f278df8cce0f02988e95f85e3f65d82381150f47 100644 (file)
@@ -244,6 +244,7 @@ struct qla2xxx_mqueue_header {
        uint32_t queue;
 #define TYPE_REQUEST_QUEUE     0x1
 #define TYPE_RESPONSE_QUEUE    0x2
+#define TYPE_ATIO_QUEUE                0x3
        uint32_t number;
        uint32_t size;
 };
@@ -339,3 +340,11 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
 #define ql_dbg_misc    0x00010000 /* For dumping everything that is not
                                    * not covered by upper categories
                                    */
+#define ql_dbg_verbose 0x00008000 /* More verbosity for each level
+                                   * This is to be used with other levels where
+                                   * more verbosity is required. It might not
+                                   * be applicable to all the levels.
+                                   */
+#define ql_dbg_tgt     0x00004000 /* Target mode */
+#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
+#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
index a2443031dbe76c68c617f26767e269105f075343..39007f53aec0284b9cd855592d786823bbad5786 100644 (file)
 #define RESPONSE_ENTRY_CNT_2100                64      /* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_2300                512     /* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_MQ          128     /* Number of response entries.*/
+#define ATIO_ENTRY_CNT_24XX            4096    /* Number of ATIO entries. */
 
 struct req_que;
 
@@ -1234,11 +1235,27 @@ typedef struct {
  * ISP queue - response queue entry definition.
  */
 typedef struct {
-       uint8_t         data[60];
+       uint8_t         entry_type;             /* Entry type. */
+       uint8_t         entry_count;            /* Entry count. */
+       uint8_t         sys_define;             /* System defined. */
+       uint8_t         entry_status;           /* Entry Status. */
+       uint32_t        handle;                 /* System defined handle */
+       uint8_t         data[52];
        uint32_t        signature;
 #define RESPONSE_PROCESSED     0xDEADDEAD      /* Signature */
 } response_t;
 
+/*
+ * ISP queue - ATIO queue entry definition.
+ */
+struct atio {
+       uint8_t         entry_type;             /* Entry type. */
+       uint8_t         entry_count;            /* Entry count. */
+       uint8_t         data[58];
+       uint32_t        signature;
+#define ATIO_PROCESSED 0xDEADDEAD              /* Signature */
+};
+
 typedef union {
        uint16_t extended;
        struct {
@@ -1719,11 +1736,13 @@ typedef struct fc_port {
        struct fc_rport *rport, *drport;
        u32 supported_classes;
 
-       uint16_t vp_idx;
        uint8_t fc4_type;
        uint8_t scan_state;
 } fc_port_t;
 
+#define QLA_FCPORT_SCAN_NONE   0
+#define QLA_FCPORT_SCAN_FOUND  1
+
 /*
  * Fibre channel port/lun states.
  */
@@ -1747,6 +1766,7 @@ static const char * const port_state_str[] = {
 #define FCF_LOGIN_NEEDED       BIT_1
 #define FCF_FCP2_DEVICE                BIT_2
 #define FCF_ASYNC_SENT         BIT_3
+#define FCF_CONF_COMP_SUPPORTED BIT_4
 
 /* No loop ID flag. */
 #define FC_NO_LOOP_ID          0x1000
@@ -2419,6 +2439,40 @@ struct qlfc_fw {
        uint32_t len;
 };
 
+struct qlt_hw_data {
+       /* Protected by hw lock */
+       uint32_t enable_class_2:1;
+       uint32_t enable_explicit_conf:1;
+       uint32_t ini_mode_force_reverse:1;
+       uint32_t node_name_set:1;
+
+       dma_addr_t atio_dma;    /* Physical address. */
+       struct atio *atio_ring; /* Base virtual address */
+       struct atio *atio_ring_ptr;     /* Current address. */
+       uint16_t atio_ring_index; /* Current index. */
+       uint16_t atio_q_length;
+
+       void *target_lport_ptr;
+       struct qla_tgt_func_tmpl *tgt_ops;
+       struct qla_tgt *qla_tgt;
+       struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
+       uint16_t current_handle;
+
+       struct qla_tgt_vp_map *tgt_vp_map;
+       struct mutex tgt_mutex;
+       struct mutex tgt_host_action_mutex;
+
+       int saved_set;
+       uint16_t saved_exchange_count;
+       uint32_t saved_firmware_options_1;
+       uint32_t saved_firmware_options_2;
+       uint32_t saved_firmware_options_3;
+       uint8_t saved_firmware_options[2];
+       uint8_t saved_add_firmware_options[2];
+
+       uint8_t tgt_node_name[WWN_SIZE];
+};
+
 /*
  * Qlogic host adapter specific data structure.
 */
@@ -2460,7 +2514,9 @@ struct qla_hw_data {
                uint32_t        thermal_supported:1;
                uint32_t        isp82xx_reset_hdlr_active:1;
                uint32_t        isp82xx_reset_owner:1;
-               /* 28 bits */
+               uint32_t        isp82xx_no_md_cap:1;
+               uint32_t        host_shutting_down:1;
+               /* 30 bits */
        } flags;
 
        /* This spinlock is used to protect "io transactions", you must
@@ -2804,7 +2860,6 @@ struct qla_hw_data {
                                        /* ISP2322: red, green, amber. */
        uint16_t        zio_mode;
        uint16_t        zio_timer;
-       struct fc_host_statistics fc_host_stat;
 
        struct qla_msix_entry *msix_entries;
 
@@ -2817,7 +2872,6 @@ struct qla_hw_data {
        int             cur_vport_count;
 
        struct qla_chip_state_84xx *cs84xx;
-       struct qla_statistics qla_stats;
        struct isp_operations *isp_ops;
        struct workqueue_struct *wq;
        struct qlfc_fw fw_buf;
@@ -2863,6 +2917,8 @@ struct qla_hw_data {
        dma_addr_t      md_tmplt_hdr_dma;
        void            *md_dump;
        uint32_t        md_dump_size;
+
+       struct qlt_hw_data tgt;
 };
 
 /*
@@ -2920,6 +2976,7 @@ typedef struct scsi_qla_host {
 #define FCOE_CTX_RESET_NEEDED  18      /* Initiate FCoE context reset */
 #define MPI_RESET_NEEDED       19      /* Initiate MPI FW reset */
 #define ISP_QUIESCE_NEEDED     20      /* Driver need some quiescence */
+#define SCR_PENDING            21      /* SCR in target mode */
 
        uint32_t        device_flags;
 #define SWITCH_FOUND           BIT_0
@@ -2979,10 +3036,21 @@ typedef struct scsi_qla_host {
        struct req_que *req;
        int             fw_heartbeat_counter;
        int             seconds_since_last_heartbeat;
+       struct fc_host_statistics fc_host_stat;
+       struct qla_statistics qla_stats;
 
        atomic_t        vref_count;
 } scsi_qla_host_t;
 
+#define SET_VP_IDX     1
+#define SET_AL_PA      2
+#define RESET_VP_IDX   3
+#define RESET_AL_PA    4
+struct qla_tgt_vp_map {
+       uint8_t idx;
+       scsi_qla_host_t *vha;
+};
+
 /*
  * Macros to help code, maintain, etc.
  */
index 9f065804bd12b830a458ab11b6a92c7936f0bc58..9eacd2df111b85108dd8b3e75c3ea0320ec927b5 100644 (file)
@@ -175,6 +175,7 @@ extern int  qla2x00_vp_abort_isp(scsi_qla_host_t *);
 /*
  * Global Function Prototypes in qla_iocb.c source file.
  */
+
 extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
 extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
 extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -188,6 +189,8 @@ extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
 extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
 extern int qla24xx_dif_start_scsi(srb_t *);
 
+extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
+extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
 
 /*
  * Global Function Prototypes in qla_mbx.c source file.
@@ -238,6 +241,9 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *, uint8_t *, uint8_t *, uint16_t *);
 extern int
 qla2x00_init_firmware(scsi_qla_host_t *, uint16_t);
 
+extern int
+qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *);
+
 extern int
 qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
 
@@ -383,6 +389,8 @@ extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
 extern void qla2x00_free_irqs(scsi_qla_host_t *);
 
 extern int qla2x00_get_data_rate(scsi_qla_host_t *);
+extern char *qla2x00_get_link_speed_str(struct qla_hw_data *);
+
 /*
  * Global Function Prototypes in qla_sup.c source file.
  */
@@ -546,6 +554,7 @@ extern void qla2x00_sp_free(void *, void *);
 extern void qla2x00_sp_timeout(unsigned long);
 extern void qla2x00_bsg_job_done(void *, void *, int);
 extern void qla2x00_bsg_sp_free(void *, void *);
+extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
 
 /* Interrupt related */
 extern irqreturn_t qla82xx_intr_handler(int, void *);
index 3128f80441f5378090156e98edbd1ee6798a2033..05260d25fe469f8e28bfba0807a874c4e2173acd 100644 (file)
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_target.h"
 
 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
@@ -556,7 +557,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
        ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
        ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
 
-       ct_req->req.rff_id.fc4_feature = BIT_1;
+       qlt_rff_id(vha, ct_req);
+
        ct_req->req.rff_id.fc4_type = 0x08;             /* SCSI - FCP */
 
        /* Execute MS IOCB */
index b9465643396b0c40e7fb7fbab19249f5c5cbded6..ca5084743135cf358c397dfccaa84fbbbbfece95 100644 (file)
@@ -17,6 +17,9 @@
 #include <asm/prom.h>
 #endif
 
+#include <target/target_core_base.h>
+#include "qla_target.h"
+
 /*
 *  QLogic ISP2x00 Hardware Support Function Prototypes.
 */
@@ -518,7 +521,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
                        return QLA_FUNCTION_FAILED;
                }
        }
-       rval = qla2x00_init_rings(vha);
+
+       if (qla_ini_mode_enabled(vha))
+               rval = qla2x00_init_rings(vha);
+
        ha->flags.chip_reset_done = 1;
 
        if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
@@ -1233,6 +1239,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
                        mq_size += ha->max_rsp_queues *
                            (rsp->length * sizeof(response_t));
                }
+               if (ha->tgt.atio_q_length)
+                       mq_size += ha->tgt.atio_q_length * sizeof(request_t);
                /* Allocate memory for Fibre Channel Event Buffer. */
                if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
                        goto try_eft;
@@ -1696,6 +1704,12 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
        icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
        icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
 
+       /* Setup ATIO queue dma pointers for target mode */
+       icb->atio_q_inpointer = __constant_cpu_to_le16(0);
+       icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
+       icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
+       icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
+
        if (ha->mqenable || IS_QLA83XX(ha)) {
                icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
                icb->rid = __constant_cpu_to_le16(rid);
@@ -1739,6 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
                WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
                WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
        }
+       qlt_24xx_config_rings(vha, reg);
+
        /* PCI posting */
        RD_REG_DWORD(&ioreg->hccr);
 }
@@ -1794,6 +1810,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
 
        spin_unlock(&ha->vport_slock);
 
+       ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+       ha->tgt.atio_ring_index = 0;
+       /* Initialize ATIO queue entries */
+       qlt_init_atio_q_entries(vha);
+
        ha->isp_ops->config_rings(vha);
 
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2051,6 +2072,10 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
        vha->d_id.b.area = area;
        vha->d_id.b.al_pa = al_pa;
 
+       spin_lock(&ha->vport_slock);
+       qlt_update_vp_map(vha, SET_AL_PA);
+       spin_unlock(&ha->vport_slock);
+
        if (!vha->flags.init_done)
                ql_log(ql_log_info, vha, 0x2010,
                    "Topology - %s, Host Loop address 0x%x.\n",
@@ -2185,7 +2210,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
            nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
                /* Reset NVRAM data. */
                ql_log(ql_log_warn, vha, 0x0064,
-                   "Inconisistent NVRAM "
+                   "Inconsistent NVRAM "
                    "detected: checksum=0x%x id=%c version=0x%x.\n",
                    chksum, nv->id[0], nv->nvram_version);
                ql_log(ql_log_warn, vha, 0x0065,
@@ -2270,7 +2295,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
        if (IS_QLA23XX(ha)) {
                nv->firmware_options[0] |= BIT_2;
                nv->firmware_options[0] &= ~BIT_3;
-               nv->firmware_options[0] &= ~BIT_6;
+               nv->special_options[0] &= ~BIT_6;
                nv->add_firmware_options[1] |= BIT_5 | BIT_4;
 
                if (IS_QLA2300(ha)) {
@@ -2467,14 +2492,21 @@ qla2x00_rport_del(void *data)
 {
        fc_port_t *fcport = data;
        struct fc_rport *rport;
+       scsi_qla_host_t *vha = fcport->vha;
        unsigned long flags;
 
        spin_lock_irqsave(fcport->vha->host->host_lock, flags);
        rport = fcport->drport ? fcport->drport: fcport->rport;
        fcport->drport = NULL;
        spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
-       if (rport)
+       if (rport) {
                fc_remote_port_delete(rport);
+               /*
+                * Release the target mode FC NEXUS in qla_target.c code
+                * if target mode is enabled.
+                */
+               qlt_fc_port_deleted(vha, fcport);
+       }
 }
 
 /**
@@ -2495,11 +2527,11 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
 
        /* Setup fcport template structure. */
        fcport->vha = vha;
-       fcport->vp_idx = vha->vp_idx;
        fcport->port_type = FCT_UNKNOWN;
        fcport->loop_id = FC_NO_LOOP_ID;
        qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
        fcport->supported_classes = FC_COS_UNSPECIFIED;
+       fcport->scan_state = QLA_FCPORT_SCAN_NONE;
 
        return fcport;
 }
@@ -2726,7 +2758,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
                new_fcport->d_id.b.area = area;
                new_fcport->d_id.b.al_pa = al_pa;
                new_fcport->loop_id = loop_id;
-               new_fcport->vp_idx = vha->vp_idx;
                rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
                if (rval2 != QLA_SUCCESS) {
                        ql_dbg(ql_dbg_disc, vha, 0x201a,
@@ -2760,10 +2791,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
 
                if (!found) {
                        /* New device, add to fcports list. */
-                       if (vha->vp_idx) {
-                               new_fcport->vha = vha;
-                               new_fcport->vp_idx = vha->vp_idx;
-                       }
                        list_add_tail(&new_fcport->list, &vha->vp_fcports);
 
                        /* Allocate a new replacement fcport. */
@@ -2800,8 +2827,6 @@ cleanup_allocation:
 static void
 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
-#define LS_UNKNOWN      2
-       static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
        char *link_speed;
        int rval;
        uint16_t mb[4];
@@ -2829,11 +2854,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
                    fcport->port_name[6], fcport->port_name[7], rval,
                    fcport->fp_speed, mb[0], mb[1]);
        } else {
-               link_speed = link_speeds[LS_UNKNOWN];
-               if (fcport->fp_speed < 5)
-                       link_speed = link_speeds[fcport->fp_speed];
-               else if (fcport->fp_speed == 0x13)
-                       link_speed = link_speeds[5];
+               link_speed = qla2x00_get_link_speed_str(ha);
                ql_dbg(ql_dbg_disc, vha, 0x2005,
                    "iIDMA adjusted to %s GB/s "
                    "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
@@ -2864,6 +2885,12 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
                    "Unable to allocate fc remote port.\n");
                return;
        }
+       /*
+        * Create target mode FC NEXUS in qla_target.c if target mode is
+        * enabled.
+        */
+       qlt_fc_port_added(vha, fcport);
+
        spin_lock_irqsave(fcport->vha->host->host_lock, flags);
        *((fc_port_t **)rport->dd_data) = fcport;
        spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
@@ -2921,7 +2948,7 @@ static int
 qla2x00_configure_fabric(scsi_qla_host_t *vha)
 {
        int     rval;
-       fc_port_t       *fcport, *fcptemp;
+       fc_port_t       *fcport;
        uint16_t        next_loopid;
        uint16_t        mb[MAILBOX_REGISTER_COUNT];
        uint16_t        loop_id;
@@ -2959,7 +2986,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                    0xfc, mb, BIT_1|BIT_0);
                if (rval != QLA_SUCCESS) {
                        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-                       return rval;
+                       break;
                }
                if (mb[0] != MBS_COMMAND_COMPLETE) {
                        ql_dbg(ql_dbg_disc, vha, 0x2042,
@@ -2991,21 +3018,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                        }
                }
 
-#define QLA_FCPORT_SCAN                1
-#define QLA_FCPORT_FOUND       2
-
-               list_for_each_entry(fcport, &vha->vp_fcports, list) {
-                       fcport->scan_state = QLA_FCPORT_SCAN;
-               }
-
                rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
                if (rval != QLA_SUCCESS)
                        break;
 
-               /*
-                * Logout all previous fabric devices marked lost, except
-                * FCP2 devices.
-                */
+               /* Add new ports to existing port list */
+               list_splice_tail_init(&new_fcports, &vha->vp_fcports);
+
+               /* Starting free loop ID. */
+               next_loopid = ha->min_external_loopid;
+
                list_for_each_entry(fcport, &vha->vp_fcports, list) {
                        if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
                                break;
@@ -3013,7 +3035,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                        if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
                                continue;
 
-                       if (fcport->scan_state == QLA_FCPORT_SCAN &&
+                       /* Logout lost/gone fabric devices (non-FCP2) */
+                       if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
                            atomic_read(&fcport->state) == FCS_ONLINE) {
                                qla2x00_mark_device_lost(vha, fcport,
                                    ql2xplogiabsentdevice, 0);
@@ -3026,78 +3049,30 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                                            fcport->d_id.b.domain,
                                            fcport->d_id.b.area,
                                            fcport->d_id.b.al_pa);
-                                       fcport->loop_id = FC_NO_LOOP_ID;
                                }
-                       }
-               }
-
-               /* Starting free loop ID. */
-               next_loopid = ha->min_external_loopid;
-
-               /*
-                * Scan through our port list and login entries that need to be
-                * logged in.
-                */
-               list_for_each_entry(fcport, &vha->vp_fcports, list) {
-                       if (atomic_read(&vha->loop_down_timer) ||
-                           test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
-                               break;
-
-                       if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
-                           (fcport->flags & FCF_LOGIN_NEEDED) == 0)
                                continue;
-
-                       if (fcport->loop_id == FC_NO_LOOP_ID) {
-                               fcport->loop_id = next_loopid;
-                               rval = qla2x00_find_new_loop_id(
-                                   base_vha, fcport);
-                               if (rval != QLA_SUCCESS) {
-                                       /* Ran out of IDs to use */
-                                       break;
-                               }
                        }
-                       /* Login and update database */
-                       qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
-               }
-
-               /* Exit if out of loop IDs. */
-               if (rval != QLA_SUCCESS) {
-                       break;
-               }
-
-               /*
-                * Login and add the new devices to our port list.
-                */
-               list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
-                       if (atomic_read(&vha->loop_down_timer) ||
-                           test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
-                               break;
-
-                       /* Find a new loop ID to use. */
-                       fcport->loop_id = next_loopid;
-                       rval = qla2x00_find_new_loop_id(base_vha, fcport);
-                       if (rval != QLA_SUCCESS) {
-                               /* Ran out of IDs to use */
-                               break;
+                       fcport->scan_state = QLA_FCPORT_SCAN_NONE;
+
+                       /* Login fabric devices that need a login */
+                       if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
+                           atomic_read(&vha->loop_down_timer) == 0) {
+                               if (fcport->loop_id == FC_NO_LOOP_ID) {
+                                       fcport->loop_id = next_loopid;
+                                       rval = qla2x00_find_new_loop_id(
+                                           base_vha, fcport);
+                                       if (rval != QLA_SUCCESS) {
+                                               /* Ran out of IDs to use */
+                                               continue;
+                                       }
+                               }
                        }
 
                        /* Login and update database */
                        qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
-
-                       if (vha->vp_idx) {
-                               fcport->vha = vha;
-                               fcport->vp_idx = vha->vp_idx;
-                       }
-                       list_move_tail(&fcport->list, &vha->vp_fcports);
                }
        } while (0);
 
-       /* Free all new device structures not processed. */
-       list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
-               list_del(&fcport->list);
-               kfree(fcport);
-       }
-
        if (rval) {
                ql_dbg(ql_dbg_disc, vha, 0x2068,
                    "Configure fabric error exit rval=%d.\n", rval);
@@ -3287,7 +3262,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                            WWN_SIZE))
                                continue;
 
-                       fcport->scan_state = QLA_FCPORT_FOUND;
+                       fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
 
                        found++;
 
@@ -3595,6 +3570,12 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
                        if (mb[10] & BIT_1)
                                fcport->supported_classes |= FC_COS_CLASS3;
 
+                       if (IS_FWI2_CAPABLE(ha)) {
+                               if (mb[10] & BIT_7)
+                                       fcport->flags |=
+                                           FCF_CONF_COMP_SUPPORTED;
+                       }
+
                        rval = QLA_SUCCESS;
                        break;
                } else if (mb[0] == MBS_LOOP_ID_USED) {
@@ -3841,7 +3822,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
                vha->flags.online = 0;
        ha->flags.chip_reset_done = 0;
        clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-       ha->qla_stats.total_isp_aborts++;
+       vha->qla_stats.total_isp_aborts++;
 
        ql_log(ql_log_info, vha, 0x00af,
            "Performing ISP error recovery - ha=%p.\n", ha);
@@ -4066,6 +4047,7 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];
        struct rsp_que *rsp = ha->rsp_q_map[0];
+       unsigned long flags;
 
        /* If firmware needs to be loaded */
        if (qla2x00_isp_firmware(vha)) {
@@ -4090,6 +4072,16 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
                        qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
 
                        vha->flags.online = 1;
+
+                       /*
+                        * Process any ATIO queue entries that came in
+                        * while we weren't online.
+                        */
+                       spin_lock_irqsave(&ha->hardware_lock, flags);
+                       if (qla_tgt_mode_enabled(vha))
+                               qlt_24xx_process_atio_queue(vha);
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
                        /* Wait at most MAX_TARGET RSCNs for a stable link. */
                        wait_time = 256;
                        do {
@@ -4279,7 +4271,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
            nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
                /* Reset NVRAM data. */
                ql_log(ql_log_warn, vha, 0x006b,
-                   "Inconisistent NVRAM detected: checksum=0x%x id=%c "
+                   "Inconsistent NVRAM detected: checksum=0x%x id=%c "
                    "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
                ql_log(ql_log_warn, vha, 0x006c,
                    "Falling back to functioning (yet invalid -- WWPN) "
@@ -4330,6 +4322,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
                rval = 1;
        }
 
+       if (!qla_ini_mode_enabled(vha)) {
+               /* Don't enable full login after initial LIP */
+               nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+               /* Don't enable LIP full login for initiator */
+               nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+       }
+
+       qlt_24xx_config_nvram_stage1(vha, nv);
+
        /* Reset Initialization control block */
        memset(icb, 0, ha->init_cb_size);
 
@@ -4357,8 +4358,10 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
        qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
            "QLA2462");
 
-       /* Use alternate WWN? */
+       qlt_24xx_config_nvram_stage2(vha, icb);
+
        if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
+               /* Use alternate WWN? */
                memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
                memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
        }
@@ -5029,7 +5032,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
            nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
                /* Reset NVRAM data. */
                ql_log(ql_log_info, vha, 0x0073,
-                   "Inconisistent NVRAM detected: checksum=0x%x id=%c "
+                   "Inconsistent NVRAM detected: checksum=0x%x id=%c "
                    "version=0x%x.\n", chksum, nv->id[0],
                    le16_to_cpu(nv->nvram_version));
                ql_log(ql_log_info, vha, 0x0074,
index eac9509244971ba9e324803ca531014e3f917dad..70dbf53d9e0f4fe0762073cb0bfec1508251fc8f 100644 (file)
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_target.h"
 
 #include <linux/blkdev.h>
 #include <linux/delay.h>
@@ -23,18 +24,17 @@ qla2x00_get_cmd_direction(srb_t *sp)
 {
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+       struct scsi_qla_host *vha = sp->fcport->vha;
 
        cflags = 0;
 
        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
-               sp->fcport->vha->hw->qla_stats.output_bytes +=
-                   scsi_bufflen(cmd);
+               vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
-               sp->fcport->vha->hw->qla_stats.input_bytes +=
-                   scsi_bufflen(cmd);
+               vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }
        return (cflags);
 }
@@ -385,9 +385,10 @@ qla2x00_start_scsi(srb_t *sp)
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
+               /* If still no head room then bail out */
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
        }
-       if (req->cnt < (req_cnt + 2))
-               goto queuing_error;
 
        /* Build command packet */
        req->current_outstanding_cmd = handle;
@@ -470,7 +471,7 @@ queuing_error:
 /**
  * qla2x00_start_iocbs() - Execute the IOCB command
  */
-static void
+void
 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
 {
        struct qla_hw_data *ha = vha->hw;
@@ -571,6 +572,29 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
        return (ret);
 }
 
+/*
+ * qla2x00_issue_marker
+ *
+ * Issue marker
+ * Caller CAN have hardware lock held as specified by ha_locked parameter.
+ * Might release it, then reacquire.
+ */
+int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
+{
+       if (ha_locked) {
+               if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+                                       MK_SYNC_ALL) != QLA_SUCCESS)
+                       return QLA_FUNCTION_FAILED;
+       } else {
+               if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+                                       MK_SYNC_ALL) != QLA_SUCCESS)
+                       return QLA_FUNCTION_FAILED;
+       }
+       vha->marker_needed = 0;
+
+       return QLA_SUCCESS;
+}
+
 /**
  * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
  * Continuation Type 1 IOCBs to allocate.
@@ -629,11 +653,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
-               ha->qla_stats.output_bytes += scsi_bufflen(cmd);
+               vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
-               ha->qla_stats.input_bytes += scsi_bufflen(cmd);
+               vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }
 
        cur_seg = scsi_sglist(cmd);
@@ -745,13 +769,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
-               sp->fcport->vha->hw->qla_stats.output_bytes +=
-                   scsi_bufflen(cmd);
+               vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
-               sp->fcport->vha->hw->qla_stats.input_bytes +=
-                   scsi_bufflen(cmd);
+               vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }
 
        /* One DSD is available in the Command Type 3 IOCB */
@@ -1245,7 +1267,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
                return QLA_SUCCESS;
        }
 
-       cmd_pkt->vp_index = sp->fcport->vp_idx;
+       cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
 
        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -1502,9 +1524,9 @@ qla24xx_start_scsi(srb_t *sp)
                else
                        req->cnt = req->length -
                                (req->ring_index - cnt);
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
        }
-       if (req->cnt < (req_cnt + 2))
-               goto queuing_error;
 
        /* Build command packet. */
        req->current_outstanding_cmd = handle;
@@ -1527,7 +1549,7 @@ qla24xx_start_scsi(srb_t *sp)
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-       cmd_pkt->vp_index = sp->fcport->vp_idx;
+       cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
 
        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@@ -1717,11 +1739,10 @@ qla24xx_dif_start_scsi(srb_t *sp)
                else
                        req->cnt = req->length -
                                (req->ring_index - cnt);
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
        }
 
-       if (req->cnt < (req_cnt + 2))
-               goto queuing_error;
-
        status |= QDSS_GOT_Q_SPACE;
 
        /* Build header part of command packet (excluding the OPCODE). */
@@ -1898,7 +1919,7 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
        logio->port_id[1] = sp->fcport->d_id.b.area;
        logio->port_id[2] = sp->fcport->d_id.b.domain;
-       logio->vp_index = sp->fcport->vp_idx;
+       logio->vp_index = sp->fcport->vha->vp_idx;
 }
 
 static void
@@ -1922,7 +1943,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
            sp->fcport->d_id.b.al_pa);
-       mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+       mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
 }
 
 static void
@@ -1935,7 +1956,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
        logio->port_id[1] = sp->fcport->d_id.b.area;
        logio->port_id[2] = sp->fcport->d_id.b.domain;
-       logio->vp_index = sp->fcport->vp_idx;
+       logio->vp_index = sp->fcport->vha->vp_idx;
 }
 
 static void
@@ -1952,7 +1973,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
            sp->fcport->d_id.b.al_pa);
-       mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+       mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
        /* Implicit: mbx->mbx10 = 0. */
 }
 
@@ -1962,7 +1983,7 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-       logio->vp_index = sp->fcport->vp_idx;
+       logio->vp_index = sp->fcport->vha->vp_idx;
 }
 
 static void
@@ -1983,7 +2004,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
        mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
        mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
        mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
-       mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+       mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
 }
 
 static void
@@ -2009,7 +2030,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
        tsk->port_id[0] = fcport->d_id.b.al_pa;
        tsk->port_id[1] = fcport->d_id.b.area;
        tsk->port_id[2] = fcport->d_id.b.domain;
-       tsk->vp_index = fcport->vp_idx;
+       tsk->vp_index = fcport->vha->vp_idx;
 
        if (flags == TCF_LUN_RESET) {
                int_to_scsilun(lun, &tsk->lun);
@@ -2030,7 +2051,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
         els_iocb->handle = sp->handle;
         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
         els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
-        els_iocb->vp_index = sp->fcport->vp_idx;
+       els_iocb->vp_index = sp->fcport->vha->vp_idx;
         els_iocb->sof_type = EST_SOFI3;
         els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
 
@@ -2160,7 +2181,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
         ct_iocb->handle = sp->handle;
 
        ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-       ct_iocb->vp_index = sp->fcport->vp_idx;
+       ct_iocb->vp_index = sp->fcport->vha->vp_idx;
         ct_iocb->comp_status = __constant_cpu_to_le16(0);
 
        ct_iocb->cmd_dsd_count =
@@ -2343,11 +2364,10 @@ sufficient_dsds:
                        else
                                req->cnt = req->length -
                                        (req->ring_index - cnt);
+                       if (req->cnt < (req_cnt + 2))
+                               goto queuing_error;
                }
 
-               if (req->cnt < (req_cnt + 2))
-                       goto queuing_error;
-
                ctx = sp->u.scmd.ctx =
                    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
                if (!ctx) {
@@ -2362,7 +2382,7 @@ sufficient_dsds:
                if (!ctx->fcp_cmnd) {
                        ql_log(ql_log_fatal, vha, 0x3011,
                            "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
-                       goto queuing_error_fcp_cmnd;
+                       goto queuing_error;
                }
 
                /* Initialize the DSD list and dma handle */
@@ -2400,7 +2420,7 @@ sufficient_dsds:
                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-               cmd_pkt->vp_index = sp->fcport->vp_idx;
+               cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
 
                /* Build IOCB segments */
                if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
@@ -2489,7 +2509,7 @@ sufficient_dsds:
                cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
                cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
                cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
-               cmd_pkt->vp_index = sp->fcport->vp_idx;
+               cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
 
                int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
index ce42288049b5a20a5c57646ecf077886a1239ebb..6f67a9d4998b6d43fbab53a0a75918c6f607fbf0 100644 (file)
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_target.h"
 
 #include <linux/delay.h>
 #include <linux/slab.h>
@@ -309,6 +310,28 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
                    "IDC failed to post ACK.\n");
 }
 
+#define LS_UNKNOWN     2
+char *
+qla2x00_get_link_speed_str(struct qla_hw_data *ha)
+{
+       static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
+       char *link_speed;
+       int fw_speed = ha->link_data_rate;
+
+       if (IS_QLA2100(ha) || IS_QLA2200(ha))
+               link_speed = link_speeds[0];
+       else if (fw_speed == 0x13)
+               link_speed = link_speeds[6];
+       else {
+               link_speed = link_speeds[LS_UNKNOWN];
+               if (fw_speed < 6)
+                       link_speed =
+                           link_speeds[fw_speed];
+       }
+
+       return link_speed;
+}
+
 /**
  * qla2x00_async_event() - Process aynchronous events.
  * @ha: SCSI driver HA context
@@ -317,9 +340,6 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
 void
 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 {
-#define LS_UNKNOWN     2
-       static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
-       char            *link_speed;
        uint16_t        handle_cnt;
        uint16_t        cnt, mbx;
        uint32_t        handles[5];
@@ -454,8 +474,8 @@ skip_rio:
        case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
                ql_dbg(ql_dbg_async, vha, 0x5008,
                    "Asynchronous WAKEUP_THRES.\n");
-               break;
 
+               break;
        case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
                ql_dbg(ql_dbg_async, vha, 0x5009,
                    "LIP occurred (%x).\n", mb[1]);
@@ -479,20 +499,14 @@ skip_rio:
                break;
 
        case MBA_LOOP_UP:               /* Loop Up Event */
-               if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-                       link_speed = link_speeds[0];
+               if (IS_QLA2100(ha) || IS_QLA2200(ha))
                        ha->link_data_rate = PORT_SPEED_1GB;
-               } else {
-                       link_speed = link_speeds[LS_UNKNOWN];
-                       if (mb[1] < 6)
-                               link_speed = link_speeds[mb[1]];
-                       else if (mb[1] == 0x13)
-                               link_speed = link_speeds[6];
+               else
                        ha->link_data_rate = mb[1];
-               }
 
                ql_dbg(ql_dbg_async, vha, 0x500a,
-                   "LOOP UP detected (%s Gbps).\n", link_speed);
+                   "LOOP UP detected (%s Gbps).\n",
+                   qla2x00_get_link_speed_str(ha));
 
                vha->flags.management_server_logged_in = 0;
                qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -638,6 +652,8 @@ skip_rio:
                        ql_dbg(ql_dbg_async, vha, 0x5010,
                            "Port unavailable %04x %04x %04x.\n",
                            mb[1], mb[2], mb[3]);
+                       ql_log(ql_log_warn, vha, 0x505e,
+                           "Link is offline.\n");
 
                        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                                atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -670,12 +686,17 @@ skip_rio:
                        ql_dbg(ql_dbg_async, vha, 0x5011,
                            "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
                            mb[1], mb[2], mb[3]);
+
+                       qlt_async_event(mb[0], vha, mb);
                        break;
                }
 
                ql_dbg(ql_dbg_async, vha, 0x5012,
                    "Port database changed %04x %04x %04x.\n",
                    mb[1], mb[2], mb[3]);
+               ql_log(ql_log_warn, vha, 0x505f,
+                   "Link is operational (%s Gbps).\n",
+                   qla2x00_get_link_speed_str(ha));
 
                /*
                 * Mark all devices as missing so we will login again.
@@ -684,8 +705,13 @@ skip_rio:
 
                qla2x00_mark_all_devices_lost(vha, 1);
 
+               if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
+                       set_bit(SCR_PENDING, &vha->dpc_flags);
+
                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+
+               qlt_async_event(mb[0], vha, mb);
                break;
 
        case MBA_RSCN_UPDATE:           /* State Change Registration */
@@ -807,6 +833,8 @@ skip_rio:
                    mb[0], mb[1], mb[2], mb[3]);
        }
 
+       qlt_async_event(mb[0], vha, mb);
+
        if (!vha->vp_idx && ha->num_vhosts)
                qla2x00_alert_all_vps(rsp, mb);
 }
@@ -1172,6 +1200,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
                } else if (iop[0] & BIT_5)
                        fcport->port_type = FCT_INITIATOR;
 
+               if (iop[0] & BIT_7)
+                       fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+
                if (logio->io_parameter[7] || logio->io_parameter[8])
                        fcport->supported_classes |= FC_COS_CLASS2;
                if (logio->io_parameter[9] || logio->io_parameter[10])
@@ -1986,6 +2017,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 
                if (pkt->entry_status != 0) {
                        qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
+
+                       (void)qlt_24xx_process_response_error(vha, pkt);
+
                        ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
                        wmb();
                        continue;
@@ -2016,6 +2050,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
                 case ELS_IOCB_TYPE:
                        qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
                        break;
+               case ABTS_RECV_24XX:
+                       /* ensure that the ATIO queue is empty */
+                       qlt_24xx_process_atio_queue(vha);
+               case ABTS_RESP_24XX:
+               case CTIO_TYPE7:
+               case NOTIFY_ACK_TYPE:
+                       qlt_response_pkt_all_vps(vha, (response_t *)pkt);
+                       break;
                case MARKER_TYPE:
                        /* Do nothing in this case, this check is to prevent it
                         * from falling into default case
@@ -2168,6 +2210,13 @@ qla24xx_intr_handler(int irq, void *dev_id)
                case 0x14:
                        qla24xx_process_response_queue(vha, rsp);
                        break;
+               case 0x1C: /* ATIO queue updated */
+                       qlt_24xx_process_atio_queue(vha);
+                       break;
+               case 0x1D: /* ATIO and response queues updated */
+                       qlt_24xx_process_atio_queue(vha);
+                       qla24xx_process_response_queue(vha, rsp);
+                       break;
                default:
                        ql_dbg(ql_dbg_async, vha, 0x504f,
                            "Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -2312,6 +2361,13 @@ qla24xx_msix_default(int irq, void *dev_id)
                case 0x14:
                        qla24xx_process_response_queue(vha, rsp);
                        break;
+               case 0x1C: /* ATIO queue updated */
+                       qlt_24xx_process_atio_queue(vha);
+                       break;
+               case 0x1D: /* ATIO and response queues updated */
+                       qlt_24xx_process_atio_queue(vha);
+                       qla24xx_process_response_queue(vha, rsp);
+                       break;
                default:
                        ql_dbg(ql_dbg_async, vha, 0x5051,
                            "Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -2564,7 +2620,15 @@ void
 qla2x00_free_irqs(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
-       struct rsp_que *rsp = ha->rsp_q_map[0];
+       struct rsp_que *rsp;
+
+       /*
+        * We need to check that ha->rsp_q_map is valid in case we are called
+        * from a probe failure context.
+        */
+       if (!ha->rsp_q_map || !ha->rsp_q_map[0])
+               return;
+       rsp = ha->rsp_q_map[0];
 
        if (ha->flags.msix_enabled)
                qla24xx_disable_msix(ha);
index b4a23394a7bd8f9a225ba2acd77ee19fb018f691..d5ce92c0a8fcef8e246ef25599e9d635a9a5b454 100644 (file)
@@ -5,6 +5,7 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
+#include "qla_target.h"
 
 #include <linux/delay.h>
 #include <linux/gfp.h>
@@ -270,11 +271,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        ictrl = RD_REG_WORD(&reg->isp.ictrl);
                }
                ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
-                   "MBX Command timeout for cmd %x.\n", command);
-               ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111a,
-                   "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
-               ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111b,
-                   "mb[0] = 0x%x.\n", mb0);
+                   "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+                   "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
                ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
 
                /*
@@ -320,7 +318,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                                            CRB_NIU_XG_PAUSE_CTL_P1);
                                }
                                ql_log(ql_log_info, base_vha, 0x101c,
-                                   "Mailbox cmd timeout occured, cmd=0x%x, "
+                                   "Mailbox cmd timeout occurred, cmd=0x%x, "
                                    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
                                    "abort.\n", command, mcp->mb[0],
                                    ha->flags.eeh_busy);
@@ -345,7 +343,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                                            CRB_NIU_XG_PAUSE_CTL_P1);
                                }
                                ql_log(ql_log_info, base_vha, 0x101e,
-                                   "Mailbox cmd timeout occured, cmd=0x%x, "
+                                   "Mailbox cmd timeout occurred, cmd=0x%x, "
                                    "mb[0]=0x%x. Scheduling ISP abort ",
                                    command, mcp->mb[0]);
                                set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
@@ -390,7 +388,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
+           "Entered %s.\n", __func__);
 
        if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
                mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -424,7 +423,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
                ql_dbg(ql_dbg_mbx, vha, 0x1023,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -454,7 +454,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
        mcp->out_mb = MBX_0;
@@ -489,10 +490,11 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
                if (IS_FWI2_CAPABLE(ha)) {
-                       ql_dbg(ql_dbg_mbx, vha, 0x1027,
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
                            "Done exchanges=%x.\n", mcp->mb[1]);
                } else {
-                       ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__);
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
+                           "Done %s.\n", __func__);
                }
        }
 
@@ -523,7 +525,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
        mbx_cmd_t       *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
        mcp->out_mb = MBX_0;
@@ -561,11 +564,11 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
                        ha->fw_attributes_h = mcp->mb[15];
                        ha->fw_attributes_ext[0] = mcp->mb[16];
                        ha->fw_attributes_ext[1] = mcp->mb[17];
-                       ql_dbg(ql_dbg_mbx, vha, 0x1139,
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
                            "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
                            __func__, mcp->mb[15], mcp->mb[6]);
                } else
-                       ql_dbg(ql_dbg_mbx, vha, 0x112f,
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
                            "%s: FwAttributes [Upper]  invalid, MB6:%04x\n",
                            __func__, mcp->mb[6]);
        }
@@ -576,7 +579,8 @@ failed:
                ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
+                   "Done %s.\n", __func__);
        }
        return rval;
 }
@@ -602,7 +606,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
        mcp->out_mb = MBX_0;
@@ -620,7 +625,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
                fwopts[2] = mcp->mb[2];
                fwopts[3] = mcp->mb[3];
 
-               ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -648,7 +654,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
        mcp->mb[1] = fwopts[1];
@@ -676,7 +683,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
                    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -704,7 +712,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
        mcp->mb[1] = 0xAAAA;
@@ -734,7 +743,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
                ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -762,7 +772,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_VERIFY_CHECKSUM;
        mcp->out_mb = MBX_0;
@@ -787,7 +798,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
                    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
                    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -819,7 +831,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
        mbx_cmd_t       mc;
        mbx_cmd_t       *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_IOCB_COMMAND_A64;
        mcp->mb[1] = 0;
@@ -842,7 +855,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
                /* Mask reserved bits. */
                sts_entry->entry_status &=
                    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
-               ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -884,7 +898,8 @@ qla2x00_abort_command(srb_t *sp)
        struct req_que *req = vha->req;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 
-       ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
+           "Entered %s.\n", __func__);
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -915,7 +930,8 @@ qla2x00_abort_command(srb_t *sp)
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -934,7 +950,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
        l = l;
        vha = fcport->vha;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
+           "Entered %s.\n", __func__);
 
        req = vha->hw->req_q_map[0];
        rsp = req->rsp;
@@ -955,7 +972,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
        mcp->flags = 0;
        rval = qla2x00_mailbox_command(vha, mcp);
        if (rval != QLA_SUCCESS) {
-               ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
+                   "Failed=%x.\n", rval);
        }
 
        /* Issue marker IOCB. */
@@ -965,7 +983,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
                ql_dbg(ql_dbg_mbx, vha, 0x1040,
                    "Failed to issue marker IOCB (%x).\n", rval2);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -983,7 +1002,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
 
        vha = fcport->vha;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
+           "Entered %s.\n", __func__);
 
        req = vha->hw->req_q_map[0];
        rsp = req->rsp;
@@ -1012,7 +1032,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
                ql_dbg(ql_dbg_mbx, vha, 0x1044,
                    "Failed to issue marker IOCB (%x).\n", rval2);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -1046,7 +1067,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
        mcp->mb[9] = vha->vp_idx;
@@ -1074,7 +1096,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
                /*EMPTY*/
                ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
+                   "Done %s.\n", __func__);
 
                if (IS_CNA_CAPABLE(vha->hw)) {
                        vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
@@ -1115,7 +1138,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_RETRY_COUNT;
        mcp->out_mb = MBX_0;
@@ -1138,7 +1162,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
                        *tov = ratov;
                }
 
-               ql_dbg(ql_dbg_mbx, vha, 0x104b,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
                    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
        }
 
@@ -1170,7 +1194,8 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
+           "Entered %s.\n", __func__);
 
        if (IS_QLA82XX(ha) && ql2xdbwr)
                qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
@@ -1213,9 +1238,100 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
                    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
+                   "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
+
+/*
+ * qla2x00_get_node_name_list
+ *      Issue get node name list mailbox command, kmalloc()
+ *      and return the resulting list. Caller must kfree() it!
+ *
+ * Input:
+ *      ha = adapter state pointer.
+ *      out_data = resulting list
+ *      out_len = length of the resulting list
+ *
+ * Returns:
+ *      qla2x00 local function return status code.
+ *
+ * Context:
+ *      Kernel context.
+ */
+int
+qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_port_24xx_data *list = NULL;
+       void *pmap;
+       mbx_cmd_t mc;
+       dma_addr_t pmap_dma;
+       ulong dma_size;
+       int rval, left;
+
+       left = 1;
+       while (left > 0) {
+               dma_size = left * sizeof(*list);
+               pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
+                                        &pmap_dma, GFP_KERNEL);
+               if (!pmap) {
+                       ql_log(ql_log_warn, vha, 0x113f,
+                           "%s(%ld): DMA Alloc failed of %ld\n",
+                           __func__, vha->host_no, dma_size);
+                       rval = QLA_MEMORY_ALLOC_FAILED;
+                       goto out;
+               }
+
+               mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
+               mc.mb[1] = BIT_1 | BIT_3;
+               mc.mb[2] = MSW(pmap_dma);
+               mc.mb[3] = LSW(pmap_dma);
+               mc.mb[6] = MSW(MSD(pmap_dma));
+               mc.mb[7] = LSW(MSD(pmap_dma));
+               mc.mb[8] = dma_size;
+               mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
+               mc.in_mb = MBX_0|MBX_1;
+               mc.tov = 30;
+               mc.flags = MBX_DMA_IN;
+
+               rval = qla2x00_mailbox_command(vha, &mc);
+               if (rval != QLA_SUCCESS) {
+                       if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
+                           (mc.mb[1] == 0xA)) {
+                               left += le16_to_cpu(mc.mb[2]) /
+                                   sizeof(struct qla_port_24xx_data);
+                               goto restart;
+                       }
+                       goto out_free;
+               }
+
+               left = 0;
+
+               list = kzalloc(dma_size, GFP_KERNEL);
+               if (!list) {
+                       ql_log(ql_log_warn, vha, 0x1140,
+                           "%s(%ld): failed to allocate node names list "
+                           "structure.\n", __func__, vha->host_no);
+                       rval = QLA_MEMORY_ALLOC_FAILED;
+                       goto out_free;
+               }
+
+               memcpy(list, pmap, dma_size);
+restart:
+               dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
        }
 
+       *out_data = list;
+       *out_len = dma_size;
+
+out:
+       return rval;
+
+out_free:
+       dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
        return rval;
 }
 
@@ -1246,7 +1362,8 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
        dma_addr_t pd_dma;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
+           "Entered %s.\n", __func__);
 
        pd24 = NULL;
        pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
@@ -1326,6 +1443,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
                        fcport->port_type = FCT_INITIATOR;
                else
                        fcport->port_type = FCT_TARGET;
+
+               /* Passback COS information. */
+               fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
+                               FC_COS_CLASS2 : FC_COS_CLASS3;
+
+               if (pd24->prli_svc_param_word_3[0] & BIT_7)
+                       fcport->flags |= FCF_CONF_COMP_SUPPORTED;
        } else {
                uint64_t zero = 0;
 
@@ -1378,7 +1502,8 @@ gpd_error_out:
                    "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
                    mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -1407,7 +1532,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
        mcp->out_mb = MBX_0;
@@ -1433,7 +1559,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
                ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -1465,7 +1592,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_PORT_NAME;
        mcp->mb[9] = vha->vp_idx;
@@ -1499,7 +1627,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
                        name[7] = LSB(mcp->mb[7]);
                }
 
-               ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -1527,7 +1656,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
+           "Entered %s.\n", __func__);
 
        if (IS_CNA_CAPABLE(vha->hw)) {
                /* Logout across all FCFs. */
@@ -1564,7 +1694,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
                ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -1596,9 +1727,10 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
+           "Entered %s.\n", __func__);
 
-       ql_dbg(ql_dbg_mbx, vha, 0x105e,
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
            "Retry cnt=%d ratov=%d total tov=%d.\n",
            vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
 
@@ -1622,7 +1754,8 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -1641,7 +1774,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
        struct req_que *req;
        struct rsp_que *rsp;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
+           "Entered %s.\n", __func__);
 
        if (ha->flags.cpu_affinity_enabled)
                req = ha->req_q_map[0];
@@ -1715,7 +1849,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
                        break;
                }
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
+                   "Done %s.\n", __func__);
 
                iop[0] = le32_to_cpu(lg->io_parameter[0]);
 
@@ -1733,6 +1868,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
                        mb[10] |= BIT_0;        /* Class 2. */
                if (lg->io_parameter[9] || lg->io_parameter[10])
                        mb[10] |= BIT_1;        /* Class 3. */
+               if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
+                       mb[10] |= BIT_7;        /* Confirmed Completion
+                                                * Allowed
+                                                */
        }
 
        dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1770,7 +1909,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
        mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1818,7 +1958,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
                    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -1849,7 +1990,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
+           "Entered %s.\n", __func__);
 
        if (IS_FWI2_CAPABLE(ha))
                return qla24xx_login_fabric(vha, fcport->loop_id,
@@ -1891,7 +2033,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
                    rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
+                   "Done %s.\n", __func__);
        }
 
        return (rval);
@@ -1908,7 +2051,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
        struct req_que *req;
        struct rsp_que *rsp;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
+           "Entered %s.\n", __func__);
 
        lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
        if (lg == NULL) {
@@ -1952,7 +2096,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
                    le32_to_cpu(lg->io_parameter[1]));
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
+                   "Done %s.\n", __func__);
        }
 
        dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1984,7 +2129,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
        mcp->out_mb = MBX_1|MBX_0;
@@ -2007,7 +2153,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
                    "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2035,7 +2182,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_LIP_FULL_LOGIN;
        mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
@@ -2052,7 +2200,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
                ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2078,7 +2227,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
+           "Entered %s.\n", __func__);
 
        if (id_list == NULL)
                return QLA_FUNCTION_FAILED;
@@ -2110,7 +2260,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
                ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
        } else {
                *entries = mcp->mb[1];
-               ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2138,7 +2289,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
        mcp->out_mb = MBX_0;
@@ -2154,7 +2306,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
                ql_dbg(ql_dbg_mbx, vha, 0x107d,
                    "Failed mb[0]=%x.\n", mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x107e,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
                    "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
                    "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
                    mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
@@ -2201,7 +2353,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
        dma_addr_t pmap_dma;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
+           "Entered %s.\n", __func__);
 
        pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
        if (pmap  == NULL) {
@@ -2224,7 +2377,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
        rval = qla2x00_mailbox_command(vha, mcp);
 
        if (rval == QLA_SUCCESS) {
-               ql_dbg(ql_dbg_mbx, vha, 0x1081,
+               ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
                    "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
                    mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
                ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
@@ -2238,7 +2391,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2267,7 +2421,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
        uint32_t *siter, *diter, dwords;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_LINK_STATUS;
        mcp->mb[2] = MSW(stats_dma);
@@ -2301,7 +2456,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
                        rval = QLA_FUNCTION_FAILED;
                } else {
                        /* Copy over data -- firmware data is LE. */
-                       ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__);
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
+                           "Done %s.\n", __func__);
                        dwords = offsetof(struct link_statistics, unused1) / 4;
                        siter = diter = &stats->link_fail_cnt;
                        while (dwords--)
@@ -2324,7 +2480,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
        mbx_cmd_t *mcp = &mc;
        uint32_t *siter, *diter, dwords;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
        mcp->mb[2] = MSW(stats_dma);
@@ -2346,7 +2503,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
                            "Failed mb[0]=%x.\n", mcp->mb[0]);
                        rval = QLA_FUNCTION_FAILED;
                } else {
-                       ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__);
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
+                           "Done %s.\n", __func__);
                        /* Copy over data -- firmware data is LE. */
                        dwords = sizeof(struct link_statistics) / 4;
                        siter = diter = &stats->link_fail_cnt;
@@ -2375,7 +2533,8 @@ qla24xx_abort_command(srb_t *sp)
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = vha->req;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
+           "Entered %s.\n", __func__);
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -2404,7 +2563,7 @@ qla24xx_abort_command(srb_t *sp)
        abt->port_id[0] = fcport->d_id.b.al_pa;
        abt->port_id[1] = fcport->d_id.b.area;
        abt->port_id[2] = fcport->d_id.b.domain;
-       abt->vp_index = fcport->vp_idx;
+       abt->vp_index = fcport->vha->vp_idx;
 
        abt->req_que_no = cpu_to_le16(req->id);
 
@@ -2423,7 +2582,8 @@ qla24xx_abort_command(srb_t *sp)
                    le16_to_cpu(abt->nport_handle));
                rval = QLA_FUNCTION_FAILED;
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
+                   "Done %s.\n", __func__);
        }
 
        dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2455,7 +2615,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
        ha = vha->hw;
        req = vha->req;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
+           "Entered %s.\n", __func__);
 
        if (ha->flags.cpu_affinity_enabled)
                rsp = ha->rsp_q_map[tag + 1];
@@ -2478,7 +2639,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
        tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
        tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
        tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
-       tsk->p.tsk.vp_index = fcport->vp_idx;
+       tsk->p.tsk.vp_index = fcport->vha->vp_idx;
        if (type == TCF_LUN_RESET) {
                int_to_scsilun(l, &tsk->p.tsk.lun);
                host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
@@ -2504,7 +2665,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
        } else if (le16_to_cpu(sts->scsi_status) &
            SS_RESPONSE_INFO_LEN_VALID) {
                if (le32_to_cpu(sts->rsp_data_len) < 4) {
-                       ql_dbg(ql_dbg_mbx, vha, 0x1097,
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
                            "Ignoring inconsistent data length -- not enough "
                            "response info (%d).\n",
                            le32_to_cpu(sts->rsp_data_len));
@@ -2523,7 +2684,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
                ql_dbg(ql_dbg_mbx, vha, 0x1099,
                    "Failed to issue marker IOCB (%x).\n", rval2);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
+                   "Done %s.\n", __func__);
        }
 
        dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
@@ -2564,7 +2726,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
        if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
        mcp->out_mb = MBX_0;
@@ -2576,7 +2739,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2596,7 +2760,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_SERDES_PARAMS;
        mcp->mb[1] = BIT_0;
@@ -2615,7 +2780,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
                /*EMPTY*/
-               ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2631,7 +2797,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
        if (!IS_FWI2_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_STOP_FIRMWARE;
        mcp->mb[1] = 0;
@@ -2646,7 +2813,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
                if (mcp->mb[0] == MBS_INVALID_COMMAND)
                        rval = QLA_INVALID_COMMAND;
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2660,7 +2828,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -2686,7 +2855,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2699,7 +2869,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -2719,7 +2890,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2733,7 +2905,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
+           "Entered %s.\n", __func__);
 
        if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
            !IS_QLA83XX(vha->hw))
@@ -2764,7 +2937,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
+                   "Done %s.\n", __func__);
 
                if (mb)
                        memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2782,7 +2956,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -2804,7 +2979,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
+                   "Done %s.\n", __func__);
 
                if (wr)
                        *wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2829,7 +3005,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
+           "Entered %s.\n", __func__);
 
        if (!IS_IIDMA_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -2854,7 +3031,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
+                   "Done %s.\n", __func__);
                if (port_speed)
                        *port_speed = mcp->mb[3];
        }
@@ -2870,7 +3048,8 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
+           "Entered %s.\n", __func__);
 
        if (!IS_IIDMA_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -2897,9 +3076,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
        }
 
        if (rval != QLA_SUCCESS) {
-               ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval);
+               ql_dbg(ql_dbg_mbx, vha, 0x10b4,
+                   "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -2915,24 +3096,25 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
        scsi_qla_host_t *vp;
        unsigned long   flags;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
+           "Entered %s.\n", __func__);
 
        if (rptid_entry->entry_status != 0)
                return;
 
        if (rptid_entry->format == 0) {
-               ql_dbg(ql_dbg_mbx, vha, 0x10b7,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
                    "Format 0 : Number of VPs setup %d, number of "
                    "VPs acquired %d.\n",
                    MSB(le16_to_cpu(rptid_entry->vp_count)),
                    LSB(le16_to_cpu(rptid_entry->vp_count)));
-               ql_dbg(ql_dbg_mbx, vha, 0x10b8,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
                    "Primary port id %02x%02x%02x.\n",
                    rptid_entry->port_id[2], rptid_entry->port_id[1],
                    rptid_entry->port_id[0]);
        } else if (rptid_entry->format == 1) {
                vp_idx = LSB(stat);
-               ql_dbg(ql_dbg_mbx, vha, 0x10b9,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
                    "Format 1: VP[%d] enabled - status %d - with "
                    "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
                    rptid_entry->port_id[2], rptid_entry->port_id[1],
@@ -2999,7 +3181,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
 
        /* This can be called by the parent */
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
+           "Entered %s.\n", __func__);
 
        vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
        if (!vpmod) {
@@ -3015,6 +3198,9 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
        vpmod->vp_count = 1;
        vpmod->vp_index1 = vha->vp_idx;
        vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
+
+       qlt_modify_vp_config(vha, vpmod);
+
        memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
        memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
        vpmod->entry_count = 1;
@@ -3035,7 +3221,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
                rval = QLA_FUNCTION_FAILED;
        } else {
                /* EMPTY */
-               ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
+                   "Done %s.\n", __func__);
                fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
        }
        dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
@@ -3069,7 +3256,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
        int     vp_index = vha->vp_idx;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10c1,
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
            "Entered %s enabling index %d.\n", __func__, vp_index);
 
        if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
@@ -3112,7 +3299,8 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
                    le16_to_cpu(vce->comp_status));
                rval = QLA_FUNCTION_FAILED;
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
+                   "Done %s.\n", __func__);
        }
 
        dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -3149,14 +3337,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__);
-
-       /*
-        * This command is implicitly executed by firmware during login for the
-        * physical hosts
-        */
-       if (vp_idx == 0)
-               return QLA_FUNCTION_FAILED;
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
        mcp->mb[1] = format;
@@ -3185,7 +3367,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
+           "Entered %s.\n", __func__);
 
        if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
                mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
@@ -3219,7 +3402,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
                ql_dbg(ql_dbg_mbx, vha, 0x1008,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3244,7 +3428,8 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
        unsigned long flags;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
+           "Entered %s.\n", __func__);
 
        mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
        if (mn == NULL) {
@@ -3285,7 +3470,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
                status[0] = le16_to_cpu(mn->p.rsp.comp_status);
                status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
                    le16_to_cpu(mn->p.rsp.failure_code) : 0;
-               ql_dbg(ql_dbg_mbx, vha, 0x10ce,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
                    "cs=%x fc=%x.\n", status[0], status[1]);
 
                if (status[0] != CS_COMPLETE) {
@@ -3299,7 +3484,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
                                retry = 1;
                        }
                } else {
-                       ql_dbg(ql_dbg_mbx, vha, 0x10d0,
+                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
                            "Firmware updated to %x.\n",
                            le32_to_cpu(mn->p.rsp.fw_ver));
 
@@ -3316,9 +3501,11 @@ verify_done:
        dma_pool_free(ha->s_dma_pool, mn, mn_dma);
 
        if (rval != QLA_SUCCESS) {
-               ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
+               ql_dbg(ql_dbg_mbx, vha, 0x10d1,
+                   "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3334,7 +3521,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
        struct device_reg_25xxmq __iomem *reg;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
        mcp->mb[1] = req->options;
@@ -3388,7 +3576,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
                ql_dbg(ql_dbg_mbx, vha, 0x10d4,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3404,7 +3593,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
        struct device_reg_25xxmq __iomem *reg;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
        mcp->mb[1] = rsp->options;
@@ -3456,7 +3646,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
                ql_dbg(ql_dbg_mbx, vha, 0x10d7,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3469,7 +3660,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_IDC_ACK;
        memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
@@ -3483,7 +3675,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
                ql_dbg(ql_dbg_mbx, vha, 0x10da,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3496,7 +3689,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
+           "Entered %s.\n", __func__);
 
        if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -3514,7 +3708,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
+                   "Done %s.\n", __func__);
                *sector_size = mcp->mb[1];
        }
 
@@ -3531,7 +3726,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
        if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
        mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
@@ -3547,7 +3743,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3563,7 +3760,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
        if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
        mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
@@ -3582,7 +3780,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
                    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3595,7 +3794,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_RESTART_MPI_FW;
        mcp->out_mb = MBX_0;
@@ -3609,7 +3809,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3624,7 +3825,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(ha))
                return QLA_FUNCTION_FAILED;
@@ -3654,7 +3856,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
                ql_dbg(ql_dbg_mbx, vha, 0x10e9,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3669,7 +3872,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(ha))
                return QLA_FUNCTION_FAILED;
@@ -3699,7 +3903,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
                ql_dbg(ql_dbg_mbx, vha, 0x10ec,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3713,7 +3918,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
+           "Entered %s.\n", __func__);
 
        if (!IS_CNA_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -3735,7 +3941,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
                    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
+                   "Done %s.\n", __func__);
 
 
                *actual_size = mcp->mb[2] << 2;
@@ -3752,7 +3959,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
+           "Entered %s.\n", __func__);
 
        if (!IS_CNA_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -3775,7 +3983,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
                    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -3788,7 +3997,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -3805,7 +4015,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
                ql_dbg(ql_dbg_mbx, vha, 0x10f5,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
+                   "Done %s.\n", __func__);
                *data = mcp->mb[3] << 16 | mcp->mb[2];
        }
 
@@ -3821,7 +4032,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
        mbx_cmd_t *mcp = &mc;
        uint32_t iter_cnt = 0x1;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
+           "Entered %s.\n", __func__);
 
        memset(mcp->mb, 0 , sizeof(mcp->mb));
        mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
@@ -3865,7 +4077,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
                    "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
                    mcp->mb[3], mcp->mb[18], mcp->mb[19]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
+                   "Done %s.\n", __func__);
        }
 
        /* Copy mailbox information */
@@ -3882,7 +4095,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
+           "Entered %s.\n", __func__);
 
        memset(mcp->mb, 0 , sizeof(mcp->mb));
        mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
@@ -3926,7 +4140,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
                    "Failed=%x mb[0]=%x mb[1]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
+                   "Done %s.\n", __func__);
        }
 
        /* Copy mailbox information */
@@ -3941,7 +4156,7 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10fd,
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
            "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
 
        mcp->mb[0] = MBC_ISP84XX_RESET;
@@ -3955,7 +4170,8 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
        if (rval != QLA_SUCCESS)
                ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
        else
-               ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
+                   "Done %s.\n", __func__);
 
        return rval;
 }
@@ -3967,7 +4183,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(vha->hw))
                return QLA_FUNCTION_FAILED;
@@ -3986,7 +4203,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
                ql_dbg(ql_dbg_mbx, vha, 0x1101,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -4003,7 +4221,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
 
        rval = QLA_SUCCESS;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
+           "Entered %s.\n", __func__);
 
        clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 
@@ -4046,7 +4265,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
                ql_dbg(ql_dbg_mbx, vha, 0x1104,
                    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -4060,7 +4280,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(ha))
                return QLA_FUNCTION_FAILED;
@@ -4078,7 +4299,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
                ql_dbg(ql_dbg_mbx, vha, 0x1107,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
+                   "Done %s.\n", __func__);
                if (mcp->mb[1] != 0x7)
                        ha->link_data_rate = mcp->mb[1];
        }
@@ -4094,7 +4316,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
+           "Entered %s.\n", __func__);
 
        if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
                return QLA_FUNCTION_FAILED;
@@ -4113,7 +4336,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
                /* Copy all bits to preserve original value */
                memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
 
-               ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
+                   "Done %s.\n", __func__);
        }
        return rval;
 }
@@ -4125,7 +4349,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_SET_PORT_CONFIG;
        /* Copy all bits to preserve original setting */
@@ -4140,7 +4365,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
                ql_dbg(ql_dbg_mbx, vha, 0x110d,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else
-               ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
+                   "Done %s.\n", __func__);
 
        return rval;
 }
@@ -4155,7 +4381,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
+           "Entered %s.\n", __func__);
 
        if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
                return QLA_FUNCTION_FAILED;
@@ -4183,7 +4410,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -4196,7 +4424,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
        uint8_t byte;
        struct qla_hw_data *ha = vha->hw;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
+           "Entered %s.\n", __func__);
 
        /* Integer part */
        rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
@@ -4216,7 +4445,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
        }
        *frac = (byte >> 6) * 25;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
+           "Done %s.\n", __func__);
 fail:
        return rval;
 }
@@ -4229,7 +4459,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
+           "Entered %s.\n", __func__);
 
        if (!IS_FWI2_CAPABLE(ha))
                return QLA_FUNCTION_FAILED;
@@ -4248,7 +4479,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
                ql_dbg(ql_dbg_mbx, vha, 0x1016,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -4262,7 +4494,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
+           "Entered %s.\n", __func__);
 
        if (!IS_QLA82XX(ha))
                return QLA_FUNCTION_FAILED;
@@ -4281,7 +4514,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
                ql_dbg(ql_dbg_mbx, vha, 0x100c,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -4295,7 +4529,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
        mbx_cmd_t *mcp = &mc;
        int rval = QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
+           "Entered %s.\n", __func__);
 
        memset(mcp->mb, 0 , sizeof(mcp->mb));
        mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
@@ -4318,7 +4553,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
                    (mcp->mb[1] << 16) | mcp->mb[0],
                    (mcp->mb[3] << 16) | mcp->mb[2]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
+                   "Done %s.\n", __func__);
                ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
                if (!ha->md_template_size) {
                        ql_dbg(ql_dbg_mbx, vha, 0x1122,
@@ -4337,7 +4573,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
        mbx_cmd_t *mcp = &mc;
        int rval = QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
+           "Entered %s.\n", __func__);
 
        ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
           ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
@@ -4372,7 +4609,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
                    ((mcp->mb[1] << 16) | mcp->mb[0]),
                    ((mcp->mb[3] << 16) | mcp->mb[2]));
        } else
-               ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
+                   "Done %s.\n", __func__);
        return rval;
 }
 
@@ -4387,7 +4625,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
        if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1133, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
+           "Entered %s.\n", __func__);
 
        memset(mcp, 0, sizeof(mbx_cmd_t));
        mcp->mb[0] = MBC_SET_LED_CONFIG;
@@ -4412,7 +4651,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
                ql_dbg(ql_dbg_mbx, vha, 0x1134,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1135, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -4429,7 +4669,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
        if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1136, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
+           "Entered %s.\n", __func__);
 
        memset(mcp, 0, sizeof(mbx_cmd_t));
        mcp->mb[0] = MBC_GET_LED_CONFIG;
@@ -4454,7 +4695,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
                        led_cfg[4] = mcp->mb[5];
                        led_cfg[5] = mcp->mb[6];
                }
-               ql_dbg(ql_dbg_mbx, vha, 0x1138, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
+                   "Done %s.\n", __func__);
        }
 
        return rval;
@@ -4471,7 +4713,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
        if (!IS_QLA82XX(ha))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1127,
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
                "Entered %s.\n", __func__);
 
        memset(mcp, 0, sizeof(mbx_cmd_t));
@@ -4491,7 +4733,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
                ql_dbg(ql_dbg_mbx, vha, 0x1128,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1129,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
                    "Done %s.\n", __func__);
        }
 
@@ -4509,7 +4751,8 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
        if (!IS_QLA83XX(ha))
                return QLA_FUNCTION_FAILED;
 
-       ql_dbg(ql_dbg_mbx, vha, 0x1130, "Entered %s.\n", __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
+           "Entered %s.\n", __func__);
 
        mcp->mb[0] = MBC_WRITE_REMOTE_REG;
        mcp->mb[1] = LSW(reg);
@@ -4527,7 +4770,7 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
                ql_dbg(ql_dbg_mbx, vha, 0x1131,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        } else {
-               ql_dbg(ql_dbg_mbx, vha, 0x1132,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
                    "Done %s.\n", __func__);
        }
 
@@ -4543,13 +4786,14 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
        mbx_cmd_t *mcp = &mc;
 
        if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-               ql_dbg(ql_dbg_mbx, vha, 0x113b,
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
                    "Implicit LOGO Unsupported.\n");
                return QLA_FUNCTION_FAILED;
        }
 
 
-       ql_dbg(ql_dbg_mbx, vha, 0x113c, "Done %s.\n",  __func__);
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
+           "Entering %s.\n",  __func__);
 
        /* Perform Implicit LOGO. */
        mcp->mb[0] = MBC_PORT_LOGOUT;
@@ -4564,7 +4808,8 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
                ql_dbg(ql_dbg_mbx, vha, 0x113d,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
        else
-               ql_dbg(ql_dbg_mbx, vha, 0x113e, "Done %s.\n", __func__);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
+                   "Done %s.\n", __func__);
 
        return rval;
 }
index aa062a1b0ca496f4a1bc4f10e85f795d0d5d17b3..3e8b32419e68959440c8f85716867e6dfc897435 100644 (file)
@@ -6,6 +6,7 @@
  */
 #include "qla_def.h"
 #include "qla_gbl.h"
+#include "qla_target.h"
 
 #include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
@@ -49,6 +50,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
 
        spin_lock_irqsave(&ha->vport_slock, flags);
        list_add_tail(&vha->list, &ha->vp_list);
+
+       qlt_update_vp_map(vha, SET_VP_IDX);
+
        spin_unlock_irqrestore(&ha->vport_slock, flags);
 
        mutex_unlock(&ha->vport_lock);
@@ -79,6 +83,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
                spin_lock_irqsave(&ha->vport_slock, flags);
        }
        list_del(&vha->list);
+       qlt_update_vp_map(vha, RESET_VP_IDX);
        spin_unlock_irqrestore(&ha->vport_slock, flags);
 
        vp_id = vha->vp_idx;
@@ -134,7 +139,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                ql_dbg(ql_dbg_vport, vha, 0xa001,
                    "Marking port dead, loop_id=0x%04x : %x.\n",
-                   fcport->loop_id, fcport->vp_idx);
+                   fcport->loop_id, fcport->vha->vp_idx);
 
                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@@ -150,6 +155,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 
+       /* Remove port id from vp target map */
+       qlt_update_vp_map(vha, RESET_AL_PA);
+
        qla2x00_mark_vp_devices_dead(vha);
        atomic_set(&vha->vp_state, VP_FAILED);
        vha->flags.management_server_logged_in = 0;
@@ -295,10 +303,8 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
 static int
 qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
 {
-       ql_dbg(ql_dbg_dpc, vha, 0x4012,
-           "Entering %s.\n", __func__);
-       ql_dbg(ql_dbg_dpc, vha, 0x4013,
-           "vp_flags: 0x%lx.\n", vha->vp_flags);
+       ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
+           "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
 
        qla2x00_do_work(vha);
 
@@ -348,7 +354,7 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
                }
        }
 
-       ql_dbg(ql_dbg_dpc, vha, 0x401c,
+       ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
            "Exiting %s.\n", __func__);
        return 0;
 }
index de722a933438dea5de3b60ce03b212b1170d734d..caf627ba7fa8b3e11a493355bf7a08f120770bcd 100644 (file)
@@ -1190,12 +1190,12 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
        }
 
        /* Offset in flash = lower 16 bits
-        * Number of enteries = upper 16 bits
+        * Number of entries = upper 16 bits
         */
        offset = n & 0xffffU;
        n = (n >> 16) & 0xffffU;
 
-       /* number of addr/value pair should not exceed 1024 enteries */
+       /* number of addr/value pair should not exceed 1024 entries */
        if (n  >= 1024) {
                ql_log(ql_log_fatal, vha, 0x0071,
                    "Card flash not initialized:n=0x%x.\n", n);
@@ -2050,7 +2050,7 @@ qla82xx_intr_handler(int irq, void *dev_id)
 
        rsp = (struct rsp_que *) dev_id;
        if (!rsp) {
-               ql_log(ql_log_info, NULL, 0xb054,
+               ql_log(ql_log_info, NULL, 0xb053,
                    "%s: NULL response queue pointer.\n", __func__);
                return IRQ_NONE;
        }
@@ -2446,7 +2446,7 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
 
        if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
                ql_log(ql_log_info, vha, 0x00a1,
-                   "Firmware loaded successully from flash.\n");
+                   "Firmware loaded successfully from flash.\n");
                return QLA_SUCCESS;
        } else {
                ql_log(ql_log_warn, vha, 0x0108,
@@ -2461,7 +2461,7 @@ try_blob_fw:
        blob = ha->hablob = qla2x00_request_firmware(vha);
        if (!blob) {
                ql_log(ql_log_fatal, vha, 0x00a3,
-                   "Firmware image not preset.\n");
+                   "Firmware image not present.\n");
                goto fw_load_failed;
        }
 
@@ -2689,7 +2689,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
                if (!optrom) {
                        ql_log(ql_log_warn, vha, 0xb01b,
                            "Unable to allocate memory "
-                           "for optron burst write (%x KB).\n",
+                           "for optrom burst write (%x KB).\n",
                            OPTROM_BURST_SIZE / 1024);
                }
        }
@@ -2960,9 +2960,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
                         * changing the state to DEV_READY
                         */
                        ql_log(ql_log_info, vha, 0xb023,
-                           "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
-                       ql_log(ql_log_info, vha, 0xb024,
-                           "DRV_ACTIVE:%d DRV_STATE:%d.\n",
+                           "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d "
+                           "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
                            drv_active, drv_state);
                        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
                            QLA82XX_DEV_READY);
@@ -3129,7 +3128,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
                if (ql2xmdenable) {
                        if (qla82xx_md_collect(vha))
                                ql_log(ql_log_warn, vha, 0xb02c,
-                                   "Not able to collect minidump.\n");
+                                   "Minidump not collected.\n");
                } else
                        ql_log(ql_log_warn, vha, 0xb04f,
                            "Minidump disabled.\n");
@@ -3160,11 +3159,11 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
                                    "Firmware version differs "
                                    "Previous version: %d:%d:%d - "
                                    "New version: %d:%d:%d\n",
+                                   fw_major_version, fw_minor_version,
+                                   fw_subminor_version,
                                    ha->fw_major_version,
                                    ha->fw_minor_version,
-                                   ha->fw_subminor_version,
-                                   fw_major_version, fw_minor_version,
-                                   fw_subminor_version);
+                                   ha->fw_subminor_version);
                                /* Release MiniDump resources */
                                qla82xx_md_free(vha);
                                /* ALlocate MiniDump resources */
@@ -3325,6 +3324,30 @@ exit:
        return rval;
 }
 
+static int qla82xx_check_temp(scsi_qla_host_t *vha)
+{
+       uint32_t temp, temp_state, temp_val;
+       struct qla_hw_data *ha = vha->hw;
+
+       temp = qla82xx_rd_32(ha, CRB_TEMP_STATE);
+       temp_state = qla82xx_get_temp_state(temp);
+       temp_val = qla82xx_get_temp_val(temp);
+
+       if (temp_state == QLA82XX_TEMP_PANIC) {
+               ql_log(ql_log_warn, vha, 0x600e,
+                   "Device temperature %d degrees C exceeds "
+                   " maximum allowed. Hardware has been shut down.\n",
+                   temp_val);
+               return 1;
+       } else if (temp_state == QLA82XX_TEMP_WARN) {
+               ql_log(ql_log_warn, vha, 0x600f,
+                   "Device temperature %d degrees C exceeds "
+                   "operating range. Immediate action needed.\n",
+                   temp_val);
+       }
+       return 0;
+}
+
 void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
@@ -3347,18 +3370,20 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
        /* don't poll if reset is going on */
        if (!ha->flags.isp82xx_reset_hdlr_active) {
                dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-               if (dev_state == QLA82XX_DEV_NEED_RESET &&
+               if (qla82xx_check_temp(vha)) {
+                       set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+                       ha->flags.isp82xx_fw_hung = 1;
+                       qla82xx_clear_pending_mbx(vha);
+               } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
                    !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
                        ql_log(ql_log_warn, vha, 0x6001,
                            "Adapter reset needed.\n");
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-                       qla2xxx_wake_dpc(vha);
                } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
                        !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
                        ql_log(ql_log_warn, vha, 0x6002,
                            "Quiescent needed.\n");
                        set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
-                       qla2xxx_wake_dpc(vha);
                } else {
                        if (qla82xx_check_fw_alive(vha)) {
                                ql_dbg(ql_dbg_timer, vha, 0x6011,
@@ -3398,7 +3423,6 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                                        set_bit(ISP_ABORT_NEEDED,
                                            &vha->dpc_flags);
                                }
-                               qla2xxx_wake_dpc(vha);
                                ha->flags.isp82xx_fw_hung = 1;
                                ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
                                qla82xx_clear_pending_mbx(vha);
@@ -4113,6 +4137,14 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
                goto md_failed;
        }
 
+       if (ha->flags.isp82xx_no_md_cap) {
+               ql_log(ql_log_warn, vha, 0xb054,
+                   "Forced reset from application, "
+                   "ignore minidump capture\n");
+               ha->flags.isp82xx_no_md_cap = 0;
+               goto md_failed;
+       }
+
        if (qla82xx_validate_template_chksum(vha)) {
                ql_log(ql_log_info, vha, 0xb039,
                    "Template checksum validation error\n");
index 4ac50e274661af0744ca1ffcbfecdcedf5771b53..6eb210e3cc637242aed65efde17902a0f94c2d60 100644 (file)
@@ -26,6 +26,7 @@
 #define CRB_RCVPEG_STATE               QLA82XX_REG(0x13c)
 #define BOOT_LOADER_DIMM_STATUS                QLA82XX_REG(0x54)
 #define CRB_DMA_SHIFT                  QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE                 QLA82XX_REG(0x1b4)
 #define QLA82XX_DMA_SHIFT_VALUE                0x55555555
 
 #define QLA82XX_HW_H0_CH_HUB_ADR    0x05
 #define QLA82XX_FW_VERSION_SUB         (QLA82XX_CAM_RAM(0x158))
 #define QLA82XX_PCIE_REG(reg)          (QLA82XX_CRB_PCIE + (reg))
 
-#define PCIE_CHICKEN3                  (0x120c8)
 #define PCIE_SETUP_FUNCTION            (0x12040)
 #define PCIE_SETUP_FUNCTION2           (0x12048)
 
@@ -1178,4 +1178,16 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
 #define CRB_NIU_XG_PAUSE_CTL_P0        0x1
 #define CRB_NIU_XG_PAUSE_CTL_P1        0x8
 
+#define qla82xx_get_temp_val(x)          ((x) >> 16)
+#define qla82xx_get_temp_state(x)        ((x) & 0xffff)
+#define qla82xx_encode_temp(val, state)  (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+       QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
+       QLA82XX_TEMP_WARN,         /* Sound alert, temperature getting high */
+       QLA82XX_TEMP_PANIC         /* Fatal error, hardware has shut down. */
+};
 #endif
index c9c56a8427f3e1d36c2a1c14764a4d4860a782d9..6d1d873a20e2f8997ead5ecadd2e30a183d92c6c 100644 (file)
 #include <linux/mutex.h>
 #include <linux/kobject.h>
 #include <linux/slab.h>
-
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "qla_target.h"
+
 /*
  * Driver version
  */
@@ -40,6 +41,12 @@ static struct kmem_cache *ctx_cachep;
  */
 int ql_errlev = ql_log_all;
 
+int ql2xenableclass2;
+module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xenableclass2,
+               "Specify if Class 2 operations are supported from the very "
+               "beginning. Default is 0 - class 2 not supported.");
+
 int ql2xlogintimeout = 20;
 module_param(ql2xlogintimeout, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xlogintimeout,
@@ -255,6 +262,8 @@ struct scsi_host_template qla2xxx_driver_template = {
 
        .max_sectors            = 0xFFFF,
        .shost_attrs            = qla2x00_host_attrs,
+
+       .supported_mode         = MODE_INITIATOR,
 };
 
 static struct scsi_transport_template *qla2xxx_transport_template = NULL;
@@ -306,7 +315,8 @@ static void qla2x00_free_fw_dump(struct qla_hw_data *);
 static void qla2x00_mem_free(struct qla_hw_data *);
 
 /* -------------------------------------------------------------------------- */
-static int qla2x00_alloc_queues(struct qla_hw_data *ha)
+static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
+                               struct rsp_que *rsp)
 {
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
        ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
@@ -324,6 +334,12 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
                    "Unable to allocate memory for response queue ptrs.\n");
                goto fail_rsp_map;
        }
+       /*
+        * Make sure we record at least the request and response queue zero in
+        * case we need to free them if part of the probe fails.
+        */
+       ha->rsp_q_map[0] = rsp;
+       ha->req_q_map[0] = req;
        set_bit(0, ha->rsp_qid_map);
        set_bit(0, ha->req_qid_map);
        return 1;
@@ -642,12 +658,12 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
        if (ha->flags.eeh_busy) {
                if (ha->flags.pci_channel_io_perm_failure) {
-                       ql_dbg(ql_dbg_io, vha, 0x3001,
+                       ql_dbg(ql_dbg_aer, vha, 0x9010,
                            "PCI Channel IO permanent failure, exiting "
                            "cmd=%p.\n", cmd);
                        cmd->result = DID_NO_CONNECT << 16;
                } else {
-                       ql_dbg(ql_dbg_io, vha, 0x3002,
+                       ql_dbg(ql_dbg_aer, vha, 0x9011,
                            "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
                        cmd->result = DID_REQUEUE << 16;
                }
@@ -657,7 +673,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        rval = fc_remote_port_chkready(rport);
        if (rval) {
                cmd->result = rval;
-               ql_dbg(ql_dbg_io, vha, 0x3003,
+               ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
                    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
                    cmd, rval);
                goto qc24_fail_command;
@@ -1136,7 +1152,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
        ret = FAILED;
 
        ql_log(ql_log_info, vha, 0x8012,
-           "BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun);
+           "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
 
        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
                ql_log(ql_log_fatal, vha, 0x8013,
@@ -2180,6 +2196,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
            "Memory allocated for ha=%p.\n", ha);
        ha->pdev = pdev;
+       ha->tgt.enable_class_2 = ql2xenableclass2;
 
        /* Clear our data area */
        ha->bars = bars;
@@ -2243,6 +2260,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                ha->mbx_count = MAILBOX_REGISTER_COUNT;
                req_length = REQUEST_ENTRY_CNT_24XX;
                rsp_length = RESPONSE_ENTRY_CNT_2300;
+               ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
                ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
                ha->gid_list_info_size = 8;
@@ -2258,6 +2276,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                ha->mbx_count = MAILBOX_REGISTER_COUNT;
                req_length = REQUEST_ENTRY_CNT_24XX;
                rsp_length = RESPONSE_ENTRY_CNT_2300;
+               ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
                ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
                ha->gid_list_info_size = 8;
@@ -2417,6 +2436,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            host->max_cmd_len, host->max_channel, host->max_lun,
            host->transportt, sht->vendor_id);
 
+que_init:
+       /* Alloc arrays of request and response ring ptrs */
+       if (!qla2x00_alloc_queues(ha, req, rsp)) {
+               ql_log(ql_log_fatal, base_vha, 0x003d,
+                   "Failed to allocate memory for queue pointers..."
+                   "aborting.\n");
+               goto probe_init_failed;
+       }
+
+       qlt_probe_one_stage1(base_vha, ha);
+
        /* Set up the irqs */
        ret = qla2x00_request_irqs(ha, rsp);
        if (ret)
@@ -2424,20 +2454,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
        pci_save_state(pdev);
 
-       /* Alloc arrays of request and response ring ptrs */
-que_init:
-       if (!qla2x00_alloc_queues(ha)) {
-               ql_log(ql_log_fatal, base_vha, 0x003d,
-                   "Failed to allocate memory for queue pointers.. aborting.\n");
-               goto probe_init_failed;
-       }
-
-       ha->rsp_q_map[0] = rsp;
-       ha->req_q_map[0] = req;
+       /* Assign back pointers */
        rsp->req = req;
        req->rsp = rsp;
-       set_bit(0, ha->req_qid_map);
-       set_bit(0, ha->rsp_qid_map);
+
        /* FWI2-capable only. */
        req->req_q_in = &ha->iobase->isp24.req_q_in;
        req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -2514,6 +2534,14 @@ que_init:
        ql_dbg(ql_dbg_init, base_vha, 0x00ee,
            "DPC thread started successfully.\n");
 
+       /*
+        * If we're not coming up in initiator mode, we might sit for
+        * a while without waking up the dpc thread, which leads to a
+        * stuck process warning.  So just kick the dpc once here and
+        * let the kthread start (and go back to sleep in qla2x00_do_dpc).
+        */
+       qla2xxx_wake_dpc(base_vha);
+
 skip_dpc:
        list_add_tail(&base_vha->list, &ha->vp_list);
        base_vha->host->irq = ha->pdev->irq;
@@ -2559,7 +2587,11 @@ skip_dpc:
        ql_dbg(ql_dbg_init, base_vha, 0x00f2,
            "Init done and hba is online.\n");
 
-       scsi_scan_host(host);
+       if (qla_ini_mode_enabled(base_vha))
+               scsi_scan_host(host);
+       else
+               ql_dbg(ql_dbg_init, base_vha, 0x0122,
+                       "skipping scsi_scan_host() for non-initiator port\n");
 
        qla2x00_alloc_sysfs_attr(base_vha);
 
@@ -2577,11 +2609,17 @@ skip_dpc:
            base_vha->host_no,
            ha->isp_ops->fw_version_str(base_vha, fw_str));
 
+       qlt_add_target(ha, base_vha);
+
        return 0;
 
 probe_init_failed:
        qla2x00_free_req_que(ha, req);
+       ha->req_q_map[0] = NULL;
+       clear_bit(0, ha->req_qid_map);
        qla2x00_free_rsp_que(ha, rsp);
+       ha->rsp_q_map[0] = NULL;
+       clear_bit(0, ha->rsp_qid_map);
        ha->max_req_queues = ha->max_rsp_queues = 0;
 
 probe_failed:
@@ -2620,6 +2658,22 @@ probe_out:
        return ret;
 }
 
+static void
+qla2x00_stop_dpc_thread(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct task_struct *t = ha->dpc_thread;
+
+       if (ha->dpc_thread == NULL)
+               return;
+       /*
+        * qla2xxx_wake_dpc checks for ->dpc_thread
+        * so we need to zero it out.
+        */
+       ha->dpc_thread = NULL;
+       kthread_stop(t);
+}
+
 static void
 qla2x00_shutdown(struct pci_dev *pdev)
 {
@@ -2663,9 +2717,18 @@ qla2x00_remove_one(struct pci_dev *pdev)
        struct qla_hw_data  *ha;
        unsigned long flags;
 
+       /*
+        * If the PCI device is disabled that means that probe failed and any
+        * resources should have been cleaned up on probe exit.
+        */
+       if (!atomic_read(&pdev->enable_cnt))
+               return;
+
        base_vha = pci_get_drvdata(pdev);
        ha = base_vha->hw;
 
+       ha->flags.host_shutting_down = 1;
+
        mutex_lock(&ha->vport_lock);
        while (ha->cur_vport_count) {
                struct Scsi_Host *scsi_host;
@@ -2719,6 +2782,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
                ha->dpc_thread = NULL;
                kthread_stop(t);
        }
+       qlt_remove_target(ha, base_vha);
 
        qla2x00_free_sysfs_attr(base_vha);
 
@@ -2770,17 +2834,7 @@ qla2x00_free_device(scsi_qla_host_t *vha)
        if (vha->timer_active)
                qla2x00_stop_timer(vha);
 
-       /* Kill the kernel thread for this host */
-       if (ha->dpc_thread) {
-               struct task_struct *t = ha->dpc_thread;
-
-               /*
-                * qla2xxx_wake_dpc checks for ->dpc_thread
-                * so we need to zero it out.
-                */
-               ha->dpc_thread = NULL;
-               kthread_stop(t);
-       }
+       qla2x00_stop_dpc_thread(vha);
 
        qla25xx_delete_queues(vha);
 
@@ -2842,8 +2896,10 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
                spin_unlock_irqrestore(vha->host->host_lock, flags);
                set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
                qla2xxx_wake_dpc(base_vha);
-       } else
+       } else {
                fc_remote_port_delete(rport);
+               qlt_fc_port_deleted(vha, fcport);
+       }
 }
 
 /*
@@ -2859,7 +2915,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
     int do_login, int defer)
 {
        if (atomic_read(&fcport->state) == FCS_ONLINE &&
-           vha->vp_idx == fcport->vp_idx) {
+           vha->vp_idx == fcport->vha->vp_idx) {
                qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
                qla2x00_schedule_rport_del(vha, fcport, defer);
        }
@@ -2908,7 +2964,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
        fc_port_t *fcport;
 
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
-               if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
+               if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
                        continue;
 
                /*
@@ -2921,7 +2977,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
                        qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
                        if (defer)
                                qla2x00_schedule_rport_del(vha, fcport, defer);
-                       else if (vha->vp_idx == fcport->vp_idx)
+                       else if (vha->vp_idx == fcport->vha->vp_idx)
                                qla2x00_schedule_rport_del(vha, fcport, defer);
                }
        }
@@ -2946,10 +3002,13 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
        if (!ha->init_cb)
                goto fail;
 
+       if (qlt_mem_alloc(ha) < 0)
+               goto fail_free_init_cb;
+
        ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
                qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
        if (!ha->gid_list)
-               goto fail_free_init_cb;
+               goto fail_free_tgt_mem;
 
        ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
        if (!ha->srb_mempool)
@@ -3167,6 +3226,8 @@ fail_free_gid_list:
        ha->gid_list_dma);
        ha->gid_list = NULL;
        ha->gid_list_dma = 0;
+fail_free_tgt_mem:
+       qlt_mem_free(ha);
 fail_free_init_cb:
        dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
        ha->init_cb_dma);
@@ -3282,6 +3343,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
        if (ha->ctx_mempool)
                mempool_destroy(ha->ctx_mempool);
 
+       qlt_mem_free(ha);
+
        if (ha->init_cb)
                dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
                        ha->init_cb, ha->init_cb_dma);
@@ -3311,6 +3374,10 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 
        ha->gid_list = NULL;
        ha->gid_list_dma = 0;
+
+       ha->tgt.atio_ring = NULL;
+       ha->tgt.atio_dma = 0;
+       ha->tgt.tgt_vp_map = NULL;
 }
 
 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -3671,10 +3738,9 @@ qla2x00_do_dpc(void *data)
 
                ha->dpc_active = 1;
 
-               ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
-                   "DPC handler waking up.\n");
-               ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
-                   "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
+               ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
+                   "DPC handler waking up, dpc_flags=0x%lx.\n",
+                   base_vha->dpc_flags);
 
                qla2x00_do_work(base_vha);
 
@@ -3740,6 +3806,16 @@ qla2x00_do_dpc(void *data)
                        clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
                }
 
+               if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
+                       int ret;
+                       ret = qla2x00_send_change_request(base_vha, 0x3, 0);
+                       if (ret != QLA_SUCCESS)
+                               ql_log(ql_log_warn, base_vha, 0x121,
+                                   "Failed to enable receiving of RSCN "
+                                   "requests: 0x%x.\n", ret);
+                       clear_bit(SCR_PENDING, &base_vha->dpc_flags);
+               }
+
                if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
                        ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
                            "Quiescence mode scheduled.\n");
@@ -4457,6 +4533,21 @@ qla2x00_module_init(void)
                return -ENOMEM;
        }
 
+       /* Initialize target kmem_cache and mem_pools */
+       ret = qlt_init();
+       if (ret < 0) {
+               kmem_cache_destroy(srb_cachep);
+               return ret;
+       } else if (ret > 0) {
+               /*
+                * If initiator mode is explicitly disabled by qlt_init(),
+                * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
+                * performing scsi_scan_target() during LOOP UP event.
+                */
+               qla2xxx_transport_functions.disable_target_scan = 1;
+               qla2xxx_transport_vport_functions.disable_target_scan = 1;
+       }
+
        /* Derive version string. */
        strcpy(qla2x00_version_str, QLA2XXX_VERSION);
        if (ql2xextended_error_logging)
@@ -4468,6 +4559,7 @@ qla2x00_module_init(void)
                kmem_cache_destroy(srb_cachep);
                ql_log(ql_log_fatal, NULL, 0x0002,
                    "fc_attach_transport failed...Failing load!.\n");
+               qlt_exit();
                return -ENODEV;
        }
 
@@ -4481,6 +4573,7 @@ qla2x00_module_init(void)
            fc_attach_transport(&qla2xxx_transport_vport_functions);
        if (!qla2xxx_transport_vport_template) {
                kmem_cache_destroy(srb_cachep);
+               qlt_exit();
                fc_release_transport(qla2xxx_transport_template);
                ql_log(ql_log_fatal, NULL, 0x0004,
                    "fc_attach_transport vport failed...Failing load!.\n");
@@ -4492,6 +4585,7 @@ qla2x00_module_init(void)
        ret = pci_register_driver(&qla2xxx_pci_driver);
        if (ret) {
                kmem_cache_destroy(srb_cachep);
+               qlt_exit();
                fc_release_transport(qla2xxx_transport_template);
                fc_release_transport(qla2xxx_transport_vport_template);
                ql_log(ql_log_fatal, NULL, 0x0006,
@@ -4511,6 +4605,7 @@ qla2x00_module_exit(void)
        pci_unregister_driver(&qla2xxx_pci_driver);
        qla2x00_release_firmware();
        kmem_cache_destroy(srb_cachep);
+       qlt_exit();
        if (ctx_cachep)
                kmem_cache_destroy(ctx_cachep);
        fc_release_transport(qla2xxx_transport_template);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
new file mode 100644 (file)
index 0000000..04f80eb
--- /dev/null
@@ -0,0 +1,4973 @@
+/*
+ *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
+ *
+ *  based on qla2x00t.c code:
+ *
+ *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ *  Copyright (C) 2004 - 2005 Leonid Stoljar
+ *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ *  Copyright (C) 2006 - 2010 ID7 Ltd.
+ *
+ *  Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ *  Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation, version 2
+ *  of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+
+static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
+module_param(qlini_mode, charp, S_IRUGO);
+MODULE_PARM_DESC(qlini_mode,
+       "Determines when initiator mode will be enabled. Possible values: "
+       "\"exclusive\" - initiator mode will be enabled on load, "
+       "disabled on enabling target mode and then on disabling target mode "
+       "enabled back; "
+       "\"disabled\" - initiator mode will never be enabled; "
+       "\"enabled\" (default) - initiator mode will always stay enabled.");
+
+static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+
+/*
+ * From scsi/fc/fc_fcp.h
+ */
+enum fcp_resp_rsp_codes {
+       FCP_TMF_CMPL = 0,
+       FCP_DATA_LEN_INVALID = 1,
+       FCP_CMND_FIELDS_INVALID = 2,
+       FCP_DATA_PARAM_MISMATCH = 3,
+       FCP_TMF_REJECTED = 4,
+       FCP_TMF_FAILED = 5,
+       FCP_TMF_INVALID_LUN = 9,
+};
+
+/*
+ * fc_pri_ta from scsi/fc/fc_fcp.h
+ */
+#define FCP_PTA_SIMPLE      0   /* simple task attribute */
+#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
+#define FCP_PTA_ORDERED     2   /* ordered task attribute */
+#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
+#define FCP_PTA_MASK        7   /* mask for task attribute field */
+#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
+#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */
+
+/*
+ * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
+ * must be called under HW lock and could unlock/lock it inside.
+ * It isn't an issue, since in the current implementation on the time when
+ * those functions are called:
+ *
+ *   - Either context is IRQ and only IRQ handler can modify HW data,
+ *     including rings related fields,
+ *
+ *   - Or access to target mode variables from struct qla_tgt doesn't
+ *     cross those functions boundaries, except tgt_stop, which
+ *     additionally protected by irq_cmd_count.
+ */
+/* Predefs for callbacks handed to qla2xxx LLD */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
+       struct atio_from_isp *pkt);
+static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
+static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+       int fn, void *iocb, int flags);
+static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
+       *cmd, struct atio_from_isp *atio, int ha_locked);
+static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
+       struct qla_tgt_srr_imm *imm, int ha_lock);
+/*
+ * Global Variables
+ */
+static struct kmem_cache *qla_tgt_cmd_cachep;
+static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
+static mempool_t *qla_tgt_mgmt_cmd_mempool;
+static struct workqueue_struct *qla_tgt_wq;
+static DEFINE_MUTEX(qla_tgt_mutex);
+static LIST_HEAD(qla_tgt_glist);
+
+/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
+static struct qla_tgt_sess *qlt_find_sess_by_port_name(
+       struct qla_tgt *tgt,
+       const uint8_t *port_name)
+{
+       struct qla_tgt_sess *sess;
+
+       list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+               if (!memcmp(sess->port_name, port_name, WWN_SIZE))
+                       return sess;
+       }
+
+       return NULL;
+}
+
+/* Might release hw lock, then reacquire!! */
+static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
+{
+       /* Send marker if required */
+       if (unlikely(vha->marker_needed != 0)) {
+               int rc = qla2x00_issue_marker(vha, vha_locked);
+               if (rc != QLA_SUCCESS) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe03d,
+                           "qla_target(%d): issue_marker() failed\n",
+                           vha->vp_idx);
+               }
+               return rc;
+       }
+       return QLA_SUCCESS;
+}
+
+static inline
+struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
+       uint8_t *d_id)
+{
+       struct qla_hw_data *ha = vha->hw;
+       uint8_t vp_idx;
+
+       if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
+               return NULL;
+
+       if (vha->d_id.b.al_pa == d_id[2])
+               return vha;
+
+       BUG_ON(ha->tgt.tgt_vp_map == NULL);
+       vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
+       if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+               return ha->tgt.tgt_vp_map[vp_idx].vha;
+
+       return NULL;
+}
+
+static inline
+struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
+       uint16_t vp_idx)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (vha->vp_idx == vp_idx)
+               return vha;
+
+       BUG_ON(ha->tgt.tgt_vp_map == NULL);
+       if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+               return ha->tgt.tgt_vp_map[vp_idx].vha;
+
+       return NULL;
+}
+
+void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
+       struct atio_from_isp *atio)
+{
+       switch (atio->u.raw.entry_type) {
+       case ATIO_TYPE7:
+       {
+               struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
+                   atio->u.isp24.fcp_hdr.d_id);
+               if (unlikely(NULL == host)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe03e,
+                           "qla_target(%d): Received ATIO_TYPE7 "
+                           "with unknown d_id %x:%x:%x\n", vha->vp_idx,
+                           atio->u.isp24.fcp_hdr.d_id[0],
+                           atio->u.isp24.fcp_hdr.d_id[1],
+                           atio->u.isp24.fcp_hdr.d_id[2]);
+                       break;
+               }
+               qlt_24xx_atio_pkt(host, atio);
+               break;
+       }
+
+       case IMMED_NOTIFY_TYPE:
+       {
+               struct scsi_qla_host *host = vha;
+               struct imm_ntfy_from_isp *entry =
+                   (struct imm_ntfy_from_isp *)atio;
+
+               if ((entry->u.isp24.vp_index != 0xFF) &&
+                   (entry->u.isp24.nport_handle != 0xFFFF)) {
+                       host = qlt_find_host_by_vp_idx(vha,
+                           entry->u.isp24.vp_index);
+                       if (unlikely(!host)) {
+                               ql_dbg(ql_dbg_tgt, vha, 0xe03f,
+                                   "qla_target(%d): Received "
+                                   "ATIO (IMMED_NOTIFY_TYPE) "
+                                   "with unknown vp_index %d\n",
+                                   vha->vp_idx, entry->u.isp24.vp_index);
+                               break;
+                       }
+               }
+               qlt_24xx_atio_pkt(host, atio);
+               break;
+       }
+
+       default:
+               ql_dbg(ql_dbg_tgt, vha, 0xe040,
+                   "qla_target(%d): Received unknown ATIO atio "
+                   "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+               break;
+       }
+
+       return;
+}
+
+void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
+{
+       switch (pkt->entry_type) {
+       case CTIO_TYPE7:
+       {
+               struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+               struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+                   entry->vp_index);
+               if (unlikely(!host)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe041,
+                           "qla_target(%d): Response pkt (CTIO_TYPE7) "
+                           "received, with unknown vp_index %d\n",
+                           vha->vp_idx, entry->vp_index);
+                       break;
+               }
+               qlt_response_pkt(host, pkt);
+               break;
+       }
+
+       case IMMED_NOTIFY_TYPE:
+       {
+               struct scsi_qla_host *host = vha;
+               struct imm_ntfy_from_isp *entry =
+                   (struct imm_ntfy_from_isp *)pkt;
+
+               host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
+               if (unlikely(!host)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe042,
+                           "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
+                           "received, with unknown vp_index %d\n",
+                           vha->vp_idx, entry->u.isp24.vp_index);
+                       break;
+               }
+               qlt_response_pkt(host, pkt);
+               break;
+       }
+
+       case NOTIFY_ACK_TYPE:
+       {
+               struct scsi_qla_host *host = vha;
+               struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+
+               if (0xFF != entry->u.isp24.vp_index) {
+                       host = qlt_find_host_by_vp_idx(vha,
+                           entry->u.isp24.vp_index);
+                       if (unlikely(!host)) {
+                               ql_dbg(ql_dbg_tgt, vha, 0xe043,
+                                   "qla_target(%d): Response "
+                                   "pkt (NOTIFY_ACK_TYPE) "
+                                   "received, with unknown "
+                                   "vp_index %d\n", vha->vp_idx,
+                                   entry->u.isp24.vp_index);
+                               break;
+                       }
+               }
+               qlt_response_pkt(host, pkt);
+               break;
+       }
+
+       case ABTS_RECV_24XX:
+       {
+               struct abts_recv_from_24xx *entry =
+                   (struct abts_recv_from_24xx *)pkt;
+               struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+                   entry->vp_index);
+               if (unlikely(!host)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe044,
+                           "qla_target(%d): Response pkt "
+                           "(ABTS_RECV_24XX) received, with unknown "
+                           "vp_index %d\n", vha->vp_idx, entry->vp_index);
+                       break;
+               }
+               qlt_response_pkt(host, pkt);
+               break;
+       }
+
+       case ABTS_RESP_24XX:
+       {
+               struct abts_resp_to_24xx *entry =
+                   (struct abts_resp_to_24xx *)pkt;
+               struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+                   entry->vp_index);
+               if (unlikely(!host)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe045,
+                           "qla_target(%d): Response pkt "
+                           "(ABTS_RECV_24XX) received, with unknown "
+                           "vp_index %d\n", vha->vp_idx, entry->vp_index);
+                       break;
+               }
+               qlt_response_pkt(host, pkt);
+               break;
+       }
+
+       default:
+               qlt_response_pkt(vha, pkt);
+               break;
+       }
+
+}
+
+static void qlt_free_session_done(struct work_struct *work)
+{
+       struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
+           free_work);
+       struct qla_tgt *tgt = sess->tgt;
+       struct scsi_qla_host *vha = sess->vha;
+       struct qla_hw_data *ha = vha->hw;
+
+       BUG_ON(!tgt);
+       /*
+        * Release the target session for FC Nexus from fabric module code.
+        */
+       if (sess->se_sess != NULL)
+               ha->tgt.tgt_ops->free_session(sess);
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
+           "Unregistration of sess %p finished\n", sess);
+
+       kfree(sess);
+       /*
+        * We need to protect against race, when tgt is freed before or
+        * inside wake_up()
+        */
+       tgt->sess_count--;
+       if (tgt->sess_count == 0)
+               wake_up_all(&tgt->waitQ);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+void qlt_unreg_sess(struct qla_tgt_sess *sess)
+{
+       struct scsi_qla_host *vha = sess->vha;
+
+       vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+
+       list_del(&sess->sess_list_entry);
+       if (sess->deleted)
+               list_del(&sess->del_list_entry);
+
+       INIT_WORK(&sess->free_work, qlt_free_session_done);
+       schedule_work(&sess->free_work);
+}
+EXPORT_SYMBOL(qlt_unreg_sess);
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess = NULL;
+       uint32_t unpacked_lun, lun = 0;
+       uint16_t loop_id;
+       int res = 0;
+       struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
+       struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+
+       loop_id = le16_to_cpu(n->u.isp24.nport_handle);
+       if (loop_id == 0xFFFF) {
+#if 0 /* FIXME: Re-enable Global event handling.. */
+               /* Global event */
+               atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
+               qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
+               if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
+                       sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
+                           typeof(*sess), sess_list_entry);
+                       switch (mcmd) {
+                       case QLA_TGT_NEXUS_LOSS_SESS:
+                               mcmd = QLA_TGT_NEXUS_LOSS;
+                               break;
+                       case QLA_TGT_ABORT_ALL_SESS:
+                               mcmd = QLA_TGT_ABORT_ALL;
+                               break;
+                       case QLA_TGT_NEXUS_LOSS:
+                       case QLA_TGT_ABORT_ALL:
+                               break;
+                       default:
+                               ql_dbg(ql_dbg_tgt, vha, 0xe046,
+                                   "qla_target(%d): Not allowed "
+                                   "command %x in %s", vha->vp_idx,
+                                   mcmd, __func__);
+                               sess = NULL;
+                               break;
+                       }
+               } else
+                       sess = NULL;
+#endif
+       } else {
+               sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+       }
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe000,
+           "Using sess for qla_tgt_reset: %p\n", sess);
+       if (!sess) {
+               res = -ESRCH;
+               return res;
+       }
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe047,
+           "scsi(%ld): resetting (session %p from port "
+           "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
+           "mcmd %x, loop_id %d)\n", vha->host_no, sess,
+           sess->port_name[0], sess->port_name[1],
+           sess->port_name[2], sess->port_name[3],
+           sess->port_name[4], sess->port_name[5],
+           sess->port_name[6], sess->port_name[7],
+           mcmd, loop_id);
+
+       lun = a->u.isp24.fcp_cmnd.lun;
+       unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+       return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
+           iocb, QLA24XX_MGMT_SEND_NACK);
+}
+
+/*
+ * Queue @sess on tgt->del_sess_list and arm the deletion work.
+ * If @immediate, the session is torn down right away; otherwise it gets a
+ * grace period of port_down_retry_count + 5 seconds.
+ *
+ * ha->hardware_lock supposed to be held on entry
+ */
+static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
+       bool immediate)
+{
+       struct qla_tgt *tgt = sess->tgt;
+       uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
+
+       /* Already queued for deletion; nothing to do. */
+       if (sess->deleted)
+               return;
+
+       ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+           "Scheduling sess %p for deletion\n", sess);
+       list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
+       sess->deleted = 1;
+
+       if (immediate)
+               dev_loss_tmo = 0;
+
+       sess->expires = jiffies + dev_loss_tmo * HZ;
+
+       ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
+           "qla_target(%d): session for port %02x:%02x:%02x:"
+           "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
+           "deletion in %u secs (expires: %lu) immed: %d\n",
+           sess->vha->vp_idx,
+           sess->port_name[0], sess->port_name[1],
+           sess->port_name[2], sess->port_name[3],
+           sess->port_name[4], sess->port_name[5],
+           sess->port_name[6], sess->port_name[7],
+           sess->loop_id, dev_loss_tmo, sess->expires, immediate);
+
+       if (immediate)
+               schedule_delayed_work(&tgt->sess_del_work, 0);
+       else
+               /*
+                * Delay until the session expires.  The original code passed
+                * "jiffies - sess->expires", which is negative here (expires
+                * is in the future) and therefore an enormous unsigned delay,
+                * so delayed deletions effectively never ran on time.
+                */
+               schedule_delayed_work(&tgt->sess_del_work,
+                   sess->expires - jiffies);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
+{
+       struct qla_tgt_sess *sess;
+
+       /*
+        * Queue every known session for immediate deletion.  Sessions are
+        * not removed from tgt->sess_list here -- qlt_schedule_sess_for_deletion()
+        * only links them onto tgt->del_sess_list -- so the plain (non-_safe)
+        * iterator is sufficient.
+        *
+        * NOTE(review): the local_only argument is currently ignored; every
+        * session is scheduled regardless -- confirm this is intended.
+        */
+       list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
+               qlt_schedule_sess_for_deletion(sess, true);
+
+       /* At this point tgt could be already dead */
+}
+
+/*
+ * Look up the firmware loop ID for the port with FC address @s_id.
+ * Returns 0 and fills *@loop_id on success, -ENOMEM on allocation failure
+ * and -1 when the mailbox command fails or no entry matches.
+ */
+static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
+       uint16_t *loop_id)
+{
+       struct qla_hw_data *ha = vha->hw;
+       dma_addr_t gid_list_dma;
+       struct gid_list_info *gid_list;
+       char *pos;
+       int ret, rc, i;
+       uint16_t entries;
+
+       /* DMA buffer large enough for the firmware's logged-in ID list. */
+       gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+           &gid_list_dma, GFP_KERNEL);
+       if (!gid_list) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
+                   "qla_target(%d): DMA Alloc failed of %u\n",
+                   vha->vp_idx, qla2x00_gid_list_size(ha));
+               return -ENOMEM;
+       }
+
+       /* Get list of logged in devices */
+       rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
+       if (rc != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
+                   "qla_target(%d): get_id_list() failed: %x\n",
+                   vha->vp_idx, rc);
+               ret = -1;
+               goto out_free_id_list;
+       }
+
+       /*
+        * Scan the entries for a matching port address.  Entries are spaced
+        * ha->gid_list_info_size bytes apart, which may differ from
+        * sizeof(struct gid_list_info), hence the byte-wise stepping.
+        */
+       ret = -1;
+       pos = (char *)gid_list;
+       for (i = 0; i < entries; i++, pos += ha->gid_list_info_size) {
+               struct gid_list_info *gid = (struct gid_list_info *)pos;
+
+               if ((gid->al_pa == s_id[2]) &&
+                   (gid->area == s_id[1]) &&
+                   (gid->domain == s_id[0])) {
+                       *loop_id = le16_to_cpu(gid->loop_id);
+                       ret = 0;
+                       break;
+               }
+       }
+
+out_free_id_list:
+       dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+           gid_list, gid_list_dma);
+       return ret;
+}
+
+/*
+ * Check whether the remote port behind @sess still exists in the firmware's
+ * node-name list, and if so refresh the session's s_id/loop_id from the
+ * port database.  Returns true when the port is still present (deletion
+ * should be cancelled), false otherwise.  Sleeps; must not be called with
+ * ha->hardware_lock held.
+ */
+static bool qlt_check_fcport_exist(struct scsi_qla_host *vha,
+       struct qla_tgt_sess *sess)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_port_24xx_data *pmap24;
+       bool res, found;
+       int rc, i;
+       uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */
+       uint16_t entries;
+       void *pmap;
+       int pmap_len;
+       fc_port_t *fcport;
+       int global_resets;
+
+retry:
+       /*
+        * Reset per-attempt state: the original code never cleared "found"
+        * on retry, so a stale match from a previous pass could be reused
+        * even if the port vanished from the new list.
+        */
+       found = false;
+       global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+
+       rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len);
+       if (rc != QLA_SUCCESS) {
+               res = false;
+               goto out;
+       }
+
+       pmap24 = pmap;
+       entries = pmap_len/sizeof(*pmap24);
+
+       /* Look for this session's WWPN in the firmware's node list. */
+       for (i = 0; i < entries; ++i) {
+               if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) {
+                       loop_id = le16_to_cpu(pmap24[i].loop_id);
+                       found = true;
+                       break;
+               }
+       }
+
+       kfree(pmap);
+
+       if (!found) {
+               res = false;
+               goto out;
+       }
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046,
+           "qlt_check_fcport_exist(): loop_id %d", loop_id);
+
+       /* Temporary fc_port only used to query the port database. */
+       fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+       if (fcport == NULL) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047,
+                   "qla_target(%d): Allocation of tmp FC port failed",
+                   vha->vp_idx);
+               res = false;
+               goto out;
+       }
+
+       fcport->loop_id = loop_id;
+
+       rc = qla2x00_get_port_database(vha, fcport, 0);
+       if (rc != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048,
+                   "qla_target(%d): Failed to retrieve fcport "
+                   "information -- get_port_database() returned %x "
+                   "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
+               res = false;
+               goto out_free_fcport;
+       }
+
+       if (global_resets !=
+           atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
+                   "qla_target(%d): global reset during session discovery"
+                   " (counter was %d, new %d), retrying",
+                   vha->vp_idx, global_resets,
+                   atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+               /*
+                * Free the temporary fcport before looping back; the
+                * original code leaked it on every retry.
+                */
+               kfree(fcport);
+               goto retry;
+       }
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
+           "Updating sess %p s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, "
+           "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa,
+           sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
+           fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);
+
+       sess->s_id = fcport->d_id;
+       sess->loop_id = fcport->loop_id;
+       sess->conf_compl_supported = !!(fcport->flags &
+           FCF_CONF_COMP_SUPPORTED);
+
+       res = true;
+
+out_free_fcport:
+       kfree(fcport);
+
+out:
+       return res;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_undelete_sess(struct qla_tgt_sess *sess)
+{
+       /* Must only be called for a session queued on the deletion list. */
+       BUG_ON(!sess->deleted);
+
+       sess->deleted = 0;
+       list_del(&sess->del_list_entry);
+}
+
+/*
+ * Delayed-work handler that walks tgt->del_sess_list and tears down every
+ * session whose grace period has expired, unless the firmware still knows
+ * the remote port (in which case deletion is cancelled).
+ */
+static void qlt_del_sess_work_fn(struct delayed_work *work)
+{
+       struct qla_tgt *tgt = container_of(work, struct qla_tgt,
+           sess_del_work);
+       struct scsi_qla_host *vha = tgt->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       while (!list_empty(&tgt->del_sess_list)) {
+               sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
+                   del_list_entry);
+               if (time_after_eq(jiffies, sess->expires)) {
+                       bool cancel;
+
+                       qlt_undelete_sess(sess);
+
+                       /*
+                        * qlt_check_fcport_exist() sleeps, so drop the
+                        * hardware lock around the call.
+                        */
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+                       cancel = qlt_check_fcport_exist(vha, sess);
+
+                       if (cancel) {
+                               if (sess->deleted) {
+                                       /*
+                                        * sess was again deleted while we were
+                                        * discovering it
+                                        */
+                                       spin_lock_irqsave(&ha->hardware_lock,
+                                           flags);
+                                       continue;
+                               }
+
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049,
+                                   "qla_target(%d): cancel deletion of "
+                                   "session for port %02x:%02x:%02x:%02x:%02x:"
+                                   "%02x:%02x:%02x (loop ID %d), because "
+                                   " it isn't deleted by firmware",
+                                   vha->vp_idx, sess->port_name[0],
+                                   sess->port_name[1], sess->port_name[2],
+                                   sess->port_name[3], sess->port_name[4],
+                                   sess->port_name[5], sess->port_name[6],
+                                   sess->port_name[7], sess->loop_id);
+                       } else {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
+                                   "Timeout: sess %p about to be deleted\n",
+                                   sess);
+                               ha->tgt.tgt_ops->shutdown_sess(sess);
+                               ha->tgt.tgt_ops->put_sess(sess);
+                       }
+
+                       spin_lock_irqsave(&ha->hardware_lock, flags);
+               } else {
+                       /*
+                        * Head of the list has not expired yet: re-arm the
+                        * work for the remaining time.  The original code
+                        * passed "jiffies - sess->expires", a negative (i.e.
+                        * huge unsigned) delay, so expiry was never
+                        * re-checked on time.
+                        */
+                       schedule_delayed_work(&tgt->sess_del_work,
+                           sess->expires - jiffies);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/*
+ * Adds an extra ref to allow to drop hw lock after adding sess to the list.
+ * Caller must put it.
+ */
+static struct qla_tgt_sess *qlt_create_sess(
+       struct scsi_qla_host *vha,
+       fc_port_t *fcport,
+       bool local)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess;
+       unsigned long flags;
+       unsigned char be_sid[3];
+
+       /* Check to avoid double sessions */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
+                               sess_list_entry) {
+               if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
+                           "Double sess %p found (s_id %x:%x:%x, "
+                           "loop_id %d), updating to d_id %x:%x:%x, "
+                           "loop_id %d", sess, sess->s_id.b.domain,
+                           sess->s_id.b.al_pa, sess->s_id.b.area,
+                           sess->loop_id, fcport->d_id.b.domain,
+                           fcport->d_id.b.al_pa, fcport->d_id.b.area,
+                           fcport->loop_id);
+
+                       /* Revive the session if it was queued for deletion
+                        * and refresh its addressing from the new fcport. */
+                       if (sess->deleted)
+                               qlt_undelete_sess(sess);
+
+                       /* Extra reference for the caller (see header). */
+                       kref_get(&sess->se_sess->sess_kref);
+                       sess->s_id = fcport->d_id;
+                       sess->loop_id = fcport->loop_id;
+                       sess->conf_compl_supported = !!(fcport->flags &
+                           FCF_CONF_COMP_SUPPORTED);
+                       /* A non-local registration upgrades a local sess. */
+                       if (sess->local && !local)
+                               sess->local = 0;
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+                       return sess;
+               }
+       }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+       if (!sess) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
+                   "qla_target(%u): session allocation failed, "
+                   "all commands from port %02x:%02x:%02x:%02x:"
+                   "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
+                   fcport->port_name[0], fcport->port_name[1],
+                   fcport->port_name[2], fcport->port_name[3],
+                   fcport->port_name[4], fcport->port_name[5],
+                   fcport->port_name[6], fcport->port_name[7]);
+
+               return NULL;
+       }
+       sess->tgt = ha->tgt.qla_tgt;
+       sess->vha = vha;
+       sess->s_id = fcport->d_id;
+       sess->loop_id = fcport->loop_id;
+       sess->local = local;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
+           "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
+           sess, ha->tgt.qla_tgt);
+
+       /* s_id repacked as a domain/area/al_pa byte triple for the ACL call. */
+       be_sid[0] = sess->s_id.b.domain;
+       be_sid[1] = sess->s_id.b.area;
+       be_sid[2] = sess->s_id.b.al_pa;
+       /*
+        * Determine if this fc_port->port_name is allowed to access
+        * target mode using explicit NodeACLs+MappedLUNs, or using
+        * TPG demo mode.  If this is successful a target mode FC nexus
+        * is created.
+        */
+       if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+           &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
+               /* ACL check failed: sess was never published, safe to free. */
+               kfree(sess);
+               return NULL;
+       }
+       /*
+        * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
+        * access across ->hardware_lock reacquire.
+        */
+       kref_get(&sess->se_sess->sess_kref);
+
+       sess->conf_compl_supported = !!(fcport->flags &
+           FCF_CONF_COMP_SUPPORTED);
+       /* Compile-time guard: both port_name arrays must match in size. */
+       BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
+       memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
+
+       /* Publish the new session under the hardware lock. */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
+       ha->tgt.qla_tgt->sess_count++;
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
+           "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
+           "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
+           " completion %ssupported) added\n",
+           vha->vp_idx, local ?  "local " : "", fcport->port_name[0],
+           fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
+           fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
+           fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
+           sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
+           "" : "not ");
+
+       return sess;
+}
+
+/*
+ * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+ *
+ * Creates (or revives) the target session for a newly registered remote
+ * port.  Only initiator-mode ports get sessions.
+ */
+void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt_sess *sess;
+       unsigned long flags;
+
+       if (!vha->hw->tgt.tgt_ops)
+               return;
+
+       if (!tgt || (fcport->port_type != FCT_INITIATOR))
+               return;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       if (tgt->tgt_stop) {
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               return;
+       }
+       sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+       if (!sess) {
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+               /* tgt_mutex serializes session creation with target stop. */
+               mutex_lock(&ha->tgt.tgt_mutex);
+               sess = qlt_create_sess(vha, fcport, false);
+               mutex_unlock(&ha->tgt.tgt_mutex);
+
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+       } else {
+               /* Existing session: take a ref for the put_sess() below. */
+               kref_get(&sess->se_sess->sess_kref);
+
+               if (sess->deleted) {
+                       qlt_undelete_sess(sess);
+
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
+                           "qla_target(%u): %ssession for port %02x:"
+                           "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
+                           "reappeared\n", vha->vp_idx, sess->local ? "local "
+                           : "", sess->port_name[0], sess->port_name[1],
+                           sess->port_name[2], sess->port_name[3],
+                           sess->port_name[4], sess->port_name[5],
+                           sess->port_name[6], sess->port_name[7],
+                           sess->loop_id);
+
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
+                           "Reappeared sess %p\n", sess);
+               }
+               sess->s_id = fcport->d_id;
+               sess->loop_id = fcport->loop_id;
+               sess->conf_compl_supported = !!(fcport->flags &
+                   FCF_CONF_COMP_SUPPORTED);
+       }
+
+       if (sess && sess->local) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
+                   "qla_target(%u): local session for "
+                   "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+                   "(loop ID %d) became global\n", vha->vp_idx,
+                   fcport->port_name[0], fcport->port_name[1],
+                   fcport->port_name[2], fcport->port_name[3],
+                   fcport->port_name[4], fcport->port_name[5],
+                   fcport->port_name[6], fcport->port_name[7],
+                   sess->loop_id);
+               sess->local = 0;
+       }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       /*
+        * Drop the reference taken above (kref_get() or the extra ref
+        * returned by qlt_create_sess()).  Guard against a failed session
+        * creation: the original code called put_sess(NULL) here, which
+        * dereferences NULL inside the tgt_ops implementation.
+        */
+       if (sess)
+               ha->tgt.tgt_ops->put_sess(sess);
+}
+
+/*
+ * Remote-port removal hook: mark the matching session local and queue it
+ * for delayed deletion (grace period, in case the port reappears).
+ */
+void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt_sess *sess = NULL;
+       unsigned long flags;
+
+       /* Nothing to do unless target mode is set up for this host. */
+       if (!ha->tgt.tgt_ops || !tgt || fcport->port_type != FCT_INITIATOR)
+               return;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       if (!tgt->tgt_stop)
+               sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+       if (sess) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008,
+                   "qla_tgt_fc_port_deleted %p", sess);
+
+               sess->local = 1;
+               qlt_schedule_sess_for_deletion(sess, false);
+       }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/* Wait condition helper: true once the target has no sessions left. */
+static inline int test_tgt_sess_count(struct qla_tgt *tgt)
+{
+       struct qla_hw_data *ha = tgt->ha;
+       unsigned long flags;
+       int empty;
+
+       /*
+        * We need to protect against race, when tgt is freed before or
+        * inside wake_up()
+        */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
+           "tgt %p, empty(sess_list)=%d sess_count=%d\n",
+           tgt, list_empty(&tgt->sess_list), tgt->sess_count);
+       empty = (tgt->sess_count == 0);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return empty;
+}
+
+/* Called by tcm_qla2xxx configfs code */
+void qlt_stop_phase1(struct qla_tgt *tgt)
+{
+       struct scsi_qla_host *vha = tgt->vha;
+       struct qla_hw_data *ha = tgt->ha;
+       unsigned long flags;
+
+       /* Stopping twice indicates a caller bug; dump the stack to find it. */
+       if (tgt->tgt_stop || tgt->tgt_stopped) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
+                   "Already in tgt->tgt_stop or tgt_stopped state\n");
+               dump_stack();
+               return;
+       }
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
+           vha->host_no, vha);
+       /*
+        * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
+        * Lock is needed, because we still can get an incoming packet.
+        */
+       mutex_lock(&ha->tgt.tgt_mutex);
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       tgt->tgt_stop = 1;
+       /* Schedule every known session for immediate deletion. */
+       qlt_clear_tgt_db(tgt, true);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       mutex_unlock(&ha->tgt.tgt_mutex);
+
+       /* Make sure all queued deletions have actually run. */
+       flush_delayed_work_sync(&tgt->sess_del_work);
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
+           "Waiting for sess works (tgt %p)", tgt);
+       /* Drain tgt->sess_works_list; flush with the lock dropped since the
+        * work items take sess_work_lock themselves. */
+       spin_lock_irqsave(&tgt->sess_work_lock, flags);
+       while (!list_empty(&tgt->sess_works_list)) {
+               spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+               flush_scheduled_work();
+               spin_lock_irqsave(&tgt->sess_work_lock, flags);
+       }
+       spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
+           "Waiting for tgt %p: list_empty(sess_list)=%d "
+           "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
+           tgt->sess_count);
+
+       /* Block until the last session reference is dropped. */
+       wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+
+       /* Big hammer */
+       if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
+               qlt_disable_vha(vha);
+
+       /* Wait for sessions to clear out (just in case) */
+       wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+}
+EXPORT_SYMBOL(qlt_stop_phase1);
+
+/* Called by tcm_qla2xxx configfs code */
+void qlt_stop_phase2(struct qla_tgt *tgt)
+{
+       struct qla_hw_data *ha = tgt->ha;
+       unsigned long flags;
+
+       if (tgt->tgt_stopped) {
+               ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
+                   "Already in tgt->tgt_stopped state\n");
+               dump_stack();
+               return;
+       }
+
+       ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
+           "Waiting for %d IRQ commands to complete (tgt %p)",
+           tgt->irq_cmd_count, tgt);
+
+       /*
+        * Busy-wait for in-flight IRQ-context commands to drain; the lock
+        * must be dropped each iteration so the IRQ path can decrement
+        * irq_cmd_count.
+        */
+       mutex_lock(&ha->tgt.tgt_mutex);
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       while (tgt->irq_cmd_count != 0) {
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               udelay(2);
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+       }
+       tgt->tgt_stop = 0;
+       tgt->tgt_stopped = 1;
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       mutex_unlock(&ha->tgt.tgt_mutex);
+
+       ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
+           tgt);
+}
+EXPORT_SYMBOL(qlt_stop_phase2);
+
+/* Called from qlt_remove_target() -> qla2x00_remove_one() */
+void qlt_release(struct qla_tgt *tgt)
+{
+       struct qla_hw_data *ha = tgt->ha;
+
+       /* Make sure phase-2 stop has run before the structure is freed. */
+       if (ha->tgt.qla_tgt != NULL && !tgt->tgt_stopped)
+               qlt_stop_phase2(tgt);
+
+       /* Detach from the HA before freeing. */
+       ha->tgt.qla_tgt = NULL;
+
+       ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
+           "Release of tgt %p finished\n", tgt);
+
+       kfree(tgt);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
+       const void *param, unsigned int param_size)
+{
+       struct qla_tgt_sess_work_param *work_prm;
+       unsigned long flags;
+
+       /* GFP_ATOMIC: the caller holds the hardware spinlock. */
+       work_prm = kzalloc(sizeof(*work_prm), GFP_ATOMIC);
+       if (work_prm == NULL) {
+               ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
+                   "qla_target(%d): Unable to create session "
+                   "work, command will be refused", 0);
+               return -ENOMEM;
+       }
+
+       ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
+           "Scheduling work (type %d, prm %p)"
+           " to find session for param %p (size %d, tgt %p)\n",
+           type, work_prm, param, param_size, tgt);
+
+       work_prm->type = type;
+       memcpy(&work_prm->tm_iocb, param, param_size);
+
+       spin_lock_irqsave(&tgt->sess_work_lock, flags);
+       list_add_tail(&work_prm->sess_works_list_entry, &tgt->sess_works_list);
+       spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+       schedule_work(&tgt->sess_work);
+
+       return 0;
+}
+
+/*
+ * Build and post a NOTIFY_ACK IOCB echoing the fields of the incoming
+ * immediate notify @ntfy back to the firmware.
+ *
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then
+ * reacquire.
+ */
+static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *ntfy,
+       uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
+       uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
+{
+       struct qla_hw_data *ha = vha->hw;
+       request_t *pkt;
+       struct nack_to_isp *nack;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
+
+       /* Send marker if required */
+       if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+               return;
+
+       /* Silently dropped on ring-full; no retry path here. */
+       pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+       if (!pkt) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe049,
+                   "qla_target(%d): %s failed: unable to allocate "
+                   "request packet\n", vha->vp_idx, __func__);
+               return;
+       }
+
+       /* Let the response path know another notify-ack is in flight. */
+       if (ha->tgt.qla_tgt != NULL)
+               ha->tgt.qla_tgt->notify_ack_expected++;
+
+       pkt->entry_type = NOTIFY_ACK_TYPE;
+       pkt->entry_count = 1;
+
+       /* Echo the relevant fields of the notify back in the ack. */
+       nack = (struct nack_to_isp *)pkt;
+       nack->ox_id = ntfy->ox_id;
+
+       nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+       if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+               nack->u.isp24.flags = ntfy->u.isp24.flags &
+                       __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+       }
+       nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+       nack->u.isp24.status = ntfy->u.isp24.status;
+       nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+       nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+       nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+       nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+       nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
+       nack->u.isp24.srr_reject_code = srr_reject_code;
+       nack->u.isp24.srr_reject_code_expl = srr_explan;
+       nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe005,
+           "qla_target(%d): Sending 24xx Notify Ack %d\n",
+           vha->vp_idx, nack->u.isp24.status);
+
+       qla2x00_start_iocbs(vha, vha->req);
+}
+
+/*
+ * Build and post an ABTS response IOCB: BA_ACC when @status is
+ * FCP_TMF_CMPL, BA_RJT otherwise.  @ids_reversed says whether the s_id /
+ * d_id in @abts are already from our point of view (true when responding
+ * to a firmware-generated entry) or must be swapped.
+ *
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then
+ * reacquire.
+ */
+static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
+       struct abts_recv_from_24xx *abts, uint32_t status,
+       bool ids_reversed)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct abts_resp_to_24xx *resp;
+       uint32_t f_ctl;
+       uint8_t *p;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe006,
+           "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
+           ha, abts, status);
+
+       /* Send marker if required */
+       if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+               return;
+
+       resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+       if (!resp) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe04a,
+                   "qla_target(%d): %s failed: unable to allocate "
+                   "request packet", vha->vp_idx, __func__);
+               return;
+       }
+
+       resp->entry_type = ABTS_RESP_24XX;
+       resp->entry_count = 1;
+       resp->nport_handle = abts->nport_handle;
+       resp->vp_index = vha->vp_idx;
+       resp->sof_type = abts->sof_type;
+       resp->exchange_address = abts->exchange_address;
+       resp->fcp_hdr_le = abts->fcp_hdr_le;
+       /* f_ctl is stored as 3 raw bytes; copy the LE value byte-wise. */
+       f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+           F_CTL_LAST_SEQ | F_CTL_END_SEQ |
+           F_CTL_SEQ_INITIATIVE);
+       p = (uint8_t *)&f_ctl;
+       resp->fcp_hdr_le.f_ctl[0] = *p++;
+       resp->fcp_hdr_le.f_ctl[1] = *p++;
+       resp->fcp_hdr_le.f_ctl[2] = *p;
+       if (ids_reversed) {
+               /* IDs already reversed: copy through unchanged. */
+               resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
+               resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
+               resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
+               resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
+               resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
+               resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
+       } else {
+               /* Reply to the sender: swap source and destination IDs. */
+               resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
+               resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
+               resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
+               resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
+               resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
+               resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
+       }
+       resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
+       if (status == FCP_TMF_CMPL) {
+               /* Abort succeeded: basic accept payload. */
+               resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
+               resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
+               resp->payload.ba_acct.low_seq_cnt = 0x0000;
+               resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+               resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
+               resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
+       } else {
+               /* Abort failed/rejected: basic reject payload. */
+               resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
+               resp->payload.ba_rjt.reason_code =
+                       BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
+               /* Other bytes are zero */
+       }
+
+       ha->tgt.qla_tgt->abts_resp_expected++;
+
+       qla2x00_start_iocbs(vha, vha->req);
+}
+
+/*
+ * Retry terminating an exchange after the firmware rejected our ABTS
+ * response: send a terminate CTIO7 and then re-send the ABTS response.
+ *
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then
+ * reacquire.
+ */
+static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
+       struct abts_resp_from_24xx_fw *entry)
+{
+       struct ctio7_to_24xx *ctio;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe007,
+           "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
+       /* Send marker if required */
+       if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+               return;
+
+       ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+       if (ctio == NULL) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe04b,
+                   "qla_target(%d): %s failed: unable to allocate "
+                   "request packet\n", vha->vp_idx, __func__);
+               return;
+       }
+
+       /*
+        * We've got on entrance firmware's response on by us generated
+        * ABTS response. So, in it ID fields are reversed.
+        */
+
+       ctio->entry_type = CTIO_TYPE7;
+       ctio->entry_count = 1;
+       ctio->nport_handle = entry->nport_handle;
+       /* Skip-handle marker: no completion processing for this CTIO. */
+       ctio->handle = QLA_TGT_SKIP_HANDLE |    CTIO_COMPLETION_HANDLE_MARK;
+       ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+       ctio->vp_index = vha->vp_idx;
+       /* IDs in @entry are reversed (see comment above): d_id is initiator. */
+       ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
+       ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
+       ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
+       ctio->exchange_addr = entry->exchange_addr_to_abort;
+       ctio->u.status1.flags =
+           __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+               CTIO7_FLAGS_TERMINATE);
+       ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
+
+       qla2x00_start_iocbs(vha, vha->req);
+
+       /* Re-send the ABTS accept now that the exchange is terminated. */
+       qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
+           FCP_TMF_CMPL, true);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+       struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_mgmt_cmd *mcmd;
+       int ret;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
+           "qla_target(%d): task abort (tag=%d)\n",
+           vha->vp_idx, abts->exchange_addr_to_abort);
+
+       /* GFP_ATOMIC: the caller holds the hardware spinlock. */
+       mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+       if (!mcmd) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
+                   "qla_target(%d): %s: Allocation of ABORT cmd failed",
+                   vha->vp_idx, __func__);
+               return -ENOMEM;
+       }
+       memset(mcmd, 0, sizeof(*mcmd));
+
+       /* Keep a copy of the original ABTS for the eventual response. */
+       mcmd->sess = sess;
+       memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
+
+       ret = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
+           abts->exchange_addr_to_abort);
+       if (ret != 0) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
+                   "qla_target(%d):  tgt_ops->handle_tmr()"
+                   " failed: %d", vha->vp_idx, ret);
+               mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/*
+ * Entry point for an incoming ABTS: validate it, find the session by
+ * source ID and dispatch the abort, or reject when that is not possible.
+ *
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then
+ * reacquire.
+ */
+static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+       struct abts_recv_from_24xx *abts)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess;
+       uint32_t tag = abts->exchange_addr_to_abort;
+       uint8_t s_id[3];
+       int rc;
+
+       /* Only full-exchange aborts are supported, not Abort Sequence. */
+       if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
+                   "qla_target(%d): ABTS: Abort Sequence not "
+                   "supported\n", vha->vp_idx);
+               qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+               return;
+       }
+
+       if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
+                   "qla_target(%d): ABTS: Unknown Exchange "
+                   "Address received\n", vha->vp_idx);
+               qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+               return;
+       }
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
+           "qla_target(%d): task abort (s_id=%x:%x:%x, "
+           "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
+           abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
+           le32_to_cpu(abts->fcp_hdr_le.parameter));
+
+       /* fcp_hdr_le stores s_id little-endian; reverse the byte order
+        * for the session lookup. */
+       s_id[0] = abts->fcp_hdr_le.s_id[2];
+       s_id[1] = abts->fcp_hdr_le.s_id[1];
+       s_id[2] = abts->fcp_hdr_le.s_id[0];
+
+       sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+       if (!sess) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
+                   "qla_target(%d): task abort for non-existant session\n",
+                   vha->vp_idx);
+               /* Defer: a sess work may discover the session later. */
+               rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
+                   QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
+               if (rc != 0) {
+                       qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
+                           false);
+               }
+               return;
+       }
+
+       rc = __qlt_24xx_handle_abts(vha, abts, sess);
+       if (rc != 0) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
+                   "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
+                   vha->vp_idx, rc);
+               qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+               return;
+       }
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ */
+static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
+       struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
+{
+       struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
+       struct ctio7_to_24xx *ctio;
+
+       ql_dbg(ql_dbg_tgt, ha, 0xe008,
+           "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
+           ha, atio, resp_code);
+
+       /* Send marker if required */
+       if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
+               return;
+
+       ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
+       if (ctio == NULL) {
+               ql_dbg(ql_dbg_tgt, ha, 0xe04c,
+                   "qla_target(%d): %s failed: unable to allocate "
+                   "request packet\n", ha->vp_idx, __func__);
+               return;
+       }
+
+       ctio->entry_type = CTIO_TYPE7;
+       ctio->entry_count = 1;
+       ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+       ctio->nport_handle = mcmd->sess->loop_id;
+       ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+       ctio->vp_index = ha->vp_idx;
+       ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+       ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+       ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+       ctio->exchange_addr = atio->u.isp24.exchange_addr;
+       ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
+           __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+               CTIO7_FLAGS_SEND_STATUS);
+       ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+       ctio->u.status1.scsi_status =
+           __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
+       ctio->u.status1.response_len = __constant_cpu_to_le16(8);
+       ((uint32_t *)ctio->u.status1.sense_data)[0] = cpu_to_be32(resp_code);
+
+       qla2x00_start_iocbs(ha, ha->req);
+}
+
+/* Release a task management command back to its mempool. */
+void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+       mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+}
+EXPORT_SYMBOL(qlt_free_mcmd);
+
+/*
+ * callback from target fabric module code
+ *
+ * Transmit the response for a completed task management request:
+ * a notify-ack when a NACK was requested, an ABTS response for
+ * TMR_ABORT_TASK, or a CTIO7 carrying the TM response code otherwise.
+ * The mcmd is handed to ->free_mcmd() before the hardware lock is
+ * released (see the in-line comment below).
+ */
+void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
+{
+       struct scsi_qla_host *vha = mcmd->sess->vha;
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
+           "TM response mcmd (%p) status %#x state %#x",
+           mcmd, mcmd->fc_tm_rsp, mcmd->flags);
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
+               qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
+                   0, 0, 0, 0, 0, 0);
+       else {
+               if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
+                       qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
+                           mcmd->fc_tm_rsp, false);
+               else
+                       qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
+                           mcmd->fc_tm_rsp);
+       }
+       /*
+        * Make the callback for ->free_mcmd() to queue_work() and invoke
+        * target_put_sess_cmd() to drop cmd_kref to 1.  The final
+        * target_put_sess_cmd() call will be made from TFO->check_stop_free()
+        * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
+        * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
+        * qlt_xmit_tm_rsp() returns here..
+        */
+       ha->tgt.tgt_ops->free_mcmd(mcmd);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+EXPORT_SYMBOL(qlt_xmit_tm_rsp);
+
+/* No locks */
+/*
+ * DMA-map the command's scatterlist and compute how many request-ring
+ * entries the transfer will need: segments beyond datasegs_per_cmd
+ * spill into continuation IOCBs, each holding datasegs_per_cont more.
+ * Returns 0 on success, -1 if pci_map_sg() fails.
+ */
+static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
+{
+       struct qla_tgt_cmd *cmd = prm->cmd;
+
+       BUG_ON(cmd->sg_cnt == 0);
+
+       prm->sg = (struct scatterlist *)cmd->sg;
+       prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
+           cmd->sg_cnt, cmd->dma_data_direction);
+       if (unlikely(prm->seg_cnt == 0))
+               goto out_err;
+
+       prm->cmd->sg_mapped = 1;
+
+       /*
+        * If greater than four sg entries then we need to allocate
+        * the continuation entries
+        */
+       if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
+               prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
+                   prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);
+
+       ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
+           prm->seg_cnt, prm->req_cnt);
+       return 0;
+
+out_err:
+       ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
+           "qla_target(%d): PCI mapping failed: sg_cnt=%d",
+           0, prm->cmd->sg_cnt);
+       return -1;
+}
+
+/* Undo the DMA mapping established by qlt_pci_map_calc_cnt(). */
+static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
+       struct qla_tgt_cmd *cmd)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       BUG_ON(!cmd->sg_mapped);
+       pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+       cmd->sg_mapped = 0;
+}
+
+/*
+ * Reserve req_cnt entries on the request ring.  When the cached free
+ * count looks too low, it is refreshed from the hardware out-pointer
+ * before giving up.  The "+ 2" slack keeps the ring's in and out
+ * pointers from ever meeting.
+ *
+ * Returns 0 on success, -EAGAIN if the ring really is full.
+ * ha->hardware_lock supposed to be held on entry.
+ *
+ * Fix: the 0xe00a debug format string printed the garbled token
+ * "vha->->ring_index"; corrected to "vha->req->ring_index".
+ */
+static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
+       uint32_t req_cnt)
+{
+       struct qla_hw_data *ha = vha->hw;
+       device_reg_t __iomem *reg = ha->iobase;
+       uint32_t cnt;
+
+       if (vha->req->cnt < (req_cnt + 2)) {
+               cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
+
+               ql_dbg(ql_dbg_tgt, vha, 0xe00a,
+                   "Request ring circled: cnt=%d, vha->req->ring_index=%d, "
+                   "vha->req->cnt=%d, req_cnt=%d\n", cnt,
+                   vha->req->ring_index, vha->req->cnt, req_cnt);
+               if (vha->req->ring_index < cnt)
+                       vha->req->cnt = cnt - vha->req->ring_index;
+               else
+                       vha->req->cnt = vha->req->length -
+                           (vha->req->ring_index - cnt);
+       }
+
+       if (unlikely(vha->req->cnt < (req_cnt + 2))) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe00b,
+                   "qla_target(%d): There is no room in the "
+                   "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
+                   "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
+                   vha->req->cnt, req_cnt);
+               return -EAGAIN;
+       }
+       vha->req->cnt -= req_cnt;
+
+       return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ */
+static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
+{
+       /* Adjust ring index. */
+       vha->req->ring_index++;
+       if (vha->req->ring_index == vha->req->length) {
+               vha->req->ring_index = 0;
+               vha->req->ring_ptr = vha->req->ring;
+       } else {
+               vha->req->ring_ptr++;
+       }
+       return (cont_entry_t *)vha->req->ring_ptr;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/*
+ * Allocate the next free command handle in [1, MAX_OUTSTANDING_COMMANDS],
+ * skipping the reserved NULL/SKIP values and slots whose cmds[] entry is
+ * still occupied.  Returns QLA_TGT_NULL_HANDLE when every slot is in use.
+ */
+static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t h;
+
+       h = ha->tgt.current_handle;
+       /* always increment cmd handle */
+       do {
+               ++h;
+               if (h > MAX_OUTSTANDING_COMMANDS)
+                       h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
+               if (h == ha->tgt.current_handle) {
+                       /* Wrapped all the way around: nothing free. */
+                       ql_dbg(ql_dbg_tgt, vha, 0xe04e,
+                           "qla_target(%d): Ran out of "
+                           "empty cmd slots in ha %p\n", vha->vp_idx, ha);
+                       h = QLA_TGT_NULL_HANDLE;
+                       break;
+               }
+       } while ((h == QLA_TGT_NULL_HANDLE) ||
+           (h == QLA_TGT_SKIP_HANDLE) ||
+           (ha->tgt.cmds[h-1] != NULL));
+
+       if (h != QLA_TGT_NULL_HANDLE)
+               ha->tgt.current_handle = h;
+
+       return h;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/*
+ * Build the common part of a CTIO type 7 packet in the next request-ring
+ * slot: allocate a command handle, record the command in ha->tgt.cmds[],
+ * and copy addressing fields from the original ATIO.  Returns 0 on
+ * success, -EAGAIN if no command handle is available.
+ */
+static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
+       struct scsi_qla_host *vha)
+{
+       uint32_t h;
+       struct ctio7_to_24xx *pkt;
+       struct qla_hw_data *ha = vha->hw;
+       struct atio_from_isp *atio = &prm->cmd->atio;
+
+       pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
+       prm->pkt = pkt;
+       memset(pkt, 0, sizeof(*pkt));
+
+       pkt->entry_type = CTIO_TYPE7;
+       pkt->entry_count = (uint8_t)prm->req_cnt;
+       pkt->vp_index = vha->vp_idx;
+
+       h = qlt_make_handle(vha);
+       if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+               /*
+                * CTIO type 7 from the firmware doesn't provide a way to
+                * know the initiator's LOOP ID, hence we can't find
+                * the session and, so, the command.
+                */
+               return -EAGAIN;
+       } else
+               ha->tgt.cmds[h-1] = prm->cmd;
+
+       pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
+       pkt->nport_handle = prm->cmd->loop_id;
+       pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+       /* initiator_id is the byte-reversed s_id from the original ATIO */
+       pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+       pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+       pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+       pkt->exchange_addr = atio->u.isp24.exchange_addr;
+       pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
+       pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+       pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe00c,
+           "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
+           vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
+           le16_to_cpu(pkt->u.status0.ox_id));
+       return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there is sufficient amount of request entries to not drop it.
+ */
+static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
+       struct scsi_qla_host *vha)
+{
+       int cnt;
+       uint32_t *dword_ptr;
+       int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
+
+       /* Build continuation packets */
+       while (prm->seg_cnt > 0) {
+               cont_a64_entry_t *cont_pkt64 =
+                       (cont_a64_entry_t *)qlt_get_req_pkt(vha);
+
+               /*
+                * Make sure that from cont_pkt64 none of
+                * 64-bit specific fields used for 32-bit
+                * addressing. Cast to (cont_entry_t *) for
+                * that.
+                */
+
+               memset(cont_pkt64, 0, sizeof(*cont_pkt64));
+
+               cont_pkt64->entry_count = 1;
+               cont_pkt64->sys_define = 0;
+
+               if (enable_64bit_addressing) {
+                       cont_pkt64->entry_type = CONTINUE_A64_TYPE;
+                       dword_ptr =
+                           (uint32_t *)&cont_pkt64->dseg_0_address;
+               } else {
+                       cont_pkt64->entry_type = CONTINUE_TYPE;
+                       dword_ptr =
+                           (uint32_t *)&((cont_entry_t *)
+                               cont_pkt64)->dseg_0_address;
+               }
+
+               /* Load continuation entry data segments */
+               for (cnt = 0;
+                   cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
+                   cnt++, prm->seg_cnt--) {
+                       *dword_ptr++ =
+                           cpu_to_le32(pci_dma_lo32
+                               (sg_dma_address(prm->sg)));
+                       if (enable_64bit_addressing) {
+                               *dword_ptr++ =
+                                   cpu_to_le32(pci_dma_hi32
+                                       (sg_dma_address
+                                       (prm->sg)));
+                       }
+                       *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+                       ql_dbg(ql_dbg_tgt, vha, 0xe00d,
+                           "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
+                           (long long unsigned int)
+                           pci_dma_hi32(sg_dma_address(prm->sg)),
+                           (long long unsigned int)
+                           pci_dma_lo32(sg_dma_address(prm->sg)),
+                           (int)sg_dma_len(prm->sg));
+
+                       prm->sg = sg_next(prm->sg);
+               }
+       }
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there is sufficient amount of request entries to not drop it.
+ */
+static void qlt_load_data_segments(struct qla_tgt_prm *prm,
+       struct scsi_qla_host *vha)
+{
+       int cnt;
+       uint32_t *dword_ptr;
+       int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
+       struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe00e,
+           "iocb->scsi_status=%x, iocb->flags=%x\n",
+           le16_to_cpu(pkt24->u.status0.scsi_status),
+           le16_to_cpu(pkt24->u.status0.flags));
+
+       pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
+
+       /* Setup packet address segment pointer */
+       dword_ptr = pkt24->u.status0.dseg_0_address;
+
+       /* Set total data segment count */
+       if (prm->seg_cnt)
+               pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
+
+       if (prm->seg_cnt == 0) {
+               /* No data transfer */
+               *dword_ptr++ = 0;
+               *dword_ptr = 0;
+               return;
+       }
+
+       /* If scatter gather */
+       ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");
+
+       /* Load command entry data segments */
+       for (cnt = 0;
+           (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
+           cnt++, prm->seg_cnt--) {
+               *dword_ptr++ =
+                   cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
+               if (enable_64bit_addressing) {
+                       *dword_ptr++ =
+                           cpu_to_le32(pci_dma_hi32(
+                               sg_dma_address(prm->sg)));
+               }
+               *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+               ql_dbg(ql_dbg_tgt, vha, 0xe010,
+                   "S/G Segment phys_addr=%llx:%llx, len=%d\n",
+                   (long long unsigned int)pci_dma_hi32(sg_dma_address(
+                   prm->sg)),
+                   (long long unsigned int)pci_dma_lo32(sg_dma_address(
+                   prm->sg)),
+                   (int)sg_dma_len(prm->sg));
+
+               prm->sg = sg_next(prm->sg);
+       }
+
+       qlt_load_cont_data_segments(prm, vha);
+}
+
+/* Does this command carry a data payload? */
+static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
+{
+       if (cmd->bufflen > 0)
+               return 1;
+
+       return 0;
+}
+
+/*
+ * Called without ha->hardware_lock held
+ */
+static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
+       struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
+       uint32_t *full_req_cnt)
+{
+       struct qla_tgt *tgt = cmd->tgt;
+       struct scsi_qla_host *vha = tgt->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct se_cmd *se_cmd = &cmd->se_cmd;
+
+       if (unlikely(cmd->aborted)) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+                   "qla_target(%d): terminating exchange "
+                   "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
+                   se_cmd, cmd->tag);
+
+               cmd->state = QLA_TGT_STATE_ABORTED;
+
+               qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+
+               /* !! At this point cmd could be already freed !! */
+               return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
+       }
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
+           vha->vp_idx, cmd->tag);
+
+       prm->cmd = cmd;
+       prm->tgt = tgt;
+       prm->rq_result = scsi_status;
+       prm->sense_buffer = &cmd->sense_buffer[0];
+       prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
+       prm->sg = NULL;
+       prm->seg_cnt = -1;
+       prm->req_cnt = 1;
+       prm->add_status_pkt = 0;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
+           prm->rq_result, xmit_type);
+
+       /* Send marker if required */
+       if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
+               return -EFAULT;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);
+
+       if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
+               if  (qlt_pci_map_calc_cnt(prm) != 0)
+                       return -EAGAIN;
+       }
+
+       *full_req_cnt = prm->req_cnt;
+
+       if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+               prm->residual = se_cmd->residual_count;
+               ql_dbg(ql_dbg_tgt, vha, 0xe014,
+                   "Residual underflow: %d (tag %d, "
+                   "op %x, bufflen %d, rq_result %x)\n", prm->residual,
+                   cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+                   cmd->bufflen, prm->rq_result);
+               prm->rq_result |= SS_RESIDUAL_UNDER;
+       } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+               prm->residual = se_cmd->residual_count;
+               ql_dbg(ql_dbg_tgt, vha, 0xe015,
+                   "Residual overflow: %d (tag %d, "
+                   "op %x, bufflen %d, rq_result %x)\n", prm->residual,
+                   cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+                   cmd->bufflen, prm->rq_result);
+               prm->rq_result |= SS_RESIDUAL_OVER;
+       }
+
+       if (xmit_type & QLA_TGT_XMIT_STATUS) {
+               /*
+                * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
+                * ignored in *xmit_response() below
+                */
+               if (qlt_has_data(cmd)) {
+                       if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
+                           (IS_FWI2_CAPABLE(ha) &&
+                           (prm->rq_result != 0))) {
+                               prm->add_status_pkt = 1;
+                               (*full_req_cnt)++;
+                       }
+               }
+       }
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe016,
+           "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
+           prm->req_cnt, *full_req_cnt, prm->add_status_pkt);
+
+       return 0;
+}
+
+/*
+ * Decide whether the CTIO for cmd must request explicit confirmation
+ * from the initiator.  Never needed when class 2 service is enabled;
+ * for sense transfers it depends only on the initiator's advertised
+ * support, otherwise explicit confirmation must also be enabled.
+ */
+static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
+       struct qla_tgt_cmd *cmd, int sending_sense)
+{
+       if (ha->tgt.enable_class_2)
+               return 0;
+
+       if (sending_sense)
+               return cmd->conf_compl_supported;
+
+       return ha->tgt.enable_explicit_conf &&
+           cmd->conf_compl_supported;
+}
+
+#ifdef CONFIG_QLA_TGT_DEBUG_SRR
+/*
+ *  Original taken from the XFS code
+ *
+ *  Park-Miller "minimal standard" linear congruential generator
+ *  (multiplier 16807 mod 2^31-1), protected by a local spinlock so it
+ *  can be called from any context.
+ */
+static unsigned long qlt_srr_random(void)
+{
+       static int Inited;
+       static unsigned long RandomValue;
+       static DEFINE_SPINLOCK(lock);
+       /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
+       register long rv;
+       register long lo;
+       register long hi;
+       unsigned long flags;
+
+       spin_lock_irqsave(&lock, flags);
+       if (!Inited) {
+               RandomValue = jiffies;
+               Inited = 1;
+       }
+       rv = RandomValue;
+       hi = rv / 127773;
+       lo = rv % 127773;
+       rv = 16807 * lo - 2836 * hi;
+       if (rv <= 0)
+               rv += 2147483647;
+       RandomValue = rv;
+       spin_unlock_irqrestore(&lock, flags);
+       return rv;
+}
+
+/*
+ * Debug-only fault injection: randomly truncate a FCP_READ's buffer
+ * (tail and/or head) to provoke SRR (sequence retransmission request)
+ * handling in the initiator.  Compiled out unless
+ * CONFIG_QLA_TGT_DEBUG_SRR is set.
+ */
+static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
+{
+#if 0 /* This is not a real status packets lost, so it won't lead to SRR */
+       if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
+           == 50) {
+               *xmit_type &= ~QLA_TGT_XMIT_STATUS;
+               ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
+                   "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
+       }
+#endif
+       /*
+        * It's currently not possible to simulate SRRs for FCP_WRITE without
+        * a physical link layer failure, so don't even try here..
+        */
+       if (cmd->dma_data_direction != DMA_FROM_DEVICE)
+               return;
+
+       /* ~1% of reads: chop the tail off the scatterlist. */
+       if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
+           ((qlt_srr_random() % 100) == 20)) {
+               int i, leave = 0;
+               unsigned int tot_len = 0;
+
+               while (leave == 0)
+                       leave = qlt_srr_random() % cmd->sg_cnt;
+
+               for (i = 0; i < leave; i++)
+                       tot_len += cmd->sg[i].length;
+
+               ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
+                   "Cutting cmd %p (tag %d) buffer"
+                   " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
+                   " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
+                   cmd->bufflen, cmd->sg_cnt);
+
+               cmd->bufflen = tot_len;
+               cmd->sg_cnt = leave;
+       }
+
+       /* ~1% of reads: shift the buffer head forward by a random offset. */
+       if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
+               unsigned int offset = qlt_srr_random() % cmd->bufflen;
+
+               ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
+                   "Cutting cmd %p (tag %d) buffer head "
+                   "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
+                   cmd->bufflen);
+               if (offset == 0)
+                       *xmit_type &= ~QLA_TGT_XMIT_DATA;
+               else if (qlt_set_data_offset(cmd, offset)) {
+                       ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
+                           "qlt_set_data_offset() failed (tag %d)", cmd->tag);
+               }
+       }
+}
+#else
+/* No-op stub when SRR debug injection is compiled out. */
+static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
+{}
+#endif
+
+/*
+ * Fill the status/sense portion of a CTIO7: SCSI status, residual,
+ * explicit-confirmation flags and, when sense data is valid, switch the
+ * packet to status mode 1 and copy the (big-endian word-swapped) sense
+ * buffer into it.
+ */
+static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
+       struct qla_tgt_prm *prm)
+{
+       /* Sense can never exceed the CTIO's in-packet sense area. */
+       prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
+           (uint32_t)sizeof(ctio->u.status1.sense_data));
+       ctio->u.status0.flags |=
+           __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
+       if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
+               ctio->u.status0.flags |= __constant_cpu_to_le16(
+                   CTIO7_FLAGS_EXPLICIT_CONFORM |
+                   CTIO7_FLAGS_CONFORM_REQ);
+       }
+       ctio->u.status0.residual = cpu_to_le32(prm->residual);
+       ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
+       if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
+               int i;
+
+               if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
+                       if (prm->cmd->se_cmd.scsi_status != 0) {
+                               ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
+                                   "Skipping EXPLICIT_CONFORM and "
+                                   "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
+                                   "non GOOD status\n");
+                               goto skip_explict_conf;
+                       }
+                       ctio->u.status1.flags |= __constant_cpu_to_le16(
+                           CTIO7_FLAGS_EXPLICIT_CONFORM |
+                           CTIO7_FLAGS_CONFORM_REQ);
+               }
+skip_explict_conf:
+               /* Status mode 1: sense travels inside the CTIO. */
+               ctio->u.status1.flags &=
+                   ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+               ctio->u.status1.flags |=
+                   __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+               ctio->u.status1.scsi_status |=
+                   __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
+               ctio->u.status1.sense_length =
+                   cpu_to_le16(prm->sense_buffer_len);
+               for (i = 0; i < prm->sense_buffer_len/4; i++)
+                       ((uint32_t *)ctio->u.status1.sense_data)[i] =
+                               cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
+#if 0
+               if (unlikely((prm->sense_buffer_len % 4) != 0)) {
+                       static int q;
+                       if (q < 10) {
+                               ql_dbg(ql_dbg_tgt, vha, 0xe04f,
+                                   "qla_target(%d): %d bytes of sense "
+                                   "lost", prm->tgt->ha->vp_idx,
+                                   prm->sense_buffer_len % 4);
+                               q++;
+                       }
+               }
+#endif
+       } else {
+               ctio->u.status1.flags &=
+                   ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+               ctio->u.status1.flags |=
+                   __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+               ctio->u.status1.sense_length = 0;
+               memset(ctio->u.status1.sense_data, 0,
+                   sizeof(ctio->u.status1.sense_data));
+       }
+
+       /* Sense with len > 24, is it possible ??? */
+}
+
+/*
+ * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
+ * QLA_TGT_XMIT_STATUS for >= 24xx silicon
+ */
+int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+       uint8_t scsi_status)
+{
+       struct scsi_qla_host *vha = cmd->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct ctio7_to_24xx *pkt;
+       struct qla_tgt_prm prm;
+       uint32_t full_req_cnt = 0;
+       unsigned long flags = 0;
+       int res;
+
+       memset(&prm, 0, sizeof(prm));
+       qlt_check_srr_debug(cmd, &xmit_type);
+
+       ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
+           "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
+           "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
+           1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);
+
+       res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
+           &full_req_cnt);
+       if (unlikely(res != 0)) {
+               if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
+                       return 0;
+
+               return res;
+       }
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       /* Does F/W have an IOCBs for this request */
+       res = qlt_check_reserve_free_req(vha, full_req_cnt);
+       if (unlikely(res))
+               goto out_unmap_unlock;
+
+       res = qlt_24xx_build_ctio_pkt(&prm, vha);
+       if (unlikely(res != 0))
+               goto out_unmap_unlock;
+
+
+       pkt = (struct ctio7_to_24xx *)prm.pkt;
+
+       if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
+               pkt->u.status0.flags |=
+                   __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
+                       CTIO7_FLAGS_STATUS_MODE_0);
+
+               qlt_load_data_segments(&prm, vha);
+
+               if (prm.add_status_pkt == 0) {
+                       if (xmit_type & QLA_TGT_XMIT_STATUS) {
+                               pkt->u.status0.scsi_status =
+                                   cpu_to_le16(prm.rq_result);
+                               pkt->u.status0.residual =
+                                   cpu_to_le32(prm.residual);
+                               pkt->u.status0.flags |= __constant_cpu_to_le16(
+                                   CTIO7_FLAGS_SEND_STATUS);
+                               if (qlt_need_explicit_conf(ha, cmd, 0)) {
+                                       pkt->u.status0.flags |=
+                                           __constant_cpu_to_le16(
+                                               CTIO7_FLAGS_EXPLICIT_CONFORM |
+                                               CTIO7_FLAGS_CONFORM_REQ);
+                               }
+                       }
+
+               } else {
+                       /*
+                        * We have already made sure that there is sufficient
+                        * amount of request entries to not drop HW lock in
+                        * req_pkt().
+                        */
+                       struct ctio7_to_24xx *ctio =
+                               (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
+
+                       ql_dbg(ql_dbg_tgt, vha, 0xe019,
+                           "Building additional status packet\n");
+
+                       memcpy(ctio, pkt, sizeof(*ctio));
+                       ctio->entry_count = 1;
+                       ctio->dseg_count = 0;
+                       ctio->u.status1.flags &= ~__constant_cpu_to_le16(
+                           CTIO7_FLAGS_DATA_IN);
+
+                       /* Real finish is ctio_m1's finish */
+                       pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
+                       pkt->u.status0.flags |= __constant_cpu_to_le16(
+                           CTIO7_FLAGS_DONT_RET_CTIO);
+                       qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
+                           &prm);
+                       pr_debug("Status CTIO7: %p\n", ctio);
+               }
+       } else
+               qlt_24xx_init_ctio_to_isp(pkt, &prm);
+
+
+       cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe01a,
+           "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
+           pkt, scsi_status);
+
+       qla2x00_start_iocbs(vha, vha->req);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return 0;
+
+out_unmap_unlock:
+       if (cmd->sg_mapped)
+               qlt_unmap_sg(vha, cmd);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return res;
+}
+EXPORT_SYMBOL(qlt_xmit_response);
+
+/*
+ * Tell the initiator we are ready to receive write data: DMA-map the
+ * command's scatterlist and fire a data-out CTIO7 in status mode 0.
+ * Moves the command to QLA_TGT_STATE_NEED_DATA on success.
+ *
+ * Returns 0 on success, -EIO if the marker could not be issued,
+ * -EAGAIN if mapping or ring reservation failed (mapping is undone).
+ */
+int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
+{
+       struct ctio7_to_24xx *pkt;
+       struct scsi_qla_host *vha = cmd->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = cmd->tgt;
+       struct qla_tgt_prm prm;
+       unsigned long flags;
+       int res = 0;
+
+       memset(&prm, 0, sizeof(prm));
+       prm.cmd = cmd;
+       prm.tgt = tgt;
+       prm.sg = NULL;
+       prm.req_cnt = 1;
+
+       /* Send marker if required */
+       if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
+               return -EIO;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
+           (int)vha->vp_idx);
+
+       /* Calculate number of entries and segments required */
+       if (qlt_pci_map_calc_cnt(&prm) != 0)
+               return -EAGAIN;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       /* Does F/W have enough IOCBs free for this request? */
+       res = qlt_check_reserve_free_req(vha, prm.req_cnt);
+       if (res != 0)
+               goto out_unlock_free_unmap;
+
+       res = qlt_24xx_build_ctio_pkt(&prm, vha);
+       if (unlikely(res != 0))
+               goto out_unlock_free_unmap;
+       pkt = (struct ctio7_to_24xx *)prm.pkt;
+       pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
+           CTIO7_FLAGS_STATUS_MODE_0);
+       qlt_load_data_segments(&prm, vha);
+
+       cmd->state = QLA_TGT_STATE_NEED_DATA;
+
+       qla2x00_start_iocbs(vha, vha->req);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return res;
+
+out_unlock_free_unmap:
+       if (cmd->sg_mapped)
+               qlt_unmap_sg(vha, cmd);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return res;
+}
+EXPORT_SYMBOL(qlt_rdy_to_xfer);
+
+/*
+ * Build and issue a CTIO7 that terminates the exchange described by @atio.
+ *
+ * ha->hardware_lock must be held on entry (the locked/unlocked variants are
+ * handled by the qlt_send_term_exchange() wrapper).
+ *
+ * @cmd may be NULL (terminating an exchange that never became a command).
+ * Returns 1 when @cmd has reached at least QLA_TGT_STATE_PROCESSED and the
+ * caller should free it, 0 otherwise, or -ENOMEM if no IOCB was available.
+ */
+static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
+       struct qla_tgt_cmd *cmd,
+       struct atio_from_isp *atio)
+{
+       struct ctio7_to_24xx *ctio24;
+       struct qla_hw_data *ha = vha->hw;
+       request_t *pkt;
+       int ret = 0;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
+
+       pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+       if (pkt == NULL) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe050,
+                   "qla_target(%d): %s failed: unable to allocate "
+                   "request packet\n", vha->vp_idx, __func__);
+               return -ENOMEM;
+       }
+
+       if (cmd != NULL) {
+               if (cmd->state < QLA_TGT_STATE_PROCESSED) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe051,
+                           "qla_target(%d): Terminating cmd %p with "
+                           "incorrect state %d\n", vha->vp_idx, cmd,
+                           cmd->state);
+               } else
+                       ret = 1;
+       }
+
+       pkt->entry_count = 1;
+       pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+       ctio24 = (struct ctio7_to_24xx *)pkt;
+       ctio24->entry_type = CTIO_TYPE7;
+       ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
+       ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+       ctio24->vp_index = vha->vp_idx;
+       ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+       ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+       ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+       ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+       /*
+        * Convert the whole flags word, not just the constant part: the
+        * shifted attr bits are in CPU order and must be byte-swapped
+        * together with the flag constants for big-endian hosts.
+        */
+       ctio24->u.status1.flags = cpu_to_le16((atio->u.isp24.attr << 9) |
+           CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
+       ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+
+       /* Most likely, it isn't needed */
+       ctio24->u.status1.residual = get_unaligned((uint32_t *)
+           &atio->u.isp24.fcp_cmnd.add_cdb[
+           atio->u.isp24.fcp_cmnd.add_cdb_len]);
+       if (ctio24->u.status1.residual != 0)
+               ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
+       qla2x00_start_iocbs(vha, vha->req);
+       return ret;
+}
+
+/*
+ * Terminate the exchange described by @atio, taking ha->hardware_lock
+ * ourselves unless the caller already holds it (@ha_locked).  When the
+ * low-level sender reports 1, the command is done and we free it here.
+ */
+static void qlt_send_term_exchange(struct scsi_qla_host *vha,
+       struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
+{
+       unsigned long flags;
+       int rc;
+
+       if (qlt_issue_marker(vha, ha_locked) < 0)
+               return;
+
+       if (ha_locked) {
+               rc = __qlt_send_term_exchange(vha, cmd, atio);
+       } else {
+               spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+               rc = __qlt_send_term_exchange(vha, cmd, atio);
+               spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+       }
+
+       if (rc != 1)
+               return;
+
+       if (!ha_locked && !in_interrupt())
+               msleep(250); /* just in case */
+
+       vha->hw->tgt.tgt_ops->free_cmd(cmd);
+}
+
+/*
+ * Release a target command back to its slab cache.  The SG list is only
+ * freed when free_sg says this driver allocated it (SRR offset rebuild);
+ * a still-mapped SG list at this point is a driver bug.
+ */
+void qlt_free_cmd(struct qla_tgt_cmd *cmd)
+{
+       BUG_ON(cmd->sg_mapped);
+
+       if (cmd->free_sg)
+               kfree(cmd->sg);
+
+       kmem_cache_free(qla_tgt_cmd_cachep, cmd);
+}
+EXPORT_SYMBOL(qlt_free_cmd);
+
+/* ha->hardware_lock supposed to be held on entry */
+/*
+ * Record a Sequence Retransmission Request seen on a CTIO completion.
+ *
+ * Allocates a qla_tgt_srr_ctio node for @cmd, links it onto
+ * tgt->srr_ctio_list under tgt->srr_lock, and if the matching SRR
+ * immediate notify (same srr_id) has already arrived, schedules
+ * tgt->srr_work to process the pair.  On allocation failure the pending
+ * IMM SRR with the same id (if any) is removed and rejected so the two
+ * lists cannot get out of step.
+ *
+ * Returns 0 on success, -EINVAL if @ctio is NULL or the expected IMM SRR
+ * is missing, -ENOMEM if the node could not be allocated.
+ */
+static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
+       struct qla_tgt_cmd *cmd, void *ctio)
+{
+       struct qla_tgt_srr_ctio *sc;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt_srr_imm *imm;
+
+       /* ids pair CTIO-side and IMM-side SRR entries; bump before matching */
+       tgt->ctio_srr_id++;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
+           "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
+
+       if (!ctio) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
+                   "qla_target(%d): SRR CTIO, but ctio is NULL\n",
+                   vha->vp_idx);
+               return -EINVAL;
+       }
+
+       sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
+       if (sc != NULL) {
+               sc->cmd = cmd;
+               /* IRQ is already OFF */
+               spin_lock(&tgt->srr_lock);
+               sc->srr_id = tgt->ctio_srr_id;
+               list_add_tail(&sc->srr_list_entry,
+                   &tgt->srr_ctio_list);
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
+                   "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
+               if (tgt->imm_srr_id == tgt->ctio_srr_id) {
+                       int found = 0;
+                       /* both sides arrived: confirm the IMM entry exists */
+                       list_for_each_entry(imm, &tgt->srr_imm_list,
+                           srr_list_entry) {
+                               if (imm->srr_id == sc->srr_id) {
+                                       found = 1;
+                                       break;
+                               }
+                       }
+                       if (found) {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
+                                   "Scheduling srr work\n");
+                               schedule_work(&tgt->srr_work);
+                       } else {
+                               /* counters matched but no IMM node: undo */
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
+                                   "qla_target(%d): imm_srr_id "
+                                   "== ctio_srr_id (%d), but there is no "
+                                   "corresponding SRR IMM, deleting CTIO "
+                                   "SRR %p\n", vha->vp_idx,
+                                   tgt->ctio_srr_id, sc);
+                               list_del(&sc->srr_list_entry);
+                               spin_unlock(&tgt->srr_lock);
+
+                               kfree(sc);
+                               return -EINVAL;
+                       }
+               }
+               spin_unlock(&tgt->srr_lock);
+       } else {
+               struct qla_tgt_srr_imm *ti;
+
+               /* no CTIO node: drop the paired IMM SRR so lists stay in sync */
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
+                   "qla_target(%d): Unable to allocate SRR CTIO entry\n",
+                   vha->vp_idx);
+               spin_lock(&tgt->srr_lock);
+               list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
+                   srr_list_entry) {
+                       if (imm->srr_id == tgt->ctio_srr_id) {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
+                                   "IMM SRR %p deleted (id %d)\n",
+                                   imm, imm->srr_id);
+                               list_del(&imm->srr_list_entry);
+                               qlt_reject_free_srr_imm(vha, imm, 1);
+                       }
+               }
+               spin_unlock(&tgt->srr_lock);
+
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ *
+ * Terminate the exchange unless the CTIO shows the firmware already
+ * terminated it (OF_TERM_EXCH set).  Returns non-zero when a terminate
+ * CTIO was actually sent.
+ */
+static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
+       struct qla_tgt_cmd *cmd, uint32_t status)
+{
+       struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
+       int term = 1;
+
+       /* With no CTIO to inspect we must assume termination is needed */
+       if (c != NULL)
+               term = !(c->flags &
+                   __constant_cpu_to_le16(OF_TERM_EXCH));
+
+       if (term)
+               qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+
+       return term;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/*
+ * Look up the command registered under @handle and claim ownership of it
+ * by clearing its slot.  Handles are 1-based: slot 0 holds handle 1.
+ * Returns NULL when no command is registered under that handle.
+ */
+static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
+       uint32_t handle)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_cmd *cmd;
+
+       cmd = ha->tgt.cmds[handle - 1];
+       if (cmd != NULL)
+               ha->tgt.cmds[handle - 1] = NULL;
+
+       return cmd;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/*
+ * Translate a CTIO completion @handle back into the qla_tgt_cmd it was
+ * issued for, after stripping the driver's internal marker bits.
+ *
+ * Returns NULL for the SKIP handle, for out-of-range or unknown handles,
+ * and for NULL handles (CTIO7 cannot identify the command in that case) —
+ * each with an explanatory debug message.  A successful lookup also clears
+ * the handle slot (see qlt_get_cmd()).
+ */
+static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
+       uint32_t handle, void *ctio)
+{
+       struct qla_tgt_cmd *cmd = NULL;
+
+       /* Clear out internal marks */
+       handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
+           CTIO_INTERMEDIATE_HANDLE_MARK);
+
+       if (handle != QLA_TGT_NULL_HANDLE) {
+               if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
+                           "SKIP_HANDLE CTIO\n");
+                       return NULL;
+               }
+               /* handle-1 is actually used */
+               if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe052,
+                           "qla_target(%d): Wrong handle %x received\n",
+                           vha->vp_idx, handle);
+                       return NULL;
+               }
+               cmd = qlt_get_cmd(vha, handle);
+               if (unlikely(cmd == NULL)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe053,
+                           "qla_target(%d): Suspicious: unable to "
+                           "find the command with handle %x\n", vha->vp_idx,
+                           handle);
+                       return NULL;
+               }
+       } else if (ctio != NULL) {
+               /* We can't get loop ID from CTIO7 */
+               ql_dbg(ql_dbg_tgt, vha, 0xe054,
+                   "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
+                   "support NULL handles\n", vha->vp_idx);
+               return NULL;
+       }
+
+       return cmd;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ *
+ * Core CTIO completion handler: map @handle back to its command, unmap its
+ * SG list, then act on @status and the command's state machine:
+ *  - PROCESSED:  response round-trip done, free the command;
+ *  - NEED_DATA:  write data arrived, advance to DATA_IN and hand the
+ *                command to the fabric layer (handle_data) — ownership
+ *                transfers, so we return without freeing;
+ *  - ABORTED:    just log and free;
+ * error statuses may additionally terminate the exchange or, for
+ * CTIO_SRR_RECEIVED, queue SRR processing (in which case the command is
+ * kept alive and we return early).
+ */
+static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
+       uint32_t status, void *ctio)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct se_cmd *se_cmd;
+       struct target_core_fabric_ops *tfo;
+       struct qla_tgt_cmd *cmd;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe01e,
+           "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
+           vha->vp_idx, ctio, status, handle);
+
+       if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
+               /* That could happen only in case of an error/reset/abort */
+               if (status != CTIO_SUCCESS) {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
+                           "Intermediate CTIO received"
+                           " (status %x)\n", status);
+               }
+               return;
+       }
+
+       cmd = qlt_ctio_to_cmd(vha, handle, ctio);
+       if (cmd == NULL) {
+               /* no command to attribute this to; clean up the exchange */
+               if (status != CTIO_SUCCESS)
+                       qlt_term_ctio_exchange(vha, ctio, NULL, status);
+               return;
+       }
+       se_cmd = &cmd->se_cmd;
+       tfo = se_cmd->se_tfo;
+
+       if (cmd->sg_mapped)
+               qlt_unmap_sg(vha, cmd);
+
+       if (unlikely(status != CTIO_SUCCESS)) {
+               switch (status & 0xFFFF) {
+               case CTIO_LIP_RESET:
+               case CTIO_TARGET_RESET:
+               case CTIO_ABORTED:
+               case CTIO_TIMEOUT:
+               case CTIO_INVALID_RX_ID:
+                       /* They are OK */
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
+                           "qla_target(%d): CTIO with "
+                           "status %#x received, state %x, se_cmd %p, "
+                           "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
+                           "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
+                           status, cmd->state, se_cmd);
+                       break;
+
+               case CTIO_PORT_LOGGED_OUT:
+               case CTIO_PORT_UNAVAILABLE:
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
+                           "qla_target(%d): CTIO with PORT LOGGED "
+                           "OUT (29) or PORT UNAVAILABLE (28) status %x "
+                           "received (state %x, se_cmd %p)\n", vha->vp_idx,
+                           status, cmd->state, se_cmd);
+                       break;
+
+               case CTIO_SRR_RECEIVED:
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
+                           "qla_target(%d): CTIO with SRR_RECEIVED"
+                           " status %x received (state %x, se_cmd %p)\n",
+                           vha->vp_idx, status, cmd->state, se_cmd);
+                       /* on success the SRR machinery now owns the command */
+                       if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
+                               break;
+                       else
+                               return;
+
+               default:
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
+                           "qla_target(%d): CTIO with error status "
+                           "0x%x received (state %x, se_cmd %p\n",
+                           vha->vp_idx, status, cmd->state, se_cmd);
+                       break;
+               }
+
+               /* NEED_DATA errors are reported via handle_data() below */
+               if (cmd->state != QLA_TGT_STATE_NEED_DATA)
+                       if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
+                               return;
+       }
+
+       if (cmd->state == QLA_TGT_STATE_PROCESSED) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
+       } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+               int rx_status = 0;
+
+               cmd->state = QLA_TGT_STATE_DATA_IN;
+
+               if (unlikely(status != CTIO_SUCCESS))
+                       rx_status = -EIO;
+               else
+                       cmd->write_data_transferred = 1;
+
+               ql_dbg(ql_dbg_tgt, vha, 0xe020,
+                   "Data received, context %x, rx_status %d\n",
+                   0x0, rx_status);
+
+               /* fabric layer takes over; do not free the command here */
+               ha->tgt.tgt_ops->handle_data(cmd);
+               return;
+       } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
+                   "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
+       } else {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
+                   "qla_target(%d): A command in state (%d) should "
+                   "not return a CTIO complete\n", vha->vp_idx, cmd->state);
+       }
+
+       if (unlikely(status != CTIO_SUCCESS)) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
+               dump_stack();
+       }
+
+       ha->tgt.tgt_ops->free_cmd(cmd);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+/*
+ * IRQ-path entry point for CTIO completions.  Bails out when target mode
+ * is not enabled on this HBA, otherwise forwards to the core handler while
+ * irq_cmd_count marks that IRQ processing is in flight (read by teardown
+ * paths elsewhere — TODO confirm against qlt_stop_phase*).
+ */
+void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+
+       /*
+        * NOTE(review): the likely() on a NULL tgt looks inverted for a
+        * CTIO path — presumably it reflects target mode being off on most
+        * systems; the hint affects codegen only, not behavior. Confirm.
+        */
+       if (likely(tgt == NULL)) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe021,
+                   "CTIO, but target mode not enabled"
+                   " (ha %d %p handle %#x)", vha->vp_idx, ha, handle);
+               return;
+       }
+
+       tgt->irq_cmd_count++;
+       qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL);
+       tgt->irq_cmd_count--;
+}
+
+/*
+ * Map an ATIO FCP task-attribute code to the generic MSG_*_TAG value.
+ * Untagged commands are treated as simple-queue; unrecognised codes fall
+ * back to ORDERED with a debug message.
+ */
+static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
+       uint8_t task_codes)
+{
+       switch (task_codes) {
+       case ATIO_SIMPLE_QUEUE:
+       case ATIO_UNTAGGED:
+               return MSG_SIMPLE_TAG;
+       case ATIO_HEAD_OF_QUEUE:
+               return MSG_HEAD_TAG;
+       case ATIO_ORDERED_QUEUE:
+               return MSG_ORDERED_TAG;
+       case ATIO_ACA_QUEUE:
+               return MSG_ACA_TAG;
+       default:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
+                   "qla_target: unknown task code %x, use ORDERED instead\n",
+                   task_codes);
+               return MSG_ORDERED_TAG;
+       }
+}
+
+static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
+                                       uint8_t *);
+/*
+ * Process context for I/O path into tcm_qla2xxx code
+ *
+ * Workqueue handler for a newly arrived ATIO (queued by
+ * qlt_handle_cmd_for_atio()).  Resolves the initiator session from the
+ * ATIO's s_id — taking an extra session kref under hardware_lock, or
+ * creating the session via qlt_make_local_sess() if none exists — then
+ * decodes LUN, CDB, data direction and length and submits the command to
+ * the fabric layer through tgt_ops->handle_cmd().  On any failure the
+ * exchange is terminated and the session reference (if taken) is dropped.
+ */
+static void qlt_do_work(struct work_struct *work)
+{
+       struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+       scsi_qla_host_t *vha = cmd->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt_sess *sess = NULL;
+       struct atio_from_isp *atio = &cmd->atio;
+       unsigned char *cdb;
+       unsigned long flags;
+       uint32_t data_length;
+       int ret, fcp_task_attr, data_dir, bidi = 0;
+
+       if (tgt->tgt_stop)
+               goto out_term;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+           atio->u.isp24.fcp_hdr.s_id);
+       if (sess) {
+               if (unlikely(sess->tearing_down)) {
+                       sess = NULL;
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+                       goto out_term;
+               } else {
+                       /*
+                        * Do the extra kref_get() before dropping
+                        * qla_hw_data->hardware_lock.
+                        */
+                       kref_get(&sess->se_sess->sess_kref);
+               }
+       }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       if (unlikely(!sess)) {
+               uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;
+
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+                       "qla_target(%d): Unable to find wwn login"
+                       " (s_id %x:%x:%x), trying to create it manually\n",
+                       vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+
+               /* multi-entry ATIOs are not supported on this path */
+               if (atio->u.raw.entry_count > 1) {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+                               "Dropping multy entry cmd %p\n", cmd);
+                       goto out_term;
+               }
+
+               mutex_lock(&ha->tgt.tgt_mutex);
+               sess = qlt_make_local_sess(vha, s_id);
+               /* sess has an extra creation ref. */
+               mutex_unlock(&ha->tgt.tgt_mutex);
+
+               if (!sess)
+                       goto out_term;
+       }
+
+       cmd->sess = sess;
+       cmd->loop_id = sess->loop_id;
+       cmd->conf_compl_supported = sess->conf_compl_supported;
+
+       cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
+       cmd->tag = atio->u.isp24.exchange_addr;
+       cmd->unpacked_lun = scsilun_to_int(
+           (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
+
+       /* both rd and wr set means a bidirectional command */
+       if (atio->u.isp24.fcp_cmnd.rddata &&
+           atio->u.isp24.fcp_cmnd.wrdata) {
+               bidi = 1;
+               data_dir = DMA_TO_DEVICE;
+       } else if (atio->u.isp24.fcp_cmnd.rddata)
+               data_dir = DMA_FROM_DEVICE;
+       else if (atio->u.isp24.fcp_cmnd.wrdata)
+               data_dir = DMA_TO_DEVICE;
+       else
+               data_dir = DMA_NONE;
+
+       fcp_task_attr = qlt_get_fcp_task_attr(vha,
+           atio->u.isp24.fcp_cmnd.task_attr);
+       /* data length lives just past the additional CDB bytes */
+       data_length = be32_to_cpu(get_unaligned((uint32_t *)
+           &atio->u.isp24.fcp_cmnd.add_cdb[
+           atio->u.isp24.fcp_cmnd.add_cdb_len]));
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe022,
+           "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
+           cmd, cmd->unpacked_lun, cmd->tag);
+
+       ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+           fcp_task_attr, data_dir, bidi);
+       if (ret != 0)
+               goto out_term;
+       /*
+        * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
+        */
+       ha->tgt.tgt_ops->put_sess(sess);
+       return;
+
+out_term:
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
+       /*
+        * cmd has not sent to target yet, so pass NULL as the second argument
+        */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       if (sess)
+               ha->tgt.tgt_ops->put_sess(sess);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/*
+ * Accept a new ATIO from the firmware: allocate a command, copy the ATIO
+ * into it and defer the heavy lifting to qlt_do_work() in process context.
+ * Returns 0 on success, -EFAULT while the target is stopping, -ENOMEM on
+ * allocation failure.
+ */
+static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+       struct atio_from_isp *atio)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt_cmd *cmd;
+
+       if (unlikely(tgt->tgt_stop)) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+                   "New command while device %p is shutting down\n", tgt);
+               return -EFAULT;
+       }
+
+       cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
+       if (cmd == NULL) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
+                   "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
+               return -ENOMEM;
+       }
+
+       /* Stash the ATIO and initialise the command before handing it off */
+       memcpy(&cmd->atio, atio, sizeof(*atio));
+       INIT_LIST_HEAD(&cmd->cmd_list);
+       cmd->state = QLA_TGT_STATE_NEW;
+       cmd->vha = vha;
+       cmd->tgt = tgt;
+
+       INIT_WORK(&cmd->work, qlt_do_work);
+       queue_work(qla_tgt_wq, &cmd->work);
+
+       return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/*
+ * Allocate a management command for task-management function @fn on
+ * @sess/@lun, translate @fn into the generic TMR_* code and submit it via
+ * tgt_ops->handle_tmr().  @iocb, when non-NULL, is the originating
+ * immediate notify and is preserved for the eventual response.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -ENOSYS for an
+ * unknown function code, -EFAULT if handle_tmr() rejects the request
+ * (the mcmd is freed on every error path).
+ */
+static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+       int fn, void *iocb, int flags)
+{
+       struct scsi_qla_host *vha = sess->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_mgmt_cmd *mcmd;
+       int res;
+       uint8_t tmr_func;
+
+       mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+       if (!mcmd) {
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
+                   "qla_target(%d): Allocation of management "
+                   "command failed, some commands and their data could "
+                   "leak\n", vha->vp_idx);
+               return -ENOMEM;
+       }
+       memset(mcmd, 0, sizeof(*mcmd));
+       mcmd->sess = sess;
+
+       if (iocb) {
+               memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+                   sizeof(mcmd->orig_iocb.imm_ntfy));
+       }
+       mcmd->tmr_func = fn;
+       mcmd->flags = flags;
+
+       /* Map the driver's QLA_TGT_* fn code to the generic TMR_* code */
+       switch (fn) {
+       case QLA_TGT_CLEAR_ACA:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
+                   "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
+               tmr_func = TMR_CLEAR_ACA;
+               break;
+
+       case QLA_TGT_TARGET_RESET:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
+                   "qla_target(%d): TARGET_RESET received\n",
+                   sess->vha->vp_idx);
+               tmr_func = TMR_TARGET_WARM_RESET;
+               break;
+
+       case QLA_TGT_LUN_RESET:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
+                   "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
+               tmr_func = TMR_LUN_RESET;
+               break;
+
+       case QLA_TGT_CLEAR_TS:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
+                   "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
+               tmr_func = TMR_CLEAR_TASK_SET;
+               break;
+
+       case QLA_TGT_ABORT_TS:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
+                   "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
+               tmr_func = TMR_ABORT_TASK_SET;
+               break;
+#if 0
+       case QLA_TGT_ABORT_ALL:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
+                   "qla_target(%d): Doing ABORT_ALL_TASKS\n",
+                   sess->vha->vp_idx);
+               tmr_func = 0;
+               break;
+
+       case QLA_TGT_ABORT_ALL_SESS:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
+                   "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
+                   sess->vha->vp_idx);
+               tmr_func = 0;
+               break;
+
+       case QLA_TGT_NEXUS_LOSS_SESS:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
+                   "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
+                   sess->vha->vp_idx);
+               tmr_func = 0;
+               break;
+
+       case QLA_TGT_NEXUS_LOSS:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
+                   "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
+               tmr_func = 0;
+               break;
+#endif
+       default:
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
+                   "qla_target(%d): Unknown task mgmt fn 0x%x\n",
+                   sess->vha->vp_idx, fn);
+               mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+               return -ENOSYS;
+       }
+
+       res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
+       if (res != 0) {
+               ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
+                   "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
+                   sess->vha->vp_idx, res);
+               mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/*
+ * Dispatch a task-management request carried in an ATIO.  If the initiator
+ * session is already known, issue the TM directly; otherwise queue it for
+ * the session work thread, which will retry once the session exists.
+ */
+static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
+{
+       struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt;
+       struct qla_tgt_sess *sess;
+       uint32_t lun, unpacked_lun;
+       int lun_size, fn;
+
+       tgt = ha->tgt.qla_tgt;
+
+       lun = a->u.isp24.fcp_cmnd.lun;
+       lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);     /* currently unused */
+       unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+       fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+
+       sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+           a->u.isp24.fcp_hdr.s_id);
+       if (sess == NULL) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
+                   "qla_target(%d): task mgmt fn 0x%x for "
+                   "non-existant session\n", vha->vp_idx, fn);
+               return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
+                   sizeof(struct atio_from_isp));
+       }
+
+       return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/*
+ * Issue an ABORT TASK for @sess based on the immediate notify @iocb:
+ * allocate a management command, record the original notify (needed for the
+ * eventual response), decode the LUN from the notify payload and submit a
+ * TMR_ABORT_TASK keyed on the notify's sequence id.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -EFAULT when
+ * handle_tmr() rejects the abort (the mcmd is freed in that case).
+ */
+static int __qlt_abort_task(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
+{
+       struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_mgmt_cmd *mcmd;
+       uint32_t lun, unpacked_lun;
+       int rc;
+
+       mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+       if (mcmd == NULL) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
+                   "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
+                   vha->vp_idx, __func__);
+               return -ENOMEM;
+       }
+       memset(mcmd, 0, sizeof(*mcmd));
+
+       mcmd->sess = sess;
+       /* keep the original notify around for the response path */
+       memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+           sizeof(mcmd->orig_iocb.imm_ntfy));
+
+       lun = a->u.isp24.fcp_cmnd.lun;
+       unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+       rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
+           le16_to_cpu(iocb->u.isp2x.seq_id));
+       if (rc != 0) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
+                   "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
+                   vha->vp_idx, rc);
+               mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/*
+ * Abort-task entry point: resolve the session by loop id and issue the
+ * abort, or defer to the session work thread when the session is not yet
+ * known.
+ */
+static int qlt_abort_task(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *iocb)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess;
+       int loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
+
+       sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+       if (sess != NULL)
+               return __qlt_abort_task(vha, iocb, sess);
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
+           "qla_target(%d): task abort for unexisting "
+           "session\n", vha->vp_idx);
+       return qlt_sched_sess_work(ha->tgt.qla_tgt,
+           QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ *
+ * Handle an ELS frame delivered as an immediate notify on an ISP24xx.
+ * Login/logout-class frames (PLOGI/FLOGI/PRLI/LOGO/PRLO) and unknown
+ * opcodes reset the nexus via qlt_reset(); PDISC/ADISC acknowledge any
+ * pending link-reinit notify and request a notify-ack.
+ *
+ * Returns 1 when the caller should send a notify ack, otherwise the
+ * qlt_reset() result.
+ */
+static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *iocb)
+{
+       struct qla_hw_data *ha = vha->hw;
+       int res = 0;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
+           "qla_target(%d): Port ID: 0x%02x:%02x:%02x"
+           " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0],
+           iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2],
+           iocb->u.isp24.status_subcode);
+
+       switch (iocb->u.isp24.status_subcode) {
+       case ELS_PLOGI:
+       case ELS_FLOGI:
+       case ELS_PRLI:
+       case ELS_LOGO:
+       case ELS_PRLO:
+               res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+               break;
+       case ELS_PDISC:
+       case ELS_ADISC:
+       {
+               struct qla_tgt *tgt = ha->tgt.qla_tgt;
+               /* flush any deferred link-reinit acknowledgement first */
+               if (tgt->link_reinit_iocb_pending) {
+                       qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
+                           0, 0, 0, 0, 0, 0);
+                       tgt->link_reinit_iocb_pending = 0;
+               }
+               res = 1; /* send notify ack */
+               break;
+       }
+
+       default:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
+                   "qla_target(%d): Unsupported ELS command %x "
+                   "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
+               res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+               break;
+       }
+
+       return res;
+}
+
+/*
+ * qlt_set_data_offset() - rebase cmd's scatterlist to start at byte @offset.
+ *
+ * Used on the SRR (Sequence Retransmission Request) path to build a fresh
+ * SG table that begins @offset bytes into the original cmd->sg list, so a
+ * partial retransmission can start mid-buffer.
+ *
+ * NOTE(review): the unconditional "return -1" below the FIXME makes the
+ * remainder of this function dead code -- every non-zero relative offset
+ * is currently rejected until this path has been tested.
+ *
+ * Returns 0 on success, -1 or a negative errno on failure.
+ */
+static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
+{
+       struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
+       size_t first_offset = 0, rem_offset = offset, tmp = 0;
+       int i, sg_srr_cnt, bufflen = 0;
+
+       ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
+           "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
+           "cmd->sg_cnt: %u, direction: %d\n",
+           cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+
+       /*
+        * FIXME: Reject non zero SRR relative offset until we can test
+        * this code properly.
+        */
+       pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
+       return -1;
+
+       if (!cmd->sg || !cmd->sg_cnt) {
+               ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
+                   "Missing cmd->sg or zero cmd->sg_cnt in"
+                   " qla_tgt_set_data_offset\n");
+               return -EINVAL;
+       }
+       /*
+        * Walk the current cmd->sg list until we locate the new sg_srr_start
+        */
+       for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
+               ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
+                   "sg[%d]: %p page: %p, length: %d, offset: %d\n",
+                   i, sg, sg_page(sg), sg->length, sg->offset);
+
+               if ((sg->length + tmp) > offset) {
+                       /*
+                        * @offset lands inside this element; rem_offset is
+                        * the residual byte offset within it.
+                        */
+                       first_offset = rem_offset;
+                       sg_srr_start = sg;
+                       ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
+                           "Found matching sg[%d], using %p as sg_srr_start, "
+                           "and using first_offset: %zu\n", i, sg,
+                           first_offset);
+                       break;
+               }
+               tmp += sg->length;
+               rem_offset -= sg->length;
+       }
+
+       if (!sg_srr_start) {
+               ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
+                   "Unable to locate sg_srr_start for offset: %u\n", offset);
+               return -EINVAL;
+       }
+       /* Entries remaining from the matched element through the tail. */
+       sg_srr_cnt = (cmd->sg_cnt - i);
+
+       sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
+       if (!sg_srr) {
+               ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
+                   "Unable to allocate sgp\n");
+               return -ENOMEM;
+       }
+       sg_init_table(sg_srr, sg_srr_cnt);
+       sgp = &sg_srr[0];
+       /*
+        * Walk the remaining list for sg_srr_start, mapping to the newly
+        * allocated sg_srr taking first_offset into account.
+        */
+       for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
+               if (first_offset) {
+                       /* First element: skip the already-sent prefix. */
+                       sg_set_page(sgp, sg_page(sg),
+                           (sg->length - first_offset), first_offset);
+                       first_offset = 0;
+               } else {
+                       sg_set_page(sgp, sg_page(sg), sg->length, 0);
+               }
+               bufflen += sgp->length;
+
+               sgp = sg_next(sgp);
+               if (!sgp)
+                       break;
+               }
+
+       cmd->sg = sg_srr;
+       cmd->sg_cnt = sg_srr_cnt;
+       cmd->bufflen = bufflen;
+       cmd->offset += offset;
+       /* New table is ours; freed via free_sg in qlt_handle_srr_work(). */
+       cmd->free_sg = 1;
+
+       ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
+       ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
+           cmd->sg_cnt);
+       ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
+           cmd->bufflen);
+       ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
+           cmd->offset);
+
+       if (cmd->sg_cnt < 0)
+               BUG();
+
+       if (cmd->bufflen < 0)
+               BUG();
+
+       return 0;
+}
+
+/*
+ * qlt_srr_adjust_data() - translate an SRR relative offset into an action.
+ *
+ * @srr_rel_offs: offset requested by the initiator's SRR.
+ * @xmit_type:    out parameter; defaults to QLA_TGT_XMIT_ALL, or set to
+ *                QLA_TGT_XMIT_STATUS when the whole buffer was already
+ *                covered and only status must be re-sent.
+ *
+ * Computes the offset relative to what this command already covers
+ * (cmd->offset); for a positive remainder the SG list is rebased via
+ * qlt_set_data_offset().  Returns 0 on success, negative on failure.
+ */
+static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
+       uint32_t srr_rel_offs, int *xmit_type)
+{
+       int res = 0, rel_offs;
+
+       rel_offs = srr_rel_offs - cmd->offset;
+       ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
+           srr_rel_offs, rel_offs);
+
+       *xmit_type = QLA_TGT_XMIT_ALL;
+
+       if (rel_offs < 0) {
+               /* Initiator asked for data before what we have covered. */
+               ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
+                   "qla_target(%d): SRR rel_offs (%d) < 0",
+                   cmd->vha->vp_idx, rel_offs);
+               res = -1;
+       } else if (rel_offs == cmd->bufflen)
+               *xmit_type = QLA_TGT_XMIT_STATUS;
+       else if (rel_offs > 0)
+               res = qlt_set_data_offset(cmd, rel_offs);
+
+       return res;
+}
+
+/*
+ * Process a matched SRR pair (immediate notify @imm + CTIO @sctio) for
+ * sctio->cmd: accept the retransmission request with a notify ack and
+ * re-send the data and/or status, or reject it and terminate the exchange.
+ *
+ * No locks held on entry, thread (work queue) context; ha->hardware_lock
+ * is taken only around notify-ack / term-exchange IOCB submission.
+ */
+static void qlt_handle_srr(struct scsi_qla_host *vha,
+       struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
+{
+       struct imm_ntfy_from_isp *ntfy =
+           (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_cmd *cmd = sctio->cmd;
+       struct se_cmd *se_cmd = &cmd->se_cmd;
+       unsigned long flags;
+       int xmit_type = 0, resp = 0;
+       uint32_t offset;
+       uint16_t srr_ui;
+
+       offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
+       srr_ui = ntfy->u.isp24.srr_ui;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
+           cmd, srr_ui);
+
+       switch (srr_ui) {
+       case SRR_IU_STATUS:
+               /* Only the status frame must be re-sent. */
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+               qlt_send_notify_ack(vha, ntfy,
+                   0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               xmit_type = QLA_TGT_XMIT_STATUS;
+               resp = 1;
+               break;
+       case SRR_IU_DATA_IN:
+               if (!cmd->sg || !cmd->sg_cnt) {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
+                           "Unable to process SRR_IU_DATA_IN due to"
+                           " missing cmd->sg, state: %d\n", cmd->state);
+                       dump_stack();
+                       goto out_reject;
+               }
+               if (se_cmd->scsi_status != 0) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe02a,
+                           "Rejecting SRR_IU_DATA_IN with non GOOD "
+                           "scsi_status\n");
+                       goto out_reject;
+               }
+               cmd->bufflen = se_cmd->data_length;
+
+               if (qlt_has_data(cmd)) {
+                       if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+                               goto out_reject;
+                       spin_lock_irqsave(&ha->hardware_lock, flags);
+                       qlt_send_notify_ack(vha, ntfy,
+                           0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+                       resp = 1;
+               } else {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
+                           "qla_target(%d): SRR for in data for cmd "
+                           "without them (tag %d, SCSI status %d), "
+                           "reject", vha->vp_idx, cmd->tag,
+                           cmd->se_cmd.scsi_status);
+                       goto out_reject;
+               }
+               break;
+       case SRR_IU_DATA_OUT:
+               if (!cmd->sg || !cmd->sg_cnt) {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
+                           "Unable to process SRR_IU_DATA_OUT due to"
+                           " missing cmd->sg\n");
+                       dump_stack();
+                       goto out_reject;
+               }
+               if (se_cmd->scsi_status != 0) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe02b,
+                           "Rejecting SRR_IU_DATA_OUT"
+                           " with non GOOD scsi_status\n");
+                       goto out_reject;
+               }
+               cmd->bufflen = se_cmd->data_length;
+
+               if (qlt_has_data(cmd)) {
+                       if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+                               goto out_reject;
+                       spin_lock_irqsave(&ha->hardware_lock, flags);
+                       qlt_send_notify_ack(vha, ntfy,
+                           0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+                       /* Data-out: re-arm the transfer, no response here. */
+                       if (xmit_type & QLA_TGT_XMIT_DATA)
+                               qlt_rdy_to_xfer(cmd);
+               } else {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
+                           "qla_target(%d): SRR for out data for cmd "
+                           "without them (tag %d, SCSI status %d), "
+                           "reject", vha->vp_idx, cmd->tag,
+                           cmd->se_cmd.scsi_status);
+                       goto out_reject;
+               }
+               break;
+       default:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
+                   "qla_target(%d): Unknown srr_ui value %x",
+                   vha->vp_idx, srr_ui);
+               goto out_reject;
+       }
+
+       /* Transmit response in case of status and data-in cases */
+       if (resp)
+               qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+
+       return;
+
+out_reject:
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
+           NOTIFY_ACK_SRR_FLAGS_REJECT,
+           NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+           NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+       if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+               /*
+                * Command is still waiting for data: mark it as if the data
+                * phase completed instead of terminating the exchange.
+                */
+               cmd->state = QLA_TGT_STATE_DATA_IN;
+               dump_stack();
+       } else
+               qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/*
+ * Send an SRR-reject notify ack for @imm and free it.
+ *
+ * @ha_locked: non-zero when the caller already holds ha->hardware_lock;
+ *             otherwise it is taken around the notify-ack submission.
+ */
+static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
+       struct qla_tgt_srr_imm *imm, int ha_locked)
+{
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags = 0;
+
+       if (!ha_locked)
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
+           NOTIFY_ACK_SRR_FLAGS_REJECT,
+           NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+           NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+
+       if (!ha_locked)
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       kfree(imm);
+}
+
+/*
+ * Work-queue handler pairing queued CTIO SRRs with their immediate notify
+ * SRRs (matched by srr_id), resetting the command's SG state back to the
+ * se_cmd defaults, and handing each pair to qlt_handle_srr().
+ *
+ * The srr_ctio_list scan restarts from the top after each processed pair
+ * because srr_lock must be dropped while qlt_handle_srr() runs.
+ */
+static void qlt_handle_srr_work(struct work_struct *work)
+{
+       struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
+       struct scsi_qla_host *vha = tgt->vha;
+       struct qla_tgt_srr_ctio *sctio;
+       unsigned long flags;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
+           tgt);
+
+restart:
+       spin_lock_irqsave(&tgt->srr_lock, flags);
+       list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
+               struct qla_tgt_srr_imm *imm, *i, *ti;
+               struct qla_tgt_cmd *cmd;
+               struct se_cmd *se_cmd;
+
+               imm = NULL;
+               /* Find the (single) IMM SRR with the same srr_id. */
+               list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
+                                               srr_list_entry) {
+                       if (i->srr_id == sctio->srr_id) {
+                               list_del(&i->srr_list_entry);
+                               if (imm) {
+                                       /* Duplicate IMM SRR: reject extras. */
+                                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
+                                         "qla_target(%d): There must be "
+                                         "only one IMM SRR per CTIO SRR "
+                                         "(IMM SRR %p, id %d, CTIO %p\n",
+                                         vha->vp_idx, i, i->srr_id, sctio);
+                                       qlt_reject_free_srr_imm(tgt->vha, i, 0);
+                               } else
+                                       imm = i;
+                       }
+               }
+
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
+                   "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
+                   sctio->srr_id);
+
+               if (imm == NULL) {
+                       /* Counterpart not arrived yet; leave CTIO queued. */
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
+                           "Not found matching IMM for SRR CTIO (id %d)\n",
+                           sctio->srr_id);
+                       continue;
+               } else
+                       list_del(&sctio->srr_list_entry);
+
+               spin_unlock_irqrestore(&tgt->srr_lock, flags);
+
+               cmd = sctio->cmd;
+               /*
+                * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
+                * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
+                * logic..
+                */
+               cmd->offset = 0;
+               if (cmd->free_sg) {
+                       /* Drop any rebased SG table from a previous SRR. */
+                       kfree(cmd->sg);
+                       cmd->sg = NULL;
+                       cmd->free_sg = 0;
+               }
+               se_cmd = &cmd->se_cmd;
+
+               cmd->sg_cnt = se_cmd->t_data_nents;
+               cmd->sg = se_cmd->t_data_sg;
+
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
+                   "SRR cmd %p (se_cmd %p, tag %d, op %x), "
+                   "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
+                   se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
+
+               qlt_handle_srr(vha, sctio, imm);
+
+               kfree(imm);
+               kfree(sctio);
+               goto restart;
+       }
+       spin_unlock_irqrestore(&tgt->srr_lock, flags);
+}
+
+/*
+ * Queue an immediate notify SRR and, when its CTIO counterpart has already
+ * arrived (imm_srr_id caught up with ctio_srr_id and a matching entry is
+ * on srr_ctio_list), schedule srr_work to process the pair.  On allocation
+ * failure, or when the counterpart CTIO is missing, the SRR is rejected
+ * and matching queued CTIO SRRs are terminated.
+ *
+ * ha->hardware_lock supposed to be held on entry.
+ */
+static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *iocb)
+{
+       struct qla_tgt_srr_imm *imm;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt_srr_ctio *sctio;
+
+       tgt->imm_srr_id++;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
+           vha->vp_idx);
+
+       imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
+       if (imm != NULL) {
+               memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
+
+               /* IRQ is already OFF */
+               spin_lock(&tgt->srr_lock);
+               imm->srr_id = tgt->imm_srr_id;
+               list_add_tail(&imm->srr_list_entry,
+                   &tgt->srr_imm_list);
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
+                   "IMM NTFY SRR %p added (id %d, ui %x)\n",
+                   imm, imm->srr_id, iocb->u.isp24.srr_ui);
+               if (tgt->imm_srr_id == tgt->ctio_srr_id) {
+                       /* CTIO side may already be queued: look for it. */
+                       int found = 0;
+                       list_for_each_entry(sctio, &tgt->srr_ctio_list,
+                           srr_list_entry) {
+                               if (sctio->srr_id == imm->srr_id) {
+                                       found = 1;
+                                       break;
+                               }
+                       }
+                       if (found) {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
+                                   "Scheduling srr work\n");
+                               schedule_work(&tgt->srr_work);
+                       } else {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
+                                   "qla_target(%d): imm_srr_id "
+                                   "== ctio_srr_id (%d), but there is no "
+                                   "corresponding SRR CTIO, deleting IMM "
+                                   "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
+                                   imm);
+                               list_del(&imm->srr_list_entry);
+
+                               kfree(imm);
+
+                               spin_unlock(&tgt->srr_lock);
+                               goto out_reject;
+                       }
+               }
+               spin_unlock(&tgt->srr_lock);
+       } else {
+               struct qla_tgt_srr_ctio *ts;
+
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
+                   "qla_target(%d): Unable to allocate SRR IMM "
+                   "entry, SRR request will be rejected\n", vha->vp_idx);
+
+               /* IRQ is already OFF */
+               spin_lock(&tgt->srr_lock);
+               /* Terminate any CTIO SRRs that were waiting for this IMM. */
+               list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
+                   srr_list_entry) {
+                       if (sctio->srr_id == tgt->imm_srr_id) {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
+                                   "CTIO SRR %p deleted (id %d)\n",
+                                   sctio, sctio->srr_id);
+                               list_del(&sctio->srr_list_entry);
+                               qlt_send_term_exchange(vha, sctio->cmd,
+                                   &sctio->cmd->atio, 1);
+                               kfree(sctio);
+                       }
+               }
+               spin_unlock(&tgt->srr_lock);
+               goto out_reject;
+       }
+
+       return;
+
+out_reject:
+       qlt_send_notify_ack(vha, iocb, 0, 0, 0,
+           NOTIFY_ACK_SRR_FLAGS_REJECT,
+           NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+           NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+}
+
+/*
+ * Dispatch an immediate notify IOCB from the firmware to the matching
+ * handler (LIP/link events, logouts, task management, ELS, SRR).  A notify
+ * ack is sent afterwards unless the chosen handler took ownership of the
+ * acknowledgement (send_notify_ack cleared).
+ *
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then
+ * reacquire.
+ */
+static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *iocb)
+{
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t add_flags = 0;
+       int send_notify_ack = 1;
+       uint16_t status;
+
+       status = le16_to_cpu(iocb->u.isp2x.status);
+       switch (status) {
+       case IMM_NTFY_LIP_RESET:
+       {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
+                   "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
+                   vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
+                   iocb->u.isp24.status_subcode);
+
+               if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+                       send_notify_ack = 0;
+               break;
+       }
+
+       case IMM_NTFY_LIP_LINK_REINIT:
+       {
+               struct qla_tgt *tgt = ha->tgt.qla_tgt;
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
+                   "qla_target(%d): LINK REINIT (loop %#x, "
+                   "subcode %x)\n", vha->vp_idx,
+                   le16_to_cpu(iocb->u.isp24.nport_handle),
+                   iocb->u.isp24.status_subcode);
+               if (tgt->link_reinit_iocb_pending) {
+                       /* Ack the previous, still-pending REINIT first. */
+                       qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
+                           0, 0, 0, 0, 0, 0);
+               }
+               memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
+               tgt->link_reinit_iocb_pending = 1;
+               /*
+                * QLogic requires to wait after LINK REINIT for possible
+                * PDISC or ADISC ELS commands
+                */
+               send_notify_ack = 0;
+               break;
+       }
+
+       case IMM_NTFY_PORT_LOGOUT:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
+                   "qla_target(%d): Port logout (loop "
+                   "%#x, subcode %x)\n", vha->vp_idx,
+                   le16_to_cpu(iocb->u.isp24.nport_handle),
+                   iocb->u.isp24.status_subcode);
+
+               if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
+                       send_notify_ack = 0;
+               /* The sessions will be cleared in the callback, if needed */
+               break;
+
+       case IMM_NTFY_GLBL_TPRLO:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
+                   "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
+               if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+                       send_notify_ack = 0;
+               /* The sessions will be cleared in the callback, if needed */
+               break;
+
+       case IMM_NTFY_PORT_CONFIG:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
+                   "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
+                   status);
+               if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+                       send_notify_ack = 0;
+               /* The sessions will be cleared in the callback, if needed */
+               break;
+
+       case IMM_NTFY_GLBL_LOGO:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
+                   "qla_target(%d): Link failure detected\n",
+                   vha->vp_idx);
+               /* I_T nexus loss */
+               if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+                       send_notify_ack = 0;
+               break;
+
+       case IMM_NTFY_IOCB_OVERFLOW:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
+                   "qla_target(%d): Cannot provide requested "
+                   "capability (IOCB overflowed the immediate notify "
+                   "resource count)\n", vha->vp_idx);
+               break;
+
+       case IMM_NTFY_ABORT_TASK:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
+                   "qla_target(%d): Abort Task (S %08x I %#x -> "
+                   "L %#x)\n", vha->vp_idx,
+                   le16_to_cpu(iocb->u.isp2x.seq_id),
+                   GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
+                   le16_to_cpu(iocb->u.isp2x.lun));
+               if (qlt_abort_task(vha, iocb) == 0)
+                       send_notify_ack = 0;
+               break;
+
+       case IMM_NTFY_RESOURCE:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
+                   "qla_target(%d): Out of resources, host %ld\n",
+                   vha->vp_idx, vha->host_no);
+               break;
+
+       case IMM_NTFY_MSG_RX:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
+                   "qla_target(%d): Immediate notify task %x\n",
+                   vha->vp_idx, iocb->u.isp2x.task_flags);
+               if (qlt_handle_task_mgmt(vha, iocb) == 0)
+                       send_notify_ack = 0;
+               break;
+
+       case IMM_NTFY_ELS:
+               if (qlt_24xx_handle_els(vha, iocb) == 0)
+                       send_notify_ack = 0;
+               break;
+
+       case IMM_NTFY_SRR:
+               /* SRR path sends its own (accept or reject) notify ack. */
+               qlt_prepare_srr_imm(vha, iocb);
+               send_notify_ack = 0;
+               break;
+
+       default:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
+                   "qla_target(%d): Received unknown immediate "
+                   "notify status %x\n", vha->vp_idx, status);
+               break;
+       }
+
+       if (send_notify_ack)
+               qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
+}
+
+/*
+ * Build and submit a status-mode-1 CTIO type 7 that returns @status (e.g.
+ * SAM_STAT_BUSY / SAM_STAT_TASK_SET_FULL) for @atio without a backing
+ * se_cmd.  When the originating session cannot be found the exchange is
+ * terminated instead.
+ *
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then
+ * reacquire.  This function sends busy to ISP 2xxx or 24xx.
+ */
+static void qlt_send_busy(struct scsi_qla_host *vha,
+       struct atio_from_isp *atio, uint16_t status)
+{
+       struct ctio7_to_24xx *ctio24;
+       struct qla_hw_data *ha = vha->hw;
+       request_t *pkt;
+       struct qla_tgt_sess *sess = NULL;
+
+       sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+           atio->u.isp24.fcp_hdr.s_id);
+       if (!sess) {
+               qlt_send_term_exchange(vha, NULL, atio, 1);
+               return;
+       }
+       /* Sending marker isn't necessary, since we called from ISR */
+
+       pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+       if (!pkt) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
+                   "qla_target(%d): %s failed: unable to allocate "
+                   "request packet", vha->vp_idx, __func__);
+               return;
+       }
+
+       pkt->entry_count = 1;
+       /* No completion processing wanted for this handle. */
+       pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+       ctio24 = (struct ctio7_to_24xx *)pkt;
+       ctio24->entry_type = CTIO_TYPE7;
+       ctio24->nport_handle = sess->loop_id;
+       ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+       ctio24->vp_index = vha->vp_idx;
+       /* s_id is copied byte-reversed into the initiator id field. */
+       ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+       ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+       ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+       ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+       ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
+           __constant_cpu_to_le16(
+               CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
+               CTIO7_FLAGS_DONT_RET_CTIO);
+       /*
+        * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
+        * if the explicit conformation is used.
+        */
+       ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+       ctio24->u.status1.scsi_status = cpu_to_le16(status);
+       ctio24->u.status1.residual = get_unaligned((uint32_t *)
+           &atio->u.isp24.fcp_cmnd.add_cdb[
+           atio->u.isp24.fcp_cmnd.add_cdb_len]);
+       /*
+        * NOTE(review): scsi_status was stored via cpu_to_le16() above but
+        * SS_RESIDUAL_UNDER is OR-ed in host byte order -- verify this is
+        * correct on big-endian hosts.
+        */
+       if (ctio24->u.status1.residual != 0)
+               ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
+       qla2x00_start_iocbs(vha, vha->req);
+}
+
+/*
+ * Entry point for ATIO packets delivered by the qla2xxx initiator core.
+ * Routes ATIO_TYPE7 entries to command / task-management handling and
+ * IMMED_NOTIFY entries to qlt_handle_imm_notify(); unservicable commands
+ * are answered with BUSY status (or a terminated exchange).
+ *
+ * ha->hardware_lock supposed to be held on entry.
+ * Called via callback from qla2xxx.
+ */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
+       struct atio_from_isp *atio)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       int rc;
+
+       if (unlikely(tgt == NULL)) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
+                   "ATIO pkt, but no tgt (ha %p)", ha);
+               return;
+       }
+       ql_dbg(ql_dbg_tgt, vha, 0xe02c,
+           "qla_target(%d): ATIO pkt %p: type %02x count %02x",
+           vha->vp_idx, atio, atio->u.raw.entry_type,
+           atio->u.raw.entry_count);
+       /*
+        * In tgt_stop mode we also should allow all requests to pass.
+        * Otherwise, some commands can stuck.
+        */
+
+       tgt->irq_cmd_count++;
+
+       switch (atio->u.raw.entry_type) {
+       case ATIO_TYPE7:
+               ql_dbg(ql_dbg_tgt, vha, 0xe02d,
+                   "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
+                   "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
+                   vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
+                   atio->u.isp24.fcp_cmnd.rddata,
+                   atio->u.isp24.fcp_cmnd.wrdata,
+                   atio->u.isp24.fcp_cmnd.add_cdb_len,
+                   be32_to_cpu(get_unaligned((uint32_t *)
+                       &atio->u.isp24.fcp_cmnd.add_cdb[
+                       atio->u.isp24.fcp_cmnd.add_cdb_len])),
+                   atio->u.isp24.fcp_hdr.s_id[0],
+                   atio->u.isp24.fcp_hdr.s_id[1],
+                   atio->u.isp24.fcp_hdr.s_id[2]);
+
+               if (unlikely(atio->u.isp24.exchange_addr ==
+                   ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe058,
+                           "qla_target(%d): ATIO_TYPE7 "
+                           "received with UNKNOWN exchange address, "
+                           "sending QUEUE_FULL\n", vha->vp_idx);
+                       qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
+                       break;
+               }
+               if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
+                       rc = qlt_handle_cmd_for_atio(vha, atio);
+               else
+                       rc = qlt_handle_task_mgmt(vha, atio);
+               if (unlikely(rc != 0)) {
+                       if (rc == -ESRCH) {
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+                               qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+#else
+                               qlt_send_term_exchange(vha, NULL, atio, 1);
+#endif
+                       } else {
+                               if (tgt->tgt_stop) {
+                                       ql_dbg(ql_dbg_tgt, vha, 0xe059,
+                                           "qla_target: Unable to send "
+                                           "command to target for req, "
+                                           "ignoring.\n");
+                               } else {
+                                       ql_dbg(ql_dbg_tgt, vha, 0xe05a,
+                                           "qla_target(%d): Unable to send "
+                                           "command to target, sending BUSY "
+                                           "status.\n", vha->vp_idx);
+                                       qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+                               }
+                       }
+               }
+               break;
+
+       case IMMED_NOTIFY_TYPE:
+       {
+               if (unlikely(atio->u.isp2x.entry_status != 0)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe05b,
+                           "qla_target(%d): Received ATIO packet %x "
+                           "with error status %x\n", vha->vp_idx,
+                           atio->u.raw.entry_type,
+                           atio->u.isp2x.entry_status);
+                       break;
+               }
+               ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
+               qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
+               break;
+       }
+
+       default:
+               ql_dbg(ql_dbg_tgt, vha, 0xe05c,
+                   "qla_target(%d): Received unknown ATIO atio "
+                   "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+               break;
+       }
+
+       tgt->irq_cmd_count--;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+
+       if (unlikely(tgt == NULL)) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe05d,
+                   "qla_target(%d): Response pkt %x received, but no "
+                   "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
+               return;
+       }
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe02f,
+           "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
+           "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
+           pkt->entry_count, pkt->entry_status, pkt->handle);
+
+       /*
+        * In tgt_stop mode we also should allow all requests to pass.
+        * Otherwise, some commands can stuck.
+        */
+
+       tgt->irq_cmd_count++;
+
+       switch (pkt->entry_type) {
+       case CTIO_TYPE7:
+       {
+               struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+               ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
+                   vha->vp_idx);
+               qlt_do_ctio_completion(vha, entry->handle,
+                   le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+                   entry);
+               break;
+       }
+
+       case ACCEPT_TGT_IO_TYPE:
+       {
+               struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
+               int rc;
+               ql_dbg(ql_dbg_tgt, vha, 0xe031,
+                   "ACCEPT_TGT_IO instance %d status %04x "
+                   "lun %04x read/write %d data_length %04x "
+                   "target_id %02x rx_id %04x\n ", vha->vp_idx,
+                   le16_to_cpu(atio->u.isp2x.status),
+                   le16_to_cpu(atio->u.isp2x.lun),
+                   atio->u.isp2x.execution_codes,
+                   le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
+                   atio), atio->u.isp2x.rx_id);
+               if (atio->u.isp2x.status !=
+                   __constant_cpu_to_le16(ATIO_CDB_VALID)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe05e,
+                           "qla_target(%d): ATIO with error "
+                           "status %x received\n", vha->vp_idx,
+                           le16_to_cpu(atio->u.isp2x.status));
+                       break;
+               }
+               ql_dbg(ql_dbg_tgt, vha, 0xe032,
+                   "FCP CDB: 0x%02x, sizeof(cdb): %lu",
+                   atio->u.isp2x.cdb[0], (unsigned long
+                   int)sizeof(atio->u.isp2x.cdb));
+
+               rc = qlt_handle_cmd_for_atio(vha, atio);
+               if (unlikely(rc != 0)) {
+                       if (rc == -ESRCH) {
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+                               qlt_send_busy(vha, atio, 0);
+#else
+                               qlt_send_term_exchange(vha, NULL, atio, 1);
+#endif
+                       } else {
+                               if (tgt->tgt_stop) {
+                                       ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+                                           "qla_target: Unable to send "
+                                           "command to target, sending TERM "
+                                           "EXCHANGE for rsp\n");
+                                       qlt_send_term_exchange(vha, NULL,
+                                           atio, 1);
+                               } else {
+                                       ql_dbg(ql_dbg_tgt, vha, 0xe060,
+                                           "qla_target(%d): Unable to send "
+                                           "command to target, sending BUSY "
+                                           "status\n", vha->vp_idx);
+                                       qlt_send_busy(vha, atio, 0);
+                               }
+                       }
+               }
+       }
+       break;
+
+       case CONTINUE_TGT_IO_TYPE:
+       {
+               struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+               ql_dbg(ql_dbg_tgt, vha, 0xe033,
+                   "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
+               qlt_do_ctio_completion(vha, entry->handle,
+                   le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+                   entry);
+               break;
+       }
+
+       case CTIO_A64_TYPE:
+       {
+               struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+               ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
+                   vha->vp_idx);
+               qlt_do_ctio_completion(vha, entry->handle,
+                   le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+                   entry);
+               break;
+       }
+
+       case IMMED_NOTIFY_TYPE:
+               ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
+               qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
+               break;
+
+       case NOTIFY_ACK_TYPE:
+               if (tgt->notify_ack_expected > 0) {
+                       struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+                       ql_dbg(ql_dbg_tgt, vha, 0xe036,
+                           "NOTIFY_ACK seq %08x status %x\n",
+                           le16_to_cpu(entry->u.isp2x.seq_id),
+                           le16_to_cpu(entry->u.isp2x.status));
+                       tgt->notify_ack_expected--;
+                       if (entry->u.isp2x.status !=
+                           __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
+                               ql_dbg(ql_dbg_tgt, vha, 0xe061,
+                                   "qla_target(%d): NOTIFY_ACK "
+                                   "failed %x\n", vha->vp_idx,
+                                   le16_to_cpu(entry->u.isp2x.status));
+                       }
+               } else {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe062,
+                           "qla_target(%d): Unexpected NOTIFY_ACK received\n",
+                           vha->vp_idx);
+               }
+               break;
+
+       case ABTS_RECV_24XX:
+               ql_dbg(ql_dbg_tgt, vha, 0xe037,
+                   "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
+               qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
+               break;
+
+       case ABTS_RESP_24XX:
+               if (tgt->abts_resp_expected > 0) {
+                       struct abts_resp_from_24xx_fw *entry =
+                               (struct abts_resp_from_24xx_fw *)pkt;
+                       ql_dbg(ql_dbg_tgt, vha, 0xe038,
+                           "ABTS_RESP_24XX: compl_status %x\n",
+                           entry->compl_status);
+                       tgt->abts_resp_expected--;
+                       if (le16_to_cpu(entry->compl_status) !=
+                           ABTS_RESP_COMPL_SUCCESS) {
+                               if ((entry->error_subcode1 == 0x1E) &&
+                                   (entry->error_subcode2 == 0)) {
+                                       /*
+                                        * We've got a race here: aborted
+                                        * exchange not terminated, i.e.
+                                        * response for the aborted command was
+                                        * sent between the abort request was
+                                        * received and processed.
+                                        * Unfortunately, the firmware has a
+                                        * silly requirement that all aborted
+                                        * exchanges must be explicitly
+                                        * terminated, otherwise it refuses to
+                                        * send responses for the abort
+                                        * requests. So, we have to
+                                        * (re)terminate the exchange and retry
+                                        * the abort response.
+                                        */
+                                       qlt_24xx_retry_term_exchange(vha,
+                                           entry);
+                               } else
+                                       ql_dbg(ql_dbg_tgt, vha, 0xe063,
+                                           "qla_target(%d): ABTS_RESP_24XX "
+                                           "failed %x (subcode %x:%x)",
+                                           vha->vp_idx, entry->compl_status,
+                                           entry->error_subcode1,
+                                           entry->error_subcode2);
+                       }
+               } else {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe064,
+                           "qla_target(%d): Unexpected ABTS_RESP_24XX "
+                           "received\n", vha->vp_idx);
+               }
+               break;
+
+       default:
+               ql_dbg(ql_dbg_tgt, vha, 0xe065,
+                   "qla_target(%d): Received unknown response pkt "
+                   "type %x\n", vha->vp_idx, pkt->entry_type);
+               break;
+       }
+
+       tgt->irq_cmd_count--;
+}
+
+/*
+ * Dispatch an asynchronous (mailbox) event to the target-mode code.
+ *
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then
+ * reacquire.
+ */
+void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
+       uint16_t *mailbox)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       int reason_code;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe039,
+           "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
+           vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
+           ha->operating_mode, ha->current_topology);
+
+       /* No target-mode template registered -- nothing to do */
+       if (!ha->tgt.tgt_ops)
+               return;
+
+       if (unlikely(tgt == NULL)) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe03a,
+                   "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
+               return;
+       }
+
+       /* Topology-change events are ignored on ISP2100 adapters */
+       if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
+           IS_QLA2100(ha))
+               return;
+       /*
+        * In tgt_stop mode we also should allow all requests to pass.
+        * Otherwise, some commands can stuck.
+        */
+
+       /* Bracketed by the decrement at the end of this function */
+       tgt->irq_cmd_count++;
+
+       switch (code) {
+       case MBA_RESET:                 /* Reset */
+       case MBA_SYSTEM_ERR:            /* System Error */
+       case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
+       case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
+                   "qla_target(%d): System error async event %#x "
+                   "occured", vha->vp_idx, code);
+               break;
+       case MBA_WAKEUP_THRES:          /* Request Queue Wake-up. */
+               set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+               break;
+
+       case MBA_LOOP_UP:
+       {
+               /*
+                * NOTE(review): mailbox[] already holds CPU-order register
+                * values; the le16_to_cpu() calls below look like a
+                * double-swap on big-endian hosts -- verify.
+                */
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
+                   "qla_target(%d): Async LOOP_UP occured "
+                   "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
+                   le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
+                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+               /* Ack a LINK_REINIT IOCB that was parked waiting for loop-up */
+               if (tgt->link_reinit_iocb_pending) {
+                       qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
+                           0, 0, 0, 0, 0, 0);
+                       tgt->link_reinit_iocb_pending = 0;
+               }
+               break;
+       }
+
+       case MBA_LIP_OCCURRED:
+       case MBA_LOOP_DOWN:
+       case MBA_LIP_RESET:
+       case MBA_RSCN_UPDATE:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
+                   "qla_target(%d): Async event %#x occured "
+                   "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, code,
+                   le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
+                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+               break;
+
+       case MBA_PORT_UPDATE:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
+                   "qla_target(%d): Port update async event %#x "
+                   "occured: updating the ports database (m[1]=%x, m[2]=%x, "
+                   "m[3]=%x, m[4]=%x)", vha->vp_idx, code,
+                   le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
+                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+               /* mailbox[2] carries the reason for the port update */
+               reason_code = le16_to_cpu(mailbox[2]);
+               if (reason_code == 0x4)
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
+                           "Async MB 2: Got PLOGI Complete\n");
+               else if (reason_code == 0x7)
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
+                           "Async MB 2: Port Logged Out\n");
+               break;
+
+       default:
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
+                   "qla_target(%d): Async event %#x occured: "
+                   "ignore (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
+                   code, le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
+                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+               break;
+       }
+
+       tgt->irq_cmd_count--;
+}
+
+/*
+ * Allocate a temporary fc_port_t and fill it from the firmware's port
+ * database for @loop_id. Returns NULL on allocation or query failure;
+ * the caller owns (and must kfree()) the returned port.
+ */
+static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
+       uint16_t loop_id)
+{
+       fc_port_t *fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+       int rc;
+
+       if (fcport == NULL) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
+                   "qla_target(%d): Allocation of tmp FC port failed",
+                   vha->vp_idx);
+               return NULL;
+       }
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
+       fcport->loop_id = loop_id;
+
+       rc = qla2x00_get_port_database(vha, fcport, 0);
+       if (rc != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
+                   "qla_target(%d): Failed to retrieve fcport "
+                   "information -- get_port_database() returned %x "
+                   "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
+               kfree(fcport);
+               return NULL;
+       }
+
+       return fcport;
+}
+
+/*
+ * Discover an initiator by big-endian @s_id and create a local session
+ * for it. Must be called under tgt_mutex.
+ */
+static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
+       uint8_t *s_id)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess = NULL;
+       fc_port_t *fcport = NULL;
+       int rc, global_resets;
+       uint16_t loop_id = 0;
+
+retry:
+       /*
+        * Snapshot the global reset counter; if it changes while we talk
+        * to the firmware, the discovered data is stale and the whole
+        * lookup is retried.
+        */
+       global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+
+       rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
+       if (rc != 0) {
+               if ((s_id[0] == 0xFF) &&
+                   (s_id[1] == 0xFC)) {
+                       /*
+                        * This is Domain Controller, so it should be
+                        * OK to drop SCSI commands from it.
+                        */
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
+                           "Unable to find initiator with S_ID %x:%x:%x",
+                           s_id[0], s_id[1], s_id[2]);
+               } else
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
+                           "qla_target(%d): Unable to find "
+                           "initiator with S_ID %x:%x:%x",
+                           vha->vp_idx, s_id[0], s_id[1],
+                           s_id[2]);
+               return NULL;
+       }
+
+       fcport = qlt_get_port_database(vha, loop_id);
+       if (!fcport)
+               return NULL;
+
+       if (global_resets !=
+           atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
+                   "qla_target(%d): global reset during session discovery "
+                   "(counter was %d, new %d), retrying", vha->vp_idx,
+                   global_resets,
+                   atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+               goto retry;
+       }
+
+       sess = qlt_create_sess(vha, fcport, true);
+
+       /*
+        * NOTE(review): the temporary fcport is freed here -- assumes
+        * qlt_create_sess() does not retain the pointer; confirm.
+        */
+       kfree(fcport);
+       return sess;
+}
+
+/*
+ * Deferred handler for an ABTS that arrived before its session was
+ * known. Looks up (or creates) the initiator session and re-runs the
+ * ABTS handling; on any failure the ABTS is rejected with
+ * FCP_TMF_REJECTED.
+ */
+static void qlt_abort_work(struct qla_tgt *tgt,
+       struct qla_tgt_sess_work_param *prm)
+{
+       struct scsi_qla_host *vha = tgt->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess = NULL;
+       unsigned long flags;
+       uint8_t s_id[3];
+       int rc;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       if (tgt->tgt_stop)
+               goto out_term;
+
+       /*
+        * The ABTS frame header stores the S_ID in little-endian byte
+        * order; swap it into wire (big-endian) order for the session
+        * lookup and for qlt_make_local_sess() below.
+        */
+       s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
+       s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
+       s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
+
+       /*
+        * Bug fix: this lookup used to be passed a pointer to an
+        * uninitialized local (be_s_id) instead of the byte-swapped S_ID
+        * prepared above (cf. qlt_tmr_work(), which passes the 3-byte
+        * big-endian S_ID directly).
+        */
+       sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+       if (!sess) {
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+               mutex_lock(&ha->tgt.tgt_mutex);
+               sess = qlt_make_local_sess(vha, s_id);
+               /* sess has got an extra creation ref */
+               mutex_unlock(&ha->tgt.tgt_mutex);
+
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+               if (!sess)
+                       goto out_term;
+       } else {
+               kref_get(&sess->se_sess->sess_kref);
+       }
+
+       /* Re-check: the target may have begun stopping while unlocked */
+       if (tgt->tgt_stop)
+               goto out_term;
+
+       rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+       if (rc != 0)
+               goto out_term;
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       ha->tgt.tgt_ops->put_sess(sess);
+       return;
+
+out_term:
+       qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       if (sess)
+               ha->tgt.tgt_ops->put_sess(sess);
+}
+
+/*
+ * Deferred handler for a task-management IOCB that arrived before its
+ * session was known. Looks up (or creates) the session, then issues the
+ * TMR; on any failure the exchange is terminated.
+ */
+static void qlt_tmr_work(struct qla_tgt *tgt,
+       struct qla_tgt_sess_work_param *prm)
+{
+       struct atio_from_isp *a = &prm->tm_iocb2;
+       struct scsi_qla_host *vha = tgt->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess = NULL;
+       unsigned long flags;
+       uint8_t *s_id = NULL; /* to hide compiler warnings */
+       int rc;
+       uint32_t lun, unpacked_lun;
+       int lun_size, fn;
+       void *iocb;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       if (tgt->tgt_stop)
+               goto out_term;
+
+       /* isp24 ATIOs carry the S_ID already in big-endian (wire) order */
+       s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
+       sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+       if (!sess) {
+               /* hardware_lock must be dropped around the blocking lookup */
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+               mutex_lock(&ha->tgt.tgt_mutex);
+               sess = qlt_make_local_sess(vha, s_id);
+               /* sess has got an extra creation ref */
+               mutex_unlock(&ha->tgt.tgt_mutex);
+
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+               if (!sess)
+                       goto out_term;
+       } else {
+               kref_get(&sess->se_sess->sess_kref);
+       }
+
+       iocb = a;
+       lun = a->u.isp24.fcp_cmnd.lun;
+       lun_size = sizeof(lun); /* NOTE(review): assigned but never used */
+       fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+       unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+       rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+       if (rc != 0)
+               goto out_term;
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       ha->tgt.tgt_ops->put_sess(sess);
+       return;
+
+out_term:
+       qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       if (sess)
+               ha->tgt.tgt_ops->put_sess(sess);
+}
+
+/*
+ * Workqueue handler: drain the deferred session work list and dispatch
+ * each entry to its ABORT or TM handler. Entries are removed under
+ * sess_work_lock before the (sleeping) handlers run.
+ */
+static void qlt_sess_work_fn(struct work_struct *work)
+{
+       struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
+       struct scsi_qla_host *vha = tgt->vha;
+       unsigned long flags;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
+
+       spin_lock_irqsave(&tgt->sess_work_lock, flags);
+       while (!list_empty(&tgt->sess_works_list)) {
+               struct qla_tgt_sess_work_param *prm = list_entry(
+                   tgt->sess_works_list.next, typeof(*prm),
+                   sess_works_list_entry);
+
+               /*
+                * This work can be scheduled on several CPUs at time, so we
+                * must delete the entry to eliminate double processing
+                */
+               list_del(&prm->sess_works_list_entry);
+
+               /* Handlers may sleep; drop the spinlock while dispatching */
+               spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+               switch (prm->type) {
+               case QLA_TGT_SESS_WORK_ABORT:
+                       qlt_abort_work(tgt, prm);
+                       break;
+               case QLA_TGT_SESS_WORK_TM:
+                       qlt_tmr_work(tgt, prm);
+                       break;
+               default:
+                       /* Unknown work type indicates driver state corruption.
+                        * Idiom fix: BUG() instead of the always-true
+                        * BUG_ON(1) -- identical behavior. */
+                       BUG();
+                       break;
+               }
+
+               spin_lock_irqsave(&tgt->sess_work_lock, flags);
+
+               kfree(prm);
+       }
+       spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+}
+
+/*
+ * Allocate and register the struct qla_tgt for @base_vha and put it on
+ * the global target list. Returns 0 on success or when target mode is
+ * compiled out, -ENOMEM on allocation failure.
+ *
+ * Must be called under tgt_host_action_mutex.
+ */
+int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
+{
+       struct qla_tgt *tgt;
+
+       if (!QLA_TGT_MODE_ENABLED())
+               return 0;
+
+       ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
+           "Registering target for host %ld(%p)", base_vha->host_no, ha);
+
+       /* A target must not already be registered on this HA */
+       BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));
+
+       tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
+       if (!tgt) {
+               ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
+                   "Unable to allocate struct qla_tgt\n");
+               return -ENOMEM;
+       }
+
+       if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
+               base_vha->host->hostt->supported_mode |= MODE_TARGET;
+
+       tgt->ha = ha;
+       tgt->vha = base_vha;
+       init_waitqueue_head(&tgt->waitQ);
+       INIT_LIST_HEAD(&tgt->sess_list);
+       INIT_LIST_HEAD(&tgt->del_sess_list);
+       /*
+        * NOTE(review): casting qlt_del_sess_work_fn to a work_struct
+        * handler type is fragile -- confirm its signature matches.
+        */
+       INIT_DELAYED_WORK(&tgt->sess_del_work,
+               (void (*)(struct work_struct *))qlt_del_sess_work_fn);
+       spin_lock_init(&tgt->sess_work_lock);
+       INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
+       INIT_LIST_HEAD(&tgt->sess_works_list);
+       spin_lock_init(&tgt->srr_lock);
+       INIT_LIST_HEAD(&tgt->srr_ctio_list);
+       INIT_LIST_HEAD(&tgt->srr_imm_list);
+       INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
+       atomic_set(&tgt->tgt_global_resets_count, 0);
+
+       ha->tgt.qla_tgt = tgt;
+
+       ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
+               "qla_target(%d): using 64 Bit PCI addressing",
+               base_vha->vp_idx);
+       tgt->tgt_enable_64bit_addr = 1;
+       /* 3 is reserved */
+       tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
+       tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
+       tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
+
+       /* Make the new target visible to qlt_lport_register() */
+       mutex_lock(&qla_tgt_mutex);
+       list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
+       mutex_unlock(&qla_tgt_mutex);
+
+       return 0;
+}
+
+/*
+ * Unlink and release the target registered on @ha, if any. Always
+ * returns 0. Must be called under tgt_host_action_mutex.
+ */
+int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
+{
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+
+       if (tgt == NULL)
+               return 0;
+
+       mutex_lock(&qla_tgt_mutex);
+       list_del(&tgt->tgt_list_entry);
+       mutex_unlock(&qla_tgt_mutex);
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
+           vha->host_no, ha);
+       qlt_release(tgt);
+
+       return 0;
+}
+
+/*
+ * Debug helper: print the HW node/port names and the configfs-provided
+ * WWPN. As a side effect, @wwpn is serialized big-endian into @b
+ * (WWN_SIZE bytes) for the caller's later comparison.
+ */
+static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
+       unsigned char *b)
+{
+       int idx;
+
+       pr_debug("qla2xxx HW vha->node_name: ");
+       for (idx = 0; idx < WWN_SIZE; idx++)
+               pr_debug("%02x ", vha->node_name[idx]);
+       pr_debug("\n");
+
+       pr_debug("qla2xxx HW vha->port_name: ");
+       for (idx = 0; idx < WWN_SIZE; idx++)
+               pr_debug("%02x ", vha->port_name[idx]);
+       pr_debug("\n");
+
+       put_unaligned_be64(wwpn, b);
+       pr_debug("qla2xxx passed configfs WWPN: ");
+       for (idx = 0; idx < WWN_SIZE; idx++)
+               pr_debug("%02x ", b[idx]);
+       pr_debug("\n");
+}
+
+/**
+ * qlt_lport_register - register lport with external module
+ *
+ * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
+ * @wwpn: Passed FC target WWPN
+ * @callback:  lport initialization callback for tcm_qla2xxx code
+ * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
+ *
+ * Walks the global target list looking for a target-capable host whose
+ * port name matches @wwpn, attaches the ops/lport data and invokes
+ * @callback. Returns the callback's result, or -ENODEV when no host
+ * matches.
+ */
+int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
+       int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
+{
+       struct qla_tgt *tgt;
+       struct scsi_qla_host *vha;
+       struct qla_hw_data *ha;
+       struct Scsi_Host *host;
+       unsigned long flags;
+       int rc;
+       u8 b[WWN_SIZE];
+
+       mutex_lock(&qla_tgt_mutex);
+       list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
+               vha = tgt->vha;
+               ha = vha->hw;
+
+               host = vha->host;
+               if (!host)
+                       continue;
+
+               /* Skip hosts already claimed by a target-mode module */
+               if (ha->tgt.tgt_ops != NULL)
+                       continue;
+
+               if (!(host->hostt->supported_mode & MODE_TARGET))
+                       continue;
+
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+               if (host->active_mode & MODE_TARGET) {
+                       pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
+                           host->host_no);
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+                       continue;
+               }
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+               /* Hold a host reference across the callback; dropped in
+                * qlt_lport_deregister() on success */
+               if (!scsi_host_get(host)) {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe068,
+                           "Unable to scsi_host_get() for"
+                           " qla2xxx scsi_host\n");
+                       continue;
+               }
+               qlt_lport_dump(vha, wwpn, b);
+
+               /* Only bind to the host whose port name matches @wwpn */
+               if (memcmp(vha->port_name, b, WWN_SIZE)) {
+                       scsi_host_put(host);
+                       continue;
+               }
+               /*
+                * Setup passed parameters ahead of invoking callback
+                */
+               ha->tgt.tgt_ops = qla_tgt_ops;
+               ha->tgt.target_lport_ptr = target_lport_ptr;
+               rc = (*callback)(vha);
+               if (rc != 0) {
+                       /* Roll back the binding on callback failure */
+                       ha->tgt.tgt_ops = NULL;
+                       ha->tgt.target_lport_ptr = NULL;
+               }
+               mutex_unlock(&qla_tgt_mutex);
+               return rc;
+       }
+       mutex_unlock(&qla_tgt_mutex);
+
+       return -ENODEV;
+}
+EXPORT_SYMBOL(qlt_lport_register);
+
+/**
+ * qlt_lport_deregister - Deregister lport
+ *
+ * @vha:  Registered scsi_qla_host pointer
+ */
+void qlt_lport_deregister(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       /* Detach the tcm_qla2xxx lport data and target ops from the HA */
+       ha->tgt.target_lport_ptr = NULL;
+       ha->tgt.tgt_ops = NULL;
+       /* Drop the Scsi_Host reference taken in qlt_lport_register() */
+       scsi_host_put(vha->host);
+}
+EXPORT_SYMBOL(qlt_lport_deregister);
+
+/*
+ * Enable target mode on @vha according to ql2x_ini_mode.
+ * Must be called under HW lock.
+ */
+void qlt_set_mode(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED ||
+           ql2x_ini_mode == QLA2XXX_INI_MODE_EXCLUSIVE)
+               vha->host->active_mode = MODE_TARGET;
+       else if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
+               vha->host->active_mode |= MODE_TARGET;
+
+       if (ha->tgt.ini_mode_force_reverse)
+               qla_reverse_ini_mode(vha);
+}
+
+/*
+ * Disable target mode on @vha according to ql2x_ini_mode.
+ * Must be called under HW lock.
+ */
+void qlt_clear_mode(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED)
+               vha->host->active_mode = MODE_UNKNOWN;
+       else if (ql2x_ini_mode == QLA2XXX_INI_MODE_EXCLUSIVE)
+               vha->host->active_mode = MODE_INITIATOR;
+       else if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
+               vha->host->active_mode &= ~MODE_TARGET;
+
+       if (ha->tgt.ini_mode_force_reverse)
+               qla_reverse_ini_mode(vha);
+}
+
+/*
+ * qlt_enable_vha - NO LOCK HELD
+ *
+ * host_reset, bring up w/ Target Mode Enabled
+ */
+void
+qlt_enable_vha(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       unsigned long flags;
+
+       if (!tgt) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe069,
+                   "Unable to locate qla_tgt pointer from"
+                   " struct qla_hw_data\n");
+               dump_stack();
+               return;
+       }
+
+       /* Flip the mode bits under the HW lock, as qlt_set_mode requires */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       tgt->tgt_stopped = 0;
+       qlt_set_mode(vha);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       /* Reset the ISP and wait for it to come back online */
+       set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+       qla2xxx_wake_dpc(vha);
+       qla2x00_wait_for_hba_online(vha);
+}
+EXPORT_SYMBOL(qlt_enable_vha);
+
+/*
+ * qlt_disable_vha - NO LOCK HELD
+ *
+ * Disable Target Mode and reset the adapter
+ */
+void
+qlt_disable_vha(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       unsigned long flags;
+
+       if (!tgt) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe06a,
+                   "Unable to locate qla_tgt pointer from"
+                   " struct qla_hw_data\n");
+               dump_stack();
+               return;
+       }
+
+       /* Flip the mode bits under the HW lock, as qlt_clear_mode requires */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       qlt_clear_mode(vha);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       /* Reset the ISP and wait for it to come back online */
+       set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+       qla2xxx_wake_dpc(vha);
+       qla2x00_wait_for_hba_online(vha);
+}
+
+/*
+ * Called from qla_init.c:qla24xx_vport_create() context to setup
+ * the target mode specific struct scsi_qla_host and struct qla_hw_data
+ * members.
+ */
+void
+qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
+{
+       if (!qla_tgt_mode_enabled(vha))
+               return;
+
+       mutex_init(&ha->tgt.tgt_mutex);
+       mutex_init(&ha->tgt.tgt_host_action_mutex);
+
+       /* New vports start with target mode cleared */
+       qlt_clear_mode(vha);
+
+       /*
+        * NOTE: Currently the value is kept the same for <24xx and
+        * >=24xx ISPs. If it is necessary to change it,
+        * the check should be added for specific ISPs,
+        * assigning the value appropriately.
+        */
+       ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+}
+
+/*
+ * Fill in the RFF_ID FC-4 feature bits advertised to the name server:
+ * BIT_0 = target function, BIT_1 = initiator function. The field is
+ * left untouched when neither mode is enabled.
+ */
+void
+qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
+{
+       uint8_t fc4_feature = 0;
+
+       /*
+        * FC-4 Feature bit 0 indicates target functionality to the name server.
+        */
+       if (qla_tgt_mode_enabled(vha))
+               fc4_feature |= BIT_0;
+       if (qla_ini_mode_enabled(vha))
+               fc4_feature |= BIT_1;
+
+       if (fc4_feature)
+               ct_req->req.rff_id.fc4_feature = fc4_feature;
+}
+
+/*
+ * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
+ * @ha: HA context
+ *
+ * Beginning of ATIO ring has initialization control block already built
+ * by nvram config routine.
+ *
+ * Marks every ring entry ATIO_PROCESSED so stale entries are never
+ * mistaken for new work.
+ */
+void
+qlt_init_atio_q_entries(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
+       uint16_t cnt;
+
+       if (!qla_tgt_mode_enabled(vha))
+               return;
+
+       for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++, pkt++)
+               pkt->u.raw.signature = ATIO_PROCESSED;
+}
+
+/*
+ * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
+ * @ha: SCSI driver HA context
+ *
+ * Consumes every unprocessed entry on the ATIO ring (handling ring
+ * wrap-around and multi-entry packets), then publishes the new ring
+ * index to the hardware.
+ */
+void
+qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+       struct atio_from_isp *pkt;
+       int cnt, i;
+
+       if (!vha->flags.online)
+               return;
+
+       /* A signature other than ATIO_PROCESSED marks a fresh entry */
+       while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
+               pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+               cnt = pkt->u.raw.entry_count;
+
+               qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
+
+               /* Consume all 'cnt' ring entries of this packet, wrapping
+                * back to the ring start when the end is reached */
+               for (i = 0; i < cnt; i++) {
+                       ha->tgt.atio_ring_index++;
+                       if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
+                               ha->tgt.atio_ring_index = 0;
+                               ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+                       } else
+                               ha->tgt.atio_ring_ptr++;
+
+                       pkt->u.raw.signature = ATIO_PROCESSED;
+                       pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+               }
+               /* Order the signature stores before the index update below */
+               wmb();
+       }
+
+       /* Adjust ring index */
+       WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
+}
+
+/*
+ * Reset the ATIO queue in/out pointers for target mode. The
+ * multi-queue (ha->mqenable) variant is stubbed out pending the FIXME
+ * below.
+ */
+void
+qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+/* FIXME: atio_q in/out for ha->mqenable=1..? */
+       if (ha->mqenable) {
+#if 0
+               WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
+               WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
+               RD_REG_DWORD(&reg->isp25mq.atio_q_out);
+#endif
+       } else {
+               /* Setup APTIO registers for target mode */
+               WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
+               WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
+               /* Read back flushes the posted writes */
+               RD_REG_DWORD(&reg->isp24.atio_q_out);
+       }
+}
+
+/*
+ * Adjust the 24xx NVRAM firmware options for target mode, saving the
+ * original values once so they can be restored when target mode is
+ * disabled again.
+ */
+void
+qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (qla_tgt_mode_enabled(vha)) {
+               if (!ha->tgt.saved_set) {
+                       /* We save only once */
+                       ha->tgt.saved_exchange_count = nv->exchange_count;
+                       ha->tgt.saved_firmware_options_1 =
+                           nv->firmware_options_1;
+                       ha->tgt.saved_firmware_options_2 =
+                           nv->firmware_options_2;
+                       ha->tgt.saved_firmware_options_3 =
+                           nv->firmware_options_3;
+                       ha->tgt.saved_set = 1;
+               }
+
+               /* Maximum number of concurrent exchanges */
+               nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+
+               /* Enable target mode */
+               nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+
+               /* Disable ini mode, if requested */
+               if (!qla_ini_mode_enabled(vha))
+                       nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
+
+               /* Disable Full Login after LIP */
+               nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+               /* Enable initial LIP */
+               nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+               /* Enable FC tapes support */
+               nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+               /* Disable Full Login after LIP */
+               nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+               /* Enable target PRLI control */
+               nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+       } else {
+               /* Target mode off: restore the saved initiator settings */
+               if (ha->tgt.saved_set) {
+                       nv->exchange_count = ha->tgt.saved_exchange_count;
+                       nv->firmware_options_1 =
+                           ha->tgt.saved_firmware_options_1;
+                       nv->firmware_options_2 =
+                           ha->tgt.saved_firmware_options_2;
+                       nv->firmware_options_3 =
+                           ha->tgt.saved_firmware_options_3;
+               }
+               return;
+       }
+
+       /* out-of-order frames reassembly */
+       /* NOTE(review): unlike every other option write above, this one
+        * lacks cpu_to_le32() -- wrong bits on big-endian hosts? Verify. */
+       nv->firmware_options_3 |= BIT_6|BIT_9;
+
+       if (ha->tgt.enable_class_2) {
+               if (vha->flags.init_done)
+                       fc_host_supported_classes(vha->host) =
+                               FC_COS_CLASS2 | FC_COS_CLASS3;
+
+               nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+       } else {
+               if (vha->flags.init_done)
+                       fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+               nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+       }
+}
+
+/*
+ * qlt_24xx_config_nvram_stage2 - if a target node name was configured
+ * (ha->tgt.node_name_set), copy it into the init control block and set
+ * firmware option BIT_14 so the firmware uses it.
+ */
+void
+qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
+       struct init_cb_24xx *icb)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (ha->tgt.node_name_set) {
+               memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+               icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+       }
+}
+
+/*
+ * qlt_24xx_process_response_error - classify an errored response-queue
+ * entry.  Returns 1 if the entry type belongs to the target-mode path
+ * (ABTS/CTIO7/notify-ack) and should be handled by the target code,
+ * 0 for everything else (left to the initiator-side handler).
+ */
+int
+qlt_24xx_process_response_error(struct scsi_qla_host *vha,
+       struct sts_entry_24xx *pkt)
+{
+       switch (pkt->entry_type) {
+       case ABTS_RECV_24XX:
+       case ABTS_RESP_24XX:
+       case CTIO_TYPE7:
+       case NOTIFY_ACK_TYPE:
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+/*
+ * qlt_modify_vp_config - adjust a VP config IOCB for the current mode:
+ * clear BIT_5 to enable target mode, clear BIT_4 to disable initiator
+ * mode when it is not requested.
+ */
+void
+qlt_modify_vp_config(struct scsi_qla_host *vha,
+       struct vp_config_entry_24xx *vpmod)
+{
+       /* Enable target mode, if requested */
+       if (qla_tgt_mode_enabled(vha))
+               vpmod->options_idx1 &= ~BIT_5;
+       /* Disable ini mode, if requested */
+       if (!qla_ini_mode_enabled(vha))
+               vpmod->options_idx1 &= ~BIT_4;
+}
+
+/*
+ * qlt_probe_one_stage1 - early per-HBA target-mode setup during probe:
+ * initialize the target mutexes and start with target mode cleared.
+ * No-op when the driver is built without target-mode support.
+ */
+void
+qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
+{
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
+
+       mutex_init(&ha->tgt.tgt_mutex);
+       mutex_init(&ha->tgt.tgt_host_action_mutex);
+       qlt_clear_mode(base_vha);
+}
+
+/*
+ * qlt_mem_alloc - allocate per-HBA target-mode memory: the virtual-port
+ * map array (tgt_vp_map) and the DMA-coherent ATIO ring.
+ *
+ * Returns 0 on success (including the no-target-mode build, where this
+ * is a no-op), -ENOMEM on allocation failure.  On ring allocation
+ * failure the vp map is freed again, so no partial state is left behind.
+ */
+int
+qlt_mem_alloc(struct qla_hw_data *ha)
+{
+       if (!QLA_TGT_MODE_ENABLED())
+               return 0;
+
+       ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
+           MAX_MULTI_ID_FABRIC, GFP_KERNEL);
+       if (!ha->tgt.tgt_vp_map)
+               return -ENOMEM;
+
+       /* +1: one extra entry beyond the ring length (matches qlt_mem_free) */
+       ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
+           (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
+           &ha->tgt.atio_dma, GFP_KERNEL);
+       if (!ha->tgt.atio_ring) {
+               kfree(ha->tgt.tgt_vp_map);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+/*
+ * qlt_mem_free - release the memory allocated by qlt_mem_alloc()
+ * (ATIO ring and vp map).  Safe to call when the ring was never
+ * allocated; kfree(NULL) is a no-op for the vp map.
+ */
+void
+qlt_mem_free(struct qla_hw_data *ha)
+{
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
+
+       if (ha->tgt.atio_ring) {
+               /* Size must match the dma_alloc_coherent() in qlt_mem_alloc */
+               dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
+                   sizeof(struct atio_from_isp), ha->tgt.atio_ring,
+                   ha->tgt.atio_dma);
+       }
+       kfree(ha->tgt.tgt_vp_map);
+}
+
+/* vport_slock to be held by the caller */
+/*
+ * qlt_update_vp_map - maintain the two lookup tables in tgt_vp_map:
+ * vha by virtual-port index (SET/RESET_VP_IDX) and vp index by AL_PA
+ * (SET/RESET_AL_PA).  Unknown cmd values fall through silently.
+ */
+void
+qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
+{
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
+
+       switch (cmd) {
+       case SET_VP_IDX:
+               vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
+               break;
+       case SET_AL_PA:
+               vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
+               break;
+       case RESET_VP_IDX:
+               vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
+               break;
+       case RESET_AL_PA:
+               vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
+               break;
+       }
+}
+
+/*
+ * qlt_parse_ini_mode - translate the qlini_mode module-parameter string
+ * ("exclusive"/"disabled"/"enabled", case-insensitive) into the
+ * ql2x_ini_mode enum.  Returns true on success, false for an
+ * unrecognized string.
+ */
+static int __init qlt_parse_ini_mode(void)
+{
+       if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
+               ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+       else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
+               ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
+       else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
+               ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
+       else
+               return false;
+
+       return true;
+}
+
+/*
+ * qlt_init - module-init-time setup of the target-mode infrastructure:
+ * parse the qlini_mode parameter, then create the command and management
+ * command slab caches, the management-command mempool and the target
+ * workqueue.  Resources are torn down in reverse order on failure via
+ * the goto-unwind labels below.
+ *
+ * Returns 1 when initiator mode is being disabled, 0 on plain success,
+ * or a negative errno on failure.
+ */
+int __init qlt_init(void)
+{
+       int ret;
+
+       if (!qlt_parse_ini_mode()) {
+               ql_log(ql_log_fatal, NULL, 0xe06b,
+                   "qlt_parse_ini_mode() failed\n");
+               return -EINVAL;
+       }
+
+       if (!QLA_TGT_MODE_ENABLED())
+               return 0;
+
+       qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
+           sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
+           NULL);
+       if (!qla_tgt_cmd_cachep) {
+               ql_log(ql_log_fatal, NULL, 0xe06c,
+                   "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
+               return -ENOMEM;
+       }
+
+       qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
+           sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
+           qla_tgt_mgmt_cmd), 0, NULL);
+       if (!qla_tgt_mgmt_cmd_cachep) {
+               ql_log(ql_log_fatal, NULL, 0xe06d,
+                   "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* 25 pre-allocated elements kept in reserve for the mempool */
+       qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
+           mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
+       if (!qla_tgt_mgmt_cmd_mempool) {
+               ql_log(ql_log_fatal, NULL, 0xe06e,
+                   "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
+               ret = -ENOMEM;
+               goto out_mgmt_cmd_cachep;
+       }
+
+       qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
+       if (!qla_tgt_wq) {
+               ql_log(ql_log_fatal, NULL, 0xe06f,
+                   "alloc_workqueue for qla_tgt_wq failed\n");
+               ret = -ENOMEM;
+               goto out_cmd_mempool;
+       }
+       /*
+        * Return 1 to signal that initiator-mode is being disabled
+        */
+       return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
+
+out_cmd_mempool:
+       mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+out_mgmt_cmd_cachep:
+       kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+out:
+       kmem_cache_destroy(qla_tgt_cmd_cachep);
+       return ret;
+}
+
+/*
+ * qlt_exit - module-exit counterpart of qlt_init(); destroys the
+ * workqueue, mempool and slab caches in reverse order of creation.
+ */
+void qlt_exit(void)
+{
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
+
+       destroy_workqueue(qla_tgt_wq);
+       mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+       kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+       kmem_cache_destroy(qla_tgt_cmd_cachep);
+}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
new file mode 100644 (file)
index 0000000..9ec19bc
--- /dev/null
@@ -0,0 +1,1005 @@
+/*
+ *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ *  Copyright (C) 2004 - 2005 Leonid Stoljar
+ *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ *  Copyright (C) 2007 - 2010 ID7 Ltd.
+ *
+ *  Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ *  Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ *  Additional file for the target driver support.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *  GNU General Public License for more details.
+ */
+/*
+ * This is the global def file that is useful for including from the
+ * target portion.
+ */
+
+#ifndef __QLA_TARGET_H
+#define __QLA_TARGET_H
+
+#include "qla_def.h"
+
+/*
+ * Must be changed on any change in any initiator visible interfaces or
+ * data in the target add-on
+ */
+#define QLA2XXX_TARGET_MAGIC   269
+
+/*
+ * Must be changed on any change in any target visible interfaces or
+ * data in the initiator
+ */
+#define QLA2XXX_INITIATOR_MAGIC   57222
+
+#define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive"
+#define QLA2XXX_INI_MODE_STR_DISABLED  "disabled"
+#define QLA2XXX_INI_MODE_STR_ENABLED   "enabled"
+
+#define QLA2XXX_INI_MODE_EXCLUSIVE     0
+#define QLA2XXX_INI_MODE_DISABLED      1
+#define QLA2XXX_INI_MODE_ENABLED       2
+
+#define QLA2XXX_COMMAND_COUNT_INIT     250
+#define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250
+
+/*
+ * Used to mark which completion handles (for RIO Status's) are for CTIO's
+ * vs. regular (non-target) info. This is checked for in
+ * qla2x00_process_response_queue() to see if a handle coming back in a
+ * multi-complete should come to the tgt driver or be handled there by qla2xxx
+ */
+#define CTIO_COMPLETION_HANDLE_MARK    BIT_29
+#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
+#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS"
+#endif
+#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
+
+/* Used to mark CTIO as intermediate */
+#define CTIO_INTERMEDIATE_HANDLE_MARK  BIT_30
+
+#ifndef OF_SS_MODE_0
+/*
+ * ISP target entries - Flags bit definitions.
+ */
+#define OF_SS_MODE_0        0
+#define OF_SS_MODE_1        1
+#define OF_SS_MODE_2        2
+#define OF_SS_MODE_3        3
+
+#define OF_EXPL_CONF        BIT_5       /* Explicit Confirmation Requested */
+#define OF_DATA_IN          BIT_6       /* Data in to initiator */
+                                       /*  (data from target to initiator) */
+#define OF_DATA_OUT         BIT_7       /* Data out from initiator */
+                                       /*  (data from initiator to target) */
+#define OF_NO_DATA          (BIT_7 | BIT_6)
+#define OF_INC_RC           BIT_8       /* Increment command resource count */
+#define OF_FAST_POST        BIT_9       /* Enable mailbox fast posting. */
+#define OF_CONF_REQ         BIT_13      /* Confirmation Requested */
+#define OF_TERM_EXCH        BIT_14      /* Terminate exchange */
+#define OF_SSTS             BIT_15      /* Send SCSI status */
+#endif
+
+#ifndef QLA_TGT_DATASEGS_PER_CMD32
+#define QLA_TGT_DATASEGS_PER_CMD32     3
+#define QLA_TGT_DATASEGS_PER_CONT32    7
+#define QLA_TGT_MAX_SG32(ql) \
+       (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD32 + \
+               QLA_TGT_DATASEGS_PER_CONT32*((ql) - 1)) : 0)
+
+#define QLA_TGT_DATASEGS_PER_CMD64     2
+#define QLA_TGT_DATASEGS_PER_CONT64    5
+#define QLA_TGT_MAX_SG64(ql) \
+       (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD64 + \
+               QLA_TGT_DATASEGS_PER_CONT64*((ql) - 1)) : 0)
+#endif
+
+#ifndef QLA_TGT_DATASEGS_PER_CMD_24XX
+#define QLA_TGT_DATASEGS_PER_CMD_24XX  1
+#define QLA_TGT_DATASEGS_PER_CONT_24XX 5
+#define QLA_TGT_MAX_SG_24XX(ql) \
+       (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
+               QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
+#endif
+#endif
+
+#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha))                        \
+                        ? le16_to_cpu((iocb)->u.isp2x.target.extended) \
+                        : (uint16_t)(iocb)->u.isp2x.target.id.standard)
+
+#ifndef IMMED_NOTIFY_TYPE
+#define IMMED_NOTIFY_TYPE 0x0D         /* Immediate notify entry. */
+/*
+ * ISP queue - immediate notify entry structure definition.
+ *             This is sent by the ISP to the Target driver.
+ *             This IOCB would have report of events sent by the
+ *             initiator, that needs to be handled by the target
+ *             driver immediately.
+ */
+struct imm_ntfy_from_isp {
+       uint8_t  entry_type;                /* Entry type. */
+       uint8_t  entry_count;               /* Entry count. */
+       uint8_t  sys_define;                /* System defined. */
+       uint8_t  entry_status;              /* Entry Status. */
+       /* Payload layout differs between 2xxx (isp2x) and 24xx (isp24) ISPs */
+       union {
+               struct {
+                       uint32_t sys_define_2; /* System defined. */
+                       target_id_t target;
+                       uint16_t lun;
+                       uint8_t  target_id;
+                       uint8_t  reserved_1;
+                       uint16_t status_modifier;
+                       uint16_t status;
+                       uint16_t task_flags;
+                       uint16_t seq_id;
+                       uint16_t srr_rx_id;
+                       uint32_t srr_rel_offs;
+                       uint16_t srr_ui;
+#define SRR_IU_DATA_IN 0x1
+#define SRR_IU_DATA_OUT        0x5
+#define SRR_IU_STATUS  0x7
+                       uint16_t srr_ox_id;
+                       uint8_t reserved_2[28];
+               } isp2x;
+               struct {
+                       uint32_t reserved;
+                       uint16_t nport_handle;
+                       uint16_t reserved_2;
+                       uint16_t flags;
+#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO   BIT_1
+#define NOTIFY24XX_FLAGS_PUREX_IOCB     BIT_0
+                       uint16_t srr_rx_id;
+                       uint16_t status;
+                       uint8_t  status_subcode;
+                       uint8_t  reserved_3;
+                       uint32_t exchange_address;
+                       uint32_t srr_rel_offs;
+                       uint16_t srr_ui;
+                       uint16_t srr_ox_id;
+                       uint8_t  reserved_4[19];
+                       uint8_t  vp_index;
+                       uint32_t reserved_5;
+                       uint8_t  port_id[3];
+                       uint8_t  reserved_6;
+               } isp24;
+       } u;
+       uint16_t reserved_7;
+       uint16_t ox_id;
+} __packed;
+#endif
+
+#ifndef NOTIFY_ACK_TYPE
+#define NOTIFY_ACK_TYPE 0x0E     /* Notify acknowledge entry. */
+/*
+ * ISP queue - notify acknowledge entry structure definition.
+ *             This is sent to the ISP from the target driver.
+ */
+struct nack_to_isp {
+       uint8_t  entry_type;                /* Entry type. */
+       uint8_t  entry_count;               /* Entry count. */
+       uint8_t  sys_define;                /* System defined. */
+       uint8_t  entry_status;              /* Entry Status. */
+       /* Payload layout differs between 2xxx (isp2x) and 24xx (isp24) ISPs */
+       union {
+               struct {
+                       uint32_t sys_define_2; /* System defined. */
+                       target_id_t target;
+                       uint8_t  target_id;
+                       uint8_t  reserved_1;
+                       uint16_t flags;
+                       uint16_t resp_code;
+                       uint16_t status;
+                       uint16_t task_flags;
+                       uint16_t seq_id;
+                       uint16_t srr_rx_id;
+                       uint32_t srr_rel_offs;
+                       uint16_t srr_ui;
+                       uint16_t srr_flags;
+                       uint16_t srr_reject_code;
+                       uint8_t  srr_reject_vendor_uniq;
+                       uint8_t  srr_reject_code_expl;
+                       uint8_t  reserved_2[24];
+               } isp2x;
+               struct {
+                       uint32_t handle;
+                       uint16_t nport_handle;
+                       uint16_t reserved_1;
+                       uint16_t flags;
+                       uint16_t srr_rx_id;
+                       uint16_t status;
+                       uint8_t  status_subcode;
+                       uint8_t  reserved_3;
+                       uint32_t exchange_address;
+                       uint32_t srr_rel_offs;
+                       uint16_t srr_ui;
+                       uint16_t srr_flags;
+                       uint8_t  reserved_4[19];
+                       uint8_t  vp_index;
+                       uint8_t  srr_reject_vendor_uniq;
+                       uint8_t  srr_reject_code_expl;
+                       uint8_t  srr_reject_code;
+                       uint8_t  reserved_5[5];
+               } isp24;
+       } u;
+       uint8_t  reserved[2];
+       uint16_t ox_id;
+} __packed;
+#define NOTIFY_ACK_SRR_FLAGS_ACCEPT    0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT    1
+
+#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9
+
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL               0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a
+
+#define NOTIFY_ACK_SUCCESS      0x01
+#endif
+
+#ifndef ACCEPT_TGT_IO_TYPE
+#define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */
+#endif
+
+#ifndef CONTINUE_TGT_IO_TYPE
+#define CONTINUE_TGT_IO_TYPE 0x17
+/*
+ * ISP queue - Continue Target I/O (CTIO) entry for status mode 0 structure.
+ *             This structure is sent to the ISP 2xxx from target driver.
+ */
+struct ctio_to_2xxx {
+       uint8_t  entry_type;            /* Entry type. */
+       uint8_t  entry_count;           /* Entry count. */
+       uint8_t  sys_define;            /* System defined. */
+       uint8_t  entry_status;          /* Entry Status. */
+       uint32_t handle;                /* System defined handle */
+       target_id_t target;
+       uint16_t rx_id;
+       uint16_t flags;
+       uint16_t status;
+       uint16_t timeout;               /* 0 = 30 seconds, 0xFFFF = disable */
+       uint16_t dseg_count;            /* Data segment count. */
+       uint32_t relative_offset;
+       uint32_t residual;
+       uint16_t reserved_1[3];
+       uint16_t scsi_status;
+       uint32_t transfer_length;
+       /* Three 32-bit data segments — cf. QLA_TGT_DATASEGS_PER_CMD32 */
+       uint32_t dseg_0_address;        /* Data segment 0 address. */
+       uint32_t dseg_0_length;         /* Data segment 0 length. */
+       uint32_t dseg_1_address;        /* Data segment 1 address. */
+       uint32_t dseg_1_length;         /* Data segment 1 length. */
+       uint32_t dseg_2_address;        /* Data segment 2 address. */
+       uint32_t dseg_2_length;         /* Data segment 2 length. */
+} __packed;
+#define ATIO_PATH_INVALID       0x07
+#define ATIO_CANT_PROV_CAP      0x16
+#define ATIO_CDB_VALID          0x3D
+
+#define ATIO_EXEC_READ          BIT_1
+#define ATIO_EXEC_WRITE         BIT_0
+#endif
+
+#ifndef CTIO_A64_TYPE
+#define CTIO_A64_TYPE 0x1F
+#define CTIO_SUCCESS                   0x01
+#define CTIO_ABORTED                   0x02
+#define CTIO_INVALID_RX_ID             0x08
+#define CTIO_TIMEOUT                   0x0B
+#define CTIO_LIP_RESET                 0x0E
+#define CTIO_TARGET_RESET              0x17
+#define CTIO_PORT_UNAVAILABLE          0x28
+#define CTIO_PORT_LOGGED_OUT           0x29
+#define CTIO_PORT_CONF_CHANGED         0x2A
+#define CTIO_SRR_RECEIVED              0x45
+#endif
+
+#ifndef CTIO_RET_TYPE
+#define CTIO_RET_TYPE  0x17            /* CTIO return entry */
+#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
+
+/* FC frame header as carried in 24xx ATIO IOCBs (cf. fcp_hdr_le below) */
+struct fcp_hdr {
+       uint8_t  r_ctl;
+       uint8_t  d_id[3];
+       uint8_t  cs_ctl;
+       uint8_t  s_id[3];
+       uint8_t  type;
+       uint8_t  f_ctl[3];
+       uint8_t  seq_id;
+       uint8_t  df_ctl;
+       uint16_t seq_cnt;
+       uint16_t ox_id;
+       uint16_t rx_id;
+       uint32_t parameter;
+} __packed;
+
+/*
+ * Same FC frame header with each field pair byte-swapped relative to
+ * struct fcp_hdr; used in the ABTS IOCBs below.
+ */
+struct fcp_hdr_le {
+       uint8_t  d_id[3];
+       uint8_t  r_ctl;
+       uint8_t  s_id[3];
+       uint8_t  cs_ctl;
+       uint8_t  f_ctl[3];
+       uint8_t  type;
+       uint16_t seq_cnt;
+       uint8_t  df_ctl;
+       uint8_t  seq_id;
+       uint16_t rx_id;
+       uint16_t ox_id;
+       uint32_t parameter;
+} __packed;
+
+#define F_CTL_EXCH_CONTEXT_RESP        BIT_23
+#define F_CTL_SEQ_CONTEXT_RESIP        BIT_22
+#define F_CTL_LAST_SEQ         BIT_20
+#define F_CTL_END_SEQ          BIT_19
+#define F_CTL_SEQ_INITIATIVE   BIT_16
+
+#define R_CTL_BASIC_LINK_SERV  0x80
+#define R_CTL_B_ACC            0x4
+#define R_CTL_B_RJT            0x5
+
+/* FCP_CMND IU payload embedded in a 24xx ATIO type 7 IOCB */
+struct atio7_fcp_cmnd {
+       uint64_t lun;
+       uint8_t  cmnd_ref;
+       uint8_t  task_attr:3;
+       uint8_t  reserved:5;
+       uint8_t  task_mgmt_flags;
+#define FCP_CMND_TASK_MGMT_CLEAR_ACA           6
+#define FCP_CMND_TASK_MGMT_TARGET_RESET                5
+#define FCP_CMND_TASK_MGMT_LU_RESET            4
+#define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET      2
+#define FCP_CMND_TASK_MGMT_ABORT_TASK_SET      1
+       uint8_t  wrdata:1;
+       uint8_t  rddata:1;
+       uint8_t  add_cdb_len:6;
+       uint8_t  cdb[16];
+       /*
+        * add_cdb is optional and can be absent from struct atio7_fcp_cmnd.
+        * Size 4 only to make sizeof(struct atio7_fcp_cmnd) be as expected by
+        * BUILD_BUG_ON in qlt_init().
+        */
+       uint8_t  add_cdb[4];
+       /* uint32_t data_length; */
+} __packed;
+
+/*
+ * ISP queue - Accept Target I/O (ATIO) type entry IOCB structure.
+ *             This is sent from the ISP to the target driver.
+ */
+struct atio_from_isp {
+       /* Layout differs per ISP generation; raw gives untyped access */
+       union {
+               struct {
+                       uint16_t entry_hdr;
+                       uint8_t  sys_define;   /* System defined. */
+                       uint8_t  entry_status; /* Entry Status.   */
+                       uint32_t sys_define_2; /* System defined. */
+                       target_id_t target;
+                       uint16_t rx_id;
+                       uint16_t flags;
+                       uint16_t status;
+                       uint8_t  command_ref;
+                       uint8_t  task_codes;
+                       uint8_t  task_flags;
+                       uint8_t  execution_codes;
+                       uint8_t  cdb[MAX_CMDSZ];
+                       uint32_t data_length;
+                       uint16_t lun;
+                       uint8_t  initiator_port_name[WWN_SIZE]; /* on qla23xx */
+                       uint16_t reserved_32[6];
+                       uint16_t ox_id;
+               } isp2x;
+               struct {
+                       uint16_t entry_hdr;
+                       uint8_t  fcp_cmnd_len_low;
+                       uint8_t  fcp_cmnd_len_high:4;
+                       uint8_t  attr:4;
+                       uint32_t exchange_addr;
+#define ATIO_EXCHANGE_ADDRESS_UNKNOWN  0xFFFFFFFF
+                       struct fcp_hdr fcp_hdr;
+                       struct atio7_fcp_cmnd fcp_cmnd;
+               } isp24;
+               struct {
+                       uint8_t  entry_type;    /* Entry type. */
+                       uint8_t  entry_count;   /* Entry count. */
+                       uint8_t  data[58];
+                       /* Written to mark an already-consumed ring entry */
+                       uint32_t signature;
+#define ATIO_PROCESSED 0xDEADDEAD              /* Signature */
+               } raw;
+       } u;
+} __packed;
+
+#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
+
+/*
+ * ISP queue - Continue Target I/O (CTIO) type 7 entry (for 24xx) structure.
+ *             This structure is sent to the ISP 24xx from the target driver.
+ */
+
+struct ctio7_to_24xx {
+       uint8_t  entry_type;                /* Entry type. */
+       uint8_t  entry_count;               /* Entry count. */
+       uint8_t  sys_define;                /* System defined. */
+       uint8_t  entry_status;              /* Entry Status. */
+       uint32_t handle;                    /* System defined handle */
+       uint16_t nport_handle;
+#define CTIO7_NHANDLE_UNRECOGNIZED     0xFFFF
+       uint16_t timeout;
+       uint16_t dseg_count;                /* Data segment count. */
+       uint8_t  vp_index;
+       uint8_t  add_flags;
+       uint8_t  initiator_id[3];
+       uint8_t  reserved;
+       uint32_t exchange_addr;
+       /*
+        * status0: data-transfer variant; status1: sense/response-data
+        * variant — presumably selected by the CTIO7_FLAGS_STATUS_MODE_*
+        * flags below; confirm against the 24xx firmware spec.
+        */
+       union {
+               struct {
+                       uint16_t reserved1;
+                       uint16_t flags;
+                       uint32_t residual;
+                       uint16_t ox_id;
+                       uint16_t scsi_status;
+                       uint32_t relative_offset;
+                       uint32_t reserved2;
+                       uint32_t transfer_length;
+                       uint32_t reserved3;
+                       /* Data segment 0 address. */
+                       uint32_t dseg_0_address[2];
+                       /* Data segment 0 length. */
+                       uint32_t dseg_0_length;
+               } status0;
+               struct {
+                       uint16_t sense_length;
+                       uint16_t flags;
+                       uint32_t residual;
+                       uint16_t ox_id;
+                       uint16_t scsi_status;
+                       uint16_t response_len;
+                       uint16_t reserved;
+                       uint8_t sense_data[24];
+               } status1;
+       } u;
+} __packed;
+
+/*
+ * ISP queue - CTIO type 7 from ISP 24xx to target driver
+ * returned entry structure.
+ */
+struct ctio7_from_24xx {
+       uint8_t  entry_type;                /* Entry type. */
+       uint8_t  entry_count;               /* Entry count. */
+       uint8_t  sys_define;                /* System defined. */
+       uint8_t  entry_status;              /* Entry Status. */
+       uint32_t handle;                    /* System defined handle */
+       uint16_t status;                    /* CTIO_* completion status */
+       uint16_t timeout;
+       uint16_t dseg_count;                /* Data segment count. */
+       uint8_t  vp_index;
+       uint8_t  reserved1[5];
+       uint32_t exchange_address;
+       uint16_t reserved2;
+       uint16_t flags;
+       uint32_t residual;
+       uint16_t ox_id;
+       uint16_t reserved3;
+       uint32_t relative_offset;
+       uint8_t  reserved4[24];
+} __packed;
+
+/* CTIO7 flags values */
+#define CTIO7_FLAGS_SEND_STATUS                BIT_15
+#define CTIO7_FLAGS_TERMINATE          BIT_14
+#define CTIO7_FLAGS_CONFORM_REQ                BIT_13
+#define CTIO7_FLAGS_DONT_RET_CTIO      BIT_8
+#define CTIO7_FLAGS_STATUS_MODE_0      0
+#define CTIO7_FLAGS_STATUS_MODE_1      BIT_6
+#define CTIO7_FLAGS_EXPLICIT_CONFORM   BIT_5
+#define CTIO7_FLAGS_CONFIRM_SATISF     BIT_4
+#define CTIO7_FLAGS_DSD_PTR            BIT_2
+#define CTIO7_FLAGS_DATA_IN            BIT_1
+#define CTIO7_FLAGS_DATA_OUT           BIT_0
+
+#define ELS_PLOGI                      0x3
+#define ELS_FLOGI                      0x4
+#define ELS_LOGO                       0x5
+#define ELS_PRLI                       0x20
+#define ELS_PRLO                       0x21
+#define ELS_TPRLO                      0x24
+#define ELS_PDISC                      0x50
+#define ELS_ADISC                      0x52
+
+/*
+ * ISP queue - ABTS received/response entries structure definition for 24xx.
+ */
+#define ABTS_RECV_24XX         0x54 /* ABTS received (for 24xx) */
+#define ABTS_RESP_24XX         0x55 /* ABTS response (for 24xx) */
+
+/*
+ * ISP queue - ABTS received IOCB entry structure definition for 24xx.
+ *             The ABTS BLS received from the wire is sent to the
+ *             target driver by the ISP 24xx.
+ *             The IOCB is placed on the response queue.
+ */
+struct abts_recv_from_24xx {
+       uint8_t  entry_type;                /* Entry type. */
+       uint8_t  entry_count;               /* Entry count. */
+       uint8_t  sys_define;                /* System defined. */
+       uint8_t  entry_status;              /* Entry Status. */
+       uint8_t  reserved_1[6];
+       uint16_t nport_handle;
+       uint8_t  reserved_2[2];
+       uint8_t  vp_index;
+       uint8_t  reserved_3:4;
+       uint8_t  sof_type:4;
+       uint32_t exchange_address;
+       struct fcp_hdr_le fcp_hdr_le;
+       uint8_t  reserved_4[16];
+       /* Exchange address of the command the initiator wants aborted */
+       uint32_t exchange_addr_to_abort;
+} __packed;
+
+#define ABTS_PARAM_ABORT_SEQ           BIT_0
+
+/* BA_ACC (Basic Accept) payload of an ABTS response */
+struct ba_acc_le {
+       uint16_t reserved;
+       uint8_t  seq_id_last;
+       uint8_t  seq_id_valid;
+#define SEQ_ID_VALID   0x80
+#define SEQ_ID_INVALID 0x00
+       uint16_t rx_id;
+       uint16_t ox_id;
+       uint16_t high_seq_cnt;
+       uint16_t low_seq_cnt;
+} __packed;
+
+/* BA_RJT (Basic Reject) payload of an ABTS response */
+struct ba_rjt_le {
+       uint8_t vendor_uniq;
+       uint8_t reason_expl;
+       uint8_t reason_code;
+#define BA_RJT_REASON_CODE_INVALID_COMMAND     0x1
+#define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM   0x9
+       uint8_t reserved;
+} __packed;
+
+/*
+ * ISP queue - ABTS Response IOCB entry structure definition for 24xx.
+ *             The ABTS response to the ABTS received is sent by the
+ *             target driver to the ISP 24xx.
+ *             The IOCB is placed on the request queue.
+ */
+struct abts_resp_to_24xx {
+       uint8_t  entry_type;                /* Entry type. */
+       uint8_t  entry_count;               /* Entry count. */
+       uint8_t  sys_define;                /* System defined. */
+       uint8_t  entry_status;              /* Entry Status. */
+       uint32_t handle;
+       uint16_t reserved_1;
+       uint16_t nport_handle;
+       uint16_t control_flags;
+#define ABTS_CONTR_FLG_TERM_EXCHG      BIT_0
+       uint8_t  vp_index;
+       uint8_t  reserved_3:4;
+       uint8_t  sof_type:4;
+       uint32_t exchange_address;
+       struct fcp_hdr_le fcp_hdr_le;
+       /* Either a BA_ACC or a BA_RJT payload, per the response sent */
+       union {
+               struct ba_acc_le ba_acct;
+               struct ba_rjt_le ba_rjt;
+       } __packed payload;
+       uint32_t reserved_4;
+       uint32_t exchange_addr_to_abort;
+} __packed;
+
+/*
+ * ISP queue - ABTS Response IOCB from ISP24xx Firmware entry structure.
+ *             The ABTS response with completion status to the ABTS response
+ *             (sent by the target driver to the ISP 24xx) is sent by the
+ *             ISP24xx firmware to the target driver.
+ *             The IOCB is placed on the response queue.
+ */
+struct abts_resp_from_24xx_fw {
+       uint8_t  entry_type;                /* Entry type. */
+       uint8_t  entry_count;               /* Entry count. */
+       uint8_t  sys_define;                /* System defined. */
+       uint8_t  entry_status;              /* Entry Status. */
+       uint32_t handle;
+       uint16_t compl_status;              /* ABTS_RESP_COMPL_* below */
+#define ABTS_RESP_COMPL_SUCCESS                0
+#define ABTS_RESP_COMPL_SUBCODE_ERROR  0x31
+       uint16_t nport_handle;
+       uint16_t reserved_1;
+       uint8_t  reserved_2;
+       uint8_t  reserved_3:4;
+       uint8_t  sof_type:4;
+       uint32_t exchange_address;
+       struct fcp_hdr_le fcp_hdr_le;
+       uint8_t reserved_4[8];
+       uint32_t error_subcode1;
+#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM    0x1E
+       uint32_t error_subcode2;
+       uint32_t exchange_addr_to_abort;
+} __packed;
+
+/********************************************************************\
+ * Type Definitions used by initiator & target halves
+\********************************************************************/
+
+struct qla_tgt_mgmt_cmd;
+struct qla_tgt_sess;
+
+/*
+ * This structure provides a template of function calls that the
+ * target driver (from within qla_target.c) can issue to the
+ * target module (tcm_qla2xxx).
+ */
+struct qla_tgt_func_tmpl {
+
+       /* Command/data/TMR delivery callbacks */
+       int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
+                       unsigned char *, uint32_t, int, int, int);
+       int (*handle_data)(struct qla_tgt_cmd *);
+       int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
+                       uint32_t);
+       /* Resource release callbacks */
+       void (*free_cmd)(struct qla_tgt_cmd *);
+       void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
+       void (*free_session)(struct qla_tgt_sess *);
+
+       /* Session/ACL management callbacks */
+       int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
+                                       void *, uint8_t *, uint16_t);
+       struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
+                                               const uint16_t);
+       struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
+                                               const uint8_t *);
+       void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
+       void (*put_sess)(struct qla_tgt_sess *);
+       void (*shutdown_sess)(struct qla_tgt_sess *);
+};
+
+int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
+
+#include <target/target_core_base.h>
+
+#define QLA_TGT_TIMEOUT                        10      /* in seconds */
+
+#define QLA_TGT_MAX_HW_PENDING_TIME    60 /* in seconds */
+
+/* Immediate notify status constants */
+#define IMM_NTFY_LIP_RESET          0x000E
+#define IMM_NTFY_LIP_LINK_REINIT    0x000F
+#define IMM_NTFY_IOCB_OVERFLOW      0x0016
+#define IMM_NTFY_ABORT_TASK         0x0020
+#define IMM_NTFY_PORT_LOGOUT        0x0029
+#define IMM_NTFY_PORT_CONFIG        0x002A
+#define IMM_NTFY_GLBL_TPRLO         0x002D
+#define IMM_NTFY_GLBL_LOGO          0x002E
+#define IMM_NTFY_RESOURCE           0x0034
+#define IMM_NTFY_MSG_RX             0x0036
+#define IMM_NTFY_SRR                0x0045
+#define IMM_NTFY_ELS                0x0046
+
+/* Immediate notify task flags */
+#define IMM_NTFY_TASK_MGMT_SHIFT    8
+
+#define QLA_TGT_CLEAR_ACA               0x40
+#define QLA_TGT_TARGET_RESET            0x20
+#define QLA_TGT_LUN_RESET               0x10
+#define QLA_TGT_CLEAR_TS                0x04
+#define QLA_TGT_ABORT_TS                0x02
+#define QLA_TGT_ABORT_ALL_SESS          0xFFFF
+#define QLA_TGT_ABORT_ALL               0xFFFE
+#define QLA_TGT_NEXUS_LOSS_SESS         0xFFFD
+#define QLA_TGT_NEXUS_LOSS              0xFFFC
+
+/* Notify Acknowledge flags */
+#define NOTIFY_ACK_RES_COUNT        BIT_8
+#define NOTIFY_ACK_CLEAR_LIP_RESET  BIT_5
+#define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4
+
+/* Command's states */
+#define QLA_TGT_STATE_NEW              0 /* New command + target processing */
+#define QLA_TGT_STATE_NEED_DATA                1 /* target needs data to continue */
+#define QLA_TGT_STATE_DATA_IN          2 /* Data arrived + target processing */
+#define QLA_TGT_STATE_PROCESSED                3 /* target done processing */
+#define QLA_TGT_STATE_ABORTED          4 /* Command aborted */
+
+/* Special handles */
+#define QLA_TGT_NULL_HANDLE    0
+#define QLA_TGT_SKIP_HANDLE    (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK)
+
+/* ATIO task_codes field */
+#define ATIO_SIMPLE_QUEUE           0
+#define ATIO_HEAD_OF_QUEUE          1
+#define ATIO_ORDERED_QUEUE          2
+#define ATIO_ACA_QUEUE              4
+#define ATIO_UNTAGGED               5
+
+/* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */
+#define        FC_TM_SUCCESS               0
+#define        FC_TM_BAD_FCP_DATA          1
+#define        FC_TM_BAD_CMD               2
+#define        FC_TM_FCP_DATA_MISMATCH     3
+#define        FC_TM_REJECT                4
+#define FC_TM_FAILED                5
+
+/*
+ * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
+ * terminated, so no further action is needed and success should be returned
+ * to the target.
+ */
+#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED      0x1717
+
+#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
+#else
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) 0
+#endif
+
+#define QLA_TGT_SENSE_VALID(sense)  ((sense != NULL) && \
+                               (((const uint8_t *)(sense))[0] & 0x70) == 0x70)
+
+struct qla_port_24xx_data {
+       uint8_t port_name[WWN_SIZE];
+       uint16_t loop_id;
+       uint16_t reserved;
+};
+
+struct qla_tgt {
+       struct scsi_qla_host *vha;
+       struct qla_hw_data *ha;
+
+       /*
+        * To sync between IRQ handlers and qlt_target_release(). Needed,
+        * because req_pkt() can drop/reacquire HW lock inside. Protected by
+        * HW lock.
+        */
+       int irq_cmd_count;
+
+       int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;
+
+       /* Target's flags, serialized by pha->hardware_lock */
+       unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addr enabled */
+       unsigned int link_reinit_iocb_pending:1;
+
+       /*
+        * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex
+        * OR hardware_lock for reading.
+        */
+       int tgt_stop; /* the target mode driver is being stopped */
+       int tgt_stopped; /* the target mode driver has been stopped */
+
+       /* Count of sessions referring to qla_tgt. Protected by hardware_lock. */
+       int sess_count;
+
+       /* Protected by hardware_lock. Addition also protected by tgt_mutex. */
+       struct list_head sess_list;
+
+       /* Protected by hardware_lock */
+       struct list_head del_sess_list;
+       struct delayed_work sess_del_work;
+
+       spinlock_t sess_work_lock;
+       struct list_head sess_works_list;
+       struct work_struct sess_work;
+
+       struct imm_ntfy_from_isp link_reinit_iocb;
+       wait_queue_head_t waitQ;
+       int notify_ack_expected;
+       int abts_resp_expected;
+       int modify_lun_expected;
+
+       int ctio_srr_id;
+       int imm_srr_id;
+       spinlock_t srr_lock;
+       struct list_head srr_ctio_list;
+       struct list_head srr_imm_list;
+       struct work_struct srr_work;
+
+       atomic_t tgt_global_resets_count;
+
+       struct list_head tgt_list_entry;
+};
+
+/*
+ * Equivalent to IT Nexus (Initiator-Target)
+ */
+struct qla_tgt_sess {
+       uint16_t loop_id;
+       port_id_t s_id;
+
+       unsigned int conf_compl_supported:1;
+       unsigned int deleted:1;
+       unsigned int local:1;
+       unsigned int tearing_down:1;
+
+       struct se_session *se_sess;
+       struct scsi_qla_host *vha;
+       struct qla_tgt *tgt;
+
+       struct list_head sess_list_entry;
+       unsigned long expires;
+       struct list_head del_list_entry;
+
+       uint8_t port_name[WWN_SIZE];
+       struct work_struct free_work;
+};
+
+struct qla_tgt_cmd {
+       struct qla_tgt_sess *sess;
+       int state;
+       struct se_cmd se_cmd;
+       struct work_struct free_work;
+       struct work_struct work;
+       /* Sense buffer that will be mapped into outgoing status */
+       unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+
+       /* to save extra sess dereferences */
+       unsigned int conf_compl_supported:1;
+       unsigned int sg_mapped:1;
+       unsigned int free_sg:1;
+       unsigned int aborted:1; /* Needed in case of SRR */
+       unsigned int write_data_transferred:1;
+
+       struct scatterlist *sg; /* cmd data buffer SG vector */
+       int sg_cnt;             /* SG segments count */
+       int bufflen;            /* cmd buffer length */
+       int offset;
+       uint32_t tag;
+       uint32_t unpacked_lun;
+       enum dma_data_direction dma_data_direction;
+
+       uint16_t loop_id;       /* to save extra sess dereferences */
+       struct qla_tgt *tgt;    /* to save extra sess dereferences */
+       struct scsi_qla_host *vha;
+       struct list_head cmd_list;
+
+       struct atio_from_isp atio;
+};
+
+struct qla_tgt_sess_work_param {
+       struct list_head sess_works_list_entry;
+
+#define QLA_TGT_SESS_WORK_ABORT        1
+#define QLA_TGT_SESS_WORK_TM   2
+       int type;
+
+       union {
+               struct abts_recv_from_24xx abts;
+               struct imm_ntfy_from_isp tm_iocb;
+               struct atio_from_isp tm_iocb2;
+       };
+};
+
+struct qla_tgt_mgmt_cmd {
+       uint8_t tmr_func;
+       uint8_t fc_tm_rsp;
+       struct qla_tgt_sess *sess;
+       struct se_cmd se_cmd;
+       struct work_struct free_work;
+       unsigned int flags;
+#define QLA24XX_MGMT_SEND_NACK 1
+       union {
+               struct atio_from_isp atio;
+               struct imm_ntfy_from_isp imm_ntfy;
+               struct abts_recv_from_24xx abts;
+       } __packed orig_iocb;
+};
+
+struct qla_tgt_prm {
+       struct qla_tgt_cmd *cmd;
+       struct qla_tgt *tgt;
+       void *pkt;
+       struct scatterlist *sg; /* cmd data buffer SG vector */
+       int seg_cnt;
+       int req_cnt;
+       uint16_t rq_result;
+       uint16_t scsi_status;
+       unsigned char *sense_buffer;
+       int sense_buffer_len;
+       int residual;
+       int add_status_pkt;
+};
+
+struct qla_tgt_srr_imm {
+       struct list_head srr_list_entry;
+       int srr_id;
+       struct imm_ntfy_from_isp imm_ntfy;
+};
+
+struct qla_tgt_srr_ctio {
+       struct list_head srr_list_entry;
+       int srr_id;
+       struct qla_tgt_cmd *cmd;
+};
+
+#define QLA_TGT_XMIT_DATA              1
+#define QLA_TGT_XMIT_STATUS            2
+#define QLA_TGT_XMIT_ALL               (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
+
+#include <linux/version.h>
+
+extern struct qla_tgt_data qla_target;
+/*
+ * Internal function prototypes
+ */
+void qlt_disable_vha(struct scsi_qla_host *);
+
+/*
+ * Function prototypes for qla_target.c logic used by qla2xxx LLD code.
+ */
+extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64,
+                       int (*callback)(struct scsi_qla_host *), void *);
+extern void qlt_lport_deregister(struct scsi_qla_host *);
+extern void qlt_unreg_sess(struct qla_tgt_sess *);
+extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_set_mode(struct scsi_qla_host *ha);
+extern void qlt_clear_mode(struct scsi_qla_host *ha);
+extern int __init qlt_init(void);
+extern void qlt_exit(void);
+extern void qlt_update_vp_map(struct scsi_qla_host *, int);
+
+/*
+ * This macro is used during early initializations when host->active_mode
+ * is not set. Right now, ha value is ignored.
+ */
+#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+
+static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
+{
+       return ha->host->active_mode & MODE_TARGET;
+}
+
+static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha)
+{
+       return ha->host->active_mode & MODE_INITIATOR;
+}
+
+static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
+{
+       if (ha->host->active_mode & MODE_INITIATOR)
+               ha->host->active_mode &= ~MODE_INITIATOR;
+       else
+               ha->host->active_mode |= MODE_INITIATOR;
+}
+
+/*
+ * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
+ */
+extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
+       struct atio_from_isp *);
+extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
+extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
+extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
+extern void qlt_ctio_completion(struct scsi_qla_host *, uint32_t);
+extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
+extern void qlt_enable_vha(struct scsi_qla_host *);
+extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
+extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
+extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
+extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
+extern void qlt_24xx_config_rings(struct scsi_qla_host *,
+       device_reg_t __iomem *);
+extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
+       struct nvram_24xx *);
+extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
+       struct init_cb_24xx *);
+extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
+       struct sts_entry_24xx *);
+extern void qlt_modify_vp_config(struct scsi_qla_host *,
+       struct vp_config_entry_24xx *);
+extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
+extern int qlt_mem_alloc(struct qla_hw_data *);
+extern void qlt_mem_free(struct qla_hw_data *);
+extern void qlt_stop_phase1(struct qla_tgt *);
+extern void qlt_stop_phase2(struct qla_tgt *);
+
+#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
new file mode 100644 (file)
index 0000000..436598f
--- /dev/null
@@ -0,0 +1,1955 @@
+/*******************************************************************************
+ * This file contains tcm implementation using v4 configfs fabric infrastructure
+ * for QLogic target mode HBAs
+ *
+ * (c) Copyright 2010-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL)
+ * version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ *
+ * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
+ * the TCM_FC / Open-FCoE.org fabric module.
+ *
+ * Copyright (c) 2010 Cisco Systems, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+#include "tcm_qla2xxx.h"
+
+struct workqueue_struct *tcm_qla2xxx_free_wq;
+struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+
+static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
+{
+       return 1;
+}
+
+static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
+{
+       return 0;
+}
+
+/*
+ * Parse WWN.
+ * If strict, we require lower-case hex and colon separators to be sure
+ * the name is the same as what would be generated by ft_format_wwn()
+ * so the name and wwn are mapped one-to-one.
+ */
+static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
+{
+       const char *cp;
+       char c;
+       u32 nibble;
+       u32 byte = 0;
+       u32 pos = 0;
+       u32 err;
+
+       *wwn = 0;
+       for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
+               c = *cp;
+               if (c == '\n' && cp[1] == '\0')
+                       continue;
+               if (strict && pos++ == 2 && byte++ < 7) {
+                       pos = 0;
+                       if (c == ':')
+                               continue;
+                       err = 1;
+                       goto fail;
+               }
+               if (c == '\0') {
+                       err = 2;
+                       if (strict && byte != 8)
+                               goto fail;
+                       return cp - name;
+               }
+               err = 3;
+               if (isdigit(c))
+                       nibble = c - '0';
+               else if (isxdigit(c) && (islower(c) || !strict))
+                       nibble = tolower(c) - 'a' + 10;
+               else
+                       goto fail;
+               *wwn = (*wwn << 4) | nibble;
+       }
+       err = 4;
+fail:
+       pr_debug("err %u len %zu pos %u byte %u\n",
+                       err, cp - name, pos, byte);
+       return -1;
+}
+
+static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
+{
+       u8 b[8];
+
+       put_unaligned_be64(wwn, b);
+       return snprintf(buf, len,
+               "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+               b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
+}
+
+static char *tcm_qla2xxx_get_fabric_name(void)
+{
+       return "qla2xxx";
+}
+
+/*
+ * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
+ */
+static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
+{
+       unsigned int i, j, value;
+       u8 wwn[8];
+
+       memset(wwn, 0, sizeof(wwn));
+
+       /* Validate and store the new name */
+       for (i = 0, j = 0; i < 16; i++) {
+               value = hex_to_bin(*ns++);
+               if (value >= 0)
+                       j = (j << 4) | value;
+               else
+                       return -EINVAL;
+
+               if (i % 2) {
+                       wwn[i/2] = j & 0xff;
+                       j = 0;
+               }
+       }
+
+       *nm = wwn_to_u64(wwn);
+       return 0;
+}
+
+/*
+ * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
+ * store_fc_host_vport_create()
+ */
+static int tcm_qla2xxx_npiv_parse_wwn(
+       const char *name,
+       size_t count,
+       u64 *wwpn,
+       u64 *wwnn)
+{
+       unsigned int cnt = count;
+       int rc;
+
+       *wwpn = 0;
+       *wwnn = 0;
+
+       /* count may include a LF at end of string */
+       if (name[cnt-1] == '\n')
+               cnt--;
+
+       /* validate we have enough characters for WWPN */
+       if ((cnt != (16+1+16)) || (name[16] != ':'))
+               return -EINVAL;
+
+       rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
+       if (rc != 0)
+               return rc;
+
+       rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
+       if (rc != 0)
+               return rc;
+
+       return 0;
+}
+
+static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
+                                       u64 wwpn, u64 wwnn)
+{
+       u8 b[8], b2[8];
+
+       put_unaligned_be64(wwpn, b);
+       put_unaligned_be64(wwnn, b2);
+       return snprintf(buf, len,
+               "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
+               "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+               b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
+               b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
+}
+
+static char *tcm_qla2xxx_npiv_get_fabric_name(void)
+{
+       return "qla2xxx_npiv";
+}
+
+static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+       struct tcm_qla2xxx_lport *lport = tpg->lport;
+       u8 proto_id;
+
+       switch (lport->lport_proto_id) {
+       case SCSI_PROTOCOL_FCP:
+       default:
+               proto_id = fc_get_fabric_proto_ident(se_tpg);
+               break;
+       }
+
+       return proto_id;
+}
+
+static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+       struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+       return &lport->lport_name[0];
+}
+
+static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+       struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+       return &lport->lport_npiv_name[0];
+}
+
+static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+       return tpg->lport_tpgt;
+}
+
+static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
+{
+       return 1;
+}
+
+static u32 tcm_qla2xxx_get_pr_transport_id(
+       struct se_portal_group *se_tpg,
+       struct se_node_acl *se_nacl,
+       struct t10_pr_registration *pr_reg,
+       int *format_code,
+       unsigned char *buf)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+       struct tcm_qla2xxx_lport *lport = tpg->lport;
+       int ret = 0;
+
+       switch (lport->lport_proto_id) {
+       case SCSI_PROTOCOL_FCP:
+       default:
+               ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+                                       format_code, buf);
+               break;
+       }
+
+       return ret;
+}
+
+static u32 tcm_qla2xxx_get_pr_transport_id_len(
+       struct se_portal_group *se_tpg,
+       struct se_node_acl *se_nacl,
+       struct t10_pr_registration *pr_reg,
+       int *format_code)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+       struct tcm_qla2xxx_lport *lport = tpg->lport;
+       int ret = 0;
+
+       switch (lport->lport_proto_id) {
+       case SCSI_PROTOCOL_FCP:
+       default:
+               ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+                                       format_code);
+               break;
+       }
+
+       return ret;
+}
+
+static char *tcm_qla2xxx_parse_pr_out_transport_id(
+       struct se_portal_group *se_tpg,
+       const char *buf,
+       u32 *out_tid_len,
+       char **port_nexus_ptr)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+       struct tcm_qla2xxx_lport *lport = tpg->lport;
+       char *tid = NULL;
+
+       switch (lport->lport_proto_id) {
+       case SCSI_PROTOCOL_FCP:
+       default:
+               tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+                                       port_nexus_ptr);
+               break;
+       }
+
+       return tid;
+}
+
+static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+
+       return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
+}
+
+static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+
+       return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+}
+
+static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+
+       return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+}
+
+static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+
+       return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+}
+
+static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
+       struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_nacl *nacl;
+
+       nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
+       if (!nacl) {
+               pr_err("Unable to alocate struct tcm_qla2xxx_nacl\n");
+               return NULL;
+       }
+
+       return &nacl->se_node_acl;
+}
+
+static void tcm_qla2xxx_release_fabric_acl(
+       struct se_portal_group *se_tpg,
+       struct se_node_acl *se_nacl)
+{
+       struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+                       struct tcm_qla2xxx_nacl, se_node_acl);
+       kfree(nacl);
+}
+
+static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                               struct tcm_qla2xxx_tpg, se_tpg);
+
+       return tpg->lport_tpgt;
+}
+
+static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
+{
+       struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
+                       struct qla_tgt_mgmt_cmd, free_work);
+
+       transport_generic_free_cmd(&mcmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_mcmd(), and will call
+ * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
+ * release callback.  qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+       INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
+       queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
+}
+
+static void tcm_qla2xxx_complete_free(struct work_struct *work)
+{
+       struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+       transport_generic_free_cmd(&cmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_cmd(), and will call
+ * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
+ * release callback.  qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
+{
+       INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
+       queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+}
+
+/*
+ * Called from struct target_core_fabric_ops->check_stop_free() context
+ */
+static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
+{
+       return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+}
+
+/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
+ * fabric descriptor @se_cmd command to release
+ */
+static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
+{
+       struct qla_tgt_cmd *cmd;
+
+       if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+               struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+                               struct qla_tgt_mgmt_cmd, se_cmd);
+               qlt_free_mcmd(mcmd);
+               return;
+       }
+
+       cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+       qlt_free_cmd(cmd);
+}
+
+static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
+{
+       struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+       struct scsi_qla_host *vha;
+       unsigned long flags;
+
+       BUG_ON(!sess);
+       vha = sess->vha;
+
+       spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+       sess->tearing_down = 1;
+       target_splice_sess_cmd_list(se_sess);
+       spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+       return 1;
+}
+
+static void tcm_qla2xxx_close_session(struct se_session *se_sess)
+{
+       struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+       struct scsi_qla_host *vha;
+       unsigned long flags;
+
+       BUG_ON(!sess);
+       vha = sess->vha;
+
+       spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+       qlt_unreg_sess(sess);
+       spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
+static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
+{
+       return 0;
+}
+
+/*
+ * The LIO target core uses DMA_TO_DEVICE to mean that data is going
+ * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
+ * that data is coming from the target (eg handling a READ).  However,
+ * this is just the opposite of what we have to tell the DMA mapping
+ * layer -- eg when handling a READ, the HBA will have to DMA the data
+ * out of memory so it can send it to the initiator, which means we
+ * need to use DMA_TO_DEVICE when we map the data.
+ */
+static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
+{
+       if (se_cmd->se_cmd_flags & SCF_BIDI)
+               return DMA_BIDIRECTIONAL;
+
+       switch (se_cmd->data_direction) {
+       case DMA_TO_DEVICE:
+               return DMA_FROM_DEVICE;
+       case DMA_FROM_DEVICE:
+               return DMA_TO_DEVICE;
+       case DMA_NONE:
+       default:
+               return DMA_NONE;
+       }
+}
+
+static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
+{
+       struct qla_tgt_cmd *cmd = container_of(se_cmd,
+                               struct qla_tgt_cmd, se_cmd);
+
+       cmd->bufflen = se_cmd->data_length;
+       cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+
+       cmd->sg_cnt = se_cmd->t_data_nents;
+       cmd->sg = se_cmd->t_data_sg;
+
+       /*
+        * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
+        * the SGL mappings into PCIe memory for incoming FCP WRITE data.
+        */
+       return qlt_rdy_to_xfer(cmd);
+}
+
+static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
+{
+       unsigned long flags;
+       /*
+        * Check for WRITE_PENDING status to determine if we need to wait for
+        * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
+        */
+       spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+       if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
+           se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
+               spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+               wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
+                                               3000);
+               return 0;
+       }
+       spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+       return 0;
+}
+
+static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
+{
+       return;
+}
+
+static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
+{
+       struct qla_tgt_cmd *cmd = container_of(se_cmd,
+                               struct qla_tgt_cmd, se_cmd);
+
+       return cmd->tag;
+}
+
+static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
+{
+       return 0;
+}
+
+/*
+ * Called from process context in qla_target.c:qlt_do_work() code
+ */
+static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+       unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
+       int data_dir, int bidi)
+{
+       struct se_cmd *se_cmd = &cmd->se_cmd;
+       struct se_session *se_sess;
+       struct qla_tgt_sess *sess;
+       int flags = TARGET_SCF_ACK_KREF;
+
+       if (bidi)
+               flags |= TARGET_SCF_BIDI_OP;
+
+       sess = cmd->sess;
+       if (!sess) {
+               pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
+               return -EINVAL;
+       }
+
+       se_sess = sess->se_sess;
+       if (!se_sess) {
+               pr_err("Unable to locate active struct se_session\n");
+               return -EINVAL;
+       }
+
+       target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
+                               cmd->unpacked_lun, data_length, fcp_task_attr,
+                               data_dir, flags);
+       return 0;
+}
+
+static void tcm_qla2xxx_do_rsp(struct work_struct *work)
+{
+       struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+       /*
+        * Dispatch ->queue_status from workqueue process context
+        */
+       transport_generic_request_failure(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_do_ctio_completion()
+ *
+ * Completion path for FCP WRITE data: when the full payload arrived,
+ * pass the command on to the backstore; otherwise either complete an
+ * in-flight abort or schedule a CHECK_CONDITION response.
+ */
+static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
+{
+       struct se_cmd *se_cmd = &cmd->se_cmd;
+       unsigned long flags;
+       /*
+        * Ensure that the complete FCP WRITE payload has been received.
+        * Otherwise return an exception via CHECK_CONDITION status.
+        */
+       if (!cmd->write_data_transferred) {
+               /*
+                * Check if se_cmd has already been aborted via LUN_RESET, and
+                * waiting upon completion in tcm_qla2xxx_write_pending_status()
+                */
+               spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+               if (se_cmd->transport_state & CMD_T_ABORTED) {
+                       spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+                       /* Wake the abort waiter; no status is sent here. */
+                       complete(&se_cmd->t_transport_stop_comp);
+                       return 0;
+               }
+               spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+               /* Defer the failure response to workqueue process context. */
+               se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
+               INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp);
+               queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+               return 0;
+       }
+       /*
+        * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE
+        * status to the backstore processing thread.
+        */
+       return transport_generic_handle_data(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_issue_task_mgmt()
+ *
+ * Submit a task management request (@tmr_func) for @lun to the target
+ * core on behalf of @mcmd's session.  GFP_ATOMIC is passed, so this is
+ * presumably reachable from atomic context — verify against the caller.
+ */
+int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
+                       uint8_t tmr_func, uint32_t tag)
+{
+       struct qla_tgt_sess *sess = mcmd->sess;
+       struct se_cmd *se_cmd = &mcmd->se_cmd;
+
+       return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
+                       tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
+}
+
+/*
+ * TCM ->queue_data_in() callback: mirror the se_cmd's data-in buffer
+ * state into the qla_tgt_cmd and queue both DATA_IN and STATUS to the
+ * qla2xxx LLD response ring in a single exchange.
+ */
+static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
+{
+       struct qla_tgt_cmd *cmd = container_of(se_cmd,
+                               struct qla_tgt_cmd, se_cmd);
+
+       cmd->bufflen = se_cmd->data_length;
+       cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+       /* Remember the abort state so the LLD can skip the response. */
+       cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+       cmd->sg_cnt = se_cmd->t_data_nents;
+       cmd->sg = se_cmd->t_data_sg;
+       cmd->offset = 0;
+
+       /*
+        * Now queue completed DATA_IN the qla2xxx LLD and response ring
+        */
+       return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
+                               se_cmd->scsi_status);
+}
+
+/*
+ * TCM ->queue_status() callback: send a status-only response (no data
+ * segment) for @se_cmd through the qla2xxx LLD response ring.
+ */
+static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
+{
+       struct qla_tgt_cmd *cmd = container_of(se_cmd,
+                               struct qla_tgt_cmd, se_cmd);
+       int xmit_type = QLA_TGT_XMIT_STATUS;
+
+       cmd->bufflen = se_cmd->data_length;
+       /* Status-only: no scatterlist accompanies this response. */
+       cmd->sg = NULL;
+       cmd->sg_cnt = 0;
+       cmd->offset = 0;
+       cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+       cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+       if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+               /*
+                * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
+                * for qla_tgt_xmit_response LLD code
+                */
+               se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+               se_cmd->residual_count = se_cmd->data_length;
+
+               cmd->bufflen = 0;
+       }
+       /*
+        * Now queue status response to qla2xxx LLD code and response ring
+        */
+       return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+}
+
+/*
+ * TCM ->queue_tm_rsp() callback: translate the target core's TMR
+ * response code into the FC TM response code and hand the completed
+ * management command back to the qla2xxx LLD.
+ */
+static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+       struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+       struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+                               struct qla_tgt_mgmt_cmd, se_cmd);
+
+       pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
+                       mcmd, se_tmr->function, se_tmr->response);
+       /*
+        * Do translation between TCM TM response codes and
+        * QLA2xxx FC TM response codes.
+        */
+       switch (se_tmr->response) {
+       case TMR_FUNCTION_COMPLETE:
+               mcmd->fc_tm_rsp = FC_TM_SUCCESS;
+               break;
+       case TMR_TASK_DOES_NOT_EXIST:
+               mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
+               break;
+       case TMR_FUNCTION_REJECTED:
+               mcmd->fc_tm_rsp = FC_TM_REJECT;
+               break;
+       case TMR_LUN_DOES_NOT_EXIST:
+       default:
+               /* Any unrecognized TCM response maps to a generic failure. */
+               mcmd->fc_tm_rsp = FC_TM_FAILED;
+               break;
+       }
+       /*
+        * Queue the TM response to QLA2xxx LLD to build a
+        * CTIO response packet.
+        */
+       qlt_xmit_tm_rsp(mcmd);
+
+       return 0;
+}
+
+/* This fabric reserves no additional sense buffer length. */
+static u16 tcm_qla2xxx_get_fabric_sense_len(void)
+{
+       return 0;
+}
+
+/* Sense length bookkeeping is not used by this fabric; report zero. */
+static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd,
+                                       u32 sense_length)
+{
+       return 0;
+}
+
+/* Local pointer to allocated TCM configfs fabric module */
+struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
+/* Separate configfs fabric instance used for NPIV operation */
+struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
+
+/*
+ * Scan the fc_host's existing rports for one whose node_name matches
+ * @rport_wwnn; when found, record its port_id in @nacl and map
+ * port_id -> @se_nacl in the lport's fcport btree (insert or update).
+ *
+ * Returns 1 when a matching rport was found and mapped, 0 when no
+ * rport matched, or a negative errno on btree failure.
+ */
+static int tcm_qla2xxx_setup_nacl_from_rport(
+       struct se_portal_group *se_tpg,
+       struct se_node_acl *se_nacl,
+       struct tcm_qla2xxx_lport *lport,
+       struct tcm_qla2xxx_nacl *nacl,
+       u64 rport_wwnn)
+{
+       struct scsi_qla_host *vha = lport->qla_vha;
+       struct Scsi_Host *sh = vha->host;
+       struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
+       struct fc_rport *rport;
+       unsigned long flags;
+       void *node;
+       int rc;
+
+       /*
+        * Scan the existing rports, and create a session for the
+        * explicit NodeACL if a matching rport->node_name already
+        * exists.
+        */
+       spin_lock_irqsave(sh->host_lock, flags);
+       list_for_each_entry(rport, &fc_host->rports, peers) {
+               if (rport_wwnn != rport->node_name)
+                       continue;
+
+               pr_debug("Located existing rport_wwpn and rport->node_name: 0x%016LX, port_id: 0x%04x\n",
+                   rport->node_name, rport->port_id);
+               nacl->nport_id = rport->port_id;
+
+               /* host_lock must be dropped before taking hardware_lock. */
+               spin_unlock_irqrestore(sh->host_lock, flags);
+
+               spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+               node = btree_lookup32(&lport->lport_fcport_map, rport->port_id);
+               if (node) {
+                       rc = btree_update32(&lport->lport_fcport_map,
+                                           rport->port_id, se_nacl);
+               } else {
+                       rc = btree_insert32(&lport->lport_fcport_map,
+                                           rport->port_id, se_nacl,
+                                           GFP_ATOMIC);
+               }
+               spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+               if (rc) {
+                       /* Add missing newline so the log line terminates. */
+                       pr_err("Unable to insert se_nacl into fcport_map\n");
+                       WARN_ON(rc > 0);
+                       return rc;
+               }
+
+               pr_debug("Inserted into fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%08x\n",
+                   se_nacl, rport_wwnn, nacl->nport_id);
+
+               return 1;
+       }
+       spin_unlock_irqrestore(sh->host_lock, flags);
+
+       return 0;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ *
+ * Remove the session's NodeACL from the lport's port_id -> se_nacl
+ * btree mapping during session teardown.
+ */
+static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
+{
+       struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
+       struct se_portal_group *se_tpg = se_nacl->se_tpg;
+       struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+       struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+                               struct tcm_qla2xxx_lport, lport_wwn);
+       struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+                               struct tcm_qla2xxx_nacl, se_node_acl);
+       void *node;
+
+       pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
+
+       node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
+       /* The map entry, if present, must have pointed at this NodeACL. */
+       WARN_ON(node && (node != se_nacl));
+
+       pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
+           se_nacl, nacl->nport_wwnn, nacl->nport_id);
+}
+
+/* Drop the session reference by releasing the se_session kref. */
+static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
+{
+       target_put_session(sess->se_sess);
+}
+
+/* LLD shutdown hook: forward to the fabric's se_session shutdown path. */
+static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
+{
+       tcm_qla2xxx_shutdown_session(sess->se_sess);
+}
+
+/*
+ * ConfigFS callback: create an explicit NodeACL named by a WWN string,
+ * register it with the TPG, and hook it up to any already-discovered
+ * fc_rport with a matching node_name.
+ */
+static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
+       struct se_portal_group *se_tpg,
+       struct config_group *group,
+       const char *name)
+{
+       struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+       struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+                               struct tcm_qla2xxx_lport, lport_wwn);
+       struct se_node_acl *se_nacl, *se_nacl_new;
+       struct tcm_qla2xxx_nacl *nacl;
+       u64 wwnn;
+       u32 qla2xxx_nexus_depth;
+       int rc;
+
+       if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
+               return ERR_PTR(-EINVAL);
+
+       se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
+       if (!se_nacl_new)
+               return ERR_PTR(-ENOMEM);
+/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
+       qla2xxx_nexus_depth = 1;
+
+       /*
+        * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+        * when converting a NodeACL from demo mode -> explicit
+        */
+       se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+                               name, qla2xxx_nexus_depth);
+       if (IS_ERR(se_nacl)) {
+               tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
+               return se_nacl;
+       }
+       /*
+        * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
+        */
+       nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+       nacl->nport_wwnn = wwnn;
+       tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
+       /*
+        * Setup a se_nacl handle based on a matching struct fc_rport setup
+        * via drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+        */
+       rc = tcm_qla2xxx_setup_nacl_from_rport(se_tpg, se_nacl, lport,
+                                       nacl, wwnn);
+       if (rc < 0) {
+               tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
+               return ERR_PTR(rc);
+       }
+
+       return se_nacl;
+}
+
+/*
+ * ConfigFS callback: deregister an explicit NodeACL from its TPG and
+ * free the containing tcm_qla2xxx_nacl allocation.
+ */
+static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
+{
+       struct se_portal_group *se_tpg = se_acl->se_tpg;
+       struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
+                               struct tcm_qla2xxx_nacl, se_node_acl);
+
+       core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
+       kfree(nacl);
+}
+
+/* Start items for tcm_qla2xxx_tpg_attrib_cit */
+
+/*
+ * DEF_QLA_TPG_ATTRIB(name) - generate the configfs show/store handlers
+ * for TPG attribute @name.  The store handler parses an unsigned long
+ * from @page and delegates range checking to
+ * tcm_qla2xxx_set_attrib_##name() (see DEF_QLA_TPG_ATTR_BOOL below).
+ */
+#define DEF_QLA_TPG_ATTRIB(name)                                       \
+                                                                       \
+static ssize_t tcm_qla2xxx_tpg_attrib_show_##name(                     \
+       struct se_portal_group *se_tpg,                                 \
+       char *page)                                                     \
+{                                                                      \
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,              \
+                       struct tcm_qla2xxx_tpg, se_tpg);                \
+                                                                       \
+       return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name);        \
+}                                                                      \
+                                                                       \
+static ssize_t tcm_qla2xxx_tpg_attrib_store_##name(                    \
+       struct se_portal_group *se_tpg,                                 \
+       const char *page,                                               \
+       size_t count)                                                   \
+{                                                                      \
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,              \
+                       struct tcm_qla2xxx_tpg, se_tpg);                \
+       unsigned long val;                                              \
+       int ret;                                                        \
+                                                                       \
+       ret = kstrtoul(page, 0, &val);                                  \
+       if (ret < 0) {                                                  \
+               pr_err("kstrtoul() failed with"                         \
+                               " ret: %d\n", ret);                     \
+               return -EINVAL;                                         \
+       }                                                               \
+       ret = tcm_qla2xxx_set_attrib_##name(tpg, val);                  \
+                                                                       \
+       return (!ret) ? count : -EINVAL;                                \
+}
+
+/*
+ * DEF_QLA_TPG_ATTR_BOOL(_name) - generate the setter that validates
+ * @val as a strict boolean (0 or 1) before storing it into the TPG's
+ * attribute struct.  Returns 0 on success, -EINVAL otherwise.
+ */
+#define DEF_QLA_TPG_ATTR_BOOL(_name)                                   \
+                                                                       \
+static int tcm_qla2xxx_set_attrib_##_name(                             \
+       struct tcm_qla2xxx_tpg *tpg,                                    \
+       unsigned long val)                                              \
+{                                                                      \
+       struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib;            \
+                                                                       \
+       if ((val != 0) && (val != 1)) {                                 \
+               pr_err("Illegal boolean value %lu\n", val);             \
+               return -EINVAL;                                         \
+       }                                                               \
+                                                                       \
+       a->_name = val;                                                 \
+       return 0;                                                       \
+}
+
+/* Wrap TF_TPG_ATTRIB_ATTR to declare the configfs attribute object. */
+#define QLA_TPG_ATTR(_name, _mode) \
+       TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
+DEF_QLA_TPG_ATTRIB(generate_node_acls);
+QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
+DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
+QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
+QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
+QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+
+/* Attribute table registered under the TPG "attrib" configfs group. */
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
+       &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
+       &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
+       &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
+       &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
+       NULL,
+};
+
+/* End items for tcm_qla2xxx_tpg_attrib_cit */
+
+/* configfs "enable" show: report whether this TPG is currently enabled. */
+static ssize_t tcm_qla2xxx_tpg_show_enable(
+       struct se_portal_group *se_tpg,
+       char *page)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                       struct tcm_qla2xxx_tpg, se_tpg);
+
+       return snprintf(page, PAGE_SIZE, "%d\n",
+                       atomic_read(&tpg->lport_tpg_enabled));
+}
+
+/*
+ * configfs "enable" store: accept 0 or 1 and enable/disable target
+ * mode on the underlying qla2xxx vha accordingly.  Returns @count on
+ * success or a negative errno.
+ */
+static ssize_t tcm_qla2xxx_tpg_store_enable(
+       struct se_portal_group *se_tpg,
+       const char *page,
+       size_t count)
+{
+       struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+       struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+                       struct tcm_qla2xxx_lport, lport_wwn);
+       struct scsi_qla_host *vha = lport->qla_vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                       struct tcm_qla2xxx_tpg, se_tpg);
+       unsigned long op;
+       int rc;
+
+       rc = kstrtoul(page, 0, &op);
+       if (rc < 0) {
+               pr_err("kstrtoul() returned %d\n", rc);
+               return -EINVAL;
+       }
+       if ((op != 1) && (op != 0)) {
+               pr_err("Illegal value for tpg_enable: %lu\n", op);
+               return -EINVAL;
+       }
+
+       if (op) {
+               atomic_set(&tpg->lport_tpg_enabled, 1);
+               qlt_enable_vha(vha);
+       } else {
+               if (!ha->tgt.qla_tgt) {
+                       /* Fix log typo: "truct" -> "struct". */
+                       pr_err("struct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
+                       return -ENODEV;
+               }
+               atomic_set(&tpg->lport_tpg_enabled, 0);
+               qlt_stop_phase1(ha->tgt.qla_tgt);
+       }
+
+       return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
+
+/* Base TPG attribute table: only the "enable" knob is exposed. */
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
+       &tcm_qla2xxx_tpg_enable.attr,
+       NULL,
+};
+
+/*
+ * ConfigFS callback: create a TPG named "tpgt_<N>" under a physical
+ * (non-NPIV) lport.  Non-NPIV mode only supports the single TPG=1.
+ */
+static struct se_portal_group *tcm_qla2xxx_make_tpg(
+       struct se_wwn *wwn,
+       struct config_group *group,
+       const char *name)
+{
+       struct tcm_qla2xxx_lport *lport = container_of(wwn,
+                       struct tcm_qla2xxx_lport, lport_wwn);
+       struct tcm_qla2xxx_tpg *tpg;
+       unsigned long tpgt;
+       int ret;
+
+       if (strstr(name, "tpgt_") != name)
+               return ERR_PTR(-EINVAL);
+       if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+               return ERR_PTR(-EINVAL);
+
+       if (!lport->qla_npiv_vp && (tpgt != 1)) {
+               pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
+               return ERR_PTR(-ENOSYS);
+       }
+
+       tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+       if (!tpg) {
+               pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+               return ERR_PTR(-ENOMEM);
+       }
+       tpg->lport = lport;
+       tpg->lport_tpgt = tpgt;
+       /*
+        * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
+        * NodeACLs
+        */
+       QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
+       QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
+       QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
+
+       ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
+                               &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+       if (ret < 0) {
+               kfree(tpg);
+               /*
+                * NOTE(review): returning NULL (rather than ERR_PTR(ret))
+                * drops the errno — confirm configfs treats NULL as failure.
+                */
+               return NULL;
+       }
+       /*
+        * Setup local TPG=1 pointer for non NPIV mode.
+        */
+       if (lport->qla_npiv_vp == NULL)
+               lport->tpg_1 = tpg;
+
+       return &tpg->se_tpg;
+}
+
+/*
+ * ConfigFS callback: tear down a TPG — stop target mode in the LLD if
+ * still active, deregister from the target core, and free the TPG.
+ */
+static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
+{
+       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+                       struct tcm_qla2xxx_tpg, se_tpg);
+       struct tcm_qla2xxx_lport *lport = tpg->lport;
+       struct scsi_qla_host *vha = lport->qla_vha;
+       struct qla_hw_data *ha = vha->hw;
+       /*
+        * Call into qla2x_target.c LLD logic to shutdown the active
+        * FC Nexuses and disable target mode operation for this qla_hw_data
+        */
+       if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
+               qlt_stop_phase1(ha->tgt.qla_tgt);
+
+       core_tpg_deregister(se_tpg);
+       /*
+        * Clear local TPG=1 pointer for non NPIV mode.
+        */
+       if (lport->qla_npiv_vp == NULL)
+               lport->tpg_1 = NULL;
+
+       kfree(tpg);
+}
+
+/*
+ * ConfigFS callback: create a TPG under an NPIV lport.  Unlike the
+ * non-NPIV variant, any tpgt value up to USHRT_MAX is accepted and no
+ * demo-mode attribute defaults are set here.
+ */
+static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
+       struct se_wwn *wwn,
+       struct config_group *group,
+       const char *name)
+{
+       struct tcm_qla2xxx_lport *lport = container_of(wwn,
+                       struct tcm_qla2xxx_lport, lport_wwn);
+       struct tcm_qla2xxx_tpg *tpg;
+       unsigned long tpgt;
+       int ret;
+
+       if (strstr(name, "tpgt_") != name)
+               return ERR_PTR(-EINVAL);
+       if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+               return ERR_PTR(-EINVAL);
+
+       tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+       if (!tpg) {
+               pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+               return ERR_PTR(-ENOMEM);
+       }
+       tpg->lport = lport;
+       tpg->lport_tpgt = tpgt;
+
+       ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
+                               &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+       if (ret < 0) {
+               kfree(tpg);
+               /* NOTE(review): NULL here loses the errno from ret. */
+               return NULL;
+       }
+       return &tpg->se_tpg;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ *
+ * Look up the active qla_tgt_sess for a 24-bit FC S_ID (big-endian
+ * byte triple) via the lport's port_id -> se_nacl btree.
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
+       scsi_qla_host_t *vha,
+       const uint8_t *s_id)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct tcm_qla2xxx_lport *lport;
+       struct se_node_acl *se_nacl;
+       struct tcm_qla2xxx_nacl *nacl;
+       u32 key;
+
+       lport = ha->tgt.target_lport_ptr;
+       if (!lport) {
+               pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+               dump_stack();
+               return NULL;
+       }
+
+       /* Pack the 3-byte S_ID into a 24-bit btree key. */
+       key = (((unsigned long)s_id[0] << 16) |
+              ((unsigned long)s_id[1] << 8) |
+              (unsigned long)s_id[2]);
+       pr_debug("find_sess_by_s_id: 0x%06x\n", key);
+
+       se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
+       if (!se_nacl) {
+               pr_debug("Unable to locate s_id: 0x%06x\n", key);
+               return NULL;
+       }
+       pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
+           se_nacl, se_nacl->initiatorname);
+
+       nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+       if (!nacl->qla_tgt_sess) {
+               pr_err("Unable to locate struct qla_tgt_sess\n");
+               return NULL;
+       }
+
+       return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ *
+ * Install, replace, or clear the port_id -> se_nacl btree entry for
+ * the 24-bit S_ID and keep nacl->qla_tgt_sess / qla_tgt_sess->se_sess
+ * consistent with it.  A NULL @new_se_nacl means "clear".
+ */
+static void tcm_qla2xxx_set_sess_by_s_id(
+       struct tcm_qla2xxx_lport *lport,
+       struct se_node_acl *new_se_nacl,
+       struct tcm_qla2xxx_nacl *nacl,
+       struct se_session *se_sess,
+       struct qla_tgt_sess *qla_tgt_sess,
+       uint8_t *s_id)
+{
+       u32 key;
+       void *slot;
+       int rc;
+
+       /* Pack the 3-byte S_ID into a 24-bit btree key. */
+       key = (((unsigned long)s_id[0] << 16) |
+              ((unsigned long)s_id[1] << 8) |
+              (unsigned long)s_id[2]);
+       pr_debug("set_sess_by_s_id: %06x\n", key);
+
+       slot = btree_lookup32(&lport->lport_fcport_map, key);
+       if (!slot) {
+               if (new_se_nacl) {
+                       pr_debug("Setting up new fc_port entry to new_se_nacl\n");
+                       nacl->nport_id = key;
+                       rc = btree_insert32(&lport->lport_fcport_map, key,
+                                       new_se_nacl, GFP_ATOMIC);
+                       if (rc)
+                               /* Use pr_err for consistency with the rest of
+                                * this file's logging. */
+                               pr_err("Unable to insert s_id into fcport_map: %06x\n",
+                                   (int)key);
+               } else {
+                       pr_debug("Wiping nonexisting fc_port entry\n");
+               }
+
+               qla_tgt_sess->se_sess = se_sess;
+               nacl->qla_tgt_sess = qla_tgt_sess;
+               return;
+       }
+
+       if (nacl->qla_tgt_sess) {
+               if (new_se_nacl == NULL) {
+                       pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
+                       btree_remove32(&lport->lport_fcport_map, key);
+                       nacl->qla_tgt_sess = NULL;
+                       return;
+               }
+               pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
+               btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+               qla_tgt_sess->se_sess = se_sess;
+               nacl->qla_tgt_sess = qla_tgt_sess;
+               return;
+       }
+
+       if (new_se_nacl == NULL) {
+               pr_debug("Clearing existing fc_port entry\n");
+               btree_remove32(&lport->lport_fcport_map, key);
+               return;
+       }
+
+       pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
+       btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+       qla_tgt_sess->se_sess = se_sess;
+       nacl->qla_tgt_sess = qla_tgt_sess;
+
+       pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
+           nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ *
+ * Look up the active qla_tgt_sess for a hardware loop_id via the
+ * lport's flat loop_id -> se_nacl map.
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
+       scsi_qla_host_t *vha,
+       const uint16_t loop_id)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct tcm_qla2xxx_lport *lport;
+       struct se_node_acl *se_nacl;
+       struct tcm_qla2xxx_nacl *nacl;
+       struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+       lport = ha->tgt.target_lport_ptr;
+       if (!lport) {
+               pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+               dump_stack();
+               return NULL;
+       }
+
+       pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+       /* The loop_id map is a flat array indexed directly by loop_id. */
+       fc_loopid = lport->lport_loopid_map + loop_id;
+       se_nacl = fc_loopid->se_nacl;
+       if (!se_nacl) {
+               pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
+                   loop_id);
+               return NULL;
+       }
+
+       nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+       if (!nacl->qla_tgt_sess) {
+               pr_err("Unable to locate struct qla_tgt_sess\n");
+               return NULL;
+       }
+
+       return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ *
+ * Install, replace, or clear the loop_id -> se_nacl map slot and keep
+ * nacl->qla_tgt_sess / qla_tgt_sess->se_sess consistent with it.
+ * A NULL @new_se_nacl means "clear" (session teardown path).
+ */
+static void tcm_qla2xxx_set_sess_by_loop_id(
+       struct tcm_qla2xxx_lport *lport,
+       struct se_node_acl *new_se_nacl,
+       struct tcm_qla2xxx_nacl *nacl,
+       struct se_session *se_sess,
+       struct qla_tgt_sess *qla_tgt_sess,
+       uint16_t loop_id)
+{
+       struct se_node_acl *saved_nacl;
+       struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+       pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+       fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
+                       lport->lport_loopid_map)[loop_id];
+
+       saved_nacl = fc_loopid->se_nacl;
+       if (!saved_nacl) {
+               /* Empty slot: install the new mapping (if any). */
+               pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
+               fc_loopid->se_nacl = new_se_nacl;
+               if (qla_tgt_sess->se_sess != se_sess)
+                       qla_tgt_sess->se_sess = se_sess;
+               if (nacl->qla_tgt_sess != qla_tgt_sess)
+                       nacl->qla_tgt_sess = qla_tgt_sess;
+               return;
+       }
+
+       if (nacl->qla_tgt_sess) {
+               if (new_se_nacl == NULL) {
+                       pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+                       fc_loopid->se_nacl = NULL;
+                       nacl->qla_tgt_sess = NULL;
+                       return;
+               }
+
+               pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+               fc_loopid->se_nacl = new_se_nacl;
+               if (qla_tgt_sess->se_sess != se_sess)
+                       qla_tgt_sess->se_sess = se_sess;
+               if (nacl->qla_tgt_sess != qla_tgt_sess)
+                       nacl->qla_tgt_sess = qla_tgt_sess;
+               return;
+       }
+
+       if (new_se_nacl == NULL) {
+               pr_debug("Clearing fc_loopid->se_nacl\n");
+               fc_loopid->se_nacl = NULL;
+               return;
+       }
+
+       pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
+       fc_loopid->se_nacl = new_se_nacl;
+       if (qla_tgt_sess->se_sess != se_sess)
+               qla_tgt_sess->se_sess = se_sess;
+       if (nacl->qla_tgt_sess != qla_tgt_sess)
+               nacl->qla_tgt_sess = qla_tgt_sess;
+
+       pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
+           nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+/*
+ * Release a qla_tgt_sess: wait for outstanding se_cmds, clear the
+ * lport's S_ID and loop_id mappings under the hardware lock, then
+ * deregister the se_session.  Must run in process context.
+ */
+static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
+{
+       struct qla_tgt *tgt = sess->tgt;
+       struct qla_hw_data *ha = tgt->ha;
+       struct se_session *se_sess;
+       struct se_node_acl *se_nacl;
+       struct tcm_qla2xxx_lport *lport;
+       struct tcm_qla2xxx_nacl *nacl;
+       unsigned char be_sid[3];
+       unsigned long flags;
+
+       BUG_ON(in_interrupt());
+
+       se_sess = sess->se_sess;
+       if (!se_sess) {
+               pr_err("struct qla_tgt_sess->se_sess is NULL\n");
+               dump_stack();
+               return;
+       }
+       se_nacl = se_sess->se_node_acl;
+       nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+       lport = ha->tgt.target_lport_ptr;
+       if (!lport) {
+               pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+               dump_stack();
+               return;
+       }
+       target_wait_for_sess_cmds(se_sess, 0);
+       /*
+        * And now clear the se_nacl and session pointers from our HW lport
+        * mappings for fabric S_ID and LOOP_ID.
+        */
+       /* Build the big-endian S_ID byte triple used as the map key. */
+       memset(&be_sid, 0, 3);
+       be_sid[0] = sess->s_id.b.domain;
+       be_sid[1] = sess->s_id.b.area;
+       be_sid[2] = sess->s_id.b.al_pa;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
+                       sess, be_sid);
+       tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
+                       sess, sess->loop_id);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       transport_deregister_session_configfs(sess->se_sess);
+       transport_deregister_session(sess->se_sess);
+}
+
+/*
+ * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
+ * to locate struct se_node_acl
+ */
+static int tcm_qla2xxx_check_initiator_node_acl(
+       scsi_qla_host_t *vha,
+       unsigned char *fc_wwpn,
+       void *qla_tgt_sess,
+       uint8_t *s_id,
+       uint16_t loop_id)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct tcm_qla2xxx_lport *lport;
+       struct tcm_qla2xxx_tpg *tpg;
+       struct tcm_qla2xxx_nacl *nacl;
+       struct se_portal_group *se_tpg;
+       struct se_node_acl *se_nacl;
+       struct se_session *se_sess;
+       struct qla_tgt_sess *sess = qla_tgt_sess;
+       unsigned char port_name[36];
+       unsigned long flags;
+
+       lport = ha->tgt.target_lport_ptr;
+       if (!lport) {
+               pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+               dump_stack();
+               return -EINVAL;
+       }
+       /*
+        * Locate the TPG=1 reference..
+        */
+       tpg = lport->tpg_1;
+       if (!tpg) {
+               pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
+               return -EINVAL;
+       }
+       se_tpg = &tpg->se_tpg;
+
+       se_sess = transport_init_session();
+       if (IS_ERR(se_sess)) {
+               pr_err("Unable to initialize struct se_session\n");
+               return PTR_ERR(se_sess);
+       }
+       /*
+        * Format the FCP Initiator port_name into colon separated values to
+        * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
+        */
+       memset(&port_name, 0, 36);
+       snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+               fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
+               fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
+       /*
+        * Locate our struct se_node_acl either from an explicit NodeACL created
+        * via ConfigFS, or via running in TPG demo mode.
+        */
+       se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
+                                       port_name);
+       if (!se_sess->se_node_acl) {
+               transport_free_session(se_sess);
+               return -EINVAL;
+       }
+       se_nacl = se_sess->se_node_acl;
+       nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+       /*
+        * And now setup the new se_nacl and session pointers into our HW lport
+        * mappings for fabric S_ID and LOOP_ID.
+        */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
+                       qla_tgt_sess, s_id);
+       tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
+                       qla_tgt_sess, loop_id);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       /*
+        * Finally register the new FC Nexus with TCM
+        */
+       __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
+
+       return 0;
+}
+
+/*
+ * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
+ */
+static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
+       .handle_cmd             = tcm_qla2xxx_handle_cmd,
+       .handle_data            = tcm_qla2xxx_handle_data,
+       .handle_tmr             = tcm_qla2xxx_handle_tmr,
+       .free_cmd               = tcm_qla2xxx_free_cmd,
+       .free_mcmd              = tcm_qla2xxx_free_mcmd,
+       .free_session           = tcm_qla2xxx_free_session,
+       .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
+       .find_sess_by_s_id      = tcm_qla2xxx_find_sess_by_s_id,
+       .find_sess_by_loop_id   = tcm_qla2xxx_find_sess_by_loop_id,
+       .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
+       .put_sess               = tcm_qla2xxx_put_sess,
+       .shutdown_sess          = tcm_qla2xxx_shutdown_sess,
+};
+
+static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
+{
+       int rc;
+
+       rc = btree_init32(&lport->lport_fcport_map);
+       if (rc) {
+               pr_err("Unable to initialize lport->lport_fcport_map btree\n");
+               return rc;
+       }
+
+       lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
+                               65536);
+       if (!lport->lport_loopid_map) {
+               pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
+                   sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+               btree_destroy32(&lport->lport_fcport_map);
+               return -ENOMEM;
+       }
+       memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
+              * 65536);
+       pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
+              sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+       return 0;
+}
+
+static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct tcm_qla2xxx_lport *lport;
+       /*
+        * Setup local pointer to vha, NPIV VP pointer (if present) and
+        * vha->tcm_lport pointer
+        */
+       lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
+       lport->qla_vha = vha;
+
+       return 0;
+}
+
+static struct se_wwn *tcm_qla2xxx_make_lport(
+       struct target_fabric_configfs *tf,
+       struct config_group *group,
+       const char *name)
+{
+       struct tcm_qla2xxx_lport *lport;
+       u64 wwpn;
+       int ret = -ENODEV;
+
+       if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
+               return ERR_PTR(-EINVAL);
+
+       lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+       if (!lport) {
+               pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
+               return ERR_PTR(-ENOMEM);
+       }
+       lport->lport_wwpn = wwpn;
+       tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
+                               wwpn);
+
+       ret = tcm_qla2xxx_init_lport(lport);
+       if (ret != 0)
+               goto out;
+
+       ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
+                               tcm_qla2xxx_lport_register_cb, lport);
+       if (ret != 0)
+               goto out_lport;
+
+       return &lport->lport_wwn;
+out_lport:
+       vfree(lport->lport_loopid_map);
+       btree_destroy32(&lport->lport_fcport_map);
+out:
+       kfree(lport);
+       return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
+{
+       struct tcm_qla2xxx_lport *lport = container_of(wwn,
+                       struct tcm_qla2xxx_lport, lport_wwn);
+       struct scsi_qla_host *vha = lport->qla_vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct se_node_acl *node;
+       u32 key = 0;
+
+       /*
+        * Call into qla2x_target.c LLD logic to complete the
+        * shutdown of struct qla_tgt after the call to
+        * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
+        */
+       if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
+               qlt_stop_phase2(ha->tgt.qla_tgt);
+
+       qlt_lport_deregister(vha);
+
+       vfree(lport->lport_loopid_map);
+       btree_for_each_safe32(&lport->lport_fcport_map, key, node)
+               btree_remove32(&lport->lport_fcport_map, key);
+       btree_destroy32(&lport->lport_fcport_map);
+       kfree(lport);
+}
+
+static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
+       struct target_fabric_configfs *tf,
+       struct config_group *group,
+       const char *name)
+{
+       struct tcm_qla2xxx_lport *lport;
+       u64 npiv_wwpn, npiv_wwnn;
+       int ret;
+
+       if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
+                               &npiv_wwpn, &npiv_wwnn) < 0)
+               return ERR_PTR(-EINVAL);
+
+       lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+       if (!lport) {
+               pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
+               return ERR_PTR(-ENOMEM);
+       }
+       lport->lport_npiv_wwpn = npiv_wwpn;
+       lport->lport_npiv_wwnn = npiv_wwnn;
+       tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
+                       TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
+
+/* FIXME: tcm_qla2xxx_npiv_make_lport */
+       ret = -ENOSYS;
+       if (ret != 0)
+               goto out;
+
+       return &lport->lport_wwn;
+out:
+       kfree(lport);
+       return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
+{
+       struct tcm_qla2xxx_lport *lport = container_of(wwn,
+                       struct tcm_qla2xxx_lport, lport_wwn);
+       struct scsi_qla_host *vha = lport->qla_vha;
+       struct Scsi_Host *sh = vha->host;
+       /*
+        * Notify libfc that we want to release the lport->npiv_vport
+        */
+       fc_vport_terminate(lport->npiv_vport);
+
+       scsi_host_put(sh);
+       kfree(lport);
+}
+
+
+static ssize_t tcm_qla2xxx_wwn_show_attr_version(
+       struct target_fabric_configfs *tf,
+       char *page)
+{
+       return sprintf(page,
+           "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
+           UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+           utsname()->machine);
+}
+
+TF_WWN_ATTR_RO(tcm_qla2xxx, version);
+
+static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
+       &tcm_qla2xxx_wwn_version.attr,
+       NULL,
+};
+
+static struct target_core_fabric_ops tcm_qla2xxx_ops = {
+       .get_fabric_name                = tcm_qla2xxx_get_fabric_name,
+       .get_fabric_proto_ident         = tcm_qla2xxx_get_fabric_proto_ident,
+       .tpg_get_wwn                    = tcm_qla2xxx_get_fabric_wwn,
+       .tpg_get_tag                    = tcm_qla2xxx_get_tag,
+       .tpg_get_default_depth          = tcm_qla2xxx_get_default_depth,
+       .tpg_get_pr_transport_id        = tcm_qla2xxx_get_pr_transport_id,
+       .tpg_get_pr_transport_id_len    = tcm_qla2xxx_get_pr_transport_id_len,
+       .tpg_parse_pr_out_transport_id  = tcm_qla2xxx_parse_pr_out_transport_id,
+       .tpg_check_demo_mode            = tcm_qla2xxx_check_demo_mode,
+       .tpg_check_demo_mode_cache      = tcm_qla2xxx_check_demo_mode_cache,
+       .tpg_check_demo_mode_write_protect =
+                                       tcm_qla2xxx_check_demo_write_protect,
+       .tpg_check_prod_mode_write_protect =
+                                       tcm_qla2xxx_check_prod_write_protect,
+       .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+       .tpg_alloc_fabric_acl           = tcm_qla2xxx_alloc_fabric_acl,
+       .tpg_release_fabric_acl         = tcm_qla2xxx_release_fabric_acl,
+       .tpg_get_inst_index             = tcm_qla2xxx_tpg_get_inst_index,
+       .new_cmd_map                    = NULL,
+       .check_stop_free                = tcm_qla2xxx_check_stop_free,
+       .release_cmd                    = tcm_qla2xxx_release_cmd,
+       .shutdown_session               = tcm_qla2xxx_shutdown_session,
+       .close_session                  = tcm_qla2xxx_close_session,
+       .sess_get_index                 = tcm_qla2xxx_sess_get_index,
+       .sess_get_initiator_sid         = NULL,
+       .write_pending                  = tcm_qla2xxx_write_pending,
+       .write_pending_status           = tcm_qla2xxx_write_pending_status,
+       .set_default_node_attributes    = tcm_qla2xxx_set_default_node_attrs,
+       .get_task_tag                   = tcm_qla2xxx_get_task_tag,
+       .get_cmd_state                  = tcm_qla2xxx_get_cmd_state,
+       .queue_data_in                  = tcm_qla2xxx_queue_data_in,
+       .queue_status                   = tcm_qla2xxx_queue_status,
+       .queue_tm_rsp                   = tcm_qla2xxx_queue_tm_rsp,
+       .get_fabric_sense_len           = tcm_qla2xxx_get_fabric_sense_len,
+       .set_fabric_sense_len           = tcm_qla2xxx_set_fabric_sense_len,
+       /*
+        * Setup function pointers for generic logic in
+        * target_core_fabric_configfs.c
+        */
+       .fabric_make_wwn                = tcm_qla2xxx_make_lport,
+       .fabric_drop_wwn                = tcm_qla2xxx_drop_lport,
+       .fabric_make_tpg                = tcm_qla2xxx_make_tpg,
+       .fabric_drop_tpg                = tcm_qla2xxx_drop_tpg,
+       .fabric_post_link               = NULL,
+       .fabric_pre_unlink              = NULL,
+       .fabric_make_np                 = NULL,
+       .fabric_drop_np                 = NULL,
+       .fabric_make_nodeacl            = tcm_qla2xxx_make_nodeacl,
+       .fabric_drop_nodeacl            = tcm_qla2xxx_drop_nodeacl,
+};
+
+static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
+       .get_fabric_name                = tcm_qla2xxx_npiv_get_fabric_name,
+       .get_fabric_proto_ident         = tcm_qla2xxx_get_fabric_proto_ident,
+       .tpg_get_wwn                    = tcm_qla2xxx_npiv_get_fabric_wwn,
+       .tpg_get_tag                    = tcm_qla2xxx_get_tag,
+       .tpg_get_default_depth          = tcm_qla2xxx_get_default_depth,
+       .tpg_get_pr_transport_id        = tcm_qla2xxx_get_pr_transport_id,
+       .tpg_get_pr_transport_id_len    = tcm_qla2xxx_get_pr_transport_id_len,
+       .tpg_parse_pr_out_transport_id  = tcm_qla2xxx_parse_pr_out_transport_id,
+       .tpg_check_demo_mode            = tcm_qla2xxx_check_false,
+       .tpg_check_demo_mode_cache      = tcm_qla2xxx_check_true,
+       .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
+       .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
+       .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+       .tpg_alloc_fabric_acl           = tcm_qla2xxx_alloc_fabric_acl,
+       .tpg_release_fabric_acl         = tcm_qla2xxx_release_fabric_acl,
+       .tpg_get_inst_index             = tcm_qla2xxx_tpg_get_inst_index,
+       .release_cmd                    = tcm_qla2xxx_release_cmd,
+       .shutdown_session               = tcm_qla2xxx_shutdown_session,
+       .close_session                  = tcm_qla2xxx_close_session,
+       .sess_get_index                 = tcm_qla2xxx_sess_get_index,
+       .sess_get_initiator_sid         = NULL,
+       .write_pending                  = tcm_qla2xxx_write_pending,
+       .write_pending_status           = tcm_qla2xxx_write_pending_status,
+       .set_default_node_attributes    = tcm_qla2xxx_set_default_node_attrs,
+       .get_task_tag                   = tcm_qla2xxx_get_task_tag,
+       .get_cmd_state                  = tcm_qla2xxx_get_cmd_state,
+       .queue_data_in                  = tcm_qla2xxx_queue_data_in,
+       .queue_status                   = tcm_qla2xxx_queue_status,
+       .queue_tm_rsp                   = tcm_qla2xxx_queue_tm_rsp,
+       .get_fabric_sense_len           = tcm_qla2xxx_get_fabric_sense_len,
+       .set_fabric_sense_len           = tcm_qla2xxx_set_fabric_sense_len,
+       /*
+        * Setup function pointers for generic logic in
+        * target_core_fabric_configfs.c
+        */
+       .fabric_make_wwn                = tcm_qla2xxx_npiv_make_lport,
+       .fabric_drop_wwn                = tcm_qla2xxx_npiv_drop_lport,
+       .fabric_make_tpg                = tcm_qla2xxx_npiv_make_tpg,
+       .fabric_drop_tpg                = tcm_qla2xxx_drop_tpg,
+       .fabric_post_link               = NULL,
+       .fabric_pre_unlink              = NULL,
+       .fabric_make_np                 = NULL,
+       .fabric_drop_np                 = NULL,
+       .fabric_make_nodeacl            = tcm_qla2xxx_make_nodeacl,
+       .fabric_drop_nodeacl            = tcm_qla2xxx_drop_nodeacl,
+};
+
+static int tcm_qla2xxx_register_configfs(void)
+{
+       struct target_fabric_configfs *fabric, *npiv_fabric;
+       int ret;
+
+       pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
+           UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+           utsname()->machine);
+       /*
+        * Register the top level struct config_item_type with TCM core
+        */
+       fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
+       if (IS_ERR(fabric)) {
+               pr_err("target_fabric_configfs_init() failed\n");
+               return PTR_ERR(fabric);
+       }
+       /*
+        * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
+        */
+       fabric->tf_ops = tcm_qla2xxx_ops;
+       /*
+        * Setup default attribute lists for various fabric->tf_cit_tmpl
+        */
+       TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+       TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
+       TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
+                                               tcm_qla2xxx_tpg_attrib_attrs;
+       TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+       /*
+        * Register the fabric for use within TCM
+        */
+       ret = target_fabric_configfs_register(fabric);
+       if (ret < 0) {
+               pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+               return ret;
+       }
+       /*
+        * Setup our local pointer to *fabric
+        */
+       tcm_qla2xxx_fabric_configfs = fabric;
+       pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");
+
+       /*
+        * Register the top level struct config_item_type for NPIV with TCM core
+        */
+       npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
+       if (IS_ERR(npiv_fabric)) {
+               pr_err("target_fabric_configfs_init() failed\n");
+               ret = PTR_ERR(npiv_fabric);
+               goto out_fabric;
+       }
+       /*
+        * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
+        */
+       npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
+       /*
+        * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
+        */
+       TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+       TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+       /*
+        * Register the npiv_fabric for use within TCM
+        */
+       ret = target_fabric_configfs_register(npiv_fabric);
+       if (ret < 0) {
+               pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+               goto out_fabric;
+       }
+       /*
+        * Setup our local pointer to *npiv_fabric
+        */
+       tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
+       pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");
+
+       tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
+                                               WQ_MEM_RECLAIM, 0);
+       if (!tcm_qla2xxx_free_wq) {
+               ret = -ENOMEM;
+               goto out_fabric_npiv;
+       }
+
+       tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
+       if (!tcm_qla2xxx_cmd_wq) {
+               ret = -ENOMEM;
+               goto out_free_wq;
+       }
+
+       return 0;
+
+out_free_wq:
+       destroy_workqueue(tcm_qla2xxx_free_wq);
+out_fabric_npiv:
+       target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+out_fabric:
+       target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+       return ret;
+}
+
+static void tcm_qla2xxx_deregister_configfs(void)
+{
+       destroy_workqueue(tcm_qla2xxx_cmd_wq);
+       destroy_workqueue(tcm_qla2xxx_free_wq);
+
+       target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+       tcm_qla2xxx_fabric_configfs = NULL;
+       pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");
+
+       target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+       tcm_qla2xxx_npiv_fabric_configfs = NULL;
+       pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
+}
+
+static int __init tcm_qla2xxx_init(void)
+{
+       int ret;
+
+       ret = tcm_qla2xxx_register_configfs();
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static void __exit tcm_qla2xxx_exit(void)
+{
+       tcm_qla2xxx_deregister_configfs();
+}
+
+MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
+MODULE_LICENSE("GPL");
+module_init(tcm_qla2xxx_init);
+module_exit(tcm_qla2xxx_exit);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
new file mode 100644 (file)
index 0000000..8254981
--- /dev/null
@@ -0,0 +1,82 @@
+#include <target/target_core_base.h>
+#include <linux/btree.h>
+
+#define TCM_QLA2XXX_VERSION    "v0.1"
+/* length of ASCII WWPNs including pad */
+#define TCM_QLA2XXX_NAMELEN    32
+/* length of ASCII NPIV 'WWPN+WWNN' including pad */
+#define TCM_QLA2XXX_NPIV_NAMELEN 66
+
+#include "qla_target.h"
+
+struct tcm_qla2xxx_nacl {
+       /* From libfc struct fc_rport->port_id */
+       u32 nport_id;
+       /* Binary World Wide unique Node Name for remote FC Initiator Nport */
+       u64 nport_wwnn;
+       /* ASCII formatted WWPN for FC Initiator Nport */
+       char nport_name[TCM_QLA2XXX_NAMELEN];
+       /* Pointer to qla_tgt_sess */
+       struct qla_tgt_sess *qla_tgt_sess;
+       /* Pointer to TCM FC nexus */
+       struct se_session *nport_nexus;
+       /* Returned by tcm_qla2xxx_make_nodeacl() */
+       struct se_node_acl se_node_acl;
+};
+
+struct tcm_qla2xxx_tpg_attrib {
+       int generate_node_acls;
+       int cache_dynamic_acls;
+       int demo_mode_write_protect;
+       int prod_mode_write_protect;
+};
+
+struct tcm_qla2xxx_tpg {
+       /* FC lport target portal group tag for TCM */
+       u16 lport_tpgt;
+       /* Atomic bit to determine TPG active status */
+       atomic_t lport_tpg_enabled;
+       /* Pointer back to tcm_qla2xxx_lport */
+       struct tcm_qla2xxx_lport *lport;
+       /* Used by tcm_qla2xxx_tpg_attrib_cit */
+       struct tcm_qla2xxx_tpg_attrib tpg_attrib;
+       /* Returned by tcm_qla2xxx_make_tpg() */
+       struct se_portal_group se_tpg;
+};
+
+#define QLA_TPG_ATTRIB(tpg)    (&(tpg)->tpg_attrib)
+
+struct tcm_qla2xxx_fc_loopid {
+       struct se_node_acl *se_nacl;
+};
+
+struct tcm_qla2xxx_lport {
+       /* SCSI protocol the lport is providing */
+       u8 lport_proto_id;
+       /* Binary World Wide unique Port Name for FC Target Lport */
+       u64 lport_wwpn;
+       /* Binary World Wide unique Port Name for FC NPIV Target Lport */
+       u64 lport_npiv_wwpn;
+       /* Binary World Wide unique Node Name for FC NPIV Target Lport */
+       u64 lport_npiv_wwnn;
+       /* ASCII formatted WWPN for FC Target Lport */
+       char lport_name[TCM_QLA2XXX_NAMELEN];
+       /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
+       char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
+       /* map for fc_port pointers in 24-bit FC Port ID space */
+       struct btree_head32 lport_fcport_map;
+       /* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
+       struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
+       /* Pointer to struct scsi_qla_host from qla2xxx LLD */
+       struct scsi_qla_host *qla_vha;
+       /* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
+       struct scsi_qla_host *qla_npiv_vp;
+       /* Pointer to struct qla_tgt pointer */
+       struct qla_tgt lport_qla_tgt;
+       /* Pointer to struct fc_vport for NPIV vport from libfc */
+       struct fc_vport *npiv_vport;
+       /* Pointer to TPG=1 for non NPIV mode */
+       struct tcm_qla2xxx_tpg *tpg_1;
+       /* Returned by tcm_qla2xxx_make_lport() */
+       struct se_wwn lport_wwn;
+};
index 0b0a7d42137d7b3fef646ec376175d9c7a97454e..c681b2a355e137a99edcfd39e1c18b8c7dcbda11 100644 (file)
@@ -9,6 +9,140 @@
 #include "ql4_glbl.h"
 #include "ql4_dbg.h"
 
+static ssize_t
+qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
+                            struct bin_attribute *ba, char *buf, loff_t off,
+                            size_t count)
+{
+       struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+                                              struct device, kobj)));
+
+       if (!is_qla8022(ha))
+               return -EINVAL;
+
+       if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
+               return 0;
+
+       return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+                                      ha->fw_dump_size);
+}
+
+static ssize_t
+qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
+                             struct bin_attribute *ba, char *buf, loff_t off,
+                             size_t count)
+{
+       struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+                                              struct device, kobj)));
+       uint32_t dev_state;
+       long reading;
+       int ret = 0;
+
+       if (!is_qla8022(ha))
+               return -EINVAL;
+
+       if (off != 0)
+               return ret;
+
+       buf[1] = 0;
+       ret = kstrtol(buf, 10, &reading);
+       if (ret) {
+               ql4_printk(KERN_ERR, ha, "%s: Invalid input. Return err %d\n",
+                          __func__, ret);
+               return ret;
+       }
+
+       switch (reading) {
+       case 0:
+               /* clear dump collection flags */
+               if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+                       clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+                       /* Reload minidump template */
+                       qla4xxx_alloc_fw_dump(ha);
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "Firmware template reloaded\n"));
+               }
+               break;
+       case 1:
+               /* Set flag to read dump */
+               if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) &&
+                   !test_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+                       set_bit(AF_82XX_DUMP_READING, &ha->flags);
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "Raw firmware dump ready for read on (%ld).\n",
+                                         ha->host_no));
+               }
+               break;
+       case 2:
+               /* Reset HBA */
+               qla4_8xxx_idc_lock(ha);
+               dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+               if (dev_state == QLA82XX_DEV_READY) {
+                       ql4_printk(KERN_INFO, ha,
+                                  "%s: Setting Need reset, reset_owner is 0x%x.\n",
+                                  __func__, ha->func_num);
+                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+                                       QLA82XX_DEV_NEED_RESET);
+                       set_bit(AF_82XX_RST_OWNER, &ha->flags);
+               } else
+                       ql4_printk(KERN_INFO, ha,
+                                  "%s: Reset not performed as device state is 0x%x\n",
+                                  __func__, dev_state);
+
+               qla4_8xxx_idc_unlock(ha);
+               break;
+       default:
+               /* do nothing */
+               break;
+       }
+
+       return count;
+}
+
+static struct bin_attribute sysfs_fw_dump_attr = {
+       .attr = {
+               .name = "fw_dump",
+               .mode = S_IRUSR | S_IWUSR,
+       },
+       .size = 0,
+       .read = qla4_8xxx_sysfs_read_fw_dump,
+       .write = qla4_8xxx_sysfs_write_fw_dump,
+};
+
+static struct sysfs_entry {
+       char *name;
+       struct bin_attribute *attr;
+} bin_file_entries[] = {
+       { "fw_dump", &sysfs_fw_dump_attr },
+       { NULL },
+};
+
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha)
+{
+       struct Scsi_Host *host = ha->host;
+       struct sysfs_entry *iter;
+       int ret;
+
+       for (iter = bin_file_entries; iter->name; iter++) {
+               ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
+                                           iter->attr);
+               if (ret)
+                       ql4_printk(KERN_ERR, ha,
+                                  "Unable to create sysfs %s binary attribute (%d).\n",
+                                  iter->name, ret);
+       }
+}
+
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha)
+{
+       struct Scsi_Host *host = ha->host;
+       struct sysfs_entry *iter;
+
+       for (iter = bin_file_entries; iter->name; iter++)
+               sysfs_remove_bin_file(&host->shost_gendev.kobj,
+                                     iter->attr);
+}
+
 /* Scsi_Host attributes. */
 static ssize_t
 qla4xxx_fw_version_show(struct device *dev,
index 7f2492e88be72085325bbb58debacefe9aaf288c..96a5616a8fdaa6f7bbb35140befe25a0c88e7087 100644 (file)
@@ -398,6 +398,16 @@ struct isp_operations {
        int (*get_sys_info) (struct scsi_qla_host *);
 };
 
+struct ql4_mdump_size_table {
+       uint32_t size;
+       uint32_t size_cmask_02;
+       uint32_t size_cmask_04;
+       uint32_t size_cmask_08;
+       uint32_t size_cmask_10;
+       uint32_t size_cmask_FF;
+       uint32_t version;
+};
+
 /*qla4xxx ipaddress configuration details */
 struct ipaddress_config {
        uint16_t ipv4_options;
@@ -485,6 +495,10 @@ struct scsi_qla_host {
 #define AF_EEH_BUSY                    20 /* 0x00100000 */
 #define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
 #define AF_BUILD_DDB_LIST              22 /* 0x00400000 */
+#define AF_82XX_FW_DUMPED              24 /* 0x01000000 */
+#define AF_82XX_RST_OWNER              25 /* 0x02000000 */
+#define AF_82XX_DUMP_READING           26 /* 0x04000000 */
+
        unsigned long dpc_flags;
 
 #define DPC_RESET_HA                   1 /* 0x00000002 */
@@ -662,6 +676,11 @@ struct scsi_qla_host {
 
        uint32_t nx_dev_init_timeout;
        uint32_t nx_reset_timeout;
+       void *fw_dump;
+       uint32_t fw_dump_size;
+       uint32_t fw_dump_capture_mask;
+       void *fw_dump_tmplt_hdr;
+       uint32_t fw_dump_tmplt_size;
 
        struct completion mbx_intr_comp;
 
@@ -936,4 +955,7 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
 #define PROCESS_ALL_AENS        0
 #define FLUSH_DDB_CHANGED_AENS  1
 
+/* Defines for udev events */
+#define QL4_UEVENT_CODE_FW_DUMP                0
+
 #endif /*_QLA4XXX_H */
index 210cd1d64475b0b3cfc0128ee262565ec7533f2c..7240948fb929bcb557398ecd774fe9fc36c7fae3 100644 (file)
@@ -385,6 +385,11 @@ struct qla_flt_region {
 #define MBOX_CMD_GET_IP_ADDR_STATE             0x0091
 #define MBOX_CMD_SEND_IPV6_ROUTER_SOL          0x0092
 #define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR  0x0093
+#define MBOX_CMD_MINIDUMP                      0x0129
+
+/* Minidump subcommand */
+#define MINIDUMP_GET_SIZE_SUBCOMMAND           0x00
+#define MINIDUMP_GET_TMPLT_SUBCOMMAND          0x01
 
 /* Mailbox 1 */
 #define FW_STATE_READY                         0x0000
@@ -1190,4 +1195,27 @@ struct ql_iscsi_stats {
        uint8_t reserved2[264]; /* 0x0308 - 0x040F */
 };
 
+#define QLA82XX_DBG_STATE_ARRAY_LEN            16
+#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN         8
+#define QLA82XX_DBG_RSVD_ARRAY_LEN             8
+
+struct qla4_8xxx_minidump_template_hdr {
+       uint32_t entry_type;
+       uint32_t first_entry_offset;
+       uint32_t size_of_template;
+       uint32_t capture_debug_level;
+       uint32_t num_of_entries;
+       uint32_t version;
+       uint32_t driver_timestamp;
+       uint32_t checksum;
+
+       uint32_t driver_capture_mask;
+       uint32_t driver_info_word2;
+       uint32_t driver_info_word3;
+       uint32_t driver_info_word4;
+
+       uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
+       uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
+};
+
 #endif /*  _QLA4X_FW_H */
index 910536667cf577e1c19d7db82803944730eefbca..20b49d019043d193c50aebd9aef311cfa9571271 100644 (file)
@@ -196,10 +196,18 @@ int qla4xxx_bsg_request(struct bsg_job *bsg_job);
 int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
 
 void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+                                 dma_addr_t phys_addr);
+int qla4xxx_req_template_size(struct scsi_qla_host *ha);
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
 
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
 extern int ql4xenablemsix;
+extern int ql4xmdcapmask;
+extern int ql4xenablemd;
 
 extern struct device_attribute *qla4xxx_host_attrs[];
 #endif /* _QLA4x_GBL_H */
index 90ee5d8fa7315eda08a8bdc9cd8b66aff2e82eb2..bf36723b84e10cff0a01a3925f5d5bcafaa27fb9 100644 (file)
@@ -277,6 +277,94 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
        return ipv4_wait|ipv6_wait;
 }
 
+/**
+ * qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
+ * @ha: pointer to host adapter structure.
+ **/
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
+{
+       int status;
+       uint32_t capture_debug_level;
+       int hdr_entry_bit, k;
+       void *md_tmp;
+       dma_addr_t md_tmp_dma;
+       struct qla4_8xxx_minidump_template_hdr *md_hdr;
+
+       if (ha->fw_dump) {
+               ql4_printk(KERN_WARNING, ha,
+                          "Firmware dump previously allocated.\n");
+               return;
+       }
+
+       status = qla4xxx_req_template_size(ha);
+       if (status != QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha,
+                          "scsi%ld: Failed to get template size\n",
+                          ha->host_no);
+               return;
+       }
+
+       clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+
+       /* Allocate memory for saving the template */
+       md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+                                   &md_tmp_dma, GFP_KERNEL);
+
+       /* Request template */
+       status =  qla4xxx_get_minidump_template(ha, md_tmp_dma);
+       if (status != QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha,
+                          "scsi%ld: Failed to get minidump template\n",
+                          ha->host_no);
+               goto alloc_cleanup;
+       }
+
+       md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;
+
+       capture_debug_level = md_hdr->capture_debug_level;
+
+       /* Get capture mask based on module loadtime setting. */
+       if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F)
+               ha->fw_dump_capture_mask = ql4xmdcapmask;
+       else
+               ha->fw_dump_capture_mask = capture_debug_level;
+
+       md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Minimum num of entries = %d\n",
+                         md_hdr->num_of_entries));
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Dump template size  = %d\n",
+                         ha->fw_dump_tmplt_size));
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Selected Capture mask =0x%x\n",
+                         ha->fw_dump_capture_mask));
+
+       /* Calculate fw_dump_size */
+       for (hdr_entry_bit = 0x2, k = 1; (hdr_entry_bit & 0xFF);
+            hdr_entry_bit <<= 1, k++) {
+               if (hdr_entry_bit & ha->fw_dump_capture_mask)
+                       ha->fw_dump_size += md_hdr->capture_size_array[k];
+       }
+
+       /* Total firmware dump size including command header */
+       ha->fw_dump_size += ha->fw_dump_tmplt_size;
+       ha->fw_dump = vmalloc(ha->fw_dump_size);
+       if (!ha->fw_dump)
+               goto alloc_cleanup;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Minidump Tempalate Size = 0x%x KB\n",
+                         ha->fw_dump_tmplt_size));
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Total Minidump size = 0x%x KB\n", ha->fw_dump_size));
+
+       memcpy(ha->fw_dump, md_tmp, ha->fw_dump_tmplt_size);
+       ha->fw_dump_tmplt_hdr = ha->fw_dump;
+
+alloc_cleanup:
+       dma_free_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+                         md_tmp, md_tmp_dma);
+}
+
 static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
 {
        uint32_t timeout_count;
@@ -445,9 +533,13 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
                              "control block\n", ha->host_no, __func__));
                return status;
        }
+
        if (!qla4xxx_fw_ready(ha))
                return status;
 
+       if (is_qla8022(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
+               qla4xxx_alloc_fw_dump(ha);
+
        return qla4xxx_get_firmware_status(ha);
 }
 
@@ -884,8 +976,8 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                switch (state) {
                case DDB_DS_SESSION_ACTIVE:
                case DDB_DS_DISCOVERY:
-                       ddb_entry->unblock_sess(ddb_entry->sess);
                        qla4xxx_update_session_conn_param(ha, ddb_entry);
+                       ddb_entry->unblock_sess(ddb_entry->sess);
                        status = QLA_SUCCESS;
                        break;
                case DDB_DS_SESSION_FAILED:
@@ -897,6 +989,7 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                }
                break;
        case DDB_DS_SESSION_ACTIVE:
+       case DDB_DS_DISCOVERY:
                switch (state) {
                case DDB_DS_SESSION_FAILED:
                        /*
index 7ac21dabbf22fce08264314ba3a212a329a1c40e..cab8f665a41faca343dba8f404e01ba96075abaf 100644 (file)
@@ -51,25 +51,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
                }
        }
 
-       if (is_qla8022(ha)) {
-               if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
-                       DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
-                           "prematurely completing mbx cmd as firmware "
-                           "recovery detected\n", ha->host_no, __func__));
-                       return status;
-               }
-               /* Do not send any mbx cmd if h/w is in failed state*/
-               qla4_8xxx_idc_lock(ha);
-               dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-               qla4_8xxx_idc_unlock(ha);
-               if (dev_state == QLA82XX_DEV_FAILED) {
-                       ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in "
-                           "failed state, do not send any mailbox commands\n",
-                           ha->host_no, __func__);
-                       return status;
-               }
-       }
-
        if ((is_aer_supported(ha)) &&
            (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
                DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
@@ -96,6 +77,25 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
                msleep(10);
        }
 
+       if (is_qla8022(ha)) {
+               if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
+                       DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                         "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
+                                         ha->host_no, __func__));
+                       goto mbox_exit;
+               }
+               /* Do not send any mbx cmd if h/w is in failed state*/
+               qla4_8xxx_idc_lock(ha);
+               dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+               qla4_8xxx_idc_unlock(ha);
+               if (dev_state == QLA82XX_DEV_FAILED) {
+                       ql4_printk(KERN_WARNING, ha,
+                                  "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
+                                  ha->host_no, __func__);
+                       goto mbox_exit;
+               }
+       }
+
        spin_lock_irqsave(&ha->hardware_lock, flags);
 
        ha->mbox_status_count = outCount;
@@ -270,6 +270,79 @@ mbox_exit:
        return status;
 }
 
+/**
+ * qla4xxx_get_minidump_template - Get the firmware template
+ * @ha: Pointer to host adapter structure.
+ * @phys_addr: dma address for template
+ *
+ * Obtain the minidump template from firmware during initialization
+ * as it may not be available when minidump is desired.
+ **/
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+                                 dma_addr_t phys_addr)
+{
+       uint32_t mbox_cmd[MBOX_REG_COUNT];
+       uint32_t mbox_sts[MBOX_REG_COUNT];
+       int status;
+
+       memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+       memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+       mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+       mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND;
+       mbox_cmd[2] = LSDW(phys_addr);
+       mbox_cmd[3] = MSDW(phys_addr);
+       mbox_cmd[4] = ha->fw_dump_tmplt_size;
+       mbox_cmd[5] = 0;
+
+       status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
+                                        &mbox_sts[0]);
+       if (status != QLA_SUCCESS) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n",
+                                 ha->host_no, __func__, mbox_cmd[0],
+                                 mbox_sts[0], mbox_sts[1]));
+       }
+       return status;
+}
+
+/**
+ * qla4xxx_req_template_size - Get minidump template size from firmware.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_req_template_size(struct scsi_qla_host *ha)
+{
+       uint32_t mbox_cmd[MBOX_REG_COUNT];
+       uint32_t mbox_sts[MBOX_REG_COUNT];
+       int status;
+
+       memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+       memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+       mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+       mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND;
+
+       status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
+                                        &mbox_sts[0]);
+       if (status == QLA_SUCCESS) {
+               ha->fw_dump_tmplt_size = mbox_sts[1];
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "%s: sts[0]=0x%04x, template  size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n",
+                                 __func__, mbox_sts[0], mbox_sts[1],
+                                 mbox_sts[2], mbox_sts[3], mbox_sts[4],
+                                 mbox_sts[5], mbox_sts[6], mbox_sts[7]));
+               if (ha->fw_dump_tmplt_size == 0)
+                       status = QLA_ERROR;
+       } else {
+               ql4_printk(KERN_WARNING, ha,
+                          "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n",
+                          __func__, mbox_sts[0], mbox_sts[1]);
+               status = QLA_ERROR;
+       }
+
+       return status;
+}
+
 void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
 {
        set_bit(AF_FW_RECOVERY, &ha->flags);
index e1e46b6dac754e8bb10d9f6523b75e9ef38c2ea3..228b67020d2cde7549e9d962f0b1e95b0675155c 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/pci.h>
+#include <linux/ratelimit.h>
 #include "ql4_def.h"
 #include "ql4_glbl.h"
 
@@ -420,6 +421,38 @@ qla4_8xxx_rd_32(struct scsi_qla_host *ha, ulong off)
        return data;
 }
 
+/* Minidump related functions */
+static int qla4_8xxx_md_rw_32(struct scsi_qla_host *ha, uint32_t off,
+                             u32 data, uint8_t flag)
+{
+       uint32_t win_read, off_value, rval = QLA_SUCCESS;
+
+       off_value  = off & 0xFFFF0000;
+       writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+       /* Read back value to make sure write has gone through before trying
+        * to use it.
+        */
+       win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+       if (win_read != off_value) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
+                                  __func__, off_value, win_read, off));
+               return QLA_ERROR;
+       }
+
+       off_value  = off & 0x0000FFFF;
+
+       if (flag)
+               writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
+                                             ha->nx_pcibase));
+       else
+               rval = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
+                                             ha->nx_pcibase));
+
+       return rval;
+}
+
 #define CRB_WIN_LOCK_TIMEOUT 100000000
 
 int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
@@ -1252,9 +1285,9 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
                }
 
                if (j >= MAX_CTL_CHECK) {
-                       if (printk_ratelimit())
-                               ql4_printk(KERN_ERR, ha,
-                                   "failed to read through agent\n");
+                       printk_ratelimited(KERN_ERR
+                                          "%s: failed to read through agent\n",
+                                          __func__);
                        break;
                }
 
@@ -1390,7 +1423,8 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
                if (j >= MAX_CTL_CHECK) {
                        if (printk_ratelimit())
                                ql4_printk(KERN_ERR, ha,
-                                   "failed to write through agent\n");
+                                          "%s: failed to read through agent\n",
+                                          __func__);
                        ret = -1;
                        break;
                }
@@ -1462,6 +1496,8 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
 
        drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
        drv_active |= (1 << (ha->func_num * 4));
+       ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+                  __func__, ha->host_no, drv_active);
        qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
 }
 
@@ -1472,6 +1508,8 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
 
        drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
        drv_active &= ~(1 << (ha->func_num * 4));
+       ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+                  __func__, ha->host_no, drv_active);
        qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
 }
 
@@ -1497,6 +1535,8 @@ qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
 
        drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
        drv_state |= (1 << (ha->func_num * 4));
+       ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+                  __func__, ha->host_no, drv_state);
        qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
 }
 
@@ -1507,6 +1547,8 @@ qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
 
        drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
        drv_state &= ~(1 << (ha->func_num * 4));
+       ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+                  __func__, ha->host_no, drv_state);
        qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
 }
 
@@ -1601,6 +1643,629 @@ static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
        qla4_8xxx_rom_unlock(ha);
 }
 
+static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+       struct qla82xx_minidump_entry_crb *crb_hdr;
+       uint32_t *data_ptr = *d_ptr;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       crb_hdr = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+       r_addr = crb_hdr->addr;
+       r_stride = crb_hdr->crb_strd.addr_stride;
+       loop_cnt = crb_hdr->op_count;
+
+       for (i = 0; i < loop_cnt; i++) {
+               r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+               *data_ptr++ = cpu_to_le32(r_addr);
+               *data_ptr++ = cpu_to_le32(r_value);
+               r_addr += r_stride;
+       }
+       *d_ptr = data_ptr;
+}
+
+static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
+                                struct qla82xx_minidump_entry_hdr *entry_hdr,
+                                uint32_t **d_ptr)
+{
+       uint32_t addr, r_addr, c_addr, t_r_addr;
+       uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+       unsigned long p_wait, w_time, p_mask;
+       uint32_t c_value_w, c_value_r;
+       struct qla82xx_minidump_entry_cache *cache_hdr;
+       int rval = QLA_ERROR;
+       uint32_t *data_ptr = *d_ptr;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+
+       loop_count = cache_hdr->op_count;
+       r_addr = cache_hdr->read_addr;
+       c_addr = cache_hdr->control_addr;
+       c_value_w = cache_hdr->cache_ctrl.write_value;
+
+       t_r_addr = cache_hdr->tag_reg_addr;
+       t_value = cache_hdr->addr_ctrl.init_tag_value;
+       r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+       p_wait = cache_hdr->cache_ctrl.poll_wait;
+       p_mask = cache_hdr->cache_ctrl.poll_mask;
+
+       for (i = 0; i < loop_count; i++) {
+               qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
+
+               if (c_value_w)
+                       qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+
+               if (p_mask) {
+                       w_time = jiffies + p_wait;
+                       do {
+                               c_value_r = qla4_8xxx_md_rw_32(ha, c_addr,
+                                                               0, 0);
+                               if ((c_value_r & p_mask) == 0) {
+                                       break;
+                               } else if (time_after_eq(jiffies, w_time)) {
+                                       /* capturing dump failed */
+                                       return rval;
+                               }
+                       } while (1);
+               }
+
+               addr = r_addr;
+               for (k = 0; k < r_cnt; k++) {
+                       r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+                       *data_ptr++ = cpu_to_le32(r_value);
+                       addr += cache_hdr->read_ctrl.read_addr_stride;
+               }
+
+               t_value += cache_hdr->addr_ctrl.tag_value_stride;
+       }
+       *d_ptr = data_ptr;
+       return QLA_SUCCESS;
+}
+
+static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr)
+{
+       struct qla82xx_minidump_entry_crb *crb_entry;
+       uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
+       uint32_t crb_addr;
+       unsigned long wtime;
+       struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
+       int i;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+                                               ha->fw_dump_tmplt_hdr;
+       crb_entry = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+
+       crb_addr = crb_entry->addr;
+       for (i = 0; i < crb_entry->op_count; i++) {
+               opcode = crb_entry->crb_ctrl.opcode;
+               if (opcode & QLA82XX_DBG_OPCODE_WR) {
+                       qla4_8xxx_md_rw_32(ha, crb_addr,
+                                          crb_entry->value_1, 1);
+                       opcode &= ~QLA82XX_DBG_OPCODE_WR;
+               }
+               if (opcode & QLA82XX_DBG_OPCODE_RW) {
+                       read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+                       qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+                       opcode &= ~QLA82XX_DBG_OPCODE_RW;
+               }
+               if (opcode & QLA82XX_DBG_OPCODE_AND) {
+                       read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+                       read_value &= crb_entry->value_2;
+                       opcode &= ~QLA82XX_DBG_OPCODE_AND;
+                       if (opcode & QLA82XX_DBG_OPCODE_OR) {
+                               read_value |= crb_entry->value_3;
+                               opcode &= ~QLA82XX_DBG_OPCODE_OR;
+                       }
+                       qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+               }
+               if (opcode & QLA82XX_DBG_OPCODE_OR) {
+                       read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+                       read_value |= crb_entry->value_3;
+                       qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+                       opcode &= ~QLA82XX_DBG_OPCODE_OR;
+               }
+               if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+                       poll_time = crb_entry->crb_strd.poll_timeout;
+                       wtime = jiffies + poll_time;
+                       read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+
+                       do {
+                               if ((read_value & crb_entry->value_2) ==
+                                   crb_entry->value_1)
+                                       break;
+                               else if (time_after_eq(jiffies, wtime)) {
+                                       /* capturing dump failed */
+                                       rval = QLA_ERROR;
+                                       break;
+                               } else
+                                       read_value = qla4_8xxx_md_rw_32(ha,
+                                                               crb_addr, 0, 0);
+                       } while (1);
+                       opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+               }
+
+               if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+                       if (crb_entry->crb_strd.state_index_a) {
+                               index = crb_entry->crb_strd.state_index_a;
+                               addr = tmplt_hdr->saved_state_array[index];
+                       } else {
+                               addr = crb_addr;
+                       }
+
+                       read_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+                       index = crb_entry->crb_ctrl.state_index_v;
+                       tmplt_hdr->saved_state_array[index] = read_value;
+                       opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+               }
+
+               if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+                       if (crb_entry->crb_strd.state_index_a) {
+                               index = crb_entry->crb_strd.state_index_a;
+                               addr = tmplt_hdr->saved_state_array[index];
+                       } else {
+                               addr = crb_addr;
+                       }
+
+                       if (crb_entry->crb_ctrl.state_index_v) {
+                               index = crb_entry->crb_ctrl.state_index_v;
+                               read_value =
+                                       tmplt_hdr->saved_state_array[index];
+                       } else {
+                               read_value = crb_entry->value_1;
+                       }
+
+                       qla4_8xxx_md_rw_32(ha, addr, read_value, 1);
+                       opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+               }
+
+               if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+                       index = crb_entry->crb_ctrl.state_index_v;
+                       read_value = tmplt_hdr->saved_state_array[index];
+                       read_value <<= crb_entry->crb_ctrl.shl;
+                       read_value >>= crb_entry->crb_ctrl.shr;
+                       if (crb_entry->value_2)
+                               read_value &= crb_entry->value_2;
+                       read_value |= crb_entry->value_3;
+                       read_value += crb_entry->value_1;
+                       tmplt_hdr->saved_state_array[index] = read_value;
+                       opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+               }
+               crb_addr += crb_entry->crb_strd.addr_stride;
+       }
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
+       return rval;
+}
+
+static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+       struct qla82xx_minidump_entry_rdocm *ocm_hdr;
+       uint32_t *data_ptr = *d_ptr;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       ocm_hdr = (struct qla82xx_minidump_entry_rdocm *)entry_hdr;
+       r_addr = ocm_hdr->read_addr;
+       r_stride = ocm_hdr->read_addr_stride;
+       loop_cnt = ocm_hdr->op_count;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
+                         __func__, r_addr, r_stride, loop_cnt));
+
+       for (i = 0; i < loop_cnt; i++) {
+               r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
+               *data_ptr++ = cpu_to_le32(r_value);
+               r_addr += r_stride;
+       }
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
+                         __func__, (loop_cnt * sizeof(uint32_t))));
+       *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+       struct qla82xx_minidump_entry_mux *mux_hdr;
+       uint32_t *data_ptr = *d_ptr;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       mux_hdr = (struct qla82xx_minidump_entry_mux *)entry_hdr;
+       r_addr = mux_hdr->read_addr;
+       s_addr = mux_hdr->select_addr;
+       s_stride = mux_hdr->select_value_stride;
+       s_value = mux_hdr->select_value;
+       loop_cnt = mux_hdr->op_count;
+
+       for (i = 0; i < loop_cnt; i++) {
+               qla4_8xxx_md_rw_32(ha, s_addr, s_value, 1);
+               r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+               *data_ptr++ = cpu_to_le32(s_value);
+               *data_ptr++ = cpu_to_le32(r_value);
+               s_value += s_stride;
+       }
+       *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t addr, r_addr, c_addr, t_r_addr;
+       uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+       uint32_t c_value_w;
+       struct qla82xx_minidump_entry_cache *cache_hdr;
+       uint32_t *data_ptr = *d_ptr;
+
+       cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+       loop_count = cache_hdr->op_count;
+       r_addr = cache_hdr->read_addr;
+       c_addr = cache_hdr->control_addr;
+       c_value_w = cache_hdr->cache_ctrl.write_value;
+
+       t_r_addr = cache_hdr->tag_reg_addr;
+       t_value = cache_hdr->addr_ctrl.init_tag_value;
+       r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
+       for (i = 0; i < loop_count; i++) {
+               qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
+               qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+               addr = r_addr;
+               for (k = 0; k < r_cnt; k++) {
+                       r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+                       *data_ptr++ = cpu_to_le32(r_value);
+                       addr += cache_hdr->read_ctrl.read_addr_stride;
+               }
+               t_value += cache_hdr->addr_ctrl.tag_value_stride;
+       }
+       *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t s_addr, r_addr;
+       uint32_t r_stride, r_value, r_cnt, qid = 0;
+       uint32_t i, k, loop_cnt;
+       struct qla82xx_minidump_entry_queue *q_hdr;
+       uint32_t *data_ptr = *d_ptr;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       q_hdr = (struct qla82xx_minidump_entry_queue *)entry_hdr;
+       s_addr = q_hdr->select_addr;
+       r_cnt = q_hdr->rd_strd.read_addr_cnt;
+       r_stride = q_hdr->rd_strd.read_addr_stride;
+       loop_cnt = q_hdr->op_count;
+
+       for (i = 0; i < loop_cnt; i++) {
+               qla4_8xxx_md_rw_32(ha, s_addr, qid, 1);
+               r_addr = q_hdr->read_addr;
+               for (k = 0; k < r_cnt; k++) {
+                       r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+                       *data_ptr++ = cpu_to_le32(r_value);
+                       r_addr += r_stride;
+               }
+               qid += q_hdr->q_strd.queue_id_stride;
+       }
+       *d_ptr = data_ptr;
+}
+
+#define MD_DIRECT_ROM_WINDOW           0x42110030
+#define MD_DIRECT_ROM_READ_BASE                0x42150000
+
+static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t r_addr, r_value;
+       uint32_t i, loop_cnt;
+       struct qla82xx_minidump_entry_rdrom *rom_hdr;
+       uint32_t *data_ptr = *d_ptr;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       rom_hdr = (struct qla82xx_minidump_entry_rdrom *)entry_hdr;
+       r_addr = rom_hdr->read_addr;
+       loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "[%s]: flash_addr: 0x%x, read_data_size: 0x%x\n",
+                          __func__, r_addr, loop_cnt));
+
+       for (i = 0; i < loop_cnt; i++) {
+               qla4_8xxx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
+                                  (r_addr & 0xFFFF0000), 1);
+               r_value = qla4_8xxx_md_rw_32(ha,
+                                            MD_DIRECT_ROM_READ_BASE +
+                                            (r_addr & 0x0000FFFF), 0, 0);
+               *data_ptr++ = cpu_to_le32(r_value);
+               r_addr += sizeof(uint32_t);
+       }
+       *d_ptr = data_ptr;
+}
+
+#define MD_MIU_TEST_AGT_CTRL           0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO                0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI                0x41000098
+
+static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t r_addr, r_value, r_data;
+       uint32_t i, j, loop_cnt;
+       struct qla82xx_minidump_entry_rdmem *m_hdr;
+       unsigned long flags;
+       uint32_t *data_ptr = *d_ptr;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+       m_hdr = (struct qla82xx_minidump_entry_rdmem *)entry_hdr;
+       r_addr = m_hdr->read_addr;
+       loop_cnt = m_hdr->read_data_size/16;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
+                         __func__, r_addr, m_hdr->read_data_size));
+
+       if (r_addr & 0xf) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "[%s]: Read addr 0x%x not 16 bytes alligned\n",
+                                 __func__, r_addr));
+               return QLA_ERROR;
+       }
+
+       if (m_hdr->read_data_size % 16) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
+                                 __func__, m_hdr->read_data_size));
+               return QLA_ERROR;
+       }
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+                         __func__, r_addr, m_hdr->read_data_size, loop_cnt));
+
+       write_lock_irqsave(&ha->hw_lock, flags);
+       for (i = 0; i < loop_cnt; i++) {
+               qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
+               r_value = 0;
+               qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
+               r_value = MIU_TA_CTL_ENABLE;
+               qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+               r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+               qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+
+               for (j = 0; j < MAX_CTL_CHECK; j++) {
+                       r_value = qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL,
+                                                    0, 0);
+                       if ((r_value & MIU_TA_CTL_BUSY) == 0)
+                               break;
+               }
+
+               if (j >= MAX_CTL_CHECK) {
+                       printk_ratelimited(KERN_ERR
+                                          "%s: failed to read through agent\n",
+                                           __func__);
+                       write_unlock_irqrestore(&ha->hw_lock, flags);
+                       return QLA_SUCCESS;
+               }
+
+               for (j = 0; j < 4; j++) {
+                       r_data = qla4_8xxx_md_rw_32(ha,
+                                                   MD_MIU_TEST_AGT_RDDATA[j],
+                                                   0, 0);
+                       *data_ptr++ = cpu_to_le32(r_data);
+               }
+
+               r_addr += 16;
+       }
+       write_unlock_irqrestore(&ha->hw_lock, flags);
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n",
+                         __func__, (loop_cnt * 16)));
+
+       *d_ptr = data_ptr;
+       return QLA_SUCCESS;
+}
+
+static void ql4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
+                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               int index)
+{
+       entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
+                         ha->host_no, index, entry_hdr->entry_type,
+                         entry_hdr->d_ctrl.entry_capture_mask));
+}
+
/**
 * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
 * @ha: pointer to adapter structure
 *
 * Walks the entry headers of the firmware-supplied minidump template
 * (ha->fw_dump_tmplt_hdr) and captures every entry whose capture mask
 * matches ha->fw_dump_capture_mask into ha->fw_dump, immediately after
 * the template copy itself.
 *
 * Returns QLA_SUCCESS when the collected data size matches
 * ha->fw_dump_size, QLA_ERROR otherwise.
 * NOTE(review): rval is only updated by the entry types whose handlers
 * return a status (CNTRL/RDMEM/L2*); a template consisting solely of
 * void-handler entries would leave rval at QLA_ERROR — confirm intended.
 **/
static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
{
	int num_entry_hdr = 0;
	struct qla82xx_minidump_entry_hdr *entry_hdr;
	struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
	uint32_t *data_ptr;
	uint32_t data_collected = 0;
	int i, rval = QLA_ERROR;
	uint64_t now;
	uint32_t timestamp;

	if (!ha->fw_dump) {
		ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
			   __func__, ha->host_no);
		return rval;
	}

	/* Capture data is written directly behind the template copy. */
	tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
						ha->fw_dump_tmplt_hdr;
	data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump +
						ha->fw_dump_tmplt_size);
	data_collected += ha->fw_dump_tmplt_size;

	num_entry_hdr = tmplt_hdr->num_of_entries;
	ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n",
		   __func__, data_ptr);
	ql4_printk(KERN_INFO, ha,
		   "[%s]: no of entry headers in Template: 0x%x\n",
		   __func__, num_entry_hdr);
	ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n",
		   __func__, ha->fw_dump_capture_mask);
	ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n",
		   __func__, ha->fw_dump_size, ha->fw_dump_size);

	/* Update current timestamp before taking dump */
	now = get_jiffies_64();
	timestamp = (u32)(jiffies_to_msecs(now) / 1000);
	tmplt_hdr->driver_timestamp = timestamp;

	/* First entry header sits at first_entry_offset from the template. */
	entry_hdr = (struct qla82xx_minidump_entry_hdr *)
					(((uint8_t *)ha->fw_dump_tmplt_hdr) +
					 tmplt_hdr->first_entry_offset);

	/* Walk through the entry headers - validate/perform required action */
	for (i = 0; i < num_entry_hdr; i++) {
		if (data_collected >= ha->fw_dump_size) {
			ql4_printk(KERN_INFO, ha,
				   "Data collected: [0x%x], Total Dump size: [0x%x]\n",
				   data_collected, ha->fw_dump_size);
			/* NOTE(review): returns the status of the last
			 * handler here even though the buffer filled before
			 * all entries were processed — confirm intended. */
			return rval;
		}

		/* Entries outside the configured capture mask are marked
		 * skipped in the dump and consume no buffer space. */
		if (!(entry_hdr->d_ctrl.entry_capture_mask &
		      ha->fw_dump_capture_mask)) {
			entry_hdr->d_ctrl.driver_flags |=
						QLA82XX_DBG_SKIPPED_FLAG;
			goto skip_nxt_entry;
		}

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Data collected: [0x%x], Dump size left:[0x%x]\n",
				  data_collected,
				  (ha->fw_dump_size - data_collected)));

		/* Decode the entry type and take required action to capture
		 * debug data
		 */
		switch (entry_hdr->entry_type) {
		case QLA82XX_RDEND:
			ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
			break;
		case QLA82XX_CNTRL:
			/* Control entries can fail; abort the whole dump. */
			rval = qla4_8xxx_minidump_process_control(ha,
								  entry_hdr);
			if (rval != QLA_SUCCESS) {
				ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
				goto md_failed;
			}
			break;
		case QLA82XX_RDCRB:
			qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
							 &data_ptr);
			break;
		case QLA82XX_RDMEM:
			rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
								&data_ptr);
			if (rval != QLA_SUCCESS) {
				ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
				goto md_failed;
			}
			break;
		case QLA82XX_BOARD:
		case QLA82XX_RDROM:
			qla4_8xxx_minidump_process_rdrom(ha, entry_hdr,
							 &data_ptr);
			break;
		case QLA82XX_L2DTG:
		case QLA82XX_L2ITG:
		case QLA82XX_L2DAT:
		case QLA82XX_L2INS:
			rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
								&data_ptr);
			if (rval != QLA_SUCCESS) {
				ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
				goto md_failed;
			}
			break;
		case QLA82XX_L1DAT:
		case QLA82XX_L1INS:
			qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
							   &data_ptr);
			break;
		case QLA82XX_RDOCM:
			qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
							 &data_ptr);
			break;
		case QLA82XX_RDMUX:
			qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
							 &data_ptr);
			break;
		case QLA82XX_QUEUE:
			qla4_8xxx_minidump_process_queue(ha, entry_hdr,
							 &data_ptr);
			break;
		case QLA82XX_RDNOP:
		default:
			ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
			break;
		}

		/* data_collected counts only capture data (excluding the
		 * template); the template size is added back in the final
		 * consistency check below. */
		data_collected = (uint8_t *)data_ptr -
				 ((uint8_t *)((uint8_t *)ha->fw_dump +
						ha->fw_dump_tmplt_size));
skip_nxt_entry:
		/*  next entry in the template */
		entry_hdr = (struct qla82xx_minidump_entry_hdr *)
				(((uint8_t *)entry_hdr) +
				 entry_hdr->entry_size);
	}

	/* The dump is only valid if exactly the advertised size was filled. */
	if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) {
		ql4_printk(KERN_INFO, ha,
			   "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
			   data_collected, ha->fw_dump_size);
		goto md_failed;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n",
			  __func__, i));
md_failed:
	return rval;
}
+
+/**
+ * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready.
+ * @ha: pointer to adapter structure
+ **/
+static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
+{
+       char event_string[40];
+       char *envp[] = { event_string, NULL };
+
+       switch (code) {
+       case QL4_UEVENT_CODE_FW_DUMP:
+               snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+                        ha->host_no);
+               break;
+       default:
+               /*do nothing*/
+               break;
+       }
+
+       kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp);
+}
+
 /**
  * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
  * @ha: pointer to adapter structure
@@ -1659,6 +2324,15 @@ dev_initialize:
        qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
 
        qla4_8xxx_idc_unlock(ha);
+       if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
+           !test_and_set_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
+               if (!qla4_8xxx_collect_md_data(ha)) {
+                       qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
+               } else {
+                       ql4_printk(KERN_INFO, ha, "Unable to collect minidump\n");
+                       clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+               }
+       }
        rval = qla4_8xxx_try_start_fw(ha);
        qla4_8xxx_idc_lock(ha);
 
@@ -1686,6 +2360,7 @@ static void
 qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
 {
        uint32_t dev_state, drv_state, drv_active;
+       uint32_t active_mask = 0xFFFFFFFF;
        unsigned long reset_timeout;
 
        ql4_printk(KERN_INFO, ha,
@@ -1697,7 +2372,14 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
                qla4_8xxx_idc_lock(ha);
        }
 
-       qla4_8xxx_set_rst_ready(ha);
+       if (!test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "%s(%ld): reset acknowledged\n",
+                                 __func__, ha->host_no));
+               qla4_8xxx_set_rst_ready(ha);
+       } else {
+               active_mask = (~(1 << (ha->func_num * 4)));
+       }
 
        /* wait for 10 seconds for reset ack from all functions */
        reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
@@ -1709,12 +2391,24 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
                "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
                __func__, ha->host_no, drv_state, drv_active);
 
-       while (drv_state != drv_active) {
+       while (drv_state != (drv_active & active_mask)) {
                if (time_after_eq(jiffies, reset_timeout)) {
-                       printk("%s: RESET TIMEOUT!\n", DRIVER_NAME);
+                       ql4_printk(KERN_INFO, ha,
+                                  "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
+                                  DRIVER_NAME, drv_state, drv_active);
                        break;
                }
 
+               /*
+                * When reset_owner times out, check which functions
+                * acked/did not ack
+                */
+               if (test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+                       ql4_printk(KERN_INFO, ha,
+                                  "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
+                                  __func__, ha->host_no, drv_state,
+                                  drv_active);
+               }
                qla4_8xxx_idc_unlock(ha);
                msleep(1000);
                qla4_8xxx_idc_lock(ha);
@@ -1723,14 +2417,18 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
                drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
        }
 
+       /* Clear RESET OWNER as we are not going to use it any further */
+       clear_bit(AF_82XX_RST_OWNER, &ha->flags);
+
        dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-       ql4_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
-               dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+       ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
+                  dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
 
        /* Force to DEV_COLD unless someone else is starting a reset */
        if (dev_state != QLA82XX_DEV_INITIALIZING) {
                ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
                qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+               qla4_8xxx_set_rst_ready(ha);
        }
 }
 
@@ -1765,8 +2463,9 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
        }
 
        dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-       ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
-               dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+       DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+                         dev_state, dev_state < MAX_STATES ?
+                         qdev_state[dev_state] : "Unknown"));
 
        /* wait for 30 seconds for device to go ready */
        dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -1775,15 +2474,19 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
        while (1) {
 
                if (time_after_eq(jiffies, dev_init_timeout)) {
-                       ql4_printk(KERN_WARNING, ha, "Device init failed!\n");
+                       ql4_printk(KERN_WARNING, ha,
+                                  "%s: Device Init Failed 0x%x = %s\n",
+                                  DRIVER_NAME,
+                                  dev_state, dev_state < MAX_STATES ?
+                                  qdev_state[dev_state] : "Unknown");
                        qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
                                QLA82XX_DEV_FAILED);
                }
 
                dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-               ql4_printk(KERN_INFO, ha,
-                   "2:Device state is 0x%x = %s\n", dev_state,
-                   dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+               ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+                          dev_state, dev_state < MAX_STATES ?
+                          qdev_state[dev_state] : "Unknown");
 
                /* NOTE: Make sure idc unlocked upon exit of switch statement */
                switch (dev_state) {
@@ -2184,6 +2887,7 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
                ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
                qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
                    QLA82XX_DEV_NEED_RESET);
+               set_bit(AF_82XX_RST_OWNER, &ha->flags);
        } else
                ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
 
@@ -2195,8 +2899,10 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
        qla4_8xxx_clear_rst_ready(ha);
        qla4_8xxx_idc_unlock(ha);
 
-       if (rval == QLA_SUCCESS)
+       if (rval == QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_8xxx_isp_reset\n");
                clear_bit(AF_FW_RECOVERY, &ha->flags);
+       }
 
        return rval;
 }
index dc7500e47b8b8e3a564cae69f4f158ec0e6a8a07..30258479f100370400590278f725d6da470b49a1 100644 (file)
@@ -792,4 +792,196 @@ struct crb_addr_pair {
 #define MIU_TEST_AGT_WRDATA_UPPER_LO   (0x0b0)
 #define        MIU_TEST_AGT_WRDATA_UPPER_HI    (0x0b4)
 
+/* Minidump related */
+
+/* Entry Type Defines */
+#define QLA82XX_RDNOP  0
+#define QLA82XX_RDCRB  1
+#define QLA82XX_RDMUX  2
+#define QLA82XX_QUEUE  3
+#define QLA82XX_BOARD  4
+#define QLA82XX_RDOCM  6
+#define QLA82XX_PREGS  7
+#define QLA82XX_L1DTG  8
+#define QLA82XX_L1ITG  9
+#define QLA82XX_L1DAT  11
+#define QLA82XX_L1INS  12
+#define QLA82XX_L2DTG  21
+#define QLA82XX_L2ITG  22
+#define QLA82XX_L2DAT  23
+#define QLA82XX_L2INS  24
+#define QLA82XX_RDROM  71
+#define QLA82XX_RDMEM  72
+#define QLA82XX_CNTRL  98
+#define QLA82XX_RDEND  255
+
+/* Opcodes for Control Entries.
+ * These Flags are bit fields.
+ */
+#define QLA82XX_DBG_OPCODE_WR          0x01
+#define QLA82XX_DBG_OPCODE_RW          0x02
+#define QLA82XX_DBG_OPCODE_AND         0x04
+#define QLA82XX_DBG_OPCODE_OR          0x08
+#define QLA82XX_DBG_OPCODE_POLL                0x10
+#define QLA82XX_DBG_OPCODE_RDSTATE     0x20
+#define QLA82XX_DBG_OPCODE_WRSTATE     0x40
+#define QLA82XX_DBG_OPCODE_MDSTATE     0x80
+
+/* Driver Flags */
+#define QLA82XX_DBG_SKIPPED_FLAG       0x80 /* driver skipped this entry  */
+#define QLA82XX_DBG_SIZE_ERR_FLAG      0x40 /* Entry vs Capture size
+                                             * mismatch */
+
/* Header common to every minidump template entry.
 * driver_code is reserved for the driver to record info about the entry;
 * currently not used. */
struct qla82xx_minidump_entry_hdr {
	uint32_t entry_type;		/* one of the QLA82XX_* entry types above */
	uint32_t entry_size;		/* bytes to the next entry header */
	uint32_t entry_capture_size;	/* presumably expected capture size —
					 * not referenced in this patch; confirm */
	struct {
		uint8_t entry_capture_mask;	/* ANDed against the driver's
						 * configured capture mask */
		uint8_t entry_code;
		uint8_t driver_code;		/* reserved, unused */
		uint8_t driver_flags;		/* QLA82XX_DBG_*_FLAG bits set
						 * by the driver */
	} d_ctrl;
};
+
+/*  Read CRB entry header */
+struct qla82xx_minidump_entry_crb {
+       struct qla82xx_minidump_entry_hdr h;
+       uint32_t addr;
+       struct {
+               uint8_t addr_stride;
+               uint8_t state_index_a;
+               uint16_t poll_timeout;
+       } crb_strd;
+       uint32_t data_size;
+       uint32_t op_count;
+
+       struct {
+               uint8_t opcode;
+               uint8_t state_index_v;
+               uint8_t shl;
+               uint8_t shr;
+       } crb_ctrl;
+
+       uint32_t value_1;
+       uint32_t value_2;
+       uint32_t value_3;
+};
+
+struct qla82xx_minidump_entry_cache {
+       struct qla82xx_minidump_entry_hdr h;
+       uint32_t tag_reg_addr;
+       struct {
+               uint16_t tag_value_stride;
+               uint16_t init_tag_value;
+       } addr_ctrl;
+       uint32_t data_size;
+       uint32_t op_count;
+       uint32_t control_addr;
+       struct {
+               uint16_t write_value;
+               uint8_t poll_mask;
+               uint8_t poll_wait;
+       } cache_ctrl;
+       uint32_t read_addr;
+       struct {
+               uint8_t read_addr_stride;
+               uint8_t read_addr_cnt;
+               uint16_t rsvd_1;
+       } read_ctrl;
+};
+
+/* Read OCM */
+struct qla82xx_minidump_entry_rdocm {
+       struct qla82xx_minidump_entry_hdr h;
+       uint32_t rsvd_0;
+       uint32_t rsvd_1;
+       uint32_t data_size;
+       uint32_t op_count;
+       uint32_t rsvd_2;
+       uint32_t rsvd_3;
+       uint32_t read_addr;
+       uint32_t read_addr_stride;
+};
+
+/* Read Memory */
+struct qla82xx_minidump_entry_rdmem {
+       struct qla82xx_minidump_entry_hdr h;
+       uint32_t rsvd[6];
+       uint32_t read_addr;
+       uint32_t read_data_size;
+};
+
+/* Read ROM */
+struct qla82xx_minidump_entry_rdrom {
+       struct qla82xx_minidump_entry_hdr h;
+       uint32_t rsvd[6];
+       uint32_t read_addr;
+       uint32_t read_data_size;
+};
+
+/* Mux entry */
+struct qla82xx_minidump_entry_mux {
+       struct qla82xx_minidump_entry_hdr h;
+       uint32_t select_addr;
+       uint32_t rsvd_0;
+       uint32_t data_size;
+       uint32_t op_count;
+       uint32_t select_value;
+       uint32_t select_value_stride;
+       uint32_t read_addr;
+       uint32_t rsvd_1;
+};
+
+/* Queue entry */
+struct qla82xx_minidump_entry_queue {
+       struct qla82xx_minidump_entry_hdr h;
+       uint32_t select_addr;
+       struct {
+               uint16_t queue_id_stride;
+               uint16_t rsvd_0;
+       } q_strd;
+       uint32_t data_size;
+       uint32_t op_count;
+       uint32_t rsvd_1;
+       uint32_t rsvd_2;
+       uint32_t read_addr;
+       struct {
+               uint8_t read_addr_stride;
+               uint8_t read_addr_cnt;
+               uint16_t rsvd_3;
+       } rd_strd;
+};
+
#define QLA82XX_MINIDUMP_OCM0_SIZE		(256 * 1024)
#define QLA82XX_MINIDUMP_L1C_SIZE		(256 * 1024)
#define QLA82XX_MINIDUMP_L2C_SIZE		1572864
#define QLA82XX_MINIDUMP_COMMON_STR_SIZE	0
#define QLA82XX_MINIDUMP_FCOE_STR_SIZE		0
#define QLA82XX_MINIDUMP_MEM_SIZE		0
#define QLA82XX_MAX_ENTRY_HDR			4

/* Fixed layout of the minidump capture regions.
 * NOTE(review): the *_SIZE values above are uint32_t element counts here,
 * so each region occupies 4x that many bytes; three regions are currently
 * zero-length (GNU zero-size array extension).  Nothing in this patch
 * instantiates this struct — confirm it is used before relying on it. */
struct qla82xx_minidump {
	uint32_t md_ocm0_data[QLA82XX_MINIDUMP_OCM0_SIZE];
	uint32_t md_l1c_data[QLA82XX_MINIDUMP_L1C_SIZE];
	uint32_t md_l2c_data[QLA82XX_MINIDUMP_L2C_SIZE];
	uint32_t md_cs_data[QLA82XX_MINIDUMP_COMMON_STR_SIZE];
	uint32_t md_fcoes_data[QLA82XX_MINIDUMP_FCOE_STR_SIZE];
	uint32_t md_mem_data[QLA82XX_MINIDUMP_MEM_SIZE];
};
+
/* Mailbox command to query/fetch the minidump template from the firmware. */
#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE	0x129
#define RQST_TMPLT_SIZE				0x0
#define RQST_TMPLT				0x1
#define MD_DIRECT_ROM_WINDOW			0x42110030
#define MD_DIRECT_ROM_READ_BASE			0x42150000
/* NOTE(review): these three MD_MIU_TEST_AGT_* values are also defined
 * locally in the driver .c file of this same patch — keep the two copies
 * in sync, or drop one of them. */
#define MD_MIU_TEST_AGT_CTRL			0x41000090
#define MD_MIU_TEST_AGT_ADDR_LO			0x41000094
#define MD_MIU_TEST_AGT_ADDR_HI			0x41000098

/* MIU test-agent read-data registers, indexed 0..3 for one 16-byte read.
 * NOTE(review): a static const array defined in a header gets one copy per
 * translation unit that includes it — harmless here, but worth confirming
 * this header is only included where the table is needed. */
static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
				0x410000AC, 0x410000B8, 0x410000BC };
 #endif
index ee47820c30a6591824cfa42abff426fb09114e7b..cd15678f9ada740d4448f7b04f96ea89b1a956c5 100644 (file)
@@ -68,12 +68,34 @@ MODULE_PARM_DESC(ql4xmaxqdepth,
                 " Maximum queue depth to report for target devices.\n"
                 "\t\t  Default: 32.");
 
+static int ql4xqfulltracking = 1;
+module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xqfulltracking,
+                " Enable or disable dynamic tracking and adjustment of\n"
+                "\t\t scsi device queue depth.\n"
+                "\t\t  0 - Disable.\n"
+                "\t\t  1 - Enable. (Default)");
+
 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
                " Target Session Recovery Timeout.\n"
                "\t\t  Default: 120 sec.");
 
+int ql4xmdcapmask = 0x1F;
+module_param(ql4xmdcapmask, int, S_IRUGO);
+MODULE_PARM_DESC(ql4xmdcapmask,
+                " Set the Minidump driver capture mask level.\n"
+                "\t\t  Default is 0x1F.\n"
+                "\t\t  Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
+
+int ql4xenablemd = 1;
+module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xenablemd,
+                " Set to enable minidump.\n"
+                "\t\t  0 - disable minidump\n"
+                "\t\t  1 - enable minidump (Default)");
+
 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
 /*
  * SCSI host template entry points
@@ -140,6 +162,8 @@ static int qla4xxx_slave_configure(struct scsi_device *device);
 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
 static umode_t ql4_attr_is_visible(int param_type, int param);
 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
+static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
+                                     int reason);
 
 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
     QLA82XX_LEGACY_INTR_CONFIG;
@@ -159,6 +183,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
        .slave_configure        = qla4xxx_slave_configure,
        .slave_alloc            = qla4xxx_slave_alloc,
        .slave_destroy          = qla4xxx_slave_destroy,
+       .change_queue_depth     = qla4xxx_change_queue_depth,
 
        .this_id                = -1,
        .cmd_per_lun            = 3,
@@ -1555,19 +1580,53 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
        struct iscsi_session *sess;
        struct ddb_entry *ddb_entry;
        struct scsi_qla_host *ha;
-       unsigned long flags;
+       unsigned long flags, wtime;
+       struct dev_db_entry *fw_ddb_entry = NULL;
+       dma_addr_t fw_ddb_entry_dma;
+       uint32_t ddb_state;
+       int ret;
 
        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
        sess = cls_sess->dd_data;
        ddb_entry = sess->dd_data;
        ha = ddb_entry->ha;
 
+       fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                         &fw_ddb_entry_dma, GFP_KERNEL);
+       if (!fw_ddb_entry) {
+               ql4_printk(KERN_ERR, ha,
+                          "%s: Unable to allocate dma buffer\n", __func__);
+               goto destroy_session;
+       }
+
+       wtime = jiffies + (HZ * LOGOUT_TOV);
+       do {
+               ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+                                             fw_ddb_entry, fw_ddb_entry_dma,
+                                             NULL, NULL, &ddb_state, NULL,
+                                             NULL, NULL);
+               if (ret == QLA_ERROR)
+                       goto destroy_session;
+
+               if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+                   (ddb_state == DDB_DS_SESSION_FAILED))
+                       goto destroy_session;
+
+               schedule_timeout_uninterruptible(HZ);
+       } while ((time_after(wtime, jiffies)));
+
+destroy_session:
        qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
        qla4xxx_free_ddb(ha, ddb_entry);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        iscsi_session_teardown(cls_sess);
+
+       if (fw_ddb_entry)
+               dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                 fw_ddb_entry, fw_ddb_entry_dma);
 }
 
 static struct iscsi_cls_conn *
@@ -2220,6 +2279,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
                dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
                                  ha->queues_dma);
 
+        if (ha->fw_dump)
+               vfree(ha->fw_dump);
+
        ha->queues_len = 0;
        ha->queues = NULL;
        ha->queues_dma = 0;
@@ -2229,6 +2291,8 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
        ha->response_dma = 0;
        ha->shadow_regs = NULL;
        ha->shadow_regs_dma = 0;
+       ha->fw_dump = NULL;
+       ha->fw_dump_size = 0;
 
        /* Free srb pool. */
        if (ha->srb_mempool)
@@ -5023,6 +5087,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
 
        set_bit(AF_INIT_DONE, &ha->flags);
 
+       qla4_8xxx_alloc_sysfs_attr(ha);
+
        printk(KERN_INFO
               " QLogic iSCSI HBA Driver version: %s\n"
               "  QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
@@ -5149,6 +5215,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
                iscsi_boot_destroy_kset(ha->boot_kset);
 
        qla4xxx_destroy_fw_ddb_session(ha);
+       qla4_8xxx_free_sysfs_attr(ha);
 
        scsi_remove_host(ha->host);
 
@@ -5217,6 +5284,15 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev)
        scsi_deactivate_tcq(sdev, 1);
 }
 
+static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
+                                     int reason)
+{
+       if (!ql4xqfulltracking)
+               return -EOPNOTSUPP;
+
+       return iscsi_change_queue_depth(sdev, qdepth, reason);
+}
+
 /**
  * qla4xxx_del_from_active_array - returns an active srb
  * @ha: Pointer to host adapter structure.
index 97b30c108e365f6d22e93fc4ca826b6eb46c5e34..cc1cc3518b87cfad495304d247d8c5bf2e0332c2 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k16"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k17"
index 62ddfd31d4ce3539b129ee3f7763d9422ab32f5e..6dfb9785d34581eb06395ef1d4b372b243de5d87 100644 (file)
@@ -1378,16 +1378,19 @@ static int scsi_lld_busy(struct request_queue *q)
 {
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
-       struct scsi_target *starget;
 
        if (!sdev)
                return 0;
 
        shost = sdev->host;
-       starget = scsi_target(sdev);
 
-       if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
-           scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
+       /*
+        * Ignore host/starget busy state.
+        * Since block layer does not have a concept of fairness across
+        * multiple queues, congestion of host/starget needs to be handled
+        * in SCSI layer.
+        */
+       if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
                return 1;
 
        return 0;
index f661a41fa4c6fef7e054ed15dd1b73596bc99691..d4201ded3b2203c0bd9cc17d8a26b528f5ccfc2c 100644 (file)
@@ -24,8 +24,11 @@ static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
        err = scsi_device_quiesce(to_scsi_device(dev));
        if (err == 0) {
                drv = dev->driver;
-               if (drv && drv->suspend)
+               if (drv && drv->suspend) {
                        err = drv->suspend(dev, msg);
+                       if (err)
+                               scsi_device_resume(to_scsi_device(dev));
+               }
        }
        dev_dbg(dev, "scsi suspend: %d\n", err);
        return err;
index 01b03744f1f99ced5879d0f9e9f3a7d1d0677c11..2e5fe584aad32d2130ad59945c922eaa238fb0be 100644 (file)
@@ -147,7 +147,7 @@ int scsi_complete_async_scans(void)
 
        do {
                if (list_empty(&scanning_hosts))
-                       return 0;
+                       goto out;
                /* If we can't get memory immediately, that's OK.  Just
                 * sleep a little.  Even if we never get memory, the async
                 * scans will finish eventually.
@@ -179,8 +179,11 @@ int scsi_complete_async_scans(void)
        }
  done:
        spin_unlock(&async_scan_lock);
-
        kfree(data);
+
+ out:
+       async_synchronize_full_domain(&scsi_sd_probe_domain);
+
        return 0;
 }
 
index 74708fcaf82fe900c3a77689ccad11c91b48b173..ae781487461829ae190f41f52d6f3e267e1fa5bb 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/module.h>
 #include <linux/device.h>
-#include <scsi/scsi_scan.h>
+#include "scsi_priv.h"
 
 static int __init wait_scan_init(void)
 {
index 4e010b727818cd341968bdb378a1cb3f6e544cdf..6a4fd00117ca66667173a27728b7055d2e9a5f76 100644 (file)
@@ -1836,7 +1836,7 @@ ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        err = pci_request_regions(pdev, UFSHCD);
        if (err < 0) {
                dev_err(&pdev->dev, "request regions failed\n");
-               goto out_disable;
+               goto out_host_put;
        }
 
        hba->mmio_base = pci_ioremap_bar(pdev, 0);
@@ -1925,8 +1925,9 @@ out_iounmap:
        iounmap(hba->mmio_base);
 out_release_regions:
        pci_release_regions(pdev);
-out_disable:
+out_host_put:
        scsi_host_put(host);
+out_disable:
        pci_clear_master(pdev);
        pci_disable_device(pdev);
 out_error:
index 00c024039c9713a7b8468d91f5a7e42316db65d5..cd2fe350e724a1db540dc372ca081646bb10a881 100644 (file)
@@ -311,7 +311,7 @@ config SPI_S3C24XX_FIQ
 
 config SPI_S3C64XX
        tristate "Samsung S3C64XX series type SPI"
-       depends on (ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
+       depends on (ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
        select S3C64XX_DMA if ARCH_S3C64XX
        help
          SPI driver for Samsung S3C64XX and newer SoCs.
index 69c9a6601f4588287f69b30110a359bf62786070..47877d687614fbeb9867ac34b8052fe83bc68b90 100644 (file)
@@ -86,7 +86,8 @@ struct spi_imx_data {
        struct completion xfer_done;
        void __iomem *base;
        int irq;
-       struct clk *clk;
+       struct clk *clk_per;
+       struct clk *clk_ipg;
        unsigned long spi_clk;
 
        unsigned int count;
@@ -853,15 +854,22 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
                goto out_free_irq;
        }
 
-       spi_imx->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(spi_imx->clk)) {
-               dev_err(&pdev->dev, "unable to get clock\n");
-               ret = PTR_ERR(spi_imx->clk);
+       spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+       if (IS_ERR(spi_imx->clk_ipg)) {
+               ret = PTR_ERR(spi_imx->clk_ipg);
                goto out_free_irq;
        }
 
-       clk_enable(spi_imx->clk);
-       spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
+       spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
+       if (IS_ERR(spi_imx->clk_per)) {
+               ret = PTR_ERR(spi_imx->clk_per);
+               goto out_free_irq;
+       }
+
+       clk_prepare_enable(spi_imx->clk_per);
+       clk_prepare_enable(spi_imx->clk_ipg);
+
+       spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
 
        spi_imx->devtype_data->reset(spi_imx);
 
@@ -879,8 +887,8 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
        return ret;
 
 out_clk_put:
-       clk_disable(spi_imx->clk);
-       clk_put(spi_imx->clk);
+       clk_disable_unprepare(spi_imx->clk_per);
+       clk_disable_unprepare(spi_imx->clk_ipg);
 out_free_irq:
        free_irq(spi_imx->irq, spi_imx);
 out_iounmap:
@@ -908,8 +916,8 @@ static int __devexit spi_imx_remove(struct platform_device *pdev)
        spi_bitbang_stop(&spi_imx->bitbang);
 
        writel(0, spi_imx->base + MXC_CSPICTRL);
-       clk_disable(spi_imx->clk);
-       clk_put(spi_imx->clk);
+       clk_disable_unprepare(spi_imx->clk_per);
+       clk_disable_unprepare(spi_imx->clk_ipg);
        free_irq(spi_imx->irq, spi_imx);
        iounmap(spi_imx->base);
 
index e496f799b7a9053326a1f68f82a67b49a36eaaea..dfd04e91fa6da35d14c89f3aeb874935d41da339 100644 (file)
@@ -16,8 +16,8 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <linux/module.h>
+#include <linux/clk.h>
 #include <asm/unaligned.h>
 
 #define DRIVER_NAME                    "orion_spi"
@@ -46,6 +46,7 @@ struct orion_spi {
        unsigned int            max_speed;
        unsigned int            min_speed;
        struct orion_spi_info   *spi_info;
+       struct clk              *clk;
 };
 
 static struct workqueue_struct *orion_spi_wq;
@@ -104,7 +105,7 @@ static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
 
        orion_spi = spi_master_get_devdata(spi->master);
 
-       tclk_hz = orion_spi->spi_info->tclk;
+       tclk_hz = clk_get_rate(orion_spi->clk);
 
        /*
         * the supported rates are: 4,6,8...30
@@ -450,6 +451,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
        struct orion_spi *spi;
        struct resource *r;
        struct orion_spi_info *spi_info;
+       unsigned long tclk_hz;
        int status = 0;
 
        spi_info = pdev->dev.platform_data;
@@ -476,19 +478,28 @@ static int __init orion_spi_probe(struct platform_device *pdev)
        spi->master = master;
        spi->spi_info = spi_info;
 
-       spi->max_speed = DIV_ROUND_UP(spi_info->tclk, 4);
-       spi->min_speed = DIV_ROUND_UP(spi_info->tclk, 30);
+       spi->clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(spi->clk)) {
+               status = PTR_ERR(spi->clk);
+               goto out;
+       }
+
+       clk_prepare(spi->clk);
+       clk_enable(spi->clk);
+       tclk_hz = clk_get_rate(spi->clk);
+       spi->max_speed = DIV_ROUND_UP(tclk_hz, 4);
+       spi->min_speed = DIV_ROUND_UP(tclk_hz, 30);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (r == NULL) {
                status = -ENODEV;
-               goto out;
+               goto out_rel_clk;
        }
 
        if (!request_mem_region(r->start, resource_size(r),
                                dev_name(&pdev->dev))) {
                status = -EBUSY;
-               goto out;
+               goto out_rel_clk;
        }
        spi->base = ioremap(r->start, SZ_1K);
 
@@ -508,7 +519,9 @@ static int __init orion_spi_probe(struct platform_device *pdev)
 
 out_rel_mem:
        release_mem_region(r->start, resource_size(r));
-
+out_rel_clk:
+       clk_disable_unprepare(spi->clk);
+       clk_put(spi->clk);
 out:
        spi_master_put(master);
        return status;
@@ -526,6 +539,9 @@ static int __exit orion_spi_remove(struct platform_device *pdev)
 
        cancel_work_sync(&spi->work);
 
+       clk_disable_unprepare(spi->clk);
+       clk_put(spi->clk);
+
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(r->start, resource_size(r));
 
index 4511420849bc1dd61368e7cbbfc7b8080e819bee..e84dbecd09911715817be0b43339ec65bc956441 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/falloc.h>
 #include <linux/miscdevice.h>
 #include <linux/security.h>
 #include <linux/mm.h>
@@ -363,11 +364,12 @@ static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
 
        mutex_lock(&ashmem_mutex);
        list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
-               struct inode *inode = range->asma->file->f_dentry->d_inode;
                loff_t start = range->pgstart * PAGE_SIZE;
-               loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;
+               loff_t end = (range->pgend + 1) * PAGE_SIZE;
 
-               vmtruncate_range(inode, start, end);
+               do_fallocate(range->asma->file,
+                               FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+                               start, end - start);
                range->purged = ASHMEM_WAS_PURGED;
                lru_del(range);
 
index 35819e31262472140be9ec2dfa5210a087a43be1..6cc4358f68c12ad2c779c7837207ce875968cf40 100644 (file)
@@ -1033,7 +1033,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
        if (!retinfo)
                return -EFAULT;
        memset(&tmp, 0, sizeof(tmp));
-       tty_lock(tty);
+       tty_lock();
        tmp.line = tty->index;
        tmp.port = state->port;
        tmp.flags = state->tport.flags;
@@ -1042,7 +1042,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
        tmp.close_delay = state->tport.close_delay;
        tmp.closing_wait = state->tport.closing_wait;
        tmp.custom_divisor = state->custom_divisor;
-       tty_unlock(tty);
+       tty_unlock();
        if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
                return -EFAULT;
        return 0;
@@ -1059,12 +1059,12 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
        if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
                return -EFAULT;
 
-       tty_lock(tty);
+       tty_lock();
        change_spd = ((new_serial.flags ^ port->flags) & ASYNC_SPD_MASK) ||
                new_serial.custom_divisor != state->custom_divisor;
        if (new_serial.irq || new_serial.port != state->port ||
                        new_serial.xmit_fifo_size != state->xmit_fifo_size) {
-               tty_unlock(tty);
+               tty_unlock();
                return -EINVAL;
        }
   
@@ -1074,7 +1074,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
                    (new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
                    ((new_serial.flags & ~ASYNC_USR_MASK) !=
                     (port->flags & ~ASYNC_USR_MASK))) {
-                       tty_unlock(tty);
+                       tty_unlock();
                        return -EPERM;
                }
                port->flags = ((port->flags & ~ASYNC_USR_MASK) |
@@ -1084,7 +1084,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
        }
 
        if (new_serial.baud_base < 9600) {
-               tty_unlock(tty);
+               tty_unlock();
                return -EINVAL;
        }
 
@@ -1116,7 +1116,7 @@ check_and_exit:
                }
        } else
                retval = startup(tty, state);
-       tty_unlock(tty);
+       tty_unlock();
        return retval;
 }
 
index 6984e1a2686a50185608dfff24eb223dbe31b0d9..e61cabdd69df36d56ea0c0cd27fcaeadc488f8de 100644 (file)
@@ -1599,7 +1599,7 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
         * If the port is the middle of closing, bail out now
         */
        if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) {
-               wait_event_interruptible_tty(tty, info->port.close_wait,
+               wait_event_interruptible_tty(info->port.close_wait,
                                !(info->port.flags & ASYNC_CLOSING));
                return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN: -ERESTARTSYS;
        }
index 656ad93bbc96383b4ed77438e887091a8de63739..5c6c31459a2f6618cb7cf9d83c7100cf1a6d86ea 100644 (file)
@@ -1065,8 +1065,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
 
        TRACE_L("read()");
 
-       /* FIXME: should use a private lock */
-       tty_lock(tty);
+       tty_lock();
 
        pClient = findClient(pInfo, task_pid(current));
        if (pClient) {
@@ -1078,7 +1077,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
                                goto unlock;
                        }
                        /* block until there is a message: */
-                       wait_event_interruptible_tty(tty, pInfo->read_wait,
+                       wait_event_interruptible_tty(pInfo->read_wait,
                                        (pMsg = remove_msg(pInfo, pClient)));
                }
 
@@ -1108,7 +1107,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
        }
        ret = -EPERM;
 unlock:
-       tty_unlock(tty);
+       tty_unlock();
        return ret;
 }
 
@@ -1157,7 +1156,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
        pHeader->locks = 0;
        pHeader->owner = NULL;
 
-       tty_lock(tty);
+       tty_lock();
 
        pClient = findClient(pInfo, task_pid(current));
        if (pClient) {
@@ -1176,7 +1175,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
        add_tx_queue(pInfo, pHeader);
        trigger_transmit(pInfo);
 
-       tty_unlock(tty);
+       tty_unlock();
 
        return 0;
 }
index 59af3945ea859c334719fed6a41b4192a30f87f5..5505ffc91da4b5780b33af2cac624ea8f696f5e4 100644 (file)
@@ -47,7 +47,6 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
        wake_up_interruptible(&tty->read_wait);
        wake_up_interruptible(&tty->write_wait);
        tty->packet = 0;
-       /* Review - krefs on tty_link ?? */
        if (!tty->link)
                return;
        tty->link->packet = 0;
@@ -63,9 +62,9 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
                        mutex_unlock(&devpts_mutex);
                }
 #endif
-               tty_unlock(tty);
+               tty_unlock();
                tty_vhangup(tty->link);
-               tty_lock(tty);
+               tty_lock();
        }
 }
 
@@ -623,29 +622,26 @@ static int ptmx_open(struct inode *inode, struct file *filp)
                return retval;
 
        /* find a device that is not in use. */
-       mutex_lock(&devpts_mutex);
+       tty_lock();
        index = devpts_new_index(inode);
+       tty_unlock();
        if (index < 0) {
                retval = index;
                goto err_file;
        }
 
-       mutex_unlock(&devpts_mutex);
-
        mutex_lock(&tty_mutex);
        mutex_lock(&devpts_mutex);
        tty = tty_init_dev(ptm_driver, index);
+       mutex_unlock(&devpts_mutex);
+       tty_lock();
+       mutex_unlock(&tty_mutex);
 
        if (IS_ERR(tty)) {
                retval = PTR_ERR(tty);
                goto out;
        }
 
-       /* The tty returned here is locked so we can safely
-          drop the mutex */
-       mutex_unlock(&devpts_mutex);
-       mutex_unlock(&tty_mutex);
-
        set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
 
        tty_add_file(tty, filp);
@@ -658,17 +654,16 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        if (retval)
                goto err_release;
 
-       tty_unlock(tty);
+       tty_unlock();
        return 0;
 err_release:
-       tty_unlock(tty);
+       tty_unlock();
        tty_release(inode, filp);
        return retval;
 out:
-       mutex_unlock(&tty_mutex);
        devpts_kill_index(inode, index);
+       tty_unlock();
 err_file:
-        mutex_unlock(&devpts_mutex);
        tty_free_file(filp);
        return retval;
 }
index 7264d4d2671774651267d7d98e5c4a44aff7f946..80b6b1b1f7257d3b1da80f1b58523e7465bdb36b 100644 (file)
@@ -3976,7 +3976,7 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
         */
        if (tty_hung_up_p(filp) ||
            (info->flags & ASYNC_CLOSING)) {
-               wait_event_interruptible_tty(tty, info->close_wait,
+               wait_event_interruptible_tty(info->close_wait,
                        !(info->flags & ASYNC_CLOSING));
 #ifdef SERIAL_DO_RESTART
                if (info->flags & ASYNC_HUP_NOTIFY)
@@ -4052,9 +4052,9 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
                printk("block_til_ready blocking: ttyS%d, count = %d\n",
                       info->line, info->count);
 #endif
-               tty_unlock(tty);
+               tty_unlock();
                schedule();
-               tty_lock(tty);
+               tty_lock();
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&info->open_wait, &wait);
@@ -4115,7 +4115,7 @@ rs_open(struct tty_struct *tty, struct file * filp)
         */
        if (tty_hung_up_p(filp) ||
            (info->flags & ASYNC_CLOSING)) {
-               wait_event_interruptible_tty(tty, info->close_wait,
+               wait_event_interruptible_tty(info->close_wait,
                        !(info->flags & ASYNC_CLOSING));
 #ifdef SERIAL_DO_RESTART
                return ((info->flags & ASYNC_HUP_NOTIFY) ?
index ec206732f68ce715aac6d12d771d04e9c880a283..4ef747307ecbb51a8ae195191243000e16c51795 100644 (file)
@@ -205,7 +205,8 @@ struct imx_port {
        unsigned int            irda_inv_rx:1;
        unsigned int            irda_inv_tx:1;
        unsigned short          trcv_delay; /* transceiver delay */
-       struct clk              *clk;
+       struct clk              *clk_ipg;
+       struct clk              *clk_per;
        struct imx_uart_data    *devdata;
 };
 
@@ -673,7 +674,7 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
         * RFDIV is set such way to satisfy requested uartclk value
         */
        val = TXTL << 10 | RXTL;
-       ufcr_rfdiv = (clk_get_rate(sport->clk) + sport->port.uartclk / 2)
+       ufcr_rfdiv = (clk_get_rate(sport->clk_per) + sport->port.uartclk / 2)
                        / sport->port.uartclk;
 
        if(!ufcr_rfdiv)
@@ -1286,7 +1287,7 @@ imx_console_get_options(struct imx_port *sport, int *baud,
                else
                        ucfr_rfdiv = 6 - ucfr_rfdiv;
 
-               uartclk = clk_get_rate(sport->clk);
+               uartclk = clk_get_rate(sport->clk_per);
                uartclk /= ucfr_rfdiv;
 
                {       /*
@@ -1511,14 +1512,22 @@ static int serial_imx_probe(struct platform_device *pdev)
                goto unmap;
        }
 
-       sport->clk = clk_get(&pdev->dev, "uart");
-       if (IS_ERR(sport->clk)) {
-               ret = PTR_ERR(sport->clk);
+       sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+       if (IS_ERR(sport->clk_ipg)) {
+               ret = PTR_ERR(sport->clk_ipg);
                goto unmap;
        }
-       clk_prepare_enable(sport->clk);
 
-       sport->port.uartclk = clk_get_rate(sport->clk);
+       sport->clk_per = devm_clk_get(&pdev->dev, "per");
+       if (IS_ERR(sport->clk_per)) {
+               ret = PTR_ERR(sport->clk_per);
+               goto unmap;
+       }
+
+       clk_prepare_enable(sport->clk_per);
+       clk_prepare_enable(sport->clk_ipg);
+
+       sport->port.uartclk = clk_get_rate(sport->clk_per);
 
        imx_ports[sport->port.line] = sport;
 
@@ -1539,8 +1548,8 @@ deinit:
        if (pdata && pdata->exit)
                pdata->exit(pdev);
 clkput:
-       clk_disable_unprepare(sport->clk);
-       clk_put(sport->clk);
+       clk_disable_unprepare(sport->clk_per);
+       clk_disable_unprepare(sport->clk_ipg);
 unmap:
        iounmap(sport->port.membase);
 free:
@@ -1558,11 +1567,10 @@ static int serial_imx_remove(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, NULL);
 
-       if (sport) {
-               uart_remove_one_port(&imx_reg, &sport->port);
-               clk_disable_unprepare(sport->clk);
-               clk_put(sport->clk);
-       }
+       uart_remove_one_port(&imx_reg, &sport->port);
+
+       clk_disable_unprepare(sport->clk_per);
+       clk_disable_unprepare(sport->clk_ipg);
 
        if (pdata && pdata->exit)
                pdata->exit(pdev);
index 96c1cacc73608c277de8f5379147498ca67f849f..02da071fe1e7e3a4796ae2b43f92739525eed59f 100644 (file)
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
 #include <linux/serial.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/gpio.h>
 
 #include <lantiq_soc.h>
 
 #define PORT_LTQ_ASC           111
 #define MAXPORTS               2
 #define UART_DUMMY_UER_RX      1
-#define DRVNAME                        "ltq_asc"
+#define DRVNAME                        "lantiq,asc"
 #ifdef __BIG_ENDIAN
 #define LTQ_ASC_TBUF           (0x0020 + 3)
 #define LTQ_ASC_RBUF           (0x0024 + 3)
@@ -114,6 +117,9 @@ static DEFINE_SPINLOCK(ltq_asc_lock);
 
 struct ltq_uart_port {
        struct uart_port        port;
+       /* clock used to derive divider */
+       struct clk              *fpiclk;
+       /* clock gating of the ASC core */
        struct clk              *clk;
        unsigned int            tx_irq;
        unsigned int            rx_irq;
@@ -316,7 +322,9 @@ lqasc_startup(struct uart_port *port)
        struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
        int retval;
 
-       port->uartclk = clk_get_rate(ltq_port->clk);
+       if (ltq_port->clk)
+               clk_enable(ltq_port->clk);
+       port->uartclk = clk_get_rate(ltq_port->fpiclk);
 
        ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
                port->membase + LTQ_ASC_CLC);
@@ -382,6 +390,8 @@ lqasc_shutdown(struct uart_port *port)
                port->membase + LTQ_ASC_RXFCON);
        ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
                port->membase + LTQ_ASC_TXFCON);
+       if (ltq_port->clk)
+               clk_disable(ltq_port->clk);
 }
 
 static void
@@ -630,7 +640,7 @@ lqasc_console_setup(struct console *co, char *options)
 
        port = &ltq_port->port;
 
-       port->uartclk = clk_get_rate(ltq_port->clk);
+       port->uartclk = clk_get_rate(ltq_port->fpiclk);
 
        if (options)
                uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -668,37 +678,32 @@ static struct uart_driver lqasc_reg = {
 static int __init
 lqasc_probe(struct platform_device *pdev)
 {
+       struct device_node *node = pdev->dev.of_node;
        struct ltq_uart_port *ltq_port;
        struct uart_port *port;
-       struct resource *mmres, *irqres;
-       int tx_irq, rx_irq, err_irq;
-       struct clk *clk;
+       struct resource *mmres, irqres[3];
+       int line = 0;
        int ret;
 
        mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!mmres || !irqres)
+       ret = of_irq_to_resource_table(node, irqres, 3);
+       if (!mmres || (ret != 3)) {
+               dev_err(&pdev->dev,
+                       "failed to get memory/irq for serial port\n");
                return -ENODEV;
+       }
 
-       if (pdev->id >= MAXPORTS)
-               return -EBUSY;
+       /* check if this is the console port */
+       if (mmres->start != CPHYSADDR(LTQ_EARLY_ASC))
+               line = 1;
 
-       if (lqasc_port[pdev->id] != NULL)
+       if (lqasc_port[line]) {
+               dev_err(&pdev->dev, "port %d already allocated\n", line);
                return -EBUSY;
-
-       clk = clk_get(&pdev->dev, "fpi");
-       if (IS_ERR(clk)) {
-               pr_err("failed to get fpi clk\n");
-               return -ENOENT;
        }
 
-       tx_irq = platform_get_irq_byname(pdev, "tx");
-       rx_irq = platform_get_irq_byname(pdev, "rx");
-       err_irq = platform_get_irq_byname(pdev, "err");
-       if ((tx_irq < 0) | (rx_irq < 0) | (err_irq < 0))
-               return -ENODEV;
-
-       ltq_port = kzalloc(sizeof(struct ltq_uart_port), GFP_KERNEL);
+       ltq_port = devm_kzalloc(&pdev->dev, sizeof(struct ltq_uart_port),
+                       GFP_KERNEL);
        if (!ltq_port)
                return -ENOMEM;
 
@@ -709,19 +714,26 @@ lqasc_probe(struct platform_device *pdev)
        port->ops       = &lqasc_pops;
        port->fifosize  = 16;
        port->type      = PORT_LTQ_ASC,
-       port->line      = pdev->id;
+       port->line      = line;
        port->dev       = &pdev->dev;
-
-       port->irq       = tx_irq; /* unused, just to be backward-compatibe */
+       /* unused, just to be backward-compatible */
+       port->irq       = irqres[0].start;
        port->mapbase   = mmres->start;
 
-       ltq_port->clk   = clk;
+       ltq_port->fpiclk = clk_get_fpi();
+       if (IS_ERR(ltq_port->fpiclk)) {
+               pr_err("failed to get fpi clk\n");
+               return -ENOENT;
+       }
 
-       ltq_port->tx_irq = tx_irq;
-       ltq_port->rx_irq = rx_irq;
-       ltq_port->err_irq = err_irq;
+       /* not all asc ports have clock gates, lets ignore the return code */
+       ltq_port->clk = clk_get(&pdev->dev, NULL);
 
-       lqasc_port[pdev->id] = ltq_port;
+       ltq_port->tx_irq = irqres[0].start;
+       ltq_port->rx_irq = irqres[1].start;
+       ltq_port->err_irq = irqres[2].start;
+
+       lqasc_port[line] = ltq_port;
        platform_set_drvdata(pdev, ltq_port);
 
        ret = uart_add_one_port(&lqasc_reg, port);
@@ -729,10 +741,17 @@ lqasc_probe(struct platform_device *pdev)
        return ret;
 }
 
+static const struct of_device_id ltq_asc_match[] = {
+       { .compatible = DRVNAME },
+       {},
+};
+MODULE_DEVICE_TABLE(of, ltq_asc_match);
+
 static struct platform_driver lqasc_driver = {
        .driver         = {
                .name   = DRVNAME,
                .owner  = THIS_MODULE,
+               .of_match_table = ltq_asc_match,
        },
 };
 
index 0be8a2f00d0ba92b76c990c8204f23ce4cc0d84d..f76b1688c5c864fc1a7d8f2619f04a1d0566acab 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/major.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
index 4001eee6c08de214f596e932c7fe454dc0f4c2f8..92c00b24d0df961f62b47b39dd0bffc40de2f47e 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/ioport.h>
 #include <linux/irqflags.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/major.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
index 5ed0daae65647c366dc9948307440795b8eda39b..593d40ad0a6be9b0de161803aa69c9011e2fdab8 100644 (file)
@@ -3338,9 +3338,9 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
                        printk("%s(%d):block_til_ready blocking on %s count=%d\n",
                                 __FILE__,__LINE__, tty->driver->name, port->count );
                                 
-               tty_unlock(tty);
+               tty_unlock();
                schedule();
-               tty_lock(tty);
+               tty_lock();
        }
        
        set_current_state(TASK_RUNNING);
index 45b43f11ca3927df3c1c680076c883808f600db9..aa1debf97cc741e3f5914cb5396c8397bd37f362 100644 (file)
@@ -3336,9 +3336,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
                }
 
                DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
-               tty_unlock(tty);
+               tty_unlock();
                schedule();
-               tty_lock(tty);
+               tty_lock();
        }
 
        set_current_state(TASK_RUNNING);
index 4a1e4f07765bbce2872b394560d3cc0c93a1ece4..a3dddc12d2fedc3ec261c2c8f2b3da215f564a20 100644 (file)
@@ -3357,9 +3357,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
                        printk("%s(%d):%s block_til_ready() count=%d\n",
                                 __FILE__,__LINE__, tty->driver->name, port->count );
 
-               tty_unlock(tty);
+               tty_unlock();
                schedule();
-               tty_lock(tty);
+               tty_lock();
        }
 
        set_current_state(TASK_RUNNING);
index 9e930c009bf23b49cd7c4efae3d4ccc427444656..b425c79675ad96adc187c601e36d65c2bebc3581 100644 (file)
@@ -185,7 +185,6 @@ void free_tty_struct(struct tty_struct *tty)
                put_device(tty->dev);
        kfree(tty->write_buf);
        tty_buffer_free_all(tty);
-       tty->magic = 0xDEADDEAD;
        kfree(tty);
 }
 
@@ -574,7 +573,7 @@ void __tty_hangup(struct tty_struct *tty)
        }
        spin_unlock(&redirect_lock);
 
-       tty_lock(tty);
+       tty_lock();
 
        /* some functions below drop BTM, so we need this bit */
        set_bit(TTY_HUPPING, &tty->flags);
@@ -667,7 +666,7 @@ void __tty_hangup(struct tty_struct *tty)
        clear_bit(TTY_HUPPING, &tty->flags);
        tty_ldisc_enable(tty);
 
-       tty_unlock(tty);
+       tty_unlock();
 
        if (f)
                fput(f);
@@ -1104,12 +1103,12 @@ void tty_write_message(struct tty_struct *tty, char *msg)
 {
        if (tty) {
                mutex_lock(&tty->atomic_write_lock);
-               tty_lock(tty);
+               tty_lock();
                if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) {
-                       tty_unlock(tty);
+                       tty_unlock();
                        tty->ops->write(tty, msg, strlen(msg));
                } else
-                       tty_unlock(tty);
+                       tty_unlock();
                tty_write_unlock(tty);
        }
        return;
@@ -1404,7 +1403,6 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
        }
        initialize_tty_struct(tty, driver, idx);
 
-       tty_lock(tty);
        retval = tty_driver_install_tty(driver, tty);
        if (retval < 0)
                goto err_deinit_tty;
@@ -1417,11 +1415,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
        retval = tty_ldisc_setup(tty, tty->link);
        if (retval)
                goto err_release_tty;
-       /* Return the tty locked so that it cannot vanish under the caller */
        return tty;
 
 err_deinit_tty:
-       tty_unlock(tty);
        deinitialize_tty_struct(tty);
        free_tty_struct(tty);
 err_module_put:
@@ -1430,7 +1426,6 @@ err_module_put:
 
        /* call the tty release_tty routine to clean out this slot */
 err_release_tty:
-       tty_unlock(tty);
        printk_ratelimited(KERN_INFO "tty_init_dev: ldisc open failed, "
                                 "clearing slot %d\n", idx);
        release_tty(tty, idx);
@@ -1633,7 +1628,7 @@ int tty_release(struct inode *inode, struct file *filp)
        if (tty_paranoia_check(tty, inode, __func__))
                return 0;
 
-       tty_lock(tty);
+       tty_lock();
        check_tty_count(tty, __func__);
 
        __tty_fasync(-1, filp, 0);
@@ -1642,11 +1637,10 @@ int tty_release(struct inode *inode, struct file *filp)
        pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
                      tty->driver->subtype == PTY_TYPE_MASTER);
        devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
-       /* Review: parallel close */
        o_tty = tty->link;
 
        if (tty_release_checks(tty, o_tty, idx)) {
-               tty_unlock(tty);
+               tty_unlock();
                return 0;
        }
 
@@ -1658,7 +1652,7 @@ int tty_release(struct inode *inode, struct file *filp)
        if (tty->ops->close)
                tty->ops->close(tty, filp);
 
-       tty_unlock(tty);
+       tty_unlock();
        /*
         * Sanity check: if tty->count is going to zero, there shouldn't be
         * any waiters on tty->read_wait or tty->write_wait.  We test the
@@ -1681,7 +1675,7 @@ int tty_release(struct inode *inode, struct file *filp)
                   opens on /dev/tty */
 
                mutex_lock(&tty_mutex);
-               tty_lock_pair(tty, o_tty);
+               tty_lock();
                tty_closing = tty->count <= 1;
                o_tty_closing = o_tty &&
                        (o_tty->count <= (pty_master ? 1 : 0));
@@ -1712,7 +1706,7 @@ int tty_release(struct inode *inode, struct file *filp)
 
                printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
                                __func__, tty_name(tty, buf));
-               tty_unlock_pair(tty, o_tty);
+               tty_unlock();
                mutex_unlock(&tty_mutex);
                schedule();
        }
@@ -1775,7 +1769,7 @@ int tty_release(struct inode *inode, struct file *filp)
 
        /* check whether both sides are closing ... */
        if (!tty_closing || (o_tty && !o_tty_closing)) {
-               tty_unlock_pair(tty, o_tty);
+               tty_unlock();
                return 0;
        }
 
@@ -1788,16 +1782,14 @@ int tty_release(struct inode *inode, struct file *filp)
        tty_ldisc_release(tty, o_tty);
        /*
         * The release_tty function takes care of the details of clearing
-        * the slots and preserving the termios structure. The tty_unlock_pair
-        * should be safe as we keep a kref while the tty is locked (so the
-        * unlock never unlocks a freed tty).
+        * the slots and preserving the termios structure.
         */
        release_tty(tty, idx);
-       tty_unlock_pair(tty, o_tty);
 
        /* Make this pty number available for reallocation */
        if (devpts)
                devpts_kill_index(inode, idx);
+       tty_unlock();
        return 0;
 }
 
@@ -1901,9 +1893,6 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
  *     Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
  *              tty->count should protect the rest.
  *              ->siglock protects ->signal/->sighand
- *
- *     Note: the tty_unlock/lock cases without a ref are only safe due to
- *     tty_mutex
  */
 
 static int tty_open(struct inode *inode, struct file *filp)
@@ -1927,7 +1916,8 @@ retry_open:
        retval = 0;
 
        mutex_lock(&tty_mutex);
-       /* This is protected by the tty_mutex */
+       tty_lock();
+
        tty = tty_open_current_tty(device, filp);
        if (IS_ERR(tty)) {
                retval = PTR_ERR(tty);
@@ -1948,19 +1938,17 @@ retry_open:
        }
 
        if (tty) {
-               tty_lock(tty);
                retval = tty_reopen(tty);
-               if (retval < 0) {
-                       tty_unlock(tty);
+               if (retval)
                        tty = ERR_PTR(retval);
-               }
-       } else  /* Returns with the tty_lock held for now */
+       } else
                tty = tty_init_dev(driver, index);
 
        mutex_unlock(&tty_mutex);
        if (driver)
                tty_driver_kref_put(driver);
        if (IS_ERR(tty)) {
+               tty_unlock();
                retval = PTR_ERR(tty);
                goto err_file;
        }
@@ -1989,7 +1977,7 @@ retry_open:
                printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
                                retval, tty->name);
 #endif
-               tty_unlock(tty); /* need to call tty_release without BTM */
+               tty_unlock(); /* need to call tty_release without BTM */
                tty_release(inode, filp);
                if (retval != -ERESTARTSYS)
                        return retval;
@@ -2001,15 +1989,17 @@ retry_open:
                /*
                 * Need to reset f_op in case a hangup happened.
                 */
+               tty_lock();
                if (filp->f_op == &hung_up_tty_fops)
                        filp->f_op = &tty_fops;
+               tty_unlock();
                goto retry_open;
        }
-       tty_unlock(tty);
+       tty_unlock();
 
 
        mutex_lock(&tty_mutex);
-       tty_lock(tty);
+       tty_lock();
        spin_lock_irq(&current->sighand->siglock);
        if (!noctty &&
            current->signal->leader &&
@@ -2017,10 +2007,11 @@ retry_open:
            tty->session == NULL)
                __proc_set_tty(current, tty);
        spin_unlock_irq(&current->sighand->siglock);
-       tty_unlock(tty);
+       tty_unlock();
        mutex_unlock(&tty_mutex);
        return 0;
 err_unlock:
+       tty_unlock();
        mutex_unlock(&tty_mutex);
        /* after locks to avoid deadlock */
        if (!IS_ERR_OR_NULL(driver))
@@ -2103,13 +2094,10 @@ out:
 
 static int tty_fasync(int fd, struct file *filp, int on)
 {
-       struct tty_struct *tty = file_tty(filp);
        int retval;
-
-       tty_lock(tty);
+       tty_lock();
        retval = __tty_fasync(fd, filp, on);
-       tty_unlock(tty);
-
+       tty_unlock();
        return retval;
 }
 
@@ -2946,7 +2934,6 @@ void initialize_tty_struct(struct tty_struct *tty,
        tty->pgrp = NULL;
        tty->overrun_time = jiffies;
        tty_buffer_init(tty);
-       mutex_init(&tty->legacy_mutex);
        mutex_init(&tty->termios_mutex);
        mutex_init(&tty->ldisc_mutex);
        init_waitqueue_head(&tty->write_wait);
index 173a9000a6cb3940ab2dcb068c5efba300f9613e..9911eb6b34cd06c772cadf9761647b1dc5828487 100644 (file)
@@ -568,7 +568,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
        if (IS_ERR(new_ldisc))
                return PTR_ERR(new_ldisc);
 
-       tty_lock(tty);
+       tty_lock();
        /*
         *      We need to look at the tty locking here for pty/tty pairs
         *      when both sides try to change in parallel.
@@ -582,12 +582,12 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
         */
 
        if (tty->ldisc->ops->num == ldisc) {
-               tty_unlock(tty);
+               tty_unlock();
                tty_ldisc_put(new_ldisc);
                return 0;
        }
 
-       tty_unlock(tty);
+       tty_unlock();
        /*
         *      Problem: What do we do if this blocks ?
         *      We could deadlock here
@@ -595,7 +595,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
        tty_wait_until_sent(tty, 0);
 
-       tty_lock(tty);
+       tty_lock();
        mutex_lock(&tty->ldisc_mutex);
 
        /*
@@ -605,10 +605,10 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
        while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
                mutex_unlock(&tty->ldisc_mutex);
-               tty_unlock(tty);
+               tty_unlock();
                wait_event(tty_ldisc_wait,
                        test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
-               tty_lock(tty);
+               tty_lock();
                mutex_lock(&tty->ldisc_mutex);
        }
 
@@ -623,7 +623,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
        o_ldisc = tty->ldisc;
 
-       tty_unlock(tty);
+       tty_unlock();
        /*
         *      Make sure we don't change while someone holds a
         *      reference to the line discipline. The TTY_LDISC bit
@@ -650,7 +650,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
        retval = tty_ldisc_wait_idle(tty, 5 * HZ);
 
-       tty_lock(tty);
+       tty_lock();
        mutex_lock(&tty->ldisc_mutex);
 
        /* handle wait idle failure locked */
@@ -665,7 +665,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
                clear_bit(TTY_LDISC_CHANGING, &tty->flags);
                mutex_unlock(&tty->ldisc_mutex);
                tty_ldisc_put(new_ldisc);
-               tty_unlock(tty);
+               tty_unlock();
                return -EIO;
        }
 
@@ -708,7 +708,7 @@ enable:
        if (o_work)
                schedule_work(&o_tty->buf.work);
        mutex_unlock(&tty->ldisc_mutex);
-       tty_unlock(tty);
+       tty_unlock();
        return retval;
 }
 
@@ -816,11 +816,11 @@ void tty_ldisc_hangup(struct tty_struct *tty)
         * need to wait for another function taking the BTM
         */
        clear_bit(TTY_LDISC, &tty->flags);
-       tty_unlock(tty);
+       tty_unlock();
        cancel_work_sync(&tty->buf.work);
        mutex_unlock(&tty->ldisc_mutex);
 retry:
-       tty_lock(tty);
+       tty_lock();
        mutex_lock(&tty->ldisc_mutex);
 
        /* At this point we have a closed ldisc and we want to
@@ -831,7 +831,7 @@ retry:
                if (atomic_read(&tty->ldisc->users) != 1) {
                        char cur_n[TASK_COMM_LEN], tty_n[64];
                        long timeout = 3 * HZ;
-                       tty_unlock(tty);
+                       tty_unlock();
 
                        while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
                                timeout = MAX_SCHEDULE_TIMEOUT;
@@ -912,10 +912,10 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
         * race with the set_ldisc code path.
         */
 
-       tty_unlock(tty);
+       tty_unlock();
        tty_ldisc_halt(tty);
        tty_ldisc_flush_works(tty);
-       tty_lock(tty);
+       tty_lock();
 
        mutex_lock(&tty->ldisc_mutex);
        /*
index 69adc80c98cd5c035fa0f3a18df6fc3e3c127623..9ff986c32a21ef702edf515a79c19440a504b747 100644 (file)
@@ -4,59 +4,29 @@
 #include <linux/semaphore.h>
 #include <linux/sched.h>
 
-/* Legacy tty mutex glue */
+/*
+ * The 'big tty mutex'
+ *
+ * This mutex is taken and released by tty_lock() and tty_unlock(),
+ * replacing the older big kernel lock.
+ * It can no longer be taken recursively, and does not get
+ * released implicitly while sleeping.
+ *
+ * Don't use in new code.
+ */
+static DEFINE_MUTEX(big_tty_mutex);
 
 /*
  * Getting the big tty mutex.
  */
-
-void __lockfunc tty_lock(struct tty_struct *tty)
+void __lockfunc tty_lock(void)
 {
-       if (tty->magic != TTY_MAGIC) {
-               printk(KERN_ERR "L Bad %p\n", tty);
-               WARN_ON(1);
-               return;
-       }
-       tty_kref_get(tty);
-       mutex_lock(&tty->legacy_mutex);
+       mutex_lock(&big_tty_mutex);
 }
 EXPORT_SYMBOL(tty_lock);
 
-void __lockfunc tty_unlock(struct tty_struct *tty)
+void __lockfunc tty_unlock(void)
 {
-       if (tty->magic != TTY_MAGIC) {
-               printk(KERN_ERR "U Bad %p\n", tty);
-               WARN_ON(1);
-               return;
-       }
-       mutex_unlock(&tty->legacy_mutex);
-       tty_kref_put(tty);
+       mutex_unlock(&big_tty_mutex);
 }
 EXPORT_SYMBOL(tty_unlock);
-
-/*
- * Getting the big tty mutex for a pair of ttys with lock ordering
- * On a non pty/tty pair tty2 can be NULL which is just fine.
- */
-void __lockfunc tty_lock_pair(struct tty_struct *tty,
-                                       struct tty_struct *tty2)
-{
-       if (tty < tty2) {
-               tty_lock(tty);
-               tty_lock(tty2);
-       } else {
-               if (tty2 && tty2 != tty)
-                       tty_lock(tty2);
-               tty_lock(tty);
-       }
-}
-EXPORT_SYMBOL(tty_lock_pair);
-
-void __lockfunc tty_unlock_pair(struct tty_struct *tty,
-                                               struct tty_struct *tty2)
-{
-       tty_unlock(tty);
-       if (tty2 && tty2 != tty)
-               tty_unlock(tty2);
-}
-EXPORT_SYMBOL(tty_unlock_pair);
index d9cca95a5452484fe7951aebb59467d23d7e0ca4..bf6e238146ae40acd4ac8ea2f517574870366590 100644 (file)
@@ -230,7 +230,7 @@ int tty_port_block_til_ready(struct tty_port *port,
 
        /* block if port is in the process of being closed */
        if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
-               wait_event_interruptible_tty(tty, port->close_wait,
+               wait_event_interruptible_tty(port->close_wait,
                                !(port->flags & ASYNC_CLOSING));
                if (port->flags & ASYNC_HUP_NOTIFY)
                        return -EAGAIN;
@@ -296,9 +296,9 @@ int tty_port_block_til_ready(struct tty_port *port,
                        retval = -ERESTARTSYS;
                        break;
                }
-               tty_unlock(tty);
+               tty_unlock();
                schedule();
-               tty_lock(tty);
+               tty_lock();
        }
        finish_wait(&port->open_wait, &wait);
 
index a797d51ecbe83fd9daa651d435a9931219333b4f..c778ffe4e4e528544e8f70007c851f521bd90675 100644 (file)
@@ -32,7 +32,7 @@
 #define ULPI_VIEWPORT_OFFSET   0x170
 
 struct ehci_mxc_priv {
-       struct clk *usbclk, *ahbclk, *phy1clk;
+       struct clk *usbclk, *ahbclk, *phyclk;
        struct usb_hcd *hcd;
 };
 
@@ -166,31 +166,26 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
        }
 
        /* enable clocks */
-       priv->usbclk = clk_get(dev, "usb");
+       priv->usbclk = clk_get(dev, "ipg");
        if (IS_ERR(priv->usbclk)) {
                ret = PTR_ERR(priv->usbclk);
                goto err_clk;
        }
-       clk_enable(priv->usbclk);
+       clk_prepare_enable(priv->usbclk);
 
-       if (!cpu_is_mx35() && !cpu_is_mx25()) {
-               priv->ahbclk = clk_get(dev, "usb_ahb");
-               if (IS_ERR(priv->ahbclk)) {
-                       ret = PTR_ERR(priv->ahbclk);
-                       goto err_clk_ahb;
-               }
-               clk_enable(priv->ahbclk);
+       priv->ahbclk = clk_get(dev, "ahb");
+       if (IS_ERR(priv->ahbclk)) {
+               ret = PTR_ERR(priv->ahbclk);
+               goto err_clk_ahb;
        }
+       clk_prepare_enable(priv->ahbclk);
 
        /* "dr" device has its own clock on i.MX51 */
-       if (cpu_is_mx51() && (pdev->id == 0)) {
-               priv->phy1clk = clk_get(dev, "usb_phy1");
-               if (IS_ERR(priv->phy1clk)) {
-                       ret = PTR_ERR(priv->phy1clk);
-                       goto err_clk_phy;
-               }
-               clk_enable(priv->phy1clk);
-       }
+       priv->phyclk = clk_get(dev, "phy");
+       if (IS_ERR(priv->phyclk))
+               priv->phyclk = NULL;
+       if (priv->phyclk)
+               clk_prepare_enable(priv->phyclk);
 
 
        /* call platform specific init function */
@@ -265,17 +260,15 @@ err_add:
        if (pdata && pdata->exit)
                pdata->exit(pdev);
 err_init:
-       if (priv->phy1clk) {
-               clk_disable(priv->phy1clk);
-               clk_put(priv->phy1clk);
-       }
-err_clk_phy:
-       if (priv->ahbclk) {
-               clk_disable(priv->ahbclk);
-               clk_put(priv->ahbclk);
+       if (priv->phyclk) {
+               clk_disable_unprepare(priv->phyclk);
+               clk_put(priv->phyclk);
        }
+
+       clk_disable_unprepare(priv->ahbclk);
+       clk_put(priv->ahbclk);
 err_clk_ahb:
-       clk_disable(priv->usbclk);
+       clk_disable_unprepare(priv->usbclk);
        clk_put(priv->usbclk);
 err_clk:
        iounmap(hcd->regs);
@@ -307,15 +300,14 @@ static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
        usb_put_hcd(hcd);
        platform_set_drvdata(pdev, NULL);
 
-       clk_disable(priv->usbclk);
+       clk_disable_unprepare(priv->usbclk);
        clk_put(priv->usbclk);
-       if (priv->ahbclk) {
-               clk_disable(priv->ahbclk);
-               clk_put(priv->ahbclk);
-       }
-       if (priv->phy1clk) {
-               clk_disable(priv->phy1clk);
-               clk_put(priv->phy1clk);
+       clk_disable_unprepare(priv->ahbclk);
+       clk_put(priv->ahbclk);
+
+       if (priv->phyclk) {
+               clk_disable_unprepare(priv->phyclk);
+               clk_put(priv->phyclk);
        }
 
        kfree(priv);
index 6c6a5a3b4ea7edcaaa83916a5d1e0f8eacf48114..82de1073aa529ae105da5ac374face13e6b35bd2 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/mbus.h>
+#include <linux/clk.h>
 #include <plat/ehci-orion.h>
 
 #define rdl(off)       __raw_readl(hcd->regs + (off))
@@ -198,6 +199,7 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
        struct resource *res;
        struct usb_hcd *hcd;
        struct ehci_hcd *ehci;
+       struct clk *clk;
        void __iomem *regs;
        int irq, err;
 
@@ -238,6 +240,14 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
                goto err2;
        }
 
+       /* Not all platforms can gate the clock, so it is not
+          an error if the clock does not exists. */
+       clk = clk_get(&pdev->dev, NULL);
+       if (!IS_ERR(clk)) {
+               clk_prepare_enable(clk);
+               clk_put(clk);
+       }
+
        hcd = usb_create_hcd(&ehci_orion_hc_driver,
                        &pdev->dev, dev_name(&pdev->dev));
        if (!hcd) {
@@ -301,12 +311,18 @@ err1:
 static int __exit ehci_orion_drv_remove(struct platform_device *pdev)
 {
        struct usb_hcd *hcd = platform_get_drvdata(pdev);
+       struct clk *clk;
 
        usb_remove_hcd(hcd);
        iounmap(hcd->regs);
        release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
        usb_put_hcd(hcd);
 
+       clk = clk_get(&pdev->dev, NULL);
+       if (!IS_ERR(clk)) {
+               clk_disable_unprepare(clk);
+               clk_put(clk);
+       }
        return 0;
 }
 
index 4a44bf833611bc88126005d219853ae15850621b..68548236ec4228ceb224d69845daba6345bf9971 100644 (file)
@@ -722,8 +722,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
                }
        }
 
-       tegra->phy = tegra_usb_phy_open(instance, hcd->regs, pdata->phy_config,
-                                               TEGRA_USB_PHY_MODE_HOST);
+       tegra->phy = tegra_usb_phy_open(&pdev->dev, instance, hcd->regs,
+                                       pdata->phy_config,
+                                       TEGRA_USB_PHY_MODE_HOST);
        if (IS_ERR(tegra->phy)) {
                dev_err(&pdev->dev, "Failed to open USB phy\n");
                err = -ENXIO;
index a290be51a1f4a2f412aa80976c85a396eb60db30..0217f7415ef5d6997dd82de449066a2c9643b096 100644 (file)
@@ -2210,7 +2210,7 @@ config FB_XILINX
 
 config FB_COBALT
        tristate "Cobalt server LCD frame buffer support"
-       depends on FB && MIPS_COBALT
+       depends on FB && (MIPS_COBALT || MIPS_SEAD3)
 
 config FB_SH7760
        bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
@@ -2382,6 +2382,39 @@ config FB_BROADSHEET
          and could also have been called by other names when coupled with
          a bridge adapter.
 
+config FB_AUO_K190X
+       tristate "AUO-K190X EPD controller support"
+       depends on FB
+       select FB_SYS_FILLRECT
+       select FB_SYS_COPYAREA
+       select FB_SYS_IMAGEBLIT
+       select FB_SYS_FOPS
+       select FB_DEFERRED_IO
+       help
+         Provides support for epaper controllers from the K190X series
+         of AUO. These controllers can be used to drive epaper displays
+         from Sipix.
+
+         This option enables the common support, shared by the individual
+         controller drivers. You will also have to enable the driver
+         for the controller type used in your device.
+
+config FB_AUO_K1900
+       tristate "AUO-K1900 EPD controller support"
+       depends on FB && FB_AUO_K190X
+       help
+         This driver implements support for the AUO K1900 epd-controller.
+         This controller can drive Sipix epaper displays but can only do
+         serial updates, reducing the number of possible frames per second.
+
+config FB_AUO_K1901
+       tristate "AUO-K1901 EPD controller support"
+       depends on FB && FB_AUO_K190X
+       help
+         This driver implements support for the AUO K1901 epd-controller.
+         This controller can drive Sipix epaper displays and supports
+         concurrent updates, making higher frames per second possible.
+
 config FB_JZ4740
        tristate "JZ4740 LCD framebuffer support"
        depends on FB && MACH_JZ4740
index 9356add945b319ea4bfc9003317306009456d5ee..ee8dafb69e369dd644e6f5354573d00de3463fa8 100644 (file)
@@ -118,6 +118,9 @@ obj-$(CONFIG_FB_PMAGB_B)      += pmagb-b-fb.o
 obj-$(CONFIG_FB_MAXINE)                  += maxinefb.o
 obj-$(CONFIG_FB_METRONOME)        += metronomefb.o
 obj-$(CONFIG_FB_BROADSHEET)       += broadsheetfb.o
+obj-$(CONFIG_FB_AUO_K190X)       += auo_k190x.o
+obj-$(CONFIG_FB_AUO_K1900)       += auo_k1900fb.o
+obj-$(CONFIG_FB_AUO_K1901)       += auo_k1901fb.o
 obj-$(CONFIG_FB_S1D13XXX)        += s1d13xxxfb.o
 obj-$(CONFIG_FB_SH7760)                  += sh7760fb.o
 obj-$(CONFIG_FB_IMX)              += imxfb.o
diff --git a/drivers/video/auo_k1900fb.c b/drivers/video/auo_k1900fb.c
new file mode 100644 (file)
index 0000000..c36cf96
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * auok190xfb.c -- FB driver for AUO-K1900 controllers
+ *
+ * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on broadsheetfb.c
+ *
+ * Copyright (C) 2008, Jaya Kumar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
+ *
+ * This driver is written to be used with the AUO-K1900 display controller.
+ *
+ * It is intended to be architecture independent. A board specific driver
+ * must be used to perform all the physical IO interactions.
+ *
+ * The controller supports different update modes:
+ * mode0+1 16 step gray (4bit)
+ * mode2 4 step gray (2bit) - FIXME: add strange refresh
+ * mode3 2 step gray (1bit) - FIXME: add strange refresh
+ * mode4 handwriting mode (strange behaviour)
+ * mode5 automatic selection of update mode
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+
+#include <video/auo_k190xfb.h>
+
+#include "auo_k190x.h"
+
+/*
+ * AUO-K1900 specific commands
+ */
+
+#define AUOK1900_CMD_PARTIALDISP       0x1001
+#define AUOK1900_CMD_ROTATION          0x1006
+#define AUOK1900_CMD_LUT_STOP          0x1009
+
+#define AUOK1900_INIT_TEMP_AVERAGE     (1 << 13)
+#define AUOK1900_INIT_ROTATE(_x)       ((_x & 0x3) << 10)
+#define AUOK1900_INIT_RESOLUTION(_res) ((_res & 0x7) << 2)
+
+static void auok1900_init(struct auok190xfb_par *par)
+{
+       struct auok190x_board *board = par->board;
+       u16 init_param = 0;
+
+       init_param |= AUOK1900_INIT_TEMP_AVERAGE;
+       init_param |= AUOK1900_INIT_ROTATE(par->rotation);
+       init_param |= AUOK190X_INIT_INVERSE_WHITE;
+       init_param |= AUOK190X_INIT_FORMAT0;
+       init_param |= AUOK1900_INIT_RESOLUTION(par->resolution);
+       init_param |= AUOK190X_INIT_SHIFT_RIGHT;
+
+       auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
+
+       /* let the controller finish */
+       board->wait_for_rdy(par);
+}
+
+static void auok1900_update_region(struct auok190xfb_par *par, int mode,
+                                               u16 y1, u16 y2)
+{
+       struct device *dev = par->info->device;
+       unsigned char *buf = (unsigned char *)par->info->screen_base;
+       int xres = par->info->var.xres;
+       u16 args[4];
+
+       pm_runtime_get_sync(dev);
+
+       mutex_lock(&(par->io_lock));
+
+       /* y1 and y2 must be a multiple of 2 so drop the lowest bit */
+       y1 &= 0xfffe;
+       y2 &= 0xfffe;
+
+       dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
+               1, y1+1, xres, y2-y1, mode);
+
+       /* to FIX handle different partial update modes */
+       args[0] = mode | 1;
+       args[1] = y1 + 1;
+       args[2] = xres;
+       args[3] = y2 - y1;
+       buf += y1 * xres;
+       auok190x_send_cmdargs_pixels(par, AUOK1900_CMD_PARTIALDISP, 4, args,
+                                    ((y2 - y1) * xres)/2, (u16 *) buf);
+       auok190x_send_command(par, AUOK190X_CMD_DATA_STOP);
+
+       par->update_cnt++;
+
+       mutex_unlock(&(par->io_lock));
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+}
+
+static void auok1900fb_dpy_update_pages(struct auok190xfb_par *par,
+                                               u16 y1, u16 y2)
+{
+       int mode;
+
+       if (par->update_mode < 0) {
+               mode = AUOK190X_UPDATE_MODE(1);
+               par->last_mode = -1;
+       } else {
+               mode = AUOK190X_UPDATE_MODE(par->update_mode);
+               par->last_mode = par->update_mode;
+       }
+
+       if (par->flash)
+               mode |= AUOK190X_UPDATE_NONFLASH;
+
+       auok1900_update_region(par, mode, y1, y2);
+}
+
+static void auok1900fb_dpy_update(struct auok190xfb_par *par)
+{
+       int mode;
+
+       if (par->update_mode < 0) {
+               mode = AUOK190X_UPDATE_MODE(0);
+               par->last_mode = -1;
+       } else {
+               mode = AUOK190X_UPDATE_MODE(par->update_mode);
+               par->last_mode = par->update_mode;
+       }
+
+       if (par->flash)
+               mode |= AUOK190X_UPDATE_NONFLASH;
+
+       auok1900_update_region(par, mode, 0, par->info->var.yres);
+       par->update_cnt = 0;
+}
+
+static bool auok1900fb_need_refresh(struct auok190xfb_par *par)
+{
+       return (par->update_cnt > 10);
+}
+
+static int __devinit auok1900fb_probe(struct platform_device *pdev)
+{
+       struct auok190x_init_data init;
+       struct auok190x_board *board;
+
+       /* pick up board specific routines */
+       board = pdev->dev.platform_data;
+       if (!board)
+               return -EINVAL;
+
+       /* fill temporary init struct for common init */
+       init.id = "auo_k1900fb";
+       init.board = board;
+       init.update_partial = auok1900fb_dpy_update_pages;
+       init.update_all = auok1900fb_dpy_update;
+       init.need_refresh = auok1900fb_need_refresh;
+       init.init = auok1900_init;
+
+       return auok190x_common_probe(pdev, &init);
+}
+
+static int __devexit auok1900fb_remove(struct platform_device *pdev)
+{
+       return auok190x_common_remove(pdev);
+}
+
+static struct platform_driver auok1900fb_driver = {
+       .probe  = auok1900fb_probe,
+       .remove = __devexit_p(auok1900fb_remove),
+       .driver = {
+               .owner  = THIS_MODULE,
+               .name   = "auo_k1900fb",
+               .pm = &auok190x_pm,
+       },
+};
+module_platform_driver(auok1900fb_driver);
+
+MODULE_DESCRIPTION("framebuffer driver for the AUO-K1900 EPD controller");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/auo_k1901fb.c b/drivers/video/auo_k1901fb.c
new file mode 100644 (file)
index 0000000..1c054c1
--- /dev/null
@@ -0,0 +1,251 @@
+/*
+ * auok190xfb.c -- FB driver for AUO-K1901 controllers
+ *
+ * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on broadsheetfb.c
+ *
+ * Copyright (C) 2008, Jaya Kumar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
+ *
+ * This driver is written to be used with the AUO-K1901 display controller.
+ *
+ * It is intended to be architecture independent. A board specific driver
+ * must be used to perform all the physical IO interactions.
+ *
+ * The controller supports different update modes:
+ * mode0+1 16 step gray (4bit)
+ * mode2+3 4 step gray (2bit)
+ * mode4+5 2 step gray (1bit)
+ * - mode4 is described as "without LUT"
+ * mode7 automatic selection of update mode
+ *
+ * The most interesting difference to the K1900 is the ability to do screen
+ * updates in an asynchronous fashion. Where the K1900 needs to wait for the
+ * current update to complete, the K1901 can process later updates already.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+
+#include <video/auo_k190xfb.h>
+
+#include "auo_k190x.h"
+
+/*
+ * AUO-K1901 specific commands
+ */
+
+#define AUOK1901_CMD_LUT_INTERFACE     0x0005
+#define AUOK1901_CMD_DMA_START         0x1001
+#define AUOK1901_CMD_CURSOR_START      0x1007
+#define AUOK1901_CMD_CURSOR_STOP       AUOK190X_CMD_DATA_STOP
+#define AUOK1901_CMD_DDMA_START                0x1009
+
+#define AUOK1901_INIT_GATE_PULSE_LOW   (0 << 14)
+#define AUOK1901_INIT_GATE_PULSE_HIGH  (1 << 14)
+#define AUOK1901_INIT_SINGLE_GATE      (0 << 13)
+#define AUOK1901_INIT_DOUBLE_GATE      (1 << 13)
+
+/* Bits to pixels
+ *   Mode      15-12   11-8    7-4     3-0
+ *   format2   2       T       1       T
+ *   format3   1       T       2       T
+ *   format4   T       2       T       1
+ *   format5   T       1       T       2
+ *
+ *   halftone modes:
+ *   format6   2       2       1       1
+ *   format7   1       1       2       2
+ */
+#define AUOK1901_INIT_FORMAT2          (1 << 7)
+#define AUOK1901_INIT_FORMAT3          ((1 << 7) | (1 << 6))
+#define AUOK1901_INIT_FORMAT4          (1 << 8)
+#define AUOK1901_INIT_FORMAT5          ((1 << 8) | (1 << 6))
+#define AUOK1901_INIT_FORMAT6          ((1 << 8) | (1 << 7))
+#define AUOK1901_INIT_FORMAT7          ((1 << 8) | (1 << 7) | (1 << 6))
+
+/* res[4] to bit 10
+ * res[3-0] to bits 5-2
+ */
+#define AUOK1901_INIT_RESOLUTION(_res) (((_res & (1 << 4)) << 6) \
+                                        | ((_res & 0xf) << 2))
+
+/*
+ * portrait / landscape orientation in AUOK1901_CMD_DMA_START
+ */
+#define AUOK1901_DMA_ROTATE90(_rot)            ((_rot & 1) << 13)
+
+/*
+ * equivalent to 1 << 11, needs the ~ to have same rotation like K1900
+ */
+#define AUOK1901_DDMA_ROTATE180(_rot)          ((~_rot & 2) << 10)
+
+static void auok1901_init(struct auok190xfb_par *par)
+{
+       struct auok190x_board *board = par->board;
+       u16 init_param = 0;
+
+       init_param |= AUOK190X_INIT_INVERSE_WHITE;
+       init_param |= AUOK190X_INIT_FORMAT0;
+       init_param |= AUOK1901_INIT_RESOLUTION(par->resolution);
+       init_param |= AUOK190X_INIT_SHIFT_LEFT;
+
+       auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
+
+       /* let the controller finish */
+       board->wait_for_rdy(par);
+}
+
+static void auok1901_update_region(struct auok190xfb_par *par, int mode,
+                                               u16 y1, u16 y2)
+{
+       struct device *dev = par->info->device;
+       unsigned char *buf = (unsigned char *)par->info->screen_base;
+       int xres = par->info->var.xres;
+       u16 args[5];
+
+       pm_runtime_get_sync(dev);
+
+       mutex_lock(&(par->io_lock));
+
+       /* y1 and y2 must be a multiple of 2 so drop the lowest bit */
+       y1 &= 0xfffe;
+       y2 &= 0xfffe;
+
+       dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
+               1, y1+1, xres, y2-y1, mode);
+
+       /* K1901: first transfer the region data */
+       args[0] = AUOK1901_DMA_ROTATE90(par->rotation) | 1;
+       args[1] = y1 + 1;
+       args[2] = xres;
+       args[3] = y2 - y1;
+       buf += y1 * xres;
+       auok190x_send_cmdargs_pixels_nowait(par, AUOK1901_CMD_DMA_START, 4,
+                                           args, ((y2 - y1) * xres)/2,
+                                           (u16 *) buf);
+       auok190x_send_command_nowait(par, AUOK190X_CMD_DATA_STOP);
+
+       /* K1901: second tell the controller to update the region with mode */
+       args[0] = mode | AUOK1901_DDMA_ROTATE180(par->rotation);
+       args[1] = 1;
+       args[2] = y1 + 1;
+       args[3] = xres;
+       args[4] = y2 - y1;
+       auok190x_send_cmdargs_nowait(par, AUOK1901_CMD_DDMA_START, 5, args);
+
+       par->update_cnt++;
+
+       mutex_unlock(&(par->io_lock));
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+}
+
+static void auok1901fb_dpy_update_pages(struct auok190xfb_par *par,
+                                               u16 y1, u16 y2)
+{
+       int mode;
+
+       if (par->update_mode < 0) {
+               mode = AUOK190X_UPDATE_MODE(1);
+               par->last_mode = -1;
+       } else {
+               mode = AUOK190X_UPDATE_MODE(par->update_mode);
+               par->last_mode = par->update_mode;
+       }
+
+       if (par->flash)
+               mode |= AUOK190X_UPDATE_NONFLASH;
+
+       auok1901_update_region(par, mode, y1, y2);
+}
+
+static void auok1901fb_dpy_update(struct auok190xfb_par *par)
+{
+       int mode;
+
+       /* When doing full updates, wait for the controller to be ready
+        * This will hopefully catch some hangs of the K1901
+        */
+       par->board->wait_for_rdy(par);
+
+       if (par->update_mode < 0) {
+               mode = AUOK190X_UPDATE_MODE(0);
+               par->last_mode = -1;
+       } else {
+               mode = AUOK190X_UPDATE_MODE(par->update_mode);
+               par->last_mode = par->update_mode;
+       }
+
+       if (par->flash)
+               mode |= AUOK190X_UPDATE_NONFLASH;
+
+       auok1901_update_region(par, mode, 0, par->info->var.yres);
+       par->update_cnt = 0;
+}
+
+static bool auok1901fb_need_refresh(struct auok190xfb_par *par)
+{
+       return (par->update_cnt > 10);
+}
+
+static int __devinit auok1901fb_probe(struct platform_device *pdev)
+{
+       struct auok190x_init_data init;
+       struct auok190x_board *board;
+
+       /* pick up board specific routines */
+       board = pdev->dev.platform_data;
+       if (!board)
+               return -EINVAL;
+
+       /* fill temporary init struct for common init */
+       init.id = "auo_k1901fb";
+       init.board = board;
+       init.update_partial = auok1901fb_dpy_update_pages;
+       init.update_all = auok1901fb_dpy_update;
+       init.need_refresh = auok1901fb_need_refresh;
+       init.init = auok1901_init;
+
+       return auok190x_common_probe(pdev, &init);
+}
+
+static int __devexit auok1901fb_remove(struct platform_device *pdev)
+{
+       return auok190x_common_remove(pdev);
+}
+
+static struct platform_driver auok1901fb_driver = {
+       .probe  = auok1901fb_probe,
+       .remove = __devexit_p(auok1901fb_remove),
+       .driver = {
+               .owner  = THIS_MODULE,
+               .name   = "auo_k1901fb",
+               .pm = &auok190x_pm,
+       },
+};
+module_platform_driver(auok1901fb_driver);
+
+MODULE_DESCRIPTION("framebuffer driver for the AUO-K1901 EPD controller");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/auo_k190x.c b/drivers/video/auo_k190x.c
new file mode 100644 (file)
index 0000000..77da6a2
--- /dev/null
@@ -0,0 +1,1046 @@
+/*
+ * Common code for AUO-K190X framebuffer drivers
+ *
+ * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/auo_k190xfb.h>
+
+#include "auo_k190x.h"
+
+/* physical panel dimensions, in pixels */
+struct panel_info {
+       int w;  /* panel width in pixels */
+       int h;  /* panel height in pixels */
+};
+
+/* table of panel specific parameters to be indexed into by the board drivers */
+static struct panel_info panel_table[] = {
+       /* standard 6" */
+       [AUOK190X_RESOLUTION_800_600] = {
+               .w = 800,
+               .h = 600,
+       },
+       /* standard 9" */
+       [AUOK190X_RESOLUTION_1024_768] = {
+               .w = 1024,
+               .h = 768,
+       },
+};
+
+/*
+ * private I80 interface to the board driver
+ */
+
+/* clock one 16bit word onto the I80 data bus: WR low, data, WR high */
+static void auok190x_issue_data(struct auok190xfb_par *par, u16 data)
+{
+       par->board->set_ctl(par, AUOK190X_I80_WR, 0);
+       par->board->set_hdb(par, data);
+       par->board->set_ctl(par, AUOK190X_I80_WR, 1);
+}
+
+/* issue a command word: same as data, but with DC held low for the write */
+static void auok190x_issue_cmd(struct auok190xfb_par *par, u16 data)
+{
+       par->board->set_ctl(par, AUOK190X_I80_DC, 0);
+       auok190x_issue_data(par, data);
+       par->board->set_ctl(par, AUOK190X_I80_DC, 1);
+}
+
+/*
+ * Stream pixel data to the controller, reducing 8bit grayscale to 4bit.
+ * @size: number of 16bit words in @data (each word holds two 8bit pixels);
+ *        must be a multiple of 4 so that whole 16bit output words result.
+ *        NOTE(review): assumed from the indexing below - confirm against the
+ *        callers and the TCON datasheet.
+ * Returns 0 on success or -EINVAL for an unaligned size.
+ */
+static int auok190x_issue_pixels(struct auok190xfb_par *par, int size,
+                                u16 *data)
+{
+       struct device *dev = par->info->device;
+       int i;
+       u16 tmp;
+
+       if (size & 3) {
+               dev_err(dev, "issue_pixels: size %d must be a multiple of 4\n",
+                       size);
+               return -EINVAL;
+       }
+
+       /* each iteration consumes two input words (4 pixels) and emits one */
+       for (i = 0; i < (size >> 1); i++) {
+               par->board->set_ctl(par, AUOK190X_I80_WR, 0);
+
+               /* simple reduction of 8bit staticgray to 4bit gray
+                * combines 4 * 4bit pixel values into a 16bit value
+                */
+               tmp  = (data[2*i] & 0xF0) >> 4;
+               tmp |= (data[2*i] & 0xF000) >> 8;
+               tmp |= (data[2*i+1] & 0xF0) << 4;
+               tmp |= (data[2*i+1] & 0xF000);
+
+               par->board->set_hdb(par, tmp);
+               par->board->set_ctl(par, AUOK190X_I80_WR, 1);
+       }
+
+       return 0;
+}
+
+/* read one 16bit word from the controller: OE low, sample bus, OE high */
+static u16 auok190x_read_data(struct auok190xfb_par *par)
+{
+       u16 data;
+
+       par->board->set_ctl(par, AUOK190X_I80_OE, 0);
+       data = par->board->get_hdb(par);
+       par->board->set_ctl(par, AUOK190X_I80_OE, 1);
+
+       return data;
+}
+
+/*
+ * Command interface for the controller drivers
+ */
+
+/* send a bare command without waiting for the controller's ready state */
+void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data)
+{
+       par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+       auok190x_issue_cmd(par, data);
+       par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+}
+EXPORT_SYMBOL_GPL(auok190x_send_command_nowait);
+
+/* send a command plus @argc parameter words, without waiting for ready */
+void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
+                                 int argc, u16 *argv)
+{
+       int i;
+
+       par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+       auok190x_issue_cmd(par, cmd);
+
+       for (i = 0; i < argc; i++)
+               auok190x_issue_data(par, argv[i]);
+       par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_nowait);
+
+/*
+ * Send a bare command after the controller reports ready.
+ * Returns 0 on success or the error from wait_for_rdy().
+ */
+int auok190x_send_command(struct auok190xfb_par *par, u16 data)
+{
+       int ret;
+
+       ret = par->board->wait_for_rdy(par);
+       if (ret)
+               return ret;
+
+       auok190x_send_command_nowait(par, data);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_send_command);
+
+/*
+ * Send a command plus arguments after the controller reports ready.
+ * Returns 0 on success or the error from wait_for_rdy().
+ */
+int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
+                          int argc, u16 *argv)
+{
+       int ret;
+
+       ret = par->board->wait_for_rdy(par);
+       if (ret)
+               return ret;
+
+       auok190x_send_cmdargs_nowait(par, cmd, argc, argv);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs);
+
+/*
+ * Issue a command and read back @argc result words into @argv.
+ * Waits for the controller's ready state first; returns 0 on success or
+ * the error from wait_for_rdy().
+ */
+int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
+                          int argc, u16 *argv)
+{
+       int i, ret;
+
+       ret = par->board->wait_for_rdy(par);
+       if (ret)
+               return ret;
+
+       par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+       auok190x_issue_cmd(par, cmd);
+
+       for (i = 0; i < argc; i++)
+               argv[i] = auok190x_read_data(par);
+       par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_read_cmdargs);
+
+/*
+ * Send command, arguments and a pixel stream in one chip-select cycle,
+ * without waiting for ready. @size/@data are passed to
+ * auok190x_issue_pixels(); its error return is not propagated here.
+ */
+void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par, u16 cmd,
+                                 int argc, u16 *argv, int size, u16 *data)
+{
+       int i;
+
+       par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+
+       auok190x_issue_cmd(par, cmd);
+
+       for (i = 0; i < argc; i++)
+               auok190x_issue_data(par, argv[i]);
+
+       auok190x_issue_pixels(par, size, data);
+
+       par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels_nowait);
+
+/*
+ * Like auok190x_send_cmdargs_pixels_nowait(), but waits for the ready
+ * state first. Returns 0 on success or the error from wait_for_rdy().
+ */
+int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
+                                 int argc, u16 *argv, int size, u16 *data)
+{
+       int ret;
+
+       ret = par->board->wait_for_rdy(par);
+       if (ret)
+               return ret;
+
+       auok190x_send_cmdargs_pixels_nowait(par, cmd, argc, argv, size, data);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels);
+
+/*
+ * fbdefio callbacks - common on both controllers.
+ */
+
+/* first mmap-write hits the page fault path; kick off an async resume now */
+static void auok190xfb_dpy_first_io(struct fb_info *info)
+{
+       /* tell runtime-pm that we wish to use the device in a short time */
+       pm_runtime_get(info->device);
+}
+
+/* this is called back from the deferred io workqueue */
+static void auok190xfb_dpy_deferred_io(struct fb_info *info,
+                               struct list_head *pagelist)
+{
+       struct fb_deferred_io *fbdefio = info->fbdefio;
+       struct auok190xfb_par *par = info->par;
+       u16 yres = info->var.yres;
+       u16 xres = info->var.xres;
+       u16 y1 = 0, h = 0;      /* accumulated update region in lines */
+       int prev_index = -1;    /* page index of the previous iteration */
+       struct page *cur;
+       int h_inc;              /* display lines covered by one page */
+       int threshold;          /* max page gap merged into one update */
+
+       if (!list_empty(pagelist))
+               /* the device resume should've been requested through first_io,
+                * if the resume did not finish until now, wait for it.
+                */
+               pm_runtime_barrier(info->device);
+       else
+               /* We reached this via the fsync or some other way.
+                * In either case the first_io function did not run,
+                * so we runtime_resume the device here synchronously.
+                */
+               pm_runtime_get_sync(info->device);
+
+       /* Do a full screen update every n updates to prevent
+        * excessive darkening of the Sipix display.
+        * If we do this, there is no need to walk the pages.
+        */
+       if (par->need_refresh(par)) {
+               par->update_all(par);
+               goto out;
+       }
+
+       /* height increment is fixed per page */
+       h_inc = DIV_ROUND_UP(PAGE_SIZE , xres);
+
+       /* calculate number of pages from pixel height */
+       threshold = par->consecutive_threshold / h_inc;
+       if (threshold < 1)
+               threshold = 1;
+
+       /* walk the written page list and swizzle the data */
+       list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+               if (prev_index < 0) {
+                       /* just starting so assign first page */
+                       y1 = (cur->index << PAGE_SHIFT) / xres;
+                       h = h_inc;
+               } else if ((cur->index - prev_index) <= threshold) {
+                       /* page is within our threshold for single updates */
+                       h += h_inc * (cur->index - prev_index);
+               } else {
+                       /* page not consecutive, issue previous update first */
+                       par->update_partial(par, y1, y1 + h);
+
+                       /* start over with our non consecutive page */
+                       y1 = (cur->index << PAGE_SHIFT) / xres;
+                       h = h_inc;
+               }
+               prev_index = cur->index;
+       }
+
+       /* if we still have any pages to update we do so now */
+       if (h >= yres)
+               /* its a full screen update, just do it */
+               par->update_all(par);
+       else
+               par->update_partial(par, y1, min((u16) (y1 + h), yres));
+
+out:
+       /* balance the get from first_io/above and allow autosuspend */
+       pm_runtime_mark_last_busy(info->device);
+       pm_runtime_put_autosuspend(info->device);
+}
+
+/*
+ * framebuffer operations
+ */
+
+/*
+ * this is the slow path from userspace. they can seek and write to
+ * the fb. it's inefficient to do anything less than a full screen draw
+ */
+static ssize_t auok190xfb_write(struct fb_info *info, const char __user *buf,
+                               size_t count, loff_t *ppos)
+{
+       struct auok190xfb_par *par = info->par;
+       unsigned long p = *ppos;
+       void *dst;
+       int err = 0;
+       unsigned long total_size;
+
+       if (info->state != FBINFO_STATE_RUNNING)
+               return -EPERM;
+
+       total_size = info->fix.smem_len;
+
+       if (p > total_size)
+               return -EFBIG;
+
+       /* clamp the write to the framebuffer size; remember the error but
+        * still copy the part that fits (standard fbdev write semantics)
+        */
+       if (count > total_size) {
+               err = -EFBIG;
+               count = total_size;
+       }
+
+       if (count + p > total_size) {
+               if (!err)
+                       err = -ENOSPC;
+
+               count = total_size - p;
+       }
+
+       dst = (void *)(info->screen_base + p);
+
+       if (copy_from_user(dst, buf, count))
+               err = -EFAULT;
+
+       if  (!err)
+               *ppos += count;
+
+       /* push the whole screen to the display */
+       par->update_all(par);
+
+       return (err) ? err : count;
+}
+
+/* software fillrect followed by a full display update */
+static void auok190xfb_fillrect(struct fb_info *info,
+                                  const struct fb_fillrect *rect)
+{
+       struct auok190xfb_par *par = info->par;
+
+       sys_fillrect(info, rect);
+
+       par->update_all(par);
+}
+
+/* software copyarea followed by a full display update */
+static void auok190xfb_copyarea(struct fb_info *info,
+                                  const struct fb_copyarea *area)
+{
+       struct auok190xfb_par *par = info->par;
+
+       sys_copyarea(info, area);
+
+       par->update_all(par);
+}
+
+/* software imageblit followed by a full display update */
+static void auok190xfb_imageblit(struct fb_info *info,
+                               const struct fb_image *image)
+{
+       struct auok190xfb_par *par = info->par;
+
+       sys_imageblit(info, image);
+
+       par->update_all(par);
+}
+
+/*
+ * Only the fixed panel resolution is accepted; any request to change the
+ * (virtual) resolution is rejected with -EINVAL, and requests exceeding
+ * the allocated video memory with -ENOMEM.
+ */
+static int auok190xfb_check_var(struct fb_var_screeninfo *var,
+                                  struct fb_info *info)
+{
+       if (info->var.xres != var->xres || info->var.yres != var->yres ||
+           info->var.xres_virtual != var->xres_virtual ||
+           info->var.yres_virtual != var->yres_virtual) {
+               pr_info("%s: Resolution not supported: X%u x Y%u\n",
+                        __func__, var->xres, var->yres);
+               return -EINVAL;
+       }
+
+       /*
+        *  Memory limit
+        */
+
+       if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
+               pr_info("%s: Memory Limit requested yres_virtual = %u\n",
+                        __func__, var->yres_virtual);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/* fbdev ops; all drawing goes through the sys_* helpers plus an update */
+static struct fb_ops auok190xfb_ops = {
+       .owner          = THIS_MODULE,
+       .fb_read        = fb_sys_read,
+       .fb_write       = auok190xfb_write,
+       .fb_fillrect    = auok190xfb_fillrect,
+       .fb_copyarea    = auok190xfb_copyarea,
+       .fb_imageblit   = auok190xfb_imageblit,
+       .fb_check_var   = auok190xfb_check_var,
+};
+
+/*
+ * Controller-functions common to both K1900 and K1901
+ */
+
+/*
+ * Read the controller temperature (in degrees C) from the version
+ * registers. Values >= 201 encode negative temperatures in a 8bit
+ * two's-complement-like scheme (after the half-degree bit is dropped).
+ */
+static int auok190x_read_temperature(struct auok190xfb_par *par)
+{
+       struct device *dev = par->info->device;
+       u16 data[4];
+       int temp;
+
+       pm_runtime_get_sync(dev);
+
+       mutex_lock(&(par->io_lock));
+
+       auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);
+
+       mutex_unlock(&(par->io_lock));
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+
+       /* sanitize and split of half-degrees for now */
+       temp = ((data[0] & AUOK190X_VERSION_TEMP_MASK) >> 1);
+
+       /* handle positive and negative temperatures */
+       if (temp >= 201)
+               return (255 - temp + 1) * (-1);
+       else
+               return temp;
+}
+
+/*
+ * Query the version registers and cache panel/TCON/LUT identification
+ * data in the driver state.
+ */
+static void auok190x_identify(struct auok190xfb_par *par)
+{
+       struct device *dev = par->info->device;
+       u16 data[4];
+
+       pm_runtime_get_sync(dev);
+
+       mutex_lock(&(par->io_lock));
+
+       auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);
+
+       mutex_unlock(&(par->io_lock));
+
+       /* NOTE(review): the TEMP mask is reused to extract the EPD type from
+        * data[1] - presumably the fields share the same bit layout; confirm
+        * against the TCON datasheet.
+        */
+       par->epd_type = data[1] & AUOK190X_VERSION_TEMP_MASK;
+
+       par->panel_size_int = AUOK190X_VERSION_SIZE_INT(data[2]);
+       par->panel_size_float = AUOK190X_VERSION_SIZE_FLOAT(data[2]);
+       par->panel_model = AUOK190X_VERSION_MODEL(data[2]);
+
+       par->tcon_version = AUOK190X_VERSION_TCON(data[3]);
+       par->lut_version = AUOK190X_VERSION_LUT(data[3]);
+
+       dev_dbg(dev, "panel %d.%din, model 0x%x, EPD 0x%x TCON-rev 0x%x, LUT-rev 0x%x",
+               par->panel_size_int, par->panel_size_float, par->panel_model,
+               par->epd_type, par->tcon_version, par->lut_version);
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+}
+
+/*
+ * Sysfs functions
+ */
+
+/* sysfs: show the current update mode (-1 means "not set") */
+static ssize_t update_mode_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct fb_info *info = dev_get_drvdata(dev);
+       struct auok190xfb_par *par = info->par;
+
+       return sprintf(buf, "%d\n", par->update_mode);
+}
+
+/*
+ * sysfs: set the update mode.
+ * NOTE(review): the value is not range-checked here; invalid modes are
+ * passed through to AUOK190X_UPDATE_MODE() in the controller drivers.
+ */
+static ssize_t update_mode_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct fb_info *info = dev_get_drvdata(dev);
+       struct auok190xfb_par *par = info->par;
+       int mode, ret;
+
+       ret = kstrtoint(buf, 10, &mode);
+       if (ret)
+               return ret;
+
+       par->update_mode = mode;
+
+       /* if we enter a better mode, do a full update */
+       if (par->last_mode > 1 && mode < par->last_mode)
+               par->update_all(par);
+
+       return count;
+}
+
+/* sysfs: show the flash flag (0 or 1) */
+static ssize_t flash_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct fb_info *info = dev_get_drvdata(dev);
+       struct auok190xfb_par *par = info->par;
+
+       return sprintf(buf, "%d\n", par->flash);
+}
+
+/* sysfs: any positive value enables the flash flag, everything else
+ * (including negatives) clears it
+ */
+static ssize_t flash_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t count)
+{
+       struct fb_info *info = dev_get_drvdata(dev);
+       struct auok190xfb_par *par = info->par;
+       int val, err;
+
+       err = kstrtoint(buf, 10, &val);
+       if (err)
+               return err;
+
+       par->flash = (val > 0) ? 1 : 0;
+
+       return count;
+}
+
+/* sysfs: read and show the current controller temperature */
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct fb_info *info = dev_get_drvdata(dev);
+       struct auok190xfb_par *par = info->par;
+       int temp;
+
+       temp = auok190x_read_temperature(par);
+       return sprintf(buf, "%d\n", temp);
+}
+
+/* sysfs attributes; registered on the fb device in common_probe */
+static DEVICE_ATTR(update_mode, 0644, update_mode_show, update_mode_store);
+static DEVICE_ATTR(flash, 0644, flash_show, flash_store);
+static DEVICE_ATTR(temp, 0644, temp_show, NULL);
+
+static struct attribute *auok190x_attributes[] = {
+       &dev_attr_update_mode.attr,
+       &dev_attr_flash.attr,
+       &dev_attr_temp.attr,
+       NULL
+};
+
+static const struct attribute_group auok190x_attr_group = {
+       .attrs          = auok190x_attributes,
+};
+
+/*
+ * Power the controller up or down via regulator and the nRST/nSLP gpios.
+ * Returns 0 on success, or the regulator_enable() error when powering up.
+ * NOTE(review): the regulator_disable() return value in the off path is
+ * deliberately ignored - there is no sensible recovery during power-down.
+ */
+static int auok190x_power(struct auok190xfb_par *par, bool on)
+{
+       struct auok190x_board *board = par->board;
+       int ret;
+
+       if (on) {
+               /* We should maintain POWER up for at least 80ms before set
+                * RST_N and SLP_N to high (TCON spec 20100803_v35 p59)
+                */
+               ret = regulator_enable(par->regulator);
+               if (ret)
+                       return ret;
+
+               msleep(200);
+               gpio_set_value(board->gpio_nrst, 1);
+               gpio_set_value(board->gpio_nsleep, 1);
+               msleep(200);
+       } else {
+               regulator_disable(par->regulator);
+               gpio_set_value(board->gpio_nrst, 0);
+               gpio_set_value(board->gpio_nsleep, 0);
+       }
+
+       return 0;
+}
+
+/*
+ * Recovery - powercycle the controller
+ */
+
+static void auok190x_recover(struct auok190xfb_par *par)
+{
+       auok190x_power(par, 0);
+       msleep(100);
+       auok190x_power(par, 1);
+
+       /* re-run the controller specific init sequence */
+       par->init(par);
+
+       /* wait for init to complete */
+       par->board->wait_for_rdy(par);
+}
+
+/*
+ * Power-management
+ */
+
+#ifdef CONFIG_PM
+/*
+ * Runtime suspend: put the controller into standby (quirk-dependent).
+ * Intentionally leaves par->io_lock held - it is released again in
+ * auok190x_runtime_resume() - because the controller never becomes
+ * non-busy while in standby, so no I/O must be attempted meanwhile.
+ */
+static int auok190x_runtime_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fb_info *info = platform_get_drvdata(pdev);
+       struct auok190xfb_par *par = info->par;
+       struct auok190x_board *board = par->board;
+       u16 standby_param;
+
+       /* take and keep the lock until we are resumed, as the controller
+        * will never reach the non-busy state when in standby mode
+        */
+       mutex_lock(&(par->io_lock));
+
+       if (par->standby) {
+               dev_warn(dev, "already in standby, runtime-pm pairing mismatch\n");
+               mutex_unlock(&(par->io_lock));
+               return 0;
+       }
+
+       /* according to runtime_pm.txt runtime_suspend only means, that the
+        * device will not process data and will not communicate with the CPU
+        * As we hold the lock, this stays true even without standby
+        */
+       if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+               dev_dbg(dev, "runtime suspend without standby\n");
+               goto finish;
+       } else if (board->quirks & AUOK190X_QUIRK_STANDBYPARAM) {
+               /* for some TCON versions STANDBY expects a parameter (0) but
+                * it seems the real tcon version has to be determined yet.
+                */
+               dev_dbg(dev, "runtime suspend with additional empty param\n");
+               standby_param = 0;
+               auok190x_send_cmdargs(par, AUOK190X_CMD_STANDBY, 1,
+                                     &standby_param);
+       } else {
+               dev_dbg(dev, "runtime suspend without param\n");
+               auok190x_send_command(par, AUOK190X_CMD_STANDBY);
+       }
+
+       /* give the controller time to enter standby */
+       msleep(64);
+
+finish:
+       par->standby = 1;
+
+       return 0;
+}
+
+/*
+ * Runtime resume: wake the controller from standby and release the
+ * io_lock taken in auok190x_runtime_suspend().
+ */
+static int auok190x_runtime_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fb_info *info = platform_get_drvdata(pdev);
+       struct auok190xfb_par *par = info->par;
+       struct auok190x_board *board = par->board;
+
+       if (!par->standby) {
+               dev_warn(dev, "not in standby, runtime-pm pairing mismatch\n");
+               return 0;
+       }
+
+       if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+               dev_dbg(dev, "runtime resume without standby\n");
+       } else {
+               /* when in standby, controller is always busy
+                * and only accepts the wakeup command
+                */
+               dev_dbg(dev, "runtime resume from standby\n");
+               auok190x_send_command_nowait(par, AUOK190X_CMD_WAKEUP);
+
+               msleep(160);
+
+               /* wait for the controller to be ready and release the lock */
+               board->wait_for_rdy(par);
+       }
+
+       par->standby = 0;
+
+       mutex_unlock(&(par->io_lock));
+
+       return 0;
+}
+
+/*
+ * System suspend: either power the IC off (standby-broken boards) or
+ * enter the sleep state via standby + nSLP low.
+ */
+static int auok190x_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fb_info *info = platform_get_drvdata(pdev);
+       struct auok190xfb_par *par = info->par;
+       struct auok190x_board *board = par->board;
+       int ret;
+
+       dev_dbg(dev, "suspend\n");
+       if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+               /* suspend via powering off the ic */
+               dev_dbg(dev, "suspend with broken standby\n");
+
+               auok190x_power(par, 0);
+       } else {
+               dev_dbg(dev, "suspend using sleep\n");
+
+               /* the sleep state can only be entered from the standby state.
+                * pm_runtime_get_noresume gets called before the suspend call.
+                * So the devices usage count is >0 but it is not necessarily
+                * active.
+                */
+               if (!pm_runtime_status_suspended(dev)) {
+                       ret = auok190x_runtime_suspend(dev);
+                       if (ret < 0) {
+                               dev_err(dev, "auok190x_runtime_suspend failed with %d\n",
+                                       ret);
+                               return ret;
+                       }
+                       /* remember that we entered standby manually, so the
+                        * resume path knows not to re-suspend afterwards
+                        */
+                       par->manual_standby = 1;
+               }
+
+               gpio_direction_output(board->gpio_nsleep, 0);
+       }
+
+       msleep(100);
+
+       return 0;
+}
+
+/*
+ * System resume: power/wake the controller back up and restore the
+ * runtime-pm state it had before suspend.
+ */
+static int auok190x_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fb_info *info = platform_get_drvdata(pdev);
+       struct auok190xfb_par *par = info->par;
+       struct auok190x_board *board = par->board;
+
+       dev_dbg(dev, "resume\n");
+       if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+               dev_dbg(dev, "resume with broken standby\n");
+
+               auok190x_power(par, 1);
+
+               par->init(par);
+       } else {
+               dev_dbg(dev, "resume from sleep\n");
+
+               /* device should be in runtime suspend when we were suspended
+                * and pm_runtime_put_sync gets called after this function.
+                * So there is no need to touch the standby mode here at all.
+                */
+               gpio_direction_output(board->gpio_nsleep, 1);
+               msleep(100);
+
+               /* an additional init call seems to be necessary after sleep */
+               auok190x_runtime_resume(dev);
+               par->init(par);
+
+               /* if we were runtime-suspended before, suspend again*/
+               if (!par->manual_standby)
+                       auok190x_runtime_suspend(dev);
+               else
+                       par->manual_standby = 0;
+       }
+
+       return 0;
+}
+#endif
+
+/* PM ops shared by the K1900/K1901 platform drivers */
+const struct dev_pm_ops auok190x_pm = {
+       SET_RUNTIME_PM_OPS(auok190x_runtime_suspend, auok190x_runtime_resume,
+                          NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(auok190x_suspend, auok190x_resume)
+};
+EXPORT_SYMBOL_GPL(auok190x_pm);
+
+/*
+ * Common probe and remove code
+ */
+
+/*
+ * Common probe: validates the board callbacks, powers up the controller,
+ * allocates the framebuffer and deferred-io state and registers the
+ * fb device plus its sysfs attributes.
+ * Returns 0 on success or a negative errno; on failure all acquired
+ * resources are released via the goto-cleanup chain.
+ */
+int __devinit auok190x_common_probe(struct platform_device *pdev,
+                                   struct auok190x_init_data *init)
+{
+       struct auok190x_board *board = init->board;
+       struct auok190xfb_par *par;
+       struct fb_info *info;
+       struct panel_info *panel;
+       int videomemorysize, ret;
+       unsigned char *videomemory;
+
+       /* check board contents */
+       if (!board->init || !board->cleanup || !board->wait_for_rdy
+           || !board->set_ctl || !board->set_hdb || !board->get_hdb
+           || !board->setup_irq)
+               return -EINVAL;
+
+       info = framebuffer_alloc(sizeof(struct auok190xfb_par), &pdev->dev);
+       if (!info)
+               return -ENOMEM;
+
+       par = info->par;
+       par->info = info;
+       par->board = board;
+       par->recover = auok190x_recover;
+       par->update_partial = init->update_partial;
+       par->update_all = init->update_all;
+       par->need_refresh = init->need_refresh;
+       par->init = init->init;
+
+       /* init update modes */
+       par->update_cnt = 0;
+       par->update_mode = -1;
+       par->last_mode = -1;
+       par->flash = 0;
+
+       par->regulator = regulator_get(info->device, "vdd");
+       if (IS_ERR(par->regulator)) {
+               ret = PTR_ERR(par->regulator);
+               dev_err(info->device, "Failed to get regulator: %d\n", ret);
+               goto err_reg;
+       }
+
+       ret = board->init(par);
+       if (ret) {
+               dev_err(info->device, "board init failed, %d\n", ret);
+               goto err_board;
+       }
+
+       ret = gpio_request(board->gpio_nsleep, "AUOK190x sleep");
+       if (ret) {
+               dev_err(info->device, "could not request sleep gpio, %d\n",
+                       ret);
+               goto err_gpio1;
+       }
+
+       ret = gpio_direction_output(board->gpio_nsleep, 0);
+       if (ret) {
+               dev_err(info->device, "could not set sleep gpio, %d\n", ret);
+               goto err_gpio2;
+       }
+
+       ret = gpio_request(board->gpio_nrst, "AUOK190x reset");
+       if (ret) {
+               dev_err(info->device, "could not request reset gpio, %d\n",
+                       ret);
+               goto err_gpio2;
+       }
+
+       ret = gpio_direction_output(board->gpio_nrst, 0);
+       if (ret) {
+               dev_err(info->device, "could not set reset gpio, %d\n", ret);
+               goto err_gpio3;
+       }
+
+       ret = auok190x_power(par, 1);
+       if (ret) {
+               dev_err(info->device, "could not power on the device, %d\n",
+                       ret);
+               goto err_gpio3;
+       }
+
+       mutex_init(&par->io_lock);
+
+       init_waitqueue_head(&par->waitq);
+
+       ret = par->board->setup_irq(par->info);
+       if (ret) {
+               dev_err(info->device, "could not setup ready-irq, %d\n", ret);
+               goto err_irq;
+       }
+
+       /* wait for init to complete */
+       par->board->wait_for_rdy(par);
+
+       /*
+        * From here on the controller can talk to us
+        */
+
+       /* initialise fix, var, resolution and rotation */
+
+       strlcpy(info->fix.id, init->id, 16);
+       info->fix.type = FB_TYPE_PACKED_PIXELS;
+       info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
+       info->fix.xpanstep = 0;
+       info->fix.ypanstep = 0;
+       info->fix.ywrapstep = 0;
+       info->fix.accel = FB_ACCEL_NONE;
+
+       info->var.bits_per_pixel = 8;
+       info->var.grayscale = 1;
+       info->var.red.length = 8;
+       info->var.green.length = 8;
+       info->var.blue.length = 8;
+
+       panel = &panel_table[board->resolution];
+
+       /* if 90 degree rotation, switch width and height */
+       if (board->rotation & 1) {
+               info->var.xres = panel->h;
+               info->var.yres = panel->w;
+               info->var.xres_virtual = panel->h;
+               info->var.yres_virtual = panel->w;
+               info->fix.line_length = panel->h;
+       } else {
+               info->var.xres = panel->w;
+               info->var.yres = panel->h;
+               info->var.xres_virtual = panel->w;
+               info->var.yres_virtual = panel->h;
+               info->fix.line_length = panel->w;
+       }
+
+       par->resolution = board->resolution;
+       par->rotation = board->rotation;
+
+       /* videomemory handling */
+
+       videomemorysize = roundup((panel->w * panel->h), PAGE_SIZE);
+       videomemory = vmalloc(videomemorysize);
+       if (!videomemory) {
+               ret = -ENOMEM;
+               goto err_irq;
+       }
+
+       memset(videomemory, 0, videomemorysize);
+       info->screen_base = (char *)videomemory;
+       info->fix.smem_len = videomemorysize;
+
+       info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
+       info->fbops = &auok190xfb_ops;
+
+       /* deferred io init */
+
+       info->fbdefio = devm_kzalloc(info->device,
+                                    sizeof(struct fb_deferred_io),
+                                    GFP_KERNEL);
+       if (!info->fbdefio) {
+               dev_err(info->device, "Failed to allocate memory\n");
+               ret = -ENOMEM;
+               goto err_defio;
+       }
+
+       dev_dbg(info->device, "targeting %d frames per second\n", board->fps);
+       info->fbdefio->delay = HZ / board->fps;
+       /* statements properly terminated with ';' (were comma operators) */
+       info->fbdefio->first_io = auok190xfb_dpy_first_io;
+       info->fbdefio->deferred_io = auok190xfb_dpy_deferred_io;
+       fb_deferred_io_init(info);
+
+       /* color map */
+
+       ret = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (ret < 0) {
+               dev_err(info->device, "Failed to allocate colormap\n");
+               goto err_cmap;
+       }
+
+       /* controller init */
+
+       par->consecutive_threshold = 100;
+       par->init(par);
+       auok190x_identify(par);
+
+       platform_set_drvdata(pdev, info);
+
+       ret = register_framebuffer(info);
+       if (ret < 0)
+               goto err_regfb;
+
+       ret = sysfs_create_group(&info->device->kobj, &auok190x_attr_group);
+       if (ret)
+               goto err_sysfs;
+
+       dev_info(info->device, "fb%d: %dx%d using %dK of video memory\n",
+                info->node, info->var.xres, info->var.yres,
+                videomemorysize >> 10);
+
+       /* increase autosuspend_delay when we use alternative methods
+        * for runtime_pm
+        */
+       par->autosuspend_delay = (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN)
+                                       ? 1000 : 200;
+
+       pm_runtime_set_active(info->device);
+       pm_runtime_enable(info->device);
+       pm_runtime_set_autosuspend_delay(info->device, par->autosuspend_delay);
+       pm_runtime_use_autosuspend(info->device);
+
+       return 0;
+
+err_sysfs:
+       unregister_framebuffer(info);
+err_regfb:
+       fb_dealloc_cmap(&info->cmap);
+err_cmap:
+       /* info->fbdefio is devm-managed; do NOT kfree() it here, that
+        * would double-free the memory when the device detaches
+        */
+       fb_deferred_io_cleanup(info);
+err_defio:
+       vfree((void *)info->screen_base);
+err_irq:
+       auok190x_power(par, 0);
+err_gpio3:
+       gpio_free(board->gpio_nrst);
+err_gpio2:
+       gpio_free(board->gpio_nsleep);
+err_gpio1:
+       board->cleanup(par);
+err_board:
+       regulator_put(par->regulator);
+err_reg:
+       framebuffer_release(info);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(auok190x_common_probe);
+
+/*
+ * Common remove: tear down everything common_probe set up, in reverse
+ * order. Always returns 0.
+ */
+int __devexit auok190x_common_remove(struct platform_device *pdev)
+{
+       struct fb_info *info = platform_get_drvdata(pdev);
+       struct auok190xfb_par *par = info->par;
+       struct auok190x_board *board = par->board;
+
+       pm_runtime_disable(info->device);
+
+       sysfs_remove_group(&info->device->kobj, &auok190x_attr_group);
+
+       unregister_framebuffer(info);
+
+       fb_dealloc_cmap(&info->cmap);
+
+       fb_deferred_io_cleanup(info);
+       /* info->fbdefio was allocated with devm_kzalloc() and is freed
+        * automatically on device detach; an explicit kfree() here would
+        * cause a double free
+        */
+
+       vfree((void *)info->screen_base);
+
+       auok190x_power(par, 0);
+
+       gpio_free(board->gpio_nrst);
+       gpio_free(board->gpio_nsleep);
+
+       board->cleanup(par);
+
+       regulator_put(par->regulator);
+
+       framebuffer_release(info);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_common_remove);
+
+MODULE_DESCRIPTION("Common code for AUO-K190X controllers");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/auo_k190x.h b/drivers/video/auo_k190x.h
new file mode 100644 (file)
index 0000000..e35af1f
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Private common definitions for AUO-K190X framebuffer drivers
+ *
+ * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * I80 interface specific defines
+ */
+
+#define AUOK190X_I80_CS                        0x01
+#define AUOK190X_I80_DC                        0x02
+#define AUOK190X_I80_WR                        0x03
+#define AUOK190X_I80_OE                        0x04
+
+/*
+ * AUOK190x commands, common to both controllers
+ */
+
+#define AUOK190X_CMD_INIT              0x0000
+#define AUOK190X_CMD_STANDBY           0x0001
+#define AUOK190X_CMD_WAKEUP            0x0002
+#define AUOK190X_CMD_TCON_RESET                0x0003
+#define AUOK190X_CMD_DATA_STOP         0x1002
+#define AUOK190X_CMD_LUT_START         0x1003
+#define AUOK190X_CMD_DISP_REFRESH      0x1004
+#define AUOK190X_CMD_DISP_RESET                0x1005
+#define AUOK190X_CMD_PRE_DISPLAY_START 0x100D
+#define AUOK190X_CMD_PRE_DISPLAY_STOP  0x100F
+#define AUOK190X_CMD_FLASH_W           0x2000
+#define AUOK190X_CMD_FLASH_E           0x2001
+#define AUOK190X_CMD_FLASH_STS         0x2002
+#define AUOK190X_CMD_FRAMERATE         0x3000
+#define AUOK190X_CMD_READ_VERSION      0x4000
+#define AUOK190X_CMD_READ_STATUS       0x4001
+#define AUOK190X_CMD_READ_LUT          0x4003
+#define AUOK190X_CMD_DRIVERTIMING      0x5000
+#define AUOK190X_CMD_LBALANCE          0x5001
+#define AUOK190X_CMD_AGINGMODE         0x6000
+#define AUOK190X_CMD_AGINGEXIT         0x6001
+
+/*
+ * Common settings for AUOK190X_CMD_INIT
+ */
+
+#define AUOK190X_INIT_DATA_FILTER      (0 << 12)
+#define AUOK190X_INIT_DATA_BYPASS      (1 << 12)
+#define AUOK190X_INIT_INVERSE_WHITE    (0 << 9)
+#define AUOK190X_INIT_INVERSE_BLACK    (1 << 9)
+#define AUOK190X_INIT_SCAN_DOWN                (0 << 1)
+#define AUOK190X_INIT_SCAN_UP          (1 << 1)
+#define AUOK190X_INIT_SHIFT_LEFT       (0 << 0)
+#define AUOK190X_INIT_SHIFT_RIGHT      (1 << 0)
+
+/* Common bits to pixels
+ *   Mode      15-12   11-8    7-4     3-0
+ *   format0   4       3       2       1
+ *   format1   3       4       1       2
+ */
+
+#define AUOK190X_INIT_FORMAT0          0
+#define AUOK190X_INIT_FORMAT1          (1 << 6)
+
+/*
+ * settings for AUOK190X_CMD_RESET
+ */
+
+#define AUOK190X_RESET_TCON            (0 << 0)
+#define AUOK190X_RESET_NORMAL          (1 << 0)
+#define AUOK190X_RESET_PON             (1 << 1)
+
+/*
+ * AUOK190X_CMD_VERSION
+ */
+
+#define AUOK190X_VERSION_TEMP_MASK             (0x1ff)
+#define AUOK190X_VERSION_EPD_MASK              (0xff)
+#define AUOK190X_VERSION_SIZE_INT(_val)                ((_val & 0xfc00) >> 10)
+#define AUOK190X_VERSION_SIZE_FLOAT(_val)      ((_val & 0x3c0) >> 6)
+#define AUOK190X_VERSION_MODEL(_val)           (_val & 0x3f)
+#define AUOK190X_VERSION_LUT(_val)             (_val & 0xff)
+#define AUOK190X_VERSION_TCON(_val)            ((_val & 0xff00) >> 8)
+
+/*
+ * update modes for CMD_PARTIALDISP on K1900 and CMD_DDMA on K1901
+ */
+
+#define AUOK190X_UPDATE_MODE(_res)             ((_res & 0x7) << 12)
+#define AUOK190X_UPDATE_NONFLASH               (1 << 15)
+
+/*
+ * track panel specific parameters for common init
+ */
+
+struct auok190x_init_data {
+       char *id;
+       struct auok190x_board *board;
+
+       void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2);
+       void (*update_all)(struct auok190xfb_par *par);
+       bool (*need_refresh)(struct auok190xfb_par *par);
+       void (*init)(struct auok190xfb_par *par);
+};
+
+
+extern void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data);
+extern int auok190x_send_command(struct auok190xfb_par *par, u16 data);
+extern void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
+                                        int argc, u16 *argv);
+extern int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
+                                 int argc, u16 *argv);
+extern void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par,
+                                               u16 cmd, int argc, u16 *argv,
+                                               int size, u16 *data);
+extern int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
+                                       int argc, u16 *argv, int size,
+                                       u16 *data);
+extern int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
+                                 int argc, u16 *argv);
+
+extern int auok190x_common_probe(struct platform_device *pdev,
+                                struct auok190x_init_data *init);
+extern int auok190x_common_remove(struct platform_device *pdev);
+
+extern const struct dev_pm_ops auok190x_pm;
index af16884491edf45cf3105f79e340a6b912757541..fa2b03750316b175f36f72d6f7cf8cf8a69bc375 100644 (file)
@@ -184,6 +184,18 @@ config BACKLIGHT_GENERIC
          known as the Corgi backlight driver. If you have a Sharp Zaurus
          SL-C7xx, SL-Cxx00 or SL-6000x say y.
 
+config BACKLIGHT_LM3533
+       tristate "Backlight Driver for LM3533"
+       depends on BACKLIGHT_CLASS_DEVICE
+       depends on MFD_LM3533
+       help
+         Say Y to enable the backlight driver for National Semiconductor / TI
+         LM3533 Lighting Power chips.
+
+         The backlights can be controlled directly, through PWM input, or by
+         the ambient-light-sensor interface. The chip supports 256 brightness
+         levels.
+
 config BACKLIGHT_LOCOMO
        tristate "Sharp LOCOMO LCD/Backlight Driver"
        depends on SHARP_LOCOMO
index 36855ae887d6ae9f56a85a0b02b24a3afa271c83..a2ac9cfbaf6bf1869e1c2b020b597952d4a7633a 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_BACKLIGHT_EP93XX)        += ep93xx_bl.o
 obj-$(CONFIG_BACKLIGHT_GENERIC)        += generic_bl.o
 obj-$(CONFIG_BACKLIGHT_HP700)  += jornada720_bl.o
 obj-$(CONFIG_BACKLIGHT_HP680)  += hp680_bl.o
+obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
 obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
 obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
 obj-$(CONFIG_BACKLIGHT_OMAP1)  += omap1_bl.o
index 4911ea7989c82553dd90558a8afc4637da8c539b..df5db99af23d7cfe711b6d155766ca362d89a7dd 100644 (file)
@@ -160,7 +160,7 @@ static ssize_t adp5520_store(struct device *dev, const char *buf,
        unsigned long val;
        int ret;
 
-       ret = strict_strtoul(buf, 10, &val);
+       ret = kstrtoul(buf, 10, &val);
        if (ret)
                return ret;
 
@@ -214,7 +214,7 @@ static ssize_t adp5520_bl_daylight_max_store(struct device *dev,
        struct adp5520_bl *data = dev_get_drvdata(dev);
        int ret;
 
-       ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+       ret = kstrtoul(buf, 10, &data->cached_daylight_max);
        if (ret < 0)
                return ret;
 
index 550dbf0bb896f9b64083b6673a8da0359ef53912..77d1fdba597fb561037d865176f261989b908845 100644 (file)
@@ -222,7 +222,8 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
        struct led_info *cur_led;
        int ret, i;
 
-       led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+       led = devm_kzalloc(&client->dev, sizeof(*led) * pdata->num_leds,
+                               GFP_KERNEL);
        if (led == NULL) {
                dev_err(&client->dev, "failed to alloc memory\n");
                return -ENOMEM;
@@ -236,7 +237,7 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
 
        if (ret) {
                dev_err(&client->dev, "failed to write\n");
-               goto err_free;
+               return ret;
        }
 
        for (i = 0; i < pdata->num_leds; ++i) {
@@ -291,9 +292,6 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
                cancel_work_sync(&led[i].work);
        }
 
- err_free:
-       kfree(led);
-
        return ret;
 }
 
@@ -309,7 +307,6 @@ static int __devexit adp8860_led_remove(struct i2c_client *client)
                cancel_work_sync(&data->led[i].work);
        }
 
-       kfree(data->led);
        return 0;
 }
 #else
@@ -451,7 +448,7 @@ static ssize_t adp8860_store(struct device *dev, const char *buf,
        unsigned long val;
        int ret;
 
-       ret = strict_strtoul(buf, 10, &val);
+       ret = kstrtoul(buf, 10, &val);
        if (ret)
                return ret;
 
@@ -501,7 +498,7 @@ static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct adp8860_bl *data = dev_get_drvdata(dev);
-       int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+       int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
        if (ret)
                return ret;
 
@@ -608,7 +605,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
        uint8_t reg_val;
        int ret;
 
-       ret = strict_strtoul(buf, 10, &val);
+       ret = kstrtoul(buf, 10, &val);
        if (ret)
                return ret;
 
@@ -675,13 +672,13 @@ static int __devinit adp8860_probe(struct i2c_client *client,
                return -EINVAL;
        }
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;
 
        ret = adp8860_read(client, ADP8860_MFDVID, &reg_val);
        if (ret < 0)
-               goto out2;
+               return ret;
 
        switch (ADP8860_MANID(reg_val)) {
        case ADP8863_MANUFID:
@@ -694,8 +691,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
                break;
        default:
                dev_err(&client->dev, "failed to probe\n");
-               ret = -ENODEV;
-               goto out2;
+               return -ENODEV;
        }
 
        /* It's confirmed that the DEVID field is actually a REVID */
@@ -717,8 +713,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
                        &client->dev, data, &adp8860_bl_ops, &props);
        if (IS_ERR(bl)) {
                dev_err(&client->dev, "failed to register backlight\n");
-               ret = PTR_ERR(bl);
-               goto out2;
+               return PTR_ERR(bl);
        }
 
        bl->props.brightness = ADP8860_MAX_BRIGHTNESS;
@@ -756,8 +751,6 @@ out:
                        &adp8860_bl_attr_group);
 out1:
        backlight_device_unregister(bl);
-out2:
-       kfree(data);
 
        return ret;
 }
@@ -776,7 +769,6 @@ static int __devexit adp8860_remove(struct i2c_client *client)
                        &adp8860_bl_attr_group);
 
        backlight_device_unregister(data->bl);
-       kfree(data);
 
        return 0;
 }
index 9be58c6f18f10d10c0f0ecff370c3d8e3cf0f3e3..edf7f91c8e612e7da24aa6d313cd67ff56cf9dbc 100644 (file)
@@ -244,8 +244,8 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
        struct led_info *cur_led;
        int ret, i;
 
-
-       led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
+       led = devm_kzalloc(&client->dev, pdata->num_leds * sizeof(*led),
+                               GFP_KERNEL);
        if (led == NULL) {
                dev_err(&client->dev, "failed to alloc memory\n");
                return -ENOMEM;
@@ -253,17 +253,17 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
 
        ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law);
        if (ret)
-               goto err_free;
+               return ret;
 
        ret = adp8870_write(client, ADP8870_ISCT1,
                        (pdata->led_on_time & 0x3) << 6);
        if (ret)
-               goto err_free;
+               return ret;
 
        ret = adp8870_write(client, ADP8870_ISCF,
                        FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
        if (ret)
-               goto err_free;
+               return ret;
 
        for (i = 0; i < pdata->num_leds; ++i) {
                cur_led = &pdata->leds[i];
@@ -317,9 +317,6 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
                cancel_work_sync(&led[i].work);
        }
 
- err_free:
-       kfree(led);
-
        return ret;
 }
 
@@ -335,7 +332,6 @@ static int __devexit adp8870_led_remove(struct i2c_client *client)
                cancel_work_sync(&data->led[i].work);
        }
 
-       kfree(data->led);
        return 0;
 }
 #else
@@ -572,7 +568,7 @@ static ssize_t adp8870_store(struct device *dev, const char *buf,
        unsigned long val;
        int ret;
 
-       ret = strict_strtoul(buf, 10, &val);
+       ret = kstrtoul(buf, 10, &val);
        if (ret)
                return ret;
 
@@ -652,7 +648,7 @@ static ssize_t adp8870_bl_l1_daylight_max_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct adp8870_bl *data = dev_get_drvdata(dev);
-       int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+       int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
        if (ret)
                return ret;
 
@@ -794,7 +790,7 @@ static ssize_t adp8870_bl_ambient_light_zone_store(struct device *dev,
        uint8_t reg_val;
        int ret;
 
-       ret = strict_strtoul(buf, 10, &val);
+       ret = kstrtoul(buf, 10, &val);
        if (ret)
                return ret;
 
@@ -874,7 +870,7 @@ static int __devinit adp8870_probe(struct i2c_client *client,
                return -ENODEV;
        }
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;
 
@@ -894,8 +890,7 @@ static int __devinit adp8870_probe(struct i2c_client *client,
                        &client->dev, data, &adp8870_bl_ops, &props);
        if (IS_ERR(bl)) {
                dev_err(&client->dev, "failed to register backlight\n");
-               ret = PTR_ERR(bl);
-               goto out2;
+               return PTR_ERR(bl);
        }
 
        data->bl = bl;
@@ -930,8 +925,6 @@ out:
                        &adp8870_bl_attr_group);
 out1:
        backlight_device_unregister(bl);
-out2:
-       kfree(data);
 
        return ret;
 }
@@ -950,7 +943,6 @@ static int __devexit adp8870_remove(struct i2c_client *client)
                        &adp8870_bl_attr_group);
 
        backlight_device_unregister(data->bl);
-       kfree(data);
 
        return 0;
 }
index 7bdadc790117c977e5b23727eb70dac4fac51309..3729238e70963d7e2d6d7e25ec79cf5a5f7288db 100644 (file)
@@ -482,7 +482,7 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
        struct backlight_device *bd = NULL;
        struct backlight_properties props;
 
-       lcd = kzalloc(sizeof(struct ams369fg06), GFP_KERNEL);
+       lcd = devm_kzalloc(&spi->dev, sizeof(struct ams369fg06), GFP_KERNEL);
        if (!lcd)
                return -ENOMEM;
 
@@ -492,7 +492,7 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
        ret = spi_setup(spi);
        if (ret < 0) {
                dev_err(&spi->dev, "spi setup failed.\n");
-               goto out_free_lcd;
+               return ret;
        }
 
        lcd->spi = spi;
@@ -501,15 +501,13 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
        lcd->lcd_pd = spi->dev.platform_data;
        if (!lcd->lcd_pd) {
                dev_err(&spi->dev, "platform data is NULL\n");
-               goto out_free_lcd;
+               return -EFAULT;
        }
 
        ld = lcd_device_register("ams369fg06", &spi->dev, lcd,
                &ams369fg06_lcd_ops);
-       if (IS_ERR(ld)) {
-               ret = PTR_ERR(ld);
-               goto out_free_lcd;
-       }
+       if (IS_ERR(ld))
+               return PTR_ERR(ld);
 
        lcd->ld = ld;
 
@@ -547,8 +545,6 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
 
 out_lcd_unregister:
        lcd_device_unregister(ld);
-out_free_lcd:
-       kfree(lcd);
        return ret;
 }
 
@@ -559,7 +555,6 @@ static int __devexit ams369fg06_remove(struct spi_device *spi)
        ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
        backlight_device_unregister(lcd->bd);
        lcd_device_unregister(lcd->ld);
-       kfree(lcd);
 
        return 0;
 }
@@ -619,7 +614,6 @@ static void ams369fg06_shutdown(struct spi_device *spi)
 static struct spi_driver ams369fg06_driver = {
        .driver = {
                .name   = "ams369fg06",
-               .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
        },
        .probe          = ams369fg06_probe,
index a523b255e124cc3ff44e4b39d15c9ba4a6f5761a..9dc73ac3709a63b912ffce6c8eaf08067db321a4 100644 (file)
@@ -16,6 +16,8 @@
  *  get at the firmware code in order to figure out what it's actually doing.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -25,6 +27,7 @@
 #include <linux/pci.h>
 #include <linux/acpi.h>
 #include <linux/atomic.h>
+#include <linux/apple_bl.h>
 
 static struct backlight_device *apple_backlight_device;
 
@@ -39,8 +42,6 @@ struct hw_data {
 
 static const struct hw_data *hw_data;
 
-#define DRIVER "apple_backlight: "
-
 /* Module parameters. */
 static int debug;
 module_param_named(debug, debug, int, 0644);
@@ -60,8 +61,7 @@ static int intel_chipset_send_intensity(struct backlight_device *bd)
        int intensity = bd->props.brightness;
 
        if (debug)
-               printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
-                      intensity);
+               pr_debug("setting brightness to %d\n", intensity);
 
        intel_chipset_set_brightness(intensity);
        return 0;
@@ -76,8 +76,7 @@ static int intel_chipset_get_intensity(struct backlight_device *bd)
        intensity = inb(0xb3) >> 4;
 
        if (debug)
-               printk(KERN_DEBUG DRIVER "read brightness of %d\n",
-                      intensity);
+               pr_debug("read brightness of %d\n", intensity);
 
        return intensity;
 }
@@ -107,8 +106,7 @@ static int nvidia_chipset_send_intensity(struct backlight_device *bd)
        int intensity = bd->props.brightness;
 
        if (debug)
-               printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
-                      intensity);
+               pr_debug("setting brightness to %d\n", intensity);
 
        nvidia_chipset_set_brightness(intensity);
        return 0;
@@ -123,8 +121,7 @@ static int nvidia_chipset_get_intensity(struct backlight_device *bd)
        intensity = inb(0x52f) >> 4;
 
        if (debug)
-               printk(KERN_DEBUG DRIVER "read brightness of %d\n",
-                      intensity);
+               pr_debug("read brightness of %d\n", intensity);
 
        return intensity;
 }
@@ -149,7 +146,7 @@ static int __devinit apple_bl_add(struct acpi_device *dev)
        host = pci_get_bus_and_slot(0, 0);
 
        if (!host) {
-               printk(KERN_ERR DRIVER "unable to find PCI host\n");
+               pr_err("unable to find PCI host\n");
                return -ENODEV;
        }
 
@@ -161,7 +158,7 @@ static int __devinit apple_bl_add(struct acpi_device *dev)
        pci_dev_put(host);
 
        if (!hw_data) {
-               printk(KERN_ERR DRIVER "unknown hardware\n");
+               pr_err("unknown hardware\n");
                return -ENODEV;
        }
 
index bf5b1ece71605d701516f599af7ef1ce18751a11..297db2fa91f58e556873a114b812c214e650dca8 100644 (file)
@@ -5,6 +5,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/device.h>
@@ -123,7 +125,7 @@ static ssize_t backlight_store_power(struct device *dev,
        rc = -ENXIO;
        mutex_lock(&bd->ops_lock);
        if (bd->ops) {
-               pr_debug("backlight: set power to %lu\n", power);
+               pr_debug("set power to %lu\n", power);
                if (bd->props.power != power) {
                        bd->props.power = power;
                        backlight_update_status(bd);
@@ -161,8 +163,7 @@ static ssize_t backlight_store_brightness(struct device *dev,
                if (brightness > bd->props.max_brightness)
                        rc = -EINVAL;
                else {
-                       pr_debug("backlight: set brightness to %lu\n",
-                                brightness);
+                       pr_debug("set brightness to %lu\n", brightness);
                        bd->props.brightness = brightness;
                        backlight_update_status(bd);
                        rc = count;
@@ -378,8 +379,8 @@ static int __init backlight_class_init(void)
 {
        backlight_class = class_create(THIS_MODULE, "backlight");
        if (IS_ERR(backlight_class)) {
-               printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n",
-                               PTR_ERR(backlight_class));
+               pr_warn("Unable to create backlight class; errno = %ld\n",
+                       PTR_ERR(backlight_class));
                return PTR_ERR(backlight_class);
        }
 
index 6dab13fe562ee8ed2a43fd42370b68eb81d76a15..23d732677ba177e6594818dbc98bf17fb5ada229 100644 (file)
@@ -544,7 +544,7 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
                return -EINVAL;
        }
 
-       lcd = kzalloc(sizeof(struct corgi_lcd), GFP_KERNEL);
+       lcd = devm_kzalloc(&spi->dev, sizeof(struct corgi_lcd), GFP_KERNEL);
        if (!lcd) {
                dev_err(&spi->dev, "failed to allocate memory\n");
                return -ENOMEM;
@@ -554,10 +554,9 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
 
        lcd->lcd_dev = lcd_device_register("corgi_lcd", &spi->dev,
                                        lcd, &corgi_lcd_ops);
-       if (IS_ERR(lcd->lcd_dev)) {
-               ret = PTR_ERR(lcd->lcd_dev);
-               goto err_free_lcd;
-       }
+       if (IS_ERR(lcd->lcd_dev))
+               return PTR_ERR(lcd->lcd_dev);
+
        lcd->power = FB_BLANK_POWERDOWN;
        lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA;
 
@@ -591,8 +590,6 @@ err_unregister_bl:
        backlight_device_unregister(lcd->bl_dev);
 err_unregister_lcd:
        lcd_device_unregister(lcd->lcd_dev);
-err_free_lcd:
-       kfree(lcd);
        return ret;
 }
 
@@ -613,7 +610,6 @@ static int __devexit corgi_lcd_remove(struct spi_device *spi)
 
        corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN);
        lcd_device_unregister(lcd->lcd_dev);
-       kfree(lcd);
 
        return 0;
 }
index 22489eb5f3e0beb525c3965df1d1186c1ee914ca..37bae801e23bfa5e9c8085ef6bae47648730ac14 100644 (file)
@@ -27,6 +27,8 @@
  *   Alan Hourihane <alanh-at-tungstengraphics-dot-com>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -180,14 +182,13 @@ static int cr_backlight_probe(struct platform_device *pdev)
        lpc_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
                                        CRVML_DEVICE_LPC, NULL);
        if (!lpc_dev) {
-               printk("INTEL CARILLO RANCH LPC not found.\n");
+               pr_err("INTEL CARILLO RANCH LPC not found.\n");
                return -ENODEV;
        }
 
        pci_read_config_byte(lpc_dev, CRVML_REG_GPIOEN, &dev_en);
        if (!(dev_en & CRVML_GPIOEN_BIT)) {
-               printk(KERN_ERR
-                      "Carillo Ranch GPIO device was not enabled.\n");
+               pr_err("Carillo Ranch GPIO device was not enabled.\n");
                pci_dev_put(lpc_dev);
                return -ENODEV;
        }
@@ -270,7 +271,7 @@ static int __init cr_backlight_init(void)
                return PTR_ERR(crp);
        }
 
-       printk("Carillo Ranch Backlight Driver Initialized.\n");
+       pr_info("Carillo Ranch Backlight Driver Initialized.\n");
 
        return 0;
 }
index 30e19681a30b452a9c06181b83d2f448c10c4db1..573c7ece0fde88f7b1990bd3989f17ccd8a86ed9 100644 (file)
@@ -136,6 +136,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
                da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2,
                                DA9034_WLED_ISET(pdata->output_current));
 
+       memset(&props, 0, sizeof(props));
        props.type = BACKLIGHT_RAW;
        props.max_brightness = max_brightness;
        bl = backlight_device_register(pdev->name, data->da903x_dev, data,
index 9ce6170c186079414dcb62123100e7786b032721..8c660fcd250da09f446e07f9e0dfce3b5677a128 100644 (file)
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -106,7 +108,7 @@ static int genericbl_probe(struct platform_device *pdev)
 
        generic_backlight_device = bd;
 
-       printk("Generic Backlight Driver Initialized.\n");
+       pr_info("Generic Backlight Driver Initialized.\n");
        return 0;
 }
 
@@ -120,7 +122,7 @@ static int genericbl_remove(struct platform_device *pdev)
 
        backlight_device_unregister(bd);
 
-       printk("Generic Backlight Driver Unloaded\n");
+       pr_info("Generic Backlight Driver Unloaded\n");
        return 0;
 }
 
index 5118a9f029aba5a872c231239d9f86c52738b73c..6c9399341bcf4aefcac58ffacae39f6b83ac5247 100644 (file)
@@ -220,7 +220,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
 
        /* allocate and initialse our state */
 
-       ili = kzalloc(sizeof(struct ili9320), GFP_KERNEL);
+       ili = devm_kzalloc(&spi->dev, sizeof(struct ili9320), GFP_KERNEL);
        if (ili == NULL) {
                dev_err(dev, "no memory for device\n");
                return -ENOMEM;
@@ -240,8 +240,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
        lcd = lcd_device_register("ili9320", dev, ili, &ili9320_ops);
        if (IS_ERR(lcd)) {
                dev_err(dev, "failed to register lcd device\n");
-               ret = PTR_ERR(lcd);
-               goto err_free;
+               return PTR_ERR(lcd);
        }
 
        ili->lcd = lcd;
@@ -259,9 +258,6 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
  err_unregister:
        lcd_device_unregister(lcd);
 
- err_free:
-       kfree(ili);
-
        return ret;
 }
 
@@ -272,7 +268,6 @@ int __devexit ili9320_remove(struct ili9320 *ili)
        ili9320_power(ili, FB_BLANK_POWERDOWN);
 
        lcd_device_unregister(ili->lcd);
-       kfree(ili);
 
        return 0;
 }
index 2f8af5d786abbb5971aed6cad8c153229a4b4b84..16f593b64427a606093f1c14dce6498ab791a5cb 100644 (file)
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/backlight.h>
 #include <linux/device.h>
 #include <linux/fb.h>
@@ -38,7 +40,7 @@ static int jornada_bl_get_brightness(struct backlight_device *bd)
        ret = jornada_ssp_byte(GETBRIGHTNESS);
 
        if (jornada_ssp_byte(GETBRIGHTNESS) != TXDUMMY) {
-               printk(KERN_ERR "bl : get brightness timeout\n");
+               pr_err("get brightness timeout\n");
                jornada_ssp_end();
                return -ETIMEDOUT;
        } else /* exchange txdummy for value */
@@ -59,7 +61,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
        if ((bd->props.power != FB_BLANK_UNBLANK) || (bd->props.fb_blank != FB_BLANK_UNBLANK)) {
                ret = jornada_ssp_byte(BRIGHTNESSOFF);
                if (ret != TXDUMMY) {
-                       printk(KERN_INFO "bl : brightness off timeout\n");
+                       pr_info("brightness off timeout\n");
                        /* turn off backlight */
                        PPSR &= ~PPC_LDD1;
                        PPDR |= PPC_LDD1;
@@ -70,7 +72,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
 
                /* send command to our mcu */
                if (jornada_ssp_byte(SETBRIGHTNESS) != TXDUMMY) {
-                       printk(KERN_INFO "bl : failed to set brightness\n");
+                       pr_info("failed to set brightness\n");
                        ret = -ETIMEDOUT;
                        goto out;
                }
@@ -81,7 +83,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
                   but due to physical layout it is equal to 0, so we simply
                   invert the value (MAX VALUE - NEW VALUE). */
                if (jornada_ssp_byte(BL_MAX_BRIGHT - bd->props.brightness) != TXDUMMY) {
-                       printk(KERN_ERR "bl : set brightness failed\n");
+                       pr_err("set brightness failed\n");
                        ret = -ETIMEDOUT;
                }
 
@@ -113,7 +115,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
 
        if (IS_ERR(bd)) {
                ret = PTR_ERR(bd);
-               printk(KERN_ERR "bl : failed to register device, err=%x\n", ret);
+               pr_err("failed to register device, err=%x\n", ret);
                return ret;
        }
 
@@ -125,7 +127,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
        jornada_bl_update_status(bd);
 
        platform_set_drvdata(pdev, bd);
-       printk(KERN_INFO "HP Jornada 700 series backlight driver\n");
+       pr_info("HP Jornada 700 series backlight driver\n");
 
        return 0;
 }
index 22d231a17e3c4649652ec1992567111e5b70cd6c..635b30523fd598556f674ff31c8d46031475718f 100644 (file)
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/device.h>
 #include <linux/fb.h>
 #include <linux/kernel.h>
@@ -44,7 +46,7 @@ static int jornada_lcd_get_contrast(struct lcd_device *dev)
        jornada_ssp_start();
 
        if (jornada_ssp_byte(GETCONTRAST) != TXDUMMY) {
-               printk(KERN_ERR "lcd: get contrast failed\n");
+               pr_err("get contrast failed\n");
                jornada_ssp_end();
                return -ETIMEDOUT;
        } else {
@@ -65,7 +67,7 @@ static int jornada_lcd_set_contrast(struct lcd_device *dev, int value)
 
        /* push the new value */
        if (jornada_ssp_byte(value) != TXDUMMY) {
-               printk(KERN_ERR "lcd : set contrast failed\n");
+               pr_err("set contrast failed\n");
                jornada_ssp_end();
                return -ETIMEDOUT;
        }
@@ -103,7 +105,7 @@ static int jornada_lcd_probe(struct platform_device *pdev)
 
        if (IS_ERR(lcd_device)) {
                ret = PTR_ERR(lcd_device);
-               printk(KERN_ERR "lcd : failed to register device\n");
+               pr_err("failed to register device\n");
                return ret;
        }
 
index 6022b67285ecd63e9bb799e082bb00a4e445ff52..40f606a860934a7bb21fab7fb60b457a390e1c2a 100644 (file)
@@ -11,6 +11,8 @@
  * published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
@@ -159,7 +161,8 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
                return -EINVAL;
        }
 
-       priv = kzalloc(sizeof(struct l4f00242t03_priv), GFP_KERNEL);
+       priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv),
+                               GFP_KERNEL);
 
        if (priv == NULL) {
                dev_err(&spi->dev, "No memory for this device.\n");
@@ -177,7 +180,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
        if (ret) {
                dev_err(&spi->dev,
                        "Unable to get the lcd l4f00242t03 reset gpio.\n");
-               goto err;
+               return ret;
        }
 
        ret = gpio_request_one(pdata->data_enable_gpio, GPIOF_OUT_INIT_LOW,
@@ -185,7 +188,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
        if (ret) {
                dev_err(&spi->dev,
                        "Unable to get the lcd l4f00242t03 data en gpio.\n");
-               goto err2;
+               goto err;
        }
 
        priv->io_reg = regulator_get(&spi->dev, "vdd");
@@ -193,7 +196,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
                ret = PTR_ERR(priv->io_reg);
                dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
                       __func__);
-               goto err3;
+               goto err2;
        }
 
        priv->core_reg = regulator_get(&spi->dev, "vcore");
@@ -201,14 +204,14 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
                ret = PTR_ERR(priv->core_reg);
                dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
                       __func__);
-               goto err4;
+               goto err3;
        }
 
        priv->ld = lcd_device_register("l4f00242t03",
                                        &spi->dev, priv, &l4f_ops);
        if (IS_ERR(priv->ld)) {
                ret = PTR_ERR(priv->ld);
-               goto err5;
+               goto err4;
        }
 
        /* Init the LCD */
@@ -220,16 +223,14 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
 
        return 0;
 
-err5:
-       regulator_put(priv->core_reg);
 err4:
-       regulator_put(priv->io_reg);
+       regulator_put(priv->core_reg);
 err3:
-       gpio_free(pdata->data_enable_gpio);
+       regulator_put(priv->io_reg);
 err2:
-       gpio_free(pdata->reset_gpio);
+       gpio_free(pdata->data_enable_gpio);
 err:
-       kfree(priv);
+       gpio_free(pdata->reset_gpio);
 
        return ret;
 }
@@ -250,8 +251,6 @@ static int __devexit l4f00242t03_remove(struct spi_device *spi)
        regulator_put(priv->io_reg);
        regulator_put(priv->core_reg);
 
-       kfree(priv);
-
        return 0;
 }
 
index 79c1b0d609a809e189f43515ee20f648a7d2e8fa..a5d0d024bb92939ec9cdefbd6788febabebfb337 100644 (file)
@@ -5,6 +5,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/device.h>
@@ -32,6 +34,8 @@ static int fb_notifier_callback(struct notifier_block *self,
        case FB_EVENT_BLANK:
        case FB_EVENT_MODE_CHANGE:
        case FB_EVENT_MODE_CHANGE_ALL:
+       case FB_EARLY_EVENT_BLANK:
+       case FB_R_EARLY_EVENT_BLANK:
                break;
        default:
                return 0;
@@ -46,6 +50,14 @@ static int fb_notifier_callback(struct notifier_block *self,
                if (event == FB_EVENT_BLANK) {
                        if (ld->ops->set_power)
                                ld->ops->set_power(ld, *(int *)evdata->data);
+               } else if (event == FB_EARLY_EVENT_BLANK) {
+                       if (ld->ops->early_set_power)
+                               ld->ops->early_set_power(ld,
+                                               *(int *)evdata->data);
+               } else if (event == FB_R_EARLY_EVENT_BLANK) {
+                       if (ld->ops->r_early_set_power)
+                               ld->ops->r_early_set_power(ld,
+                                               *(int *)evdata->data);
                } else {
                        if (ld->ops->set_mode)
                                ld->ops->set_mode(ld, evdata->data);
@@ -106,7 +118,7 @@ static ssize_t lcd_store_power(struct device *dev,
 
        mutex_lock(&ld->ops_lock);
        if (ld->ops && ld->ops->set_power) {
-               pr_debug("lcd: set power to %lu\n", power);
+               pr_debug("set power to %lu\n", power);
                ld->ops->set_power(ld, power);
                rc = count;
        }
@@ -142,7 +154,7 @@ static ssize_t lcd_store_contrast(struct device *dev,
 
        mutex_lock(&ld->ops_lock);
        if (ld->ops && ld->ops->set_contrast) {
-               pr_debug("lcd: set contrast to %lu\n", contrast);
+               pr_debug("set contrast to %lu\n", contrast);
                ld->ops->set_contrast(ld, contrast);
                rc = count;
        }
@@ -253,8 +265,8 @@ static int __init lcd_class_init(void)
 {
        lcd_class = class_create(THIS_MODULE, "lcd");
        if (IS_ERR(lcd_class)) {
-               printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n",
-                               PTR_ERR(lcd_class));
+               pr_warn("Unable to create backlight class; errno = %ld\n",
+                       PTR_ERR(lcd_class));
                return PTR_ERR(lcd_class);
        }
 
index efd352be21ae44a553cf8735392261d90552ff19..58f517fb7d40fa2ebd1035b5bf92371f0fd50b0c 100644 (file)
@@ -707,7 +707,7 @@ static int ld9040_probe(struct spi_device *spi)
        struct backlight_device *bd = NULL;
        struct backlight_properties props;
 
-       lcd = kzalloc(sizeof(struct ld9040), GFP_KERNEL);
+       lcd = devm_kzalloc(&spi->dev, sizeof(struct ld9040), GFP_KERNEL);
        if (!lcd)
                return -ENOMEM;
 
@@ -717,7 +717,7 @@ static int ld9040_probe(struct spi_device *spi)
        ret = spi_setup(spi);
        if (ret < 0) {
                dev_err(&spi->dev, "spi setup failed.\n");
-               goto out_free_lcd;
+               return ret;
        }
 
        lcd->spi = spi;
@@ -726,7 +726,7 @@ static int ld9040_probe(struct spi_device *spi)
        lcd->lcd_pd = spi->dev.platform_data;
        if (!lcd->lcd_pd) {
                dev_err(&spi->dev, "platform data is NULL.\n");
-               goto out_free_lcd;
+               return -EFAULT;
        }
 
        mutex_init(&lcd->lock);
@@ -734,13 +734,13 @@ static int ld9040_probe(struct spi_device *spi)
        ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
        if (ret) {
                dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
-               goto out_free_lcd;
+               return ret;
        }
 
        ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
        if (IS_ERR(ld)) {
                ret = PTR_ERR(ld);
-               goto out_free_lcd;
+               goto out_free_regulator;
        }
 
        lcd->ld = ld;
@@ -782,10 +782,9 @@ static int ld9040_probe(struct spi_device *spi)
 
 out_unregister_lcd:
        lcd_device_unregister(lcd->ld);
-out_free_lcd:
+out_free_regulator:
        regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
 
-       kfree(lcd);
        return ret;
 }
 
@@ -797,7 +796,6 @@ static int __devexit ld9040_remove(struct spi_device *spi)
        backlight_device_unregister(lcd->bd);
        lcd_device_unregister(lcd->ld);
        regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
-       kfree(lcd);
 
        return 0;
 }
@@ -846,7 +844,6 @@ static void ld9040_shutdown(struct spi_device *spi)
 static struct spi_driver ld9040_driver = {
        .driver = {
                .name   = "ld9040",
-               .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
        },
        .probe          = ld9040_probe,
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c
new file mode 100644 (file)
index 0000000..bebeb63
--- /dev/null
@@ -0,0 +1,423 @@
+/*
+ * lm3533-bl.c -- LM3533 Backlight driver
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_HVCTRLBANK_COUNT                2
+#define LM3533_BL_MAX_BRIGHTNESS       255
+
+#define LM3533_REG_CTRLBANK_AB_BCONF   0x1a
+
+
+struct lm3533_bl {
+       struct lm3533 *lm3533;
+       struct lm3533_ctrlbank cb;
+       struct backlight_device *bd;
+       int id;
+};
+
+
+static inline int lm3533_bl_get_ctrlbank_id(struct lm3533_bl *bl)
+{
+       return bl->id;
+}
+
+static int lm3533_bl_update_status(struct backlight_device *bd)
+{
+       struct lm3533_bl *bl = bl_get_data(bd);
+       int brightness = bd->props.brightness;
+
+       if (bd->props.power != FB_BLANK_UNBLANK)
+               brightness = 0;
+       if (bd->props.fb_blank != FB_BLANK_UNBLANK)
+               brightness = 0;
+
+       return lm3533_ctrlbank_set_brightness(&bl->cb, (u8)brightness);
+}
+
+static int lm3533_bl_get_brightness(struct backlight_device *bd)
+{
+       struct lm3533_bl *bl = bl_get_data(bd);
+       u8 val;
+       int ret;
+
+       ret = lm3533_ctrlbank_get_brightness(&bl->cb, &val);
+       if (ret)
+               return ret;
+
+       return val;
+}
+
+static const struct backlight_ops lm3533_bl_ops = {
+       .get_brightness = lm3533_bl_get_brightness,
+       .update_status  = lm3533_bl_update_status,
+};
+
+static ssize_t show_id(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct lm3533_bl *bl = dev_get_drvdata(dev);
+
+       return scnprintf(buf, PAGE_SIZE, "%d\n", bl->id);
+}
+
+static ssize_t show_als_channel(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct lm3533_bl *bl = dev_get_drvdata(dev);
+       unsigned channel = lm3533_bl_get_ctrlbank_id(bl);
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", channel);
+}
+
+static ssize_t show_als_en(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct lm3533_bl *bl = dev_get_drvdata(dev);
+       int ctrlbank = lm3533_bl_get_ctrlbank_id(bl);
+       u8 val;
+       u8 mask;
+       bool enable;
+       int ret;
+
+       ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val);
+       if (ret)
+               return ret;
+
+       mask = 1 << (2 * ctrlbank);
+       enable = val & mask;
+
+       return scnprintf(buf, PAGE_SIZE, "%d\n", enable);
+}
+
+static ssize_t store_als_en(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t len)
+{
+       struct lm3533_bl *bl = dev_get_drvdata(dev);
+       int ctrlbank = lm3533_bl_get_ctrlbank_id(bl);
+       int enable;
+       u8 val;
+       u8 mask;
+       int ret;
+
+       if (kstrtoint(buf, 0, &enable))
+               return -EINVAL;
+
+       mask = 1 << (2 * ctrlbank);
+
+       if (enable)
+               val = mask;
+       else
+               val = 0;
+
+       ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val,
+                                                                       mask);
+       if (ret)
+               return ret;
+
+       return len;
+}
+
+static ssize_t show_linear(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct lm3533_bl *bl = dev_get_drvdata(dev);
+       u8 val;
+       u8 mask;
+       int linear;
+       int ret;
+
+       ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val);
+       if (ret)
+               return ret;
+
+       mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1);
+
+       if (val & mask)
+               linear = 1;
+       else
+               linear = 0;
+
+       return scnprintf(buf, PAGE_SIZE, "%x\n", linear);
+}
+
+static ssize_t store_linear(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t len)
+{
+       struct lm3533_bl *bl = dev_get_drvdata(dev);
+       unsigned long linear;
+       u8 mask;
+       u8 val;
+       int ret;
+
+       if (kstrtoul(buf, 0, &linear))
+               return -EINVAL;
+
+       mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1);
+
+       if (linear)
+               val = mask;
+       else
+               val = 0;
+
+       ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val,
+                                                                       mask);
+       if (ret)
+               return ret;
+
+       return len;
+}
+
+static ssize_t show_pwm(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct lm3533_bl *bl = dev_get_drvdata(dev);
+       u8 val;
+       int ret;
+
+       ret = lm3533_ctrlbank_get_pwm(&bl->cb, &val);
+       if (ret)
+               return ret;
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t len)
+{
+       struct lm3533_bl *bl = dev_get_drvdata(dev);
+       u8 val;
+       int ret;
+
+       if (kstrtou8(buf, 0, &val))
+               return -EINVAL;
+
+       ret = lm3533_ctrlbank_set_pwm(&bl->cb, val);
+       if (ret)
+               return ret;
+
+       return len;
+}
+
+static LM3533_ATTR_RO(als_channel);
+static LM3533_ATTR_RW(als_en);
+static LM3533_ATTR_RO(id);
+static LM3533_ATTR_RW(linear);
+static LM3533_ATTR_RW(pwm);
+
+static struct attribute *lm3533_bl_attributes[] = {
+       &dev_attr_als_channel.attr,
+       &dev_attr_als_en.attr,
+       &dev_attr_id.attr,
+       &dev_attr_linear.attr,
+       &dev_attr_pwm.attr,
+       NULL,
+};
+
+static umode_t lm3533_bl_attr_is_visible(struct kobject *kobj,
+                                            struct attribute *attr, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct lm3533_bl *bl = dev_get_drvdata(dev);
+       umode_t mode = attr->mode;
+
+       if (attr == &dev_attr_als_channel.attr ||
+                                       attr == &dev_attr_als_en.attr) {
+               if (!bl->lm3533->have_als)
+                       mode = 0;
+       }
+
+       return mode;
+};
+
+static struct attribute_group lm3533_bl_attribute_group = {
+       .is_visible     = lm3533_bl_attr_is_visible,
+       .attrs          = lm3533_bl_attributes
+};
+
+static int __devinit lm3533_bl_setup(struct lm3533_bl *bl,
+                                       struct lm3533_bl_platform_data *pdata)
+{
+       int ret;
+
+       ret = lm3533_ctrlbank_set_max_current(&bl->cb, pdata->max_current);
+       if (ret)
+               return ret;
+
+       return lm3533_ctrlbank_set_pwm(&bl->cb, pdata->pwm);
+}
+
+static int __devinit lm3533_bl_probe(struct platform_device *pdev)
+{
+       struct lm3533 *lm3533;
+       struct lm3533_bl_platform_data *pdata;
+       struct lm3533_bl *bl;
+       struct backlight_device *bd;
+       struct backlight_properties props;
+       int ret;
+
+       dev_dbg(&pdev->dev, "%s\n", __func__);
+
+       lm3533 = dev_get_drvdata(pdev->dev.parent);
+       if (!lm3533)
+               return -EINVAL;
+
+       pdata = pdev->dev.platform_data;
+       if (!pdata) {
+               dev_err(&pdev->dev, "no platform data\n");
+               return -EINVAL;
+       }
+
+       if (pdev->id < 0 || pdev->id >= LM3533_HVCTRLBANK_COUNT) {
+               dev_err(&pdev->dev, "illegal backlight id %d\n", pdev->id);
+               return -EINVAL;
+       }
+
+       bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+       if (!bl) {
+               dev_err(&pdev->dev,
+                               "failed to allocate memory for backlight\n");
+               return -ENOMEM;
+       }
+
+       bl->lm3533 = lm3533;
+       bl->id = pdev->id;
+
+       bl->cb.lm3533 = lm3533;
+       bl->cb.id = lm3533_bl_get_ctrlbank_id(bl);
+       bl->cb.dev = NULL;                      /* until registered */
+
+       memset(&props, 0, sizeof(props));
+       props.type = BACKLIGHT_RAW;
+       props.max_brightness = LM3533_BL_MAX_BRIGHTNESS;
+       props.brightness = pdata->default_brightness;
+       bd = backlight_device_register(pdata->name, pdev->dev.parent, bl,
+                                               &lm3533_bl_ops, &props);
+       if (IS_ERR(bd)) {
+               dev_err(&pdev->dev, "failed to register backlight device\n");
+               ret = PTR_ERR(bd);
+               goto err_free;
+       }
+
+       bl->bd = bd;
+       bl->cb.dev = &bl->bd->dev;
+
+       platform_set_drvdata(pdev, bl);
+
+       ret = sysfs_create_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "failed to create sysfs attributes\n");
+               goto err_unregister;
+       }
+
+       backlight_update_status(bd);
+
+       ret = lm3533_bl_setup(bl, pdata);
+       if (ret)
+               goto err_sysfs_remove;
+
+       ret = lm3533_ctrlbank_enable(&bl->cb);
+       if (ret)
+               goto err_sysfs_remove;
+
+       return 0;
+
+err_sysfs_remove:
+       sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+err_unregister:
+       backlight_device_unregister(bd);
+err_free:
+       kfree(bl);
+
+       return ret;
+}
+
+static int __devexit lm3533_bl_remove(struct platform_device *pdev)
+{
+       struct lm3533_bl *bl = platform_get_drvdata(pdev);
+       struct backlight_device *bd = bl->bd;
+
+       dev_dbg(&bd->dev, "%s\n", __func__);
+
+       bd->props.power = FB_BLANK_POWERDOWN;
+       bd->props.brightness = 0;
+
+       lm3533_ctrlbank_disable(&bl->cb);
+       sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+       backlight_device_unregister(bd);
+       kfree(bl);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int lm3533_bl_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+       dev_dbg(&pdev->dev, "%s\n", __func__);
+
+       return lm3533_ctrlbank_disable(&bl->cb);
+}
+
+static int lm3533_bl_resume(struct platform_device *pdev)
+{
+       struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+       dev_dbg(&pdev->dev, "%s\n", __func__);
+
+       return lm3533_ctrlbank_enable(&bl->cb);
+}
+#else
+#define lm3533_bl_suspend      NULL
+#define lm3533_bl_resume       NULL
+#endif
+
+static void lm3533_bl_shutdown(struct platform_device *pdev)
+{
+       struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+       dev_dbg(&pdev->dev, "%s\n", __func__);
+
+       lm3533_ctrlbank_disable(&bl->cb);
+}
+
+static struct platform_driver lm3533_bl_driver = {
+       .driver = {
+               .name   = "lm3533-backlight",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = lm3533_bl_probe,
+       .remove         = __devexit_p(lm3533_bl_remove),
+       .shutdown       = lm3533_bl_shutdown,
+       .suspend        = lm3533_bl_suspend,
+       .resume         = lm3533_bl_resume,
+};
+module_platform_driver(lm3533_bl_driver);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Backlight driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lm3533-backlight");
index 4161f9e3982a2a544120142f100e45afa34b5d2c..a9f2c36966f1ff60fcf0a8873c1d01f93017c500 100644 (file)
@@ -168,7 +168,8 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
                        goto err;
        }
 
-       st = kzalloc(sizeof(struct lms283gf05_state), GFP_KERNEL);
+       st = devm_kzalloc(&spi->dev, sizeof(struct lms283gf05_state),
+                               GFP_KERNEL);
        if (st == NULL) {
                dev_err(&spi->dev, "No memory for device state\n");
                ret = -ENOMEM;
@@ -178,7 +179,7 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
        ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops);
        if (IS_ERR(ld)) {
                ret = PTR_ERR(ld);
-               goto err2;
+               goto err;
        }
 
        st->spi = spi;
@@ -193,8 +194,6 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
 
        return 0;
 
-err2:
-       kfree(st);
 err:
        if (pdata != NULL)
                gpio_free(pdata->reset_gpio);
@@ -212,8 +211,6 @@ static int __devexit lms283gf05_remove(struct spi_device *spi)
        if (pdata != NULL)
                gpio_free(pdata->reset_gpio);
 
-       kfree(st);
-
        return 0;
 }
 
index 333949ff3265200f716112be2afaa79e5c480e43..6c0f1ac0d32a93d9f278ad21155d2ca9d81b22d9 100644 (file)
@@ -232,23 +232,20 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
        struct lcd_device *ld;
        int ret;
 
-       lcd = kzalloc(sizeof(struct ltv350qv), GFP_KERNEL);
+       lcd = devm_kzalloc(&spi->dev, sizeof(struct ltv350qv), GFP_KERNEL);
        if (!lcd)
                return -ENOMEM;
 
        lcd->spi = spi;
        lcd->power = FB_BLANK_POWERDOWN;
-       lcd->buffer = kzalloc(8, GFP_KERNEL);
-       if (!lcd->buffer) {
-               ret = -ENOMEM;
-               goto out_free_lcd;
-       }
+       lcd->buffer = devm_kzalloc(&spi->dev, 8, GFP_KERNEL);
+       if (!lcd->buffer)
+               return -ENOMEM;
 
        ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
-       if (IS_ERR(ld)) {
-               ret = PTR_ERR(ld);
-               goto out_free_buffer;
-       }
+       if (IS_ERR(ld))
+               return PTR_ERR(ld);
+
        lcd->ld = ld;
 
        ret = ltv350qv_power(lcd, FB_BLANK_UNBLANK);
@@ -261,10 +258,6 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
 
 out_unregister:
        lcd_device_unregister(ld);
-out_free_buffer:
-       kfree(lcd->buffer);
-out_free_lcd:
-       kfree(lcd);
        return ret;
 }
 
@@ -274,8 +267,6 @@ static int __devexit ltv350qv_remove(struct spi_device *spi)
 
        ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
        lcd_device_unregister(lcd->ld);
-       kfree(lcd->buffer);
-       kfree(lcd);
 
        return 0;
 }
@@ -310,7 +301,6 @@ static void ltv350qv_shutdown(struct spi_device *spi)
 static struct spi_driver ltv350qv_driver = {
        .driver = {
                .name           = "ltv350qv",
-               .bus            = &spi_bus_type,
                .owner          = THIS_MODULE,
        },
 
index 0175bfb08a1ca13ef14412cd1e49d1bb033430d3..bfdc5fbeaa116aa7e919c191eac4dcac2f3985bc 100644 (file)
@@ -18,6 +18,8 @@
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -168,7 +170,7 @@ static int omapbl_probe(struct platform_device *pdev)
        dev->props.brightness = pdata->default_intensity;
        omapbl_update_status(dev);
 
-       printk(KERN_INFO "OMAP LCD backlight initialised\n");
+       pr_info("OMAP LCD backlight initialised\n");
 
        return 0;
 }
index c65853cb9740633ab9d1ff6b5ba7a659fac276e1..c092159f438344dab682d8a1410169b45524d2ad 100644 (file)
@@ -111,6 +111,7 @@ static int __devinit pcf50633_bl_probe(struct platform_device *pdev)
        if (!pcf_bl)
                return -ENOMEM;
 
+       memset(&bl_props, 0, sizeof(bl_props));
        bl_props.type = BACKLIGHT_RAW;
        bl_props.max_brightness = 0x3f;
        bl_props.power = FB_BLANK_UNBLANK;
index 6af183d6465ee80b4892c671fce687b85a6a272a..69b35f02929e470d6d384977b11b141ed1f08620 100644 (file)
@@ -15,6 +15,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -68,13 +70,13 @@ static int progearbl_probe(struct platform_device *pdev)
 
        pmu_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL);
        if (!pmu_dev) {
-               printk("ALI M7101 PMU not found.\n");
+               pr_err("ALI M7101 PMU not found.\n");
                return -ENODEV;
        }
 
        sb_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
        if (!sb_dev) {
-               printk("ALI 1533 SB not found.\n");
+               pr_err("ALI 1533 SB not found.\n");
                ret = -ENODEV;
                goto put_pmu;
        }
index e264f55b257426bd342b9943ee80530af760ce68..6437ae474cf2be4d9b29a16a683c9de51a21fc61 100644 (file)
@@ -741,7 +741,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
        struct backlight_device *bd = NULL;
        struct backlight_properties props;
 
-       lcd = kzalloc(sizeof(struct s6e63m0), GFP_KERNEL);
+       lcd = devm_kzalloc(&spi->dev, sizeof(struct s6e63m0), GFP_KERNEL);
        if (!lcd)
                return -ENOMEM;
 
@@ -751,7 +751,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
        ret = spi_setup(spi);
        if (ret < 0) {
                dev_err(&spi->dev, "spi setup failed.\n");
-               goto out_free_lcd;
+               return ret;
        }
 
        lcd->spi = spi;
@@ -760,14 +760,12 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
        lcd->lcd_pd = (struct lcd_platform_data *)spi->dev.platform_data;
        if (!lcd->lcd_pd) {
                dev_err(&spi->dev, "platform data is NULL.\n");
-               goto out_free_lcd;
+               return -EFAULT;
        }
 
        ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops);
-       if (IS_ERR(ld)) {
-               ret = PTR_ERR(ld);
-               goto out_free_lcd;
-       }
+       if (IS_ERR(ld))
+               return PTR_ERR(ld);
 
        lcd->ld = ld;
 
@@ -824,8 +822,6 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
 
 out_lcd_unregister:
        lcd_device_unregister(ld);
-out_free_lcd:
-       kfree(lcd);
        return ret;
 }
 
@@ -838,7 +834,6 @@ static int __devexit s6e63m0_remove(struct spi_device *spi)
        device_remove_file(&spi->dev, &dev_attr_gamma_mode);
        backlight_device_unregister(lcd->bd);
        lcd_device_unregister(lcd->ld);
-       kfree(lcd);
 
        return 0;
 }
@@ -899,7 +894,6 @@ static void s6e63m0_shutdown(struct spi_device *spi)
 static struct spi_driver s6e63m0_driver = {
        .driver = {
                .name   = "s6e63m0",
-               .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
        },
        .probe          = s6e63m0_probe,
index 2368b8e5f89e99e6248e4f3eab7f7eda27f58e13..02444d042cd53db2972dc9978d11e456665c9ffc 100644 (file)
@@ -349,7 +349,7 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
        if (err)
                return err;
 
-       lcd = kzalloc(sizeof(struct tdo24m), GFP_KERNEL);
+       lcd = devm_kzalloc(&spi->dev, sizeof(struct tdo24m), GFP_KERNEL);
        if (!lcd)
                return -ENOMEM;
 
@@ -357,11 +357,9 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
        lcd->power = FB_BLANK_POWERDOWN;
        lcd->mode = MODE_VGA;   /* default to VGA */
 
-       lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
-       if (lcd->buf == NULL) {
-               kfree(lcd);
+       lcd->buf = devm_kzalloc(&spi->dev, TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
+       if (lcd->buf == NULL)
                return -ENOMEM;
-       }
 
        m = &lcd->msg;
        x = &lcd->xfer;
@@ -383,15 +381,13 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
                break;
        default:
                dev_err(&spi->dev, "Unsupported model");
-               goto out_free;
+               return -EINVAL;
        }
 
        lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev,
                                        lcd, &tdo24m_ops);
-       if (IS_ERR(lcd->lcd_dev)) {
-               err = PTR_ERR(lcd->lcd_dev);
-               goto out_free;
-       }
+       if (IS_ERR(lcd->lcd_dev))
+               return PTR_ERR(lcd->lcd_dev);
 
        dev_set_drvdata(&spi->dev, lcd);
        err = tdo24m_power(lcd, FB_BLANK_UNBLANK);
@@ -402,9 +398,6 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
 
 out_unregister:
        lcd_device_unregister(lcd->lcd_dev);
-out_free:
-       kfree(lcd->buf);
-       kfree(lcd);
        return err;
 }
 
@@ -414,8 +407,6 @@ static int __devexit tdo24m_remove(struct spi_device *spi)
 
        tdo24m_power(lcd, FB_BLANK_POWERDOWN);
        lcd_device_unregister(lcd->lcd_dev);
-       kfree(lcd->buf);
-       kfree(lcd);
 
        return 0;
 }
index 2b241abced43467378ceb3ea68e9633f78644684..0d54e607e82d1bd3f86196944d425a8dee40fdb2 100644 (file)
@@ -82,8 +82,11 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
                const struct i2c_device_id *id)
 {
        struct backlight_properties props;
-       struct tosa_bl_data *data = kzalloc(sizeof(struct tosa_bl_data), GFP_KERNEL);
+       struct tosa_bl_data *data;
        int ret = 0;
+
+       data = devm_kzalloc(&client->dev, sizeof(struct tosa_bl_data),
+                               GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -92,7 +95,7 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
        ret = gpio_request(TOSA_GPIO_BL_C20MA, "backlight");
        if (ret) {
                dev_dbg(&data->bl->dev, "Unable to request gpio!\n");
-               goto err_gpio_bl;
+               return ret;
        }
        ret = gpio_direction_output(TOSA_GPIO_BL_C20MA, 0);
        if (ret)
@@ -122,8 +125,6 @@ err_reg:
        data->bl = NULL;
 err_gpio_dir:
        gpio_free(TOSA_GPIO_BL_C20MA);
-err_gpio_bl:
-       kfree(data);
        return ret;
 }
 
@@ -136,8 +137,6 @@ static int __devexit tosa_bl_remove(struct i2c_client *client)
 
        gpio_free(TOSA_GPIO_BL_C20MA);
 
-       kfree(data);
-
        return 0;
 }
 
index 2231aec23918fec6c1f65125421d1d35c229c759..47823b8efff060b77f1ef4db5bf19554b08e73c7 100644 (file)
@@ -174,7 +174,8 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
        int ret;
        struct tosa_lcd_data *data;
 
-       data = kzalloc(sizeof(struct tosa_lcd_data), GFP_KERNEL);
+       data = devm_kzalloc(&spi->dev, sizeof(struct tosa_lcd_data),
+                               GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -187,7 +188,7 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
 
        ret = spi_setup(spi);
        if (ret < 0)
-               goto err_spi;
+               return ret;
 
        data->spi = spi;
        dev_set_drvdata(&spi->dev, data);
@@ -224,8 +225,6 @@ err_gpio_dir:
        gpio_free(TOSA_GPIO_TG_ON);
 err_gpio_tg:
        dev_set_drvdata(&spi->dev, NULL);
-err_spi:
-       kfree(data);
        return ret;
 }
 
@@ -242,7 +241,6 @@ static int __devexit tosa_lcd_remove(struct spi_device *spi)
 
        gpio_free(TOSA_GPIO_TG_ON);
        dev_set_drvdata(&spi->dev, NULL);
-       kfree(data);
 
        return 0;
 }
index 5d365deb5f8220a31a4e2e0562dbf861d9270f06..9e5517a3a52baeea6546e0d24dc4a8ca2e5ef732 100644 (file)
@@ -194,6 +194,7 @@ static int wm831x_backlight_probe(struct platform_device *pdev)
        data->current_brightness = 0;
        data->isink_reg = isink_reg;
 
+       memset(&props, 0, sizeof(props));
        props.type = BACKLIGHT_RAW;
        props.max_brightness = max_isel;
        bl = backlight_device_register("wm831x", &pdev->dev, data,
index 1a268a294478d3de76613efd6581e542a296acc5..33ea874c87d236e34691a386b0fb8ea4cdd6c607 100644 (file)
@@ -414,14 +414,14 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
                if (ret) {
                        dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n");
                        ret = -EBUSY;
-                       goto out_8;
+                       goto free_fbdev;
                }
        }
 
        if (peripheral_request_list(ppi_pins, DRIVER_NAME)) {
                dev_err(&client->dev, "requesting PPI peripheral failed\n");
                ret = -EFAULT;
-               goto out_8;
+               goto free_gpio;
        }
 
        fbdev->fb_mem =
@@ -432,7 +432,7 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
                dev_err(&client->dev, "couldn't allocate dma buffer (%d bytes)\n",
                       (u32) fbdev->fb_len);
                ret = -ENOMEM;
-               goto out_7;
+               goto free_ppi_pins;
        }
 
        fbdev->info.screen_base = (void *)fbdev->fb_mem;
@@ -464,27 +464,27 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
        if (!fbdev->info.pseudo_palette) {
                dev_err(&client->dev, "failed to allocate pseudo_palette\n");
                ret = -ENOMEM;
-               goto out_6;
+               goto free_fb_mem;
        }
 
        if (fb_alloc_cmap(&fbdev->info.cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
                dev_err(&client->dev, "failed to allocate colormap (%d entries)\n",
                           BFIN_LCD_NBR_PALETTE_ENTRIES);
                ret = -EFAULT;
-               goto out_5;
+               goto free_palette;
        }
 
        if (request_dma(CH_PPI, "BF5xx_PPI_DMA") < 0) {
                dev_err(&client->dev, "unable to request PPI DMA\n");
                ret = -EFAULT;
-               goto out_4;
+               goto free_cmap;
        }
 
        if (request_irq(IRQ_PPI_ERROR, ppi_irq_error, 0,
                        "PPI ERROR", fbdev) < 0) {
                dev_err(&client->dev, "unable to request PPI ERROR IRQ\n");
                ret = -EFAULT;
-               goto out_3;
+               goto free_ch_ppi;
        }
 
        fbdev->open = 0;
@@ -494,14 +494,14 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
 
        if (ret) {
                dev_err(&client->dev, "i2c attach: init error\n");
-               goto out_1;
+               goto free_irq_ppi;
        }
 
 
        if (register_framebuffer(&fbdev->info) < 0) {
                dev_err(&client->dev, "unable to register framebuffer\n");
                ret = -EFAULT;
-               goto out_1;
+               goto free_irq_ppi;
        }
 
        dev_info(&client->dev, "fb%d: %s frame buffer device\n",
@@ -512,7 +512,7 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
        if (!entry) {
                dev_err(&client->dev, "unable to create /proc entry\n");
                ret = -EFAULT;
-               goto out_0;
+               goto free_fb;
        }
 
        entry->read_proc = adv7393_read_proc;
@@ -521,22 +521,25 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
 
        return 0;
 
- out_0:
+free_fb:
        unregister_framebuffer(&fbdev->info);
- out_1:
+free_irq_ppi:
        free_irq(IRQ_PPI_ERROR, fbdev);
- out_3:
+free_ch_ppi:
        free_dma(CH_PPI);
- out_4:
-       dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem,
-                         fbdev->dma_handle);
- out_5:
+free_cmap:
        fb_dealloc_cmap(&fbdev->info.cmap);
- out_6:
+free_palette:
        kfree(fbdev->info.pseudo_palette);
- out_7:
+free_fb_mem:
+       dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem,
+                         fbdev->dma_handle);
+free_ppi_pins:
        peripheral_free_list(ppi_pins);
- out_8:
+free_gpio:
+       if (ANOMALY_05000400)
+               gpio_free(P_IDENT(P_PPI0_FS3));
+free_fbdev:
        kfree(fbdev);
 
        return ret;
index f56699d8122a381047bdcb345d44e1cabbb4414f..eae46f6457e2a59788c1fbd46995cab8ea6739c4 100644 (file)
@@ -1,7 +1,8 @@
 /*
- *  Cobalt server LCD frame buffer driver.
+ *  Cobalt/SEAD3 LCD frame buffer driver.
  *
  *  Copyright (C) 2008  Yoichi Yuasa <yuasa@linux-mips.org>
+ *  Copyright (C) 2012  MIPS Technologies, Inc.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -62,6 +63,7 @@
 #define LCD_CUR_POS(x)         ((x) & LCD_CUR_POS_MASK)
 #define LCD_TEXT_POS(x)                ((x) | LCD_TEXT_MODE)
 
+#ifdef CONFIG_MIPS_COBALT
 static inline void lcd_write_control(struct fb_info *info, u8 control)
 {
        writel((u32)control << 24, info->screen_base);
@@ -81,6 +83,47 @@ static inline u8 lcd_read_data(struct fb_info *info)
 {
        return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24;
 }
+#else
+
+#define LCD_CTL                        0x00
+#define LCD_DATA               0x08
+#define CPLD_STATUS            0x10
+#define CPLD_DATA              0x18
+
+static inline void cpld_wait(struct fb_info *info)
+{
+       do {
+       } while (readl(info->screen_base + CPLD_STATUS) & 1);
+}
+
+static inline void lcd_write_control(struct fb_info *info, u8 control)
+{
+       cpld_wait(info);
+       writel(control, info->screen_base + LCD_CTL);
+}
+
+static inline u8 lcd_read_control(struct fb_info *info)
+{
+       cpld_wait(info);
+       readl(info->screen_base + LCD_CTL);
+       cpld_wait(info);
+       return readl(info->screen_base + CPLD_DATA) & 0xff;
+}
+
+static inline void lcd_write_data(struct fb_info *info, u8 data)
+{
+       cpld_wait(info);
+       writel(data, info->screen_base + LCD_DATA);
+}
+
+static inline u8 lcd_read_data(struct fb_info *info)
+{
+       cpld_wait(info);
+       readl(info->screen_base + LCD_DATA);
+       cpld_wait(info);
+       return readl(info->screen_base + CPLD_DATA) & 0xff;
+}
+#endif
 
 static int lcd_busy_wait(struct fb_info *info)
 {
index f8babbeee27543e9149af7319f42c60ef4808d3b..345d96230978ed33d8d4e2a860d66273f60c35a5 100644 (file)
@@ -507,16 +507,16 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
 
        err = fb_alloc_cmap(&info->cmap, 256, 0);
        if (err)
-               goto failed;
+               goto failed_cmap;
 
        err = ep93xxfb_alloc_videomem(info);
        if (err)
-               goto failed;
+               goto failed_videomem;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                err = -ENXIO;
-               goto failed;
+               goto failed_resource;
        }
 
        /*
@@ -532,7 +532,7 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
        fbi->mmio_base = ioremap(res->start, resource_size(res));
        if (!fbi->mmio_base) {
                err = -ENXIO;
-               goto failed;
+               goto failed_resource;
        }
 
        strcpy(info->fix.id, pdev->name);
@@ -553,24 +553,24 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
        if (err == 0) {
                dev_err(info->dev, "No suitable video mode found\n");
                err = -EINVAL;
-               goto failed;
+               goto failed_mode;
        }
 
        if (mach_info->setup) {
                err = mach_info->setup(pdev);
                if (err)
-                       return err;
+                       goto failed_mode;
        }
 
        err = ep93xxfb_check_var(&info->var, info);
        if (err)
-               goto failed;
+               goto failed_check;
 
        fbi->clk = clk_get(info->dev, NULL);
        if (IS_ERR(fbi->clk)) {
                err = PTR_ERR(fbi->clk);
                fbi->clk = NULL;
-               goto failed;
+               goto failed_check;
        }
 
        ep93xxfb_set_par(info);
@@ -585,15 +585,17 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
        return 0;
 
 failed:
-       if (fbi->clk)
-               clk_put(fbi->clk);
-       if (fbi->mmio_base)
-               iounmap(fbi->mmio_base);
-       ep93xxfb_dealloc_videomem(info);
-       if (&info->cmap)
-               fb_dealloc_cmap(&info->cmap);
+       clk_put(fbi->clk);
+failed_check:
        if (fbi->mach_info->teardown)
                fbi->mach_info->teardown(pdev);
+failed_mode:
+       iounmap(fbi->mmio_base);
+failed_resource:
+       ep93xxfb_dealloc_videomem(info);
+failed_videomem:
+       fb_dealloc_cmap(&info->cmap);
+failed_cmap:
        kfree(info);
        platform_set_drvdata(pdev, NULL);
 
index 2a4481cf260cc64f4fe19cbc50d674d5539df221..a36b2d28280edfb14c90c9491322fcd00e4eb9e5 100644 (file)
 
 #include <video/exynos_dp.h>
 
-#include <plat/cpu.h>
-
 #include "exynos_dp_core.h"
 
 static int exynos_dp_init_dp(struct exynos_dp_device *dp)
 {
        exynos_dp_reset(dp);
 
+       exynos_dp_swreset(dp);
+
        /* SW defined function Normal operation */
        exynos_dp_enable_sw_function(dp);
 
@@ -478,7 +478,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
        int lane_count;
        u8 buf[5];
 
-       u8 *adjust_request;
+       u8 adjust_request[2];
        u8 voltage_swing;
        u8 pre_emphasis;
        u8 training_lane;
@@ -493,8 +493,8 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
                /* set training pattern 2 for EQ */
                exynos_dp_set_training_pattern(dp, TRAINING_PTN2);
 
-               adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1
-                                               - DPCD_ADDR_LANE0_1_STATUS);
+               adjust_request[0] = link_status[4];
+               adjust_request[1] = link_status[5];
 
                exynos_dp_get_adjust_train(dp, adjust_request);
 
@@ -566,7 +566,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
        u8 buf[5];
        u32 reg;
 
-       u8 *adjust_request;
+       u8 adjust_request[2];
 
        udelay(400);
 
@@ -575,8 +575,8 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
        lane_count = dp->link_train.lane_count;
 
        if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
-               adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1
-                                               - DPCD_ADDR_LANE0_1_STATUS);
+               adjust_request[0] = link_status[4];
+               adjust_request[1] = link_status[5];
 
                if (exynos_dp_channel_eq_ok(link_status, lane_count) == 0) {
                        /* traing pattern Set to Normal */
@@ -770,7 +770,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp,
                        return -ETIMEDOUT;
                }
 
-               mdelay(100);
+               udelay(1);
        }
 
        /* Set to use the register calculated M/N video */
@@ -804,7 +804,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp,
                        return -ETIMEDOUT;
                }
 
-               mdelay(100);
+               mdelay(1);
        }
 
        if (retval != 0)
@@ -860,7 +860,8 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       dp = kzalloc(sizeof(struct exynos_dp_device), GFP_KERNEL);
+       dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
+                               GFP_KERNEL);
        if (!dp) {
                dev_err(&pdev->dev, "no memory for device data\n");
                return -ENOMEM;
@@ -871,8 +872,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
        dp->clock = clk_get(&pdev->dev, "dp");
        if (IS_ERR(dp->clock)) {
                dev_err(&pdev->dev, "failed to get clock\n");
-               ret = PTR_ERR(dp->clock);
-               goto err_dp;
+               return PTR_ERR(dp->clock);
        }
 
        clk_enable(dp->clock);
@@ -884,35 +884,25 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
                goto err_clock;
        }
 
-       res = request_mem_region(res->start, resource_size(res),
-                               dev_name(&pdev->dev));
-       if (!res) {
-               dev_err(&pdev->dev, "failed to request registers region\n");
-               ret = -EINVAL;
-               goto err_clock;
-       }
-
-       dp->res = res;
-
-       dp->reg_base = ioremap(res->start, resource_size(res));
+       dp->reg_base = devm_request_and_ioremap(&pdev->dev, res);
        if (!dp->reg_base) {
                dev_err(&pdev->dev, "failed to ioremap\n");
                ret = -ENOMEM;
-               goto err_req_region;
+               goto err_clock;
        }
 
        dp->irq = platform_get_irq(pdev, 0);
        if (!dp->irq) {
                dev_err(&pdev->dev, "failed to get irq\n");
                ret = -ENODEV;
-               goto err_ioremap;
+               goto err_clock;
        }
 
-       ret = request_irq(dp->irq, exynos_dp_irq_handler, 0,
-                       "exynos-dp", dp);
+       ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0,
+                               "exynos-dp", dp);
        if (ret) {
                dev_err(&pdev->dev, "failed to request irq\n");
-               goto err_ioremap;
+               goto err_clock;
        }
 
        dp->video_info = pdata->video_info;
@@ -924,7 +914,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
        ret = exynos_dp_detect_hpd(dp);
        if (ret) {
                dev_err(&pdev->dev, "unable to detect hpd\n");
-               goto err_irq;
+               goto err_clock;
        }
 
        exynos_dp_handle_edid(dp);
@@ -933,7 +923,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
                                dp->video_info->link_rate);
        if (ret) {
                dev_err(&pdev->dev, "unable to do link train\n");
-               goto err_irq;
+               goto err_clock;
        }
 
        exynos_dp_enable_scramble(dp, 1);
@@ -947,23 +937,15 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
        ret = exynos_dp_config_video(dp, dp->video_info);
        if (ret) {
                dev_err(&pdev->dev, "unable to config video\n");
-               goto err_irq;
+               goto err_clock;
        }
 
        platform_set_drvdata(pdev, dp);
 
        return 0;
 
-err_irq:
-       free_irq(dp->irq, dp);
-err_ioremap:
-       iounmap(dp->reg_base);
-err_req_region:
-       release_mem_region(res->start, resource_size(res));
 err_clock:
        clk_put(dp->clock);
-err_dp:
-       kfree(dp);
 
        return ret;
 }
@@ -976,16 +958,9 @@ static int __devexit exynos_dp_remove(struct platform_device *pdev)
        if (pdata && pdata->phy_exit)
                pdata->phy_exit();
 
-       free_irq(dp->irq, dp);
-       iounmap(dp->reg_base);
-
        clk_disable(dp->clock);
        clk_put(dp->clock);
 
-       release_mem_region(dp->res->start, resource_size(dp->res));
-
-       kfree(dp);
-
        return 0;
 }
 
index 90ceaca0fa248c4a0a1197f51654655b0d3b52ac..1e0f998e0c9f4c872d132aafa640442a0b2d4189 100644 (file)
@@ -26,7 +26,6 @@ struct link_train {
 
 struct exynos_dp_device {
        struct device           *dev;
-       struct resource         *res;
        struct clk              *clock;
        unsigned int            irq;
        void __iomem            *reg_base;
@@ -39,8 +38,10 @@ struct exynos_dp_device {
 void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable);
 void exynos_dp_stop_video(struct exynos_dp_device *dp);
 void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable);
+void exynos_dp_init_analog_param(struct exynos_dp_device *dp);
 void exynos_dp_init_interrupt(struct exynos_dp_device *dp);
 void exynos_dp_reset(struct exynos_dp_device *dp);
+void exynos_dp_swreset(struct exynos_dp_device *dp);
 void exynos_dp_config_interrupt(struct exynos_dp_device *dp);
 u32 exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp);
 void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable);
index 6548afa0e3d21da1e543d2dd38c0b7de749b06d2..6ce76d56c3a1a2a7d3920d7eebfca34001b64dc7 100644 (file)
@@ -16,8 +16,6 @@
 
 #include <video/exynos_dp.h>
 
-#include <plat/cpu.h>
-
 #include "exynos_dp_core.h"
 #include "exynos_dp_reg.h"
 
@@ -65,6 +63,28 @@ void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable)
        writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP);
 }
 
+void exynos_dp_init_analog_param(struct exynos_dp_device *dp)
+{
+       u32 reg;
+
+       reg = TX_TERMINAL_CTRL_50_OHM;
+       writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1);
+
+       reg = SEL_24M | TX_DVDD_BIT_1_0625V;
+       writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2);
+
+       reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO;
+       writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3);
+
+       reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM |
+               TX_CUR1_2X | TX_CUR_8_MA;
+       writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1);
+
+       reg = CH3_AMP_400_MV | CH2_AMP_400_MV |
+               CH1_AMP_400_MV | CH0_AMP_400_MV;
+       writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL);
+}
+
 void exynos_dp_init_interrupt(struct exynos_dp_device *dp)
 {
        /* Set interrupt pin assertion polarity as high */
@@ -89,8 +109,6 @@ void exynos_dp_reset(struct exynos_dp_device *dp)
 {
        u32 reg;
 
-       writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
-
        exynos_dp_stop_video(dp);
        exynos_dp_enable_video_mute(dp, 0);
 
@@ -131,9 +149,15 @@ void exynos_dp_reset(struct exynos_dp_device *dp)
 
        writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
 
+       exynos_dp_init_analog_param(dp);
        exynos_dp_init_interrupt(dp);
 }
 
+void exynos_dp_swreset(struct exynos_dp_device *dp)
+{
+       writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
+}
+
 void exynos_dp_config_interrupt(struct exynos_dp_device *dp)
 {
        u32 reg;
@@ -271,6 +295,7 @@ void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
 void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
 {
        u32 reg;
+       int timeout_loop = 0;
 
        exynos_dp_set_analog_power_down(dp, POWER_ALL, 0);
 
@@ -282,9 +307,19 @@ void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
        writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL);
 
        /* Power up PLL */
-       if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED)
+       if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
                exynos_dp_set_pll_power_down(dp, 0);
 
+               while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+                       timeout_loop++;
+                       if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
+                               dev_err(dp->dev, "failed to get pll lock status\n");
+                               return;
+                       }
+                       usleep_range(10, 20);
+               }
+       }
+
        /* Enable Serdes FIFO function and Link symbol clock domain module */
        reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
        reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
index 42f608e2a43e056809f8b01595db6ea8714ade4c..125b27cd57aebd3f44acf997cf08f711928b61dd 100644 (file)
 
 #define EXYNOS_DP_LANE_MAP                     0x35C
 
+#define EXYNOS_DP_ANALOG_CTL_1                 0x370
+#define EXYNOS_DP_ANALOG_CTL_2                 0x374
+#define EXYNOS_DP_ANALOG_CTL_3                 0x378
+#define EXYNOS_DP_PLL_FILTER_CTL_1             0x37C
+#define EXYNOS_DP_TX_AMP_TUNING_CTL            0x380
+
 #define EXYNOS_DP_AUX_HW_RETRY_CTL             0x390
 
 #define EXYNOS_DP_COMMON_INT_STA_1             0x3C4
 #define LANE0_MAP_LOGIC_LANE_2                 (0x2 << 0)
 #define LANE0_MAP_LOGIC_LANE_3                 (0x3 << 0)
 
+/* EXYNOS_DP_ANALOG_CTL_1 */
+#define TX_TERMINAL_CTRL_50_OHM                        (0x1 << 4)
+
+/* EXYNOS_DP_ANALOG_CTL_2 */
+#define SEL_24M                                        (0x1 << 3)
+#define TX_DVDD_BIT_1_0625V                    (0x4 << 0)
+
+/* EXYNOS_DP_ANALOG_CTL_3 */
+#define DRIVE_DVDD_BIT_1_0625V                 (0x4 << 5)
+#define VCO_BIT_600_MICRO                      (0x5 << 0)
+
+/* EXYNOS_DP_PLL_FILTER_CTL_1 */
+#define PD_RING_OSC                            (0x1 << 6)
+#define AUX_TERMINAL_CTRL_50_OHM               (0x2 << 4)
+#define TX_CUR1_2X                             (0x1 << 2)
+#define TX_CUR_8_MA                            (0x2 << 0)
+
+/* EXYNOS_DP_TX_AMP_TUNING_CTL */
+#define CH3_AMP_400_MV                         (0x0 << 24)
+#define CH2_AMP_400_MV                         (0x0 << 16)
+#define CH1_AMP_400_MV                         (0x0 << 8)
+#define CH0_AMP_400_MV                         (0x0 << 0)
+
 /* EXYNOS_DP_AUX_HW_RETRY_CTL */
 #define AUX_BIT_PERIOD_EXPECTED_DELAY(x)       (((x) & 0x7) << 8)
 #define AUX_HW_RETRY_INTERVAL_MASK             (0x3 << 3)
index 557091dc0e97382bcab08baa165d21ea6b5ff8f8..6c1f5c314a42b1eccd152cdd07b746883824fa24 100644 (file)
@@ -58,7 +58,7 @@ static struct mipi_dsim_platform_data *to_dsim_plat(struct platform_device
 }
 
 static struct regulator_bulk_data supplies[] = {
-       { .supply = "vdd10", },
+       { .supply = "vdd11", },
        { .supply = "vdd18", },
 };
 
@@ -102,6 +102,8 @@ static void exynos_mipi_update_cfg(struct mipi_dsim_device *dsim)
        /* set display timing. */
        exynos_mipi_dsi_set_display_mode(dsim, dsim->dsim_config);
 
+       exynos_mipi_dsi_init_interrupt(dsim);
+
        /*
         * data from Display controller(FIMD) is transferred in video mode
         * but in case of command mode, all settigs is updated to registers.
@@ -413,27 +415,30 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
                goto err_platform_get_irq;
        }
 
+       init_completion(&dsim_wr_comp);
+       init_completion(&dsim_rd_comp);
+       platform_set_drvdata(pdev, dsim);
+
        ret = request_irq(dsim->irq, exynos_mipi_dsi_interrupt_handler,
-                       IRQF_SHARED, pdev->name, dsim);
+                       IRQF_SHARED, dev_name(&pdev->dev), dsim);
        if (ret != 0) {
                dev_err(&pdev->dev, "failed to request dsim irq\n");
                ret = -EINVAL;
                goto err_bind;
        }
 
-       init_completion(&dsim_wr_comp);
-       init_completion(&dsim_rd_comp);
-
-       /* enable interrupt */
+       /* enable interrupts */
        exynos_mipi_dsi_init_interrupt(dsim);
 
        /* initialize mipi-dsi client(lcd panel). */
        if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->probe)
                dsim_ddi->dsim_lcd_drv->probe(dsim_ddi->dsim_lcd_dev);
 
-       /* in case that mipi got enabled at bootloader. */
-       if (dsim_pd->enabled)
-               goto out;
+       /* in case mipi-dsi has been enabled by bootloader */
+       if (dsim_pd->enabled) {
+               exynos_mipi_regulator_enable(dsim);
+               goto done;
+       }
 
        /* lcd panel power on. */
        if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->power_on)
@@ -453,12 +458,11 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
 
        dsim->suspended = false;
 
-out:
+done:
        platform_set_drvdata(pdev, dsim);
 
-       dev_dbg(&pdev->dev, "mipi-dsi driver(%s mode) has been probed.\n",
-               (dsim_config->e_interface == DSIM_COMMAND) ?
-                       "CPU" : "RGB");
+       dev_dbg(&pdev->dev, "%s() completed sucessfuly (%s mode)\n", __func__,
+               dsim_config->e_interface == DSIM_COMMAND ? "CPU" : "RGB");
 
        return 0;
 
@@ -515,10 +519,10 @@ static int __devexit exynos_mipi_dsi_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int exynos_mipi_dsi_suspend(struct platform_device *pdev,
-               pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int exynos_mipi_dsi_suspend(struct device *dev)
 {
+       struct platform_device *pdev = to_platform_device(dev);
        struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
        struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
        struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
@@ -544,8 +548,9 @@ static int exynos_mipi_dsi_suspend(struct platform_device *pdev,
        return 0;
 }
 
-static int exynos_mipi_dsi_resume(struct platform_device *pdev)
+static int exynos_mipi_dsi_resume(struct device *dev)
 {
+       struct platform_device *pdev = to_platform_device(dev);
        struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
        struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
        struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
@@ -577,19 +582,19 @@ static int exynos_mipi_dsi_resume(struct platform_device *pdev)
 
        return 0;
 }
-#else
-#define exynos_mipi_dsi_suspend NULL
-#define exynos_mipi_dsi_resume NULL
 #endif
 
+static const struct dev_pm_ops exynos_mipi_dsi_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(exynos_mipi_dsi_suspend, exynos_mipi_dsi_resume)
+};
+
 static struct platform_driver exynos_mipi_dsi_driver = {
        .probe = exynos_mipi_dsi_probe,
        .remove = __devexit_p(exynos_mipi_dsi_remove),
-       .suspend = exynos_mipi_dsi_suspend,
-       .resume = exynos_mipi_dsi_resume,
        .driver = {
                   .name = "exynos-mipi-dsim",
                   .owner = THIS_MODULE,
+                  .pm = &exynos_mipi_dsi_pm_ops,
        },
 };
 
index 14909c1d38327fe0cd9cb8d975e66e47028c0f3f..47b533a183be2979ede8a29bb9db0d89b0e309bb 100644 (file)
@@ -76,33 +76,25 @@ static unsigned int dpll_table[15] = {
 
 irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id)
 {
-       unsigned int intsrc = 0;
-       unsigned int intmsk = 0;
-       struct mipi_dsim_device *dsim = NULL;
-
-       dsim = dev_id;
-       if (!dsim) {
-               dev_dbg(dsim->dev, KERN_ERR "%s:error: wrong parameter\n",
-                                                       __func__);
-               return IRQ_HANDLED;
+       struct mipi_dsim_device *dsim = dev_id;
+       unsigned int intsrc, intmsk;
+
+       if (dsim == NULL) {
+               dev_err(dsim->dev, "%s: wrong parameter\n", __func__);
+               return IRQ_NONE;
        }
 
        intsrc = exynos_mipi_dsi_read_interrupt(dsim);
        intmsk = exynos_mipi_dsi_read_interrupt_mask(dsim);
+       intmsk = ~intmsk & intsrc;
 
-       intmsk = ~(intmsk) & intsrc;
-
-       switch (intmsk) {
-       case INTMSK_RX_DONE:
+       if (intsrc & INTMSK_RX_DONE) {
                complete(&dsim_rd_comp);
                dev_dbg(dsim->dev, "MIPI INTMSK_RX_DONE\n");
-               break;
-       case INTMSK_FIFO_EMPTY:
+       }
+       if (intsrc & INTMSK_FIFO_EMPTY) {
                complete(&dsim_wr_comp);
                dev_dbg(dsim->dev, "MIPI INTMSK_FIFO_EMPTY\n");
-               break;
-       default:
-               break;
        }
 
        exynos_mipi_dsi_clear_interrupt(dsim, intmsk);
@@ -738,11 +730,11 @@ int exynos_mipi_dsi_set_display_mode(struct mipi_dsim_device *dsim,
                if (dsim_config->auto_vertical_cnt == 0) {
                        exynos_mipi_dsi_set_main_disp_vporch(dsim,
                                dsim_config->cmd_allow,
-                               timing->upper_margin,
-                               timing->lower_margin);
+                               timing->lower_margin,
+                               timing->upper_margin);
                        exynos_mipi_dsi_set_main_disp_hporch(dsim,
-                               timing->left_margin,
-                               timing->right_margin);
+                               timing->right_margin,
+                               timing->left_margin);
                        exynos_mipi_dsi_set_main_disp_sync_area(dsim,
                                timing->vsync_len,
                                timing->hsync_len);
index 4aa9ac6218bfa2b41d51ffe78159142e0ae021cd..05d080b63bc0b85276c4ff999d93dd9ba1b76982 100644 (file)
@@ -293,9 +293,20 @@ static void s6e8ax0_panel_cond(struct s6e8ax0 *lcd)
                0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
                0xc8, 0x08, 0x48, 0xc1, 0x00, 0xc1, 0xff, 0xff, 0xc8
        };
+       static const unsigned char data_to_send_panel_reverse[] = {
+               0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00, 0x3c, 0x7d,
+               0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00, 0x00, 0x20, 0x04, 0x08,
+               0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
+               0xc1, 0x01, 0x41, 0xc1, 0x00, 0xc1, 0xf6, 0xf6, 0xc1
+       };
 
-       ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-               data_to_send, ARRAY_SIZE(data_to_send));
+       if (lcd->dsim_dev->panel_reverse)
+               ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
+                               data_to_send_panel_reverse,
+                               ARRAY_SIZE(data_to_send_panel_reverse));
+       else
+               ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
+                               data_to_send, ARRAY_SIZE(data_to_send));
 }
 
 static void s6e8ax0_display_cond(struct s6e8ax0 *lcd)
index c27e153d8882053e2d28ab72cfc784875752f337..1ddeb11659d4db9e0023f07d98e5c7c189e97535 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/rmap.h>
 #include <linux/pagemap.h>
 
-struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
+static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
 {
        void *screen_base = (void __force *) info->screen_base;
        struct page *page;
@@ -107,6 +107,10 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
        /* protect against the workqueue changing the page list */
        mutex_lock(&fbdefio->lock);
 
+       /* first write in this cycle, notify the driver */
+       if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
+               fbdefio->first_io(info);
+
        /*
         * We want the page to remain locked from ->page_mkwrite until
         * the PTE is marked dirty to avoid page_mkclean() being called
index c6ce416ab587776f72f629a1ffecbfcc3ecce693..0dff12a1daef26af52949a6ff52f3b5c9de61a61 100644 (file)
@@ -1046,20 +1046,29 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
 int
 fb_blank(struct fb_info *info, int blank)
 {      
-       int ret = -EINVAL;
+       struct fb_event event;
+       int ret = -EINVAL, early_ret;
 
        if (blank > FB_BLANK_POWERDOWN)
                blank = FB_BLANK_POWERDOWN;
 
+       event.info = info;
+       event.data = &blank;
+
+       early_ret = fb_notifier_call_chain(FB_EARLY_EVENT_BLANK, &event);
+
        if (info->fbops->fb_blank)
                ret = info->fbops->fb_blank(blank, info);
 
-       if (!ret) {
-               struct fb_event event;
-
-               event.info = info;
-               event.data = &blank;
+       if (!ret)
                fb_notifier_call_chain(FB_EVENT_BLANK, &event);
+       else {
+               /*
+                * if fb_blank is failed then revert effects of
+                * the early blank event.
+                */
+               if (!early_ret)
+                       fb_notifier_call_chain(FB_R_EARLY_EVENT_BLANK, &event);
        }
 
        return ret;
index 67afa9c2289d539e281bb831aa2260c0fba1205b..a55e3669d1352f387c29e1342d4d54db429c2b3f 100644 (file)
@@ -80,6 +80,8 @@ EXPORT_SYMBOL(framebuffer_alloc);
  */
 void framebuffer_release(struct fb_info *info)
 {
+       if (!info)
+               return;
        kfree(info->apertures);
        kfree(info);
 }
index 6af3f16754f0e2bd8e5062312164f05a80219a78..458c00664ade6110a6b26d58397086b2b1d4290f 100644 (file)
@@ -834,7 +834,6 @@ static void update_lcdc(struct fb_info *info)
        diu_ops.set_pixel_clock(var->pixclock);
 
        out_be32(&hw->syn_pol, 0);      /* SYNC SIGNALS POLARITY */
-       out_be32(&hw->thresholds, 0x00037800); /* The Thresholds */
        out_be32(&hw->int_status, 0);   /* INTERRUPT STATUS */
        out_be32(&hw->plut, 0x01F5F666);
 
index f135dbead07d8921068de3513dafb2c32ea10973..caad3689b4e6fb4677a393c44a9373ff1543f851 100644 (file)
@@ -131,7 +131,9 @@ struct imxfb_rgb {
 struct imxfb_info {
        struct platform_device  *pdev;
        void __iomem            *regs;
-       struct clk              *clk;
+       struct clk              *clk_ipg;
+       struct clk              *clk_ahb;
+       struct clk              *clk_per;
 
        /*
         * These are the addresses we mapped
@@ -340,7 +342,7 @@ static int imxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 
        pr_debug("var->bits_per_pixel=%d\n", var->bits_per_pixel);
 
-       lcd_clk = clk_get_rate(fbi->clk);
+       lcd_clk = clk_get_rate(fbi->clk_per);
 
        tmp = var->pixclock * (unsigned long long)lcd_clk;
 
@@ -455,11 +457,17 @@ static int imxfb_bl_update_status(struct backlight_device *bl)
 
        fbi->pwmr = (fbi->pwmr & ~0xFF) | brightness;
 
-       if (bl->props.fb_blank != FB_BLANK_UNBLANK)
-               clk_enable(fbi->clk);
+       if (bl->props.fb_blank != FB_BLANK_UNBLANK) {
+               clk_prepare_enable(fbi->clk_ipg);
+               clk_prepare_enable(fbi->clk_ahb);
+               clk_prepare_enable(fbi->clk_per);
+       }
        writel(fbi->pwmr, fbi->regs + LCDC_PWMR);
-       if (bl->props.fb_blank != FB_BLANK_UNBLANK)
-               clk_disable(fbi->clk);
+       if (bl->props.fb_blank != FB_BLANK_UNBLANK) {
+               clk_disable_unprepare(fbi->clk_per);
+               clk_disable_unprepare(fbi->clk_ahb);
+               clk_disable_unprepare(fbi->clk_ipg);
+       }
 
        return 0;
 }
@@ -522,7 +530,9 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
         */
        writel(RMCR_LCDC_EN_MX1, fbi->regs + LCDC_RMCR);
 
-       clk_enable(fbi->clk);
+       clk_prepare_enable(fbi->clk_ipg);
+       clk_prepare_enable(fbi->clk_ahb);
+       clk_prepare_enable(fbi->clk_per);
 
        if (fbi->backlight_power)
                fbi->backlight_power(1);
@@ -539,7 +549,9 @@ static void imxfb_disable_controller(struct imxfb_info *fbi)
        if (fbi->lcd_power)
                fbi->lcd_power(0);
 
-       clk_disable(fbi->clk);
+       clk_disable_unprepare(fbi->clk_per);
+       clk_disable_unprepare(fbi->clk_ipg);
+       clk_disable_unprepare(fbi->clk_ahb);
 
        writel(0, fbi->regs + LCDC_RMCR);
 }
@@ -770,10 +782,21 @@ static int __init imxfb_probe(struct platform_device *pdev)
                goto failed_req;
        }
 
-       fbi->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(fbi->clk)) {
-               ret = PTR_ERR(fbi->clk);
-               dev_err(&pdev->dev, "unable to get clock: %d\n", ret);
+       fbi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+       if (IS_ERR(fbi->clk_ipg)) {
+               ret = PTR_ERR(fbi->clk_ipg);
+               goto failed_getclock;
+       }
+
+       fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+       if (IS_ERR(fbi->clk_ahb)) {
+               ret = PTR_ERR(fbi->clk_ahb);
+               goto failed_getclock;
+       }
+
+       fbi->clk_per = devm_clk_get(&pdev->dev, "per");
+       if (IS_ERR(fbi->clk_per)) {
+               ret = PTR_ERR(fbi->clk_per);
                goto failed_getclock;
        }
 
@@ -858,7 +881,6 @@ failed_platform_init:
 failed_map:
        iounmap(fbi->regs);
 failed_ioremap:
-       clk_put(fbi->clk);
 failed_getclock:
        release_mem_region(res->start, resource_size(res));
 failed_req:
@@ -895,8 +917,6 @@ static int __devexit imxfb_remove(struct platform_device *pdev)
 
        iounmap(fbi->regs);
        release_mem_region(res->start, resource_size(res));
-       clk_disable(fbi->clk);
-       clk_put(fbi->clk);
 
        platform_set_drvdata(pdev, NULL);
 
index 02fd2263610c1c63d1cdb8cd8173373a3ce88200..bdcbfbae277741e85b52f300f756d37bf3773a6c 100644 (file)
@@ -680,6 +680,7 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
                 + dinfo->fb.size);
        if (!dinfo->aperture.virtual) {
                ERR_MSG("Cannot remap FB region.\n");
+               agp_backend_release(bridge);
                cleanup(dinfo);
                return -ENODEV;
        }
@@ -689,6 +690,7 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
                                              INTEL_REG_SIZE);
        if (!dinfo->mmio_base) {
                ERR_MSG("Cannot remap MMIO region.\n");
+               agp_backend_release(bridge);
                cleanup(dinfo);
                return -ENODEV;
        }
index 31b8f67477b7957b8e193f757c4821e810b685a3..217678e0b983affe7ec7e1f1571c6a7811af5d5c 100644 (file)
@@ -1243,6 +1243,7 @@ static int maven_probe(struct i2c_client *client,
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_WORD_DATA |
                                              I2C_FUNC_SMBUS_BYTE_DATA |
+                                             I2C_FUNC_NOSTART |
                                              I2C_FUNC_PROTOCOL_MANGLING))
                goto ERROR0;
        if (!(data = kzalloc(sizeof(*data), GFP_KERNEL))) {
index 273769bb8debce1e53704ae4c864e6d221a583a6..c87e17afb3e2c9c74d9131c578235672d325f3d8 100644 (file)
@@ -68,7 +68,7 @@ static int mb862xx_i2c_read_byte(struct i2c_adapter *adap, u8 *byte, int last)
        return 1;
 }
 
-void mb862xx_i2c_stop(struct i2c_adapter *adap)
+static void mb862xx_i2c_stop(struct i2c_adapter *adap)
 {
        struct mb862xxfb_par *par = adap->algo_data;
 
index 11a7a333701d3abc7da2aa57573344dc0576645c..00ce1f34b4965aa1d20f9bc33c709046551f2eca 100644 (file)
@@ -579,7 +579,7 @@ static ssize_t mb862xxfb_show_dispregs(struct device *dev,
 
 static DEVICE_ATTR(dispregs, 0444, mb862xxfb_show_dispregs, NULL);
 
-irqreturn_t mb862xx_intr(int irq, void *dev_id)
+static irqreturn_t mb862xx_intr(int irq, void *dev_id)
 {
        struct mb862xxfb_par *par = (struct mb862xxfb_par *) dev_id;
        unsigned long reg_ist, mask;
index 55bf6196b7a0e1185c6237df1f3b4baae5f7cb97..ab0a8e527333b9f68edd884bfdc91067f1b214ad 100644 (file)
@@ -950,7 +950,7 @@ static int __devinit mbxfb_probe(struct platform_device *dev)
 
        mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr,
                                             res_size(mfbi->fb_req));
-       if (!mfbi->reg_virt_addr) {
+       if (!mfbi->fb_virt_addr) {
                dev_err(&dev->dev, "failed to ioremap frame buffer\n");
                ret = -EINVAL;
                goto err4;
index 6c6bc578d0fcd3fbf6f008465d04e75bc8aeb2b3..abbe691047bde3ac8cdc05042a27873be649ea71 100644 (file)
@@ -889,6 +889,18 @@ static int __devexit mxsfb_remove(struct platform_device *pdev)
        return 0;
 }
 
+static void mxsfb_shutdown(struct platform_device *pdev)
+{
+       struct fb_info *fb_info = platform_get_drvdata(pdev);
+       struct mxsfb_info *host = to_imxfb_host(fb_info);
+
+       /*
+        * Force stop the LCD controller as keeping it running during reboot
+        * might interfere with the BootROM's boot mode pads sampling.
+        */
+       writel(CTRL_RUN, host->base + LCDC_CTRL + REG_CLR);
+}
+
 static struct platform_device_id mxsfb_devtype[] = {
        {
                .name = "imx23-fb",
@@ -905,6 +917,7 @@ MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
 static struct platform_driver mxsfb_driver = {
        .probe = mxsfb_probe,
        .remove = __devexit_p(mxsfb_remove),
+       .shutdown = mxsfb_shutdown,
        .id_table = mxsfb_devtype,
        .driver = {
                   .name = DRIVER_NAME,
index 1e7536d9a8fcfeac9e9c808aeee9629363c92efa..b48f95f0dfe24d2685d8d6ee296b18b98eab9091 100644 (file)
@@ -39,14 +39,6 @@ config FB_OMAP_LCD_MIPID
          the Mobile Industry Processor Interface DBI-C/DCS
          specification. (Supported LCDs: Philips LPH8923, Sharp LS041Y3)
 
-config FB_OMAP_BOOTLOADER_INIT
-       bool "Check bootloader initialization"
-       depends on FB_OMAP
-       help
-         Say Y here if you want to enable checking if the bootloader has
-         already initialized the display controller. In this case the
-         driver will skip the initialization.
-
 config FB_OMAP_CONSISTENT_DMA_SIZE
        int "Consistent DMA memory size (MB)"
        depends on FB_OMAP
index d26f37ac69d87d882eb7a2a5d539a7e90e55c4fa..ad741c3d1ae1668f985c53e8a1f52df367e0ec59 100644 (file)
@@ -532,6 +532,7 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
 
        /*------- Backlight control --------*/
 
+       memset(&props, 0, sizeof(props));
        props.fb_blank = FB_BLANK_UNBLANK;
        props.power = FB_BLANK_UNBLANK;
        props.type = BACKLIGHT_RAW;
@@ -738,12 +739,6 @@ static void acx_panel_set_timings(struct omap_dss_device *dssdev,
        }
 }
 
-static void acx_panel_get_timings(struct omap_dss_device *dssdev,
-               struct omap_video_timings *timings)
-{
-       *timings = dssdev->panel.timings;
-}
-
 static int acx_panel_check_timings(struct omap_dss_device *dssdev,
                struct omap_video_timings *timings)
 {
@@ -761,7 +756,6 @@ static struct omap_dss_driver acx_panel_driver = {
        .resume         = acx_panel_resume,
 
        .set_timings    = acx_panel_set_timings,
-       .get_timings    = acx_panel_get_timings,
        .check_timings  = acx_panel_check_timings,
 
        .get_recommended_bpp = acx_get_recommended_bpp,
index 30fe4dfeb22700a92f8347f4cad682252e68df36..e42f9dc22123e319cde3eba127b0b57ccc05e910 100644 (file)
@@ -386,6 +386,106 @@ static struct panel_config generic_dpi_panels[] = {
 
                .name                   = "innolux_at080tn52",
        },
+
+       /* Mitsubishi AA084SB01 */
+       {
+               {
+                       .x_res          = 800,
+                       .y_res          = 600,
+                       .pixel_clock    = 40000,
+
+                       .hsw            = 1,
+                       .hfp            = 254,
+                       .hbp            = 1,
+
+                       .vsw            = 1,
+                       .vfp            = 26,
+                       .vbp            = 1,
+               },
+               .config                 = OMAP_DSS_LCD_TFT,
+               .name                   = "mitsubishi_aa084sb01",
+       },
+       /* EDT ET0500G0DH6 */
+       {
+               {
+                       .x_res          = 800,
+                       .y_res          = 480,
+                       .pixel_clock    = 33260,
+
+                       .hsw            = 128,
+                       .hfp            = 216,
+                       .hbp            = 40,
+
+                       .vsw            = 2,
+                       .vfp            = 35,
+                       .vbp            = 10,
+               },
+               .config                 = OMAP_DSS_LCD_TFT,
+               .name                   = "edt_et0500g0dh6",
+       },
+
+       /* Prime-View PD050VL1 */
+       {
+               {
+                       .x_res          = 640,
+                       .y_res          = 480,
+
+                       .pixel_clock    = 25000,
+
+                       .hsw            = 96,
+                       .hfp            = 18,
+                       .hbp            = 46,
+
+                       .vsw            = 2,
+                       .vfp            = 10,
+                       .vbp            = 33,
+               },
+               .config                 = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+                                         OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+               .name                   = "primeview_pd050vl1",
+       },
+
+       /* Prime-View PM070WL4 */
+       {
+               {
+                       .x_res          = 800,
+                       .y_res          = 480,
+
+                       .pixel_clock    = 32000,
+
+                       .hsw            = 128,
+                       .hfp            = 42,
+                       .hbp            = 86,
+
+                       .vsw            = 2,
+                       .vfp            = 10,
+                       .vbp            = 33,
+               },
+               .config                 = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+                                         OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+               .name                   = "primeview_pm070wl4",
+       },
+
+       /* Prime-View PD104SLF */
+       {
+               {
+                       .x_res          = 800,
+                       .y_res          = 600,
+
+                       .pixel_clock    = 40000,
+
+                       .hsw            = 128,
+                       .hfp            = 42,
+                       .hbp            = 86,
+
+                       .vsw            = 4,
+                       .vfp            = 1,
+                       .vbp            = 23,
+               },
+               .config                 = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+                                         OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+               .name                   = "primeview_pd104slf",
+       },
 };
 
 struct panel_drv_data {
@@ -549,12 +649,6 @@ static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev,
        dpi_set_timings(dssdev, timings);
 }
 
-static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev,
-               struct omap_video_timings *timings)
-{
-       *timings = dssdev->panel.timings;
-}
-
 static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
                struct omap_video_timings *timings)
 {
@@ -571,7 +665,6 @@ static struct omap_dss_driver dpi_driver = {
        .resume         = generic_dpi_panel_resume,
 
        .set_timings    = generic_dpi_panel_set_timings,
-       .get_timings    = generic_dpi_panel_get_timings,
        .check_timings  = generic_dpi_panel_check_timings,
 
        .driver         = {
index dc9408dc93d1ed36f702187aebab9e5d7605b84b..4a34cdc1371b34c777c8b4747bd669186cd2d530 100644 (file)
@@ -610,12 +610,6 @@ static int n8x0_panel_resume(struct omap_dss_device *dssdev)
        return 0;
 }
 
-static void n8x0_panel_get_timings(struct omap_dss_device *dssdev,
-               struct omap_video_timings *timings)
-{
-       *timings = dssdev->panel.timings;
-}
-
 static void n8x0_panel_get_resolution(struct omap_dss_device *dssdev,
                u16 *xres, u16 *yres)
 {
@@ -678,8 +672,6 @@ static struct omap_dss_driver n8x0_panel_driver = {
        .get_resolution = n8x0_panel_get_resolution,
        .get_recommended_bpp = omapdss_default_get_recommended_bpp,
 
-       .get_timings    = n8x0_panel_get_timings,
-
        .driver         = {
                .name   = "n8x0_panel",
                .owner  = THIS_MODULE,
index b2dd88b484209b46f5b213e9007cb2108b438c34..2ce9992f403b838efababfbc9a7ec7e65a13e533 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/gpio.h>
 #include <linux/workqueue.h>
 #include <linux/slab.h>
-#include <linux/regulator/consumer.h>
 #include <linux/mutex.h>
 
 #include <video/omapdss.h>
@@ -55,73 +54,6 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
 
 static int taal_panel_reset(struct omap_dss_device *dssdev);
 
-struct panel_regulator {
-       struct regulator *regulator;
-       const char *name;
-       int min_uV;
-       int max_uV;
-};
-
-static void free_regulators(struct panel_regulator *regulators, int n)
-{
-       int i;
-
-       for (i = 0; i < n; i++) {
-               /* disable/put in reverse order */
-               regulator_disable(regulators[n - i - 1].regulator);
-               regulator_put(regulators[n - i - 1].regulator);
-       }
-}
-
-static int init_regulators(struct omap_dss_device *dssdev,
-                       struct panel_regulator *regulators, int n)
-{
-       int r, i, v;
-
-       for (i = 0; i < n; i++) {
-               struct regulator *reg;
-
-               reg = regulator_get(&dssdev->dev, regulators[i].name);
-               if (IS_ERR(reg)) {
-                       dev_err(&dssdev->dev, "failed to get regulator %s\n",
-                               regulators[i].name);
-                       r = PTR_ERR(reg);
-                       goto err;
-               }
-
-               /* FIXME: better handling of fixed vs. variable regulators */
-               v = regulator_get_voltage(reg);
-               if (v < regulators[i].min_uV || v > regulators[i].max_uV) {
-                       r = regulator_set_voltage(reg, regulators[i].min_uV,
-                                               regulators[i].max_uV);
-                       if (r) {
-                               dev_err(&dssdev->dev,
-                                       "failed to set regulator %s voltage\n",
-                                       regulators[i].name);
-                               regulator_put(reg);
-                               goto err;
-                       }
-               }
-
-               r = regulator_enable(reg);
-               if (r) {
-                       dev_err(&dssdev->dev, "failed to enable regulator %s\n",
-                               regulators[i].name);
-                       regulator_put(reg);
-                       goto err;
-               }
-
-               regulators[i].regulator = reg;
-       }
-
-       return 0;
-
-err:
-       free_regulators(regulators, i);
-
-       return r;
-}
-
 /**
  * struct panel_config - panel configuration
  * @name: panel name
@@ -150,8 +82,6 @@ struct panel_config {
                unsigned int low;
        } reset_sequence;
 
-       struct panel_regulator *regulators;
-       int num_regulators;
 };
 
 enum {
@@ -577,12 +507,6 @@ static const struct backlight_ops taal_bl_ops = {
        .update_status  = taal_bl_update_status,
 };
 
-static void taal_get_timings(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings)
-{
-       *timings = dssdev->panel.timings;
-}
-
 static void taal_get_resolution(struct omap_dss_device *dssdev,
                u16 *xres, u16 *yres)
 {
@@ -977,11 +901,6 @@ static int taal_probe(struct omap_dss_device *dssdev)
 
        atomic_set(&td->do_update, 0);
 
-       r = init_regulators(dssdev, panel_config->regulators,
-                       panel_config->num_regulators);
-       if (r)
-               goto err_reg;
-
        td->workqueue = create_singlethread_workqueue("taal_esd");
        if (td->workqueue == NULL) {
                dev_err(&dssdev->dev, "can't create ESD workqueue\n");
@@ -1087,8 +1006,6 @@ err_bl:
 err_rst_gpio:
        destroy_workqueue(td->workqueue);
 err_wq:
-       free_regulators(panel_config->regulators, panel_config->num_regulators);
-err_reg:
        kfree(td);
 err:
        return r;
@@ -1125,9 +1042,6 @@ static void __exit taal_remove(struct omap_dss_device *dssdev)
        /* reset, to be sure that the panel is in a valid state */
        taal_hw_reset(dssdev);
 
-       free_regulators(td->panel_config->regulators,
-                       td->panel_config->num_regulators);
-
        if (gpio_is_valid(panel_data->reset_gpio))
                gpio_free(panel_data->reset_gpio);
 
@@ -1909,8 +1823,6 @@ static struct omap_dss_driver taal_driver = {
        .run_test       = taal_run_test,
        .memory_read    = taal_memory_read,
 
-       .get_timings    = taal_get_timings,
-
        .driver         = {
                .name   = "taal",
                .owner  = THIS_MODULE,
index 52637fa8fda83cfccaf8d70ddbf7c4ca26a7baf1..bff306e041cabef157929f4078401122c06510d9 100644 (file)
@@ -47,13 +47,9 @@ struct panel_drv_data {
        struct mutex lock;
 
        int pd_gpio;
-};
 
-static inline struct tfp410_platform_data
-*get_pdata(const struct omap_dss_device *dssdev)
-{
-       return dssdev->data;
-}
+       struct i2c_adapter *i2c_adapter;
+};
 
 static int tfp410_power_on(struct omap_dss_device *dssdev)
 {
@@ -68,7 +64,7 @@ static int tfp410_power_on(struct omap_dss_device *dssdev)
                goto err0;
 
        if (gpio_is_valid(ddata->pd_gpio))
-               gpio_set_value(ddata->pd_gpio, 1);
+               gpio_set_value_cansleep(ddata->pd_gpio, 1);
 
        return 0;
 err0:
@@ -83,18 +79,18 @@ static void tfp410_power_off(struct omap_dss_device *dssdev)
                return;
 
        if (gpio_is_valid(ddata->pd_gpio))
-               gpio_set_value(ddata->pd_gpio, 0);
+               gpio_set_value_cansleep(ddata->pd_gpio, 0);
 
        omapdss_dpi_display_disable(dssdev);
 }
 
 static int tfp410_probe(struct omap_dss_device *dssdev)
 {
-       struct tfp410_platform_data *pdata = get_pdata(dssdev);
        struct panel_drv_data *ddata;
        int r;
+       int i2c_bus_num;
 
-       ddata = kzalloc(sizeof(*ddata), GFP_KERNEL);
+       ddata = devm_kzalloc(&dssdev->dev, sizeof(*ddata), GFP_KERNEL);
        if (!ddata)
                return -ENOMEM;
 
@@ -104,10 +100,15 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
        ddata->dssdev = dssdev;
        mutex_init(&ddata->lock);
 
-       if (pdata)
+       if (dssdev->data) {
+               struct tfp410_platform_data *pdata = dssdev->data;
+
                ddata->pd_gpio = pdata->power_down_gpio;
-       else
+               i2c_bus_num = pdata->i2c_bus_num;
+       } else {
                ddata->pd_gpio = -1;
+               i2c_bus_num = -1;
+       }
 
        if (gpio_is_valid(ddata->pd_gpio)) {
                r = gpio_request_one(ddata->pd_gpio, GPIOF_OUT_INIT_LOW,
@@ -115,13 +116,31 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
                if (r) {
                        dev_err(&dssdev->dev, "Failed to request PD GPIO %d\n",
                                        ddata->pd_gpio);
-                       ddata->pd_gpio = -1;
+                       return r;
                }
        }
 
+       if (i2c_bus_num != -1) {
+               struct i2c_adapter *adapter;
+
+               adapter = i2c_get_adapter(i2c_bus_num);
+               if (!adapter) {
+                       dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
+                                       i2c_bus_num);
+                       r = -EINVAL;
+                       goto err_i2c;
+               }
+
+               ddata->i2c_adapter = adapter;
+       }
+
        dev_set_drvdata(&dssdev->dev, ddata);
 
        return 0;
+err_i2c:
+       if (gpio_is_valid(ddata->pd_gpio))
+               gpio_free(ddata->pd_gpio);
+       return r;
 }
 
 static void __exit tfp410_remove(struct omap_dss_device *dssdev)
@@ -130,14 +149,15 @@ static void __exit tfp410_remove(struct omap_dss_device *dssdev)
 
        mutex_lock(&ddata->lock);
 
+       if (ddata->i2c_adapter)
+               i2c_put_adapter(ddata->i2c_adapter);
+
        if (gpio_is_valid(ddata->pd_gpio))
                gpio_free(ddata->pd_gpio);
 
        dev_set_drvdata(&dssdev->dev, NULL);
 
        mutex_unlock(&ddata->lock);
-
-       kfree(ddata);
 }
 
 static int tfp410_enable(struct omap_dss_device *dssdev)
@@ -269,27 +289,17 @@ static int tfp410_read_edid(struct omap_dss_device *dssdev,
                u8 *edid, int len)
 {
        struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
-       struct tfp410_platform_data *pdata = get_pdata(dssdev);
-       struct i2c_adapter *adapter;
        int r, l, bytes_read;
 
        mutex_lock(&ddata->lock);
 
-       if (pdata->i2c_bus_num == 0) {
+       if (!ddata->i2c_adapter) {
                r = -ENODEV;
                goto err;
        }
 
-       adapter = i2c_get_adapter(pdata->i2c_bus_num);
-       if (!adapter) {
-               dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
-                               pdata->i2c_bus_num);
-               r = -EINVAL;
-               goto err;
-       }
-
        l = min(EDID_LENGTH, len);
-       r = tfp410_ddc_read(adapter, edid, l, 0);
+       r = tfp410_ddc_read(ddata->i2c_adapter, edid, l, 0);
        if (r)
                goto err;
 
@@ -299,7 +309,7 @@ static int tfp410_read_edid(struct omap_dss_device *dssdev,
        if (len > EDID_LENGTH && edid[0x7e] > 0) {
                l = min(EDID_LENGTH, len - EDID_LENGTH);
 
-               r = tfp410_ddc_read(adapter, edid + EDID_LENGTH,
+               r = tfp410_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH,
                                l, EDID_LENGTH);
                if (r)
                        goto err;
@@ -319,21 +329,15 @@ err:
 static bool tfp410_detect(struct omap_dss_device *dssdev)
 {
        struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
-       struct tfp410_platform_data *pdata = get_pdata(dssdev);
-       struct i2c_adapter *adapter;
        unsigned char out;
        int r;
 
        mutex_lock(&ddata->lock);
 
-       if (pdata->i2c_bus_num == 0)
-               goto out;
-
-       adapter = i2c_get_adapter(pdata->i2c_bus_num);
-       if (!adapter)
+       if (!ddata->i2c_adapter)
                goto out;
 
-       r = tfp410_ddc_read(adapter, &out, 1, 0);
+       r = tfp410_ddc_read(ddata->i2c_adapter, &out, 1, 0);
 
        mutex_unlock(&ddata->lock);
 
index 32f3fcd7f0f0adc8361523035ce5435412578424..4b6448b3c31f224f0919cd49d345eee9f61f52ae 100644 (file)
@@ -272,13 +272,16 @@ static const struct omap_video_timings tpo_td043_timings = {
 static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
 {
        int nreset_gpio = tpo_td043->nreset_gpio;
+       int r;
 
        if (tpo_td043->powered_on)
                return 0;
 
-       regulator_enable(tpo_td043->vcc_reg);
+       r = regulator_enable(tpo_td043->vcc_reg);
+       if (r != 0)
+               return r;
 
-       /* wait for regulator to stabilize */
+       /* wait for panel to stabilize */
        msleep(160);
 
        if (gpio_is_valid(nreset_gpio))
@@ -470,6 +473,18 @@ static void tpo_td043_remove(struct omap_dss_device *dssdev)
                gpio_free(nreset_gpio);
 }
 
+static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings)
+{
+       dpi_set_timings(dssdev, timings);
+}
+
+static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings)
+{
+       return dpi_check_timings(dssdev, timings);
+}
+
 static struct omap_dss_driver tpo_td043_driver = {
        .probe          = tpo_td043_probe,
        .remove         = tpo_td043_remove,
@@ -481,6 +496,9 @@ static struct omap_dss_driver tpo_td043_driver = {
        .set_mirror     = tpo_td043_set_hmirror,
        .get_mirror     = tpo_td043_get_hmirror,
 
+       .set_timings    = tpo_td043_set_timings,
+       .check_timings  = tpo_td043_check_timings,
+
        .driver         = {
                .name   = "tpo_td043mtea1_panel",
                .owner  = THIS_MODULE,
index 7be7c06a249ecd9cde3487a4862e1e1f9a2a2673..43324e5ed25fc2bad6712eff76397b92da97bbba 100644 (file)
@@ -68,6 +68,10 @@ config OMAP4_DSS_HDMI
          HDMI Interface. This adds the High Definition Multimedia Interface.
          See http://www.hdmi.org/ for HDMI specification.
 
+config OMAP4_DSS_HDMI_AUDIO
+       bool
+       depends on OMAP4_DSS_HDMI
+
 config OMAP2_DSS_SDI
        bool "SDI support"
        depends on ARCH_OMAP3
@@ -90,15 +94,6 @@ config OMAP2_DSS_DSI
 
          See http://www.mipi.org/ for DSI spesifications.
 
-config OMAP2_DSS_FAKE_VSYNC
-       bool "Fake VSYNC irq from manual update displays"
-       default n
-       help
-         If this is selected, DSI will generate a fake DISPC VSYNC interrupt
-         when DSI has sent a frame. This is only needed with DSI or RFBI
-         displays using manual mode, and you want VSYNC to, for example,
-         time animation.
-
 config OMAP2_DSS_MIN_FCK_PER_PCK
        int "Minimum FCK/PCK ratio (for scaling)"
        range 0 32
index b10b3bc1931e6ce0217172475155c3d07a4c7fe0..ab22cc224f3eb8259a7d42dd2c841687703ffab7 100644 (file)
@@ -99,6 +99,11 @@ struct mgr_priv_data {
 
        /* If true, a display is enabled using this manager */
        bool enabled;
+
+       bool extra_info_dirty;
+       bool shadow_extra_info_dirty;
+
+       struct omap_video_timings timings;
 };
 
 static struct {
@@ -176,7 +181,7 @@ static bool mgr_manual_update(struct omap_overlay_manager *mgr)
 }
 
 static int dss_check_settings_low(struct omap_overlay_manager *mgr,
-               struct omap_dss_device *dssdev, bool applying)
+               bool applying)
 {
        struct omap_overlay_info *oi;
        struct omap_overlay_manager_info *mi;
@@ -187,6 +192,9 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr,
 
        mp = get_mgr_priv(mgr);
 
+       if (!mp->enabled)
+               return 0;
+
        if (applying && mp->user_info_dirty)
                mi = &mp->user_info;
        else
@@ -206,26 +214,24 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr,
                ois[ovl->id] = oi;
        }
 
-       return dss_mgr_check(mgr, dssdev, mi, ois);
+       return dss_mgr_check(mgr, mi, &mp->timings, ois);
 }
 
 /*
  * check manager and overlay settings using overlay_info from data->info
  */
-static int dss_check_settings(struct omap_overlay_manager *mgr,
-               struct omap_dss_device *dssdev)
+static int dss_check_settings(struct omap_overlay_manager *mgr)
 {
-       return dss_check_settings_low(mgr, dssdev, false);
+       return dss_check_settings_low(mgr, false);
 }
 
 /*
  * check manager and overlay settings using overlay_info from ovl->info if
  * dirty and from data->info otherwise
  */
-static int dss_check_settings_apply(struct omap_overlay_manager *mgr,
-               struct omap_dss_device *dssdev)
+static int dss_check_settings_apply(struct omap_overlay_manager *mgr)
 {
-       return dss_check_settings_low(mgr, dssdev, true);
+       return dss_check_settings_low(mgr, true);
 }
 
 static bool need_isr(void)
@@ -261,6 +267,20 @@ static bool need_isr(void)
                        if (mp->shadow_info_dirty)
                                return true;
 
+                       /*
+                        * NOTE: we don't check extra_info flags for disabled
+                        * managers, once the manager is enabled, the extra_info
+                        * related manager changes will be taken in by HW.
+                        */
+
+                       /* to write new values to registers */
+                       if (mp->extra_info_dirty)
+                               return true;
+
+                       /* to set GO bit */
+                       if (mp->shadow_extra_info_dirty)
+                               return true;
+
                        list_for_each_entry(ovl, &mgr->overlays, list) {
                                struct ovl_priv_data *op;
 
@@ -305,7 +325,7 @@ static bool need_go(struct omap_overlay_manager *mgr)
 
        mp = get_mgr_priv(mgr);
 
-       if (mp->shadow_info_dirty)
+       if (mp->shadow_info_dirty || mp->shadow_extra_info_dirty)
                return true;
 
        list_for_each_entry(ovl, &mgr->overlays, list) {
@@ -320,20 +340,16 @@ static bool need_go(struct omap_overlay_manager *mgr)
 /* returns true if an extra_info field is currently being updated */
 static bool extra_info_update_ongoing(void)
 {
-       const int num_ovls = omap_dss_get_num_overlays();
-       struct ovl_priv_data *op;
-       struct omap_overlay *ovl;
-       struct mgr_priv_data *mp;
+       const int num_mgrs = dss_feat_get_num_mgrs();
        int i;
 
-       for (i = 0; i < num_ovls; ++i) {
-               ovl = omap_dss_get_overlay(i);
-               op = get_ovl_priv(ovl);
-
-               if (!ovl->manager)
-                       continue;
+       for (i = 0; i < num_mgrs; ++i) {
+               struct omap_overlay_manager *mgr;
+               struct omap_overlay *ovl;
+               struct mgr_priv_data *mp;
 
-               mp = get_mgr_priv(ovl->manager);
+               mgr = omap_dss_get_overlay_manager(i);
+               mp = get_mgr_priv(mgr);
 
                if (!mp->enabled)
                        continue;
@@ -341,8 +357,15 @@ static bool extra_info_update_ongoing(void)
                if (!mp->updating)
                        continue;
 
-               if (op->extra_info_dirty || op->shadow_extra_info_dirty)
+               if (mp->extra_info_dirty || mp->shadow_extra_info_dirty)
                        return true;
+
+               list_for_each_entry(ovl, &mgr->overlays, list) {
+                       struct ovl_priv_data *op = get_ovl_priv(ovl);
+
+                       if (op->extra_info_dirty || op->shadow_extra_info_dirty)
+                               return true;
+               }
        }
 
        return false;
@@ -525,11 +548,13 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
 
        oi = &op->info;
 
+       mp = get_mgr_priv(ovl->manager);
+
        replication = dss_use_replication(ovl->manager->device, oi->color_mode);
 
        ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;
 
-       r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
+       r = dispc_ovl_setup(ovl->id, oi, ilace, replication, &mp->timings);
        if (r) {
                /*
                 * We can't do much here, as this function can be called from
@@ -543,8 +568,6 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
                return;
        }
 
-       mp = get_mgr_priv(ovl->manager);
-
        op->info_dirty = false;
        if (mp->updating)
                op->shadow_info_dirty = true;
@@ -601,6 +624,22 @@ static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
        }
 }
 
+static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
+{
+       struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+       DSSDBGF("%d", mgr->id);
+
+       if (!mp->extra_info_dirty)
+               return;
+
+       dispc_mgr_set_timings(mgr->id, &mp->timings);
+
+       mp->extra_info_dirty = false;
+       if (mp->updating)
+               mp->shadow_extra_info_dirty = true;
+}
+
 static void dss_write_regs_common(void)
 {
        const int num_mgrs = omap_dss_get_num_overlay_managers();
@@ -646,7 +685,7 @@ static void dss_write_regs(void)
                if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
                        continue;
 
-               r = dss_check_settings(mgr, mgr->device);
+               r = dss_check_settings(mgr);
                if (r) {
                        DSSERR("cannot write registers for manager %s: "
                                        "illegal configuration\n", mgr->name);
@@ -654,6 +693,7 @@ static void dss_write_regs(void)
                }
 
                dss_mgr_write_regs(mgr);
+               dss_mgr_write_regs_extra(mgr);
        }
 }
 
@@ -693,6 +733,7 @@ static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
 
        mp = get_mgr_priv(mgr);
        mp->shadow_info_dirty = false;
+       mp->shadow_extra_info_dirty = false;
 
        list_for_each_entry(ovl, &mgr->overlays, list) {
                op = get_ovl_priv(ovl);
@@ -711,7 +752,7 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr)
 
        WARN_ON(mp->updating);
 
-       r = dss_check_settings(mgr, mgr->device);
+       r = dss_check_settings(mgr);
        if (r) {
                DSSERR("cannot start manual update: illegal configuration\n");
                spin_unlock_irqrestore(&data_lock, flags);
@@ -719,6 +760,7 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr)
        }
 
        dss_mgr_write_regs(mgr);
+       dss_mgr_write_regs_extra(mgr);
 
        dss_write_regs_common();
 
@@ -857,7 +899,7 @@ int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
 
        spin_lock_irqsave(&data_lock, flags);
 
-       r = dss_check_settings_apply(mgr, mgr->device);
+       r = dss_check_settings_apply(mgr);
        if (r) {
                spin_unlock_irqrestore(&data_lock, flags);
                DSSERR("failed to apply settings: illegal configuration.\n");
@@ -918,16 +960,13 @@ static void dss_ovl_setup_fifo(struct omap_overlay *ovl,
                bool use_fifo_merge)
 {
        struct ovl_priv_data *op = get_ovl_priv(ovl);
-       struct omap_dss_device *dssdev;
        u32 fifo_low, fifo_high;
 
        if (!op->enabled && !op->enabling)
                return;
 
-       dssdev = ovl->manager->device;
-
        dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high,
-                       use_fifo_merge);
+                       use_fifo_merge, ovl_manual_update(ovl));
 
        dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high);
 }
@@ -1050,7 +1089,7 @@ int dss_mgr_enable(struct omap_overlay_manager *mgr)
 
        mp->enabled = true;
 
-       r = dss_check_settings(mgr, mgr->device);
+       r = dss_check_settings(mgr);
        if (r) {
                DSSERR("failed to enable manager %d: check_settings failed\n",
                                mgr->id);
@@ -1225,6 +1264,35 @@ err:
        return r;
 }
 
+static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
+               struct omap_video_timings *timings)
+{
+       struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+       mp->timings = *timings;
+       mp->extra_info_dirty = true;
+}
+
+void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+               struct omap_video_timings *timings)
+{
+       unsigned long flags;
+
+       mutex_lock(&apply_lock);
+
+       spin_lock_irqsave(&data_lock, flags);
+
+       dss_apply_mgr_timings(mgr, timings);
+
+       dss_write_regs();
+       dss_set_go_bits();
+
+       spin_unlock_irqrestore(&data_lock, flags);
+
+       wait_pending_extra_info_updates();
+
+       mutex_unlock(&apply_lock);
+}
 
 int dss_ovl_set_info(struct omap_overlay *ovl,
                struct omap_overlay_info *info)
@@ -1393,7 +1461,7 @@ int dss_ovl_enable(struct omap_overlay *ovl)
 
        op->enabling = true;
 
-       r = dss_check_settings(ovl->manager, ovl->manager->device);
+       r = dss_check_settings(ovl->manager);
        if (r) {
                DSSERR("failed to enable overlay %d: check_settings failed\n",
                                ovl->id);
index e8a120771ac6fc0cdb4fc537aca478ecde0d6ab2..72ded9cd2cb0fa0e0540eeb19721ae8a602d72ef 100644 (file)
@@ -43,6 +43,8 @@ static struct {
 
        struct regulator *vdds_dsi_reg;
        struct regulator *vdds_sdi_reg;
+
+       const char *default_display_name;
 } core;
 
 static char *def_disp_name;
@@ -54,9 +56,6 @@ bool dss_debug;
 module_param_named(debug, dss_debug, bool, 0644);
 #endif
 
-static int omap_dss_register_device(struct omap_dss_device *);
-static void omap_dss_unregister_device(struct omap_dss_device *);
-
 /* REGULATORS */
 
 struct regulator *dss_get_vdds_dsi(void)
@@ -87,6 +86,51 @@ struct regulator *dss_get_vdds_sdi(void)
        return reg;
 }
 
+int dss_get_ctx_loss_count(struct device *dev)
+{
+       struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
+       int cnt;
+
+       if (!board_data->get_context_loss_count)
+               return -ENOENT;
+
+       cnt = board_data->get_context_loss_count(dev);
+
+       WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
+
+       return cnt;
+}
+
+int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask)
+{
+       struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
+
+       if (!board_data->dsi_enable_pads)
+               return -ENOENT;
+
+       return board_data->dsi_enable_pads(dsi_id, lane_mask);
+}
+
+void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask)
+{
+       struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
+
+       if (!board_data->dsi_enable_pads)
+               return;
+
+       return board_data->dsi_disable_pads(dsi_id, lane_mask);
+}
+
+int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
+{
+       struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
+
+       if (pdata->set_min_bus_tput)
+               return pdata->set_min_bus_tput(dev, tput);
+       else
+               return 0;
+}
+
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
 static int dss_debug_show(struct seq_file *s, void *unused)
 {
@@ -121,34 +165,6 @@ static int dss_initialize_debugfs(void)
        debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
                        &dss_debug_dump_clocks, &dss_debug_fops);
 
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-       debugfs_create_file("dispc_irq", S_IRUGO, dss_debugfs_dir,
-                       &dispc_dump_irqs, &dss_debug_fops);
-#endif
-
-#if defined(CONFIG_OMAP2_DSS_DSI) && defined(CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS)
-       dsi_create_debugfs_files_irq(dss_debugfs_dir, &dss_debug_fops);
-#endif
-
-       debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
-                       &dss_dump_regs, &dss_debug_fops);
-       debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir,
-                       &dispc_dump_regs, &dss_debug_fops);
-#ifdef CONFIG_OMAP2_DSS_RFBI
-       debugfs_create_file("rfbi", S_IRUGO, dss_debugfs_dir,
-                       &rfbi_dump_regs, &dss_debug_fops);
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
-       dsi_create_debugfs_files_reg(dss_debugfs_dir, &dss_debug_fops);
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
-       debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir,
-                       &venc_dump_regs, &dss_debug_fops);
-#endif
-#ifdef CONFIG_OMAP4_DSS_HDMI
-       debugfs_create_file("hdmi", S_IRUGO, dss_debugfs_dir,
-                       &hdmi_dump_regs, &dss_debug_fops);
-#endif
        return 0;
 }
 
@@ -157,6 +173,19 @@ static void dss_uninitialize_debugfs(void)
        if (dss_debugfs_dir)
                debugfs_remove_recursive(dss_debugfs_dir);
 }
+
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
+{
+       struct dentry *d;
+
+       d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
+                       write, &dss_debug_fops);
+
+       if (IS_ERR(d))
+               return PTR_ERR(d);
+
+       return 0;
+}
 #else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
 static inline int dss_initialize_debugfs(void)
 {
@@ -165,14 +194,18 @@ static inline int dss_initialize_debugfs(void)
 static inline void dss_uninitialize_debugfs(void)
 {
 }
+static inline int dss_debugfs_create_file(const char *name,
+               void (*write)(struct seq_file *))
+{
+       return 0;
+}
 #endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
 
 /* PLATFORM DEVICE */
-static int omap_dss_probe(struct platform_device *pdev)
+static int __init omap_dss_probe(struct platform_device *pdev)
 {
        struct omap_dss_board_info *pdata = pdev->dev.platform_data;
        int r;
-       int i;
 
        core.pdev = pdev;
 
@@ -187,28 +220,13 @@ static int omap_dss_probe(struct platform_device *pdev)
        if (r)
                goto err_debugfs;
 
-       for (i = 0; i < pdata->num_devices; ++i) {
-               struct omap_dss_device *dssdev = pdata->devices[i];
-
-               r = omap_dss_register_device(dssdev);
-               if (r) {
-                       DSSERR("device %d %s register failed %d\n", i,
-                               dssdev->name ?: "unnamed", r);
-
-                       while (--i >= 0)
-                               omap_dss_unregister_device(pdata->devices[i]);
-
-                       goto err_register;
-               }
-
-               if (def_disp_name && strcmp(def_disp_name, dssdev->name) == 0)
-                       pdata->default_device = dssdev;
-       }
+       if (def_disp_name)
+               core.default_display_name = def_disp_name;
+       else if (pdata->default_device)
+               core.default_display_name = pdata->default_device->name;
 
        return 0;
 
-err_register:
-       dss_uninitialize_debugfs();
 err_debugfs:
 
        return r;
@@ -216,17 +234,11 @@ err_debugfs:
 
 static int omap_dss_remove(struct platform_device *pdev)
 {
-       struct omap_dss_board_info *pdata = pdev->dev.platform_data;
-       int i;
-
        dss_uninitialize_debugfs();
 
        dss_uninit_overlays(pdev);
        dss_uninit_overlay_managers(pdev);
 
-       for (i = 0; i < pdata->num_devices; ++i)
-               omap_dss_unregister_device(pdata->devices[i]);
-
        return 0;
 }
 
@@ -251,7 +263,6 @@ static int omap_dss_resume(struct platform_device *pdev)
 }
 
 static struct platform_driver omap_dss_driver = {
-       .probe          = omap_dss_probe,
        .remove         = omap_dss_remove,
        .shutdown       = omap_dss_shutdown,
        .suspend        = omap_dss_suspend,
@@ -326,7 +337,6 @@ static int dss_driver_probe(struct device *dev)
        int r;
        struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
        struct omap_dss_device *dssdev = to_dss_device(dev);
-       struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
        bool force;
 
        DSSDBG("driver_probe: dev %s/%s, drv %s\n",
@@ -335,7 +345,8 @@ static int dss_driver_probe(struct device *dev)
 
        dss_init_device(core.pdev, dssdev);
 
-       force = pdata->default_device == dssdev;
+       force = core.default_display_name &&
+               strcmp(core.default_display_name, dssdev->name) == 0;
        dss_recheck_connections(dssdev, force);
 
        r = dssdrv->probe(dssdev);
@@ -381,6 +392,8 @@ int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
        if (dssdriver->get_recommended_bpp == NULL)
                dssdriver->get_recommended_bpp =
                        omapdss_default_get_recommended_bpp;
+       if (dssdriver->get_timings == NULL)
+               dssdriver->get_timings = omapdss_default_get_timings;
 
        return driver_register(&dssdriver->driver);
 }
@@ -427,27 +440,38 @@ static void omap_dss_dev_release(struct device *dev)
        reset_device(dev, 0);
 }
 
-static int omap_dss_register_device(struct omap_dss_device *dssdev)
+int omap_dss_register_device(struct omap_dss_device *dssdev,
+               struct device *parent, int disp_num)
 {
-       static int dev_num;
-
        WARN_ON(!dssdev->driver_name);
 
        reset_device(&dssdev->dev, 1);
        dssdev->dev.bus = &dss_bus_type;
-       dssdev->dev.parent = &dss_bus;
+       dssdev->dev.parent = parent;
        dssdev->dev.release = omap_dss_dev_release;
-       dev_set_name(&dssdev->dev, "display%d", dev_num++);
+       dev_set_name(&dssdev->dev, "display%d", disp_num);
        return device_register(&dssdev->dev);
 }
 
-static void omap_dss_unregister_device(struct omap_dss_device *dssdev)
+void omap_dss_unregister_device(struct omap_dss_device *dssdev)
 {
        device_unregister(&dssdev->dev);
 }
 
+static int dss_unregister_dss_dev(struct device *dev, void *data)
+{
+       struct omap_dss_device *dssdev = to_dss_device(dev);
+       omap_dss_unregister_device(dssdev);
+       return 0;
+}
+
+void omap_dss_unregister_child_devices(struct device *parent)
+{
+       device_for_each_child(parent, NULL, dss_unregister_dss_dev);
+}
+
 /* BUS */
-static int omap_dss_bus_register(void)
+static int __init omap_dss_bus_register(void)
 {
        int r;
 
@@ -469,12 +493,56 @@ static int omap_dss_bus_register(void)
 }
 
 /* INIT */
+static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
+#ifdef CONFIG_OMAP2_DSS_DPI
+       dpi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+       sdi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_RFBI
+       rfbi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+       venc_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+       dsi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+       hdmi_init_platform_driver,
+#endif
+};
+
+static void (*dss_output_drv_unreg_funcs[])(void) __exitdata = {
+#ifdef CONFIG_OMAP2_DSS_DPI
+       dpi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+       sdi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_RFBI
+       rfbi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+       venc_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+       dsi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+       hdmi_uninit_platform_driver,
+#endif
+};
+
+static bool dss_output_drv_loaded[ARRAY_SIZE(dss_output_drv_reg_funcs)];
 
 static int __init omap_dss_register_drivers(void)
 {
        int r;
+       int i;
 
-       r = platform_driver_register(&omap_dss_driver);
+       r = platform_driver_probe(&omap_dss_driver, omap_dss_probe);
        if (r)
                return r;
 
@@ -490,40 +558,18 @@ static int __init omap_dss_register_drivers(void)
                goto err_dispc;
        }
 
-       r = rfbi_init_platform_driver();
-       if (r) {
-               DSSERR("Failed to initialize rfbi platform driver\n");
-               goto err_rfbi;
-       }
-
-       r = venc_init_platform_driver();
-       if (r) {
-               DSSERR("Failed to initialize venc platform driver\n");
-               goto err_venc;
-       }
-
-       r = dsi_init_platform_driver();
-       if (r) {
-               DSSERR("Failed to initialize DSI platform driver\n");
-               goto err_dsi;
-       }
-
-       r = hdmi_init_platform_driver();
-       if (r) {
-               DSSERR("Failed to initialize hdmi\n");
-               goto err_hdmi;
+       /*
+        * It's ok if the output-driver register fails. It happens, for example,
+        * when there is no output-device (e.g. SDI for OMAP4).
+        */
+       for (i = 0; i < ARRAY_SIZE(dss_output_drv_reg_funcs); ++i) {
+               r = dss_output_drv_reg_funcs[i]();
+               if (r == 0)
+                       dss_output_drv_loaded[i] = true;
        }
 
        return 0;
 
-err_hdmi:
-       dsi_uninit_platform_driver();
-err_dsi:
-       venc_uninit_platform_driver();
-err_venc:
-       rfbi_uninit_platform_driver();
-err_rfbi:
-       dispc_uninit_platform_driver();
 err_dispc:
        dss_uninit_platform_driver();
 err_dss:
@@ -534,10 +580,13 @@ err_dss:
 
 static void __exit omap_dss_unregister_drivers(void)
 {
-       hdmi_uninit_platform_driver();
-       dsi_uninit_platform_driver();
-       venc_uninit_platform_driver();
-       rfbi_uninit_platform_driver();
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dss_output_drv_unreg_funcs); ++i) {
+               if (dss_output_drv_loaded[i])
+                       dss_output_drv_unreg_funcs[i]();
+       }
+
        dispc_uninit_platform_driver();
        dss_uninit_platform_driver();
 
index ee30937482e1156240de18ccaff3f889a1e4637b..4749ac356469ea8645b3e92c1d72512a102f8e36 100644 (file)
@@ -131,23 +131,6 @@ static inline u32 dispc_read_reg(const u16 idx)
        return __raw_readl(dispc.base + idx);
 }
 
-static int dispc_get_ctx_loss_count(void)
-{
-       struct device *dev = &dispc.pdev->dev;
-       struct omap_display_platform_data *pdata = dev->platform_data;
-       struct omap_dss_board_info *board_data = pdata->board_data;
-       int cnt;
-
-       if (!board_data->get_context_loss_count)
-               return -ENOENT;
-
-       cnt = board_data->get_context_loss_count(dev);
-
-       WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
-
-       return cnt;
-}
-
 #define SR(reg) \
        dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
 #define RR(reg) \
@@ -251,7 +234,7 @@ static void dispc_save_context(void)
        if (dss_has_feature(FEAT_CORE_CLK_DIV))
                SR(DIVISOR);
 
-       dispc.ctx_loss_cnt = dispc_get_ctx_loss_count();
+       dispc.ctx_loss_cnt = dss_get_ctx_loss_count(&dispc.pdev->dev);
        dispc.ctx_valid = true;
 
        DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
@@ -266,7 +249,7 @@ static void dispc_restore_context(void)
        if (!dispc.ctx_valid)
                return;
 
-       ctx = dispc_get_ctx_loss_count();
+       ctx = dss_get_ctx_loss_count(&dispc.pdev->dev);
 
        if (ctx >= 0 && ctx == dispc.ctx_loss_cnt)
                return;
@@ -413,14 +396,6 @@ static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
                return false;
 }
 
-static struct omap_dss_device *dispc_mgr_get_device(enum omap_channel channel)
-{
-       struct omap_overlay_manager *mgr =
-               omap_dss_get_overlay_manager(channel);
-
-       return mgr ? mgr->device : NULL;
-}
-
 u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
 {
        switch (channel) {
@@ -432,6 +407,7 @@ u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
                return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -446,6 +422,7 @@ u32 dispc_mgr_get_framedone_irq(enum omap_channel channel)
                return 0;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -764,7 +741,7 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane,
                case OMAP_DSS_COLOR_XRGB16_1555:
                        m = 0xf; break;
                default:
-                       BUG(); break;
+                       BUG(); return;
                }
        } else {
                switch (color_mode) {
@@ -801,13 +778,25 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane,
                case OMAP_DSS_COLOR_XRGB16_1555:
                        m = 0xf; break;
                default:
-                       BUG(); break;
+                       BUG(); return;
                }
        }
 
        REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
 }
 
+static void dispc_ovl_configure_burst_type(enum omap_plane plane,
+               enum omap_dss_rotation_type rotation_type)
+{
+       if (dss_has_feature(FEAT_BURST_2D) == 0)
+               return;
+
+       if (rotation_type == OMAP_DSS_ROT_TILER)
+               REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 1, 29, 29);
+       else
+               REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 0, 29, 29);
+}
+
 void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
 {
        int shift;
@@ -845,6 +834,7 @@ void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
                        break;
                default:
                        BUG();
+                       return;
                }
 
                val = FLD_MOD(val, chan, shift, shift);
@@ -872,6 +862,7 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
                break;
        default:
                BUG();
+               return 0;
        }
 
        val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
@@ -983,20 +974,13 @@ static void dispc_ovl_enable_replication(enum omap_plane plane, bool enable)
        REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift);
 }
 
-void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
+static void dispc_mgr_set_size(enum omap_channel channel, u16 width,
+               u16 height)
 {
        u32 val;
-       BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
-       val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
-       dispc_write_reg(DISPC_SIZE_MGR(channel), val);
-}
 
-void dispc_set_digit_size(u16 width, u16 height)
-{
-       u32 val;
-       BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
        val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
-       dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val);
+       dispc_write_reg(DISPC_SIZE_MGR(channel), val);
 }
 
 static void dispc_read_plane_fifo_sizes(void)
@@ -1063,7 +1047,8 @@ void dispc_enable_fifomerge(bool enable)
 }
 
 void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
-               u32 *fifo_low, u32 *fifo_high, bool use_fifomerge)
+               u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
+               bool manual_update)
 {
        /*
         * All sizes are in bytes. Both the buffer and burst are made of
@@ -1091,7 +1076,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
         * combined fifo size
         */
 
-       if (dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
+       if (manual_update && dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
                *fifo_low = ovl_fifo_size - burst_size * 2;
                *fifo_high = total_fifo_size - burst_size;
        } else {
@@ -1185,6 +1170,94 @@ static void dispc_ovl_set_scale_param(enum omap_plane plane,
        dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp);
 }
 
+static void dispc_ovl_set_accu_uv(enum omap_plane plane,
+               u16 orig_width, u16 orig_height, u16 out_width, u16 out_height,
+               bool ilace, enum omap_color_mode color_mode, u8 rotation)
+{
+       int h_accu2_0, h_accu2_1;
+       int v_accu2_0, v_accu2_1;
+       int chroma_hinc, chroma_vinc;
+       int idx;
+
+       struct accu {
+               s8 h0_m, h0_n;
+               s8 h1_m, h1_n;
+               s8 v0_m, v0_n;
+               s8 v1_m, v1_n;
+       };
+
+       const struct accu *accu_table;
+       const struct accu *accu_val;
+
+       static const struct accu accu_nv12[4] = {
+               {  0, 1,  0, 1 , -1, 2, 0, 1 },
+               {  1, 2, -3, 4 ,  0, 1, 0, 1 },
+               { -1, 1,  0, 1 , -1, 2, 0, 1 },
+               { -1, 2, -1, 2 , -1, 1, 0, 1 },
+       };
+
+       static const struct accu accu_nv12_ilace[4] = {
+               {  0, 1,  0, 1 , -3, 4, -1, 4 },
+               { -1, 4, -3, 4 ,  0, 1,  0, 1 },
+               { -1, 1,  0, 1 , -1, 4, -3, 4 },
+               { -3, 4, -3, 4 , -1, 1,  0, 1 },
+       };
+
+       static const struct accu accu_yuv[4] = {
+               {  0, 1, 0, 1,  0, 1, 0, 1 },
+               {  0, 1, 0, 1,  0, 1, 0, 1 },
+               { -1, 1, 0, 1,  0, 1, 0, 1 },
+               {  0, 1, 0, 1, -1, 1, 0, 1 },
+       };
+
+       switch (rotation) {
+       case OMAP_DSS_ROT_0:
+               idx = 0;
+               break;
+       case OMAP_DSS_ROT_90:
+               idx = 1;
+               break;
+       case OMAP_DSS_ROT_180:
+               idx = 2;
+               break;
+       case OMAP_DSS_ROT_270:
+               idx = 3;
+               break;
+       default:
+               BUG();
+               return;
+       }
+
+       switch (color_mode) {
+       case OMAP_DSS_COLOR_NV12:
+               if (ilace)
+                       accu_table = accu_nv12_ilace;
+               else
+                       accu_table = accu_nv12;
+               break;
+       case OMAP_DSS_COLOR_YUV2:
+       case OMAP_DSS_COLOR_UYVY:
+               accu_table = accu_yuv;
+               break;
+       default:
+               BUG();
+               return;
+       }
+
+       accu_val = &accu_table[idx];
+
+       chroma_hinc = 1024 * orig_width / out_width;
+       chroma_vinc = 1024 * orig_height / out_height;
+
+       h_accu2_0 = (accu_val->h0_m * chroma_hinc / accu_val->h0_n) % 1024;
+       h_accu2_1 = (accu_val->h1_m * chroma_hinc / accu_val->h1_n) % 1024;
+       v_accu2_0 = (accu_val->v0_m * chroma_vinc / accu_val->v0_n) % 1024;
+       v_accu2_1 = (accu_val->v1_m * chroma_vinc / accu_val->v1_n) % 1024;
+
+       dispc_ovl_set_vid_accu2_0(plane, h_accu2_0, v_accu2_0);
+       dispc_ovl_set_vid_accu2_1(plane, h_accu2_1, v_accu2_1);
+}
+
 static void dispc_ovl_set_scaling_common(enum omap_plane plane,
                u16 orig_width, u16 orig_height,
                u16 out_width, u16 out_height,
@@ -1258,6 +1331,10 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
                REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8);
                return;
        }
+
+       dispc_ovl_set_accu_uv(plane, orig_width, orig_height, out_width,
+                       out_height, ilace, color_mode, rotation);
+
        switch (color_mode) {
        case OMAP_DSS_COLOR_NV12:
                /* UV is subsampled by 2 vertically*/
@@ -1280,6 +1357,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
                break;
        default:
                BUG();
+               return;
        }
 
        if (out_width != orig_width)
@@ -1297,9 +1375,6 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
        REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
        /* set V scaling */
        REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);
-
-       dispc_ovl_set_vid_accu2_0(plane, 0x80, 0);
-       dispc_ovl_set_vid_accu2_1(plane, 0x80, 0);
 }
 
 static void dispc_ovl_set_scaling(enum omap_plane plane,
@@ -1410,6 +1485,7 @@ static int color_mode_to_bpp(enum omap_color_mode color_mode)
                return 32;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -1423,6 +1499,7 @@ static s32 pixinc(int pixels, u8 ps)
                return 1 - (-pixels + 1) * ps;
        else
                BUG();
+               return 0;
 }
 
 static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
@@ -1431,7 +1508,7 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
                enum omap_color_mode color_mode, bool fieldmode,
                unsigned int field_offset,
                unsigned *offset0, unsigned *offset1,
-               s32 *row_inc, s32 *pix_inc)
+               s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
 {
        u8 ps;
 
@@ -1477,10 +1554,10 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
                else
                        *offset0 = 0;
 
-               *row_inc = pixinc(1 + (screen_width - width) +
-                               (fieldmode ? screen_width : 0),
-                               ps);
-               *pix_inc = pixinc(1, ps);
+               *row_inc = pixinc(1 +
+                       (y_predecim * screen_width - x_predecim * width) +
+                       (fieldmode ? screen_width : 0), ps);
+               *pix_inc = pixinc(x_predecim, ps);
                break;
 
        case OMAP_DSS_ROT_0 + 4:
@@ -1498,14 +1575,15 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = field_offset * screen_width * ps;
                else
                        *offset0 = 0;
-               *row_inc = pixinc(1 - (screen_width + width) -
-                               (fieldmode ? screen_width : 0),
-                               ps);
-               *pix_inc = pixinc(1, ps);
+               *row_inc = pixinc(1 -
+                       (y_predecim * screen_width + x_predecim * width) -
+                       (fieldmode ? screen_width : 0), ps);
+               *pix_inc = pixinc(x_predecim, ps);
                break;
 
        default:
                BUG();
+               return;
        }
 }
 
@@ -1515,7 +1593,7 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                enum omap_color_mode color_mode, bool fieldmode,
                unsigned int field_offset,
                unsigned *offset0, unsigned *offset1,
-               s32 *row_inc, s32 *pix_inc)
+               s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
 {
        u8 ps;
        u16 fbw, fbh;
@@ -1557,10 +1635,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = *offset1 + field_offset * screen_width * ps;
                else
                        *offset0 = *offset1;
-               *row_inc = pixinc(1 + (screen_width - fbw) +
-                               (fieldmode ? screen_width : 0),
-                               ps);
-               *pix_inc = pixinc(1, ps);
+               *row_inc = pixinc(1 +
+                       (y_predecim * screen_width - fbw * x_predecim) +
+                       (fieldmode ? screen_width : 0), ps);
+               if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+                       color_mode == OMAP_DSS_COLOR_UYVY)
+                       *pix_inc = pixinc(x_predecim, 2 * ps);
+               else
+                       *pix_inc = pixinc(x_predecim, ps);
                break;
        case OMAP_DSS_ROT_90:
                *offset1 = screen_width * (fbh - 1) * ps;
@@ -1568,9 +1650,9 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = *offset1 + field_offset * ps;
                else
                        *offset0 = *offset1;
-               *row_inc = pixinc(screen_width * (fbh - 1) + 1 +
-                               (fieldmode ? 1 : 0), ps);
-               *pix_inc = pixinc(-screen_width, ps);
+               *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) +
+                               y_predecim + (fieldmode ? 1 : 0), ps);
+               *pix_inc = pixinc(-x_predecim * screen_width, ps);
                break;
        case OMAP_DSS_ROT_180:
                *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
@@ -1579,10 +1661,13 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                else
                        *offset0 = *offset1;
                *row_inc = pixinc(-1 -
-                               (screen_width - fbw) -
-                               (fieldmode ? screen_width : 0),
-                               ps);
-               *pix_inc = pixinc(-1, ps);
+                       (y_predecim * screen_width - fbw * x_predecim) -
+                       (fieldmode ? screen_width : 0), ps);
+               if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+                       color_mode == OMAP_DSS_COLOR_UYVY)
+                       *pix_inc = pixinc(-x_predecim, 2 * ps);
+               else
+                       *pix_inc = pixinc(-x_predecim, ps);
                break;
        case OMAP_DSS_ROT_270:
                *offset1 = (fbw - 1) * ps;
@@ -1590,9 +1675,9 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = *offset1 - field_offset * ps;
                else
                        *offset0 = *offset1;
-               *row_inc = pixinc(-screen_width * (fbh - 1) - 1 -
-                               (fieldmode ? 1 : 0), ps);
-               *pix_inc = pixinc(screen_width, ps);
+               *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) -
+                               y_predecim - (fieldmode ? 1 : 0), ps);
+               *pix_inc = pixinc(x_predecim * screen_width, ps);
                break;
 
        /* mirroring */
@@ -1602,10 +1687,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = *offset1 + field_offset * screen_width * ps;
                else
                        *offset0 = *offset1;
-               *row_inc = pixinc(screen_width * 2 - 1 +
+               *row_inc = pixinc(y_predecim * screen_width * 2 - 1 +
                                (fieldmode ? screen_width : 0),
                                ps);
-               *pix_inc = pixinc(-1, ps);
+               if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+                       color_mode == OMAP_DSS_COLOR_UYVY)
+                       *pix_inc = pixinc(-x_predecim, 2 * ps);
+               else
+                       *pix_inc = pixinc(-x_predecim, ps);
                break;
 
        case OMAP_DSS_ROT_90 + 4:
@@ -1614,10 +1703,10 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = *offset1 + field_offset * ps;
                else
                        *offset0 = *offset1;
-               *row_inc = pixinc(-screen_width * (fbh - 1) + 1 +
-                               (fieldmode ? 1 : 0),
+               *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) +
+                               y_predecim + (fieldmode ? 1 : 0),
                                ps);
-               *pix_inc = pixinc(screen_width, ps);
+               *pix_inc = pixinc(x_predecim * screen_width, ps);
                break;
 
        case OMAP_DSS_ROT_180 + 4:
@@ -1626,10 +1715,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = *offset1 - field_offset * screen_width * ps;
                else
                        *offset0 = *offset1;
-               *row_inc = pixinc(1 - screen_width * 2 -
+               *row_inc = pixinc(1 - y_predecim * screen_width * 2 -
                                (fieldmode ? screen_width : 0),
                                ps);
-               *pix_inc = pixinc(1, ps);
+               if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+                       color_mode == OMAP_DSS_COLOR_UYVY)
+                       *pix_inc = pixinc(x_predecim, 2 * ps);
+               else
+                       *pix_inc = pixinc(x_predecim, ps);
                break;
 
        case OMAP_DSS_ROT_270 + 4:
@@ -1638,34 +1731,130 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
                        *offset0 = *offset1 - field_offset * ps;
                else
                        *offset0 = *offset1;
-               *row_inc = pixinc(screen_width * (fbh - 1) - 1 -
-                               (fieldmode ? 1 : 0),
+               *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) -
+                               y_predecim - (fieldmode ? 1 : 0),
                                ps);
-               *pix_inc = pixinc(-screen_width, ps);
+               *pix_inc = pixinc(-x_predecim * screen_width, ps);
                break;
 
        default:
                BUG();
+               return;
+       }
+}
+
+static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
+               enum omap_color_mode color_mode, bool fieldmode,
+               unsigned int field_offset, unsigned *offset0, unsigned *offset1,
+               s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
+{
+       u8 ps;
+
+       switch (color_mode) {
+       case OMAP_DSS_COLOR_CLUT1:
+       case OMAP_DSS_COLOR_CLUT2:
+       case OMAP_DSS_COLOR_CLUT4:
+       case OMAP_DSS_COLOR_CLUT8:
+               BUG();
+               return;
+       default:
+               ps = color_mode_to_bpp(color_mode) / 8;
+               break;
        }
+
+       DSSDBG("scrw %d, width %d\n", screen_width, width);
+
+       /*
+        * field 0 = even field = bottom field
+        * field 1 = odd field = top field
+        */
+       *offset1 = 0;
+       if (field_offset)
+               *offset0 = *offset1 + field_offset * screen_width * ps;
+       else
+               *offset0 = *offset1;
+       *row_inc = pixinc(1 + (y_predecim * screen_width - width * x_predecim) +
+                       (fieldmode ? screen_width : 0), ps);
+       if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+               color_mode == OMAP_DSS_COLOR_UYVY)
+               *pix_inc = pixinc(x_predecim, 2 * ps);
+       else
+               *pix_inc = pixinc(x_predecim, ps);
 }
 
-static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
+/*
+ * This function is used to avoid synclosts in OMAP3, because of some
+ * undocumented horizontal position and timing related limitations.
+ */
+static int check_horiz_timing_omap3(enum omap_channel channel,
+               const struct omap_video_timings *t, u16 pos_x,
+               u16 width, u16 height, u16 out_width, u16 out_height)
+{
+       int DS = DIV_ROUND_UP(height, out_height);
+       unsigned long nonactive, lclk, pclk;
+       static const u8 limits[3] = { 8, 10, 20 };
+       u64 val, blank;
+       int i;
+
+       nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
+       pclk = dispc_mgr_pclk_rate(channel);
+       if (dispc_mgr_is_lcd(channel))
+               lclk = dispc_mgr_lclk_rate(channel);
+       else
+               lclk = dispc_fclk_rate();
+
+       i = 0;
+       if (out_height < height)
+               i++;
+       if (out_width < width)
+               i++;
+       blank = div_u64((u64)(t->hbp + t->hsw + t->hfp) * lclk, pclk);
+       DSSDBG("blanking period + ppl = %llu (limit = %u)\n", blank, limits[i]);
+       if (blank <= limits[i])
+               return -EINVAL;
+
+       /*
+        * Pixel data should be prepared before visible display point starts.
+        * So, at least DS-2 lines must have already been fetched by DISPC
+        * during nonactive - pos_x period.
+        */
+       val = div_u64((u64)(nonactive - pos_x) * lclk, pclk);
+       DSSDBG("(nonactive - pos_x) * pcd = %llu max(0, DS - 2) * width = %d\n",
+               val, max(0, DS - 2) * width);
+       if (val < max(0, DS - 2) * width)
+               return -EINVAL;
+
+       /*
+        * All lines need to be refilled during the nonactive period of which
+        * only one line can be loaded during the active period. So, at least
+        * DS - 1 lines should be loaded during nonactive period.
+        */
+       val =  div_u64((u64)nonactive * lclk, pclk);
+       DSSDBG("nonactive * pcd  = %llu, max(0, DS - 1) * width = %d\n",
+               val, max(0, DS - 1) * width);
+       if (val < max(0, DS - 1) * width)
+               return -EINVAL;
+
+       return 0;
+}
+
+static unsigned long calc_core_clk_five_taps(enum omap_channel channel,
+               const struct omap_video_timings *mgr_timings, u16 width,
                u16 height, u16 out_width, u16 out_height,
                enum omap_color_mode color_mode)
 {
-       u32 fclk = 0;
+       u32 core_clk = 0;
        u64 tmp, pclk = dispc_mgr_pclk_rate(channel);
 
        if (height <= out_height && width <= out_width)
                return (unsigned long) pclk;
 
        if (height > out_height) {
-               struct omap_dss_device *dssdev = dispc_mgr_get_device(channel);
-               unsigned int ppl = dssdev->panel.timings.x_res;
+               unsigned int ppl = mgr_timings->x_res;
 
                tmp = pclk * height * out_width;
                do_div(tmp, 2 * out_height * ppl);
-               fclk = tmp;
+               core_clk = tmp;
 
                if (height > 2 * out_height) {
                        if (ppl == out_width)
@@ -1673,23 +1862,23 @@ static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
 
                        tmp = pclk * (height - 2 * out_height) * out_width;
                        do_div(tmp, 2 * out_height * (ppl - out_width));
-                       fclk = max(fclk, (u32) tmp);
+                       core_clk = max_t(u32, core_clk, tmp);
                }
        }
 
        if (width > out_width) {
                tmp = pclk * width;
                do_div(tmp, out_width);
-               fclk = max(fclk, (u32) tmp);
+               core_clk = max_t(u32, core_clk, tmp);
 
                if (color_mode == OMAP_DSS_COLOR_RGB24U)
-                       fclk <<= 1;
+                       core_clk <<= 1;
        }
 
-       return fclk;
+       return core_clk;
 }
 
-static unsigned long calc_fclk(enum omap_channel channel, u16 width,
+static unsigned long calc_core_clk(enum omap_channel channel, u16 width,
                u16 height, u16 out_width, u16 out_height)
 {
        unsigned int hf, vf;
@@ -1730,15 +1919,20 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width,
 }
 
 static int dispc_ovl_calc_scaling(enum omap_plane plane,
-               enum omap_channel channel, u16 width, u16 height,
-               u16 out_width, u16 out_height,
-               enum omap_color_mode color_mode, bool *five_taps)
+               enum omap_channel channel,
+               const struct omap_video_timings *mgr_timings,
+               u16 width, u16 height, u16 out_width, u16 out_height,
+               enum omap_color_mode color_mode, bool *five_taps,
+               int *x_predecim, int *y_predecim, u16 pos_x)
 {
        struct omap_overlay *ovl = omap_dss_get_overlay(plane);
        const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
        const int maxsinglelinewidth =
                                dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
-       unsigned long fclk = 0;
+       const int max_decim_limit = 16;
+       unsigned long core_clk = 0;
+       int decim_x, decim_y, error, min_factor;
+       u16 in_width, in_height, in_width_max = 0;
 
        if (width == out_width && height == out_height)
                return 0;
@@ -1746,64 +1940,154 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
        if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
                return -EINVAL;
 
-       if (out_width < width / maxdownscale ||
-                       out_width > width * 8)
+       *x_predecim = max_decim_limit;
+       *y_predecim = max_decim_limit;
+
+       if (color_mode == OMAP_DSS_COLOR_CLUT1 ||
+           color_mode == OMAP_DSS_COLOR_CLUT2 ||
+           color_mode == OMAP_DSS_COLOR_CLUT4 ||
+           color_mode == OMAP_DSS_COLOR_CLUT8) {
+               *x_predecim = 1;
+               *y_predecim = 1;
+               *five_taps = false;
+               return 0;
+       }
+
+       decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxdownscale);
+       decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxdownscale);
+
+       min_factor = min(decim_x, decim_y);
+
+       if (decim_x > *x_predecim || out_width > width * 8)
                return -EINVAL;
 
-       if (out_height < height / maxdownscale ||
-                       out_height > height * 8)
+       if (decim_y > *y_predecim || out_height > height * 8)
                return -EINVAL;
 
        if (cpu_is_omap24xx()) {
-               if (width > maxsinglelinewidth)
-                       DSSERR("Cannot scale max input width exceeded");
                *five_taps = false;
-               fclk = calc_fclk(channel, width, height, out_width,
-                                                               out_height);
+
+               do {
+                       in_height = DIV_ROUND_UP(height, decim_y);
+                       in_width = DIV_ROUND_UP(width, decim_x);
+                       core_clk = calc_core_clk(channel, in_width, in_height,
+                                       out_width, out_height);
+                       error = (in_width > maxsinglelinewidth || !core_clk ||
+                               core_clk > dispc_core_clk_rate());
+                       if (error) {
+                               if (decim_x == decim_y) {
+                                       decim_x = min_factor;
+                                       decim_y++;
+                               } else {
+                                       swap(decim_x, decim_y);
+                                       if (decim_x < decim_y)
+                                               decim_x++;
+                               }
+                       }
+               } while (decim_x <= *x_predecim && decim_y <= *y_predecim &&
+                               error);
+
+               if (in_width > maxsinglelinewidth) {
+                       DSSERR("Cannot scale max input width exceeded");
+                       return -EINVAL;
+               }
        } else if (cpu_is_omap34xx()) {
-               if (width > (maxsinglelinewidth * 2)) {
+
+               do {
+                       in_height = DIV_ROUND_UP(height, decim_y);
+                       in_width = DIV_ROUND_UP(width, decim_x);
+                       core_clk = calc_core_clk_five_taps(channel, mgr_timings,
+                               in_width, in_height, out_width, out_height,
+                               color_mode);
+
+                       error = check_horiz_timing_omap3(channel, mgr_timings,
+                               pos_x, in_width, in_height, out_width,
+                               out_height);
+
+                       if (in_width > maxsinglelinewidth)
+                               if (in_height > out_height &&
+                                       in_height < out_height * 2)
+                                       *five_taps = false;
+                       if (!*five_taps)
+                               core_clk = calc_core_clk(channel, in_width,
+                                       in_height, out_width, out_height);
+                       error = (error || in_width > maxsinglelinewidth * 2 ||
+                               (in_width > maxsinglelinewidth && *five_taps) ||
+                               !core_clk || core_clk > dispc_core_clk_rate());
+                       if (error) {
+                               if (decim_x == decim_y) {
+                                       decim_x = min_factor;
+                                       decim_y++;
+                               } else {
+                                       swap(decim_x, decim_y);
+                                       if (decim_x < decim_y)
+                                               decim_x++;
+                               }
+                       }
+               } while (decim_x <= *x_predecim && decim_y <= *y_predecim
+                       && error);
+
+               if (check_horiz_timing_omap3(channel, mgr_timings, pos_x, width,
+                       height, out_width, out_height)){
+                               DSSERR("horizontal timing too tight\n");
+                               return -EINVAL;
+               }
+
+               if (in_width > (maxsinglelinewidth * 2)) {
                        DSSERR("Cannot setup scaling");
                        DSSERR("width exceeds maximum width possible");
                        return -EINVAL;
                }
-               fclk = calc_fclk_five_taps(channel, width, height, out_width,
-                                               out_height, color_mode);
-               if (width > maxsinglelinewidth) {
-                       if (height > out_height && height < out_height * 2)
-                               *five_taps = false;
-                       else {
-                               DSSERR("cannot setup scaling with five taps");
-                               return -EINVAL;
-                       }
+
+               if (in_width > maxsinglelinewidth && *five_taps) {
+                       DSSERR("cannot setup scaling with five taps");
+                       return -EINVAL;
                }
-               if (!*five_taps)
-                       fclk = calc_fclk(channel, width, height, out_width,
-                                       out_height);
        } else {
-               if (width > maxsinglelinewidth) {
+               int decim_x_min = decim_x;
+               in_height = DIV_ROUND_UP(height, decim_y);
+               in_width_max = dispc_core_clk_rate() /
+                               DIV_ROUND_UP(dispc_mgr_pclk_rate(channel),
+                                               out_width);
+               decim_x = DIV_ROUND_UP(width, in_width_max);
+
+               decim_x = decim_x > decim_x_min ? decim_x : decim_x_min;
+               if (decim_x > *x_predecim)
+                       return -EINVAL;
+
+               do {
+                       in_width = DIV_ROUND_UP(width, decim_x);
+               } while (decim_x <= *x_predecim &&
+                               in_width > maxsinglelinewidth && decim_x++);
+
+               if (in_width > maxsinglelinewidth) {
                        DSSERR("Cannot scale width exceeds max line width");
                        return -EINVAL;
                }
-               fclk = calc_fclk(channel, width, height, out_width,
-                               out_height);
+
+               core_clk = calc_core_clk(channel, in_width, in_height,
+                               out_width, out_height);
        }
 
-       DSSDBG("required fclk rate = %lu Hz\n", fclk);
-       DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
+       DSSDBG("required core clk rate = %lu Hz\n", core_clk);
+       DSSDBG("current core clk rate = %lu Hz\n", dispc_core_clk_rate());
 
-       if (!fclk || fclk > dispc_fclk_rate()) {
+       if (!core_clk || core_clk > dispc_core_clk_rate()) {
                DSSERR("failed to set up scaling, "
-                       "required fclk rate = %lu Hz, "
-                       "current fclk rate = %lu Hz\n",
-                       fclk, dispc_fclk_rate());
+                       "required core clk rate = %lu Hz, "
+                       "current core clk rate = %lu Hz\n",
+                       core_clk, dispc_core_clk_rate());
                return -EINVAL;
        }
 
+       *x_predecim = decim_x;
+       *y_predecim = decim_y;
        return 0;
 }
 
 int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
-               bool ilace, bool replication)
+               bool ilace, bool replication,
+               const struct omap_video_timings *mgr_timings)
 {
        struct omap_overlay *ovl = omap_dss_get_overlay(plane);
        bool five_taps = true;
@@ -1814,8 +2098,11 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
        s32 pix_inc;
        u16 frame_height = oi->height;
        unsigned int field_offset = 0;
-       u16 outw, outh;
+       u16 in_height = oi->height;
+       u16 in_width = oi->width;
+       u16 out_width, out_height;
        enum omap_channel channel;
+       int x_predecim = 1, y_predecim = 1;
 
        channel = dispc_ovl_get_channel_out(plane);
 
@@ -1829,32 +2116,35 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
        if (oi->paddr == 0)
                return -EINVAL;
 
-       outw = oi->out_width == 0 ? oi->width : oi->out_width;
-       outh = oi->out_height == 0 ? oi->height : oi->out_height;
+       out_width = oi->out_width == 0 ? oi->width : oi->out_width;
+       out_height = oi->out_height == 0 ? oi->height : oi->out_height;
 
-       if (ilace && oi->height == outh)
+       if (ilace && oi->height == out_height)
                fieldmode = 1;
 
        if (ilace) {
                if (fieldmode)
-                       oi->height /= 2;
+                       in_height /= 2;
                oi->pos_y /= 2;
-               outh /= 2;
+               out_height /= 2;
 
                DSSDBG("adjusting for ilace: height %d, pos_y %d, "
                                "out_height %d\n",
-                               oi->height, oi->pos_y, outh);
+                               in_height, oi->pos_y, out_height);
        }
 
        if (!dss_feat_color_mode_supported(plane, oi->color_mode))
                return -EINVAL;
 
-       r = dispc_ovl_calc_scaling(plane, channel, oi->width, oi->height,
-                       outw, outh, oi->color_mode,
-                       &five_taps);
+       r = dispc_ovl_calc_scaling(plane, channel, mgr_timings, in_width,
+                       in_height, out_width, out_height, oi->color_mode,
+                       &five_taps, &x_predecim, &y_predecim, oi->pos_x);
        if (r)
                return r;
 
+       in_width = DIV_ROUND_UP(in_width, x_predecim);
+       in_height = DIV_ROUND_UP(in_height, y_predecim);
+
        if (oi->color_mode == OMAP_DSS_COLOR_YUV2 ||
                        oi->color_mode == OMAP_DSS_COLOR_UYVY ||
                        oi->color_mode == OMAP_DSS_COLOR_NV12)
@@ -1868,32 +2158,46 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
                 * so the integer part must be added to the base address of the
                 * bottom field.
                 */
-               if (!oi->height || oi->height == outh)
+               if (!in_height || in_height == out_height)
                        field_offset = 0;
                else
-                       field_offset = oi->height / outh / 2;
+                       field_offset = in_height / out_height / 2;
        }
 
        /* Fields are independent but interleaved in memory. */
        if (fieldmode)
                field_offset = 1;
 
-       if (oi->rotation_type == OMAP_DSS_ROT_DMA)
+       offset0 = 0;
+       offset1 = 0;
+       row_inc = 0;
+       pix_inc = 0;
+
+       if (oi->rotation_type == OMAP_DSS_ROT_TILER)
+               calc_tiler_rotation_offset(oi->screen_width, in_width,
+                               oi->color_mode, fieldmode, field_offset,
+                               &offset0, &offset1, &row_inc, &pix_inc,
+                               x_predecim, y_predecim);
+       else if (oi->rotation_type == OMAP_DSS_ROT_DMA)
                calc_dma_rotation_offset(oi->rotation, oi->mirror,
-                               oi->screen_width, oi->width, frame_height,
+                               oi->screen_width, in_width, frame_height,
                                oi->color_mode, fieldmode, field_offset,
-                               &offset0, &offset1, &row_inc, &pix_inc);
+                               &offset0, &offset1, &row_inc, &pix_inc,
+                               x_predecim, y_predecim);
        else
                calc_vrfb_rotation_offset(oi->rotation, oi->mirror,
-                               oi->screen_width, oi->width, frame_height,
+                               oi->screen_width, in_width, frame_height,
                                oi->color_mode, fieldmode, field_offset,
-                               &offset0, &offset1, &row_inc, &pix_inc);
+                               &offset0, &offset1, &row_inc, &pix_inc,
+                               x_predecim, y_predecim);
 
        DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
                        offset0, offset1, row_inc, pix_inc);
 
        dispc_ovl_set_color_mode(plane, oi->color_mode);
 
+       dispc_ovl_configure_burst_type(plane, oi->rotation_type);
+
        dispc_ovl_set_ba0(plane, oi->paddr + offset0);
        dispc_ovl_set_ba1(plane, oi->paddr + offset1);
 
@@ -1906,19 +2210,18 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
        dispc_ovl_set_row_inc(plane, row_inc);
        dispc_ovl_set_pix_inc(plane, pix_inc);
 
-       DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, oi->width,
-                       oi->height, outw, outh);
+       DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, in_width,
+                       in_height, out_width, out_height);
 
        dispc_ovl_set_pos(plane, oi->pos_x, oi->pos_y);
 
-       dispc_ovl_set_pic_size(plane, oi->width, oi->height);
+       dispc_ovl_set_pic_size(plane, in_width, in_height);
 
        if (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) {
-               dispc_ovl_set_scaling(plane, oi->width, oi->height,
-                                  outw, outh,
-                                  ilace, five_taps, fieldmode,
+               dispc_ovl_set_scaling(plane, in_width, in_height, out_width,
+                                  out_height, ilace, five_taps, fieldmode,
                                   oi->color_mode, oi->rotation);
-               dispc_ovl_set_vid_size(plane, outw, outh);
+               dispc_ovl_set_vid_size(plane, out_width, out_height);
                dispc_ovl_set_vid_color_conv(plane, cconv);
        }
 
@@ -2087,8 +2390,10 @@ bool dispc_mgr_is_enabled(enum omap_channel channel)
                return !!REG_GET(DISPC_CONTROL, 1, 1);
        else if (channel == OMAP_DSS_CHANNEL_LCD2)
                return !!REG_GET(DISPC_CONTROL2, 0, 0);
-       else
+       else {
                BUG();
+               return false;
+       }
 }
 
 void dispc_mgr_enable(enum omap_channel channel, bool enable)
@@ -2285,6 +2590,12 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable)
                REG_FLD_MOD(DISPC_CONTROL, enable, 11, 11);
 }
 
+static bool _dispc_mgr_size_ok(u16 width, u16 height)
+{
+       return width <= dss_feat_get_param_max(FEAT_PARAM_MGR_WIDTH) &&
+               height <= dss_feat_get_param_max(FEAT_PARAM_MGR_HEIGHT);
+}
+
 static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
                int vsw, int vfp, int vbp)
 {
@@ -2309,11 +2620,20 @@ static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
        return true;
 }
 
-bool dispc_lcd_timings_ok(struct omap_video_timings *timings)
+bool dispc_mgr_timings_ok(enum omap_channel channel,
+               const struct omap_video_timings *timings)
 {
-       return _dispc_lcd_timings_ok(timings->hsw, timings->hfp,
-                       timings->hbp, timings->vsw,
-                       timings->vfp, timings->vbp);
+       bool timings_ok;
+
+       timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res);
+
+       if (dispc_mgr_is_lcd(channel))
+               timings_ok =  timings_ok && _dispc_lcd_timings_ok(timings->hsw,
+                                               timings->hfp, timings->hbp,
+                                               timings->vsw, timings->vfp,
+                                               timings->vbp);
+
+       return timings_ok;
 }
 
 static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
@@ -2340,37 +2660,45 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
 }
 
 /* change name to mode? */
-void dispc_mgr_set_lcd_timings(enum omap_channel channel,
+void dispc_mgr_set_timings(enum omap_channel channel,
                struct omap_video_timings *timings)
 {
        unsigned xtot, ytot;
        unsigned long ht, vt;
+       struct omap_video_timings t = *timings;
+
+       DSSDBG("channel %d xres %u yres %u\n", channel, t.x_res, t.y_res);
 
-       if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp,
-                               timings->hbp, timings->vsw,
-                               timings->vfp, timings->vbp))
+       if (!dispc_mgr_timings_ok(channel, &t)) {
                BUG();
+               return;
+       }
+
+       if (dispc_mgr_is_lcd(channel)) {
+               _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw,
+                               t.vfp, t.vbp);
+
+               xtot = t.x_res + t.hfp + t.hsw + t.hbp;
+               ytot = t.y_res + t.vfp + t.vsw + t.vbp;
 
-       _dispc_mgr_set_lcd_timings(channel, timings->hsw, timings->hfp,
-                       timings->hbp, timings->vsw, timings->vfp,
-                       timings->vbp);
+               ht = (timings->pixel_clock * 1000) / xtot;
+               vt = (timings->pixel_clock * 1000) / xtot / ytot;
 
-       dispc_mgr_set_lcd_size(channel, timings->x_res, timings->y_res);
+               DSSDBG("pck %u\n", timings->pixel_clock);
+               DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
+                       t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp);
 
-       xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp;
-       ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp;
+               DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
+       } else {
+               enum dss_hdmi_venc_clk_source_select source;
 
-       ht = (timings->pixel_clock * 1000) / xtot;
-       vt = (timings->pixel_clock * 1000) / xtot / ytot;
+               source = dss_get_hdmi_venc_clk_source();
 
-       DSSDBG("channel %d xres %u yres %u\n", channel, timings->x_res,
-                       timings->y_res);
-       DSSDBG("pck %u\n", timings->pixel_clock);
-       DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
-                       timings->hsw, timings->hfp, timings->hbp,
-                       timings->vsw, timings->vfp, timings->vbp);
+               if (source == DSS_VENC_TV_CLK)
+                       t.y_res /= 2;
+       }
 
-       DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
+       dispc_mgr_set_size(channel, t.x_res, t.y_res);
 }
 
 static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
@@ -2411,6 +2739,7 @@ unsigned long dispc_fclk_rate(void)
                break;
        default:
                BUG();
+               return 0;
        }
 
        return r;
@@ -2441,6 +2770,7 @@ unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
                break;
        default:
                BUG();
+               return 0;
        }
 
        return r / lcd;
@@ -2462,20 +2792,35 @@ unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
 
                return r / pcd;
        } else {
-               struct omap_dss_device *dssdev =
-                       dispc_mgr_get_device(channel);
+               enum dss_hdmi_venc_clk_source_select source;
 
-               switch (dssdev->type) {
-               case OMAP_DISPLAY_TYPE_VENC:
+               source = dss_get_hdmi_venc_clk_source();
+
+               switch (source) {
+               case DSS_VENC_TV_CLK:
                        return venc_get_pixel_clock();
-               case OMAP_DISPLAY_TYPE_HDMI:
+               case DSS_HDMI_M_PCLK:
                        return hdmi_get_pixel_clock();
                default:
                        BUG();
+                       return 0;
                }
        }
 }
 
+unsigned long dispc_core_clk_rate(void)
+{
+       int lcd;
+       unsigned long fclk = dispc_fclk_rate();
+
+       if (dss_has_feature(FEAT_CORE_CLK_DIV))
+               lcd = REG_GET(DISPC_DIVISOR, 23, 16);
+       else
+               lcd = REG_GET(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD), 23, 16);
+
+       return fclk / lcd;
+}
+
 void dispc_dump_clocks(struct seq_file *s)
 {
        int lcd, pcd;
@@ -2588,7 +2933,7 @@ void dispc_dump_irqs(struct seq_file *s)
 }
 #endif
 
-void dispc_dump_regs(struct seq_file *s)
+static void dispc_dump_regs(struct seq_file *s)
 {
        int i, j;
        const char *mgr_names[] = {
@@ -3247,27 +3592,6 @@ int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
        return 0;
 }
 
-#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
-void dispc_fake_vsync_irq(void)
-{
-       u32 irqstatus = DISPC_IRQ_VSYNC;
-       int i;
-
-       WARN_ON(!in_interrupt());
-
-       for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
-               struct omap_dispc_isr_data *isr_data;
-               isr_data = &dispc.registered_isr[i];
-
-               if (!isr_data->isr)
-                       continue;
-
-               if (isr_data->mask & irqstatus)
-                       isr_data->isr(isr_data->arg, irqstatus);
-       }
-}
-#endif
-
 static void _omap_dispc_initialize_irq(void)
 {
        unsigned long flags;
@@ -3330,7 +3654,7 @@ static void _omap_dispc_initial_config(void)
 }
 
 /* DISPC HW IP initialisation */
-static int omap_dispchw_probe(struct platform_device *pdev)
+static int __init omap_dispchw_probe(struct platform_device *pdev)
 {
        u32 rev;
        int r = 0;
@@ -3399,6 +3723,11 @@ static int omap_dispchw_probe(struct platform_device *pdev)
 
        dispc_runtime_put();
 
+       dss_debugfs_create_file("dispc", dispc_dump_regs);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+       dss_debugfs_create_file("dispc_irq", dispc_dump_irqs);
+#endif
        return 0;
 
 err_runtime_get:
@@ -3407,7 +3736,7 @@ err_runtime_get:
        return r;
 }
 
-static int omap_dispchw_remove(struct platform_device *pdev)
+static int __exit omap_dispchw_remove(struct platform_device *pdev)
 {
        pm_runtime_disable(&pdev->dev);
 
@@ -3419,19 +3748,12 @@ static int omap_dispchw_remove(struct platform_device *pdev)
 static int dispc_runtime_suspend(struct device *dev)
 {
        dispc_save_context();
-       dss_runtime_put();
 
        return 0;
 }
 
 static int dispc_runtime_resume(struct device *dev)
 {
-       int r;
-
-       r = dss_runtime_get();
-       if (r < 0)
-               return r;
-
        dispc_restore_context();
 
        return 0;
@@ -3443,8 +3765,7 @@ static const struct dev_pm_ops dispc_pm_ops = {
 };
 
 static struct platform_driver omap_dispchw_driver = {
-       .probe          = omap_dispchw_probe,
-       .remove         = omap_dispchw_remove,
+       .remove         = __exit_p(omap_dispchw_remove),
        .driver         = {
                .name   = "omapdss_dispc",
                .owner  = THIS_MODULE,
@@ -3452,12 +3773,12 @@ static struct platform_driver omap_dispchw_driver = {
        },
 };
 
-int dispc_init_platform_driver(void)
+int __init dispc_init_platform_driver(void)
 {
-       return platform_driver_register(&omap_dispchw_driver);
+       return platform_driver_probe(&omap_dispchw_driver, omap_dispchw_probe);
 }
 
-void dispc_uninit_platform_driver(void)
+void __exit dispc_uninit_platform_driver(void)
 {
-       return platform_driver_unregister(&omap_dispchw_driver);
+       platform_driver_unregister(&omap_dispchw_driver);
 }
index 5836bd1650f9a93c53430734361065c47fef4c26..f278080e1063f2a92b102b0157ab80396bfe095d 100644 (file)
@@ -120,6 +120,7 @@ static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel)
                return 0x03AC;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -134,6 +135,7 @@ static inline u16 DISPC_TRANS_COLOR(enum omap_channel channel)
                return 0x03B0;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -144,10 +146,12 @@ static inline u16 DISPC_TIMING_H(enum omap_channel channel)
                return 0x0064;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x0400;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -158,10 +162,12 @@ static inline u16 DISPC_TIMING_V(enum omap_channel channel)
                return 0x0068;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x0404;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -172,10 +178,12 @@ static inline u16 DISPC_POL_FREQ(enum omap_channel channel)
                return 0x006C;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x0408;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -186,10 +194,12 @@ static inline u16 DISPC_DIVISORo(enum omap_channel channel)
                return 0x0070;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x040C;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -205,6 +215,7 @@ static inline u16 DISPC_SIZE_MGR(enum omap_channel channel)
                return 0x03CC;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -215,10 +226,12 @@ static inline u16 DISPC_DATA_CYCLE1(enum omap_channel channel)
                return 0x01D4;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x03C0;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -229,10 +242,12 @@ static inline u16 DISPC_DATA_CYCLE2(enum omap_channel channel)
                return 0x01D8;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x03C4;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -243,10 +258,12 @@ static inline u16 DISPC_DATA_CYCLE3(enum omap_channel channel)
                return 0x01DC;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x03C8;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -257,10 +274,12 @@ static inline u16 DISPC_CPR_COEF_R(enum omap_channel channel)
                return 0x0220;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x03BC;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -271,10 +290,12 @@ static inline u16 DISPC_CPR_COEF_G(enum omap_channel channel)
                return 0x0224;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x03B8;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -285,10 +306,12 @@ static inline u16 DISPC_CPR_COEF_B(enum omap_channel channel)
                return 0x0228;
        case OMAP_DSS_CHANNEL_DIGIT:
                BUG();
+               return 0;
        case OMAP_DSS_CHANNEL_LCD2:
                return 0x03B4;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -306,6 +329,7 @@ static inline u16 DISPC_OVL_BASE(enum omap_plane plane)
                return 0x0300;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -321,6 +345,7 @@ static inline u16 DISPC_BA0_OFFSET(enum omap_plane plane)
                return 0x0008;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -335,6 +360,7 @@ static inline u16 DISPC_BA1_OFFSET(enum omap_plane plane)
                return 0x000C;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -343,6 +369,7 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0544;
        case OMAP_DSS_VIDEO2:
@@ -351,6 +378,7 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
                return 0x0310;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -359,6 +387,7 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0548;
        case OMAP_DSS_VIDEO2:
@@ -367,6 +396,7 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
                return 0x0314;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -381,6 +411,7 @@ static inline u16 DISPC_POS_OFFSET(enum omap_plane plane)
                return 0x009C;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -395,6 +426,7 @@ static inline u16 DISPC_SIZE_OFFSET(enum omap_plane plane)
                return 0x00A8;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -410,6 +442,7 @@ static inline u16 DISPC_ATTR_OFFSET(enum omap_plane plane)
                return 0x0070;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -418,6 +451,7 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0568;
        case OMAP_DSS_VIDEO2:
@@ -426,6 +460,7 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
                return 0x032C;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -441,6 +476,7 @@ static inline u16 DISPC_FIFO_THRESH_OFFSET(enum omap_plane plane)
                return 0x008C;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -456,6 +492,7 @@ static inline u16 DISPC_FIFO_SIZE_STATUS_OFFSET(enum omap_plane plane)
                return 0x0088;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -471,6 +508,7 @@ static inline u16 DISPC_ROW_INC_OFFSET(enum omap_plane plane)
                return 0x00A4;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -486,6 +524,7 @@ static inline u16 DISPC_PIX_INC_OFFSET(enum omap_plane plane)
                return 0x0098;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -498,8 +537,10 @@ static inline u16 DISPC_WINDOW_SKIP_OFFSET(enum omap_plane plane)
        case OMAP_DSS_VIDEO2:
        case OMAP_DSS_VIDEO3:
                BUG();
+               return 0;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -512,8 +553,10 @@ static inline u16 DISPC_TABLE_BA_OFFSET(enum omap_plane plane)
        case OMAP_DSS_VIDEO2:
        case OMAP_DSS_VIDEO3:
                BUG();
+               return 0;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -522,6 +565,7 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
        case OMAP_DSS_VIDEO2:
                return 0x0024;
@@ -529,6 +573,7 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
                return 0x0090;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -537,6 +582,7 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0580;
        case OMAP_DSS_VIDEO2:
@@ -545,6 +591,7 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
                return 0x0424;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -553,6 +600,7 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
        case OMAP_DSS_VIDEO2:
                return 0x0028;
@@ -560,6 +608,7 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
                return 0x0094;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -569,6 +618,7 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
        case OMAP_DSS_VIDEO2:
                return 0x002C;
@@ -576,6 +626,7 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
                return 0x0000;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -584,6 +635,7 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0584;
        case OMAP_DSS_VIDEO2:
@@ -592,6 +644,7 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
                return 0x0428;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -600,6 +653,7 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
        case OMAP_DSS_VIDEO2:
                return 0x0030;
@@ -607,6 +661,7 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
                return 0x0004;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -615,6 +670,7 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0588;
        case OMAP_DSS_VIDEO2:
@@ -623,6 +679,7 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
                return 0x042C;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -632,6 +689,7 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
        case OMAP_DSS_VIDEO2:
                return 0x0034 + i * 0x8;
@@ -639,6 +697,7 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
                return 0x0010 + i * 0x8;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -648,6 +707,7 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x058C + i * 0x8;
        case OMAP_DSS_VIDEO2:
@@ -656,6 +716,7 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
                return 0x0430 + i * 0x8;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -665,6 +726,7 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
        case OMAP_DSS_VIDEO2:
                return 0x0038 + i * 0x8;
@@ -672,6 +734,7 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
                return 0x0014 + i * 0x8;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -681,6 +744,7 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0590 + i * 8;
        case OMAP_DSS_VIDEO2:
@@ -689,6 +753,7 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
                return 0x0434 + i * 0x8;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -698,12 +763,14 @@ static inline u16 DISPC_CONV_COEF_OFFSET(enum omap_plane plane, u16 i)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
        case OMAP_DSS_VIDEO2:
        case OMAP_DSS_VIDEO3:
                return 0x0074 + i * 0x4;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -713,6 +780,7 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x0124 + i * 0x4;
        case OMAP_DSS_VIDEO2:
@@ -721,6 +789,7 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
                return 0x0050 + i * 0x4;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -730,6 +799,7 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
        switch (plane) {
        case OMAP_DSS_GFX:
                BUG();
+               return 0;
        case OMAP_DSS_VIDEO1:
                return 0x05CC + i * 0x4;
        case OMAP_DSS_VIDEO2:
@@ -738,6 +808,7 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
                return 0x0470 + i * 0x4;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -754,6 +825,7 @@ static inline u16 DISPC_PRELOAD_OFFSET(enum omap_plane plane)
                return 0x00A0;
        default:
                BUG();
+               return 0;
        }
 }
 #endif
index 4424c198dbcda6c3a34fa53189ecd28a5a446388..24901063037024f25bae5ee1ba44ec3fe074001a 100644 (file)
@@ -304,10 +304,18 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
                return 24;
        default:
                BUG();
+               return 0;
        }
 }
 EXPORT_SYMBOL(omapdss_default_get_recommended_bpp);
 
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings)
+{
+       *timings = dssdev->panel.timings;
+}
+EXPORT_SYMBOL(omapdss_default_get_timings);
+
 /* Checks if replication logic should be used. Only use for active matrix,
  * when overlay is in RGB12U or RGB16 mode, and LCD interface is
  * 18bpp or 24bpp */
@@ -340,6 +348,7 @@ bool dss_use_replication(struct omap_dss_device *dssdev,
                break;
        default:
                BUG();
+               return false;
        }
 
        return bpp > 16;
@@ -352,46 +361,6 @@ void dss_init_device(struct platform_device *pdev,
        int i;
        int r;
 
-       switch (dssdev->type) {
-#ifdef CONFIG_OMAP2_DSS_DPI
-       case OMAP_DISPLAY_TYPE_DPI:
-               r = dpi_init_display(dssdev);
-               break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_RFBI
-       case OMAP_DISPLAY_TYPE_DBI:
-               r = rfbi_init_display(dssdev);
-               break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
-       case OMAP_DISPLAY_TYPE_VENC:
-               r = venc_init_display(dssdev);
-               break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_SDI
-       case OMAP_DISPLAY_TYPE_SDI:
-               r = sdi_init_display(dssdev);
-               break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
-       case OMAP_DISPLAY_TYPE_DSI:
-               r = dsi_init_display(dssdev);
-               break;
-#endif
-       case OMAP_DISPLAY_TYPE_HDMI:
-               r = hdmi_init_display(dssdev);
-               break;
-       default:
-               DSSERR("Support for display '%s' not compiled in.\n",
-                               dssdev->name);
-               return;
-       }
-
-       if (r) {
-               DSSERR("failed to init display %s\n", dssdev->name);
-               return;
-       }
-
        /* create device sysfs files */
        i = 0;
        while ((attr = display_sysfs_attrs[i++]) != NULL) {
index faaf305fda279615a6748ff0cfbbe78e315049a2..8c2056c9537bd1162d08dd0c6d63a2437b926c45 100644 (file)
@@ -156,7 +156,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
                t->pixel_clock = pck;
        }
 
-       dispc_mgr_set_lcd_timings(dssdev->manager->id, t);
+       dss_mgr_set_timings(dssdev->manager, t);
 
        return 0;
 }
@@ -202,10 +202,6 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
                        goto err_reg_enable;
        }
 
-       r = dss_runtime_get();
-       if (r)
-               goto err_get_dss;
-
        r = dispc_runtime_get();
        if (r)
                goto err_get_dispc;
@@ -244,8 +240,6 @@ err_dsi_pll_init:
 err_get_dsi:
        dispc_runtime_put();
 err_get_dispc:
-       dss_runtime_put();
-err_get_dss:
        if (cpu_is_omap34xx())
                regulator_disable(dpi.vdds_dsi_reg);
 err_reg_enable:
@@ -266,7 +260,6 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
        }
 
        dispc_runtime_put();
-       dss_runtime_put();
 
        if (cpu_is_omap34xx())
                regulator_disable(dpi.vdds_dsi_reg);
@@ -283,21 +276,15 @@ void dpi_set_timings(struct omap_dss_device *dssdev,
        DSSDBG("dpi_set_timings\n");
        dssdev->panel.timings = *timings;
        if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
-               r = dss_runtime_get();
-               if (r)
-                       return;
-
                r = dispc_runtime_get();
-               if (r) {
-                       dss_runtime_put();
+               if (r)
                        return;
-               }
 
                dpi_set_mode(dssdev);
-               dispc_mgr_go(dssdev->manager->id);
 
                dispc_runtime_put();
-               dss_runtime_put();
+       } else {
+               dss_mgr_set_timings(dssdev->manager, timings);
        }
 }
 EXPORT_SYMBOL(dpi_set_timings);
@@ -312,7 +299,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
        unsigned long pck;
        struct dispc_clock_info dispc_cinfo;
 
-       if (!dispc_lcd_timings_ok(timings))
+       if (dss_mgr_check_timings(dssdev->manager, timings))
                return -EINVAL;
 
        if (timings->pixel_clock == 0)
@@ -352,7 +339,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
 }
 EXPORT_SYMBOL(dpi_check_timings);
 
-int dpi_init_display(struct omap_dss_device *dssdev)
+static int __init dpi_init_display(struct omap_dss_device *dssdev)
 {
        DSSDBG("init_display\n");
 
@@ -378,12 +365,58 @@ int dpi_init_display(struct omap_dss_device *dssdev)
        return 0;
 }
 
-int dpi_init(void)
+static void __init dpi_probe_pdata(struct platform_device *pdev)
 {
+       struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+       int i, r;
+
+       for (i = 0; i < pdata->num_devices; ++i) {
+               struct omap_dss_device *dssdev = pdata->devices[i];
+
+               if (dssdev->type != OMAP_DISPLAY_TYPE_DPI)
+                       continue;
+
+               r = dpi_init_display(dssdev);
+               if (r) {
+                       DSSERR("device %s init failed: %d\n", dssdev->name, r);
+                       continue;
+               }
+
+               r = omap_dss_register_device(dssdev, &pdev->dev, i);
+               if (r)
+                       DSSERR("device %s register failed: %d\n",
+                                       dssdev->name, r);
+       }
+}
+
+static int __init omap_dpi_probe(struct platform_device *pdev)
+{
+       dpi_probe_pdata(pdev);
+
+       return 0;
+}
+
+static int __exit omap_dpi_remove(struct platform_device *pdev)
+{
+       omap_dss_unregister_child_devices(&pdev->dev);
+
        return 0;
 }
 
-void dpi_exit(void)
+static struct platform_driver omap_dpi_driver = {
+       .remove         = __exit_p(omap_dpi_remove),
+       .driver         = {
+               .name   = "omapdss_dpi",
+               .owner  = THIS_MODULE,
+       },
+};
+
+int __init dpi_init_platform_driver(void)
 {
+       return platform_driver_probe(&omap_dpi_driver, omap_dpi_probe);
 }
 
+void __exit dpi_uninit_platform_driver(void)
+{
+       platform_driver_unregister(&omap_dpi_driver);
+}
index 210a3c4f615012662769010e37d6cd54e28e61e3..ec363d8390edd53873647fe96dff416f8d9d06f7 100644 (file)
@@ -256,14 +256,13 @@ struct dsi_data {
        struct platform_device *pdev;
        void __iomem    *base;
 
+       int module_id;
+
        int irq;
 
        struct clk *dss_clk;
        struct clk *sys_clk;
 
-       int (*enable_pads)(int dsi_id, unsigned lane_mask);
-       void (*disable_pads)(int dsi_id, unsigned lane_mask);
-
        struct dsi_clock_info current_cinfo;
 
        bool vdds_dsi_enabled;
@@ -361,11 +360,6 @@ struct platform_device *dsi_get_dsidev_from_id(int module)
        return dsi_pdev_map[module];
 }
 
-static inline int dsi_get_dsidev_id(struct platform_device *dsidev)
-{
-       return dsidev->id;
-}
-
 static inline void dsi_write_reg(struct platform_device *dsidev,
                const struct dsi_reg idx, u32 val)
 {
@@ -452,6 +446,7 @@ u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
                return 16;
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -1184,10 +1179,9 @@ static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
 static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
 {
        unsigned long r;
-       int dsi_module = dsi_get_dsidev_id(dsidev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 
-       if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
+       if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) {
                /* DSI FCLK source is DSS_CLK_FCK */
                r = clk_get_rate(dsi->dss_clk);
        } else {
@@ -1279,10 +1273,9 @@ static int dsi_pll_power(struct platform_device *dsidev,
 }
 
 /* calculate clock rates using dividers in cinfo */
-static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
+static int dsi_calc_clock_rates(struct platform_device *dsidev,
                struct dsi_clock_info *cinfo)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 
        if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
@@ -1297,21 +1290,8 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
        if (cinfo->regm_dsi > dsi->regm_dsi_max)
                return -EINVAL;
 
-       if (cinfo->use_sys_clk) {
-               cinfo->clkin = clk_get_rate(dsi->sys_clk);
-               /* XXX it is unclear if highfreq should be used
-                * with DSS_SYS_CLK source also */
-               cinfo->highfreq = 0;
-       } else {
-               cinfo->clkin = dispc_mgr_pclk_rate(dssdev->manager->id);
-
-               if (cinfo->clkin < 32000000)
-                       cinfo->highfreq = 0;
-               else
-                       cinfo->highfreq = 1;
-       }
-
-       cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
+       cinfo->clkin = clk_get_rate(dsi->sys_clk);
+       cinfo->fint = cinfo->clkin / cinfo->regn;
 
        if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
                return -EINVAL;
@@ -1378,27 +1358,21 @@ retry:
 
        memset(&cur, 0, sizeof(cur));
        cur.clkin = dss_sys_clk;
-       cur.use_sys_clk = 1;
-       cur.highfreq = 0;
 
-       /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
-       /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
+       /* 0.75MHz < Fint = clkin / regn < 2.1MHz */
        /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
        for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
-               if (cur.highfreq == 0)
-                       cur.fint = cur.clkin / cur.regn;
-               else
-                       cur.fint = cur.clkin / (2 * cur.regn);
+               cur.fint = cur.clkin / cur.regn;
 
                if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
                        continue;
 
-               /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
+               /* DSIPHY(MHz) = (2 * regm / regn) * clkin */
                for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
                        unsigned long a, b;
 
                        a = 2 * cur.regm * (cur.clkin/1000);
-                       b = cur.regn * (cur.highfreq + 1);
+                       b = cur.regn;
                        cur.clkin4ddr = a / b * 1000;
 
                        if (cur.clkin4ddr > 1800 * 1000 * 1000)
@@ -1486,9 +1460,7 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
 
        DSSDBGF();
 
-       dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk;
-       dsi->current_cinfo.highfreq = cinfo->highfreq;
-
+       dsi->current_cinfo.clkin = cinfo->clkin;
        dsi->current_cinfo.fint = cinfo->fint;
        dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
        dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
@@ -1503,17 +1475,13 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
 
        DSSDBG("DSI Fint %ld\n", cinfo->fint);
 
-       DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
-                       cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree",
-                       cinfo->clkin,
-                       cinfo->highfreq);
+       DSSDBG("clkin rate %ld\n", cinfo->clkin);
 
        /* DSIPHY == CLKIN4DDR */
-       DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
+       DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu = %lu\n",
                        cinfo->regm,
                        cinfo->regn,
                        cinfo->clkin,
-                       cinfo->highfreq + 1,
                        cinfo->clkin4ddr);
 
        DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
@@ -1568,10 +1536,6 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
 
        if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
                l = FLD_MOD(l, f, 4, 1);        /* DSI_PLL_FREQSEL */
-       l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
-                       11, 11);                /* DSI_PLL_CLKSEL */
-       l = FLD_MOD(l, cinfo->highfreq,
-                       12, 12);                /* DSI_PLL_HIGHFREQ */
        l = FLD_MOD(l, 1, 13, 13);              /* DSI_PLL_REFEN */
        l = FLD_MOD(l, 0, 14, 14);              /* DSIPHY_CLKINEN */
        l = FLD_MOD(l, 1, 20, 20);              /* DSI_HSDIVBYPASS */
@@ -1716,7 +1680,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        struct dsi_clock_info *cinfo = &dsi->current_cinfo;
        enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
-       int dsi_module = dsi_get_dsidev_id(dsidev);
+       int dsi_module = dsi->module_id;
 
        dispc_clk_src = dss_get_dispc_clk_source();
        dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
@@ -1726,8 +1690,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
 
        seq_printf(s,   "- DSI%d PLL -\n", dsi_module + 1);
 
-       seq_printf(s,   "dsi pll source = %s\n",
-                       cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree");
+       seq_printf(s,   "dsi pll clkin\t%lu\n", cinfo->clkin);
 
        seq_printf(s,   "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
 
@@ -1789,7 +1752,6 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        unsigned long flags;
        struct dsi_irq_stats stats;
-       int dsi_module = dsi_get_dsidev_id(dsidev);
 
        spin_lock_irqsave(&dsi->irq_stats_lock, flags);
 
@@ -1806,7 +1768,7 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
 #define PIS(x) \
        seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
 
-       seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1);
+       seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
        PIS(VC0);
        PIS(VC1);
        PIS(VC2);
@@ -1886,22 +1848,6 @@ static void dsi2_dump_irqs(struct seq_file *s)
 
        dsi_dump_dsidev_irqs(dsidev, s);
 }
-
-void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
-               const struct file_operations *debug_fops)
-{
-       struct platform_device *dsidev;
-
-       dsidev = dsi_get_dsidev_from_id(0);
-       if (dsidev)
-               debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir,
-                       &dsi1_dump_irqs, debug_fops);
-
-       dsidev = dsi_get_dsidev_from_id(1);
-       if (dsidev)
-               debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir,
-                       &dsi2_dump_irqs, debug_fops);
-}
 #endif
 
 static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
@@ -2002,21 +1948,6 @@ static void dsi2_dump_regs(struct seq_file *s)
        dsi_dump_dsidev_regs(dsidev, s);
 }
 
-void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
-               const struct file_operations *debug_fops)
-{
-       struct platform_device *dsidev;
-
-       dsidev = dsi_get_dsidev_from_id(0);
-       if (dsidev)
-               debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir,
-                       &dsi1_dump_regs, debug_fops);
-
-       dsidev = dsi_get_dsidev_from_id(1);
-       if (dsidev)
-               debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir,
-                       &dsi2_dump_regs, debug_fops);
-}
 enum dsi_cio_power_state {
        DSI_COMPLEXIO_POWER_OFF         = 0x0,
        DSI_COMPLEXIO_POWER_ON          = 0x1,
@@ -2073,6 +2004,7 @@ static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
                return 1365 * 3;        /* 1365x24 bits */
        default:
                BUG();
+               return 0;
        }
 }
 
@@ -2337,7 +2269,7 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
 
        DSSDBGF();
 
-       r = dsi->enable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+       r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
        if (r)
                return r;
 
@@ -2447,7 +2379,7 @@ err_cio_pwr:
                dsi_cio_disable_lane_override(dsidev);
 err_scp_clk_dom:
        dsi_disable_scp_clk(dsidev);
-       dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+       dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
        return r;
 }
 
@@ -2461,7 +2393,7 @@ static void dsi_cio_uninit(struct omap_dss_device *dssdev)
 
        dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
        dsi_disable_scp_clk(dsidev);
-       dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+       dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
 }
 
 static void dsi_config_tx_fifo(struct platform_device *dsidev,
@@ -2485,6 +2417,7 @@ static void dsi_config_tx_fifo(struct platform_device *dsidev,
                if (add + size > 4) {
                        DSSERR("Illegal FIFO configuration\n");
                        BUG();
+                       return;
                }
 
                v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
@@ -2517,6 +2450,7 @@ static void dsi_config_rx_fifo(struct platform_device *dsidev,
                if (add + size > 4) {
                        DSSERR("Illegal FIFO configuration\n");
                        BUG();
+                       return;
                }
 
                v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
@@ -2658,6 +2592,7 @@ static int dsi_sync_vc(struct platform_device *dsidev, int channel)
                return dsi_sync_vc_l4(dsidev, channel);
        default:
                BUG();
+               return -EINVAL;
        }
 }
 
@@ -3226,6 +3161,7 @@ static int dsi_vc_generic_send_read_request(struct omap_dss_device *dssdev,
                data = reqdata[0] | (reqdata[1] << 8);
        } else {
                BUG();
+               return -EINVAL;
        }
 
        r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
@@ -3340,7 +3276,6 @@ static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
                goto err;
        }
 
-       BUG();
 err:
        DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
                type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
@@ -3735,6 +3670,186 @@ static void dsi_config_blanking_modes(struct omap_dss_device *dssdev)
        dsi_write_reg(dsidev, DSI_CTRL, r);
 }
 
+/*
+ * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3
+ * results in maximum transition time for data and clock lanes to enter and
+ * exit HS mode. Hence, this is the scenario where the least amount of command
+ * mode data can be interleaved. We program the minimum amount of TXBYTECLKHS
+ * clock cycles that can be used to interleave command mode data in HS so that
+ * all scenarios are satisfied.
+ */
+static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs,
+               int exit_hs, int exiths_clk, int ddr_pre, int ddr_post)
+{
+       int transition;
+
+       /*
+        * If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition
+        * time of data lanes only, if it isn't set, we need to consider HS
+        * transition time of both data and clock lanes. HS transition time
+        * of Scenario 3 is considered.
+        */
+       if (ddr_alwon) {
+               transition = enter_hs + exit_hs + max(enter_hs, 2) + 1;
+       } else {
+               int trans1, trans2;
+               trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1;
+               trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre +
+                               enter_hs + 1;
+               transition = max(trans1, trans2);
+       }
+
+       return blank > transition ? blank - transition : 0;
+}
+
+/*
+ * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1
+ * results in maximum transition time for data lanes to enter and exit LP mode.
+ * Hence, this is the scenario where the least amount of command mode data can
+ * be interleaved. We program the minimum amount of bytes that can be
+ * interleaved in LP so that all scenarios are satisfied.
+ */
+static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
+               int lp_clk_div, int tdsi_fclk)
+{
+       int trans_lp;   /* time required for a LP transition, in TXBYTECLKHS */
+       int tlp_avail;  /* time left for interleaving commands, in CLKIN4DDR */
+       int ttxclkesc;  /* period of LP transmit escape clock, in CLKIN4DDR */
+       int thsbyte_clk = 16;   /* Period of TXBYTECLKHS clock, in CLKIN4DDR */
+       int lp_inter;   /* cmd mode data that can be interleaved, in bytes */
+
+       /* maximum LP transition time according to Scenario 1 */
+       trans_lp = exit_hs + max(enter_hs, 2) + 1;
+
+       /* CLKIN4DDR = 16 * TXBYTECLKHS */
+       tlp_avail = thsbyte_clk * (blank - trans_lp);
+
+       ttxclkesc = tdsi_fclk / lp_clk_div;
+
+       lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
+                       26) / 16;
+
+       return max(lp_inter, 0);
+}
+
+static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
+{
+       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       int blanking_mode;
+       int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
+       int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
+       int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
+       int tclk_trail, ths_exit, exiths_clk;
+       bool ddr_alwon;
+       struct omap_video_timings *timings = &dssdev->panel.timings;
+       int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
+       int ndl = dsi->num_lanes_used - 1;
+       int dsi_fclk_hsdiv = dssdev->clocks.dsi.regm_dsi + 1;
+       int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
+       int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
+       int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
+       int bl_interleave_hs = 0, bl_interleave_lp = 0;
+       u32 r;
+
+       r = dsi_read_reg(dsidev, DSI_CTRL);
+       blanking_mode = FLD_GET(r, 20, 20);
+       hfp_blanking_mode = FLD_GET(r, 21, 21);
+       hbp_blanking_mode = FLD_GET(r, 22, 22);
+       hsa_blanking_mode = FLD_GET(r, 23, 23);
+
+       r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
+       hbp = FLD_GET(r, 11, 0);
+       hfp = FLD_GET(r, 23, 12);
+       hsa = FLD_GET(r, 31, 24);
+
+       r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
+       ddr_clk_post = FLD_GET(r, 7, 0);
+       ddr_clk_pre = FLD_GET(r, 15, 8);
+
+       r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
+       exit_hs_mode_lat = FLD_GET(r, 15, 0);
+       enter_hs_mode_lat = FLD_GET(r, 31, 16);
+
+       r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
+       lp_clk_div = FLD_GET(r, 12, 0);
+       ddr_alwon = FLD_GET(r, 13, 13);
+
+       r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
+       ths_exit = FLD_GET(r, 7, 0);
+
+       r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
+       tclk_trail = FLD_GET(r, 15, 8);
+
+       exiths_clk = ths_exit + tclk_trail;
+
+       width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
+       bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);
+
+       if (!hsa_blanking_mode) {
+               hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       exiths_clk, ddr_clk_pre, ddr_clk_post);
+               hsa_interleave_lp = dsi_compute_interleave_lp(hsa,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       lp_clk_div, dsi_fclk_hsdiv);
+       }
+
+       if (!hfp_blanking_mode) {
+               hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       exiths_clk, ddr_clk_pre, ddr_clk_post);
+               hfp_interleave_lp = dsi_compute_interleave_lp(hfp,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       lp_clk_div, dsi_fclk_hsdiv);
+       }
+
+       if (!hbp_blanking_mode) {
+               hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       exiths_clk, ddr_clk_pre, ddr_clk_post);
+
+               hbp_interleave_lp = dsi_compute_interleave_lp(hbp,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       lp_clk_div, dsi_fclk_hsdiv);
+       }
+
+       if (!blanking_mode) {
+               bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       exiths_clk, ddr_clk_pre, ddr_clk_post);
+
+               bl_interleave_lp = dsi_compute_interleave_lp(bllp,
+                                       enter_hs_mode_lat, exit_hs_mode_lat,
+                                       lp_clk_div, dsi_fclk_hsdiv);
+       }
+
+       DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n",
+               hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs,
+               bl_interleave_hs);
+
+       DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n",
+               hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
+               bl_interleave_lp);
+
+       r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
+       r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
+       r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
+       r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
+       dsi_write_reg(dsidev, DSI_VM_TIMING4, r);
+
+       r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
+       r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
+       r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
+       r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
+       dsi_write_reg(dsidev, DSI_VM_TIMING5, r);
+
+       r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
+       r = FLD_MOD(r, bl_interleave_hs, 31, 15);
+       r = FLD_MOD(r, bl_interleave_lp, 16, 0);
+       dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
+}
+
 static int dsi_proto_config(struct omap_dss_device *dssdev)
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -3769,6 +3884,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
                break;
        default:
                BUG();
+               return -EINVAL;
        }
 
        r = dsi_read_reg(dsidev, DSI_CTRL);
@@ -3793,6 +3909,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
        if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
                dsi_config_vp_sync_events(dssdev);
                dsi_config_blanking_modes(dssdev);
+               dsi_config_cmd_mode_interleaving(dssdev);
        }
 
        dsi_vc_initial_config(dsidev, 0);
@@ -4008,6 +4125,7 @@ int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
                        break;
                default:
                        BUG();
+                       return -EINVAL;
                };
 
                dsi_if_enable(dsidev, false);
@@ -4192,10 +4310,6 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
        __cancel_delayed_work(&dsi->framedone_timeout_work);
 
        dsi_handle_framedone(dsidev, 0);
-
-#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
-       dispc_fake_vsync_irq();
-#endif
 }
 
 int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
@@ -4259,13 +4373,12 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
                dispc_mgr_enable_stallmode(dssdev->manager->id, true);
                dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1);
 
-               dispc_mgr_set_lcd_timings(dssdev->manager->id, &timings);
+               dss_mgr_set_timings(dssdev->manager, &timings);
        } else {
                dispc_mgr_enable_stallmode(dssdev->manager->id, false);
                dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 0);
 
-               dispc_mgr_set_lcd_timings(dssdev->manager->id,
-                       &dssdev->panel.timings);
+               dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
        }
 
                dispc_mgr_set_lcd_display_type(dssdev->manager->id,
@@ -4294,13 +4407,11 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
        struct dsi_clock_info cinfo;
        int r;
 
-       /* we always use DSS_CLK_SYSCK as input clock */
-       cinfo.use_sys_clk = true;
        cinfo.regn  = dssdev->clocks.dsi.regn;
        cinfo.regm  = dssdev->clocks.dsi.regm;
        cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
        cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
-       r = dsi_calc_clock_rates(dssdev, &cinfo);
+       r = dsi_calc_clock_rates(dsidev, &cinfo);
        if (r) {
                DSSERR("Failed to calc dsi clocks\n");
                return r;
@@ -4345,7 +4456,7 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
 static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-       int dsi_module = dsi_get_dsidev_id(dsidev);
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        int r;
 
        r = dsi_pll_init(dsidev, true, true);
@@ -4357,7 +4468,7 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
                goto err1;
 
        dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
-       dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src);
+       dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src);
        dss_select_lcd_clk_source(dssdev->manager->id,
                        dssdev->clocks.dispc.channel.lcd_clk_src);
 
@@ -4396,7 +4507,7 @@ err3:
        dsi_cio_uninit(dssdev);
 err2:
        dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
-       dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
+       dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
        dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
 
 err1:
@@ -4410,7 +4521,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       int dsi_module = dsi_get_dsidev_id(dsidev);
 
        if (enter_ulps && !dsi->ulps_enabled)
                dsi_enter_ulps(dsidev);
@@ -4423,7 +4533,7 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
        dsi_vc_enable(dsidev, 3, 0);
 
        dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
-       dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
+       dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
        dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
        dsi_cio_uninit(dssdev);
        dsi_pll_uninit(dsidev, disconnect_lanes);
@@ -4527,7 +4637,7 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
 }
 EXPORT_SYMBOL(omapdss_dsi_enable_te);
 
-int dsi_init_display(struct omap_dss_device *dssdev)
+static int __init dsi_init_display(struct omap_dss_device *dssdev)
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4680,13 +4790,39 @@ static void dsi_put_clocks(struct platform_device *dsidev)
                clk_put(dsi->sys_clk);
 }
 
+static void __init dsi_probe_pdata(struct platform_device *dsidev)
+{
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       struct omap_dss_board_info *pdata = dsidev->dev.platform_data;
+       int i, r;
+
+       for (i = 0; i < pdata->num_devices; ++i) {
+               struct omap_dss_device *dssdev = pdata->devices[i];
+
+               if (dssdev->type != OMAP_DISPLAY_TYPE_DSI)
+                       continue;
+
+               if (dssdev->phy.dsi.module != dsi->module_id)
+                       continue;
+
+               r = dsi_init_display(dssdev);
+               if (r) {
+                       DSSERR("device %s init failed: %d\n", dssdev->name, r);
+                       continue;
+               }
+
+               r = omap_dss_register_device(dssdev, &dsidev->dev, i);
+               if (r)
+                       DSSERR("device %s register failed: %d\n",
+                                       dssdev->name, r);
+       }
+}
+
 /* DSI1 HW IP initialisation */
-static int omap_dsihw_probe(struct platform_device *dsidev)
+static int __init omap_dsihw_probe(struct platform_device *dsidev)
 {
-       struct omap_display_platform_data *dss_plat_data;
-       struct omap_dss_board_info *board_info;
        u32 rev;
-       int r, i, dsi_module = dsi_get_dsidev_id(dsidev);
+       int r, i;
        struct resource *dsi_mem;
        struct dsi_data *dsi;
 
@@ -4694,15 +4830,11 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
        if (!dsi)
                return -ENOMEM;
 
+       dsi->module_id = dsidev->id;
        dsi->pdev = dsidev;
-       dsi_pdev_map[dsi_module] = dsidev;
+       dsi_pdev_map[dsi->module_id] = dsidev;
        dev_set_drvdata(&dsidev->dev, dsi);
 
-       dss_plat_data = dsidev->dev.platform_data;
-       board_info = dss_plat_data->board_data;
-       dsi->enable_pads = board_info->dsi_enable_pads;
-       dsi->disable_pads = board_info->dsi_disable_pads;
-
        spin_lock_init(&dsi->irq_lock);
        spin_lock_init(&dsi->errors_lock);
        dsi->errors = 0;
@@ -4780,8 +4912,21 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
        else
                dsi->num_lanes_supported = 3;
 
+       dsi_probe_pdata(dsidev);
+
        dsi_runtime_put(dsidev);
 
+       if (dsi->module_id == 0)
+               dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs);
+       else if (dsi->module_id == 1)
+               dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+       if (dsi->module_id == 0)
+               dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs);
+       else if (dsi->module_id == 1)
+               dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs);
+#endif
        return 0;
 
 err_runtime_get:
@@ -4790,12 +4935,14 @@ err_runtime_get:
        return r;
 }
 
-static int omap_dsihw_remove(struct platform_device *dsidev)
+static int __exit omap_dsihw_remove(struct platform_device *dsidev)
 {
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 
        WARN_ON(dsi->scp_clk_refcount > 0);
 
+       omap_dss_unregister_child_devices(&dsidev->dev);
+
        pm_runtime_disable(&dsidev->dev);
 
        dsi_put_clocks(dsidev);
@@ -4816,7 +4963,6 @@ static int omap_dsihw_remove(struct platform_device *dsidev)
 static int dsi_runtime_suspend(struct device *dev)
 {
        dispc_runtime_put();
-       dss_runtime_put();
 
        return 0;
 }
@@ -4825,20 +4971,11 @@ static int dsi_runtime_resume(struct device *dev)
 {
        int r;
 
-       r = dss_runtime_get();
-       if (r)
-               goto err_get_dss;
-
        r = dispc_runtime_get();
        if (r)
-               goto err_get_dispc;
+               return r;
 
        return 0;
-
-err_get_dispc:
-       dss_runtime_put();
-err_get_dss:
-       return r;
 }
 
 static const struct dev_pm_ops dsi_pm_ops = {
@@ -4847,8 +4984,7 @@ static const struct dev_pm_ops dsi_pm_ops = {
 };
 
 static struct platform_driver omap_dsihw_driver = {
-       .probe          = omap_dsihw_probe,
-       .remove         = omap_dsihw_remove,
+       .remove         = __exit_p(omap_dsihw_remove),
        .driver         = {
                .name   = "omapdss_dsi",
                .owner  = THIS_MODULE,
@@ -4856,12 +4992,12 @@ static struct platform_driver omap_dsihw_driver = {
        },
 };
 
-int dsi_init_platform_driver(void)
+int __init dsi_init_platform_driver(void)
 {
-       return platform_driver_register(&omap_dsihw_driver);
+       return platform_driver_probe(&omap_dsihw_driver, omap_dsihw_probe);
 }
 
-void dsi_uninit_platform_driver(void)
+void __exit dsi_uninit_platform_driver(void)
 {
-       return platform_driver_unregister(&omap_dsihw_driver);
+       platform_driver_unregister(&omap_dsihw_driver);
 }
index bd2d5e159463c3c49a2f642a52814f8e8d83922f..6ea1ff149f6f08cf3311dc3c73341fca59372063 100644 (file)
@@ -62,6 +62,9 @@ struct dss_reg {
 #define REG_FLD_MOD(idx, val, start, end) \
        dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
 
+static int dss_runtime_get(void);
+static void dss_runtime_put(void);
+
 static struct {
        struct platform_device *pdev;
        void __iomem    *base;
@@ -277,7 +280,7 @@ void dss_dump_clocks(struct seq_file *s)
        dss_runtime_put();
 }
 
-void dss_dump_regs(struct seq_file *s)
+static void dss_dump_regs(struct seq_file *s)
 {
 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
 
@@ -322,6 +325,7 @@ void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
                break;
        default:
                BUG();
+               return;
        }
 
        dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end);
@@ -335,7 +339,7 @@ void dss_select_dsi_clk_source(int dsi_module,
                enum omap_dss_clk_source clk_src)
 {
        struct platform_device *dsidev;
-       int b;
+       int b, pos;
 
        switch (clk_src) {
        case OMAP_DSS_CLK_SRC_FCK:
@@ -355,9 +359,11 @@ void dss_select_dsi_clk_source(int dsi_module,
                break;
        default:
                BUG();
+               return;
        }
 
-       REG_FLD_MOD(DSS_CONTROL, b, 1, 1);      /* DSI_CLK_SWITCH */
+       pos = dsi_module == 0 ? 1 : 10;
+       REG_FLD_MOD(DSS_CONTROL, b, pos, pos);  /* DSIx_CLK_SWITCH */
 
        dss.dsi_clk_source[dsi_module] = clk_src;
 }
@@ -389,6 +395,7 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
                break;
        default:
                BUG();
+               return;
        }
 
        pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12;
@@ -706,7 +713,7 @@ static void dss_put_clocks(void)
        clk_put(dss.dss_clk);
 }
 
-int dss_runtime_get(void)
+static int dss_runtime_get(void)
 {
        int r;
 
@@ -717,7 +724,7 @@ int dss_runtime_get(void)
        return r < 0 ? r : 0;
 }
 
-void dss_runtime_put(void)
+static void dss_runtime_put(void)
 {
        int r;
 
@@ -740,7 +747,7 @@ void dss_debug_dump_clocks(struct seq_file *s)
 #endif
 
 /* DSS HW IP initialisation */
-static int omap_dsshw_probe(struct platform_device *pdev)
+static int __init omap_dsshw_probe(struct platform_device *pdev)
 {
        struct resource *dss_mem;
        u32 rev;
@@ -785,40 +792,24 @@ static int omap_dsshw_probe(struct platform_device *pdev)
        dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
        dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
 
-       r = dpi_init();
-       if (r) {
-               DSSERR("Failed to initialize DPI\n");
-               goto err_dpi;
-       }
-
-       r = sdi_init();
-       if (r) {
-               DSSERR("Failed to initialize SDI\n");
-               goto err_sdi;
-       }
-
        rev = dss_read_reg(DSS_REVISION);
        printk(KERN_INFO "OMAP DSS rev %d.%d\n",
                        FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
 
        dss_runtime_put();
 
+       dss_debugfs_create_file("dss", dss_dump_regs);
+
        return 0;
-err_sdi:
-       dpi_exit();
-err_dpi:
-       dss_runtime_put();
+
 err_runtime_get:
        pm_runtime_disable(&pdev->dev);
        dss_put_clocks();
        return r;
 }
 
-static int omap_dsshw_remove(struct platform_device *pdev)
+static int __exit omap_dsshw_remove(struct platform_device *pdev)
 {
-       dpi_exit();
-       sdi_exit();
-
        pm_runtime_disable(&pdev->dev);
 
        dss_put_clocks();
@@ -829,11 +820,24 @@ static int omap_dsshw_remove(struct platform_device *pdev)
 static int dss_runtime_suspend(struct device *dev)
 {
        dss_save_context();
+       dss_set_min_bus_tput(dev, 0);
        return 0;
 }
 
 static int dss_runtime_resume(struct device *dev)
 {
+       int r;
+       /*
+        * Set an arbitrarily high tput request to ensure OPP100.
+        * What we should really do is to make a request to stay in OPP100,
+        * without any tput requirements, but that is not currently possible
+        * via the PM layer.
+        */
+
+       r = dss_set_min_bus_tput(dev, 1000000000);
+       if (r)
+               return r;
+
        dss_restore_context();
        return 0;
 }
@@ -844,8 +848,7 @@ static const struct dev_pm_ops dss_pm_ops = {
 };
 
 static struct platform_driver omap_dsshw_driver = {
-       .probe          = omap_dsshw_probe,
-       .remove         = omap_dsshw_remove,
+       .remove         = __exit_p(omap_dsshw_remove),
        .driver         = {
                .name   = "omapdss_dss",
                .owner  = THIS_MODULE,
@@ -853,12 +856,12 @@ static struct platform_driver omap_dsshw_driver = {
        },
 };
 
-int dss_init_platform_driver(void)
+int __init dss_init_platform_driver(void)
 {
-       return platform_driver_register(&omap_dsshw_driver);
+       return platform_driver_probe(&omap_dsshw_driver, omap_dsshw_probe);
 }
 
 void dss_uninit_platform_driver(void)
 {
-       return platform_driver_unregister(&omap_dsshw_driver);
+       platform_driver_unregister(&omap_dsshw_driver);
 }
index d4b3dff2ead338db918ce4801ac7380966f6b679..dd1092ceaeef91d0390c23d152b4c65f680cbc09 100644 (file)
@@ -150,9 +150,6 @@ struct dsi_clock_info {
        u16 regm_dsi;   /* OMAP3: REGM4
                         * OMAP4: REGM5 */
        u16 lp_clk_div;
-
-       u8 highfreq;
-       bool use_sys_clk;
 };
 
 struct seq_file;
@@ -162,6 +159,16 @@ struct platform_device;
 struct bus_type *dss_get_bus(void);
 struct regulator *dss_get_vdds_dsi(void);
 struct regulator *dss_get_vdds_sdi(void);
+int dss_get_ctx_loss_count(struct device *dev);
+int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask);
+void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
+int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
+
+int omap_dss_register_device(struct omap_dss_device *dssdev,
+               struct device *parent, int disp_num);
+void omap_dss_unregister_device(struct omap_dss_device *dssdev);
+void omap_dss_unregister_child_devices(struct device *parent);
 
 /* apply */
 void dss_apply_init(void);
@@ -179,6 +186,9 @@ void dss_mgr_get_info(struct omap_overlay_manager *mgr,
 int dss_mgr_set_device(struct omap_overlay_manager *mgr,
                struct omap_dss_device *dssdev);
 int dss_mgr_unset_device(struct omap_overlay_manager *mgr);
+void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+               struct omap_video_timings *timings);
+const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr);
 
 bool dss_ovl_is_enabled(struct omap_overlay *ovl);
 int dss_ovl_enable(struct omap_overlay *ovl);
@@ -208,9 +218,11 @@ int dss_init_overlay_managers(struct platform_device *pdev);
 void dss_uninit_overlay_managers(struct platform_device *pdev);
 int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
                const struct omap_overlay_manager_info *info);
+int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
+               const struct omap_video_timings *timings);
 int dss_mgr_check(struct omap_overlay_manager *mgr,
-               struct omap_dss_device *dssdev,
                struct omap_overlay_manager_info *info,
+               const struct omap_video_timings *mgr_timings,
                struct omap_overlay_info **overlay_infos);
 
 /* overlay */
@@ -220,22 +232,18 @@ void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr);
 void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
 int dss_ovl_simple_check(struct omap_overlay *ovl,
                const struct omap_overlay_info *info);
-int dss_ovl_check(struct omap_overlay *ovl,
-               struct omap_overlay_info *info, struct omap_dss_device *dssdev);
+int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
+               const struct omap_video_timings *mgr_timings);
 
 /* DSS */
-int dss_init_platform_driver(void);
+int dss_init_platform_driver(void) __init;
 void dss_uninit_platform_driver(void);
 
-int dss_runtime_get(void);
-void dss_runtime_put(void);
-
 void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
 enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
 const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
 void dss_dump_clocks(struct seq_file *s);
 
-void dss_dump_regs(struct seq_file *s);
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
 void dss_debug_dump_clocks(struct seq_file *s);
 #endif
@@ -265,19 +273,8 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
                struct dispc_clock_info *dispc_cinfo);
 
 /* SDI */
-#ifdef CONFIG_OMAP2_DSS_SDI
-int sdi_init(void);
-void sdi_exit(void);
-int sdi_init_display(struct omap_dss_device *display);
-#else
-static inline int sdi_init(void)
-{
-       return 0;
-}
-static inline void sdi_exit(void)
-{
-}
-#endif
+int sdi_init_platform_driver(void) __init;
+void sdi_uninit_platform_driver(void) __exit;
 
 /* DSI */
 #ifdef CONFIG_OMAP2_DSS_DSI
@@ -285,19 +282,14 @@ static inline void sdi_exit(void)
 struct dentry;
 struct file_operations;
 
-int dsi_init_platform_driver(void);
-void dsi_uninit_platform_driver(void);
+int dsi_init_platform_driver(void) __init;
+void dsi_uninit_platform_driver(void) __exit;
 
 int dsi_runtime_get(struct platform_device *dsidev);
 void dsi_runtime_put(struct platform_device *dsidev);
 
 void dsi_dump_clocks(struct seq_file *s);
-void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
-               const struct file_operations *debug_fops);
-void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
-               const struct file_operations *debug_fops);
 
-int dsi_init_display(struct omap_dss_device *display);
 void dsi_irq_handler(void);
 u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt);
 
@@ -314,13 +306,6 @@ void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev);
 void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev);
 struct platform_device *dsi_get_dsidev_from_id(int module);
 #else
-static inline int dsi_init_platform_driver(void)
-{
-       return 0;
-}
-static inline void dsi_uninit_platform_driver(void)
-{
-}
 static inline int dsi_runtime_get(struct platform_device *dsidev)
 {
        return 0;
@@ -377,28 +362,14 @@ static inline struct platform_device *dsi_get_dsidev_from_id(int module)
 #endif
 
 /* DPI */
-#ifdef CONFIG_OMAP2_DSS_DPI
-int dpi_init(void);
-void dpi_exit(void);
-int dpi_init_display(struct omap_dss_device *dssdev);
-#else
-static inline int dpi_init(void)
-{
-       return 0;
-}
-static inline void dpi_exit(void)
-{
-}
-#endif
+int dpi_init_platform_driver(void) __init;
+void dpi_uninit_platform_driver(void) __exit;
 
 /* DISPC */
-int dispc_init_platform_driver(void);
-void dispc_uninit_platform_driver(void);
+int dispc_init_platform_driver(void) __init;
+void dispc_uninit_platform_driver(void) __exit;
 void dispc_dump_clocks(struct seq_file *s);
-void dispc_dump_irqs(struct seq_file *s);
-void dispc_dump_regs(struct seq_file *s);
 void dispc_irq_handler(void);
-void dispc_fake_vsync_irq(void);
 
 int dispc_runtime_get(void);
 void dispc_runtime_put(void);
@@ -409,12 +380,12 @@ void dispc_disable_sidle(void);
 void dispc_lcd_enable_signal_polarity(bool act_high);
 void dispc_lcd_enable_signal(bool enable);
 void dispc_pck_free_enable(bool enable);
-void dispc_set_digit_size(u16 width, u16 height);
 void dispc_enable_fifomerge(bool enable);
 void dispc_enable_gamma_table(bool enable);
 void dispc_set_loadmode(enum omap_dss_load_mode mode);
 
-bool dispc_lcd_timings_ok(struct omap_video_timings *timings);
+bool dispc_mgr_timings_ok(enum omap_channel channel,
+               const struct omap_video_timings *timings);
 unsigned long dispc_fclk_rate(void);
 void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
                struct dispc_clock_info *cinfo);
@@ -424,15 +395,16 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
 
 void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
 void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
-               u32 *fifo_low, u32 *fifo_high, bool use_fifomerge);
+               u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
+               bool manual_update);
 int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
-               bool ilace, bool replication);
+               bool ilace, bool replication,
+               const struct omap_video_timings *mgr_timings);
 int dispc_ovl_enable(enum omap_plane plane, bool enable);
 void dispc_ovl_set_channel_out(enum omap_plane plane,
                enum omap_channel channel);
 
 void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable);
-void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
 u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
 u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
 bool dispc_mgr_go_busy(enum omap_channel channel);
@@ -445,12 +417,13 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable);
 void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
 void dispc_mgr_set_lcd_display_type(enum omap_channel channel,
                enum omap_lcd_display_type type);
-void dispc_mgr_set_lcd_timings(enum omap_channel channel,
+void dispc_mgr_set_timings(enum omap_channel channel,
                struct omap_video_timings *timings);
 void dispc_mgr_set_pol_freq(enum omap_channel channel,
                enum omap_panel_config config, u8 acbi, u8 acb);
 unsigned long dispc_mgr_lclk_rate(enum omap_channel channel);
 unsigned long dispc_mgr_pclk_rate(enum omap_channel channel);
+unsigned long dispc_core_clk_rate(void);
 int dispc_mgr_set_clock_div(enum omap_channel channel,
                struct dispc_clock_info *cinfo);
 int dispc_mgr_get_clock_div(enum omap_channel channel,
@@ -460,19 +433,10 @@ void dispc_mgr_setup(enum omap_channel channel,
 
 /* VENC */
 #ifdef CONFIG_OMAP2_DSS_VENC
-int venc_init_platform_driver(void);
-void venc_uninit_platform_driver(void);
-void venc_dump_regs(struct seq_file *s);
-int venc_init_display(struct omap_dss_device *display);
+int venc_init_platform_driver(void) __init;
+void venc_uninit_platform_driver(void) __exit;
 unsigned long venc_get_pixel_clock(void);
 #else
-static inline int venc_init_platform_driver(void)
-{
-       return 0;
-}
-static inline void venc_uninit_platform_driver(void)
-{
-}
 static inline unsigned long venc_get_pixel_clock(void)
 {
        WARN("%s: VENC not compiled in, returning pclk as 0\n", __func__);
@@ -482,23 +446,10 @@ static inline unsigned long venc_get_pixel_clock(void)
 
 /* HDMI */
 #ifdef CONFIG_OMAP4_DSS_HDMI
-int hdmi_init_platform_driver(void);
-void hdmi_uninit_platform_driver(void);
-int hdmi_init_display(struct omap_dss_device *dssdev);
+int hdmi_init_platform_driver(void) __init;
+void hdmi_uninit_platform_driver(void) __exit;
 unsigned long hdmi_get_pixel_clock(void);
-void hdmi_dump_regs(struct seq_file *s);
 #else
-static inline int hdmi_init_display(struct omap_dss_device *dssdev)
-{
-       return 0;
-}
-static inline int hdmi_init_platform_driver(void)
-{
-       return 0;
-}
-static inline void hdmi_uninit_platform_driver(void)
-{
-}
 static inline unsigned long hdmi_get_pixel_clock(void)
 {
        WARN("%s: HDMI not compiled in, returning pclk as 0\n", __func__);
@@ -514,22 +465,18 @@ int omapdss_hdmi_read_edid(u8 *buf, int len);
 bool omapdss_hdmi_detect(void);
 int hdmi_panel_init(void);
 void hdmi_panel_exit(void);
+#ifdef CONFIG_OMAP4_DSS_HDMI_AUDIO
+int hdmi_audio_enable(void);
+void hdmi_audio_disable(void);
+int hdmi_audio_start(void);
+void hdmi_audio_stop(void);
+bool hdmi_mode_has_audio(void);
+int hdmi_audio_config(struct omap_dss_audio *audio);
+#endif
 
 /* RFBI */
-#ifdef CONFIG_OMAP2_DSS_RFBI
-int rfbi_init_platform_driver(void);
-void rfbi_uninit_platform_driver(void);
-void rfbi_dump_regs(struct seq_file *s);
-int rfbi_init_display(struct omap_dss_device *display);
-#else
-static inline int rfbi_init_platform_driver(void)
-{
-       return 0;
-}
-static inline void rfbi_uninit_platform_driver(void)
-{
-}
-#endif
+int rfbi_init_platform_driver(void) __init;
+void rfbi_uninit_platform_driver(void) __exit;
 
 
 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
index ce14aa6dd672802dd61659cdfb13201c8fabba0c..938709724f0cda2f6201c52b9548410e449eab49 100644 (file)
@@ -52,6 +52,8 @@ struct omap_dss_features {
        const char * const *clksrc_names;
        const struct dss_param_range *dss_params;
 
+       const enum omap_dss_rotation_type supported_rotation_types;
+
        const u32 buffer_size_unit;
        const u32 burst_size_unit;
 };
@@ -311,6 +313,8 @@ static const struct dss_param_range omap2_dss_param_range[] = {
         * scaler cannot scale a image with width more than 768.
         */
        [FEAT_PARAM_LINEWIDTH]                  = { 1, 768 },
+       [FEAT_PARAM_MGR_WIDTH]                  = { 1, 2048 },
+       [FEAT_PARAM_MGR_HEIGHT]                 = { 1, 2048 },
 };
 
 static const struct dss_param_range omap3_dss_param_range[] = {
@@ -324,6 +328,8 @@ static const struct dss_param_range omap3_dss_param_range[] = {
        [FEAT_PARAM_DSIPLL_LPDIV]               = { 1, (1 << 13) - 1},
        [FEAT_PARAM_DOWNSCALE]                  = { 1, 4 },
        [FEAT_PARAM_LINEWIDTH]                  = { 1, 1024 },
+       [FEAT_PARAM_MGR_WIDTH]                  = { 1, 2048 },
+       [FEAT_PARAM_MGR_HEIGHT]                 = { 1, 2048 },
 };
 
 static const struct dss_param_range omap4_dss_param_range[] = {
@@ -337,6 +343,8 @@ static const struct dss_param_range omap4_dss_param_range[] = {
        [FEAT_PARAM_DSIPLL_LPDIV]               = { 0, (1 << 13) - 1 },
        [FEAT_PARAM_DOWNSCALE]                  = { 1, 4 },
        [FEAT_PARAM_LINEWIDTH]                  = { 1, 2048 },
+       [FEAT_PARAM_MGR_WIDTH]                  = { 1, 2048 },
+       [FEAT_PARAM_MGR_HEIGHT]                 = { 1, 2048 },
 };
 
 static const enum dss_feat_id omap2_dss_feat_list[] = {
@@ -399,6 +407,7 @@ static const enum dss_feat_id omap4430_es1_0_dss_feat_list[] = {
        FEAT_FIR_COEF_V,
        FEAT_ALPHA_FREE_ZORDER,
        FEAT_FIFO_MERGE,
+       FEAT_BURST_2D,
 };
 
 static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = {
@@ -416,6 +425,7 @@ static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = {
        FEAT_FIR_COEF_V,
        FEAT_ALPHA_FREE_ZORDER,
        FEAT_FIFO_MERGE,
+       FEAT_BURST_2D,
 };
 
 static const enum dss_feat_id omap4_dss_feat_list[] = {
@@ -434,6 +444,7 @@ static const enum dss_feat_id omap4_dss_feat_list[] = {
        FEAT_FIR_COEF_V,
        FEAT_ALPHA_FREE_ZORDER,
        FEAT_FIFO_MERGE,
+       FEAT_BURST_2D,
 };
 
 /* OMAP2 DSS Features */
@@ -451,6 +462,7 @@ static const struct omap_dss_features omap2_dss_features = {
        .overlay_caps = omap2_dss_overlay_caps,
        .clksrc_names = omap2_dss_clk_source_names,
        .dss_params = omap2_dss_param_range,
+       .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
        .buffer_size_unit = 1,
        .burst_size_unit = 8,
 };
@@ -470,6 +482,7 @@ static const struct omap_dss_features omap3430_dss_features = {
        .overlay_caps = omap3430_dss_overlay_caps,
        .clksrc_names = omap3_dss_clk_source_names,
        .dss_params = omap3_dss_param_range,
+       .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
        .buffer_size_unit = 1,
        .burst_size_unit = 8,
 };
@@ -488,6 +501,7 @@ static const struct omap_dss_features omap3630_dss_features = {
        .overlay_caps = omap3630_dss_overlay_caps,
        .clksrc_names = omap3_dss_clk_source_names,
        .dss_params = omap3_dss_param_range,
+       .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
        .buffer_size_unit = 1,
        .burst_size_unit = 8,
 };
@@ -508,6 +522,7 @@ static const struct omap_dss_features omap4430_es1_0_dss_features  = {
        .overlay_caps = omap4_dss_overlay_caps,
        .clksrc_names = omap4_dss_clk_source_names,
        .dss_params = omap4_dss_param_range,
+       .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
        .buffer_size_unit = 16,
        .burst_size_unit = 16,
 };
@@ -527,6 +542,7 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
        .overlay_caps = omap4_dss_overlay_caps,
        .clksrc_names = omap4_dss_clk_source_names,
        .dss_params = omap4_dss_param_range,
+       .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
        .buffer_size_unit = 16,
        .burst_size_unit = 16,
 };
@@ -546,6 +562,7 @@ static const struct omap_dss_features omap4_dss_features = {
        .overlay_caps = omap4_dss_overlay_caps,
        .clksrc_names = omap4_dss_clk_source_names,
        .dss_params = omap4_dss_param_range,
+       .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
        .buffer_size_unit = 16,
        .burst_size_unit = 16,
 };
@@ -562,13 +579,17 @@ static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
        .pll_enable             =       ti_hdmi_4xxx_pll_enable,
        .pll_disable            =       ti_hdmi_4xxx_pll_disable,
        .video_enable           =       ti_hdmi_4xxx_wp_video_start,
+       .video_disable          =       ti_hdmi_4xxx_wp_video_stop,
        .dump_wrapper           =       ti_hdmi_4xxx_wp_dump,
        .dump_core              =       ti_hdmi_4xxx_core_dump,
        .dump_pll               =       ti_hdmi_4xxx_pll_dump,
        .dump_phy               =       ti_hdmi_4xxx_phy_dump,
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
        .audio_enable           =       ti_hdmi_4xxx_wp_audio_enable,
+       .audio_disable          =       ti_hdmi_4xxx_wp_audio_disable,
+       .audio_start            =       ti_hdmi_4xxx_audio_start,
+       .audio_stop             =       ti_hdmi_4xxx_audio_stop,
+       .audio_config           =       ti_hdmi_4xxx_audio_config,
 #endif
 
 };
@@ -662,6 +683,11 @@ void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end)
        *end = omap_current_dss_features->reg_fields[id].end;
 }
 
+bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type)
+{
+       return omap_current_dss_features->supported_rotation_types & rot_type;
+}
+
 void dss_features_init(void)
 {
        if (cpu_is_omap24xx())
index c332e7ddfce14dfedfecad805f1eae2c5478d27e..bdf469f080e75e3742cb350541ec08362fb2f683 100644 (file)
@@ -62,6 +62,7 @@ enum dss_feat_id {
        FEAT_FIFO_MERGE,
        /* An unknown HW bug causing the normal FIFO thresholds not to work */
        FEAT_OMAP3_DSI_FIFO_BUG,
+       FEAT_BURST_2D,
 };
 
 /* DSS register field id */
@@ -91,6 +92,8 @@ enum dss_range_param {
        FEAT_PARAM_DSIPLL_LPDIV,
        FEAT_PARAM_DOWNSCALE,
        FEAT_PARAM_LINEWIDTH,
+       FEAT_PARAM_MGR_WIDTH,
+       FEAT_PARAM_MGR_HEIGHT,
 };
 
 /* DSS Feature Functions */
@@ -108,6 +111,8 @@ const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
 u32 dss_feat_get_buffer_size_unit(void);       /* in bytes */
 u32 dss_feat_get_burst_size_unit(void);                /* in bytes */
 
+bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type);
+
 bool dss_has_feature(enum dss_feat_id id);
 void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
 void dss_features_init(void);
index c4b4f6950a9269a925b8617865f4a4a87fb89d06..8195c7166d200c8e575aba54f5e4f46c335df002 100644 (file)
 #include <linux/pm_runtime.h>
 #include <linux/clk.h>
 #include <video/omapdss.h>
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-#include "ti_hdmi_4xxx_ip.h"
-#endif
 
 #include "ti_hdmi.h"
 #include "dss.h"
@@ -63,7 +57,6 @@
 
 static struct {
        struct mutex lock;
-       struct omap_display_platform_data *pdata;
        struct platform_device *pdev;
        struct hdmi_ip_data ip_data;
 
@@ -130,25 +123,12 @@ static int hdmi_runtime_get(void)
 
        DSSDBG("hdmi_runtime_get\n");
 
-       /*
-        * HACK: Add dss_runtime_get() to ensure DSS clock domain is enabled.
-        * This should be removed later.
-        */
-       r = dss_runtime_get();
-       if (r < 0)
-               goto err_get_dss;
-
        r = pm_runtime_get_sync(&hdmi.pdev->dev);
        WARN_ON(r < 0);
        if (r < 0)
-               goto err_get_hdmi;
+               return r;
 
        return 0;
-
-err_get_hdmi:
-       dss_runtime_put();
-err_get_dss:
-       return r;
 }
 
 static void hdmi_runtime_put(void)
@@ -159,15 +139,9 @@ static void hdmi_runtime_put(void)
 
        r = pm_runtime_put_sync(&hdmi.pdev->dev);
        WARN_ON(r < 0);
-
-       /*
-        * HACK: This is added to complement the dss_runtime_get() call in
-        * hdmi_runtime_get(). This should be removed later.
-        */
-       dss_runtime_put();
 }
 
-int hdmi_init_display(struct omap_dss_device *dssdev)
+static int __init hdmi_init_display(struct omap_dss_device *dssdev)
 {
        DSSDBG("init_display\n");
 
@@ -344,7 +318,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
 
        hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data);
 
-       hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+       hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
 
        /* config the PLL and PHY hdmi_set_pll_pwrfirst */
        r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data);
@@ -376,10 +350,11 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
        dispc_enable_gamma_table(0);
 
        /* tv size */
-       dispc_set_digit_size(dssdev->panel.timings.x_res,
-                       dssdev->panel.timings.y_res);
+       dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
 
-       hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 1);
+       r = hdmi.ip_data.ops->video_enable(&hdmi.ip_data);
+       if (r)
+               goto err_vid_enable;
 
        r = dss_mgr_enable(dssdev->manager);
        if (r)
@@ -388,7 +363,8 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
        return 0;
 
 err_mgr_enable:
-       hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+       hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
+err_vid_enable:
        hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
        hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
 err:
@@ -400,7 +376,7 @@ static void hdmi_power_off(struct omap_dss_device *dssdev)
 {
        dss_mgr_disable(dssdev->manager);
 
-       hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+       hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
        hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
        hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
        hdmi_runtime_put();
@@ -436,10 +412,12 @@ void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev)
                r = hdmi_power_on(dssdev);
                if (r)
                        DSSERR("failed to power on device\n");
+       } else {
+               dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
        }
 }
 
-void hdmi_dump_regs(struct seq_file *s)
+static void hdmi_dump_regs(struct seq_file *s)
 {
        mutex_lock(&hdmi.lock);
 
@@ -555,248 +533,201 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
        mutex_unlock(&hdmi.lock);
 }
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-
-static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
-                               struct snd_soc_dai *dai)
+static int hdmi_get_clocks(struct platform_device *pdev)
 {
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_codec *codec = rtd->codec;
-       struct platform_device *pdev = to_platform_device(codec->dev);
-       struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
-       int err = 0;
+       struct clk *clk;
 
-       if (!(ip_data->ops) && !(ip_data->ops->audio_enable)) {
-               dev_err(&pdev->dev, "Cannot enable/disable audio\n");
-               return -ENODEV;
+       clk = clk_get(&pdev->dev, "sys_clk");
+       if (IS_ERR(clk)) {
+               DSSERR("can't get sys_clk\n");
+               return PTR_ERR(clk);
        }
 
-       switch (cmd) {
-       case SNDRV_PCM_TRIGGER_START:
-       case SNDRV_PCM_TRIGGER_RESUME:
-       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               ip_data->ops->audio_enable(ip_data, true);
-               break;
-       case SNDRV_PCM_TRIGGER_STOP:
-       case SNDRV_PCM_TRIGGER_SUSPEND:
-       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               ip_data->ops->audio_enable(ip_data, false);
-               break;
-       default:
-               err = -EINVAL;
-       }
-       return err;
-}
-
-static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
-                                   struct snd_pcm_hw_params *params,
-                                   struct snd_soc_dai *dai)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_codec *codec = rtd->codec;
-       struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
-       struct hdmi_audio_format audio_format;
-       struct hdmi_audio_dma audio_dma;
-       struct hdmi_core_audio_config core_cfg;
-       struct hdmi_core_infoframe_audio aud_if_cfg;
-       int err, n, cts;
-       enum hdmi_core_audio_sample_freq sample_freq;
-
-       switch (params_format(params)) {
-       case SNDRV_PCM_FORMAT_S16_LE:
-               core_cfg.i2s_cfg.word_max_length =
-                       HDMI_AUDIO_I2S_MAX_WORD_20BITS;
-               core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_16_BITS;
-               core_cfg.i2s_cfg.in_length_bits =
-                       HDMI_AUDIO_I2S_INPUT_LENGTH_16;
-               core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
-               audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
-               audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
-               audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
-               audio_dma.transfer_size = 0x10;
-               break;
-       case SNDRV_PCM_FORMAT_S24_LE:
-               core_cfg.i2s_cfg.word_max_length =
-                       HDMI_AUDIO_I2S_MAX_WORD_24BITS;
-               core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_24_BITS;
-               core_cfg.i2s_cfg.in_length_bits =
-                       HDMI_AUDIO_I2S_INPUT_LENGTH_24;
-               audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
-               audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
-               audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
-               core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
-               audio_dma.transfer_size = 0x20;
-               break;
-       default:
+       hdmi.sys_clk = clk;
+
+       return 0;
+}
+
+static void hdmi_put_clocks(void)
+{
+       if (hdmi.sys_clk)
+               clk_put(hdmi.sys_clk);
+}
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts)
+{
+       u32 deep_color;
+       bool deep_color_correct = false;
+       u32 pclk = hdmi.ip_data.cfg.timings.pixel_clock;
+
+       if (n == NULL || cts == NULL)
                return -EINVAL;
-       }
 
-       switch (params_rate(params)) {
+       /* TODO: When implemented, query deep color mode here. */
+       deep_color = 100;
+
+       /*
+        * When using deep color, the default N value (as in the HDMI
+        * specification) yields to an non-integer CTS. Hence, we
+        * modify it while keeping the restrictions described in
+        * section 7.2.1 of the HDMI 1.4a specification.
+        */
+       switch (sample_freq) {
        case 32000:
-               sample_freq = HDMI_AUDIO_FS_32000;
+       case 48000:
+       case 96000:
+       case 192000:
+               if (deep_color == 125)
+                       if (pclk == 27027 || pclk == 74250)
+                               deep_color_correct = true;
+               if (deep_color == 150)
+                       if (pclk == 27027)
+                               deep_color_correct = true;
                break;
        case 44100:
-               sample_freq = HDMI_AUDIO_FS_44100;
-               break;
-       case 48000:
-               sample_freq = HDMI_AUDIO_FS_48000;
+       case 88200:
+       case 176400:
+               if (deep_color == 125)
+                       if (pclk == 27027)
+                               deep_color_correct = true;
                break;
        default:
                return -EINVAL;
        }
 
-       err = hdmi_config_audio_acr(ip_data, params_rate(params), &n, &cts);
-       if (err < 0)
-               return err;
-
-       /* Audio wrapper config */
-       audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
-       audio_format.active_chnnls_msk = 0x03;
-       audio_format.type = HDMI_AUDIO_TYPE_LPCM;
-       audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
-       /* Disable start/stop signals of IEC 60958 blocks */
-       audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF;
+       if (deep_color_correct) {
+               switch (sample_freq) {
+               case 32000:
+                       *n = 8192;
+                       break;
+               case 44100:
+                       *n = 12544;
+                       break;
+               case 48000:
+                       *n = 8192;
+                       break;
+               case 88200:
+                       *n = 25088;
+                       break;
+               case 96000:
+                       *n = 16384;
+                       break;
+               case 176400:
+                       *n = 50176;
+                       break;
+               case 192000:
+                       *n = 32768;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       } else {
+               switch (sample_freq) {
+               case 32000:
+                       *n = 4096;
+                       break;
+               case 44100:
+                       *n = 6272;
+                       break;
+               case 48000:
+                       *n = 6144;
+                       break;
+               case 88200:
+                       *n = 12544;
+                       break;
+               case 96000:
+                       *n = 12288;
+                       break;
+               case 176400:
+                       *n = 25088;
+                       break;
+               case 192000:
+                       *n = 24576;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+       /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
+       *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
 
-       audio_dma.block_size = 0xC0;
-       audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
-       audio_dma.fifo_threshold = 0x20; /* in number of samples */
+       return 0;
+}
 
-       hdmi_wp_audio_config_dma(ip_data, &audio_dma);
-       hdmi_wp_audio_config_format(ip_data, &audio_format);
+int hdmi_audio_enable(void)
+{
+       DSSDBG("audio_enable\n");
 
-       /*
-        * I2S config
-        */
-       core_cfg.i2s_cfg.en_high_bitrate_aud = false;
-       /* Only used with high bitrate audio */
-       core_cfg.i2s_cfg.cbit_order = false;
-       /* Serial data and word select should change on sck rising edge */
-       core_cfg.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
-       core_cfg.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
-       /* Set I2S word select polarity */
-       core_cfg.i2s_cfg.ws_polarity = HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT;
-       core_cfg.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
-       /* Set serial data to word select shift. See Phillips spec. */
-       core_cfg.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
-       /* Enable one of the four available serial data channels */
-       core_cfg.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
-
-       /* Core audio config */
-       core_cfg.freq_sample = sample_freq;
-       core_cfg.n = n;
-       core_cfg.cts = cts;
-       if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
-               core_cfg.aud_par_busclk = 0;
-               core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
-               core_cfg.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
-       } else {
-               core_cfg.aud_par_busclk = (((128 * 31) - 1) << 8);
-               core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
-               core_cfg.use_mclk = true;
-       }
+       return hdmi.ip_data.ops->audio_enable(&hdmi.ip_data);
+}
 
-       if (core_cfg.use_mclk)
-               core_cfg.mclk_mode = HDMI_AUDIO_MCLK_128FS;
-       core_cfg.layout = HDMI_AUDIO_LAYOUT_2CH;
-       core_cfg.en_spdif = false;
-       /* Use sample frequency from channel status word */
-       core_cfg.fs_override = true;
-       /* Enable ACR packets */
-       core_cfg.en_acr_pkt = true;
-       /* Disable direct streaming digital audio */
-       core_cfg.en_dsd_audio = false;
-       /* Use parallel audio interface */
-       core_cfg.en_parallel_aud_input = true;
-
-       hdmi_core_audio_config(ip_data, &core_cfg);
+void hdmi_audio_disable(void)
+{
+       DSSDBG("audio_disable\n");
 
-       /*
-        * Configure packet
-        * info frame audio see doc CEA861-D page 74
-        */
-       aud_if_cfg.db1_coding_type = HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM;
-       aud_if_cfg.db1_channel_count = 2;
-       aud_if_cfg.db2_sample_freq = HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM;
-       aud_if_cfg.db2_sample_size = HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM;
-       aud_if_cfg.db4_channel_alloc = 0x00;
-       aud_if_cfg.db5_downmix_inh = false;
-       aud_if_cfg.db5_lsv = 0;
-
-       hdmi_core_audio_infoframe_config(ip_data, &aud_if_cfg);
-       return 0;
+       hdmi.ip_data.ops->audio_disable(&hdmi.ip_data);
 }
 
-static int hdmi_audio_startup(struct snd_pcm_substream *substream,
-                                 struct snd_soc_dai *dai)
+int hdmi_audio_start(void)
 {
-       if (!hdmi.ip_data.cfg.cm.mode) {
-               pr_err("Current video settings do not support audio.\n");
-               return -EIO;
-       }
-       return 0;
+       DSSDBG("audio_start\n");
+
+       return hdmi.ip_data.ops->audio_start(&hdmi.ip_data);
 }
 
-static int hdmi_audio_codec_probe(struct snd_soc_codec *codec)
+void hdmi_audio_stop(void)
 {
-       struct hdmi_ip_data *priv = &hdmi.ip_data;
+       DSSDBG("audio_stop\n");
 
-       snd_soc_codec_set_drvdata(codec, priv);
-       return 0;
+       hdmi.ip_data.ops->audio_stop(&hdmi.ip_data);
 }
 
-static struct snd_soc_codec_driver hdmi_audio_codec_drv = {
-       .probe = hdmi_audio_codec_probe,
-};
+bool hdmi_mode_has_audio(void)
+{
+       if (hdmi.ip_data.cfg.cm.mode == HDMI_HDMI)
+               return true;
+       else
+               return false;
+}
 
-static struct snd_soc_dai_ops hdmi_audio_codec_ops = {
-       .hw_params = hdmi_audio_hw_params,
-       .trigger = hdmi_audio_trigger,
-       .startup = hdmi_audio_startup,
-};
+int hdmi_audio_config(struct omap_dss_audio *audio)
+{
+       return hdmi.ip_data.ops->audio_config(&hdmi.ip_data, audio);
+}
 
-static struct snd_soc_dai_driver hdmi_codec_dai_drv = {
-               .name = "hdmi-audio-codec",
-               .playback = {
-                       .channels_min = 2,
-                       .channels_max = 2,
-                       .rates = SNDRV_PCM_RATE_32000 |
-                               SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
-                       .formats = SNDRV_PCM_FMTBIT_S16_LE |
-                               SNDRV_PCM_FMTBIT_S24_LE,
-               },
-               .ops = &hdmi_audio_codec_ops,
-};
 #endif
 
-static int hdmi_get_clocks(struct platform_device *pdev)
+static void __init hdmi_probe_pdata(struct platform_device *pdev)
 {
-       struct clk *clk;
+       struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+       int r, i;
 
-       clk = clk_get(&pdev->dev, "sys_clk");
-       if (IS_ERR(clk)) {
-               DSSERR("can't get sys_clk\n");
-               return PTR_ERR(clk);
-       }
+       for (i = 0; i < pdata->num_devices; ++i) {
+               struct omap_dss_device *dssdev = pdata->devices[i];
 
-       hdmi.sys_clk = clk;
+               if (dssdev->type != OMAP_DISPLAY_TYPE_HDMI)
+                       continue;
 
-       return 0;
-}
+               r = hdmi_init_display(dssdev);
+               if (r) {
+                       DSSERR("device %s init failed: %d\n", dssdev->name, r);
+                       continue;
+               }
 
-static void hdmi_put_clocks(void)
-{
-       if (hdmi.sys_clk)
-               clk_put(hdmi.sys_clk);
+               r = omap_dss_register_device(dssdev, &pdev->dev, i);
+               if (r)
+                       DSSERR("device %s register failed: %d\n",
+                                       dssdev->name, r);
+       }
 }
 
 /* HDMI HW IP initialisation */
-static int omapdss_hdmihw_probe(struct platform_device *pdev)
+static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
 {
        struct resource *hdmi_mem;
        int r;
 
-       hdmi.pdata = pdev->dev.platform_data;
        hdmi.pdev = pdev;
 
        mutex_init(&hdmi.lock);
@@ -830,28 +761,18 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
 
        hdmi_panel_init();
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+       dss_debugfs_create_file("hdmi", hdmi_dump_regs);
+
+       hdmi_probe_pdata(pdev);
 
-       /* Register ASoC codec DAI */
-       r = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
-                                       &hdmi_codec_dai_drv, 1);
-       if (r) {
-               DSSERR("can't register ASoC HDMI audio codec\n");
-               return r;
-       }
-#endif
        return 0;
 }
 
-static int omapdss_hdmihw_remove(struct platform_device *pdev)
+static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
 {
-       hdmi_panel_exit();
+       omap_dss_unregister_child_devices(&pdev->dev);
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-       snd_soc_unregister_codec(&pdev->dev);
-#endif
+       hdmi_panel_exit();
 
        pm_runtime_disable(&pdev->dev);
 
@@ -867,7 +788,6 @@ static int hdmi_runtime_suspend(struct device *dev)
        clk_disable(hdmi.sys_clk);
 
        dispc_runtime_put();
-       dss_runtime_put();
 
        return 0;
 }
@@ -876,23 +796,13 @@ static int hdmi_runtime_resume(struct device *dev)
 {
        int r;
 
-       r = dss_runtime_get();
-       if (r < 0)
-               goto err_get_dss;
-
        r = dispc_runtime_get();
        if (r < 0)
-               goto err_get_dispc;
-
+               return r;
 
        clk_enable(hdmi.sys_clk);
 
        return 0;
-
-err_get_dispc:
-       dss_runtime_put();
-err_get_dss:
-       return r;
 }
 
 static const struct dev_pm_ops hdmi_pm_ops = {
@@ -901,8 +811,7 @@ static const struct dev_pm_ops hdmi_pm_ops = {
 };
 
 static struct platform_driver omapdss_hdmihw_driver = {
-       .probe          = omapdss_hdmihw_probe,
-       .remove         = omapdss_hdmihw_remove,
+       .remove         = __exit_p(omapdss_hdmihw_remove),
        .driver         = {
                .name   = "omapdss_hdmi",
                .owner  = THIS_MODULE,
@@ -910,12 +819,12 @@ static struct platform_driver omapdss_hdmihw_driver = {
        },
 };
 
-int hdmi_init_platform_driver(void)
+int __init hdmi_init_platform_driver(void)
 {
-       return platform_driver_register(&omapdss_hdmihw_driver);
+       return platform_driver_probe(&omapdss_hdmihw_driver, omapdss_hdmihw_probe);
 }
 
-void hdmi_uninit_platform_driver(void)
+void __exit hdmi_uninit_platform_driver(void)
 {
-       return platform_driver_unregister(&omapdss_hdmihw_driver);
+       platform_driver_unregister(&omapdss_hdmihw_driver);
 }
index 533d5dc634d256374a22b9a2da8a8eb0453f9a34..1179e3c4b1c76565336b8e4a6041c5bc49da0964 100644 (file)
 #include "dss.h"
 
 static struct {
-       struct mutex hdmi_lock;
+       /* This protects the panel ops, mainly when accessing the HDMI IP. */
+       struct mutex lock;
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+       /* This protects the audio ops, specifically. */
+       spinlock_t audio_lock;
+#endif
 } hdmi;
 
 
@@ -54,12 +59,168 @@ static void hdmi_panel_remove(struct omap_dss_device *dssdev)
 
 }
 
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
+{
+       unsigned long flags;
+       int r;
+
+       mutex_lock(&hdmi.lock);
+       spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+       /* enable audio only if the display is active and supports audio */
+       if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
+           !hdmi_mode_has_audio()) {
+               DSSERR("audio not supported or display is off\n");
+               r = -EPERM;
+               goto err;
+       }
+
+       r = hdmi_audio_enable();
+
+       if (!r)
+               dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
+
+err:
+       spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+       mutex_unlock(&hdmi.lock);
+       return r;
+}
+
+static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+       hdmi_audio_disable();
+
+       dssdev->audio_state = OMAP_DSS_AUDIO_DISABLED;
+
+       spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+}
+
+static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
+{
+       unsigned long flags;
+       int r;
+
+       spin_lock_irqsave(&hdmi.audio_lock, flags);
+       /*
+        * No need to check the panel state. It was checked when trasitioning
+        * to AUDIO_ENABLED.
+        */
+       if (dssdev->audio_state != OMAP_DSS_AUDIO_ENABLED) {
+               DSSERR("audio start from invalid state\n");
+               r = -EPERM;
+               goto err;
+       }
+
+       r = hdmi_audio_start();
+
+       if (!r)
+               dssdev->audio_state = OMAP_DSS_AUDIO_PLAYING;
+
+err:
+       spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+       return r;
+}
+
+static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+       hdmi_audio_stop();
+       dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
+
+       spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+}
+
+static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
+{
+       bool r = false;
+
+       mutex_lock(&hdmi.lock);
+
+       if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+               goto err;
+
+       if (!hdmi_mode_has_audio())
+               goto err;
+
+       r = true;
+err:
+       mutex_unlock(&hdmi.lock);
+       return r;
+}
+
+static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
+               struct omap_dss_audio *audio)
+{
+       unsigned long flags;
+       int r;
+
+       mutex_lock(&hdmi.lock);
+       spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+       /* config audio only if the display is active and supports audio */
+       if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
+           !hdmi_mode_has_audio()) {
+               DSSERR("audio not supported or display is off\n");
+               r = -EPERM;
+               goto err;
+       }
+
+       r = hdmi_audio_config(audio);
+
+       if (!r)
+               dssdev->audio_state = OMAP_DSS_AUDIO_CONFIGURED;
+
+err:
+       spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+       mutex_unlock(&hdmi.lock);
+       return r;
+}
+
+#else
+static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
+{
+       return -EPERM;
+}
+
+static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
+{
+}
+
+static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
+{
+       return -EPERM;
+}
+
+static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
+{
+}
+
+static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
+{
+       return false;
+}
+
+static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
+               struct omap_dss_audio *audio)
+{
+       return -EPERM;
+}
+#endif
+
 static int hdmi_panel_enable(struct omap_dss_device *dssdev)
 {
        int r = 0;
        DSSDBG("ENTER hdmi_panel_enable\n");
 
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
 
        if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
                r = -EINVAL;
@@ -75,40 +236,52 @@ static int hdmi_panel_enable(struct omap_dss_device *dssdev)
        dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
 
 err:
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 
        return r;
 }
 
 static void hdmi_panel_disable(struct omap_dss_device *dssdev)
 {
-       mutex_lock(&hdmi.hdmi_lock);
-
-       if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+       mutex_lock(&hdmi.lock);
+
+       if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+               /*
+                * TODO: notify audio users that the display was disabled. For
+                * now, disable audio locally to not break our audio state
+                * machine.
+                */
+               hdmi_panel_audio_disable(dssdev);
                omapdss_hdmi_display_disable(dssdev);
+       }
 
        dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
 
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 }
 
 static int hdmi_panel_suspend(struct omap_dss_device *dssdev)
 {
        int r = 0;
 
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
 
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
                r = -EINVAL;
                goto err;
        }
 
-       dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+       /*
+        * TODO: notify audio users that the display was suspended. For now,
+        * disable audio locally to not break our audio state machine.
+        */
+       hdmi_panel_audio_disable(dssdev);
 
+       dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
        omapdss_hdmi_display_disable(dssdev);
 
 err:
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 
        return r;
 }
@@ -117,7 +290,7 @@ static int hdmi_panel_resume(struct omap_dss_device *dssdev)
 {
        int r = 0;
 
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
 
        if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
                r = -EINVAL;
@@ -129,11 +302,12 @@ static int hdmi_panel_resume(struct omap_dss_device *dssdev)
                DSSERR("failed to power on\n");
                goto err;
        }
+       /* TODO: notify audio users that the panel resumed. */
 
        dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
 
 err:
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 
        return r;
 }
@@ -141,11 +315,11 @@ err:
 static void hdmi_get_timings(struct omap_dss_device *dssdev,
                        struct omap_video_timings *timings)
 {
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
 
        *timings = dssdev->panel.timings;
 
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 }
 
 static void hdmi_set_timings(struct omap_dss_device *dssdev,
@@ -153,12 +327,18 @@ static void hdmi_set_timings(struct omap_dss_device *dssdev,
 {
        DSSDBG("hdmi_set_timings\n");
 
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
+
+       /*
+        * TODO: notify audio users that there was a timings change. For
+        * now, disable audio locally to not break our audio state machine.
+        */
+       hdmi_panel_audio_disable(dssdev);
 
        dssdev->panel.timings = *timings;
        omapdss_hdmi_display_set_timing(dssdev);
 
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 }
 
 static int hdmi_check_timings(struct omap_dss_device *dssdev,
@@ -168,11 +348,11 @@ static int hdmi_check_timings(struct omap_dss_device *dssdev,
 
        DSSDBG("hdmi_check_timings\n");
 
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
 
        r = omapdss_hdmi_display_check_timing(dssdev, timings);
 
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
        return r;
 }
 
@@ -180,7 +360,7 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
 {
        int r;
 
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
 
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
                r = omapdss_hdmi_display_enable(dssdev);
@@ -194,7 +374,7 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
                        dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
                omapdss_hdmi_display_disable(dssdev);
 err:
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 
        return r;
 }
@@ -203,7 +383,7 @@ static bool hdmi_detect(struct omap_dss_device *dssdev)
 {
        int r;
 
-       mutex_lock(&hdmi.hdmi_lock);
+       mutex_lock(&hdmi.lock);
 
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
                r = omapdss_hdmi_display_enable(dssdev);
@@ -217,7 +397,7 @@ static bool hdmi_detect(struct omap_dss_device *dssdev)
                        dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
                omapdss_hdmi_display_disable(dssdev);
 err:
-       mutex_unlock(&hdmi.hdmi_lock);
+       mutex_unlock(&hdmi.lock);
 
        return r;
 }
@@ -234,6 +414,12 @@ static struct omap_dss_driver hdmi_driver = {
        .check_timings  = hdmi_check_timings,
        .read_edid      = hdmi_read_edid,
        .detect         = hdmi_detect,
+       .audio_enable   = hdmi_panel_audio_enable,
+       .audio_disable  = hdmi_panel_audio_disable,
+       .audio_start    = hdmi_panel_audio_start,
+       .audio_stop     = hdmi_panel_audio_stop,
+       .audio_supported        = hdmi_panel_audio_supported,
+       .audio_config   = hdmi_panel_audio_config,
        .driver                 = {
                .name   = "hdmi_panel",
                .owner  = THIS_MODULE,
@@ -242,7 +428,11 @@ static struct omap_dss_driver hdmi_driver = {
 
 int hdmi_panel_init(void)
 {
-       mutex_init(&hdmi.hdmi_lock);
+       mutex_init(&hdmi.lock);
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+       spin_lock_init(&hdmi.audio_lock);
+#endif
 
        omap_dss_register_driver(&hdmi_driver);
 
index e7364603f6a1d09fa193e63ed007565388090e1b..0cbcde4c688a9e40925daf5c12f960f6bb75473b 100644 (file)
@@ -654,9 +654,20 @@ static int dss_mgr_check_zorder(struct omap_overlay_manager *mgr,
        return 0;
 }
 
+int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
+               const struct omap_video_timings *timings)
+{
+       if (!dispc_mgr_timings_ok(mgr->id, timings)) {
+               DSSERR("check_manager: invalid timings\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 int dss_mgr_check(struct omap_overlay_manager *mgr,
-               struct omap_dss_device *dssdev,
                struct omap_overlay_manager_info *info,
+               const struct omap_video_timings *mgr_timings,
                struct omap_overlay_info **overlay_infos)
 {
        struct omap_overlay *ovl;
@@ -668,6 +679,10 @@ int dss_mgr_check(struct omap_overlay_manager *mgr,
                        return r;
        }
 
+       r = dss_mgr_check_timings(mgr, mgr_timings);
+       if (r)
+               return r;
+
        list_for_each_entry(ovl, &mgr->overlays, list) {
                struct omap_overlay_info *oi;
                int r;
@@ -677,7 +692,7 @@ int dss_mgr_check(struct omap_overlay_manager *mgr,
                if (oi == NULL)
                        continue;
 
-               r = dss_ovl_check(ovl, oi, dssdev);
+               r = dss_ovl_check(ovl, oi, mgr_timings);
                if (r)
                        return r;
        }
index 6e821810deec092324a0f1013fbb3cb32bba72dd..b0ba60f88dd23d3fdf5d5583850ecc7282b4a09d 100644 (file)
@@ -628,19 +628,23 @@ int dss_ovl_simple_check(struct omap_overlay *ovl,
                return -EINVAL;
        }
 
+       if (dss_feat_rotation_type_supported(info->rotation_type) == 0) {
+               DSSERR("check_overlay: rotation type %d not supported\n",
+                               info->rotation_type);
+               return -EINVAL;
+       }
+
        return 0;
 }
 
-int dss_ovl_check(struct omap_overlay *ovl,
-               struct omap_overlay_info *info, struct omap_dss_device *dssdev)
+int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
+               const struct omap_video_timings *mgr_timings)
 {
        u16 outw, outh;
        u16 dw, dh;
 
-       if (dssdev == NULL)
-               return 0;
-
-       dssdev->driver->get_resolution(dssdev, &dw, &dh);
+       dw = mgr_timings->x_res;
+       dh = mgr_timings->y_res;
 
        if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
                outw = info->width;
index 788a0ef6323aef6e2784244b1825c5b72965abc5..3d8c206e90e5d93631a0dc493f9ab180fbfa181e 100644 (file)
@@ -304,13 +304,23 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
                u16 height, void (*callback)(void *data), void *data)
 {
        u32 l;
+       struct omap_video_timings timings = {
+               .hsw            = 1,
+               .hfp            = 1,
+               .hbp            = 1,
+               .vsw            = 1,
+               .vfp            = 0,
+               .vbp            = 0,
+               .x_res          = width,
+               .y_res          = height,
+       };
 
        /*BUG_ON(callback == 0);*/
        BUG_ON(rfbi.framedone_callback != NULL);
 
        DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
 
-       dispc_mgr_set_lcd_size(dssdev->manager->id, width, height);
+       dss_mgr_set_timings(dssdev->manager, &timings);
 
        dispc_mgr_enable(dssdev->manager->id, true);
 
@@ -766,6 +776,16 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
                u16 *x, u16 *y, u16 *w, u16 *h)
 {
        u16 dw, dh;
+       struct omap_video_timings timings = {
+               .hsw            = 1,
+               .hfp            = 1,
+               .hbp            = 1,
+               .vsw            = 1,
+               .vfp            = 0,
+               .vbp            = 0,
+               .x_res          = *w,
+               .y_res          = *h,
+       };
 
        dssdev->driver->get_resolution(dssdev, &dw, &dh);
 
@@ -784,7 +804,7 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
        if (*w == 0 || *h == 0)
                return -EINVAL;
 
-       dispc_mgr_set_lcd_size(dssdev->manager->id, *w, *h);
+       dss_mgr_set_timings(dssdev->manager, &timings);
 
        return 0;
 }
@@ -799,7 +819,7 @@ int omap_rfbi_update(struct omap_dss_device *dssdev,
 }
 EXPORT_SYMBOL(omap_rfbi_update);
 
-void rfbi_dump_regs(struct seq_file *s)
+static void rfbi_dump_regs(struct seq_file *s)
 {
 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
 
@@ -900,15 +920,39 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
 }
 EXPORT_SYMBOL(omapdss_rfbi_display_disable);
 
-int rfbi_init_display(struct omap_dss_device *dssdev)
+static int __init rfbi_init_display(struct omap_dss_device *dssdev)
 {
        rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
        dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
        return 0;
 }
 
+static void __init rfbi_probe_pdata(struct platform_device *pdev)
+{
+       struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+       int i, r;
+
+       for (i = 0; i < pdata->num_devices; ++i) {
+               struct omap_dss_device *dssdev = pdata->devices[i];
+
+               if (dssdev->type != OMAP_DISPLAY_TYPE_DBI)
+                       continue;
+
+               r = rfbi_init_display(dssdev);
+               if (r) {
+                       DSSERR("device %s init failed: %d\n", dssdev->name, r);
+                       continue;
+               }
+
+               r = omap_dss_register_device(dssdev, &pdev->dev, i);
+               if (r)
+                       DSSERR("device %s register failed: %d\n",
+                               dssdev->name, r);
+       }
+}
+
 /* RFBI HW IP initialisation */
-static int omap_rfbihw_probe(struct platform_device *pdev)
+static int __init omap_rfbihw_probe(struct platform_device *pdev)
 {
        u32 rev;
        struct resource *rfbi_mem;
@@ -956,6 +1000,10 @@ static int omap_rfbihw_probe(struct platform_device *pdev)
 
        rfbi_runtime_put();
 
+       dss_debugfs_create_file("rfbi", rfbi_dump_regs);
+
+       rfbi_probe_pdata(pdev);
+
        return 0;
 
 err_runtime_get:
@@ -963,8 +1011,9 @@ err_runtime_get:
        return r;
 }
 
-static int omap_rfbihw_remove(struct platform_device *pdev)
+static int __exit omap_rfbihw_remove(struct platform_device *pdev)
 {
+       omap_dss_unregister_child_devices(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return 0;
 }
@@ -972,7 +1021,6 @@ static int omap_rfbihw_remove(struct platform_device *pdev)
 static int rfbi_runtime_suspend(struct device *dev)
 {
        dispc_runtime_put();
-       dss_runtime_put();
 
        return 0;
 }
@@ -981,20 +1029,11 @@ static int rfbi_runtime_resume(struct device *dev)
 {
        int r;
 
-       r = dss_runtime_get();
-       if (r < 0)
-               goto err_get_dss;
-
        r = dispc_runtime_get();
        if (r < 0)
-               goto err_get_dispc;
+               return r;
 
        return 0;
-
-err_get_dispc:
-       dss_runtime_put();
-err_get_dss:
-       return r;
 }
 
 static const struct dev_pm_ops rfbi_pm_ops = {
@@ -1003,8 +1042,7 @@ static const struct dev_pm_ops rfbi_pm_ops = {
 };
 
 static struct platform_driver omap_rfbihw_driver = {
-       .probe          = omap_rfbihw_probe,
-       .remove         = omap_rfbihw_remove,
+       .remove         = __exit_p(omap_rfbihw_remove),
        .driver         = {
                .name   = "omapdss_rfbi",
                .owner  = THIS_MODULE,
@@ -1012,12 +1050,12 @@ static struct platform_driver omap_rfbihw_driver = {
        },
 };
 
-int rfbi_init_platform_driver(void)
+int __init rfbi_init_platform_driver(void)
 {
-       return platform_driver_register(&omap_rfbihw_driver);
+       return platform_driver_probe(&omap_rfbihw_driver, omap_rfbihw_probe);
 }
 
-void rfbi_uninit_platform_driver(void)
+void __exit rfbi_uninit_platform_driver(void)
 {
-       return platform_driver_unregister(&omap_rfbihw_driver);
+       platform_driver_unregister(&omap_rfbihw_driver);
 }
index 8266ca0d666bc2de7547a12c38eca9dc98aba11a..3a43dc2a9b46c992b22770b163a5e47611b7bb9e 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/err.h>
 #include <linux/regulator/consumer.h>
 #include <linux/export.h>
+#include <linux/platform_device.h>
 
 #include <video/omapdss.h>
 #include "dss.h"
@@ -71,10 +72,6 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
        if (r)
                goto err_reg_enable;
 
-       r = dss_runtime_get();
-       if (r)
-               goto err_get_dss;
-
        r = dispc_runtime_get();
        if (r)
                goto err_get_dispc;
@@ -107,7 +104,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
        }
 
 
-       dispc_mgr_set_lcd_timings(dssdev->manager->id, t);
+       dss_mgr_set_timings(dssdev->manager, t);
 
        r = dss_set_clock_div(&dss_cinfo);
        if (r)
@@ -137,8 +134,6 @@ err_set_dss_clock_div:
 err_calc_clock_div:
        dispc_runtime_put();
 err_get_dispc:
-       dss_runtime_put();
-err_get_dss:
        regulator_disable(sdi.vdds_sdi_reg);
 err_reg_enable:
        omap_dss_stop_device(dssdev);
@@ -154,7 +149,6 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
        dss_sdi_disable();
 
        dispc_runtime_put();
-       dss_runtime_put();
 
        regulator_disable(sdi.vdds_sdi_reg);
 
@@ -162,7 +156,7 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
 }
 EXPORT_SYMBOL(omapdss_sdi_display_disable);
 
-int sdi_init_display(struct omap_dss_device *dssdev)
+static int __init sdi_init_display(struct omap_dss_device *dssdev)
 {
        DSSDBG("SDI init\n");
 
@@ -182,11 +176,58 @@ int sdi_init_display(struct omap_dss_device *dssdev)
        return 0;
 }
 
-int sdi_init(void)
+static void __init sdi_probe_pdata(struct platform_device *pdev)
+{
+       struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+       int i, r;
+
+       for (i = 0; i < pdata->num_devices; ++i) {
+               struct omap_dss_device *dssdev = pdata->devices[i];
+
+               if (dssdev->type != OMAP_DISPLAY_TYPE_SDI)
+                       continue;
+
+               r = sdi_init_display(dssdev);
+               if (r) {
+                       DSSERR("device %s init failed: %d\n", dssdev->name, r);
+                       continue;
+               }
+
+               r = omap_dss_register_device(dssdev, &pdev->dev, i);
+               if (r)
+                       DSSERR("device %s register failed: %d\n",
+                                       dssdev->name, r);
+       }
+}
+
+static int __init omap_sdi_probe(struct platform_device *pdev)
 {
+       sdi_probe_pdata(pdev);
+
+       return 0;
+}
+
+static int __exit omap_sdi_remove(struct platform_device *pdev)
+{
+       omap_dss_unregister_child_devices(&pdev->dev);
+
        return 0;
 }
 
-void sdi_exit(void)
+static struct platform_driver omap_sdi_driver = {
+       .remove         = __exit_p(omap_sdi_remove),
+       .driver         = {
+               .name   = "omapdss_sdi",
+               .owner  = THIS_MODULE,
+       },
+};
+
+int __init sdi_init_platform_driver(void)
+{
+       return platform_driver_probe(&omap_sdi_driver, omap_sdi_probe);
+}
+
+void __exit sdi_uninit_platform_driver(void)
 {
+       platform_driver_unregister(&omap_sdi_driver);
 }
index 1f58b84d69015035c134bc5736b86a12bf7b46f8..e734cb444bc7ce30051adeb9e0f1ff69318bf296 100644 (file)
@@ -96,7 +96,9 @@ struct ti_hdmi_ip_ops {
 
        void (*pll_disable)(struct hdmi_ip_data *ip_data);
 
-       void (*video_enable)(struct hdmi_ip_data *ip_data, bool start);
+       int (*video_enable)(struct hdmi_ip_data *ip_data);
+
+       void (*video_disable)(struct hdmi_ip_data *ip_data);
 
        void (*dump_wrapper)(struct hdmi_ip_data *ip_data, struct seq_file *s);
 
@@ -106,9 +108,17 @@ struct ti_hdmi_ip_ops {
 
        void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s);
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-       void (*audio_enable)(struct hdmi_ip_data *ip_data, bool start);
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+       int (*audio_enable)(struct hdmi_ip_data *ip_data);
+
+       void (*audio_disable)(struct hdmi_ip_data *ip_data);
+
+       int (*audio_start)(struct hdmi_ip_data *ip_data);
+
+       void (*audio_stop)(struct hdmi_ip_data *ip_data);
+
+       int (*audio_config)(struct hdmi_ip_data *ip_data,
+               struct omap_dss_audio *audio);
 #endif
 
 };
@@ -173,7 +183,8 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
 void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
 int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data, u8 *edid, int len);
 bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start);
+int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data);
 int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data);
 void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data);
 void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data);
@@ -181,8 +192,13 @@ void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
 void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
 void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
 void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable);
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts);
+int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data);
+int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data);
+int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
+               struct omap_dss_audio *audio);
 #endif
 #endif
index bfe6fe65c8becf4fa2bf0ed671612623effeeb19..4dae1b291079c9e8e00f0719aa22328f176934ab 100644 (file)
 #include <linux/string.h>
 #include <linux/seq_file.h>
 #include <linux/gpio.h>
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+#include <sound/asound.h>
+#include <sound/asoundef.h>
+#endif
 
 #include "ti_hdmi_4xxx_ip.h"
 #include "dss.h"
+#include "dss_features.h"
 
 static inline void hdmi_write_reg(void __iomem *base_addr,
                                const u16 idx, u32 val)
@@ -298,9 +303,9 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
        REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
 
        r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio),
-                       NULL, hpd_irq_handler,
-                       IRQF_DISABLED | IRQF_TRIGGER_RISING |
-                       IRQF_TRIGGER_FALLING, "hpd", ip_data);
+                                NULL, hpd_irq_handler,
+                                IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+                                IRQF_ONESHOT, "hpd", ip_data);
        if (r) {
                DSSERR("HPD IRQ request failed\n");
                hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
@@ -699,9 +704,15 @@ static void hdmi_wp_init(struct omap_video_timings *timings,
 
 }
 
-void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start)
+int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data)
+{
+       REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, true, 31, 31);
+       return 0;
+}
+
+void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data)
 {
-       REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, start, 31, 31);
+       REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, false, 31, 31);
 }
 
 static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
@@ -886,10 +897,12 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
 
 #define CORE_REG(i, name) name(i)
 #define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\
-               hdmi_read_reg(hdmi_pll_base(ip_data), r))
-#define DUMPCOREAV(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
+               hdmi_read_reg(hdmi_core_sys_base(ip_data), r))
+#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\
+               hdmi_read_reg(hdmi_av_base(ip_data), r))
+#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
                (i < 10) ? 32 - strlen(#r) : 31 - strlen(#r), " ", \
-               hdmi_read_reg(hdmi_pll_base(ip_data), CORE_REG(i, r)))
+               hdmi_read_reg(hdmi_av_base(ip_data), CORE_REG(i, r)))
 
        DUMPCORE(HDMI_CORE_SYS_VND_IDL);
        DUMPCORE(HDMI_CORE_SYS_DEV_IDL);
@@ -898,6 +911,13 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
        DUMPCORE(HDMI_CORE_SYS_SRST);
        DUMPCORE(HDMI_CORE_CTRL1);
        DUMPCORE(HDMI_CORE_SYS_SYS_STAT);
+       DUMPCORE(HDMI_CORE_SYS_DE_DLY);
+       DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
+       DUMPCORE(HDMI_CORE_SYS_DE_TOP);
+       DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
+       DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
+       DUMPCORE(HDMI_CORE_SYS_DE_LINL);
+       DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
        DUMPCORE(HDMI_CORE_SYS_VID_ACEN);
        DUMPCORE(HDMI_CORE_SYS_VID_MODE);
        DUMPCORE(HDMI_CORE_SYS_INTR_STATE);
@@ -907,102 +927,91 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
        DUMPCORE(HDMI_CORE_SYS_INTR4);
        DUMPCORE(HDMI_CORE_SYS_UMASK1);
        DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL);
-       DUMPCORE(HDMI_CORE_SYS_DE_DLY);
-       DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
-       DUMPCORE(HDMI_CORE_SYS_DE_TOP);
-       DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
-       DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
-       DUMPCORE(HDMI_CORE_SYS_DE_LINL);
-       DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
 
-       DUMPCORE(HDMI_CORE_DDC_CMD);
-       DUMPCORE(HDMI_CORE_DDC_STATUS);
        DUMPCORE(HDMI_CORE_DDC_ADDR);
+       DUMPCORE(HDMI_CORE_DDC_SEGM);
        DUMPCORE(HDMI_CORE_DDC_OFFSET);
        DUMPCORE(HDMI_CORE_DDC_COUNT1);
        DUMPCORE(HDMI_CORE_DDC_COUNT2);
+       DUMPCORE(HDMI_CORE_DDC_STATUS);
+       DUMPCORE(HDMI_CORE_DDC_CMD);
        DUMPCORE(HDMI_CORE_DDC_DATA);
-       DUMPCORE(HDMI_CORE_DDC_SEGM);
 
-       DUMPCORE(HDMI_CORE_AV_HDMI_CTRL);
-       DUMPCORE(HDMI_CORE_AV_DPD);
-       DUMPCORE(HDMI_CORE_AV_PB_CTRL1);
-       DUMPCORE(HDMI_CORE_AV_PB_CTRL2);
-       DUMPCORE(HDMI_CORE_AV_AVI_TYPE);
-       DUMPCORE(HDMI_CORE_AV_AVI_VERS);
-       DUMPCORE(HDMI_CORE_AV_AVI_LEN);
-       DUMPCORE(HDMI_CORE_AV_AVI_CHSUM);
+       DUMPCOREAV(HDMI_CORE_AV_ACR_CTRL);
+       DUMPCOREAV(HDMI_CORE_AV_FREQ_SVAL);
+       DUMPCOREAV(HDMI_CORE_AV_N_SVAL1);
+       DUMPCOREAV(HDMI_CORE_AV_N_SVAL2);
+       DUMPCOREAV(HDMI_CORE_AV_N_SVAL3);
+       DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL1);
+       DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL2);
+       DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL3);
+       DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL1);
+       DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL2);
+       DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL3);
+       DUMPCOREAV(HDMI_CORE_AV_AUD_MODE);
+       DUMPCOREAV(HDMI_CORE_AV_SPDIF_CTRL);
+       DUMPCOREAV(HDMI_CORE_AV_HW_SPDIF_FS);
+       DUMPCOREAV(HDMI_CORE_AV_SWAP_I2S);
+       DUMPCOREAV(HDMI_CORE_AV_SPDIF_ERTH);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_IN_MAP);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_IN_CTRL);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_CHST0);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_CHST1);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_CHST2);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_CHST4);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_CHST5);
+       DUMPCOREAV(HDMI_CORE_AV_ASRC);
+       DUMPCOREAV(HDMI_CORE_AV_I2S_IN_LEN);
+       DUMPCOREAV(HDMI_CORE_AV_HDMI_CTRL);
+       DUMPCOREAV(HDMI_CORE_AV_AUDO_TXSTAT);
+       DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
+       DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
+       DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
+       DUMPCOREAV(HDMI_CORE_AV_TEST_TXCTRL);
+       DUMPCOREAV(HDMI_CORE_AV_DPD);
+       DUMPCOREAV(HDMI_CORE_AV_PB_CTRL1);
+       DUMPCOREAV(HDMI_CORE_AV_PB_CTRL2);
+       DUMPCOREAV(HDMI_CORE_AV_AVI_TYPE);
+       DUMPCOREAV(HDMI_CORE_AV_AVI_VERS);
+       DUMPCOREAV(HDMI_CORE_AV_AVI_LEN);
+       DUMPCOREAV(HDMI_CORE_AV_AVI_CHSUM);
 
        for (i = 0; i < HDMI_CORE_AV_AVI_DBYTE_NELEMS; i++)
-               DUMPCOREAV(i, HDMI_CORE_AV_AVI_DBYTE);
+               DUMPCOREAV2(i, HDMI_CORE_AV_AVI_DBYTE);
+
+       DUMPCOREAV(HDMI_CORE_AV_SPD_TYPE);
+       DUMPCOREAV(HDMI_CORE_AV_SPD_VERS);
+       DUMPCOREAV(HDMI_CORE_AV_SPD_LEN);
+       DUMPCOREAV(HDMI_CORE_AV_SPD_CHSUM);
 
        for (i = 0; i < HDMI_CORE_AV_SPD_DBYTE_NELEMS; i++)
-               DUMPCOREAV(i, HDMI_CORE_AV_SPD_DBYTE);
+               DUMPCOREAV2(i, HDMI_CORE_AV_SPD_DBYTE);
+
+       DUMPCOREAV(HDMI_CORE_AV_AUDIO_TYPE);
+       DUMPCOREAV(HDMI_CORE_AV_AUDIO_VERS);
+       DUMPCOREAV(HDMI_CORE_AV_AUDIO_LEN);
+       DUMPCOREAV(HDMI_CORE_AV_AUDIO_CHSUM);
 
        for (i = 0; i < HDMI_CORE_AV_AUD_DBYTE_NELEMS; i++)
-               DUMPCOREAV(i, HDMI_CORE_AV_AUD_DBYTE);
+               DUMPCOREAV2(i, HDMI_CORE_AV_AUD_DBYTE);
+
+       DUMPCOREAV(HDMI_CORE_AV_MPEG_TYPE);
+       DUMPCOREAV(HDMI_CORE_AV_MPEG_VERS);
+       DUMPCOREAV(HDMI_CORE_AV_MPEG_LEN);
+       DUMPCOREAV(HDMI_CORE_AV_MPEG_CHSUM);
 
        for (i = 0; i < HDMI_CORE_AV_MPEG_DBYTE_NELEMS; i++)
-               DUMPCOREAV(i, HDMI_CORE_AV_MPEG_DBYTE);
+               DUMPCOREAV2(i, HDMI_CORE_AV_MPEG_DBYTE);
 
        for (i = 0; i < HDMI_CORE_AV_GEN_DBYTE_NELEMS; i++)
-               DUMPCOREAV(i, HDMI_CORE_AV_GEN_DBYTE);
+               DUMPCOREAV2(i, HDMI_CORE_AV_GEN_DBYTE);
+
+       DUMPCOREAV(HDMI_CORE_AV_CP_BYTE1);
 
        for (i = 0; i < HDMI_CORE_AV_GEN2_DBYTE_NELEMS; i++)
-               DUMPCOREAV(i, HDMI_CORE_AV_GEN2_DBYTE);
-
-       DUMPCORE(HDMI_CORE_AV_ACR_CTRL);
-       DUMPCORE(HDMI_CORE_AV_FREQ_SVAL);
-       DUMPCORE(HDMI_CORE_AV_N_SVAL1);
-       DUMPCORE(HDMI_CORE_AV_N_SVAL2);
-       DUMPCORE(HDMI_CORE_AV_N_SVAL3);
-       DUMPCORE(HDMI_CORE_AV_CTS_SVAL1);
-       DUMPCORE(HDMI_CORE_AV_CTS_SVAL2);
-       DUMPCORE(HDMI_CORE_AV_CTS_SVAL3);
-       DUMPCORE(HDMI_CORE_AV_CTS_HVAL1);
-       DUMPCORE(HDMI_CORE_AV_CTS_HVAL2);
-       DUMPCORE(HDMI_CORE_AV_CTS_HVAL3);
-       DUMPCORE(HDMI_CORE_AV_AUD_MODE);
-       DUMPCORE(HDMI_CORE_AV_SPDIF_CTRL);
-       DUMPCORE(HDMI_CORE_AV_HW_SPDIF_FS);
-       DUMPCORE(HDMI_CORE_AV_SWAP_I2S);
-       DUMPCORE(HDMI_CORE_AV_SPDIF_ERTH);
-       DUMPCORE(HDMI_CORE_AV_I2S_IN_MAP);
-       DUMPCORE(HDMI_CORE_AV_I2S_IN_CTRL);
-       DUMPCORE(HDMI_CORE_AV_I2S_CHST0);
-       DUMPCORE(HDMI_CORE_AV_I2S_CHST1);
-       DUMPCORE(HDMI_CORE_AV_I2S_CHST2);
-       DUMPCORE(HDMI_CORE_AV_I2S_CHST4);
-       DUMPCORE(HDMI_CORE_AV_I2S_CHST5);
-       DUMPCORE(HDMI_CORE_AV_ASRC);
-       DUMPCORE(HDMI_CORE_AV_I2S_IN_LEN);
-       DUMPCORE(HDMI_CORE_AV_HDMI_CTRL);
-       DUMPCORE(HDMI_CORE_AV_AUDO_TXSTAT);
-       DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
-       DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
-       DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
-       DUMPCORE(HDMI_CORE_AV_TEST_TXCTRL);
-       DUMPCORE(HDMI_CORE_AV_DPD);
-       DUMPCORE(HDMI_CORE_AV_PB_CTRL1);
-       DUMPCORE(HDMI_CORE_AV_PB_CTRL2);
-       DUMPCORE(HDMI_CORE_AV_AVI_TYPE);
-       DUMPCORE(HDMI_CORE_AV_AVI_VERS);
-       DUMPCORE(HDMI_CORE_AV_AVI_LEN);
-       DUMPCORE(HDMI_CORE_AV_AVI_CHSUM);
-       DUMPCORE(HDMI_CORE_AV_SPD_TYPE);
-       DUMPCORE(HDMI_CORE_AV_SPD_VERS);
-       DUMPCORE(HDMI_CORE_AV_SPD_LEN);
-       DUMPCORE(HDMI_CORE_AV_SPD_CHSUM);
-       DUMPCORE(HDMI_CORE_AV_AUDIO_TYPE);
-       DUMPCORE(HDMI_CORE_AV_AUDIO_VERS);
-       DUMPCORE(HDMI_CORE_AV_AUDIO_LEN);
-       DUMPCORE(HDMI_CORE_AV_AUDIO_CHSUM);
-       DUMPCORE(HDMI_CORE_AV_MPEG_TYPE);
-       DUMPCORE(HDMI_CORE_AV_MPEG_VERS);
-       DUMPCORE(HDMI_CORE_AV_MPEG_LEN);
-       DUMPCORE(HDMI_CORE_AV_MPEG_CHSUM);
-       DUMPCORE(HDMI_CORE_AV_CP_BYTE1);
-       DUMPCORE(HDMI_CORE_AV_CEC_ADDR_ID);
+               DUMPCOREAV2(i, HDMI_CORE_AV_GEN2_DBYTE);
+
+       DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID);
 }
 
 void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
@@ -1016,9 +1025,8 @@ void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
        DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
 }
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+static void ti_hdmi_4xxx_wp_audio_config_format(struct hdmi_ip_data *ip_data,
                                        struct hdmi_audio_format *aud_fmt)
 {
        u32 r;
@@ -1037,7 +1045,7 @@ void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
        hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG, r);
 }
 
-void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
+static void ti_hdmi_4xxx_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
                                        struct hdmi_audio_dma *aud_dma)
 {
        u32 r;
@@ -1055,7 +1063,7 @@ void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
        hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, r);
 }
 
-void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
+static void ti_hdmi_4xxx_core_audio_config(struct hdmi_ip_data *ip_data,
                                        struct hdmi_core_audio_config *cfg)
 {
        u32 r;
@@ -1106,27 +1114,33 @@ void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
        REG_FLD_MOD(av_base, HDMI_CORE_AV_SPDIF_CTRL,
                                                cfg->fs_override, 1, 1);
 
-       /* I2S parameters */
-       REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_CHST4,
-                                               cfg->freq_sample, 3, 0);
-
+       /*
+        * Set IEC-60958-3 channel status word. It is passed to the IP
+        * just as it is received. The user of the driver is responsible
+        * for its contents.
+        */
+       hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST0,
+                      cfg->iec60958_cfg->status[0]);
+       hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST1,
+                      cfg->iec60958_cfg->status[1]);
+       hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST2,
+                      cfg->iec60958_cfg->status[2]);
+       /* yes, this is correct: status[3] goes to CHST4 register */
+       hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST4,
+                      cfg->iec60958_cfg->status[3]);
+       /* yes, this is correct: status[4] goes to CHST5 register */
+       hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5,
+                      cfg->iec60958_cfg->status[4]);
+
+       /* set I2S parameters */
        r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL);
-       r = FLD_MOD(r, cfg->i2s_cfg.en_high_bitrate_aud, 7, 7);
        r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
-       r = FLD_MOD(r, cfg->i2s_cfg.cbit_order, 5, 5);
        r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
-       r = FLD_MOD(r, cfg->i2s_cfg.ws_polarity, 3, 3);
        r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
        r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
        r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
        hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL, r);
 
-       r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_CHST5);
-       r = FLD_MOD(r, cfg->freq_sample, 7, 4);
-       r = FLD_MOD(r, cfg->i2s_cfg.word_length, 3, 1);
-       r = FLD_MOD(r, cfg->i2s_cfg.word_max_length, 0, 0);
-       hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5, r);
-
        REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_IN_LEN,
                        cfg->i2s_cfg.in_length_bits, 3, 0);
 
@@ -1138,12 +1152,19 @@ void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
        r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
        r = FLD_MOD(r, cfg->en_spdif, 1, 1);
        hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_MODE, r);
+
+       /* Audio channel mappings */
+       /* TODO: Make channel mapping dynamic. For now, map channels
+        * in the ALSA order: FL/FR/RL/RR/C/LFE/SL/SR. Remapping is needed as
+        * HDMI speaker order is different. See CEA-861 Section 6.6.2.
+        */
+       hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_MAP, 0x78);
+       REG_FLD_MOD(av_base, HDMI_CORE_AV_SWAP_I2S, 1, 5, 5);
 }
 
-void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
-               struct hdmi_core_infoframe_audio *info_aud)
+static void ti_hdmi_4xxx_core_audio_infoframe_cfg(struct hdmi_ip_data *ip_data,
+               struct snd_cea_861_aud_if *info_aud)
 {
-       u8 val;
        u8 sum = 0, checksum = 0;
        void __iomem *av_base = hdmi_av_base(ip_data);
 
@@ -1157,24 +1178,23 @@ void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
        hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_LEN, 0x0a);
        sum += 0x84 + 0x001 + 0x00a;
 
-       val = (info_aud->db1_coding_type << 4)
-                       | (info_aud->db1_channel_count - 1);
-       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0), val);
-       sum += val;
+       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0),
+                      info_aud->db1_ct_cc);
+       sum += info_aud->db1_ct_cc;
 
-       val = (info_aud->db2_sample_freq << 2) | info_aud->db2_sample_size;
-       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1), val);
-       sum += val;
+       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1),
+                      info_aud->db2_sf_ss);
+       sum += info_aud->db2_sf_ss;
 
-       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), 0x00);
+       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), info_aud->db3);
+       sum += info_aud->db3;
 
-       val = info_aud->db4_channel_alloc;
-       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), val);
-       sum += val;
+       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), info_aud->db4_ca);
+       sum += info_aud->db4_ca;
 
-       val = (info_aud->db5_downmix_inh << 7) | (info_aud->db5_lsv << 3);
-       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4), val);
-       sum += val;
+       hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4),
+                      info_aud->db5_dminh_lsv);
+       sum += info_aud->db5_dminh_lsv;
 
        hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
        hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
@@ -1192,70 +1212,212 @@ void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
         */
 }
 
-int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
-                               u32 sample_freq, u32 *n, u32 *cts)
+int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
+               struct omap_dss_audio *audio)
 {
-       u32 r;
-       u32 deep_color = 0;
-       u32 pclk = ip_data->cfg.timings.pixel_clock;
-
-       if (n == NULL || cts == NULL)
+       struct hdmi_audio_format audio_format;
+       struct hdmi_audio_dma audio_dma;
+       struct hdmi_core_audio_config core;
+       int err, n, cts, channel_count;
+       unsigned int fs_nr;
+       bool word_length_16b = false;
+
+       if (!audio || !audio->iec || !audio->cea || !ip_data)
                return -EINVAL;
+
+       core.iec60958_cfg = audio->iec;
        /*
-        * Obtain current deep color configuration. This needed
-        * to calculate the TMDS clock based on the pixel clock.
+        * In the IEC-60958 status word, check if the audio sample word length
+        * is 16-bit as several optimizations can be performed in such case.
         */
-       r = REG_GET(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, 1, 0);
-       switch (r) {
-       case 1: /* No deep color selected */
-               deep_color = 100;
+       if (!(audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24))
+               if (audio->iec->status[4] & IEC958_AES4_CON_WORDLEN_20_16)
+                       word_length_16b = true;
+
+       /* I2S configuration. See Phillips' specification */
+       if (word_length_16b)
+               core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
+       else
+               core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+       /*
+        * The I2S input word length is twice the lenght given in the IEC-60958
+        * status word. If the word size is greater than
+        * 20 bits, increment by one.
+        */
+       core.i2s_cfg.in_length_bits = audio->iec->status[4]
+               & IEC958_AES4_CON_WORDLEN;
+       if (audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24)
+               core.i2s_cfg.in_length_bits++;
+       core.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
+       core.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
+       core.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
+       core.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
+
+       /* convert sample frequency to a number */
+       switch (audio->iec->status[3] & IEC958_AES3_CON_FS) {
+       case IEC958_AES3_CON_FS_32000:
+               fs_nr = 32000;
+               break;
+       case IEC958_AES3_CON_FS_44100:
+               fs_nr = 44100;
+               break;
+       case IEC958_AES3_CON_FS_48000:
+               fs_nr = 48000;
                break;
-       case 2: /* 10-bit deep color selected */
-               deep_color = 125;
+       case IEC958_AES3_CON_FS_88200:
+               fs_nr = 88200;
                break;
-       case 3: /* 12-bit deep color selected */
-               deep_color = 150;
+       case IEC958_AES3_CON_FS_96000:
+               fs_nr = 96000;
+               break;
+       case IEC958_AES3_CON_FS_176400:
+               fs_nr = 176400;
+               break;
+       case IEC958_AES3_CON_FS_192000:
+               fs_nr = 192000;
                break;
        default:
                return -EINVAL;
        }
 
-       switch (sample_freq) {
-       case 32000:
-               if ((deep_color == 125) && ((pclk == 54054)
-                               || (pclk == 74250)))
-                       *n = 8192;
-               else
-                       *n = 4096;
+       err = hdmi_compute_acr(fs_nr, &n, &cts);
+
+       /* Audio clock regeneration settings */
+       core.n = n;
+       core.cts = cts;
+       if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
+               core.aud_par_busclk = 0;
+               core.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
+               core.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
+       } else {
+               core.aud_par_busclk = (((128 * 31) - 1) << 8);
+               core.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
+               core.use_mclk = true;
+       }
+
+       if (core.use_mclk)
+               core.mclk_mode = HDMI_AUDIO_MCLK_128FS;
+
+       /* Audio channels settings */
+       channel_count = (audio->cea->db1_ct_cc &
+                        CEA861_AUDIO_INFOFRAME_DB1CC) + 1;
+
+       switch (channel_count) {
+       case 2:
+               audio_format.active_chnnls_msk = 0x03;
+               break;
+       case 3:
+               audio_format.active_chnnls_msk = 0x07;
+               break;
+       case 4:
+               audio_format.active_chnnls_msk = 0x0f;
+               break;
+       case 5:
+               audio_format.active_chnnls_msk = 0x1f;
                break;
-       case 44100:
-               *n = 6272;
+       case 6:
+               audio_format.active_chnnls_msk = 0x3f;
                break;
-       case 48000:
-               if ((deep_color == 125) && ((pclk == 54054)
-                               || (pclk == 74250)))
-                       *n = 8192;
-               else
-                       *n = 6144;
+       case 7:
+               audio_format.active_chnnls_msk = 0x7f;
+               break;
+       case 8:
+               audio_format.active_chnnls_msk = 0xff;
                break;
        default:
-               *n = 0;
                return -EINVAL;
        }
 
-       /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
-       *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
+       /*
+        * the HDMI IP needs to enable four stereo channels when transmitting
+        * more than 2 audio channels
+        */
+       if (channel_count == 2) {
+               audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
+               core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
+               core.layout = HDMI_AUDIO_LAYOUT_2CH;
+       } else {
+               audio_format.stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS;
+               core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
+                               HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN |
+                               HDMI_AUDIO_I2S_SD3_EN;
+               core.layout = HDMI_AUDIO_LAYOUT_8CH;
+       }
+
+       core.en_spdif = false;
+       /* use sample frequency from channel status word */
+       core.fs_override = true;
+       /* enable ACR packets */
+       core.en_acr_pkt = true;
+       /* disable direct streaming digital audio */
+       core.en_dsd_audio = false;
+       /* use parallel audio interface */
+       core.en_parallel_aud_input = true;
+
+       /* DMA settings */
+       if (word_length_16b)
+               audio_dma.transfer_size = 0x10;
+       else
+               audio_dma.transfer_size = 0x20;
+       audio_dma.block_size = 0xC0;
+       audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
+       audio_dma.fifo_threshold = 0x20; /* in number of samples */
+
+       /* audio FIFO format settings */
+       if (word_length_16b) {
+               audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
+               audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
+               audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
+       } else {
+               audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
+               audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
+               audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+       }
+       audio_format.type = HDMI_AUDIO_TYPE_LPCM;
+       audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
+       /* disable start/stop signals of IEC 60958 blocks */
+       audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON;
+
+       /* configure DMA and audio FIFO format*/
+       ti_hdmi_4xxx_wp_audio_config_dma(ip_data, &audio_dma);
+       ti_hdmi_4xxx_wp_audio_config_format(ip_data, &audio_format);
+
+       /* configure the core*/
+       ti_hdmi_4xxx_core_audio_config(ip_data, &core);
+
+       /* configure CEA 861 audio infoframe*/
+       ti_hdmi_4xxx_core_audio_infoframe_cfg(ip_data, audio->cea);
 
        return 0;
 }
 
-void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable)
+int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data)
+{
+       REG_FLD_MOD(hdmi_wp_base(ip_data),
+                   HDMI_WP_AUDIO_CTRL, true, 31, 31);
+       return 0;
+}
+
+void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data)
+{
+       REG_FLD_MOD(hdmi_wp_base(ip_data),
+                   HDMI_WP_AUDIO_CTRL, false, 31, 31);
+}
+
+int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data)
 {
        REG_FLD_MOD(hdmi_av_base(ip_data),
-                               HDMI_CORE_AV_AUD_MODE, enable, 0, 0);
+                   HDMI_CORE_AV_AUD_MODE, true, 0, 0);
        REG_FLD_MOD(hdmi_wp_base(ip_data),
-                               HDMI_WP_AUDIO_CTRL, enable, 31, 31);
+                   HDMI_WP_AUDIO_CTRL, true, 30, 30);
+       return 0;
+}
+
+void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data)
+{
+       REG_FLD_MOD(hdmi_av_base(ip_data),
+                   HDMI_CORE_AV_AUD_MODE, false, 0, 0);
        REG_FLD_MOD(hdmi_wp_base(ip_data),
-                               HDMI_WP_AUDIO_CTRL, enable, 30, 30);
+                   HDMI_WP_AUDIO_CTRL, false, 30, 30);
 }
 #endif
index a14d1a0e6e4146d00a70a88645d7292ea1793288..8366ae19e82eece140606c7178e6a70216fbec87 100644 (file)
 #include <linux/string.h>
 #include <video/omapdss.h>
 #include "ti_hdmi.h"
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-#endif
 
 /* HDMI Wrapper */
 
 #define HDMI_CORE_SYS_SRST                     0x14
 #define HDMI_CORE_CTRL1                                0x20
 #define HDMI_CORE_SYS_SYS_STAT                 0x24
+#define HDMI_CORE_SYS_DE_DLY                   0xC8
+#define HDMI_CORE_SYS_DE_CTRL                  0xCC
+#define HDMI_CORE_SYS_DE_TOP                   0xD0
+#define HDMI_CORE_SYS_DE_CNTL                  0xD8
+#define HDMI_CORE_SYS_DE_CNTH                  0xDC
+#define HDMI_CORE_SYS_DE_LINL                  0xE0
+#define HDMI_CORE_SYS_DE_LINH_1                        0xE4
 #define HDMI_CORE_SYS_VID_ACEN                 0x124
 #define HDMI_CORE_SYS_VID_MODE                 0x128
 #define HDMI_CORE_SYS_INTR_STATE               0x1C0
 #define HDMI_CORE_SYS_INTR4                    0x1D0
 #define HDMI_CORE_SYS_UMASK1                   0x1D4
 #define HDMI_CORE_SYS_TMDS_CTRL                        0x208
-#define HDMI_CORE_SYS_DE_DLY                   0xC8
-#define HDMI_CORE_SYS_DE_CTRL                  0xCC
-#define HDMI_CORE_SYS_DE_TOP                   0xD0
-#define HDMI_CORE_SYS_DE_CNTL                  0xD8
-#define HDMI_CORE_SYS_DE_CNTH                  0xDC
-#define HDMI_CORE_SYS_DE_LINL                  0xE0
-#define HDMI_CORE_SYS_DE_LINH_1                        0xE4
+
 #define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC        0x1
 #define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC        0x1
-#define HDMI_CORE_CTRL1_BSEL_24BITBUS          0x1
+#define HDMI_CORE_CTRL1_BSEL_24BITBUS  0x1
 #define HDMI_CORE_CTRL1_EDGE_RISINGEDGE        0x1
 
 /* HDMI DDC E-DID */
-#define HDMI_CORE_DDC_CMD                      0x3CC
-#define HDMI_CORE_DDC_STATUS                   0x3C8
 #define HDMI_CORE_DDC_ADDR                     0x3B4
+#define HDMI_CORE_DDC_SEGM                     0x3B8
 #define HDMI_CORE_DDC_OFFSET                   0x3BC
 #define HDMI_CORE_DDC_COUNT1                   0x3C0
 #define HDMI_CORE_DDC_COUNT2                   0x3C4
+#define HDMI_CORE_DDC_STATUS                   0x3C8
+#define HDMI_CORE_DDC_CMD                      0x3CC
 #define HDMI_CORE_DDC_DATA                     0x3D0
-#define HDMI_CORE_DDC_SEGM                     0x3B8
 
 /* HDMI IP Core Audio Video */
 
-#define HDMI_CORE_AV_HDMI_CTRL                 0xBC
-#define HDMI_CORE_AV_DPD                       0xF4
-#define HDMI_CORE_AV_PB_CTRL1                  0xF8
-#define HDMI_CORE_AV_PB_CTRL2                  0xFC
-#define HDMI_CORE_AV_AVI_TYPE                  0x100
-#define HDMI_CORE_AV_AVI_VERS                  0x104
-#define HDMI_CORE_AV_AVI_LEN                   0x108
-#define HDMI_CORE_AV_AVI_CHSUM                 0x10C
-#define HDMI_CORE_AV_AVI_DBYTE(n)              (n * 4 + 0x110)
-#define HDMI_CORE_AV_AVI_DBYTE_NELEMS          15
-#define HDMI_CORE_AV_SPD_DBYTE(n)              (n * 4 + 0x190)
-#define HDMI_CORE_AV_SPD_DBYTE_NELEMS          27
-#define HDMI_CORE_AV_AUD_DBYTE(n)              (n * 4 + 0x210)
-#define HDMI_CORE_AV_AUD_DBYTE_NELEMS          10
-#define HDMI_CORE_AV_MPEG_DBYTE(n)             (n * 4 + 0x290)
-#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS         27
-#define HDMI_CORE_AV_GEN_DBYTE(n)              (n * 4 + 0x300)
-#define HDMI_CORE_AV_GEN_DBYTE_NELEMS          31
-#define HDMI_CORE_AV_GEN2_DBYTE(n)             (n * 4 + 0x380)
-#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS         31
 #define HDMI_CORE_AV_ACR_CTRL                  0x4
 #define HDMI_CORE_AV_FREQ_SVAL                 0x8
 #define HDMI_CORE_AV_N_SVAL1                   0xC
 #define HDMI_CORE_AV_AVI_VERS                  0x104
 #define HDMI_CORE_AV_AVI_LEN                   0x108
 #define HDMI_CORE_AV_AVI_CHSUM                 0x10C
+#define HDMI_CORE_AV_AVI_DBYTE(n)              (n * 4 + 0x110)
 #define HDMI_CORE_AV_SPD_TYPE                  0x180
 #define HDMI_CORE_AV_SPD_VERS                  0x184
 #define HDMI_CORE_AV_SPD_LEN                   0x188
 #define HDMI_CORE_AV_SPD_CHSUM                 0x18C
+#define HDMI_CORE_AV_SPD_DBYTE(n)              (n * 4 + 0x190)
 #define HDMI_CORE_AV_AUDIO_TYPE                        0x200
 #define HDMI_CORE_AV_AUDIO_VERS                        0x204
 #define HDMI_CORE_AV_AUDIO_LEN                 0x208
 #define HDMI_CORE_AV_AUDIO_CHSUM               0x20C
+#define HDMI_CORE_AV_AUD_DBYTE(n)              (n * 4 + 0x210)
 #define HDMI_CORE_AV_MPEG_TYPE                 0x280
 #define HDMI_CORE_AV_MPEG_VERS                 0x284
 #define HDMI_CORE_AV_MPEG_LEN                  0x288
 #define HDMI_CORE_AV_MPEG_CHSUM                        0x28C
+#define HDMI_CORE_AV_MPEG_DBYTE(n)             (n * 4 + 0x290)
+#define HDMI_CORE_AV_GEN_DBYTE(n)              (n * 4 + 0x300)
 #define HDMI_CORE_AV_CP_BYTE1                  0x37C
+#define HDMI_CORE_AV_GEN2_DBYTE(n)             (n * 4 + 0x380)
 #define HDMI_CORE_AV_CEC_ADDR_ID               0x3FC
+
 #define HDMI_CORE_AV_SPD_DBYTE_ELSIZE          0x4
 #define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE         0x4
 #define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE         0x4
 #define HDMI_CORE_AV_GEN_DBYTE_ELSIZE          0x4
 
+#define HDMI_CORE_AV_AVI_DBYTE_NELEMS          15
+#define HDMI_CORE_AV_SPD_DBYTE_NELEMS          27
+#define HDMI_CORE_AV_AUD_DBYTE_NELEMS          10
+#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS         27
+#define HDMI_CORE_AV_GEN_DBYTE_NELEMS          31
+#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS         31
+
 /* PLL */
 
 #define PLLCTRL_PLL_CONTROL                    0x0
@@ -284,35 +274,6 @@ enum hdmi_core_infoframe {
        HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
        HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
        HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
-       HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM = 0,
-       HDMI_INFOFRAME_AUDIO_DB1CT_IEC60958 = 1,
-       HDMI_INFOFRAME_AUDIO_DB1CT_AC3 = 2,
-       HDMI_INFOFRAME_AUDIO_DB1CT_MPEG1 = 3,
-       HDMI_INFOFRAME_AUDIO_DB1CT_MP3 = 4,
-       HDMI_INFOFRAME_AUDIO_DB1CT_MPEG2_MULTICH = 5,
-       HDMI_INFOFRAME_AUDIO_DB1CT_AAC = 6,
-       HDMI_INFOFRAME_AUDIO_DB1CT_DTS = 7,
-       HDMI_INFOFRAME_AUDIO_DB1CT_ATRAC = 8,
-       HDMI_INFOFRAME_AUDIO_DB1CT_ONEBIT = 9,
-       HDMI_INFOFRAME_AUDIO_DB1CT_DOLBY_DIGITAL_PLUS = 10,
-       HDMI_INFOFRAME_AUDIO_DB1CT_DTS_HD = 11,
-       HDMI_INFOFRAME_AUDIO_DB1CT_MAT = 12,
-       HDMI_INFOFRAME_AUDIO_DB1CT_DST = 13,
-       HDMI_INFOFRAME_AUDIO_DB1CT_WMA_PRO = 14,
-       HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM = 0,
-       HDMI_INFOFRAME_AUDIO_DB2SF_32000 = 1,
-       HDMI_INFOFRAME_AUDIO_DB2SF_44100 = 2,
-       HDMI_INFOFRAME_AUDIO_DB2SF_48000 = 3,
-       HDMI_INFOFRAME_AUDIO_DB2SF_88200 = 4,
-       HDMI_INFOFRAME_AUDIO_DB2SF_96000 = 5,
-       HDMI_INFOFRAME_AUDIO_DB2SF_176400 = 6,
-       HDMI_INFOFRAME_AUDIO_DB2SF_192000 = 7,
-       HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM = 0,
-       HDMI_INFOFRAME_AUDIO_DB2SS_16BIT = 1,
-       HDMI_INFOFRAME_AUDIO_DB2SS_20BIT = 2,
-       HDMI_INFOFRAME_AUDIO_DB2SS_24BIT = 3,
-       HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PERMITTED = 0,
-       HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PROHIBITED = 1
 };
 
 enum hdmi_packing_mode {
@@ -322,17 +283,6 @@ enum hdmi_packing_mode {
        HDMI_PACK_ALREADYPACKED = 7
 };
 
-enum hdmi_core_audio_sample_freq {
-       HDMI_AUDIO_FS_32000 = 0x3,
-       HDMI_AUDIO_FS_44100 = 0x0,
-       HDMI_AUDIO_FS_48000 = 0x2,
-       HDMI_AUDIO_FS_88200 = 0x8,
-       HDMI_AUDIO_FS_96000 = 0xA,
-       HDMI_AUDIO_FS_176400 = 0xC,
-       HDMI_AUDIO_FS_192000 = 0xE,
-       HDMI_AUDIO_FS_NOT_INDICATED = 0x1
-};
-
 enum hdmi_core_audio_layout {
        HDMI_AUDIO_LAYOUT_2CH = 0,
        HDMI_AUDIO_LAYOUT_8CH = 1
@@ -387,37 +337,12 @@ enum hdmi_audio_blk_strt_end_sig {
 };
 
 enum hdmi_audio_i2s_config {
-       HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT = 0,
-       HDMI_AUDIO_I2S_WS_POLARIT_YLOW_IS_RIGHT = 1,
        HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
        HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
-       HDMI_AUDIO_I2S_MAX_WORD_20BITS = 0,
-       HDMI_AUDIO_I2S_MAX_WORD_24BITS = 1,
-       HDMI_AUDIO_I2S_CHST_WORD_NOT_SPECIFIED = 0,
-       HDMI_AUDIO_I2S_CHST_WORD_16_BITS = 1,
-       HDMI_AUDIO_I2S_CHST_WORD_17_BITS = 6,
-       HDMI_AUDIO_I2S_CHST_WORD_18_BITS = 2,
-       HDMI_AUDIO_I2S_CHST_WORD_19_BITS = 4,
-       HDMI_AUDIO_I2S_CHST_WORD_20_BITS_20MAX = 5,
-       HDMI_AUDIO_I2S_CHST_WORD_20_BITS_24MAX = 1,
-       HDMI_AUDIO_I2S_CHST_WORD_21_BITS = 6,
-       HDMI_AUDIO_I2S_CHST_WORD_22_BITS = 2,
-       HDMI_AUDIO_I2S_CHST_WORD_23_BITS = 4,
-       HDMI_AUDIO_I2S_CHST_WORD_24_BITS = 5,
        HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0,
        HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1,
        HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0,
        HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_NA = 0,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_16 = 2,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_17 = 12,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_18 = 4,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_19 = 8,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_20 = 10,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_21 = 13,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_22 = 5,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_23 = 9,
-       HDMI_AUDIO_I2S_INPUT_LENGTH_24 = 11,
        HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0,
        HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1,
        HDMI_AUDIO_I2S_SD0_EN = 1,
@@ -446,20 +371,6 @@ struct hdmi_core_video_config {
        enum hdmi_core_tclkselclkmult   tclk_sel_clkmult;
 };
 
-/*
- * Refer to section 8.2 in HDMI 1.3 specification for
- * details about infoframe databytes
- */
-struct hdmi_core_infoframe_audio {
-       u8 db1_coding_type;
-       u8 db1_channel_count;
-       u8 db2_sample_freq;
-       u8 db2_sample_size;
-       u8 db4_channel_alloc;
-       bool db5_downmix_inh;
-       u8 db5_lsv;     /* Level shift values for downmix */
-};
-
 struct hdmi_core_packet_enable_repeat {
        u32     audio_pkt;
        u32     audio_pkt_repeat;
@@ -496,15 +407,10 @@ struct hdmi_audio_dma {
 };
 
 struct hdmi_core_audio_i2s_config {
-       u8 word_max_length;
-       u8 word_length;
        u8 in_length_bits;
        u8 justification;
-       u8 en_high_bitrate_aud;
        u8 sck_edge_mode;
-       u8 cbit_order;
        u8 vbit;
-       u8 ws_polarity;
        u8 direction;
        u8 shift;
        u8 active_sds;
@@ -512,7 +418,7 @@ struct hdmi_core_audio_i2s_config {
 
 struct hdmi_core_audio_config {
        struct hdmi_core_audio_i2s_config       i2s_cfg;
-       enum hdmi_core_audio_sample_freq        freq_sample;
+       struct snd_aes_iec958                   *iec60958_cfg;
        bool                                    fs_override;
        u32                                     n;
        u32                                     cts;
@@ -527,17 +433,4 @@ struct hdmi_core_audio_config {
        bool                                    en_spdif;
 };
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
-       defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
-                               u32 sample_freq, u32 *n, u32 *cts);
-void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
-               struct hdmi_core_infoframe_audio *info_aud);
-void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
-                                       struct hdmi_core_audio_config *cfg);
-void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
-                                       struct hdmi_audio_dma *aud_dma);
-void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
-                                       struct hdmi_audio_format *aud_fmt);
-#endif
 #endif
index 9c3daf71750c769057d3a03f05019e950a3b5ed8..2b8973931ff48e845cd9a1386b7b437b369b24e4 100644 (file)
@@ -415,6 +415,7 @@ static const struct venc_config *venc_timings_to_config(
                return &venc_config_ntsc_trm;
 
        BUG();
+       return NULL;
 }
 
 static int venc_power_on(struct omap_dss_device *dssdev)
@@ -440,10 +441,11 @@ static int venc_power_on(struct omap_dss_device *dssdev)
 
        venc_write_reg(VENC_OUTPUT_CONTROL, l);
 
-       dispc_set_digit_size(dssdev->panel.timings.x_res,
-                       dssdev->panel.timings.y_res/2);
+       dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
 
-       regulator_enable(venc.vdda_dac_reg);
+       r = regulator_enable(venc.vdda_dac_reg);
+       if (r)
+               goto err;
 
        if (dssdev->platform_enable)
                dssdev->platform_enable(dssdev);
@@ -485,16 +487,68 @@ unsigned long venc_get_pixel_clock(void)
        return 13500000;
 }
 
+static ssize_t display_output_type_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct omap_dss_device *dssdev = to_dss_device(dev);
+       const char *ret;
+
+       switch (dssdev->phy.venc.type) {
+       case OMAP_DSS_VENC_TYPE_COMPOSITE:
+               ret = "composite";
+               break;
+       case OMAP_DSS_VENC_TYPE_SVIDEO:
+               ret = "svideo";
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", ret);
+}
+
+static ssize_t display_output_type_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+{
+       struct omap_dss_device *dssdev = to_dss_device(dev);
+       enum omap_dss_venc_type new_type;
+
+       if (sysfs_streq("composite", buf))
+               new_type = OMAP_DSS_VENC_TYPE_COMPOSITE;
+       else if (sysfs_streq("svideo", buf))
+               new_type = OMAP_DSS_VENC_TYPE_SVIDEO;
+       else
+               return -EINVAL;
+
+       mutex_lock(&venc.venc_lock);
+
+       if (dssdev->phy.venc.type != new_type) {
+               dssdev->phy.venc.type = new_type;
+               if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+                       venc_power_off(dssdev);
+                       venc_power_on(dssdev);
+               }
+       }
+
+       mutex_unlock(&venc.venc_lock);
+
+       return size;
+}
+
+static DEVICE_ATTR(output_type, S_IRUGO | S_IWUSR,
+               display_output_type_show, display_output_type_store);
+
 /* driver */
 static int venc_panel_probe(struct omap_dss_device *dssdev)
 {
        dssdev->panel.timings = omap_dss_pal_timings;
 
-       return 0;
+       return device_create_file(&dssdev->dev, &dev_attr_output_type);
 }
 
 static void venc_panel_remove(struct omap_dss_device *dssdev)
 {
+       device_remove_file(&dssdev->dev, &dev_attr_output_type);
 }
 
 static int venc_panel_enable(struct omap_dss_device *dssdev)
@@ -577,12 +631,6 @@ static int venc_panel_resume(struct omap_dss_device *dssdev)
        return venc_panel_enable(dssdev);
 }
 
-static void venc_get_timings(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings)
-{
-       *timings = dssdev->panel.timings;
-}
-
 static void venc_set_timings(struct omap_dss_device *dssdev,
                        struct omap_video_timings *timings)
 {
@@ -597,6 +645,8 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
                /* turn the venc off and on to get new timings to use */
                venc_panel_disable(dssdev);
                venc_panel_enable(dssdev);
+       } else {
+               dss_mgr_set_timings(dssdev->manager, timings);
        }
 }
 
@@ -661,7 +711,6 @@ static struct omap_dss_driver venc_driver = {
        .get_resolution = omapdss_default_get_resolution,
        .get_recommended_bpp = omapdss_default_get_recommended_bpp,
 
-       .get_timings    = venc_get_timings,
        .set_timings    = venc_set_timings,
        .check_timings  = venc_check_timings,
 
@@ -675,7 +724,7 @@ static struct omap_dss_driver venc_driver = {
 };
 /* driver end */
 
-int venc_init_display(struct omap_dss_device *dssdev)
+static int __init venc_init_display(struct omap_dss_device *dssdev)
 {
        DSSDBG("init_display\n");
 
@@ -695,7 +744,7 @@ int venc_init_display(struct omap_dss_device *dssdev)
        return 0;
 }
 
-void venc_dump_regs(struct seq_file *s)
+static void venc_dump_regs(struct seq_file *s)
 {
 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
 
@@ -779,8 +828,32 @@ static void venc_put_clocks(void)
                clk_put(venc.tv_dac_clk);
 }
 
+static void __init venc_probe_pdata(struct platform_device *pdev)
+{
+       struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+       int r, i;
+
+       for (i = 0; i < pdata->num_devices; ++i) {
+               struct omap_dss_device *dssdev = pdata->devices[i];
+
+               if (dssdev->type != OMAP_DISPLAY_TYPE_VENC)
+                       continue;
+
+               r = venc_init_display(dssdev);
+               if (r) {
+                       DSSERR("device %s init failed: %d\n", dssdev->name, r);
+                       continue;
+               }
+
+               r = omap_dss_register_device(dssdev, &pdev->dev, i);
+               if (r)
+                       DSSERR("device %s register failed: %d\n",
+                                       dssdev->name, r);
+       }
+}
+
 /* VENC HW IP initialisation */
-static int omap_venchw_probe(struct platform_device *pdev)
+static int __init omap_venchw_probe(struct platform_device *pdev)
 {
        u8 rev_id;
        struct resource *venc_mem;
@@ -824,6 +897,10 @@ static int omap_venchw_probe(struct platform_device *pdev)
        if (r)
                goto err_reg_panel_driver;
 
+       dss_debugfs_create_file("venc", venc_dump_regs);
+
+       venc_probe_pdata(pdev);
+
        return 0;
 
 err_reg_panel_driver:
@@ -833,12 +910,15 @@ err_runtime_get:
        return r;
 }
 
-static int omap_venchw_remove(struct platform_device *pdev)
+static int __exit omap_venchw_remove(struct platform_device *pdev)
 {
+       omap_dss_unregister_child_devices(&pdev->dev);
+
        if (venc.vdda_dac_reg != NULL) {
                regulator_put(venc.vdda_dac_reg);
                venc.vdda_dac_reg = NULL;
        }
+
        omap_dss_unregister_driver(&venc_driver);
 
        pm_runtime_disable(&pdev->dev);
@@ -853,7 +933,6 @@ static int venc_runtime_suspend(struct device *dev)
                clk_disable(venc.tv_dac_clk);
 
        dispc_runtime_put();
-       dss_runtime_put();
 
        return 0;
 }
@@ -862,23 +941,14 @@ static int venc_runtime_resume(struct device *dev)
 {
        int r;
 
-       r = dss_runtime_get();
-       if (r < 0)
-               goto err_get_dss;
-
        r = dispc_runtime_get();
        if (r < 0)
-               goto err_get_dispc;
+               return r;
 
        if (venc.tv_dac_clk)
                clk_enable(venc.tv_dac_clk);
 
        return 0;
-
-err_get_dispc:
-       dss_runtime_put();
-err_get_dss:
-       return r;
 }
 
 static const struct dev_pm_ops venc_pm_ops = {
@@ -887,8 +957,7 @@ static const struct dev_pm_ops venc_pm_ops = {
 };
 
 static struct platform_driver omap_venchw_driver = {
-       .probe          = omap_venchw_probe,
-       .remove         = omap_venchw_remove,
+       .remove         = __exit_p(omap_venchw_remove),
        .driver         = {
                .name   = "omapdss_venc",
                .owner  = THIS_MODULE,
@@ -896,18 +965,18 @@ static struct platform_driver omap_venchw_driver = {
        },
 };
 
-int venc_init_platform_driver(void)
+int __init venc_init_platform_driver(void)
 {
        if (cpu_is_omap44xx())
                return 0;
 
-       return platform_driver_register(&omap_venchw_driver);
+       return platform_driver_probe(&omap_venchw_driver, omap_venchw_probe);
 }
 
-void venc_uninit_platform_driver(void)
+void __exit venc_uninit_platform_driver(void)
 {
        if (cpu_is_omap44xx())
                return;
 
-       return platform_driver_unregister(&omap_venchw_driver);
+       platform_driver_unregister(&omap_venchw_driver);
 }
index 6a09ef87e14fae9886461bb7855c9d97350707e0..c6cf372d22c58b9941051c67028df365e178657f 100644 (file)
@@ -70,7 +70,7 @@ static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
 
        DBG("omapfb_setup_plane\n");
 
-       if (ofbi->num_overlays != 1) {
+       if (ofbi->num_overlays == 0) {
                r = -EINVAL;
                goto out;
        }
@@ -185,7 +185,7 @@ static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
 {
        struct omapfb_info *ofbi = FB2OFB(fbi);
 
-       if (ofbi->num_overlays != 1) {
+       if (ofbi->num_overlays == 0) {
                memset(pi, 0, sizeof(*pi));
        } else {
                struct omap_overlay *ovl;
@@ -225,6 +225,9 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
        down_write_nested(&rg->lock, rg->id);
        atomic_inc(&rg->lock_count);
 
+       if (rg->size == size && rg->type == mi->type)
+               goto out;
+
        if (atomic_read(&rg->map_count)) {
                r = -EBUSY;
                goto out;
@@ -247,12 +250,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
                }
        }
 
-       if (rg->size != size || rg->type != mi->type) {
-               r = omapfb_realloc_fbmem(fbi, size, mi->type);
-               if (r) {
-                       dev_err(fbdev->dev, "realloc fbmem failed\n");
-                       goto out;
-               }
+       r = omapfb_realloc_fbmem(fbi, size, mi->type);
+       if (r) {
+               dev_err(fbdev->dev, "realloc fbmem failed\n");
+               goto out;
        }
 
  out:
index b00db4068d21c0280631fb1ca5816d4a499501f0..3450ea0966c97e6227145f3ad484f4850f0125ff 100644 (file)
@@ -179,6 +179,7 @@ static unsigned omapfb_get_vrfb_offset(const struct omapfb_info *ofbi, int rot)
                break;
        default:
                BUG();
+               return 0;
        }
 
        offset *= vrfb->bytespp;
@@ -1502,7 +1503,7 @@ static int omapfb_parse_vram_param(const char *param, int max_entries,
 
                fbnum = simple_strtoul(p, &p, 10);
 
-               if (p == param)
+               if (p == start)
                        return -EINVAL;
 
                if (*p != ':')
@@ -2307,7 +2308,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
        return 0;
 }
 
-static int omapfb_probe(struct platform_device *pdev)
+static int __init omapfb_probe(struct platform_device *pdev)
 {
        struct omapfb2_device *fbdev = NULL;
        int r = 0;
@@ -2448,7 +2449,7 @@ err0:
        return r;
 }
 
-static int omapfb_remove(struct platform_device *pdev)
+static int __exit omapfb_remove(struct platform_device *pdev)
 {
        struct omapfb2_device *fbdev = platform_get_drvdata(pdev);
 
@@ -2462,8 +2463,7 @@ static int omapfb_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver omapfb_driver = {
-       .probe          = omapfb_probe,
-       .remove         = omapfb_remove,
+       .remove         = __exit_p(omapfb_remove),
        .driver         = {
                .name   = "omapfb",
                .owner  = THIS_MODULE,
@@ -2474,7 +2474,7 @@ static int __init omapfb_init(void)
 {
        DBG("omapfb_init\n");
 
-       if (platform_driver_register(&omapfb_driver)) {
+       if (platform_driver_probe(&omapfb_driver, omapfb_probe)) {
                printk(KERN_ERR "failed to register omapfb driver\n");
                return -ENODEV;
        }
index c0bdc9b54ecf77d1f22355ca956b6367d2ceac70..30361a09aecdd231c19d40241733d1aa873e1a79 100644 (file)
@@ -166,6 +166,7 @@ static inline struct omapfb_display_data *get_display_data(
 
        /* This should never happen */
        BUG();
+       return NULL;
 }
 
 static inline void omapfb_lock(struct omapfb2_device *fbdev)
index 4e5b960c32c88bbcab1ec7e31c1f1eb7772898d7..7e990220ad2a6f52a25ff861f754d86e447a8358 100644 (file)
@@ -179,8 +179,10 @@ void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
                pixel_size_exp = 2;
        else if (bytespp == 2)
                pixel_size_exp = 1;
-       else
+       else {
                BUG();
+               return;
+       }
 
        vrfb_width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp;
        vrfb_height = ALIGN(height, VRFB_PAGE_HEIGHT);
index 1d71c08a818f7d1fc646496f9ae9c32d77943c88..0b4ae0cebedaf7c0a6b9c8f3d262d7917ecf8911 100644 (file)
@@ -316,12 +316,9 @@ pxa3xx_gcu_wait_idle(struct pxa3xx_gcu_priv *priv)
                ret = wait_event_interruptible_timeout(priv->wait_idle,
                                        !priv->shared->hw_running, HZ*4);
 
-               if (ret < 0)
+               if (ret != 0)
                        break;
 
-               if (ret > 0)
-                       continue;
-
                if (gc_readl(priv, REG_GCRBEXHR) == rbexhr &&
                    priv->shared->num_interrupts == num) {
                        QERROR("TIMEOUT");
index f3105160bf9829104d22ec4c9e2b24c063debeaa..5f9d8e69029ee2e8ab3be25b8c1bd348c8cf49fb 100644 (file)
@@ -47,7 +47,7 @@
 #ifdef CONFIG_FB_S3C_DEBUG_REGWRITE
 #undef writel
 #define writel(v, r) do { \
-       printk(KERN_DEBUG "%s: %08x => %p\n", __func__, (unsigned int)v, r); \
+       pr_debug("%s: %08x => %p\n", __func__, (unsigned int)v, r); \
        __raw_writel(v, r); \
 } while (0)
 #endif /* FB_S3C_DEBUG_REGWRITE */
@@ -495,7 +495,6 @@ static int s3c_fb_set_par(struct fb_info *info)
        u32 alpha = 0;
        u32 data;
        u32 pagewidth;
-       int clkdiv;
 
        dev_dbg(sfb->dev, "setting framebuffer parameters\n");
 
@@ -532,48 +531,9 @@ static int s3c_fb_set_par(struct fb_info *info)
        /* disable the window whilst we update it */
        writel(0, regs + WINCON(win_no));
 
-       /* use platform specified window as the basis for the lcd timings */
-
-       if (win_no == sfb->pdata->default_win) {
-               clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
-
-               data = sfb->pdata->vidcon0;
-               data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
-
-               if (clkdiv > 1)
-                       data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR;
-               else
-                       data &= ~VIDCON0_CLKDIR;        /* 1:1 clock */
-
-               /* write the timing data to the panel */
-
-               if (sfb->variant.is_2443)
-                       data |= (1 << 5);
-
-               writel(data, regs + VIDCON0);
-
+       if (!sfb->output_on)
                s3c_fb_enable(sfb, 1);
 
-               data = VIDTCON0_VBPD(var->upper_margin - 1) |
-                      VIDTCON0_VFPD(var->lower_margin - 1) |
-                      VIDTCON0_VSPW(var->vsync_len - 1);
-
-               writel(data, regs + sfb->variant.vidtcon);
-
-               data = VIDTCON1_HBPD(var->left_margin - 1) |
-                      VIDTCON1_HFPD(var->right_margin - 1) |
-                      VIDTCON1_HSPW(var->hsync_len - 1);
-
-               /* VIDTCON1 */
-               writel(data, regs + sfb->variant.vidtcon + 4);
-
-               data = VIDTCON2_LINEVAL(var->yres - 1) |
-                      VIDTCON2_HOZVAL(var->xres - 1) |
-                      VIDTCON2_LINEVAL_E(var->yres - 1) |
-                      VIDTCON2_HOZVAL_E(var->xres - 1);
-               writel(data, regs + sfb->variant.vidtcon + 8);
-       }
-
        /* write the buffer address */
 
        /* start and end registers stride is 8 */
@@ -839,6 +799,7 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
        struct s3c_fb *sfb = win->parent;
        unsigned int index = win->index;
        u32 wincon;
+       u32 output_on = sfb->output_on;
 
        dev_dbg(sfb->dev, "blank mode %d\n", blank_mode);
 
@@ -877,34 +838,18 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
 
        shadow_protect_win(win, 1);
        writel(wincon, sfb->regs + sfb->variant.wincon + (index * 4));
-       shadow_protect_win(win, 0);
 
        /* Check the enabled state to see if we need to be running the
         * main LCD interface, as if there are no active windows then
         * it is highly likely that we also do not need to output
         * anything.
         */
-
-       /* We could do something like the following code, but the current
-        * system of using framebuffer events means that we cannot make
-        * the distinction between just window 0 being inactive and all
-        * the windows being down.
-        *
-        * s3c_fb_enable(sfb, sfb->enabled ? 1 : 0);
-       */
-
-       /* we're stuck with this until we can do something about overriding
-        * the power control using the blanking event for a single fb.
-        */
-       if (index == sfb->pdata->default_win) {
-               shadow_protect_win(win, 1);
-               s3c_fb_enable(sfb, blank_mode != FB_BLANK_POWERDOWN ? 1 : 0);
-               shadow_protect_win(win, 0);
-       }
+       s3c_fb_enable(sfb, sfb->enabled ? 1 : 0);
+       shadow_protect_win(win, 0);
 
        pm_runtime_put_sync(sfb->dev);
 
-       return 0;
+       return output_on == sfb->output_on;
 }
 
 /**
@@ -1111,7 +1056,7 @@ static struct fb_ops s3c_fb_ops = {
  *
  * Calculate the pixel clock when none has been given through platform data.
  */
-static void __devinit s3c_fb_missing_pixclock(struct fb_videomode *mode)
+static void s3c_fb_missing_pixclock(struct fb_videomode *mode)
 {
        u64 pixclk = 1000000000000ULL;
        u32 div;
@@ -1144,11 +1089,11 @@ static int __devinit s3c_fb_alloc_memory(struct s3c_fb *sfb,
 
        dev_dbg(sfb->dev, "allocating memory for display\n");
 
-       real_size = windata->win_mode.xres * windata->win_mode.yres;
+       real_size = windata->xres * windata->yres;
        virt_size = windata->virtual_x * windata->virtual_y;
 
        dev_dbg(sfb->dev, "real_size=%u (%u.%u), virt_size=%u (%u.%u)\n",
-               real_size, windata->win_mode.xres, windata->win_mode.yres,
+               real_size, windata->xres, windata->yres,
                virt_size, windata->virtual_x, windata->virtual_y);
 
        size = (real_size > virt_size) ? real_size : virt_size;
@@ -1230,7 +1175,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
                                      struct s3c_fb_win **res)
 {
        struct fb_var_screeninfo *var;
-       struct fb_videomode *initmode;
+       struct fb_videomode initmode;
        struct s3c_fb_pd_win *windata;
        struct s3c_fb_win *win;
        struct fb_info *fbinfo;
@@ -1251,11 +1196,11 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
        }
 
        windata = sfb->pdata->win[win_no];
-       initmode = &windata->win_mode;
+       initmode = *sfb->pdata->vtiming;
 
        WARN_ON(windata->max_bpp == 0);
-       WARN_ON(windata->win_mode.xres == 0);
-       WARN_ON(windata->win_mode.yres == 0);
+       WARN_ON(windata->xres == 0);
+       WARN_ON(windata->yres == 0);
 
        win = fbinfo->par;
        *res = win;
@@ -1294,7 +1239,9 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
        }
 
        /* setup the initial video mode from the window */
-       fb_videomode_to_var(&fbinfo->var, initmode);
+       initmode.xres = windata->xres;
+       initmode.yres = windata->yres;
+       fb_videomode_to_var(&fbinfo->var, &initmode);
 
        fbinfo->fix.type        = FB_TYPE_PACKED_PIXELS;
        fbinfo->fix.accel       = FB_ACCEL_NONE;
@@ -1338,6 +1285,53 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
        return 0;
 }
 
+/**
+ * s3c_fb_set_rgb_timing() - set video timing for rgb interface.
+ * @sfb: The base resources for the hardware.
+ *
+ * Set horizontal and vertical lcd rgb interface timing.
+ */
+static void s3c_fb_set_rgb_timing(struct s3c_fb *sfb)
+{
+       struct fb_videomode *vmode = sfb->pdata->vtiming;
+       void __iomem *regs = sfb->regs;
+       int clkdiv;
+       u32 data;
+
+       if (!vmode->pixclock)
+               s3c_fb_missing_pixclock(vmode);
+
+       clkdiv = s3c_fb_calc_pixclk(sfb, vmode->pixclock);
+
+       data = sfb->pdata->vidcon0;
+       data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
+
+       if (clkdiv > 1)
+               data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR;
+       else
+               data &= ~VIDCON0_CLKDIR;        /* 1:1 clock */
+
+       if (sfb->variant.is_2443)
+               data |= (1 << 5);
+       writel(data, regs + VIDCON0);
+
+       data = VIDTCON0_VBPD(vmode->upper_margin - 1) |
+              VIDTCON0_VFPD(vmode->lower_margin - 1) |
+              VIDTCON0_VSPW(vmode->vsync_len - 1);
+       writel(data, regs + sfb->variant.vidtcon);
+
+       data = VIDTCON1_HBPD(vmode->left_margin - 1) |
+              VIDTCON1_HFPD(vmode->right_margin - 1) |
+              VIDTCON1_HSPW(vmode->hsync_len - 1);
+       writel(data, regs + sfb->variant.vidtcon + 4);
+
+       data = VIDTCON2_LINEVAL(vmode->yres - 1) |
+              VIDTCON2_HOZVAL(vmode->xres - 1) |
+              VIDTCON2_LINEVAL_E(vmode->yres - 1) |
+              VIDTCON2_HOZVAL_E(vmode->xres - 1);
+       writel(data, regs + sfb->variant.vidtcon + 8);
+}
+
 /**
  * s3c_fb_clear_win() - clear hardware window registers.
  * @sfb: The base resources for the hardware.
@@ -1481,15 +1475,14 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
                writel(0xffffff, regs + WKEYCON1);
        }
 
+       s3c_fb_set_rgb_timing(sfb);
+
        /* we have the register setup, start allocating framebuffers */
 
        for (win = 0; win < fbdrv->variant.nr_windows; win++) {
                if (!pd->win[win])
                        continue;
 
-               if (!pd->win[win]->win_mode.pixclock)
-                       s3c_fb_missing_pixclock(&pd->win[win]->win_mode);
-
                ret = s3c_fb_probe_win(sfb, win, fbdrv->win[win],
                                       &sfb->windows[win]);
                if (ret < 0) {
@@ -1564,6 +1557,8 @@ static int s3c_fb_suspend(struct device *dev)
        struct s3c_fb_win *win;
        int win_no;
 
+       pm_runtime_get_sync(sfb->dev);
+
        for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) {
                win = sfb->windows[win_no];
                if (!win)
@@ -1577,6 +1572,9 @@ static int s3c_fb_suspend(struct device *dev)
                clk_disable(sfb->lcd_clk);
 
        clk_disable(sfb->bus_clk);
+
+       pm_runtime_put_sync(sfb->dev);
+
        return 0;
 }
 
@@ -1589,6 +1587,8 @@ static int s3c_fb_resume(struct device *dev)
        int win_no;
        u32 reg;
 
+       pm_runtime_get_sync(sfb->dev);
+
        clk_enable(sfb->bus_clk);
 
        if (!sfb->variant.has_clksel)
@@ -1623,6 +1623,8 @@ static int s3c_fb_resume(struct device *dev)
                shadow_protect_win(win, 0);
        }
 
+       s3c_fb_set_rgb_timing(sfb);
+
        /* restore framebuffers */
        for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
                win = sfb->windows[win_no];
@@ -1633,6 +1635,8 @@ static int s3c_fb_resume(struct device *dev)
                s3c_fb_set_par(win->fbinfo);
        }
 
+       pm_runtime_put_sync(sfb->dev);
+
        return 0;
 }
 #endif
index eafb19da2c0783d92a12ea95e3104d1e6d591d66..930e550e752ac5712b99dc4909de57d0ccd7fafd 100644 (file)
@@ -31,6 +31,7 @@
 
 #include "sh_mobile_lcdcfb.h"
 
+/* HDMI Core Control Register (HTOP0) */
 #define HDMI_SYSTEM_CTRL                       0x00 /* System control */
 #define HDMI_L_R_DATA_SWAP_CTRL_RPKT           0x01 /* L/R data swap control,
                                                        bits 19..16 of 20-bit N for Audio Clock Regeneration packet */
 #define HDMI_REVISION_ID                       0xF1 /* Revision ID */
 #define HDMI_TEST_MODE                         0xFE /* Test mode */
 
+/* HDMI Control Register (HTOP1) */
+#define HDMI_HTOP1_TEST_MODE                   0x0000 /* Test mode */
+#define HDMI_HTOP1_VIDEO_INPUT                 0x0008 /* VideoInput */
+#define HDMI_HTOP1_CORE_RSTN                   0x000C /* CoreResetn */
+#define HDMI_HTOP1_PLLBW                       0x0018 /* PLLBW */
+#define HDMI_HTOP1_CLK_TO_PHY                  0x001C /* Clk to Phy */
+#define HDMI_HTOP1_VIDEO_INPUT2                        0x0020 /* VideoInput2 */
+#define HDMI_HTOP1_TISEMP0_1                   0x0024 /* tisemp0-1 */
+#define HDMI_HTOP1_TISEMP2_C                   0x0028 /* tisemp2-c */
+#define HDMI_HTOP1_TISIDRV                     0x002C /* tisidrv */
+#define HDMI_HTOP1_TISEN                       0x0034 /* tisen */
+#define HDMI_HTOP1_TISDREN                     0x0038 /* tisdren  */
+#define HDMI_HTOP1_CISRANGE                    0x003C /* cisrange  */
+#define HDMI_HTOP1_ENABLE_SELECTOR             0x0040 /* Enable Selector */
+#define HDMI_HTOP1_MACRO_RESET                 0x0044 /* Macro reset */
+#define HDMI_HTOP1_PLL_CALIBRATION             0x0048 /* PLL calibration */
+#define HDMI_HTOP1_RE_CALIBRATION              0x004C /* Re-calibration */
+#define HDMI_HTOP1_CURRENT                     0x0050 /* Current */
+#define HDMI_HTOP1_PLL_LOCK_DETECT             0x0054 /* PLL lock detect */
+#define HDMI_HTOP1_PHY_TEST_MODE               0x0058 /* PHY Test Mode */
+#define HDMI_HTOP1_CLK_SET                     0x0080 /* Clock Set */
+#define HDMI_HTOP1_DDC_FAIL_SAFE               0x0084 /* DDC fail safe */
+#define HDMI_HTOP1_PRBS                                0x0088 /* PRBS */
+#define HDMI_HTOP1_EDID_AINC_CONTROL           0x008C /* EDID ainc Control */
+#define HDMI_HTOP1_HTOP_DCL_MODE               0x00FC /* Deep Coloer Mode */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0          0x0100 /* Deep Color:FRC COEF0 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1          0x0104 /* Deep Color:FRC COEF1 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2          0x0108 /* Deep Color:FRC COEF2 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3          0x010C /* Deep Color:FRC COEF3 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0_C                0x0110 /* Deep Color:FRC COEF0C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1_C                0x0114 /* Deep Color:FRC COEF1C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2_C                0x0118 /* Deep Color:FRC COEF2C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3_C                0x011C /* Deep Color:FRC COEF3C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_MODE           0x0120 /* Deep Color:FRC Mode */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START1                0x0124 /* Deep Color:Rect Start1 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE1         0x0128 /* Deep Color:Rect Size1 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START2                0x012C /* Deep Color:Rect Start2 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE2         0x0130 /* Deep Color:Rect Size2 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START3                0x0134 /* Deep Color:Rect Start3 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE3         0x0138 /* Deep Color:Rect Size3 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START4                0x013C /* Deep Color:Rect Start4 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE4         0x0140 /* Deep Color:Rect Size4 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1      0x0144 /* Deep Color:Fil Para Y1_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2      0x0148 /* Deep Color:Fil Para Y1_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1     0x014C /* Deep Color:Fil Para CB1_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2     0x0150 /* Deep Color:Fil Para CB1_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1     0x0154 /* Deep Color:Fil Para CR1_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2     0x0158 /* Deep Color:Fil Para CR1_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1      0x015C /* Deep Color:Fil Para Y2_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2      0x0160 /* Deep Color:Fil Para Y2_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1     0x0164 /* Deep Color:Fil Para CB2_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2     0x0168 /* Deep Color:Fil Para CB2_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1     0x016C /* Deep Color:Fil Para CR2_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2     0x0170 /* Deep Color:Fil Para CR2_2 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1                0x0174 /* Deep Color:Cor Para Y1 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1       0x0178 /* Deep Color:Cor Para CB1 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1       0x017C /* Deep Color:Cor Para CR1 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2                0x0180 /* Deep Color:Cor Para Y2 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2       0x0184 /* Deep Color:Cor Para CB2 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2       0x0188 /* Deep Color:Cor Para CR2 */
+#define HDMI_HTOP1_EDID_DATA_READ              0x0200 /* EDID Data Read 128Byte:0x03FC */
+
 enum hotplug_state {
        HDMI_HOTPLUG_DISCONNECTED,
        HDMI_HOTPLUG_CONNECTED,
@@ -211,6 +274,7 @@ struct sh_hdmi {
        struct sh_mobile_lcdc_entity entity;
 
        void __iomem *base;
+       void __iomem *htop1;
        enum hotplug_state hp_state;    /* hot-plug status */
        u8 preprogrammed_vic;           /* use a pre-programmed VIC or
                                           the external mode */
@@ -222,20 +286,66 @@ struct sh_hdmi {
        struct delayed_work edid_work;
        struct fb_videomode mode;
        struct fb_monspecs monspec;
+
+       /* register access functions */
+       void (*write)(struct sh_hdmi *hdmi, u8 data, u8 reg);
+       u8 (*read)(struct sh_hdmi *hdmi, u8 reg);
 };
 
 #define entity_to_sh_hdmi(e)   container_of(e, struct sh_hdmi, entity)
 
-static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
+static void __hdmi_write8(struct sh_hdmi *hdmi, u8 data, u8 reg)
 {
        iowrite8(data, hdmi->base + reg);
 }
 
-static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg)
+static u8 __hdmi_read8(struct sh_hdmi *hdmi, u8 reg)
 {
        return ioread8(hdmi->base + reg);
 }
 
+static void __hdmi_write32(struct sh_hdmi *hdmi, u8 data, u8 reg)
+{
+       iowrite32((u32)data, hdmi->base + (reg * 4));
+       udelay(100);
+}
+
+static u8 __hdmi_read32(struct sh_hdmi *hdmi, u8 reg)
+{
+       return (u8)ioread32(hdmi->base + (reg * 4));
+}
+
+static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
+{
+       hdmi->write(hdmi, data, reg);
+}
+
+static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg)
+{
+       return hdmi->read(hdmi, reg);
+}
+
+static void hdmi_bit_set(struct sh_hdmi *hdmi, u8 mask, u8 data, u8 reg)
+{
+       u8 val = hdmi_read(hdmi, reg);
+
+       val &= ~mask;
+       val |= (data & mask);
+
+       hdmi_write(hdmi, val, reg);
+}
+
+static void hdmi_htop1_write(struct sh_hdmi *hdmi, u32 data, u32 reg)
+{
+       iowrite32(data, hdmi->htop1 + reg);
+       udelay(100);
+}
+
+static u32 hdmi_htop1_read(struct sh_hdmi *hdmi, u32 reg)
+{
+       return ioread32(hdmi->htop1 + reg);
+}
+
 /*
  *     HDMI sound
  */
@@ -693,11 +803,11 @@ static void sh_hdmi_configure(struct sh_hdmi *hdmi)
        msleep(10);
 
        /* PS mode b->d, reset PLLA and PLLB */
-       hdmi_write(hdmi, 0x4C, HDMI_SYSTEM_CTRL);
+       hdmi_bit_set(hdmi, 0xFC, 0x4C, HDMI_SYSTEM_CTRL);
 
        udelay(10);
 
-       hdmi_write(hdmi, 0x40, HDMI_SYSTEM_CTRL);
+       hdmi_bit_set(hdmi, 0xFC, 0x40, HDMI_SYSTEM_CTRL);
 }
 
 static unsigned long sh_hdmi_rate_error(struct sh_hdmi *hdmi,
@@ -746,7 +856,9 @@ static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate,
        /* Read EDID */
        dev_dbg(hdmi->dev, "Read back EDID code:");
        for (i = 0; i < 128; i++) {
-               edid[i] = hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW);
+               edid[i] = (hdmi->htop1) ?
+                       (u8)hdmi_htop1_read(hdmi, HDMI_HTOP1_EDID_DATA_READ + (i * 4)) :
+                       hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW);
 #ifdef DEBUG
                if ((i % 16) == 0) {
                        printk(KERN_CONT "\n");
@@ -917,13 +1029,13 @@ static irqreturn_t sh_hdmi_hotplug(int irq, void *dev_id)
        u8 status1, status2, mask1, mask2;
 
        /* mode_b and PLLA and PLLB reset */
-       hdmi_write(hdmi, 0x2C, HDMI_SYSTEM_CTRL);
+       hdmi_bit_set(hdmi, 0xFC, 0x2C, HDMI_SYSTEM_CTRL);
 
        /* How long shall reset be held? */
        udelay(10);
 
        /* mode_b and PLLA and PLLB reset release */
-       hdmi_write(hdmi, 0x20, HDMI_SYSTEM_CTRL);
+       hdmi_bit_set(hdmi, 0xFC, 0x20, HDMI_SYSTEM_CTRL);
 
        status1 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_1);
        status2 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_2);
@@ -1001,7 +1113,7 @@ static int sh_hdmi_display_on(struct sh_mobile_lcdc_entity *entity)
         */
        if (hdmi->hp_state == HDMI_HOTPLUG_EDID_DONE) {
                /* PS mode d->e. All functions are active */
-               hdmi_write(hdmi, 0x80, HDMI_SYSTEM_CTRL);
+               hdmi_bit_set(hdmi, 0xFC, 0x80, HDMI_SYSTEM_CTRL);
                dev_dbg(hdmi->dev, "HDMI running\n");
        }
 
@@ -1016,7 +1128,7 @@ static void sh_hdmi_display_off(struct sh_mobile_lcdc_entity *entity)
 
        dev_dbg(hdmi->dev, "%s(%p)\n", __func__, hdmi);
        /* PS mode e->a */
-       hdmi_write(hdmi, 0x10, HDMI_SYSTEM_CTRL);
+       hdmi_bit_set(hdmi, 0xFC, 0x10, HDMI_SYSTEM_CTRL);
 }
 
 static const struct sh_mobile_lcdc_entity_ops sh_hdmi_ops = {
@@ -1110,10 +1222,58 @@ out:
        dev_dbg(hdmi->dev, "%s(%p): end\n", __func__, hdmi);
 }
 
+static void sh_hdmi_htop1_init(struct sh_hdmi *hdmi)
+{
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_MODE);
+       hdmi_htop1_write(hdmi, 0x0000000b, 0x0010);
+       hdmi_htop1_write(hdmi, 0x00006710, HDMI_HTOP1_HTOP_DCL_FRC_MODE);
+       hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1);
+       hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2);
+       hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1);
+       hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2);
+       hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1);
+       hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2);
+       hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1);
+       hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2);
+       hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1);
+       hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2);
+       hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1);
+       hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2);
+       hdmi_htop1_write(hdmi, 0x00000008, HDMI_HTOP1_CURRENT);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP0_1);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP2_C);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PHY_TEST_MODE);
+       hdmi_htop1_write(hdmi, 0x00000081, HDMI_HTOP1_TISIDRV);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PLLBW);
+       hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN);
+       hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN);
+       hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR);
+       hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET);
+       hdmi_htop1_write(hdmi, 0x00000016, HDMI_HTOP1_CISRANGE);
+       msleep(100);
+       hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_ENABLE_SELECTOR);
+       msleep(100);
+       hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR);
+       hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET);
+       hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN);
+       hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_CLK_TO_PHY);
+       hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT2);
+       hdmi_htop1_write(hdmi, 0x0000000a, HDMI_HTOP1_CLK_SET);
+}
+
 static int __init sh_hdmi_probe(struct platform_device *pdev)
 {
        struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data;
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       struct resource *htop1_res;
        int irq = platform_get_irq(pdev, 0), ret;
        struct sh_hdmi *hdmi;
        long rate;
@@ -1121,6 +1281,15 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
        if (!res || !pdata || irq < 0)
                return -ENODEV;
 
+       htop1_res = NULL;
+       if (pdata->flags & HDMI_HAS_HTOP1) {
+               htop1_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+               if (!htop1_res) {
+                       dev_err(&pdev->dev, "htop1 needs register base\n");
+                       return -EINVAL;
+               }
+       }
+
        hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
        if (!hdmi) {
                dev_err(&pdev->dev, "Cannot allocate device data\n");
@@ -1138,6 +1307,15 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
                goto egetclk;
        }
 
+       /* select register access functions */
+       if (pdata->flags & HDMI_32BIT_REG) {
+               hdmi->write     = __hdmi_write32;
+               hdmi->read      = __hdmi_read32;
+       } else {
+               hdmi->write     = __hdmi_write8;
+               hdmi->read      = __hdmi_read8;
+       }
+
        /* An arbitrary relaxed pixclock just to get things started: from standard 480p */
        rate = clk_round_rate(hdmi->hdmi_clk, PICOS2KHZ(37037));
        if (rate > 0)
@@ -1176,6 +1354,24 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);
 
+       /* init interrupt polarity */
+       if (pdata->flags & HDMI_OUTPUT_PUSH_PULL)
+               hdmi_bit_set(hdmi, 0x02, 0x02, HDMI_SYSTEM_CTRL);
+
+       if (pdata->flags & HDMI_OUTPUT_POLARITY_HI)
+               hdmi_bit_set(hdmi, 0x01, 0x01, HDMI_SYSTEM_CTRL);
+
+       /* enable htop1 register if needed */
+       if (htop1_res) {
+               hdmi->htop1 = ioremap(htop1_res->start, resource_size(htop1_res));
+               if (!hdmi->htop1) {
+                       dev_err(&pdev->dev, "control register region already claimed\n");
+                       ret = -ENOMEM;
+                       goto emap_htop1;
+               }
+               sh_hdmi_htop1_init(hdmi);
+       }
+
        /* Product and revision IDs are 0 in sh-mobile version */
        dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n",
                 hdmi_read(hdmi, HDMI_PRODUCT_ID), hdmi_read(hdmi, HDMI_REVISION_ID));
@@ -1199,6 +1395,9 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
 ecodec:
        free_irq(irq, hdmi);
 ereqirq:
+       if (hdmi->htop1)
+               iounmap(hdmi->htop1);
+emap_htop1:
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        iounmap(hdmi->base);
@@ -1230,6 +1429,8 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
        clk_disable(hdmi->hdmi_clk);
        clk_put(hdmi->hdmi_clk);
+       if (hdmi->htop1)
+               iounmap(hdmi->htop1);
        iounmap(hdmi->base);
        release_mem_region(res->start, resource_size(res));
        kfree(hdmi);
index aff73842d877b3b98c287b50dc1cbdf138ed6cc7..85d6738b6c64ad2755629f6d71596da9a0f0bfd8 100644 (file)
@@ -105,51 +105,6 @@ static const unsigned short ModeIndex_1920x1440[]    = {0x68, 0x69, 0x00, 0x6b};
 static const unsigned short ModeIndex_300_2048x1536[]= {0x6c, 0x6d, 0x00, 0x00};
 static const unsigned short ModeIndex_310_2048x1536[]= {0x6c, 0x6d, 0x00, 0x6e};
 
-static const unsigned short SiS_DRAMType[17][5]={
-       {0x0C,0x0A,0x02,0x40,0x39},
-       {0x0D,0x0A,0x01,0x40,0x48},
-       {0x0C,0x09,0x02,0x20,0x35},
-       {0x0D,0x09,0x01,0x20,0x44},
-       {0x0C,0x08,0x02,0x10,0x31},
-       {0x0D,0x08,0x01,0x10,0x40},
-       {0x0C,0x0A,0x01,0x20,0x34},
-       {0x0C,0x09,0x01,0x08,0x32},
-       {0x0B,0x08,0x02,0x08,0x21},
-       {0x0C,0x08,0x01,0x08,0x30},
-       {0x0A,0x08,0x02,0x04,0x11},
-       {0x0B,0x0A,0x01,0x10,0x28},
-       {0x09,0x08,0x02,0x02,0x01},
-       {0x0B,0x09,0x01,0x08,0x24},
-       {0x0B,0x08,0x01,0x04,0x20},
-       {0x0A,0x08,0x01,0x02,0x10},
-       {0x09,0x08,0x01,0x01,0x00}
-};
-
-static const unsigned short SiS_SDRDRAM_TYPE[13][5] =
-{
-       { 2,12, 9,64,0x35},
-       { 1,13, 9,64,0x44},
-       { 2,12, 8,32,0x31},
-       { 2,11, 9,32,0x25},
-       { 1,12, 9,32,0x34},
-       { 1,13, 8,32,0x40},
-       { 2,11, 8,16,0x21},
-       { 1,12, 8,16,0x30},
-       { 1,11, 9,16,0x24},
-       { 1,11, 8, 8,0x20},
-       { 2, 9, 8, 4,0x01},
-       { 1,10, 8, 4,0x10},
-       { 1, 9, 8, 2,0x00}
-};
-
-static const unsigned short SiS_DDRDRAM_TYPE[4][5] =
-{
-       { 2,12, 9,64,0x35},
-       { 2,12, 8,32,0x31},
-       { 2,11, 8,16,0x21},
-       { 2, 9, 8, 4,0x01}
-};
-
 static const unsigned char SiS_MDA_DAC[] =
 {
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
index 078ca2167d6f9754a3fc9ad26582643b82e34c8a..a7a48db64ce20d0cdad34635da4848bc0410dec1 100644 (file)
@@ -4222,6 +4222,26 @@ sisfb_post_300_buswidth(struct sis_video_info *ivideo)
        return 1;                       /* 32bit */
 }
 
+static const unsigned short __devinitconst SiS_DRAMType[17][5] = {
+       {0x0C,0x0A,0x02,0x40,0x39},
+       {0x0D,0x0A,0x01,0x40,0x48},
+       {0x0C,0x09,0x02,0x20,0x35},
+       {0x0D,0x09,0x01,0x20,0x44},
+       {0x0C,0x08,0x02,0x10,0x31},
+       {0x0D,0x08,0x01,0x10,0x40},
+       {0x0C,0x0A,0x01,0x20,0x34},
+       {0x0C,0x09,0x01,0x08,0x32},
+       {0x0B,0x08,0x02,0x08,0x21},
+       {0x0C,0x08,0x01,0x08,0x30},
+       {0x0A,0x08,0x02,0x04,0x11},
+       {0x0B,0x0A,0x01,0x10,0x28},
+       {0x09,0x08,0x02,0x02,0x01},
+       {0x0B,0x09,0x01,0x08,0x24},
+       {0x0B,0x08,0x01,0x04,0x20},
+       {0x0A,0x08,0x01,0x02,0x10},
+       {0x09,0x08,0x01,0x01,0x00}
+};
+
 static int __devinit
 sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth,
                        int PseudoRankCapacity, int PseudoAdrPinCount,
@@ -4231,27 +4251,8 @@ sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth
        unsigned short sr14;
        unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
        unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
-       static const unsigned short SiS_DRAMType[17][5] = {
-               {0x0C,0x0A,0x02,0x40,0x39},
-               {0x0D,0x0A,0x01,0x40,0x48},
-               {0x0C,0x09,0x02,0x20,0x35},
-               {0x0D,0x09,0x01,0x20,0x44},
-               {0x0C,0x08,0x02,0x10,0x31},
-               {0x0D,0x08,0x01,0x10,0x40},
-               {0x0C,0x0A,0x01,0x20,0x34},
-               {0x0C,0x09,0x01,0x08,0x32},
-               {0x0B,0x08,0x02,0x08,0x21},
-               {0x0C,0x08,0x01,0x08,0x30},
-               {0x0A,0x08,0x02,0x04,0x11},
-               {0x0B,0x0A,0x01,0x10,0x28},
-               {0x09,0x08,0x02,0x02,0x01},
-               {0x0B,0x09,0x01,0x08,0x24},
-               {0x0B,0x08,0x01,0x04,0x20},
-               {0x0A,0x08,0x01,0x02,0x10},
-               {0x09,0x08,0x01,0x01,0x00}
-       };
 
-        for(k = 0; k <= 16; k++) {
+        for(k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
 
                RankCapacity = buswidth * SiS_DRAMType[k][3];
 
index 30f7a815a62bc0f36813a644ad1819d210958ca5..5b6abc6de84ba0a823a80806f0d84c6675f0b5c0 100644 (file)
@@ -1036,6 +1036,6 @@ static void __exit xxxfb_exit(void)
      */
 
 module_init(xxxfb_init);
-module_exit(xxxfb_remove);
+module_exit(xxxfb_exit);
 
 MODULE_LICENSE("GPL");
index ccbfef5e828f3d19815dc7f91c27164c737f4250..af3ef27ad36ccd4f6db2e8983351c030116e748a 100644 (file)
@@ -846,7 +846,7 @@ static void ufx_raw_rect(struct ufx_data *dev, u16 *cmd, int x, int y,
        }
 }
 
-int ufx_handle_damage(struct ufx_data *dev, int x, int y,
+static int ufx_handle_damage(struct ufx_data *dev, int x, int y,
        int width, int height)
 {
        size_t packed_line_len = ALIGN((width * 2), 4);
@@ -1083,7 +1083,7 @@ static int ufx_ops_open(struct fb_info *info, int user)
 
                struct fb_deferred_io *fbdefio;
 
-               fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+               fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
 
                if (fbdefio) {
                        fbdefio->delay = UFX_DEFIO_WRITE_DELAY;
index 7af1e81661828669895d85b2efa6343ed047334e..8af64148294b88074a269f54691d05a9cb00a547 100644 (file)
@@ -893,7 +893,7 @@ static int dlfb_ops_open(struct fb_info *info, int user)
 
                struct fb_deferred_io *fbdefio;
 
-               fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+               fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
 
                if (fbdefio) {
                        fbdefio->delay = DL_DEFIO_WRITE_DELAY;
index 0c8837565bc719cb745022b6918ba3b1e9d562a4..c80e770e180029a132087f73f3cbbfc6295a7c5e 100644 (file)
@@ -1276,17 +1276,12 @@ static int viafb_dfph_proc_open(struct inode *inode, struct file *file)
 static ssize_t viafb_dfph_proc_write(struct file *file,
        const char __user *buffer, size_t count, loff_t *pos)
 {
-       char buf[20];
-       u8 reg_val = 0;
-       unsigned long length;
-       if (count < 1)
-               return -EINVAL;
-       length = count > 20 ? 20 : count;
-       if (copy_from_user(&buf[0], buffer, length))
-               return -EFAULT;
-       buf[length - 1] = '\0'; /*Ensure end string */
-       if (kstrtou8(buf, 0, &reg_val) < 0)
-               return -EINVAL;
+       int err;
+       u8 reg_val;
+       err = kstrtou8_from_user(buffer, count, 0, &reg_val);
+       if (err)
+               return err;
+
        viafb_write_reg_mask(CR97, VIACR, reg_val, 0x0f);
        return count;
 }
@@ -1316,17 +1311,12 @@ static int viafb_dfpl_proc_open(struct inode *inode, struct file *file)
 static ssize_t viafb_dfpl_proc_write(struct file *file,
        const char __user *buffer, size_t count, loff_t *pos)
 {
-       char buf[20];
-       u8 reg_val = 0;
-       unsigned long length;
-       if (count < 1)
-               return -EINVAL;
-       length = count > 20 ? 20 : count;
-       if (copy_from_user(&buf[0], buffer, length))
-               return -EFAULT;
-       buf[length - 1] = '\0'; /*Ensure end string */
-       if (kstrtou8(buf, 0, &reg_val) < 0)
-               return -EINVAL;
+       int err;
+       u8 reg_val;
+       err = kstrtou8_from_user(buffer, count, 0, &reg_val);
+       if (err)
+               return err;
+
        viafb_write_reg_mask(CR99, VIACR, reg_val, 0x0f);
        return count;
 }
index a3b6a74c67a729b93949a1e06c89c40cf252c1a1..1cc61a700fa84bd7830bd37f4b3d069438f4ba87 100644 (file)
@@ -138,7 +138,7 @@ static int __devinit mxc_w1_probe(struct platform_device *pdev)
                goto failed_ioremap;
        }
 
-       clk_enable(mdev->clk);
+       clk_prepare_enable(mdev->clk);
        __raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
 
        mdev->bus_master.data = mdev;
@@ -178,7 +178,7 @@ static int __devexit mxc_w1_remove(struct platform_device *pdev)
 
        iounmap(mdev->regs);
        release_mem_region(res->start, resource_size(res));
-       clk_disable(mdev->clk);
+       clk_disable_unprepare(mdev->clk);
        clk_put(mdev->clk);
 
        platform_set_drvdata(pdev, NULL);
index a18bf6358eb89d5048ec8490985526bdf3141af1..fe819b76de5685f2cf28a2db9000a117ca8338ce 100644 (file)
@@ -64,6 +64,18 @@ config SOFT_WATCHDOG
          To compile this driver as a module, choose M here: the
          module will be called softdog.
 
+config DA9052_WATCHDOG
+        tristate "Dialog DA9052 Watchdog"
+        depends on PMIC_DA9052
+        select WATCHDOG_CORE
+        help
+          Support for the watchdog in the DA9052 PMIC. Watchdog trigger
+          cause system reset.
+
+          Say Y here to include support for the DA9052 watchdog.
+          Alternatively say M to compile the driver as a module,
+          which will be called da9052_wdt.
+
 config WM831X_WATCHDOG
        tristate "WM831x watchdog"
        depends on MFD_WM831X
@@ -87,6 +99,7 @@ config WM8350_WATCHDOG
 config ARM_SP805_WATCHDOG
        tristate "ARM SP805 Watchdog"
        depends on ARM_AMBA
+       select WATCHDOG_CORE
        help
          ARM Primecell SP805 Watchdog timer. This will reboot your system when
          the timeout is reached.
@@ -565,6 +578,7 @@ config INTEL_SCU_WATCHDOG
 config ITCO_WDT
        tristate "Intel TCO Timer/Watchdog"
        depends on (X86 || IA64) && PCI
+       select LPC_ICH
        ---help---
          Hardware driver for the intel TCO timer based watchdog devices.
          These drivers are included in the Intel 82801 I/O Controller
index 442bfbe0882a29206035d17c0faa5ca629c7af45..572b39bed06a256ff6c79df0b943acbda7e88174 100644 (file)
@@ -163,6 +163,7 @@ obj-$(CONFIG_WATCHDOG_CP1XXX)               += cpwd.o
 obj-$(CONFIG_XEN_WDT) += xen_wdt.o
 
 # Architecture Independent
+obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
 obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
 obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
 obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
new file mode 100644 (file)
index 0000000..3f75129
--- /dev/null
@@ -0,0 +1,251 @@
+/*
+ * System monitoring driver for DA9052 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Anthony Olech <Anthony.Olech@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+#include <linux/watchdog.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/da9052.h>
+
+#define DA9052_DEF_TIMEOUT     4
+#define DA9052_TWDMIN          256
+
+struct da9052_wdt_data {
+       struct watchdog_device wdt;
+       struct da9052 *da9052;
+       struct kref kref;
+       unsigned long jpast;
+};
+
+static const struct {
+       u8 reg_val;
+       int time;  /* Seconds */
+} da9052_wdt_maps[] = {
+       { 1, 2 },
+       { 2, 4 },
+       { 3, 8 },
+       { 4, 16 },
+       { 5, 32 },
+       { 5, 33 },  /* Actual time  32.768s so included both 32s and 33s */
+       { 6, 65 },
+       { 6, 66 },  /* Actual time 65.536s so include both, 65s and 66s */
+       { 7, 131 },
+};
+
+
+static void da9052_wdt_release_resources(struct kref *r)
+{
+       struct da9052_wdt_data *driver_data =
+               container_of(r, struct da9052_wdt_data, kref);
+
+       kfree(driver_data);
+}
+
+static int da9052_wdt_set_timeout(struct watchdog_device *wdt_dev,
+                                 unsigned int timeout)
+{
+       struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+       struct da9052 *da9052 = driver_data->da9052;
+       int ret, i;
+
+       /*
+        * Disable the Watchdog timer before setting
+        * new time out.
+        */
+       ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+                               DA9052_CONTROLD_TWDSCALE, 0);
+       if (ret < 0) {
+               dev_err(da9052->dev, "Failed to disable watchdog bit, %d\n",
+                       ret);
+               return ret;
+       }
+       if (timeout) {
+               /*
+                * To change the timeout, da9052 needs to
+                * be disabled for at least 150 us.
+                */
+               udelay(150);
+
+               /* Set the desired timeout */
+               for (i = 0; i < ARRAY_SIZE(da9052_wdt_maps); i++)
+                       if (da9052_wdt_maps[i].time == timeout)
+                               break;
+
+               if (i == ARRAY_SIZE(da9052_wdt_maps))
+                       ret = -EINVAL;
+               else
+                       ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+                                               DA9052_CONTROLD_TWDSCALE,
+                                               da9052_wdt_maps[i].reg_val);
+               if (ret < 0) {
+                       dev_err(da9052->dev,
+                               "Failed to update timescale bit, %d\n", ret);
+                       return ret;
+               }
+
+               wdt_dev->timeout = timeout;
+               driver_data->jpast = jiffies;
+       }
+
+       return 0;
+}
+
+static void da9052_wdt_ref(struct watchdog_device *wdt_dev)
+{
+       struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+       kref_get(&driver_data->kref);
+}
+
+static void da9052_wdt_unref(struct watchdog_device *wdt_dev)
+{
+       struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+       kref_put(&driver_data->kref, da9052_wdt_release_resources);
+}
+
+static int da9052_wdt_start(struct watchdog_device *wdt_dev)
+{
+       return da9052_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
+}
+
+static int da9052_wdt_stop(struct watchdog_device *wdt_dev)
+{
+       return da9052_wdt_set_timeout(wdt_dev, 0);
+}
+
+static int da9052_wdt_ping(struct watchdog_device *wdt_dev)
+{
+       struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+       struct da9052 *da9052 = driver_data->da9052;
+       unsigned long msec, jnow = jiffies;
+       int ret;
+
+       /*
+        * We have a minimum time for watchdog window called TWDMIN. A write
+        * to the watchdog before this elapsed time should cause an error.
+        */
+       msec = (jnow - driver_data->jpast) * 1000/HZ;
+       if (msec < DA9052_TWDMIN)
+               mdelay(msec);
+
+       /* Reset the watchdog timer */
+       ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+                               DA9052_CONTROLD_WATCHDOG, 1 << 7);
+       if (ret < 0)
+               goto err_strobe;
+
+       /*
+        * FIXME: Reset the watchdog core, in general PMIC
+        * is supposed to do this
+        */
+       ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+                               DA9052_CONTROLD_WATCHDOG, 0 << 7);
+err_strobe:
+       return ret;
+}
+
+static struct watchdog_info da9052_wdt_info = {
+       .options        = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+       .identity       = "DA9052 Watchdog",
+};
+
+static const struct watchdog_ops da9052_wdt_ops = {
+       .owner = THIS_MODULE,
+       .start = da9052_wdt_start,
+       .stop = da9052_wdt_stop,
+       .ping = da9052_wdt_ping,
+       .set_timeout = da9052_wdt_set_timeout,
+       .ref = da9052_wdt_ref,
+       .unref = da9052_wdt_unref,
+};
+
+
+static int __devinit da9052_wdt_probe(struct platform_device *pdev)
+{
+       struct da9052 *da9052 = dev_get_drvdata(pdev->dev.parent);
+       struct da9052_wdt_data *driver_data;
+       struct watchdog_device *da9052_wdt;
+       int ret;
+
+       driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
+                                  GFP_KERNEL);
+       if (!driver_data) {
+               dev_err(da9052->dev, "Unable to alloacate watchdog device\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+       driver_data->da9052 = da9052;
+
+       da9052_wdt = &driver_data->wdt;
+
+       da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
+       da9052_wdt->info = &da9052_wdt_info;
+       da9052_wdt->ops = &da9052_wdt_ops;
+       watchdog_set_drvdata(da9052_wdt, driver_data);
+
+       kref_init(&driver_data->kref);
+
+       ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+                               DA9052_CONTROLD_TWDSCALE, 0);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to disable watchdog bits, %d\n",
+                       ret);
+               goto err;
+       }
+
+       ret = watchdog_register_device(&driver_data->wdt);
+       if (ret != 0) {
+               dev_err(da9052->dev, "watchdog_register_device() failed: %d\n",
+                       ret);
+               goto err;
+       }
+
+       dev_set_drvdata(&pdev->dev, driver_data);
+err:
+       return ret;
+}
+
+static int __devexit da9052_wdt_remove(struct platform_device *pdev)
+{
+       struct da9052_wdt_data *driver_data = dev_get_drvdata(&pdev->dev);
+
+       watchdog_unregister_device(&driver_data->wdt);
+       kref_put(&driver_data->kref, da9052_wdt_release_resources);
+
+       return 0;
+}
+
+static struct platform_driver da9052_wdt_driver = {
+       .probe = da9052_wdt_probe,
+       .remove = __devexit_p(da9052_wdt_remove),
+       .driver = {
+               .name   = "da9052-watchdog",
+       },
+};
+
+module_platform_driver(da9052_wdt_driver);
+
+MODULE_AUTHOR("Anthony Olech <Anthony.Olech@diasemi.com>");
+MODULE_DESCRIPTION("DA9052 SM Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-watchdog");
index 9e27e6422f661c5fe6a97f8655489b3285eb40b0..3c57b45537a2f0cd552c7fc571156428de918175 100644 (file)
@@ -1,8 +1,8 @@
 /* iTCO Vendor Specific Support hooks */
 #ifdef CONFIG_ITCO_VENDOR_SUPPORT
-extern void iTCO_vendor_pre_start(unsigned long, unsigned int);
-extern void iTCO_vendor_pre_stop(unsigned long);
-extern void iTCO_vendor_pre_keepalive(unsigned long, unsigned int);
+extern void iTCO_vendor_pre_start(struct resource *, unsigned int);
+extern void iTCO_vendor_pre_stop(struct resource *);
+extern void iTCO_vendor_pre_keepalive(struct resource *, unsigned int);
 extern void iTCO_vendor_pre_set_heartbeat(unsigned int);
 extern int iTCO_vendor_check_noreboot_on(void);
 #else
index 2721d29ce243fe0d663fea99a4c7823d6bb50277..b6b2f90b5d443c85f08c10e7a68de0f1b25942f4 100644 (file)
 
 #include "iTCO_vendor.h"
 
-/* iTCO defines */
-#define        SMI_EN          (acpibase + 0x30) /* SMI Control and Enable Register */
-#define        TCOBASE         (acpibase + 0x60) /* TCO base address */
-#define        TCO1_STS        (TCOBASE + 0x04)  /* TCO1 Status Register */
-
 /* List of vendor support modes */
 /* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */
 #define SUPERMICRO_OLD_BOARD   1
@@ -82,24 +77,24 @@ MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default="
  *         20.6 seconds.
  */
 
-static void supermicro_old_pre_start(unsigned long acpibase)
+static void supermicro_old_pre_start(struct resource *smires)
 {
        unsigned long val32;
 
        /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
-       val32 = inl(SMI_EN);
+       val32 = inl(smires->start);
        val32 &= 0xffffdfff;    /* Turn off SMI clearing watchdog */
-       outl(val32, SMI_EN);    /* Needed to activate watchdog */
+       outl(val32, smires->start);     /* Needed to activate watchdog */
 }
 
-static void supermicro_old_pre_stop(unsigned long acpibase)
+static void supermicro_old_pre_stop(struct resource *smires)
 {
        unsigned long val32;
 
        /* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
-       val32 = inl(SMI_EN);
+       val32 = inl(smires->start);
        val32 |= 0x00002000;    /* Turn on SMI clearing watchdog */
-       outl(val32, SMI_EN);    /* Needed to deactivate watchdog */
+       outl(val32, smires->start);     /* Needed to deactivate watchdog */
 }
 
 /*
@@ -270,66 +265,66 @@ static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat)
  *     Don't use this fix if you don't need to!!!
  */
 
-static void broken_bios_start(unsigned long acpibase)
+static void broken_bios_start(struct resource *smires)
 {
        unsigned long val32;
 
-       val32 = inl(SMI_EN);
+       val32 = inl(smires->start);
        /* Bit 13: TCO_EN     -> 0 = Disables TCO logic generating an SMI#
           Bit  0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. */
        val32 &= 0xffffdffe;
-       outl(val32, SMI_EN);
+       outl(val32, smires->start);
 }
 
-static void broken_bios_stop(unsigned long acpibase)
+static void broken_bios_stop(struct resource *smires)
 {
        unsigned long val32;
 
-       val32 = inl(SMI_EN);
+       val32 = inl(smires->start);
        /* Bit 13: TCO_EN     -> 1 = Enables TCO logic generating an SMI#
           Bit  0: GBL_SMI_EN -> 1 = Turn global SMI on again. */
        val32 |= 0x00002001;
-       outl(val32, SMI_EN);
+       outl(val32, smires->start);
 }
 
 /*
  *     Generic Support Functions
  */
 
-void iTCO_vendor_pre_start(unsigned long acpibase,
+void iTCO_vendor_pre_start(struct resource *smires,
                           unsigned int heartbeat)
 {
        switch (vendorsupport) {
        case SUPERMICRO_OLD_BOARD:
-               supermicro_old_pre_start(acpibase);
+               supermicro_old_pre_start(smires);
                break;
        case SUPERMICRO_NEW_BOARD:
                supermicro_new_pre_start(heartbeat);
                break;
        case BROKEN_BIOS:
-               broken_bios_start(acpibase);
+               broken_bios_start(smires);
                break;
        }
 }
 EXPORT_SYMBOL(iTCO_vendor_pre_start);
 
-void iTCO_vendor_pre_stop(unsigned long acpibase)
+void iTCO_vendor_pre_stop(struct resource *smires)
 {
        switch (vendorsupport) {
        case SUPERMICRO_OLD_BOARD:
-               supermicro_old_pre_stop(acpibase);
+               supermicro_old_pre_stop(smires);
                break;
        case SUPERMICRO_NEW_BOARD:
                supermicro_new_pre_stop();
                break;
        case BROKEN_BIOS:
-               broken_bios_stop(acpibase);
+               broken_bios_stop(smires);
                break;
        }
 }
 EXPORT_SYMBOL(iTCO_vendor_pre_stop);
 
-void iTCO_vendor_pre_keepalive(unsigned long acpibase, unsigned int heartbeat)
+void iTCO_vendor_pre_keepalive(struct resource *smires, unsigned int heartbeat)
 {
        if (vendorsupport == SUPERMICRO_NEW_BOARD)
                supermicro_new_pre_set_heartbeat(heartbeat);
index 9fecb95645a35d86d5183a27212786abad001fa0..bc47e9012f370ff43c0e50d33581fe751cc4742f 100644 (file)
 #include <linux/spinlock.h>            /* For spin_lock/spin_unlock/... */
 #include <linux/uaccess.h>             /* For copy_to_user/put_user/... */
 #include <linux/io.h>                  /* For inb/outb/... */
+#include <linux/mfd/core.h>
+#include <linux/mfd/lpc_ich.h>
 
 #include "iTCO_vendor.h"
 
-/* TCO related info */
-enum iTCO_chipsets {
-       TCO_ICH = 0,    /* ICH */
-       TCO_ICH0,       /* ICH0 */
-       TCO_ICH2,       /* ICH2 */
-       TCO_ICH2M,      /* ICH2-M */
-       TCO_ICH3,       /* ICH3-S */
-       TCO_ICH3M,      /* ICH3-M */
-       TCO_ICH4,       /* ICH4 */
-       TCO_ICH4M,      /* ICH4-M */
-       TCO_CICH,       /* C-ICH */
-       TCO_ICH5,       /* ICH5 & ICH5R */
-       TCO_6300ESB,    /* 6300ESB */
-       TCO_ICH6,       /* ICH6 & ICH6R */
-       TCO_ICH6M,      /* ICH6-M */
-       TCO_ICH6W,      /* ICH6W & ICH6RW */
-       TCO_631XESB,    /* 631xESB/632xESB */
-       TCO_ICH7,       /* ICH7 & ICH7R */
-       TCO_ICH7DH,     /* ICH7DH */
-       TCO_ICH7M,      /* ICH7-M & ICH7-U */
-       TCO_ICH7MDH,    /* ICH7-M DH */
-       TCO_NM10,       /* NM10 */
-       TCO_ICH8,       /* ICH8 & ICH8R */
-       TCO_ICH8DH,     /* ICH8DH */
-       TCO_ICH8DO,     /* ICH8DO */
-       TCO_ICH8M,      /* ICH8M */
-       TCO_ICH8ME,     /* ICH8M-E */
-       TCO_ICH9,       /* ICH9 */
-       TCO_ICH9R,      /* ICH9R */
-       TCO_ICH9DH,     /* ICH9DH */
-       TCO_ICH9DO,     /* ICH9DO */
-       TCO_ICH9M,      /* ICH9M */
-       TCO_ICH9ME,     /* ICH9M-E */
-       TCO_ICH10,      /* ICH10 */
-       TCO_ICH10R,     /* ICH10R */
-       TCO_ICH10D,     /* ICH10D */
-       TCO_ICH10DO,    /* ICH10DO */
-       TCO_PCH,        /* PCH Desktop Full Featured */
-       TCO_PCHM,       /* PCH Mobile Full Featured */
-       TCO_P55,        /* P55 */
-       TCO_PM55,       /* PM55 */
-       TCO_H55,        /* H55 */
-       TCO_QM57,       /* QM57 */
-       TCO_H57,        /* H57 */
-       TCO_HM55,       /* HM55 */
-       TCO_Q57,        /* Q57 */
-       TCO_HM57,       /* HM57 */
-       TCO_PCHMSFF,    /* PCH Mobile SFF Full Featured */
-       TCO_QS57,       /* QS57 */
-       TCO_3400,       /* 3400 */
-       TCO_3420,       /* 3420 */
-       TCO_3450,       /* 3450 */
-       TCO_EP80579,    /* EP80579 */
-       TCO_CPT,        /* Cougar Point */
-       TCO_CPTD,       /* Cougar Point Desktop */
-       TCO_CPTM,       /* Cougar Point Mobile */
-       TCO_PBG,        /* Patsburg */
-       TCO_DH89XXCC,   /* DH89xxCC */
-       TCO_PPT,        /* Panther Point */
-       TCO_LPT,        /* Lynx Point */
-};
-
-static struct {
-       char *name;
-       unsigned int iTCO_version;
-} iTCO_chipset_info[] __devinitdata = {
-       {"ICH", 1},
-       {"ICH0", 1},
-       {"ICH2", 1},
-       {"ICH2-M", 1},
-       {"ICH3-S", 1},
-       {"ICH3-M", 1},
-       {"ICH4", 1},
-       {"ICH4-M", 1},
-       {"C-ICH", 1},
-       {"ICH5 or ICH5R", 1},
-       {"6300ESB", 1},
-       {"ICH6 or ICH6R", 2},
-       {"ICH6-M", 2},
-       {"ICH6W or ICH6RW", 2},
-       {"631xESB/632xESB", 2},
-       {"ICH7 or ICH7R", 2},
-       {"ICH7DH", 2},
-       {"ICH7-M or ICH7-U", 2},
-       {"ICH7-M DH", 2},
-       {"NM10", 2},
-       {"ICH8 or ICH8R", 2},
-       {"ICH8DH", 2},
-       {"ICH8DO", 2},
-       {"ICH8M", 2},
-       {"ICH8M-E", 2},
-       {"ICH9", 2},
-       {"ICH9R", 2},
-       {"ICH9DH", 2},
-       {"ICH9DO", 2},
-       {"ICH9M", 2},
-       {"ICH9M-E", 2},
-       {"ICH10", 2},
-       {"ICH10R", 2},
-       {"ICH10D", 2},
-       {"ICH10DO", 2},
-       {"PCH Desktop Full Featured", 2},
-       {"PCH Mobile Full Featured", 2},
-       {"P55", 2},
-       {"PM55", 2},
-       {"H55", 2},
-       {"QM57", 2},
-       {"H57", 2},
-       {"HM55", 2},
-       {"Q57", 2},
-       {"HM57", 2},
-       {"PCH Mobile SFF Full Featured", 2},
-       {"QS57", 2},
-       {"3400", 2},
-       {"3420", 2},
-       {"3450", 2},
-       {"EP80579", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point Desktop", 2},
-       {"Cougar Point Mobile", 2},
-       {"Patsburg", 2},
-       {"DH89xxCC", 2},
-       {"Panther Point", 2},
-       {"Lynx Point", 2},
-       {NULL, 0}
-};
-
-/*
- * This data only exists for exporting the supported PCI ids
- * via MODULE_DEVICE_TABLE.  We do not actually register a
- * pci_driver, because the I/O Controller Hub has also other
- * functions that probably will be registered by other drivers.
- */
-static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
-       { PCI_VDEVICE(INTEL, 0x2410), TCO_ICH},
-       { PCI_VDEVICE(INTEL, 0x2420), TCO_ICH0},
-       { PCI_VDEVICE(INTEL, 0x2440), TCO_ICH2},
-       { PCI_VDEVICE(INTEL, 0x244c), TCO_ICH2M},
-       { PCI_VDEVICE(INTEL, 0x2480), TCO_ICH3},
-       { PCI_VDEVICE(INTEL, 0x248c), TCO_ICH3M},
-       { PCI_VDEVICE(INTEL, 0x24c0), TCO_ICH4},
-       { PCI_VDEVICE(INTEL, 0x24cc), TCO_ICH4M},
-       { PCI_VDEVICE(INTEL, 0x2450), TCO_CICH},
-       { PCI_VDEVICE(INTEL, 0x24d0), TCO_ICH5},
-       { PCI_VDEVICE(INTEL, 0x25a1), TCO_6300ESB},
-       { PCI_VDEVICE(INTEL, 0x2640), TCO_ICH6},
-       { PCI_VDEVICE(INTEL, 0x2641), TCO_ICH6M},
-       { PCI_VDEVICE(INTEL, 0x2642), TCO_ICH6W},
-       { PCI_VDEVICE(INTEL, 0x2670), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x2671), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x2672), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x2673), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x2674), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x2675), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x2676), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x2677), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x2678), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x2679), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x267a), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x267b), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x267c), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x267d), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x267e), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x267f), TCO_631XESB},
-       { PCI_VDEVICE(INTEL, 0x27b8), TCO_ICH7},
-       { PCI_VDEVICE(INTEL, 0x27b0), TCO_ICH7DH},
-       { PCI_VDEVICE(INTEL, 0x27b9), TCO_ICH7M},
-       { PCI_VDEVICE(INTEL, 0x27bd), TCO_ICH7MDH},
-       { PCI_VDEVICE(INTEL, 0x27bc), TCO_NM10},
-       { PCI_VDEVICE(INTEL, 0x2810), TCO_ICH8},
-       { PCI_VDEVICE(INTEL, 0x2812), TCO_ICH8DH},
-       { PCI_VDEVICE(INTEL, 0x2814), TCO_ICH8DO},
-       { PCI_VDEVICE(INTEL, 0x2815), TCO_ICH8M},
-       { PCI_VDEVICE(INTEL, 0x2811), TCO_ICH8ME},
-       { PCI_VDEVICE(INTEL, 0x2918), TCO_ICH9},
-       { PCI_VDEVICE(INTEL, 0x2916), TCO_ICH9R},
-       { PCI_VDEVICE(INTEL, 0x2912), TCO_ICH9DH},
-       { PCI_VDEVICE(INTEL, 0x2914), TCO_ICH9DO},
-       { PCI_VDEVICE(INTEL, 0x2919), TCO_ICH9M},
-       { PCI_VDEVICE(INTEL, 0x2917), TCO_ICH9ME},
-       { PCI_VDEVICE(INTEL, 0x3a18), TCO_ICH10},
-       { PCI_VDEVICE(INTEL, 0x3a16), TCO_ICH10R},
-       { PCI_VDEVICE(INTEL, 0x3a1a), TCO_ICH10D},
-       { PCI_VDEVICE(INTEL, 0x3a14), TCO_ICH10DO},
-       { PCI_VDEVICE(INTEL, 0x3b00), TCO_PCH},
-       { PCI_VDEVICE(INTEL, 0x3b01), TCO_PCHM},
-       { PCI_VDEVICE(INTEL, 0x3b02), TCO_P55},
-       { PCI_VDEVICE(INTEL, 0x3b03), TCO_PM55},
-       { PCI_VDEVICE(INTEL, 0x3b06), TCO_H55},
-       { PCI_VDEVICE(INTEL, 0x3b07), TCO_QM57},
-       { PCI_VDEVICE(INTEL, 0x3b08), TCO_H57},
-       { PCI_VDEVICE(INTEL, 0x3b09), TCO_HM55},
-       { PCI_VDEVICE(INTEL, 0x3b0a), TCO_Q57},
-       { PCI_VDEVICE(INTEL, 0x3b0b), TCO_HM57},
-       { PCI_VDEVICE(INTEL, 0x3b0d), TCO_PCHMSFF},
-       { PCI_VDEVICE(INTEL, 0x3b0f), TCO_QS57},
-       { PCI_VDEVICE(INTEL, 0x3b12), TCO_3400},
-       { PCI_VDEVICE(INTEL, 0x3b14), TCO_3420},
-       { PCI_VDEVICE(INTEL, 0x3b16), TCO_3450},
-       { PCI_VDEVICE(INTEL, 0x5031), TCO_EP80579},
-       { PCI_VDEVICE(INTEL, 0x1c41), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c42), TCO_CPTD},
-       { PCI_VDEVICE(INTEL, 0x1c43), TCO_CPTM},
-       { PCI_VDEVICE(INTEL, 0x1c44), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c45), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c46), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c47), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c48), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c49), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c4a), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c4b), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c4c), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c4d), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c4e), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c4f), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c50), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c51), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c52), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c53), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c54), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c55), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c56), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c57), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c58), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c59), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c5a), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c5b), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c5c), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c5d), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c5e), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1c5f), TCO_CPT},
-       { PCI_VDEVICE(INTEL, 0x1d40), TCO_PBG},
-       { PCI_VDEVICE(INTEL, 0x1d41), TCO_PBG},
-       { PCI_VDEVICE(INTEL, 0x2310), TCO_DH89XXCC},
-       { PCI_VDEVICE(INTEL, 0x1e40), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e41), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e42), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e43), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e44), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e45), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e46), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e47), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e48), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e49), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e4a), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e4b), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e4c), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e4d), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e4e), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e4f), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e50), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e51), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e52), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e53), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e54), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e55), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e56), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e57), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e58), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e59), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e5a), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e5b), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e5c), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e5d), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e5e), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x1e5f), TCO_PPT},
-       { PCI_VDEVICE(INTEL, 0x8c40), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c41), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c42), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c43), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c44), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c45), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c46), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c47), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c48), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c49), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c4a), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c4b), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c4c), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c4d), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c4e), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c4f), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c50), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c51), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c52), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c53), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c54), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c55), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c56), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c57), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c58), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c59), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c5a), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c5b), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c5c), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c5d), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c5e), TCO_LPT},
-       { PCI_VDEVICE(INTEL, 0x8c5f), TCO_LPT},
-       { 0, },                 /* End of list */
-};
-MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
-
 /* Address definitions for the TCO */
 /* TCO base address */
-#define TCOBASE                (iTCO_wdt_private.ACPIBASE + 0x60)
+#define TCOBASE                (iTCO_wdt_private.tco_res->start)
 /* SMI Control and Enable Register */
-#define SMI_EN         (iTCO_wdt_private.ACPIBASE + 0x30)
+#define SMI_EN         (iTCO_wdt_private.smi_res->start)
 
 #define TCO_RLD                (TCOBASE + 0x00) /* TCO Timer Reload and Curr. Value */
 #define TCOv1_TMR      (TCOBASE + 0x01) /* TCOv1 Timer Initial Value   */
@@ -393,19 +93,18 @@ static char expect_release;
 static struct {                /* this is private data for the iTCO_wdt device */
        /* TCO version/generation */
        unsigned int iTCO_version;
-       /* The device's ACPIBASE address (TCOBASE = ACPIBASE+0x60) */
-       unsigned long ACPIBASE;
+       struct resource *tco_res;
+       struct resource *smi_res;
+       struct resource *gcs_res;
        /* NO_REBOOT flag is Memory-Mapped GCS register bit 5 (TCO version 2)*/
        unsigned long __iomem *gcs;
        /* the lock for io operations */
        spinlock_t io_lock;
+       struct platform_device *dev;
        /* the PCI-device */
        struct pci_dev *pdev;
 } iTCO_wdt_private;
 
-/* the watchdog platform device */
-static struct platform_device *iTCO_wdt_platform_device;
-
 /* module parameters */
 #define WATCHDOG_HEARTBEAT 30  /* 30 sec default heartbeat */
 static int heartbeat = WATCHDOG_HEARTBEAT;  /* in seconds */
@@ -485,7 +184,7 @@ static int iTCO_wdt_start(void)
 
        spin_lock(&iTCO_wdt_private.io_lock);
 
-       iTCO_vendor_pre_start(iTCO_wdt_private.ACPIBASE, heartbeat);
+       iTCO_vendor_pre_start(iTCO_wdt_private.smi_res, heartbeat);
 
        /* disable chipset's NO_REBOOT bit */
        if (iTCO_wdt_unset_NO_REBOOT_bit()) {
@@ -519,7 +218,7 @@ static int iTCO_wdt_stop(void)
 
        spin_lock(&iTCO_wdt_private.io_lock);
 
-       iTCO_vendor_pre_stop(iTCO_wdt_private.ACPIBASE);
+       iTCO_vendor_pre_stop(iTCO_wdt_private.smi_res);
 
        /* Bit 11: TCO Timer Halt -> 1 = The TCO timer is disabled */
        val = inw(TCO1_CNT);
@@ -541,7 +240,7 @@ static int iTCO_wdt_keepalive(void)
 {
        spin_lock(&iTCO_wdt_private.io_lock);
 
-       iTCO_vendor_pre_keepalive(iTCO_wdt_private.ACPIBASE, heartbeat);
+       iTCO_vendor_pre_keepalive(iTCO_wdt_private.smi_res, heartbeat);
 
        /* Reload the timer by writing to the TCO Timer Counter register */
        if (iTCO_wdt_private.iTCO_version == 2)
@@ -786,83 +485,120 @@ static struct miscdevice iTCO_wdt_miscdev = {
  *     Init & exit routines
  */
 
-static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
-               const struct pci_device_id *ent, struct platform_device *dev)
+static void __devexit iTCO_wdt_cleanup(void)
+{
+       /* Stop the timer before we leave */
+       if (!nowayout)
+               iTCO_wdt_stop();
+
+       /* Deregister */
+       misc_deregister(&iTCO_wdt_miscdev);
+
+       /* release resources */
+       release_region(iTCO_wdt_private.tco_res->start,
+                       resource_size(iTCO_wdt_private.tco_res));
+       release_region(iTCO_wdt_private.smi_res->start,
+                       resource_size(iTCO_wdt_private.smi_res));
+       if (iTCO_wdt_private.iTCO_version == 2) {
+               iounmap(iTCO_wdt_private.gcs);
+               release_mem_region(iTCO_wdt_private.gcs_res->start,
+                               resource_size(iTCO_wdt_private.gcs_res));
+       }
+
+       iTCO_wdt_private.tco_res = NULL;
+       iTCO_wdt_private.smi_res = NULL;
+       iTCO_wdt_private.gcs_res = NULL;
+       iTCO_wdt_private.gcs = NULL;
+}
+
+static int __devinit iTCO_wdt_probe(struct platform_device *dev)
 {
-       int ret;
-       u32 base_address;
-       unsigned long RCBA;
+       int ret = -ENODEV;
        unsigned long val32;
+       struct lpc_ich_info *ich_info = dev->dev.platform_data;
+
+       if (!ich_info)
+               goto out;
+
+       spin_lock_init(&iTCO_wdt_private.io_lock);
+
+       iTCO_wdt_private.tco_res =
+               platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_TCO);
+       if (!iTCO_wdt_private.tco_res)
+               goto out;
+
+       iTCO_wdt_private.smi_res =
+               platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_SMI);
+       if (!iTCO_wdt_private.smi_res)
+               goto out;
+
+       iTCO_wdt_private.iTCO_version = ich_info->iTCO_version;
+       iTCO_wdt_private.dev = dev;
+       iTCO_wdt_private.pdev = to_pci_dev(dev->dev.parent);
 
        /*
-        *      Find the ACPI/PM base I/O address which is the base
-        *      for the TCO registers (TCOBASE=ACPIBASE + 0x60)
-        *      ACPIBASE is bits [15:7] from 0x40-0x43
+        * Get the Memory-Mapped GCS register, we need it for the
+        * NO_REBOOT flag (TCO v2).
         */
-       pci_read_config_dword(pdev, 0x40, &base_address);
-       base_address &= 0x0000ff80;
-       if (base_address == 0x00000000) {
-               /* Something's wrong here, ACPIBASE has to be set */
-               pr_err("failed to get TCOBASE address, device disabled by hardware/BIOS\n");
-               return -ENODEV;
-       }
-       iTCO_wdt_private.iTCO_version =
-                       iTCO_chipset_info[ent->driver_data].iTCO_version;
-       iTCO_wdt_private.ACPIBASE = base_address;
-       iTCO_wdt_private.pdev = pdev;
-
-       /* Get the Memory-Mapped GCS register, we need it for the
-          NO_REBOOT flag (TCO v2). To get access to it you have to
-          read RCBA from PCI Config space 0xf0 and use it as base.
-          GCS = RCBA + ICH6_GCS(0x3410). */
        if (iTCO_wdt_private.iTCO_version == 2) {
-               pci_read_config_dword(pdev, 0xf0, &base_address);
-               if ((base_address & 1) == 0) {
-                       pr_err("RCBA is disabled by hardware/BIOS, device disabled\n");
-                       ret = -ENODEV;
+               iTCO_wdt_private.gcs_res = platform_get_resource(dev,
+                                                       IORESOURCE_MEM,
+                                                       ICH_RES_MEM_GCS);
+
+               if (!iTCO_wdt_private.gcs_res)
+                       goto out;
+
+               if (!request_mem_region(iTCO_wdt_private.gcs_res->start,
+                       resource_size(iTCO_wdt_private.gcs_res), dev->name)) {
+                       ret = -EBUSY;
                        goto out;
                }
-               RCBA = base_address & 0xffffc000;
-               iTCO_wdt_private.gcs = ioremap((RCBA + 0x3410), 4);
+               iTCO_wdt_private.gcs = ioremap(iTCO_wdt_private.gcs_res->start,
+                       resource_size(iTCO_wdt_private.gcs_res));
+               if (!iTCO_wdt_private.gcs) {
+                       ret = -EIO;
+                       goto unreg_gcs;
+               }
        }
 
        /* Check chipset's NO_REBOOT bit */
        if (iTCO_wdt_unset_NO_REBOOT_bit() && iTCO_vendor_check_noreboot_on()) {
                pr_info("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n");
                ret = -ENODEV;  /* Cannot reset NO_REBOOT bit */
-               goto out_unmap;
+               goto unmap_gcs;
        }
 
        /* Set the NO_REBOOT bit to prevent later reboots, just for sure */
        iTCO_wdt_set_NO_REBOOT_bit();
 
        /* The TCO logic uses the TCO_EN bit in the SMI_EN register */
-       if (!request_region(SMI_EN, 4, "iTCO_wdt")) {
-               pr_err("I/O address 0x%04lx already in use, device disabled\n",
-                      SMI_EN);
-               ret = -EIO;
-               goto out_unmap;
+       if (!request_region(iTCO_wdt_private.smi_res->start,
+                       resource_size(iTCO_wdt_private.smi_res), dev->name)) {
+               pr_err("I/O address 0x%04llx already in use, device disabled\n",
+                      (u64)SMI_EN);
+               ret = -EBUSY;
+               goto unmap_gcs;
        }
        if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
-               /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
+               /*
+                * Bit 13: TCO_EN -> 0
+                * Disables TCO logic generating an SMI#
+                */
                val32 = inl(SMI_EN);
                val32 &= 0xffffdfff;    /* Turn off SMI clearing watchdog */
                outl(val32, SMI_EN);
        }
 
-       /* The TCO I/O registers reside in a 32-byte range pointed to
-          by the TCOBASE value */
-       if (!request_region(TCOBASE, 0x20, "iTCO_wdt")) {
-               pr_err("I/O address 0x%04lx already in use, device disabled\n",
-                      TCOBASE);
-               ret = -EIO;
-               goto unreg_smi_en;
+       if (!request_region(iTCO_wdt_private.tco_res->start,
+                       resource_size(iTCO_wdt_private.tco_res), dev->name)) {
+               pr_err("I/O address 0x%04llx already in use, device disabled\n",
+                      (u64)TCOBASE);
+               ret = -EBUSY;
+               goto unreg_smi;
        }
 
-       pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04lx)\n",
-               iTCO_chipset_info[ent->driver_data].name,
-               iTCO_chipset_info[ent->driver_data].iTCO_version,
-               TCOBASE);
+       pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
+               ich_info->name, ich_info->iTCO_version, (u64)TCOBASE);
 
        /* Clear out the (probably old) status */
        outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */
@@ -883,7 +619,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
        if (ret != 0) {
                pr_err("cannot register miscdev on minor=%d (err=%d)\n",
                       WATCHDOG_MINOR, ret);
-               goto unreg_region;
+               goto unreg_tco;
        }
 
        pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n",
@@ -891,62 +627,31 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
 
        return 0;
 
-unreg_region:
-       release_region(TCOBASE, 0x20);
-unreg_smi_en:
-       release_region(SMI_EN, 4);
-out_unmap:
+unreg_tco:
+       release_region(iTCO_wdt_private.tco_res->start,
+                       resource_size(iTCO_wdt_private.tco_res));
+unreg_smi:
+       release_region(iTCO_wdt_private.smi_res->start,
+                       resource_size(iTCO_wdt_private.smi_res));
+unmap_gcs:
        if (iTCO_wdt_private.iTCO_version == 2)
                iounmap(iTCO_wdt_private.gcs);
-out:
-       iTCO_wdt_private.ACPIBASE = 0;
-       return ret;
-}
-
-static void __devexit iTCO_wdt_cleanup(void)
-{
-       /* Stop the timer before we leave */
-       if (!nowayout)
-               iTCO_wdt_stop();
-
-       /* Deregister */
-       misc_deregister(&iTCO_wdt_miscdev);
-       release_region(TCOBASE, 0x20);
-       release_region(SMI_EN, 4);
+unreg_gcs:
        if (iTCO_wdt_private.iTCO_version == 2)
-               iounmap(iTCO_wdt_private.gcs);
-       pci_dev_put(iTCO_wdt_private.pdev);
-       iTCO_wdt_private.ACPIBASE = 0;
-}
-
-static int __devinit iTCO_wdt_probe(struct platform_device *dev)
-{
-       int ret = -ENODEV;
-       int found = 0;
-       struct pci_dev *pdev = NULL;
-       const struct pci_device_id *ent;
-
-       spin_lock_init(&iTCO_wdt_private.io_lock);
-
-       for_each_pci_dev(pdev) {
-               ent = pci_match_id(iTCO_wdt_pci_tbl, pdev);
-               if (ent) {
-                       found++;
-                       ret = iTCO_wdt_init(pdev, ent, dev);
-                       if (!ret)
-                               break;
-               }
-       }
-
-       if (!found)
-               pr_info("No device detected\n");
+               release_mem_region(iTCO_wdt_private.gcs_res->start,
+                               resource_size(iTCO_wdt_private.gcs_res));
+out:
+       iTCO_wdt_private.tco_res = NULL;
+       iTCO_wdt_private.smi_res = NULL;
+       iTCO_wdt_private.gcs_res = NULL;
+       iTCO_wdt_private.gcs = NULL;
 
        return ret;
 }
 
 static int __devexit iTCO_wdt_remove(struct platform_device *dev)
 {
-       if (iTCO_wdt_private.ACPIBASE)
+       if (iTCO_wdt_private.tco_res || iTCO_wdt_private.smi_res)
                iTCO_wdt_cleanup();
 
        return 0;
@@ -977,23 +682,11 @@ static int __init iTCO_wdt_init_module(void)
        if (err)
                return err;
 
-       iTCO_wdt_platform_device = platform_device_register_simple(DRV_NAME,
-                                                               -1, NULL, 0);
-       if (IS_ERR(iTCO_wdt_platform_device)) {
-               err = PTR_ERR(iTCO_wdt_platform_device);
-               goto unreg_platform_driver;
-       }
-
        return 0;
-
-unreg_platform_driver:
-       platform_driver_unregister(&iTCO_wdt_driver);
-       return err;
 }
 
 static void __exit iTCO_wdt_cleanup_module(void)
 {
-       platform_device_unregister(iTCO_wdt_platform_device);
        platform_driver_unregister(&iTCO_wdt_driver);
        pr_info("Watchdog Module Unloaded\n");
 }
index 7a2b734fcdc78f89c3c7f1b9a3eb0f3eb005842c..bcfab2b00ad20ae5c1e72fbf0e2809aa02cfdf2d 100644 (file)
@@ -121,7 +121,7 @@ static void imx2_wdt_start(void)
 {
        if (!test_and_set_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
                /* at our first start we enable clock and do initialisations */
-               clk_enable(imx2_wdt.clk);
+               clk_prepare_enable(imx2_wdt.clk);
 
                imx2_wdt_setup();
        } else  /* delete the timer that pings the watchdog after close */
index a9593a3a32a09a1c44d367877d5feb9486efb609..2e74c3a8ee5840ff405ded8932f32c6be6ea9d3e 100644 (file)
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
 #include <linux/watchdog.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
 #include <linux/uaccess.h>
 #include <linux/clk.h>
 #include <linux/io.h>
 
-#include <lantiq.h>
+#include <lantiq_soc.h>
 
-/* Section 3.4 of the datasheet
+/*
+ * Section 3.4 of the datasheet
  * The password sequence protects the WDT control register from unintended
  * write actions, which might cause malfunction of the WDT.
  *
@@ -70,7 +71,8 @@ ltq_wdt_disable(void)
 {
        /* write the first password magic */
        ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
-       /* write the second password magic with no config
+       /*
+        * write the second password magic with no config
         * this turns the watchdog off
         */
        ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR);
@@ -184,7 +186,7 @@ static struct miscdevice ltq_wdt_miscdev = {
        .fops   = &ltq_wdt_fops,
 };
 
-static int __init
+static int __devinit
 ltq_wdt_probe(struct platform_device *pdev)
 {
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -194,28 +196,27 @@ ltq_wdt_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "cannot obtain I/O memory region");
                return -ENOENT;
        }
-       res = devm_request_mem_region(&pdev->dev, res->start,
-               resource_size(res), dev_name(&pdev->dev));
-       if (!res) {
-               dev_err(&pdev->dev, "cannot request I/O memory region");
-               return -EBUSY;
-       }
-       ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start,
-               resource_size(res));
+
+       ltq_wdt_membase = devm_request_and_ioremap(&pdev->dev, res);
        if (!ltq_wdt_membase) {
                dev_err(&pdev->dev, "cannot remap I/O memory region\n");
                return -ENOMEM;
        }
 
        /* we do not need to enable the clock as it is always running */
-       clk = clk_get(&pdev->dev, "io");
-       WARN_ON(!clk);
+       clk = clk_get_io();
+       if (IS_ERR(clk)) {
+               dev_err(&pdev->dev, "Failed to get clock\n");
+               return -ENOENT;
+       }
        ltq_io_region_clk_rate = clk_get_rate(clk);
        clk_put(clk);
 
+       /* find out if the watchdog caused the last reboot */
        if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST)
                ltq_wdt_bootstatus = WDIOF_CARDRESET;
 
+       dev_info(&pdev->dev, "Init done\n");
        return misc_register(&ltq_wdt_miscdev);
 }
 
@@ -227,33 +228,26 @@ ltq_wdt_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id ltq_wdt_match[] = {
+       { .compatible = "lantiq,wdt" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, ltq_wdt_match);
 
 static struct platform_driver ltq_wdt_driver = {
+       .probe = ltq_wdt_probe,
        .remove = __devexit_p(ltq_wdt_remove),
        .driver = {
-               .name = "ltq_wdt",
+               .name = "wdt",
                .owner = THIS_MODULE,
+               .of_match_table = ltq_wdt_match,
        },
 };
 
-static int __init
-init_ltq_wdt(void)
-{
-       return platform_driver_probe(&ltq_wdt_driver, ltq_wdt_probe);
-}
-
-static void __exit
-exit_ltq_wdt(void)
-{
-       return platform_driver_unregister(&ltq_wdt_driver);
-}
-
-module_init(init_ltq_wdt);
-module_exit(exit_ltq_wdt);
+module_platform_driver(ltq_wdt_driver);
 
 module_param(nowayout, bool, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
-
 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
 MODULE_DESCRIPTION("Lantiq SoC Watchdog");
 MODULE_LICENSE("GPL");
index 788aa158e78c054eef91dabae21de96fa3e20d32..0f5736949c612515afcd023351106e87db596369 100644 (file)
@@ -24,8 +24,8 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/spinlock.h>
+#include <linux/clk.h>
 #include <mach/bridge-regs.h>
-#include <plat/orion_wdt.h>
 
 /*
  * Watchdog timer block registers.
@@ -41,6 +41,7 @@
 static bool nowayout = WATCHDOG_NOWAYOUT;
 static int heartbeat = -1;             /* module parameter (seconds) */
 static unsigned int wdt_max_duration;  /* (seconds) */
+static struct clk *clk;
 static unsigned int wdt_tclk;
 static void __iomem *wdt_reg;
 static unsigned long wdt_status;
@@ -237,16 +238,16 @@ static struct miscdevice orion_wdt_miscdev = {
 
 static int __devinit orion_wdt_probe(struct platform_device *pdev)
 {
-       struct orion_wdt_platform_data *pdata = pdev->dev.platform_data;
        struct resource *res;
        int ret;
 
-       if (pdata) {
-               wdt_tclk = pdata->tclk;
-       } else {
-               pr_err("misses platform data\n");
+       clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(clk)) {
+               printk(KERN_ERR "Orion Watchdog missing clock\n");
                return -ENODEV;
        }
+       clk_prepare_enable(clk);
+       wdt_tclk = clk_get_rate(clk);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
@@ -282,6 +283,9 @@ static int __devexit orion_wdt_remove(struct platform_device *pdev)
        if (!ret)
                orion_wdt_miscdev.parent = NULL;
 
+       clk_disable_unprepare(clk);
+       clk_put(clk);
+
        return ret;
 }
 
index bbb170e50055d43e55ff783f4aa645ec45ae1d85..afcd13676542338a4d4f231e0193ad11626f5d1a 100644 (file)
 #include <linux/amba/bus.h>
 #include <linux/bitops.h>
 #include <linux/clk.h>
-#include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/math64.h>
-#include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
-#include <linux/uaccess.h>
 #include <linux/watchdog.h>
 
 /* default timeout in seconds */
@@ -56,6 +53,7 @@
 
 /**
  * struct sp805_wdt: sp805 wdt device structure
+ * @wdd: instance of struct watchdog_device
  * @lock: spin lock protecting dev structure and io access
  * @base: base address of wdt
  * @clk: clock structure of wdt
  * @timeout: current programmed timeout
  */
 struct sp805_wdt {
+       struct watchdog_device          wdd;
        spinlock_t                      lock;
        void __iomem                    *base;
        struct clk                      *clk;
        struct amba_device              *adev;
-       unsigned long                   status;
-       #define WDT_BUSY                0
-       #define WDT_CAN_BE_CLOSED       1
        unsigned int                    load_val;
        unsigned int                    timeout;
 };
 
-/* local variables */
-static struct sp805_wdt *wdt;
 static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+               "Set to 1 to keep watchdog running after device release");
 
 /* This routine finds load value that will reset system in required timout */
-static void wdt_setload(unsigned int timeout)
+static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout)
 {
+       struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
        u64 load, rate;
 
        rate = clk_get_rate(wdt->clk);
@@ -103,11 +101,14 @@ static void wdt_setload(unsigned int timeout)
        /* roundup timeout to closest positive integer value */
        wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
        spin_unlock(&wdt->lock);
+
+       return 0;
 }
 
 /* returns number of seconds left for reset to occur */
-static u32 wdt_timeleft(void)
+static unsigned int wdt_timeleft(struct watchdog_device *wdd)
 {
+       struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
        u64 load, rate;
 
        rate = clk_get_rate(wdt->clk);
@@ -123,166 +124,96 @@ static u32 wdt_timeleft(void)
        return div_u64(load, rate);
 }
 
-/* enables watchdog timers reset */
-static void wdt_enable(void)
+static int wdt_config(struct watchdog_device *wdd, bool ping)
 {
-       spin_lock(&wdt->lock);
+       struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
+       int ret;
 
-       writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
-       writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
-       writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
-       writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL);
-       writel_relaxed(LOCK, wdt->base + WDTLOCK);
+       if (!ping) {
+               ret = clk_prepare(wdt->clk);
+               if (ret) {
+                       dev_err(&wdt->adev->dev, "clock prepare fail");
+                       return ret;
+               }
 
-       /* Flush posted writes. */
-       readl_relaxed(wdt->base + WDTLOCK);
-       spin_unlock(&wdt->lock);
-}
+               ret = clk_enable(wdt->clk);
+               if (ret) {
+                       dev_err(&wdt->adev->dev, "clock enable fail");
+                       clk_unprepare(wdt->clk);
+                       return ret;
+               }
+       }
 
-/* disables watchdog timers reset */
-static void wdt_disable(void)
-{
        spin_lock(&wdt->lock);
 
        writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
-       writel_relaxed(0, wdt->base + WDTCONTROL);
+       writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
+
+       if (!ping) {
+               writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
+               writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base +
+                               WDTCONTROL);
+       }
+
        writel_relaxed(LOCK, wdt->base + WDTLOCK);
 
        /* Flush posted writes. */
        readl_relaxed(wdt->base + WDTLOCK);
        spin_unlock(&wdt->lock);
+
+       return 0;
 }
 
-static ssize_t sp805_wdt_write(struct file *file, const char *data,
-               size_t len, loff_t *ppos)
+static int wdt_ping(struct watchdog_device *wdd)
 {
-       if (len) {
-               if (!nowayout) {
-                       size_t i;
-
-                       clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
-
-                       for (i = 0; i != len; i++) {
-                               char c;
-
-                               if (get_user(c, data + i))
-                                       return -EFAULT;
-                               /* Check for Magic Close character */
-                               if (c == 'V') {
-                                       set_bit(WDT_CAN_BE_CLOSED,
-                                                       &wdt->status);
-                                       break;
-                               }
-                       }
-               }
-               wdt_enable();
-       }
-       return len;
+       return wdt_config(wdd, true);
 }
 
-static const struct watchdog_info ident = {
-       .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
-       .identity = MODULE_NAME,
-};
-
-static long sp805_wdt_ioctl(struct file *file, unsigned int cmd,
-               unsigned long arg)
+/* enables watchdog timers reset */
+static int wdt_enable(struct watchdog_device *wdd)
 {
-       int ret = -ENOTTY;
-       unsigned int timeout;
-
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               ret = copy_to_user((struct watchdog_info *)arg, &ident,
-                               sizeof(ident)) ? -EFAULT : 0;
-               break;
-
-       case WDIOC_GETSTATUS:
-               ret = put_user(0, (int *)arg);
-               break;
-
-       case WDIOC_KEEPALIVE:
-               wdt_enable();
-               ret = 0;
-               break;
-
-       case WDIOC_SETTIMEOUT:
-               ret = get_user(timeout, (unsigned int *)arg);
-               if (ret)
-                       break;
-
-               wdt_setload(timeout);
-
-               wdt_enable();
-               /* Fall through */
-
-       case WDIOC_GETTIMEOUT:
-               ret = put_user(wdt->timeout, (unsigned int *)arg);
-               break;
-       case WDIOC_GETTIMELEFT:
-               ret = put_user(wdt_timeleft(), (unsigned int *)arg);
-               break;
-       }
-       return ret;
+       return wdt_config(wdd, false);
 }
 
-static int sp805_wdt_open(struct inode *inode, struct file *file)
+/* disables watchdog timers reset */
+static int wdt_disable(struct watchdog_device *wdd)
 {
-       int ret = 0;
-
-       if (test_and_set_bit(WDT_BUSY, &wdt->status))
-               return -EBUSY;
-
-       ret = clk_enable(wdt->clk);
-       if (ret) {
-               dev_err(&wdt->adev->dev, "clock enable fail");
-               goto err;
-       }
-
-       wdt_enable();
+       struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
 
-       /* can not be closed, once enabled */
-       clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
-       return nonseekable_open(inode, file);
+       spin_lock(&wdt->lock);
 
-err:
-       clear_bit(WDT_BUSY, &wdt->status);
-       return ret;
-}
+       writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
+       writel_relaxed(0, wdt->base + WDTCONTROL);
+       writel_relaxed(LOCK, wdt->base + WDTLOCK);
 
-static int sp805_wdt_release(struct inode *inode, struct file *file)
-{
-       if (!test_bit(WDT_CAN_BE_CLOSED, &wdt->status)) {
-               clear_bit(WDT_BUSY, &wdt->status);
-               dev_warn(&wdt->adev->dev, "Device closed unexpectedly\n");
-               return 0;
-       }
+       /* Flush posted writes. */
+       readl_relaxed(wdt->base + WDTLOCK);
+       spin_unlock(&wdt->lock);
 
-       wdt_disable();
        clk_disable(wdt->clk);
-       clear_bit(WDT_BUSY, &wdt->status);
+       clk_unprepare(wdt->clk);
 
        return 0;
 }
 
-static const struct file_operations sp805_wdt_fops = {
-       .owner = THIS_MODULE,
-       .llseek = no_llseek,
-       .write = sp805_wdt_write,
-       .unlocked_ioctl = sp805_wdt_ioctl,
-       .open = sp805_wdt_open,
-       .release = sp805_wdt_release,
+static const struct watchdog_info wdt_info = {
+       .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+       .identity = MODULE_NAME,
 };
 
-static struct miscdevice sp805_wdt_miscdev = {
-       .minor = WATCHDOG_MINOR,
-       .name = "watchdog",
-       .fops = &sp805_wdt_fops,
+static const struct watchdog_ops wdt_ops = {
+       .owner          = THIS_MODULE,
+       .start          = wdt_enable,
+       .stop           = wdt_disable,
+       .ping           = wdt_ping,
+       .set_timeout    = wdt_setload,
+       .get_timeleft   = wdt_timeleft,
 };
 
 static int __devinit
 sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
 {
+       struct sp805_wdt *wdt;
        int ret = 0;
 
        if (!devm_request_mem_region(&adev->dev, adev->res.start,
@@ -315,19 +246,26 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
        }
 
        wdt->adev = adev;
+       wdt->wdd.info = &wdt_info;
+       wdt->wdd.ops = &wdt_ops;
+
        spin_lock_init(&wdt->lock);
-       wdt_setload(DEFAULT_TIMEOUT);
+       watchdog_set_nowayout(&wdt->wdd, nowayout);
+       watchdog_set_drvdata(&wdt->wdd, wdt);
+       wdt_setload(&wdt->wdd, DEFAULT_TIMEOUT);
 
-       ret = misc_register(&sp805_wdt_miscdev);
-       if (ret < 0) {
-               dev_warn(&adev->dev, "cannot register misc device\n");
-               goto err_misc_register;
+       ret = watchdog_register_device(&wdt->wdd);
+       if (ret) {
+               dev_err(&adev->dev, "watchdog_register_device() failed: %d\n",
+                               ret);
+               goto err_register;
        }
+       amba_set_drvdata(adev, wdt);
 
        dev_info(&adev->dev, "registration successful\n");
        return 0;
 
-err_misc_register:
+err_register:
        clk_put(wdt->clk);
 err:
        dev_err(&adev->dev, "Probe Failed!!!\n");
@@ -336,7 +274,11 @@ err:
 
 static int __devexit sp805_wdt_remove(struct amba_device *adev)
 {
-       misc_deregister(&sp805_wdt_miscdev);
+       struct sp805_wdt *wdt = amba_get_drvdata(adev);
+
+       watchdog_unregister_device(&wdt->wdd);
+       amba_set_drvdata(adev, NULL);
+       watchdog_set_drvdata(&wdt->wdd, NULL);
        clk_put(wdt->clk);
 
        return 0;
@@ -345,28 +287,22 @@ static int __devexit sp805_wdt_remove(struct amba_device *adev)
 #ifdef CONFIG_PM
 static int sp805_wdt_suspend(struct device *dev)
 {
-       if (test_bit(WDT_BUSY, &wdt->status)) {
-               wdt_disable();
-               clk_disable(wdt->clk);
-       }
+       struct sp805_wdt *wdt = dev_get_drvdata(dev);
+
+       if (watchdog_active(&wdt->wdd))
+               return wdt_disable(&wdt->wdd);
 
        return 0;
 }
 
 static int sp805_wdt_resume(struct device *dev)
 {
-       int ret = 0;
+       struct sp805_wdt *wdt = dev_get_drvdata(dev);
 
-       if (test_bit(WDT_BUSY, &wdt->status)) {
-               ret = clk_enable(wdt->clk);
-               if (ret) {
-                       dev_err(dev, "clock enable fail");
-                       return ret;
-               }
-               wdt_enable();
-       }
+       if (watchdog_active(&wdt->wdd))
+               return wdt_enable(&wdt->wdd);
 
-       return ret;
+       return 0;
 }
 #endif /* CONFIG_PM */
 
@@ -395,11 +331,6 @@ static struct amba_driver sp805_wdt_driver = {
 
 module_amba_driver(sp805_wdt_driver);
 
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout,
-               "Set to 1 to keep watchdog running after device release");
-
 MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
 MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
index 5603e31afdab03e8d67ad97151f772b879dd015e..aa50da3ccfe3678f8a740eab41abcb61e7beefef 100644 (file)
@@ -91,7 +91,7 @@ static inline void wdt_reset(void)
 static void wdt_timer_tick(unsigned long data)
 {
        if (time_before(jiffies, next_heartbeat) ||
-          (!test_bit(WDOG_ACTIVE, &wdt_dev.status))) {
+          (!watchdog_active(&wdt_dev))) {
                wdt_reset();
                mod_timer(&timer, jiffies + WDT_HEARTBEAT);
        } else
index 14d768bfa267d78923a467484ed1f66275471bb2..6aa46a90ff028691f97627765f593d864bc885a1 100644 (file)
 #include <linux/kernel.h>      /* For printk/panic/... */
 #include <linux/watchdog.h>    /* For watchdog specific items */
 #include <linux/init.h>                /* For __init/__exit/... */
+#include <linux/idr.h>         /* For ida_* macros */
+#include <linux/err.h>         /* For IS_ERR macros */
 
-#include "watchdog_dev.h"      /* For watchdog_dev_register/... */
+#include "watchdog_core.h"     /* For watchdog_dev_register/... */
+
+static DEFINE_IDA(watchdog_ida);
+static struct class *watchdog_class;
 
 /**
  * watchdog_register_device() - register a watchdog device
@@ -49,7 +54,7 @@
  */
 int watchdog_register_device(struct watchdog_device *wdd)
 {
-       int ret;
+       int ret, id, devno;
 
        if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
                return -EINVAL;
@@ -74,10 +79,38 @@ int watchdog_register_device(struct watchdog_device *wdd)
         * corrupted in a later stage then we expect a kernel panic!
         */
 
-       /* We only support 1 watchdog device via the /dev/watchdog interface */
+       mutex_init(&wdd->lock);
+       id = ida_simple_get(&watchdog_ida, 0, MAX_DOGS, GFP_KERNEL);
+       if (id < 0)
+               return id;
+       wdd->id = id;
+
        ret = watchdog_dev_register(wdd);
        if (ret) {
-               pr_err("error registering /dev/watchdog (err=%d)\n", ret);
+               ida_simple_remove(&watchdog_ida, id);
+               if (!(id == 0 && ret == -EBUSY))
+                       return ret;
+
+               /* Retry in case a legacy watchdog module exists */
+               id = ida_simple_get(&watchdog_ida, 1, MAX_DOGS, GFP_KERNEL);
+               if (id < 0)
+                       return id;
+               wdd->id = id;
+
+               ret = watchdog_dev_register(wdd);
+               if (ret) {
+                       ida_simple_remove(&watchdog_ida, id);
+                       return ret;
+               }
+       }
+
+       devno = wdd->cdev.dev;
+       wdd->dev = device_create(watchdog_class, wdd->parent, devno,
+                                       NULL, "watchdog%d", wdd->id);
+       if (IS_ERR(wdd->dev)) {
+               watchdog_dev_unregister(wdd);
+               ida_simple_remove(&watchdog_ida, id);
+               ret = PTR_ERR(wdd->dev);
                return ret;
        }
 
@@ -95,6 +128,7 @@ EXPORT_SYMBOL_GPL(watchdog_register_device);
 void watchdog_unregister_device(struct watchdog_device *wdd)
 {
        int ret;
+       int devno = wdd->cdev.dev;
 
        if (wdd == NULL)
                return;
@@ -102,9 +136,41 @@ void watchdog_unregister_device(struct watchdog_device *wdd)
        ret = watchdog_dev_unregister(wdd);
        if (ret)
                pr_err("error unregistering /dev/watchdog (err=%d)\n", ret);
+       device_destroy(watchdog_class, devno);
+       ida_simple_remove(&watchdog_ida, wdd->id);
+       wdd->dev = NULL;
 }
 EXPORT_SYMBOL_GPL(watchdog_unregister_device);
 
+static int __init watchdog_init(void)
+{
+       int err;
+
+       watchdog_class = class_create(THIS_MODULE, "watchdog");
+       if (IS_ERR(watchdog_class)) {
+               pr_err("couldn't create class\n");
+               return PTR_ERR(watchdog_class);
+       }
+
+       err = watchdog_dev_init();
+       if (err < 0) {
+               class_destroy(watchdog_class);
+               return err;
+       }
+
+       return 0;
+}
+
+static void __exit watchdog_exit(void)
+{
+       watchdog_dev_exit();
+       class_destroy(watchdog_class);
+       ida_destroy(&watchdog_ida);
+}
+
+subsys_initcall(watchdog_init);
+module_exit(watchdog_exit);
+
 MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
 MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
 MODULE_DESCRIPTION("WatchDog Timer Driver Core");
diff --git a/drivers/watchdog/watchdog_core.h b/drivers/watchdog/watchdog_core.h
new file mode 100644 (file)
index 0000000..6c95141
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ *     watchdog_core.h
+ *
+ *     (c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
+ *                                             All Rights Reserved.
+ *
+ *     (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
+ *
+ *     This source code is part of the generic code that can be used
+ *     by all the watchdog timer drivers.
+ *
+ *     Based on source code of the following authors:
+ *       Matt Domsch <Matt_Domsch@dell.com>,
+ *       Rob Radez <rob@osinvestor.com>,
+ *       Rusty Lynch <rusty@linux.co.intel.com>
+ *       Satyam Sharma <satyam@infradead.org>
+ *       Randy Dunlap <randy.dunlap@oracle.com>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ *
+ *     Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
+ *     admit liability nor provide warranty for any of this software.
+ *     This material is provided "AS-IS" and at no charge.
+ */
+
+#define MAX_DOGS       32      /* Maximum number of watchdog devices */
+
+/*
+ *     Functions/procedures to be called by the core
+ */
+extern int watchdog_dev_register(struct watchdog_device *);
+extern int watchdog_dev_unregister(struct watchdog_device *);
+extern int __init watchdog_dev_init(void);
+extern void __exit watchdog_dev_exit(void);
index 8558da912c42fd76c5bde4ddd55528ed0be48615..672d169bf1dacfa4ae0150da6109caeb266d0590 100644 (file)
 #include <linux/init.h>                /* For __init/__exit/... */
 #include <linux/uaccess.h>     /* For copy_to_user/put_user/... */
 
-/* make sure we only register one /dev/watchdog device */
-static unsigned long watchdog_dev_busy;
+#include "watchdog_core.h"
+
+/* the dev_t structure to store the dynamically allocated watchdog devices */
+static dev_t watchdog_devt;
 /* the watchdog device behind /dev/watchdog */
-static struct watchdog_device *wdd;
+static struct watchdog_device *old_wdd;
 
 /*
  *     watchdog_ping: ping the watchdog.
@@ -59,13 +61,26 @@ static struct watchdog_device *wdd;
 
 static int watchdog_ping(struct watchdog_device *wddev)
 {
-       if (test_bit(WDOG_ACTIVE, &wddev->status)) {
-               if (wddev->ops->ping)
-                       return wddev->ops->ping(wddev);  /* ping the watchdog */
-               else
-                       return wddev->ops->start(wddev); /* restart watchdog */
+       int err = 0;
+
+       mutex_lock(&wddev->lock);
+
+       if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+               err = -ENODEV;
+               goto out_ping;
        }
-       return 0;
+
+       if (!watchdog_active(wddev))
+               goto out_ping;
+
+       if (wddev->ops->ping)
+               err = wddev->ops->ping(wddev);  /* ping the watchdog */
+       else
+               err = wddev->ops->start(wddev); /* restart watchdog */
+
+out_ping:
+       mutex_unlock(&wddev->lock);
+       return err;
 }
 
 /*
@@ -79,16 +94,25 @@ static int watchdog_ping(struct watchdog_device *wddev)
 
 static int watchdog_start(struct watchdog_device *wddev)
 {
-       int err;
+       int err = 0;
 
-       if (!test_bit(WDOG_ACTIVE, &wddev->status)) {
-               err = wddev->ops->start(wddev);
-               if (err < 0)
-                       return err;
+       mutex_lock(&wddev->lock);
 
-               set_bit(WDOG_ACTIVE, &wddev->status);
+       if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+               err = -ENODEV;
+               goto out_start;
        }
-       return 0;
+
+       if (watchdog_active(wddev))
+               goto out_start;
+
+       err = wddev->ops->start(wddev);
+       if (err == 0)
+               set_bit(WDOG_ACTIVE, &wddev->status);
+
+out_start:
+       mutex_unlock(&wddev->lock);
+       return err;
 }
 
 /*
@@ -103,22 +127,155 @@ static int watchdog_start(struct watchdog_device *wddev)
 
 static int watchdog_stop(struct watchdog_device *wddev)
 {
-       int err = -EBUSY;
+       int err = 0;
 
-       if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
-               pr_info("%s: nowayout prevents watchdog to be stopped!\n",
-                                                       wddev->info->identity);
-               return err;
+       mutex_lock(&wddev->lock);
+
+       if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+               err = -ENODEV;
+               goto out_stop;
        }
 
-       if (test_bit(WDOG_ACTIVE, &wddev->status)) {
-               err = wddev->ops->stop(wddev);
-               if (err < 0)
-                       return err;
+       if (!watchdog_active(wddev))
+               goto out_stop;
 
+       if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
+               dev_info(wddev->dev, "nowayout prevents watchdog being stopped!\n");
+               err = -EBUSY;
+               goto out_stop;
+       }
+
+       err = wddev->ops->stop(wddev);
+       if (err == 0)
                clear_bit(WDOG_ACTIVE, &wddev->status);
+
+out_stop:
+       mutex_unlock(&wddev->lock);
+       return err;
+}
+
+/*
+ *     watchdog_get_status: wrapper to get the watchdog status
+ *     @wddev: the watchdog device to get the status from
+ *     @status: the status of the watchdog device
+ *
+ *     Get the watchdog's status flags.
+ */
+
+static int watchdog_get_status(struct watchdog_device *wddev,
+                                                       unsigned int *status)
+{
+       int err = 0;
+
+       *status = 0;
+       if (!wddev->ops->status)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&wddev->lock);
+
+       if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+               err = -ENODEV;
+               goto out_status;
        }
-       return 0;
+
+       *status = wddev->ops->status(wddev);
+
+out_status:
+       mutex_unlock(&wddev->lock);
+       return err;
+}
+
+/*
+ *     watchdog_set_timeout: set the watchdog timer timeout
+ *     @wddev: the watchdog device to set the timeout for
+ *     @timeout: timeout to set in seconds
+ */
+
+static int watchdog_set_timeout(struct watchdog_device *wddev,
+                                                       unsigned int timeout)
+{
+       int err;
+
+       if ((wddev->ops->set_timeout == NULL) ||
+           !(wddev->info->options & WDIOF_SETTIMEOUT))
+               return -EOPNOTSUPP;
+
+       if ((wddev->max_timeout != 0) &&
+           (timeout < wddev->min_timeout || timeout > wddev->max_timeout))
+               return -EINVAL;
+
+       mutex_lock(&wddev->lock);
+
+       if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+               err = -ENODEV;
+               goto out_timeout;
+       }
+
+       err = wddev->ops->set_timeout(wddev, timeout);
+
+out_timeout:
+       mutex_unlock(&wddev->lock);
+       return err;
+}
+
+/*
+ *     watchdog_get_timeleft: wrapper to get the time left before a reboot
+ *     @wddev: the watchdog device to get the remaining time from
+ *     @timeleft: the time that's left
+ *
+ *     Get the time before a watchdog will reboot (if not pinged).
+ */
+
+static int watchdog_get_timeleft(struct watchdog_device *wddev,
+                                                       unsigned int *timeleft)
+{
+       int err = 0;
+
+       *timeleft = 0;
+       if (!wddev->ops->get_timeleft)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&wddev->lock);
+
+       if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+               err = -ENODEV;
+               goto out_timeleft;
+       }
+
+       *timeleft = wddev->ops->get_timeleft(wddev);
+
+out_timeleft:
+       mutex_unlock(&wddev->lock);
+       return err;
+}
+
+/*
+ *     watchdog_ioctl_op: call the watchdog drivers ioctl op if defined
+ *     @wddev: the watchdog device to do the ioctl on
+ *     @cmd: watchdog command
+ *     @arg: argument pointer
+ */
+
+static int watchdog_ioctl_op(struct watchdog_device *wddev, unsigned int cmd,
+                                                       unsigned long arg)
+{
+       int err;
+
+       if (!wddev->ops->ioctl)
+               return -ENOIOCTLCMD;
+
+       mutex_lock(&wddev->lock);
+
+       if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+               err = -ENODEV;
+               goto out_ioctl;
+       }
+
+       err = wddev->ops->ioctl(wddev, cmd, arg);
+
+out_ioctl:
+       mutex_unlock(&wddev->lock);
+       return err;
 }
 
 /*
@@ -136,6 +293,7 @@ static int watchdog_stop(struct watchdog_device *wddev)
 static ssize_t watchdog_write(struct file *file, const char __user *data,
                                                size_t len, loff_t *ppos)
 {
+       struct watchdog_device *wdd = file->private_data;
        size_t i;
        char c;
 
@@ -175,23 +333,24 @@ static ssize_t watchdog_write(struct file *file, const char __user *data,
 static long watchdog_ioctl(struct file *file, unsigned int cmd,
                                                        unsigned long arg)
 {
+       struct watchdog_device *wdd = file->private_data;
        void __user *argp = (void __user *)arg;
        int __user *p = argp;
        unsigned int val;
        int err;
 
-       if (wdd->ops->ioctl) {
-               err = wdd->ops->ioctl(wdd, cmd, arg);
-               if (err != -ENOIOCTLCMD)
-                       return err;
-       }
+       err = watchdog_ioctl_op(wdd, cmd, arg);
+       if (err != -ENOIOCTLCMD)
+               return err;
 
        switch (cmd) {
        case WDIOC_GETSUPPORT:
                return copy_to_user(argp, wdd->info,
                        sizeof(struct watchdog_info)) ? -EFAULT : 0;
        case WDIOC_GETSTATUS:
-               val = wdd->ops->status ? wdd->ops->status(wdd) : 0;
+               err = watchdog_get_status(wdd, &val);
+               if (err)
+                       return err;
                return put_user(val, p);
        case WDIOC_GETBOOTSTATUS:
                return put_user(wdd->bootstatus, p);
@@ -215,15 +374,9 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
                watchdog_ping(wdd);
                return 0;
        case WDIOC_SETTIMEOUT:
-               if ((wdd->ops->set_timeout == NULL) ||
-                   !(wdd->info->options & WDIOF_SETTIMEOUT))
-                       return -EOPNOTSUPP;
                if (get_user(val, p))
                        return -EFAULT;
-               if ((wdd->max_timeout != 0) &&
-                   (val < wdd->min_timeout || val > wdd->max_timeout))
-                               return -EINVAL;
-               err = wdd->ops->set_timeout(wdd, val);
+               err = watchdog_set_timeout(wdd, val);
                if (err < 0)
                        return err;
                /* If the watchdog is active then we send a keepalive ping
@@ -237,21 +390,21 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
                        return -EOPNOTSUPP;
                return put_user(wdd->timeout, p);
        case WDIOC_GETTIMELEFT:
-               if (!wdd->ops->get_timeleft)
-                       return -EOPNOTSUPP;
-
-               return put_user(wdd->ops->get_timeleft(wdd), p);
+               err = watchdog_get_timeleft(wdd, &val);
+               if (err)
+                       return err;
+               return put_user(val, p);
        default:
                return -ENOTTY;
        }
 }
 
 /*
- *     watchdog_open: open the /dev/watchdog device.
+ *     watchdog_open: open the /dev/watchdog* devices.
  *     @inode: inode of device
  *     @file: file handle to device
  *
- *     When the /dev/watchdog device gets opened, we start the watchdog.
+ *     When the /dev/watchdog* device gets opened, we start the watchdog.
  *     Watch out: the /dev/watchdog device is single open, so we make sure
  *     it can only be opened once.
  */
@@ -259,6 +412,13 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
 static int watchdog_open(struct inode *inode, struct file *file)
 {
        int err = -EBUSY;
+       struct watchdog_device *wdd;
+
+       /* Get the corresponding watchdog device */
+       if (imajor(inode) == MISC_MAJOR)
+               wdd = old_wdd;
+       else
+               wdd = container_of(inode->i_cdev, struct watchdog_device, cdev);
 
        /* the watchdog is single open! */
        if (test_and_set_bit(WDOG_DEV_OPEN, &wdd->status))
@@ -275,6 +435,11 @@ static int watchdog_open(struct inode *inode, struct file *file)
        if (err < 0)
                goto out_mod;
 
+       file->private_data = wdd;
+
+       if (wdd->ops->ref)
+               wdd->ops->ref(wdd);
+
        /* dev/watchdog is a virtual (and thus non-seekable) filesystem */
        return nonseekable_open(inode, file);
 
@@ -286,9 +451,9 @@ out:
 }
 
 /*
- *      watchdog_release: release the /dev/watchdog device.
- *      @inode: inode of device
- *      @file: file handle to device
+ *     watchdog_release: release the watchdog device.
+ *     @inode: inode of device
+ *     @file: file handle to device
  *
  *     This is the code for when /dev/watchdog gets closed. We will only
  *     stop the watchdog when we have received the magic char (and nowayout
@@ -297,6 +462,7 @@ out:
 
 static int watchdog_release(struct inode *inode, struct file *file)
 {
+       struct watchdog_device *wdd = file->private_data;
        int err = -EBUSY;
 
        /*
@@ -310,7 +476,10 @@ static int watchdog_release(struct inode *inode, struct file *file)
 
        /* If the watchdog was not stopped, send a keepalive ping */
        if (err < 0) {
-               pr_crit("%s: watchdog did not stop!\n", wdd->info->identity);
+               mutex_lock(&wdd->lock);
+               if (!test_bit(WDOG_UNREGISTERED, &wdd->status))
+                       dev_crit(wdd->dev, "watchdog did not stop!\n");
+               mutex_unlock(&wdd->lock);
                watchdog_ping(wdd);
        }
 
@@ -320,6 +489,10 @@ static int watchdog_release(struct inode *inode, struct file *file)
        /* make sure that /dev/watchdog can be re-opened */
        clear_bit(WDOG_DEV_OPEN, &wdd->status);
 
+       /* Note wdd may be gone after this, do not use after this! */
+       if (wdd->ops->unref)
+               wdd->ops->unref(wdd);
+
        return 0;
 }
 
@@ -338,62 +511,92 @@ static struct miscdevice watchdog_miscdev = {
 };
 
 /*
- *     watchdog_dev_register:
+ *     watchdog_dev_register: register a watchdog device
  *     @watchdog: watchdog device
  *
- *     Register a watchdog device as /dev/watchdog. /dev/watchdog
- *     is actually a miscdevice and thus we set it up like that.
+ *     Register a watchdog device including handling the legacy
+ *     /dev/watchdog node. /dev/watchdog is actually a miscdevice and
+ *     thus we set it up like that.
  */
 
 int watchdog_dev_register(struct watchdog_device *watchdog)
 {
-       int err;
-
-       /* Only one device can register for /dev/watchdog */
-       if (test_and_set_bit(0, &watchdog_dev_busy)) {
-               pr_err("only one watchdog can use /dev/watchdog\n");
-               return -EBUSY;
+       int err, devno;
+
+       if (watchdog->id == 0) {
+               watchdog_miscdev.parent = watchdog->parent;
+               err = misc_register(&watchdog_miscdev);
+               if (err != 0) {
+                       pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
+                               watchdog->info->identity, WATCHDOG_MINOR, err);
+                       if (err == -EBUSY)
+                               pr_err("%s: a legacy watchdog module is probably present.\n",
+                                       watchdog->info->identity);
+                       return err;
+               }
+               old_wdd = watchdog;
        }
 
-       wdd = watchdog;
-
-       err = misc_register(&watchdog_miscdev);
-       if (err != 0) {
-               pr_err("%s: cannot register miscdev on minor=%d (err=%d)\n",
-                      watchdog->info->identity, WATCHDOG_MINOR, err);
-               goto out;
+       /* Fill in the data structures */
+       devno = MKDEV(MAJOR(watchdog_devt), watchdog->id);
+       cdev_init(&watchdog->cdev, &watchdog_fops);
+       watchdog->cdev.owner = watchdog->ops->owner;
+
+       /* Add the device */
+       err  = cdev_add(&watchdog->cdev, devno, 1);
+       if (err) {
+               pr_err("watchdog%d unable to add device %d:%d\n",
+                       watchdog->id,  MAJOR(watchdog_devt), watchdog->id);
+               if (watchdog->id == 0) {
+                       misc_deregister(&watchdog_miscdev);
+                       old_wdd = NULL;
+               }
        }
-
-       return 0;
-
-out:
-       wdd = NULL;
-       clear_bit(0, &watchdog_dev_busy);
        return err;
 }
 
 /*
- *     watchdog_dev_unregister:
+ *     watchdog_dev_unregister: unregister a watchdog device
  *     @watchdog: watchdog device
  *
- *     Deregister the /dev/watchdog device.
+ *     Unregister the watchdog and if needed the legacy /dev/watchdog device.
  */
 
 int watchdog_dev_unregister(struct watchdog_device *watchdog)
 {
-       /* Check that a watchdog device was registered in the past */
-       if (!test_bit(0, &watchdog_dev_busy) || !wdd)
-               return -ENODEV;
-
-       /* We can only unregister the watchdog device that was registered */
-       if (watchdog != wdd) {
-               pr_err("%s: watchdog was not registered as /dev/watchdog\n",
-                      watchdog->info->identity);
-               return -ENODEV;
+       mutex_lock(&watchdog->lock);
+       set_bit(WDOG_UNREGISTERED, &watchdog->status);
+       mutex_unlock(&watchdog->lock);
+
+       cdev_del(&watchdog->cdev);
+       if (watchdog->id == 0) {
+               misc_deregister(&watchdog_miscdev);
+               old_wdd = NULL;
        }
-
-       misc_deregister(&watchdog_miscdev);
-       wdd = NULL;
-       clear_bit(0, &watchdog_dev_busy);
        return 0;
 }
+
+/*
+ *     watchdog_dev_init: init dev part of watchdog core
+ *
+ *     Allocate a range of chardev nodes to use for watchdog devices
+ */
+
+int __init watchdog_dev_init(void)
+{
+       int err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
+       if (err < 0)
+               pr_err("watchdog: unable to allocate char dev region\n");
+       return err;
+}
+
+/*
+ *     watchdog_dev_exit: exit dev part of watchdog core
+ *
+ *     Release the range of chardev nodes used for watchdog devices
+ */
+
+void __exit watchdog_dev_exit(void)
+{
+       unregister_chrdev_region(watchdog_devt, MAX_DOGS);
+}
diff --git a/drivers/watchdog/watchdog_dev.h b/drivers/watchdog/watchdog_dev.h
deleted file mode 100644 (file)
index bc7612b..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- *     watchdog_core.h
- *
- *     (c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
- *                                             All Rights Reserved.
- *
- *     (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
- *
- *     This source code is part of the generic code that can be used
- *     by all the watchdog timer drivers.
- *
- *     Based on source code of the following authors:
- *       Matt Domsch <Matt_Domsch@dell.com>,
- *       Rob Radez <rob@osinvestor.com>,
- *       Rusty Lynch <rusty@linux.co.intel.com>
- *       Satyam Sharma <satyam@infradead.org>
- *       Randy Dunlap <randy.dunlap@oracle.com>
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- *
- *     Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
- *     admit liability nor provide warranty for any of this software.
- *     This material is provided "AS-IS" and at no charge.
- */
-
-/*
- *     Functions/procedures to be called by the core
- */
-int watchdog_dev_register(struct watchdog_device *);
-int watchdog_dev_unregister(struct watchdog_device *);
index 9adc5be57b13259b794f2b8b9904c9f1a14efb7e..fc3488631136c11bff8d7e0100c01137ae31d9ef 100644 (file)
@@ -17,7 +17,7 @@ obj-$(CONFIG_XEN_SYS_HYPERVISOR)      += sys-hypervisor.o
 obj-$(CONFIG_XEN_PVHVM)                        += platform-pci.o
 obj-$(CONFIG_XEN_TMEM)                 += tmem.o
 obj-$(CONFIG_SWIOTLB_XEN)              += swiotlb-xen.o
-obj-$(CONFIG_XEN_DOM0)                 += pci.o
+obj-$(CONFIG_XEN_DOM0)                 += pci.o acpi.o
 obj-$(CONFIG_XEN_PCIDEV_BACKEND)       += xen-pciback/
 obj-$(CONFIG_XEN_PRIVCMD)              += xen-privcmd.o
 obj-$(CONFIG_XEN_ACPI_PROCESSOR)       += xen-acpi-processor.o
diff --git a/drivers/xen/acpi.c b/drivers/xen/acpi.c
new file mode 100644 (file)
index 0000000..119d42a
--- /dev/null
@@ -0,0 +1,62 @@
+/******************************************************************************
+ * acpi.c
+ * acpi file for domain 0 kernel
+ *
+ * Copyright (c) 2011 Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ * Copyright (c) 2011 Yu Ke ke.yu@intel.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <xen/acpi.h>
+#include <xen/interface/platform.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+int xen_acpi_notify_hypervisor_state(u8 sleep_state,
+                                    u32 pm1a_cnt, u32 pm1b_cnt)
+{
+       struct xen_platform_op op = {
+               .cmd = XENPF_enter_acpi_sleep,
+               .interface_version = XENPF_INTERFACE_VERSION,
+               .u = {
+                       .enter_acpi_sleep = {
+                               .pm1a_cnt_val = (u16)pm1a_cnt,
+                               .pm1b_cnt_val = (u16)pm1b_cnt,
+                               .sleep_state = sleep_state,
+                       },
+               },
+       };
+
+       if ((pm1a_cnt & 0xffff0000) || (pm1b_cnt & 0xffff0000)) {
+               WARN(1, "Using more than 16bits of PM1A/B 0x%x/0x%x!"
+                    "Email xen-devel@lists.xensource.com  Thank you.\n", \
+                    pm1a_cnt, pm1b_cnt);
+               return -1;
+       }
+
+       HYPERVISOR_dom0_op(&op);
+       return 1;
+}
index 0a8a17cd80bea172d767f02609b0164b45fe1717..6908e4ce2a0d69aa67ca0e11251d834770bbd68c 100644 (file)
@@ -611,7 +611,7 @@ static void disable_pirq(struct irq_data *data)
        disable_dynirq(data);
 }
 
-static int find_irq_by_gsi(unsigned gsi)
+int xen_irq_from_gsi(unsigned gsi)
 {
        struct irq_info *info;
 
@@ -625,6 +625,7 @@ static int find_irq_by_gsi(unsigned gsi)
 
        return -1;
 }
+EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
 
 /*
  * Do not make any assumptions regarding the relationship between the
@@ -644,7 +645,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 
        mutex_lock(&irq_mapping_update_lock);
 
-       irq = find_irq_by_gsi(gsi);
+       irq = xen_irq_from_gsi(gsi);
        if (irq != -1) {
                printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
                       irq, gsi);
index f100ce20b16b428880863ab768bf952ed84c43a9..0bfc1ef11259eccaa937eb8d0913e4e14e04832e 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
+#include <linux/hardirq.h>
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -426,10 +427,8 @@ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
        nflags = *pflags;
        do {
                flags = nflags;
-               if (flags & (GTF_reading|GTF_writing)) {
-                       printk(KERN_ALERT "WARNING: g.e. still in use!\n");
+               if (flags & (GTF_reading|GTF_writing))
                        return 0;
-               }
        } while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
 
        return 1;
@@ -458,12 +457,103 @@ static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
        return 1;
 }
 
-int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
 {
        return gnttab_interface->end_foreign_access_ref(ref, readonly);
 }
+
+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+{
+       if (_gnttab_end_foreign_access_ref(ref, readonly))
+               return 1;
+       pr_warn("WARNING: g.e. %#x still in use!\n", ref);
+       return 0;
+}
 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
 
+struct deferred_entry {
+       struct list_head list;
+       grant_ref_t ref;
+       bool ro;
+       uint16_t warn_delay;
+       struct page *page;
+};
+static LIST_HEAD(deferred_list);
+static void gnttab_handle_deferred(unsigned long);
+static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);
+
+static void gnttab_handle_deferred(unsigned long unused)
+{
+       unsigned int nr = 10;
+       struct deferred_entry *first = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&gnttab_list_lock, flags);
+       while (nr--) {
+               struct deferred_entry *entry
+                       = list_first_entry(&deferred_list,
+                                          struct deferred_entry, list);
+
+               if (entry == first)
+                       break;
+               list_del(&entry->list);
+               spin_unlock_irqrestore(&gnttab_list_lock, flags);
+               if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
+                       put_free_entry(entry->ref);
+                       if (entry->page) {
+                               pr_debug("freeing g.e. %#x (pfn %#lx)\n",
+                                        entry->ref, page_to_pfn(entry->page));
+                               __free_page(entry->page);
+                       } else
+                               pr_info("freeing g.e. %#x\n", entry->ref);
+                       kfree(entry);
+                       entry = NULL;
+               } else {
+                       if (!--entry->warn_delay)
+                               pr_info("g.e. %#x still pending\n",
+                                       entry->ref);
+                       if (!first)
+                               first = entry;
+               }
+               spin_lock_irqsave(&gnttab_list_lock, flags);
+               if (entry)
+                       list_add_tail(&entry->list, &deferred_list);
+               else if (list_empty(&deferred_list))
+                       break;
+       }
+       if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
+               deferred_timer.expires = jiffies + HZ;
+               add_timer(&deferred_timer);
+       }
+       spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+
+static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
+                               struct page *page)
+{
+       struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+       const char *what = KERN_WARNING "leaking";
+
+       if (entry) {
+               unsigned long flags;
+
+               entry->ref = ref;
+               entry->ro = readonly;
+               entry->page = page;
+               entry->warn_delay = 60;
+               spin_lock_irqsave(&gnttab_list_lock, flags);
+               list_add_tail(&entry->list, &deferred_list);
+               if (!timer_pending(&deferred_timer)) {
+                       deferred_timer.expires = jiffies + HZ;
+                       add_timer(&deferred_timer);
+               }
+               spin_unlock_irqrestore(&gnttab_list_lock, flags);
+               what = KERN_DEBUG "deferring";
+       }
+       printk("%s g.e. %#x (pfn %#lx)\n",
+              what, ref, page ? page_to_pfn(page) : -1);
+}
+
 void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
                               unsigned long page)
 {
@@ -471,12 +561,9 @@ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
                put_free_entry(ref);
                if (page != 0)
                        free_page(page);
-       } else {
-               /* XXX This needs to be fixed so that the ref and page are
-                  placed on a list to be freed up later. */
-               printk(KERN_WARNING
-                      "WARNING: leaking g.e. and page still in use!\n");
-       }
+       } else
+               gnttab_add_deferred(ref, readonly,
+                                   page ? virt_to_page(page) : NULL);
 }
 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
 
@@ -741,6 +828,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct page **pages, unsigned int count)
 {
        int i, ret;
+       bool lazy = false;
        pte_t *pte;
        unsigned long mfn;
 
@@ -751,6 +839,11 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return ret;
 
+       if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+               arch_enter_lazy_mmu_mode();
+               lazy = true;
+       }
+
        for (i = 0; i < count; i++) {
                /* Do not add to override if the map failed. */
                if (map_ops[i].status)
@@ -769,6 +862,9 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                        return ret;
        }
 
+       if (lazy)
+               arch_leave_lazy_mmu_mode();
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(gnttab_map_refs);
@@ -777,6 +873,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                      struct page **pages, unsigned int count, bool clear_pte)
 {
        int i, ret;
+       bool lazy = false;
 
        ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
        if (ret)
@@ -785,12 +882,20 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return ret;
 
+       if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+               arch_enter_lazy_mmu_mode();
+               lazy = true;
+       }
+
        for (i = 0; i < count; i++) {
                ret = m2p_remove_override(pages[i], clear_pte);
                if (ret)
                        return ret;
        }
 
+       if (lazy)
+               arch_leave_lazy_mmu_mode();
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
index 0b48579a9cd6066c170741913875ebbe716dce0c..7ff2569e17aeae423039f6c44e23b246c549e0b3 100644 (file)
@@ -29,6 +29,7 @@
 #include <acpi/acpi_drivers.h>
 #include <acpi/processor.h>
 
+#include <xen/xen.h>
 #include <xen/interface/platform.h>
 #include <asm/xen/hypercall.h>
 
index 146c948970160147142e22b511439346f6602cf1..7d041cb6da2662fada88101835b94d1aed63b9bc 100644 (file)
@@ -105,6 +105,12 @@ static unsigned int selfballoon_interval __read_mostly = 5;
  */
 static unsigned int selfballoon_min_usable_mb;
 
+/*
+ * Amount of RAM in MB to add to the target number of pages.
+ * Can be used to reserve some more room for caches and the like.
+ */
+static unsigned int selfballoon_reserved_mb;
+
 static void selfballoon_process(struct work_struct *work);
 static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
 
@@ -217,7 +223,8 @@ static void selfballoon_process(struct work_struct *work)
                cur_pages = totalram_pages;
                tgt_pages = cur_pages; /* default is no change */
                goal_pages = percpu_counter_read_positive(&vm_committed_as) +
-                               totalreserve_pages;
+                               totalreserve_pages +
+                               MB2PAGES(selfballoon_reserved_mb);
 #ifdef CONFIG_FRONTSWAP
                /* allow space for frontswap pages to be repatriated */
                if (frontswap_selfshrinking && frontswap_enabled)
@@ -397,6 +404,30 @@ static DEVICE_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
                   show_selfballoon_min_usable_mb,
                   store_selfballoon_min_usable_mb);
 
+SELFBALLOON_SHOW(selfballoon_reserved_mb, "%d\n",
+                               selfballoon_reserved_mb);
+
+static ssize_t store_selfballoon_reserved_mb(struct device *dev,
+                                            struct device_attribute *attr,
+                                            const char *buf,
+                                            size_t count)
+{
+       unsigned long val;
+       int err;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+       err = strict_strtoul(buf, 10, &val);
+       if (err || val == 0)
+               return -EINVAL;
+       selfballoon_reserved_mb = val;
+       return count;
+}
+
+static DEVICE_ATTR(selfballoon_reserved_mb, S_IRUGO | S_IWUSR,
+                  show_selfballoon_reserved_mb,
+                  store_selfballoon_reserved_mb);
+
 
 #ifdef CONFIG_FRONTSWAP
 SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking);
@@ -480,6 +511,7 @@ static struct attribute *selfballoon_attrs[] = {
        &dev_attr_selfballoon_downhysteresis.attr,
        &dev_attr_selfballoon_uphysteresis.attr,
        &dev_attr_selfballoon_min_usable_mb.attr,
+       &dev_attr_selfballoon_reserved_mb.attr,
 #ifdef CONFIG_FRONTSWAP
        &dev_attr_frontswap_selfshrinking.attr,
        &dev_attr_frontswap_hysteresis.attr,
index 2eff7a6aaa20a8e920ed4f0e7b024a941e791580..52fe7ad076669ce40992d0d423c26c37f1568819 100644 (file)
@@ -234,3 +234,9 @@ int xb_init_comms(void)
 
        return 0;
 }
+
+void xb_deinit_comms(void)
+{
+       unbind_from_irqhandler(xenbus_irq, &xb_waitq);
+       xenbus_irq = 0;
+}
index 6e42800fa499bc6a303efabd300e763129d9888e..c8abd3b8a6c48087967ddcfb2ac012bf642a0e10 100644 (file)
@@ -35,6 +35,7 @@
 
 int xs_init(void);
 int xb_init_comms(void);
+void xb_deinit_comms(void);
 
 /* Low level routines. */
 int xb_write(const void *data, unsigned len);
index 3d3be78c1093788f1591426157fdd4db9f3d5459..be738c43104bea15dfca7422055d2f0cd79ecf9f 100644 (file)
@@ -8,7 +8,11 @@
 
 #include <xen/xen.h>
 #include <xen/page.h>
+#include <xen/xenbus.h>
 #include <xen/xenbus_dev.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <asm/xen/hypervisor.h>
 
 #include "xenbus_comms.h"
 
@@ -22,6 +26,50 @@ static int xenbus_backend_open(struct inode *inode, struct file *filp)
        return nonseekable_open(inode, filp);
 }
 
+static long xenbus_alloc(domid_t domid)
+{
+       struct evtchn_alloc_unbound arg;
+       int err = -EEXIST;
+
+       xs_suspend();
+
+       /* If xenstored_ready is nonzero, that means we have already talked to
+        * xenstore and set up watches. These watches will be restored by
+        * xs_resume, but that requires communication over the port established
+        * below that is not visible to anyone until the ioctl returns.
+        *
+        * This can be resolved by splitting the ioctl into two parts
+        * (postponing the resume until xenstored is active) but this is
+        * unnecessarily complex for the intended use where xenstored is only
+        * started once - so return -EEXIST if it's already running.
+        */
+       if (xenstored_ready)
+               goto out_err;
+
+       gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid,
+                       virt_to_mfn(xen_store_interface), 0 /* writable */);
+
+       arg.dom = DOMID_SELF;
+       arg.remote_dom = domid;
+
+       err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &arg);
+       if (err)
+               goto out_err;
+
+       if (xen_store_evtchn > 0)
+               xb_deinit_comms();
+
+       xen_store_evtchn = arg.port;
+
+       xs_resume();
+
+       return arg.port;
+
+ out_err:
+       xs_suspend_cancel();
+       return err;
+}
+
 static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
 {
        if (!capable(CAP_SYS_ADMIN))
@@ -33,6 +81,9 @@ static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned l
                                return xen_store_evtchn;
                        return -ENODEV;
 
+               case IOCTL_XENBUS_BACKEND_SETUP:
+                       return xenbus_alloc(data);
+
                default:
                        return -ENOTTY;
        }
index 014c8dd62962c02a8acfb17f6a771bc76de1a73a..57ccb7537dae3064b6892e4f6af6cff8bcada3a6 100644 (file)
@@ -448,7 +448,7 @@ void v9fs_evict_inode(struct inode *inode)
        struct v9fs_inode *v9inode = V9FS_I(inode);
 
        truncate_inode_pages(inode->i_mapping, 0);
-       end_writeback(inode);
+       clear_inode(inode);
        filemap_fdatawrite(inode->i_mapping);
 
 #ifdef CONFIG_9P_FSCACHE
index a1e6c990cd410efded55c826f03bc5db13839d75..e3dd2a1e2bfc18e47abae82bce7ee60238527c08 100644 (file)
@@ -68,24 +68,6 @@ static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
        return current_fsgid();
 }
 
-/**
- * v9fs_dentry_from_dir_inode - helper function to get the dentry from
- * dir inode.
- *
- */
-
-static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
-{
-       struct dentry *dentry;
-
-       spin_lock(&inode->i_lock);
-       /* Directory should have only one entry. */
-       BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
-       dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
-       spin_unlock(&inode->i_lock);
-       return dentry;
-}
-
 static int v9fs_test_inode_dotl(struct inode *inode, void *data)
 {
        struct v9fs_inode *v9inode = V9FS_I(inode);
@@ -415,7 +397,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
        if (dir->i_mode & S_ISGID)
                omode |= S_ISGID;
 
-       dir_dentry = v9fs_dentry_from_dir_inode(dir);
+       dir_dentry = dentry->d_parent;
        dfid = v9fs_fid_lookup(dir_dentry);
        if (IS_ERR(dfid)) {
                err = PTR_ERR(dfid);
@@ -793,7 +775,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
                 dir->i_ino, old_dentry->d_name.name, dentry->d_name.name);
 
        v9ses = v9fs_inode2v9ses(dir);
-       dir_dentry = v9fs_dentry_from_dir_inode(dir);
+       dir_dentry = dentry->d_parent;
        dfid = v9fs_fid_lookup(dir_dentry);
        if (IS_ERR(dfid))
                return PTR_ERR(dfid);
@@ -858,7 +840,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
                return -EINVAL;
 
        v9ses = v9fs_inode2v9ses(dir);
-       dir_dentry = v9fs_dentry_from_dir_inode(dir);
+       dir_dentry = dentry->d_parent;
        dfid = v9fs_fid_lookup(dir_dentry);
        if (IS_ERR(dfid)) {
                err = PTR_ERR(dfid);
index 45a0ce45d7b46afa94b1290511bc1f91a9872137..1fceb320d2f22c16bc1a900cb27597d68977dbbd 100644 (file)
 #define AFFS_GET_HASHENTRY(data,hashkey) be32_to_cpu(((struct dir_front *)data)->hashtable[hashkey])
 #define AFFS_BLOCK(sb, bh, blk)                (AFFS_HEAD(bh)->table[AFFS_SB(sb)->s_hashsize-1-(blk)])
 
-#ifdef __LITTLE_ENDIAN
-#define BO_EXBITS      0x18UL
-#elif defined(__BIG_ENDIAN)
-#define BO_EXBITS      0x00UL
-#else
-#error Endianness must be known for affs to work.
-#endif
-
 #define AFFS_HEAD(bh)          ((struct affs_head *)(bh)->b_data)
 #define AFFS_TAIL(sb, bh)      ((struct affs_tail *)((bh)->b_data+(sb)->s_blocksize-sizeof(struct affs_tail)))
 #define AFFS_ROOT_HEAD(bh)     ((struct affs_root_head *)(bh)->b_data)
index 88a4b0b50058a85857de62c958504a52baa60a4d..8bc4a59f4e7ec896b6dfb29f4b252efbbc0ee933 100644 (file)
@@ -264,7 +264,7 @@ affs_evict_inode(struct inode *inode)
        }
 
        invalidate_inode_buffers(inode);
-       end_writeback(inode);
+       clear_inode(inode);
        affs_free_prealloc(inode);
        cache_page = (unsigned long)AFFS_I(inode)->i_lc;
        if (cache_page) {
index d890ae3b2ce6f487c1e5483cbaca8c398b37c35b..95cffd38239fe867d5afe1e5a1740ec9564c3341 100644 (file)
@@ -423,7 +423,7 @@ void afs_evict_inode(struct inode *inode)
        ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
 
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
 
        afs_give_up_callback(vnode);
 
index e7f2fad7b4ce7cae2d334456f5d9998e795c917e..55c4c76560537f7fe72d6ff5f429eff666b86789 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -134,9 +134,9 @@ static int aio_setup_ring(struct kioctx *ctx)
        info->mmap_size = nr_pages * PAGE_SIZE;
        dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
        down_write(&ctx->mm->mmap_sem);
-       info->mmap_base = do_mmap(NULL, 0, info->mmap_size, 
-                                 PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
-                                 0);
+       info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size, 
+                                       PROT_READ|PROT_WRITE,
+                                       MAP_ANONYMOUS|MAP_PRIVATE, 0);
        if (IS_ERR((void *)info->mmap_base)) {
                up_write(&ctx->mm->mmap_sem);
                info->mmap_size = 0;
@@ -1446,13 +1446,13 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
                ret = compat_rw_copy_check_uvector(type,
                                (struct compat_iovec __user *)kiocb->ki_buf,
                                kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
-                               &kiocb->ki_iovec, 1);
+                               &kiocb->ki_iovec);
        else
 #endif
                ret = rw_copy_check_uvector(type,
                                (struct iovec __user *)kiocb->ki_buf,
                                kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
-                               &kiocb->ki_iovec, 1);
+                               &kiocb->ki_iovec);
        if (ret < 0)
                goto out;
 
index 584620e5dee52b5be4a456fb0572a5227a0ef534..0da90951d2776f827a905337938399ada79e8e69 100644 (file)
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -176,6 +176,11 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
                        return -EPERM;
        }
 
+       if ((ia_valid & ATTR_SIZE) && IS_I_VERSION(inode)) {
+               if (attr->ia_size != inode->i_size)
+                       inode_inc_iversion(inode);
+       }
+
        if ((ia_valid & ATTR_MODE)) {
                umode_t amode = attr->ia_mode;
                /* Flag setting protected by i_mutex */
index 6e488ebe7784458623139c91ebd42fbba0752074..8a4fed8ead30a5a051fded49a2cbfd07a1ff9630 100644 (file)
@@ -100,7 +100,7 @@ static int autofs4_show_options(struct seq_file *m, struct dentry *root)
 
 static void autofs4_evict_inode(struct inode *inode)
 {
-       end_writeback(inode);
+       clear_inode(inode);
        kfree(inode->i_private);
 }
 
index 37268c5bb98b2f2061a2e18134ebcdec0544fd9a..1b35d6bd06b06d071f8b24e28ece1aa97d87d1ce 100644 (file)
@@ -292,7 +292,6 @@ static const struct inode_operations bad_inode_ops =
        .getxattr       = bad_inode_getxattr,
        .listxattr      = bad_inode_listxattr,
        .removexattr    = bad_inode_removexattr,
-       /* truncate_range returns void */
 };
 
 
index e23dc7c8b884138ec7eabd03d98367aef50d4b6b..9870417c26e7c43852f98b3d641445b5a94c40b1 100644 (file)
@@ -174,7 +174,7 @@ static void bfs_evict_inode(struct inode *inode)
 
        truncate_inode_pages(&inode->i_data, 0);
        invalidate_inode_buffers(inode);
-       end_writeback(inode);
+       clear_inode(inode);
 
        if (inode->i_nlink)
                return;
index e658dd134b95fb375b371a931e739baa95d249a8..1b52956afe33ab07889c3963ce2c41b32133483b 100644 (file)
@@ -329,7 +329,6 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
        if (!size)
                return addr;
 
-       down_write(&current->mm->mmap_sem);
        /*
        * total_size is the size of the ELF (interpreter) image.
        * The _first_ mmap needs to know the full size, otherwise
@@ -340,13 +339,12 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
        */
        if (total_size) {
                total_size = ELF_PAGEALIGN(total_size);
-               map_addr = do_mmap(filep, addr, total_size, prot, type, off);
+               map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
                if (!BAD_ADDR(map_addr))
-                       do_munmap(current->mm, map_addr+size, total_size-size);
+                       vm_munmap(map_addr+size, total_size-size);
        } else
-               map_addr = do_mmap(filep, addr, size, prot, type, off);
+               map_addr = vm_mmap(filep, addr, size, prot, type, off);
 
-       up_write(&current->mm->mmap_sem);
        return(map_addr);
 }
 
index 6b2daf99fab8bcd91d314f0abd951b8472a092d2..178cb70acc26de80ec3db21a8455e88b7fc0360b 100644 (file)
@@ -562,7 +562,7 @@ static int load_flat_file(struct linux_binprm * bprm,
                                realdatastart = (unsigned long) -ENOMEM;
                        printk("Unable to allocate RAM for process data, errno %d\n",
                                        (int)-realdatastart);
-                       do_munmap(current->mm, textpos, text_len);
+                       vm_munmap(textpos, text_len);
                        ret = realdatastart;
                        goto err;
                }
@@ -586,8 +586,8 @@ static int load_flat_file(struct linux_binprm * bprm,
                }
                if (IS_ERR_VALUE(result)) {
                        printk("Unable to read data+bss, errno %d\n", (int)-result);
-                       do_munmap(current->mm, textpos, text_len);
-                       do_munmap(current->mm, realdatastart, len);
+                       vm_munmap(textpos, text_len);
+                       vm_munmap(realdatastart, len);
                        ret = result;
                        goto err;
                }
@@ -654,7 +654,7 @@ static int load_flat_file(struct linux_binprm * bprm,
                }
                if (IS_ERR_VALUE(result)) {
                        printk("Unable to read code+data+bss, errno %d\n",(int)-result);
-                       do_munmap(current->mm, textpos, text_len + data_len + extra +
+                       vm_munmap(textpos, text_len + data_len + extra +
                                MAX_SHARED_LIBS * sizeof(unsigned long));
                        ret = result;
                        goto err;
index 613aa06182358806cd65f077df4e687593806455..790b3cddca673284c00d78cb80a68fb550568aff 100644 (file)
@@ -505,7 +505,7 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
 
 static void bm_evict_inode(struct inode *inode)
 {
-       end_writeback(inode);
+       clear_inode(inode);
        kfree(inode->i_private);
 }
 
index 84da88539046fa21a6db865454db135df40be080..73922abba832d0a41c1cb01bbf5bcc0fec1cb78b 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
 #include <linux/swap.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/iocontext.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <linux/cgroup.h>
 #include <scsi/sg.h>           /* for struct sg_iovec */
 
 #include <trace/events/block.h>
@@ -418,6 +420,7 @@ void bio_put(struct bio *bio)
         * last put frees it
         */
        if (atomic_dec_and_test(&bio->bi_cnt)) {
+               bio_disassociate_task(bio);
                bio->bi_next = NULL;
                bio->bi_destructor(bio);
        }
@@ -1646,6 +1649,64 @@ bad:
 }
 EXPORT_SYMBOL(bioset_create);
 
+#ifdef CONFIG_BLK_CGROUP
+/**
+ * bio_associate_current - associate a bio with %current
+ * @bio: target bio
+ *
+ * Associate @bio with %current if it hasn't been associated yet.  Block
+ * layer will treat @bio as if it were issued by %current no matter which
+ * task actually issues it.
+ *
+ * This function takes an extra reference of @task's io_context and blkcg
+ * which will be put when @bio is released.  The caller must own @bio,
+ * ensure %current->io_context exists, and is responsible for synchronizing
+ * calls to this function.
+ */
+int bio_associate_current(struct bio *bio)
+{
+       struct io_context *ioc;
+       struct cgroup_subsys_state *css;
+
+       if (bio->bi_ioc)
+               return -EBUSY;
+
+       ioc = current->io_context;
+       if (!ioc)
+               return -ENOENT;
+
+       /* acquire active ref on @ioc and associate */
+       get_io_context_active(ioc);
+       bio->bi_ioc = ioc;
+
+       /* associate blkcg if exists */
+       rcu_read_lock();
+       css = task_subsys_state(current, blkio_subsys_id);
+       if (css && css_tryget(css))
+               bio->bi_css = css;
+       rcu_read_unlock();
+
+       return 0;
+}
+
+/**
+ * bio_disassociate_task - undo bio_associate_current()
+ * @bio: target bio
+ */
+void bio_disassociate_task(struct bio *bio)
+{
+       if (bio->bi_ioc) {
+               put_io_context(bio->bi_ioc);
+               bio->bi_ioc = NULL;
+       }
+       if (bio->bi_css) {
+               css_put(bio->bi_css);
+               bio->bi_css = NULL;
+       }
+}
+
+#endif /* CONFIG_BLK_CGROUP */
+
 static void __init biovec_init_slabs(void)
 {
        int i;
index ba11c30f302dd37012d361a14c96db7c8ca22d0f..c2bbe1fb132632c14ebb2ea0675ad78500441a52 100644 (file)
@@ -487,7 +487,7 @@ static void bdev_evict_inode(struct inode *inode)
        struct list_head *p;
        truncate_inode_pages(&inode->i_data, 0);
        invalidate_inode_buffers(inode); /* is it needed here? */
-       end_writeback(inode);
+       clear_inode(inode);
        spin_lock(&bdev_lock);
        while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
                __bd_forget(list_entry(p, struct inode, i_devices));
index 89b156d85d63c9f29b66413e1558e85a758d0e12..761e2cd8fed16e6046951e50504b8bb9e7acd3e4 100644 (file)
@@ -227,7 +227,11 @@ int btrfs_init_acl(struct btrfs_trans_handle *trans,
                if (ret > 0) {
                        /* we need an acl */
                        ret = btrfs_set_acl(trans, inode, acl, ACL_TYPE_ACCESS);
+               } else {
+                       cache_no_acl(inode);
                }
+       } else {
+               cache_no_acl(inode);
        }
 failed:
        posix_acl_release(acl);
index bcec06750232e6cc3de09c62648201547709222b..3f75895c919bcc3b80ae63ab1fca42dcf335f95b 100644 (file)
 #include "delayed-ref.h"
 #include "locking.h"
 
+struct extent_inode_elem {
+       u64 inum;
+       u64 offset;
+       struct extent_inode_elem *next;
+};
+
+static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
+                               struct btrfs_file_extent_item *fi,
+                               u64 extent_item_pos,
+                               struct extent_inode_elem **eie)
+{
+       u64 data_offset;
+       u64 data_len;
+       struct extent_inode_elem *e;
+
+       data_offset = btrfs_file_extent_offset(eb, fi);
+       data_len = btrfs_file_extent_num_bytes(eb, fi);
+
+       if (extent_item_pos < data_offset ||
+           extent_item_pos >= data_offset + data_len)
+               return 1;
+
+       e = kmalloc(sizeof(*e), GFP_NOFS);
+       if (!e)
+               return -ENOMEM;
+
+       e->next = *eie;
+       e->inum = key->objectid;
+       e->offset = key->offset + (extent_item_pos - data_offset);
+       *eie = e;
+
+       return 0;
+}
+
+static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
+                               u64 extent_item_pos,
+                               struct extent_inode_elem **eie)
+{
+       u64 disk_byte;
+       struct btrfs_key key;
+       struct btrfs_file_extent_item *fi;
+       int slot;
+       int nritems;
+       int extent_type;
+       int ret;
+
+       /*
+        * from the shared data ref, we only have the leaf but we need
+        * the key. thus, we must look into all items and see that we
+        * find one (some) with a reference to our extent item.
+        */
+       nritems = btrfs_header_nritems(eb);
+       for (slot = 0; slot < nritems; ++slot) {
+               btrfs_item_key_to_cpu(eb, &key, slot);
+               if (key.type != BTRFS_EXTENT_DATA_KEY)
+                       continue;
+               fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+               extent_type = btrfs_file_extent_type(eb, fi);
+               if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+                       continue;
+               /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
+               disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+               if (disk_byte != wanted_disk_byte)
+                       continue;
+
+               ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
 /*
  * this structure records all encountered refs on the way up to the root
  */
 struct __prelim_ref {
        struct list_head list;
        u64 root_id;
-       struct btrfs_key key;
+       struct btrfs_key key_for_search;
        int level;
        int count;
+       struct extent_inode_elem *inode_list;
        u64 parent;
        u64 wanted_disk_byte;
 };
 
+/*
+ * the rules for all callers of this function are:
+ * - obtaining the parent is the goal
+ * - if you add a key, you must know that it is a correct key
+ * - if you cannot add the parent or a correct key, then we will look into the
+ *   block later to set a correct key
+ *
+ * delayed refs
+ * ============
+ *        backref type | shared | indirect | shared | indirect
+ * information         |   tree |     tree |   data |     data
+ * --------------------+--------+----------+--------+----------
+ *      parent logical |    y   |     -    |    -   |     -
+ *      key to resolve |    -   |     y    |    y   |     y
+ *  tree block logical |    -   |     -    |    -   |     -
+ *  root for resolving |    y   |     y    |    y   |     y
+ *
+ * - column 1:       we've the parent -> done
+ * - column 2, 3, 4: we use the key to find the parent
+ *
+ * on disk refs (inline or keyed)
+ * ==============================
+ *        backref type | shared | indirect | shared | indirect
+ * information         |   tree |     tree |   data |     data
+ * --------------------+--------+----------+--------+----------
+ *      parent logical |    y   |     -    |    y   |     -
+ *      key to resolve |    -   |     -    |    -   |     y
+ *  tree block logical |    y   |     y    |    y   |     y
+ *  root for resolving |    -   |     y    |    y   |     y
+ *
+ * - column 1, 3: we've the parent -> done
+ * - column 2:    we take the first key from the block to find the parent
+ *                (see __add_missing_keys)
+ * - column 4:    we use the key to find the parent
+ *
+ * additional information that's available but not required to find the parent
+ * block might help in merging entries to gain some speed.
+ */
+
 static int __add_prelim_ref(struct list_head *head, u64 root_id,
-                           struct btrfs_key *key, int level, u64 parent,
-                           u64 wanted_disk_byte, int count)
+                           struct btrfs_key *key, int level,
+                           u64 parent, u64 wanted_disk_byte, int count)
 {
        struct __prelim_ref *ref;
 
@@ -50,10 +163,11 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id,
 
        ref->root_id = root_id;
        if (key)
-               ref->key = *key;
+               ref->key_for_search = *key;
        else
-               memset(&ref->key, 0, sizeof(ref->key));
+               memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
 
+       ref->inode_list = NULL;
        ref->level = level;
        ref->count = count;
        ref->parent = parent;
@@ -64,18 +178,26 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id,
 }
 
 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
-                               struct ulist *parents,
-                               struct extent_buffer *eb, int level,
-                               u64 wanted_objectid, u64 wanted_disk_byte)
+                               struct ulist *parents, int level,
+                               struct btrfs_key *key, u64 wanted_disk_byte,
+                               const u64 *extent_item_pos)
 {
        int ret;
-       int slot;
+       int slot = path->slots[level];
+       struct extent_buffer *eb = path->nodes[level];
        struct btrfs_file_extent_item *fi;
-       struct btrfs_key key;
+       struct extent_inode_elem *eie = NULL;
        u64 disk_byte;
+       u64 wanted_objectid = key->objectid;
 
 add_parent:
-       ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
+       if (level == 0 && extent_item_pos) {
+               fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+               ret = check_extent_in_eb(key, eb, fi, *extent_item_pos, &eie);
+               if (ret < 0)
+                       return ret;
+       }
+       ret = ulist_add(parents, eb->start, (unsigned long)eie, GFP_NOFS);
        if (ret < 0)
                return ret;
 
@@ -89,6 +211,7 @@ add_parent:
         * repeat this until we don't find any additional EXTENT_DATA items.
         */
        while (1) {
+               eie = NULL;
                ret = btrfs_next_leaf(root, path);
                if (ret < 0)
                        return ret;
@@ -97,9 +220,9 @@ add_parent:
 
                eb = path->nodes[0];
                for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
-                       btrfs_item_key_to_cpu(eb, &key, slot);
-                       if (key.objectid != wanted_objectid ||
-                           key.type != BTRFS_EXTENT_DATA_KEY)
+                       btrfs_item_key_to_cpu(eb, key, slot);
+                       if (key->objectid != wanted_objectid ||
+                           key->type != BTRFS_EXTENT_DATA_KEY)
                                return 0;
                        fi = btrfs_item_ptr(eb, slot,
                                                struct btrfs_file_extent_item);
@@ -118,8 +241,10 @@ add_parent:
  */
 static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                                        int search_commit_root,
+                                       u64 time_seq,
                                        struct __prelim_ref *ref,
-                                       struct ulist *parents)
+                                       struct ulist *parents,
+                                       const u64 *extent_item_pos)
 {
        struct btrfs_path *path;
        struct btrfs_root *root;
@@ -152,12 +277,13 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                goto out;
 
        path->lowest_level = level;
-       ret = btrfs_search_slot(NULL, root, &ref->key, path, 0, 0);
+       ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);
        pr_debug("search slot in root %llu (level %d, ref count %d) returned "
                 "%d for key (%llu %u %llu)\n",
                 (unsigned long long)ref->root_id, level, ref->count, ret,
-                (unsigned long long)ref->key.objectid, ref->key.type,
-                (unsigned long long)ref->key.offset);
+                (unsigned long long)ref->key_for_search.objectid,
+                ref->key_for_search.type,
+                (unsigned long long)ref->key_for_search.offset);
        if (ret < 0)
                goto out;
 
@@ -179,9 +305,8 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
        }
 
-       /* the last two parameters will only be used for level == 0 */
-       ret = add_all_parents(root, path, parents, eb, level, key.objectid,
-                               ref->wanted_disk_byte);
+       ret = add_all_parents(root, path, parents, level, &key,
+                               ref->wanted_disk_byte, extent_item_pos);
 out:
        btrfs_free_path(path);
        return ret;
@@ -191,8 +316,9 @@ out:
  * resolve all indirect backrefs from the list
  */
 static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
-                                  int search_commit_root,
-                                  struct list_head *head)
+                                  int search_commit_root, u64 time_seq,
+                                  struct list_head *head,
+                                  const u64 *extent_item_pos)
 {
        int err;
        int ret = 0;
@@ -201,6 +327,7 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
        struct __prelim_ref *new_ref;
        struct ulist *parents;
        struct ulist_node *node;
+       struct ulist_iterator uiter;
 
        parents = ulist_alloc(GFP_NOFS);
        if (!parents)
@@ -217,7 +344,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
                if (ref->count == 0)
                        continue;
                err = __resolve_indirect_ref(fs_info, search_commit_root,
-                                            ref, parents);
+                                            time_seq, ref, parents,
+                                            extent_item_pos);
                if (err) {
                        if (ret == 0)
                                ret = err;
@@ -225,11 +353,14 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
                }
 
                /* we put the first parent into the ref at hand */
-               node = ulist_next(parents, NULL);
+               ULIST_ITER_INIT(&uiter);
+               node = ulist_next(parents, &uiter);
                ref->parent = node ? node->val : 0;
+               ref->inode_list =
+                       node ? (struct extent_inode_elem *)node->aux : 0;
 
                /* additional parents require new refs being added here */
-               while ((node = ulist_next(parents, node))) {
+               while ((node = ulist_next(parents, &uiter))) {
                        new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
                        if (!new_ref) {
                                ret = -ENOMEM;
@@ -237,6 +368,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
                        }
                        memcpy(new_ref, ref, sizeof(*ref));
                        new_ref->parent = node->val;
+                       new_ref->inode_list =
+                                       (struct extent_inode_elem *)node->aux;
                        list_add(&new_ref->list, &ref->list);
                }
                ulist_reinit(parents);
@@ -246,10 +379,65 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
        return ret;
 }
 
+static inline int ref_for_same_block(struct __prelim_ref *ref1,
+                                    struct __prelim_ref *ref2)
+{
+       if (ref1->level != ref2->level)
+               return 0;
+       if (ref1->root_id != ref2->root_id)
+               return 0;
+       if (ref1->key_for_search.type != ref2->key_for_search.type)
+               return 0;
+       if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
+               return 0;
+       if (ref1->key_for_search.offset != ref2->key_for_search.offset)
+               return 0;
+       if (ref1->parent != ref2->parent)
+               return 0;
+
+       return 1;
+}
+
+/*
+ * read tree blocks and add keys where required.
+ */
+static int __add_missing_keys(struct btrfs_fs_info *fs_info,
+                             struct list_head *head)
+{
+       struct list_head *pos;
+       struct extent_buffer *eb;
+
+       list_for_each(pos, head) {
+               struct __prelim_ref *ref;
+               ref = list_entry(pos, struct __prelim_ref, list);
+
+               if (ref->parent)
+                       continue;
+               if (ref->key_for_search.type)
+                       continue;
+               BUG_ON(!ref->wanted_disk_byte);
+               eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
+                                    fs_info->tree_root->leafsize, 0);
+               BUG_ON(!eb);
+               btrfs_tree_read_lock(eb);
+               if (btrfs_header_level(eb) == 0)
+                       btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
+               else
+                       btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
+               btrfs_tree_read_unlock(eb);
+               free_extent_buffer(eb);
+       }
+       return 0;
+}
+
 /*
  * merge two lists of backrefs and adjust counts accordingly
  *
  * mode = 1: merge identical keys, if key is set
+ *    FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
+ *           additionally, we could even add a key range for the blocks we
+ *           looked into to merge even more (-> replace unresolved refs by those
+ *           having a parent).
  * mode = 2: merge identical parents
  */
 static int __merge_refs(struct list_head *head, int mode)
@@ -263,20 +451,21 @@ static int __merge_refs(struct list_head *head, int mode)
 
                ref1 = list_entry(pos1, struct __prelim_ref, list);
 
-               if (mode == 1 && ref1->key.type == 0)
-                       continue;
                for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
                     pos2 = n2, n2 = pos2->next) {
                        struct __prelim_ref *ref2;
+                       struct __prelim_ref *xchg;
 
                        ref2 = list_entry(pos2, struct __prelim_ref, list);
 
                        if (mode == 1) {
-                               if (memcmp(&ref1->key, &ref2->key,
-                                          sizeof(ref1->key)) ||
-                                   ref1->level != ref2->level ||
-                                   ref1->root_id != ref2->root_id)
+                               if (!ref_for_same_block(ref1, ref2))
                                        continue;
+                               if (!ref1->parent && ref2->parent) {
+                                       xchg = ref1;
+                                       ref1 = ref2;
+                                       ref2 = xchg;
+                               }
                                ref1->count += ref2->count;
                        } else {
                                if (ref1->parent != ref2->parent)
@@ -296,16 +485,17 @@ static int __merge_refs(struct list_head *head, int mode)
  * smaller or equal that seq to the list
  */
 static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
-                             struct btrfs_key *info_key,
                              struct list_head *prefs)
 {
        struct btrfs_delayed_extent_op *extent_op = head->extent_op;
        struct rb_node *n = &head->node.rb_node;
+       struct btrfs_key key;
+       struct btrfs_key op_key = {0};
        int sgn;
        int ret = 0;
 
        if (extent_op && extent_op->update_key)
-               btrfs_disk_key_to_cpu(info_key, &extent_op->key);
+               btrfs_disk_key_to_cpu(&op_key, &extent_op->key);
 
        while ((n = rb_prev(n))) {
                struct btrfs_delayed_ref_node *node;
@@ -337,7 +527,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
                        struct btrfs_delayed_tree_ref *ref;
 
                        ref = btrfs_delayed_node_to_tree_ref(node);
-                       ret = __add_prelim_ref(prefs, ref->root, info_key,
+                       ret = __add_prelim_ref(prefs, ref->root, &op_key,
                                               ref->level + 1, 0, node->bytenr,
                                               node->ref_mod * sgn);
                        break;
@@ -346,7 +536,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
                        struct btrfs_delayed_tree_ref *ref;
 
                        ref = btrfs_delayed_node_to_tree_ref(node);
-                       ret = __add_prelim_ref(prefs, ref->root, info_key,
+                       ret = __add_prelim_ref(prefs, ref->root, NULL,
                                               ref->level + 1, ref->parent,
                                               node->bytenr,
                                               node->ref_mod * sgn);
@@ -354,8 +544,6 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
                }
                case BTRFS_EXTENT_DATA_REF_KEY: {
                        struct btrfs_delayed_data_ref *ref;
-                       struct btrfs_key key;
-
                        ref = btrfs_delayed_node_to_data_ref(node);
 
                        key.objectid = ref->objectid;
@@ -368,7 +556,6 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
                }
                case BTRFS_SHARED_DATA_REF_KEY: {
                        struct btrfs_delayed_data_ref *ref;
-                       struct btrfs_key key;
 
                        ref = btrfs_delayed_node_to_data_ref(node);
 
@@ -394,8 +581,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
  */
 static int __add_inline_refs(struct btrfs_fs_info *fs_info,
                             struct btrfs_path *path, u64 bytenr,
-                            struct btrfs_key *info_key, int *info_level,
-                            struct list_head *prefs)
+                            int *info_level, struct list_head *prefs)
 {
        int ret = 0;
        int slot;
@@ -411,7 +597,7 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
         * enumerate all inline refs
         */
        leaf = path->nodes[0];
-       slot = path->slots[0] - 1;
+       slot = path->slots[0];
 
        item_size = btrfs_item_size_nr(leaf, slot);
        BUG_ON(item_size < sizeof(*ei));
@@ -424,12 +610,9 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
 
        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                struct btrfs_tree_block_info *info;
-               struct btrfs_disk_key disk_key;
 
                info = (struct btrfs_tree_block_info *)ptr;
                *info_level = btrfs_tree_block_level(leaf, info);
-               btrfs_tree_block_key(leaf, info, &disk_key);
-               btrfs_disk_key_to_cpu(info_key, &disk_key);
                ptr += sizeof(struct btrfs_tree_block_info);
                BUG_ON(ptr > end);
        } else {
@@ -447,7 +630,7 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
 
                switch (type) {
                case BTRFS_SHARED_BLOCK_REF_KEY:
-                       ret = __add_prelim_ref(prefs, 0, info_key,
+                       ret = __add_prelim_ref(prefs, 0, NULL,
                                                *info_level + 1, offset,
                                                bytenr, 1);
                        break;
@@ -462,8 +645,9 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
                        break;
                }
                case BTRFS_TREE_BLOCK_REF_KEY:
-                       ret = __add_prelim_ref(prefs, offset, info_key,
-                                              *info_level + 1, 0, bytenr, 1);
+                       ret = __add_prelim_ref(prefs, offset, NULL,
+                                              *info_level + 1, 0,
+                                              bytenr, 1);
                        break;
                case BTRFS_EXTENT_DATA_REF_KEY: {
                        struct btrfs_extent_data_ref *dref;
@@ -477,8 +661,8 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
                        key.type = BTRFS_EXTENT_DATA_KEY;
                        key.offset = btrfs_extent_data_ref_offset(leaf, dref);
                        root = btrfs_extent_data_ref_root(leaf, dref);
-                       ret = __add_prelim_ref(prefs, root, &key, 0, 0, bytenr,
-                                               count);
+                       ret = __add_prelim_ref(prefs, root, &key, 0, 0,
+                                              bytenr, count);
                        break;
                }
                default:
@@ -496,8 +680,7 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
  */
 static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
                            struct btrfs_path *path, u64 bytenr,
-                           struct btrfs_key *info_key, int info_level,
-                           struct list_head *prefs)
+                           int info_level, struct list_head *prefs)
 {
        struct btrfs_root *extent_root = fs_info->extent_root;
        int ret;
@@ -527,7 +710,7 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
 
                switch (key.type) {
                case BTRFS_SHARED_BLOCK_REF_KEY:
-                       ret = __add_prelim_ref(prefs, 0, info_key,
+                       ret = __add_prelim_ref(prefs, 0, NULL,
                                                info_level + 1, key.offset,
                                                bytenr, 1);
                        break;
@@ -543,8 +726,9 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
                        break;
                }
                case BTRFS_TREE_BLOCK_REF_KEY:
-                       ret = __add_prelim_ref(prefs, key.offset, info_key,
-                                               info_level + 1, 0, bytenr, 1);
+                       ret = __add_prelim_ref(prefs, key.offset, NULL,
+                                              info_level + 1, 0,
+                                              bytenr, 1);
                        break;
                case BTRFS_EXTENT_DATA_REF_KEY: {
                        struct btrfs_extent_data_ref *dref;
@@ -560,7 +744,7 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
                        key.offset = btrfs_extent_data_ref_offset(leaf, dref);
                        root = btrfs_extent_data_ref_root(leaf, dref);
                        ret = __add_prelim_ref(prefs, root, &key, 0, 0,
-                                               bytenr, count);
+                                              bytenr, count);
                        break;
                }
                default:
@@ -582,11 +766,12 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
  */
 static int find_parent_nodes(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info, u64 bytenr,
-                            u64 seq, struct ulist *refs, struct ulist *roots)
+                            u64 delayed_ref_seq, u64 time_seq,
+                            struct ulist *refs, struct ulist *roots,
+                            const u64 *extent_item_pos)
 {
        struct btrfs_key key;
        struct btrfs_path *path;
-       struct btrfs_key info_key = { 0 };
        struct btrfs_delayed_ref_root *delayed_refs = NULL;
        struct btrfs_delayed_ref_head *head;
        int info_level = 0;
@@ -645,7 +830,7 @@ again:
                                btrfs_put_delayed_ref(&head->node);
                                goto again;
                        }
-                       ret = __add_delayed_refs(head, seq, &info_key,
+                       ret = __add_delayed_refs(head, delayed_ref_seq,
                                                 &prefs_delayed);
                        if (ret) {
                                spin_unlock(&delayed_refs->lock);
@@ -659,16 +844,17 @@ again:
                struct extent_buffer *leaf;
                int slot;
 
+               path->slots[0]--;
                leaf = path->nodes[0];
-               slot = path->slots[0] - 1;
+               slot = path->slots[0];
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.objectid == bytenr &&
                    key.type == BTRFS_EXTENT_ITEM_KEY) {
                        ret = __add_inline_refs(fs_info, path, bytenr,
-                                               &info_key, &info_level, &prefs);
+                                               &info_level, &prefs);
                        if (ret)
                                goto out;
-                       ret = __add_keyed_refs(fs_info, path, bytenr, &info_key,
+                       ret = __add_keyed_refs(fs_info, path, bytenr,
                                               info_level, &prefs);
                        if (ret)
                                goto out;
@@ -676,21 +862,18 @@ again:
        }
        btrfs_release_path(path);
 
-       /*
-        * when adding the delayed refs above, the info_key might not have
-        * been known yet. Go over the list and replace the missing keys
-        */
-       list_for_each_entry(ref, &prefs_delayed, list) {
-               if ((ref->key.offset | ref->key.type | ref->key.objectid) == 0)
-                       memcpy(&ref->key, &info_key, sizeof(ref->key));
-       }
        list_splice_init(&prefs_delayed, &prefs);
 
+       ret = __add_missing_keys(fs_info, &prefs);
+       if (ret)
+               goto out;
+
        ret = __merge_refs(&prefs, 1);
        if (ret)
                goto out;
 
-       ret = __resolve_indirect_refs(fs_info, search_commit_root, &prefs);
+       ret = __resolve_indirect_refs(fs_info, search_commit_root, time_seq,
+                                     &prefs, extent_item_pos);
        if (ret)
                goto out;
 
@@ -709,7 +892,33 @@ again:
                        BUG_ON(ret < 0);
                }
                if (ref->count && ref->parent) {
-                       ret = ulist_add(refs, ref->parent, 0, GFP_NOFS);
+                       struct extent_inode_elem *eie = NULL;
+                       if (extent_item_pos && !ref->inode_list) {
+                               u32 bsz;
+                               struct extent_buffer *eb;
+                               bsz = btrfs_level_size(fs_info->extent_root,
+                                                       info_level);
+                               eb = read_tree_block(fs_info->extent_root,
+                                                          ref->parent, bsz, 0);
+                               BUG_ON(!eb);
+                               ret = find_extent_in_eb(eb, bytenr,
+                                                       *extent_item_pos, &eie);
+                               ref->inode_list = eie;
+                               free_extent_buffer(eb);
+                       }
+                       ret = ulist_add_merge(refs, ref->parent,
+                                             (unsigned long)ref->inode_list,
+                                             (unsigned long *)&eie, GFP_NOFS);
+                       if (!ret && extent_item_pos) {
+                               /*
+                                * we've recorded that parent, so we must extend
+                                * its inode list here
+                                */
+                               BUG_ON(!eie);
+                               while (eie->next)
+                                       eie = eie->next;
+                               eie->next = ref->inode_list;
+                       }
                        BUG_ON(ret < 0);
                }
                kfree(ref);
@@ -734,6 +943,28 @@ out:
        return ret;
 }
 
+static void free_leaf_list(struct ulist *blocks)
+{
+       struct ulist_node *node = NULL;
+       struct extent_inode_elem *eie;
+       struct extent_inode_elem *eie_next;
+       struct ulist_iterator uiter;
+
+       ULIST_ITER_INIT(&uiter);
+       while ((node = ulist_next(blocks, &uiter))) {
+               if (!node->aux)
+                       continue;
+               eie = (struct extent_inode_elem *)node->aux;
+               for (; eie; eie = eie_next) {
+                       eie_next = eie->next;
+                       kfree(eie);
+               }
+               node->aux = 0;
+       }
+
+       ulist_free(blocks);
+}
+
 /*
  * Finds all leafs with a reference to the specified combination of bytenr and
  * offset. key_list_head will point to a list of corresponding keys (caller must
@@ -744,7 +975,9 @@ out:
  */
 static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
                                struct btrfs_fs_info *fs_info, u64 bytenr,
-                               u64 num_bytes, u64 seq, struct ulist **leafs)
+                               u64 delayed_ref_seq, u64 time_seq,
+                               struct ulist **leafs,
+                               const u64 *extent_item_pos)
 {
        struct ulist *tmp;
        int ret;
@@ -758,11 +991,12 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
                return -ENOMEM;
        }
 
-       ret = find_parent_nodes(trans, fs_info, bytenr, seq, *leafs, tmp);
+       ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
+                               time_seq, *leafs, tmp, extent_item_pos);
        ulist_free(tmp);
 
        if (ret < 0 && ret != -ENOENT) {
-               ulist_free(*leafs);
+               free_leaf_list(*leafs);
                return ret;
        }
 
@@ -784,10 +1018,12 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
  */
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
                                struct btrfs_fs_info *fs_info, u64 bytenr,
-                               u64 num_bytes, u64 seq, struct ulist **roots)
+                               u64 delayed_ref_seq, u64 time_seq,
+                               struct ulist **roots)
 {
        struct ulist *tmp;
        struct ulist_node *node = NULL;
+       struct ulist_iterator uiter;
        int ret;
 
        tmp = ulist_alloc(GFP_NOFS);
@@ -799,15 +1035,16 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
                return -ENOMEM;
        }
 
+       ULIST_ITER_INIT(&uiter);
        while (1) {
-               ret = find_parent_nodes(trans, fs_info, bytenr, seq,
-                                       tmp, *roots);
+               ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
+                                       time_seq, tmp, *roots, NULL);
                if (ret < 0 && ret != -ENOENT) {
                        ulist_free(tmp);
                        ulist_free(*roots);
                        return ret;
                }
-               node = ulist_next(tmp, node);
+               node = ulist_next(tmp, &uiter);
                if (!node)
                        break;
                bytenr = node->val;
@@ -1093,67 +1330,25 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
        return 0;
 }
 
-static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, u64 logical,
-                               u64 orig_extent_item_objectid,
-                               u64 extent_item_pos, u64 root,
+static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
+                               u64 root, u64 extent_item_objectid,
                                iterate_extent_inodes_t *iterate, void *ctx)
 {
-       u64 disk_byte;
-       struct btrfs_key key;
-       struct btrfs_file_extent_item *fi;
-       struct extent_buffer *eb;
-       int slot;
-       int nritems;
+       struct extent_inode_elem *eie;
        int ret = 0;
-       int extent_type;
-       u64 data_offset;
-       u64 data_len;
-
-       eb = read_tree_block(fs_info->tree_root, logical,
-                               fs_info->tree_root->leafsize, 0);
-       if (!eb)
-               return -EIO;
-
-       /*
-        * from the shared data ref, we only have the leaf but we need
-        * the key. thus, we must look into all items and see that we
-        * find one (some) with a reference to our extent item.
-        */
-       nritems = btrfs_header_nritems(eb);
-       for (slot = 0; slot < nritems; ++slot) {
-               btrfs_item_key_to_cpu(eb, &key, slot);
-               if (key.type != BTRFS_EXTENT_DATA_KEY)
-                       continue;
-               fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
-               extent_type = btrfs_file_extent_type(eb, fi);
-               if (extent_type == BTRFS_FILE_EXTENT_INLINE)
-                       continue;
-               /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
-               disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
-               if (disk_byte != orig_extent_item_objectid)
-                       continue;
-
-               data_offset = btrfs_file_extent_offset(eb, fi);
-               data_len = btrfs_file_extent_num_bytes(eb, fi);
-
-               if (extent_item_pos < data_offset ||
-                   extent_item_pos >= data_offset + data_len)
-                       continue;
 
+       for (eie = inode_list; eie; eie = eie->next) {
                pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), "
-                               "root %llu\n", orig_extent_item_objectid,
-                               key.objectid, key.offset, root);
-               ret = iterate(key.objectid,
-                               key.offset + (extent_item_pos - data_offset),
-                               root, ctx);
+                        "root %llu\n", extent_item_objectid,
+                        eie->inum, eie->offset, root);
+               ret = iterate(eie->inum, eie->offset, root, ctx);
                if (ret) {
-                       pr_debug("stopping iteration because ret=%d\n", ret);
+                       pr_debug("stopping iteration for %llu due to ret=%d\n",
+                                extent_item_objectid, ret);
                        break;
                }
        }
 
-       free_extent_buffer(eb);
-
        return ret;
 }
 
@@ -1175,7 +1370,10 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
        struct ulist *roots = NULL;
        struct ulist_node *ref_node = NULL;
        struct ulist_node *root_node = NULL;
-       struct seq_list seq_elem;
+       struct seq_list seq_elem = {};
+       struct seq_list tree_mod_seq_elem = {};
+       struct ulist_iterator ref_uiter;
+       struct ulist_iterator root_uiter;
        struct btrfs_delayed_ref_root *delayed_refs = NULL;
 
        pr_debug("resolving all inodes for extent %llu\n",
@@ -1192,34 +1390,41 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
                spin_lock(&delayed_refs->lock);
                btrfs_get_delayed_seq(delayed_refs, &seq_elem);
                spin_unlock(&delayed_refs->lock);
+               btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
        }
 
        ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
-                                  extent_item_pos, seq_elem.seq,
-                                  &refs);
-
+                                  seq_elem.seq, tree_mod_seq_elem.seq, &refs,
+                                  &extent_item_pos);
        if (ret)
                goto out;
 
-       while (!ret && (ref_node = ulist_next(refs, ref_node))) {
-               ret = btrfs_find_all_roots(trans, fs_info, ref_node->val, -1,
-                                               seq_elem.seq, &roots);
+       ULIST_ITER_INIT(&ref_uiter);
+       while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
+               ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
+                                               seq_elem.seq,
+                                               tree_mod_seq_elem.seq, &roots);
                if (ret)
                        break;
-               while (!ret && (root_node = ulist_next(roots, root_node))) {
-                       pr_debug("root %llu references leaf %llu\n",
-                                       root_node->val, ref_node->val);
-                       ret = iterate_leaf_refs(fs_info, ref_node->val,
-                                               extent_item_objectid,
-                                               extent_item_pos, root_node->val,
-                                               iterate, ctx);
+               ULIST_ITER_INIT(&root_uiter);
+               while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
+                       pr_debug("root %llu references leaf %llu, data list "
+                                "%#lx\n", root_node->val, ref_node->val,
+                                ref_node->aux);
+                       ret = iterate_leaf_refs(
+                               (struct extent_inode_elem *)ref_node->aux,
+                               root_node->val, extent_item_objectid,
+                               iterate, ctx);
                }
+               ulist_free(roots);
+               roots = NULL;
        }
 
-       ulist_free(refs);
+       free_leaf_list(refs);
        ulist_free(roots);
 out:
        if (!search_commit_root) {
+               btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
                btrfs_put_delayed_seq(delayed_refs, &seq_elem);
                btrfs_end_transaction(trans, fs_info->extent_root);
        }
index 57ea2e959e4dcfaba89e4ee0b833f5744c3639d3..c18d8ac7b795da487c4a526979954e91cbddf52b 100644 (file)
@@ -58,7 +58,8 @@ int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
 
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
                                struct btrfs_fs_info *fs_info, u64 bytenr,
-                               u64 num_bytes, u64 seq, struct ulist **roots);
+                               u64 delayed_ref_seq, u64 time_seq,
+                               struct ulist **roots);
 
 struct btrfs_data_container *init_data_container(u32 total_bytes);
 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
index 9b9b15fd5204347c5ef2931fb186af679cb0d369..e616f8872e69bb0cf3b9a3f369ba49eeca57f2de 100644 (file)
 #include "ordered-data.h"
 #include "delayed-inode.h"
 
+/*
+ * ordered_data_close is set by truncate when a file that used
+ * to have good data has been truncated to zero.  When it is set
+ * the btrfs file release call will add this inode to the
+ * ordered operations list so that we make sure to flush out any
+ * new data the application may have written before commit.
+ */
+#define BTRFS_INODE_ORDERED_DATA_CLOSE         0
+#define BTRFS_INODE_ORPHAN_META_RESERVED       1
+#define BTRFS_INODE_DUMMY                      2
+#define BTRFS_INODE_IN_DEFRAG                  3
+#define BTRFS_INODE_DELALLOC_META_RESERVED     4
+#define BTRFS_INODE_HAS_ORPHAN_ITEM            5
+
 /* in memory btrfs inode */
 struct btrfs_inode {
        /* which subvolume this inode belongs to */
@@ -57,9 +71,6 @@ struct btrfs_inode {
        /* used to order data wrt metadata */
        struct btrfs_ordered_inode_tree ordered_tree;
 
-       /* for keeping track of orphaned inodes */
-       struct list_head i_orphan;
-
        /* list of all the delalloc inodes in the FS.  There are times we need
         * to write all the delalloc pages to disk, and this list is used
         * to walk them all.
@@ -78,14 +89,13 @@ struct btrfs_inode {
        /* the space_info for where this inode's data allocations are done */
        struct btrfs_space_info *space_info;
 
+       unsigned long runtime_flags;
+
        /* full 64 bit generation number, struct vfs_inode doesn't have a big
         * enough field for this.
         */
        u64 generation;
 
-       /* sequence number for NFS changes */
-       u64 sequence;
-
        /*
         * transid of the trans_handle that last modified this inode
         */
@@ -144,23 +154,10 @@ struct btrfs_inode {
        unsigned outstanding_extents;
        unsigned reserved_extents;
 
-       /*
-        * ordered_data_close is set by truncate when a file that used
-        * to have good data has been truncated to zero.  When it is set
-        * the btrfs file release call will add this inode to the
-        * ordered operations list so that we make sure to flush out any
-        * new data the application may have written before commit.
-        */
-       unsigned ordered_data_close:1;
-       unsigned orphan_meta_reserved:1;
-       unsigned dummy_inode:1;
-       unsigned in_defrag:1;
-       unsigned delalloc_meta_reserved:1;
-
        /*
         * always compress this one file
         */
-       unsigned force_compress:4;
+       unsigned force_compress;
 
        struct btrfs_delayed_node *delayed_node;
 
@@ -202,4 +199,17 @@ static inline bool btrfs_is_free_space_inode(struct btrfs_root *root,
        return false;
 }
 
+static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
+{
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       int ret = 0;
+
+       mutex_lock(&root->log_mutex);
+       if (BTRFS_I(inode)->logged_trans == generation &&
+           BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
+               ret = 1;
+       mutex_unlock(&root->log_mutex);
+       return ret;
+}
+
 #endif
index c053e90f2006f580ed4f8a4440fb520639c3edd6..9cebb1fd6a3cc59919c7c990d3016caee52b5849 100644 (file)
 #define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300
 #define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6)   /* in characters,
                                                         * excluding " [...]" */
-#define BTRFSIC_BLOCK_SIZE PAGE_SIZE
-
 #define BTRFSIC_GENERATION_UNKNOWN ((u64)-1)
 
 /*
@@ -210,8 +208,9 @@ struct btrfsic_block_data_ctx {
        u64 dev_bytenr;         /* physical bytenr on device */
        u32 len;
        struct btrfsic_dev_state *dev;
-       char *data;
-       struct buffer_head *bh; /* do not use if set to NULL */
+       char **datav;
+       struct page **pagev;
+       void *mem_to_free;
 };
 
 /* This structure is used to implement recursion without occupying
@@ -243,6 +242,8 @@ struct btrfsic_state {
        struct btrfs_root *root;
        u64 max_superblock_generation;
        struct btrfsic_block *latest_superblock;
+       u32 metablock_size;
+       u32 datablock_size;
 };
 
 static void btrfsic_block_init(struct btrfsic_block *b);
@@ -290,8 +291,10 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
 static int btrfsic_process_metablock(struct btrfsic_state *state,
                                     struct btrfsic_block *block,
                                     struct btrfsic_block_data_ctx *block_ctx,
-                                    struct btrfs_header *hdr,
                                     int limit_nesting, int force_iodone_flag);
+static void btrfsic_read_from_block_data(
+       struct btrfsic_block_data_ctx *block_ctx,
+       void *dst, u32 offset, size_t len);
 static int btrfsic_create_link_to_next_block(
                struct btrfsic_state *state,
                struct btrfsic_block *block,
@@ -318,12 +321,13 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
 static int btrfsic_read_block(struct btrfsic_state *state,
                              struct btrfsic_block_data_ctx *block_ctx);
 static void btrfsic_dump_database(struct btrfsic_state *state);
+static void btrfsic_complete_bio_end_io(struct bio *bio, int err);
 static int btrfsic_test_for_metadata(struct btrfsic_state *state,
-                                    const u8 *data, unsigned int size);
+                                    char **datav, unsigned int num_pages);
 static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
-                                         u64 dev_bytenr, u8 *mapped_data,
-                                         unsigned int len, struct bio *bio,
-                                         int *bio_is_patched,
+                                         u64 dev_bytenr, char **mapped_datav,
+                                         unsigned int num_pages,
+                                         struct bio *bio, int *bio_is_patched,
                                          struct buffer_head *bh,
                                          int submit_bio_bh_rw);
 static int btrfsic_process_written_superblock(
@@ -375,7 +379,7 @@ static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
 static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
                                           u64 bytenr,
                                           struct btrfsic_dev_state *dev_state,
-                                          u64 dev_bytenr, char *data);
+                                          u64 dev_bytenr);
 
 static struct mutex btrfsic_mutex;
 static int btrfsic_is_initialized;
@@ -651,7 +655,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
        int pass;
 
        BUG_ON(NULL == state);
-       selected_super = kmalloc(sizeof(*selected_super), GFP_NOFS);
+       selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS);
        if (NULL == selected_super) {
                printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
                return -1;
@@ -718,7 +722,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
 
                num_copies =
                    btrfs_num_copies(&state->root->fs_info->mapping_tree,
-                                    next_bytenr, PAGE_SIZE);
+                                    next_bytenr, state->metablock_size);
                if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
                        printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
                               (unsigned long long)next_bytenr, num_copies);
@@ -727,9 +731,9 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
                        struct btrfsic_block *next_block;
                        struct btrfsic_block_data_ctx tmp_next_block_ctx;
                        struct btrfsic_block_link *l;
-                       struct btrfs_header *hdr;
 
-                       ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
+                       ret = btrfsic_map_block(state, next_bytenr,
+                                               state->metablock_size,
                                                &tmp_next_block_ctx,
                                                mirror_num);
                        if (ret) {
@@ -758,7 +762,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
                        BUG_ON(NULL == l);
 
                        ret = btrfsic_read_block(state, &tmp_next_block_ctx);
-                       if (ret < (int)BTRFSIC_BLOCK_SIZE) {
+                       if (ret < (int)PAGE_CACHE_SIZE) {
                                printk(KERN_INFO
                                       "btrfsic: read @logical %llu failed!\n",
                                       (unsigned long long)
@@ -768,11 +772,9 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
                                return -1;
                        }
 
-                       hdr = (struct btrfs_header *)tmp_next_block_ctx.data;
                        ret = btrfsic_process_metablock(state,
                                                        next_block,
                                                        &tmp_next_block_ctx,
-                                                       hdr,
                                                        BTRFS_MAX_LEVEL + 3, 1);
                        btrfsic_release_block_ctx(&tmp_next_block_ctx);
                }
@@ -799,7 +801,10 @@ static int btrfsic_process_superblock_dev_mirror(
 
        /* super block bytenr is always the unmapped device bytenr */
        dev_bytenr = btrfs_sb_offset(superblock_mirror_num);
-       bh = __bread(superblock_bdev, dev_bytenr / 4096, 4096);
+       if (dev_bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
+               return -1;
+       bh = __bread(superblock_bdev, dev_bytenr / 4096,
+                    BTRFS_SUPER_INFO_SIZE);
        if (NULL == bh)
                return -1;
        super_tmp = (struct btrfs_super_block *)
@@ -808,7 +813,10 @@ static int btrfsic_process_superblock_dev_mirror(
        if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
            strncmp((char *)(&(super_tmp->magic)), BTRFS_MAGIC,
                    sizeof(super_tmp->magic)) ||
-           memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE)) {
+           memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
+           btrfs_super_nodesize(super_tmp) != state->metablock_size ||
+           btrfs_super_leafsize(super_tmp) != state->metablock_size ||
+           btrfs_super_sectorsize(super_tmp) != state->datablock_size) {
                brelse(bh);
                return 0;
        }
@@ -893,7 +901,7 @@ static int btrfsic_process_superblock_dev_mirror(
 
                num_copies =
                    btrfs_num_copies(&state->root->fs_info->mapping_tree,
-                                    next_bytenr, PAGE_SIZE);
+                                    next_bytenr, state->metablock_size);
                if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
                        printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
                               (unsigned long long)next_bytenr, num_copies);
@@ -902,7 +910,8 @@ static int btrfsic_process_superblock_dev_mirror(
                        struct btrfsic_block_data_ctx tmp_next_block_ctx;
                        struct btrfsic_block_link *l;
 
-                       if (btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
+                       if (btrfsic_map_block(state, next_bytenr,
+                                             state->metablock_size,
                                              &tmp_next_block_ctx,
                                              mirror_num)) {
                                printk(KERN_INFO "btrfsic: btrfsic_map_block("
@@ -966,13 +975,15 @@ static int btrfsic_process_metablock(
                struct btrfsic_state *state,
                struct btrfsic_block *const first_block,
                struct btrfsic_block_data_ctx *const first_block_ctx,
-               struct btrfs_header *const first_hdr,
                int first_limit_nesting, int force_iodone_flag)
 {
        struct btrfsic_stack_frame initial_stack_frame = { 0 };
        struct btrfsic_stack_frame *sf;
        struct btrfsic_stack_frame *next_stack;
+       struct btrfs_header *const first_hdr =
+               (struct btrfs_header *)first_block_ctx->datav[0];
 
+       BUG_ON(!first_hdr);
        sf = &initial_stack_frame;
        sf->error = 0;
        sf->i = -1;
@@ -1012,21 +1023,47 @@ continue_with_current_leaf_stack_frame:
                }
 
                if (sf->i < sf->nr) {
-                       struct btrfs_item *disk_item = leafhdr->items + sf->i;
-                       struct btrfs_disk_key *disk_key = &disk_item->key;
+                       struct btrfs_item disk_item;
+                       u32 disk_item_offset =
+                               (uintptr_t)(leafhdr->items + sf->i) -
+                               (uintptr_t)leafhdr;
+                       struct btrfs_disk_key *disk_key;
                        u8 type;
-                       const u32 item_offset = le32_to_cpu(disk_item->offset);
+                       u32 item_offset;
 
+                       if (disk_item_offset + sizeof(struct btrfs_item) >
+                           sf->block_ctx->len) {
+leaf_item_out_of_bounce_error:
+                               printk(KERN_INFO
+                                      "btrfsic: leaf item out of bounce at logical %llu, dev %s\n",
+                                      sf->block_ctx->start,
+                                      sf->block_ctx->dev->name);
+                               goto one_stack_frame_backwards;
+                       }
+                       btrfsic_read_from_block_data(sf->block_ctx,
+                                                    &disk_item,
+                                                    disk_item_offset,
+                                                    sizeof(struct btrfs_item));
+                       item_offset = le32_to_cpu(disk_item.offset);
+                       disk_key = &disk_item.key;
                        type = disk_key->type;
 
                        if (BTRFS_ROOT_ITEM_KEY == type) {
-                               const struct btrfs_root_item *const root_item =
-                                   (struct btrfs_root_item *)
-                                   (sf->block_ctx->data +
-                                    offsetof(struct btrfs_leaf, items) +
-                                    item_offset);
-                               const u64 next_bytenr =
-                                   le64_to_cpu(root_item->bytenr);
+                               struct btrfs_root_item root_item;
+                               u32 root_item_offset;
+                               u64 next_bytenr;
+
+                               root_item_offset = item_offset +
+                                       offsetof(struct btrfs_leaf, items);
+                               if (root_item_offset +
+                                   sizeof(struct btrfs_root_item) >
+                                   sf->block_ctx->len)
+                                       goto leaf_item_out_of_bounce_error;
+                               btrfsic_read_from_block_data(
+                                       sf->block_ctx, &root_item,
+                                       root_item_offset,
+                                       sizeof(struct btrfs_root_item));
+                               next_bytenr = le64_to_cpu(root_item.bytenr);
 
                                sf->error =
                                    btrfsic_create_link_to_next_block(
@@ -1041,7 +1078,7 @@ continue_with_current_leaf_stack_frame:
                                                &sf->num_copies,
                                                &sf->mirror_num,
                                                disk_key,
-                                               le64_to_cpu(root_item->
+                                               le64_to_cpu(root_item.
                                                generation));
                                if (sf->error)
                                        goto one_stack_frame_backwards;
@@ -1049,7 +1086,7 @@ continue_with_current_leaf_stack_frame:
                                if (NULL != sf->next_block) {
                                        struct btrfs_header *const next_hdr =
                                            (struct btrfs_header *)
-                                           sf->next_block_ctx.data;
+                                           sf->next_block_ctx.datav[0];
 
                                        next_stack =
                                            btrfsic_stack_frame_alloc();
@@ -1111,10 +1148,24 @@ continue_with_current_node_stack_frame:
                }
 
                if (sf->i < sf->nr) {
-                       struct btrfs_key_ptr *disk_key_ptr =
-                           nodehdr->ptrs + sf->i;
-                       const u64 next_bytenr =
-                           le64_to_cpu(disk_key_ptr->blockptr);
+                       struct btrfs_key_ptr key_ptr;
+                       u32 key_ptr_offset;
+                       u64 next_bytenr;
+
+                       key_ptr_offset = (uintptr_t)(nodehdr->ptrs + sf->i) -
+                                         (uintptr_t)nodehdr;
+                       if (key_ptr_offset + sizeof(struct btrfs_key_ptr) >
+                           sf->block_ctx->len) {
+                               printk(KERN_INFO
+                                      "btrfsic: node item out of bounce at logical %llu, dev %s\n",
+                                      sf->block_ctx->start,
+                                      sf->block_ctx->dev->name);
+                               goto one_stack_frame_backwards;
+                       }
+                       btrfsic_read_from_block_data(
+                               sf->block_ctx, &key_ptr, key_ptr_offset,
+                               sizeof(struct btrfs_key_ptr));
+                       next_bytenr = le64_to_cpu(key_ptr.blockptr);
 
                        sf->error = btrfsic_create_link_to_next_block(
                                        state,
@@ -1127,15 +1178,15 @@ continue_with_current_node_stack_frame:
                                        force_iodone_flag,
                                        &sf->num_copies,
                                        &sf->mirror_num,
-                                       &disk_key_ptr->key,
-                                       le64_to_cpu(disk_key_ptr->generation));
+                                       &key_ptr.key,
+                                       le64_to_cpu(key_ptr.generation));
                        if (sf->error)
                                goto one_stack_frame_backwards;
 
                        if (NULL != sf->next_block) {
                                struct btrfs_header *const next_hdr =
                                    (struct btrfs_header *)
-                                   sf->next_block_ctx.data;
+                                   sf->next_block_ctx.datav[0];
 
                                next_stack = btrfsic_stack_frame_alloc();
                                if (NULL == next_stack)
@@ -1181,6 +1232,35 @@ one_stack_frame_backwards:
        return sf->error;
 }
 
+static void btrfsic_read_from_block_data(
+       struct btrfsic_block_data_ctx *block_ctx,
+       void *dstv, u32 offset, size_t len)
+{
+       size_t cur;
+       size_t offset_in_page;
+       char *kaddr;
+       char *dst = (char *)dstv;
+       size_t start_offset = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1);
+       unsigned long i = (start_offset + offset) >> PAGE_CACHE_SHIFT;
+
+       WARN_ON(offset + len > block_ctx->len);
+       offset_in_page = (start_offset + offset) &
+                        ((unsigned long)PAGE_CACHE_SIZE - 1);
+
+       while (len > 0) {
+               cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page));
+               BUG_ON(i >= (block_ctx->len + PAGE_CACHE_SIZE - 1) >>
+                           PAGE_CACHE_SHIFT);
+               kaddr = block_ctx->datav[i];
+               memcpy(dst, kaddr + offset_in_page, cur);
+
+               dst += cur;
+               len -= cur;
+               offset_in_page = 0;
+               i++;
+       }
+}
+
 static int btrfsic_create_link_to_next_block(
                struct btrfsic_state *state,
                struct btrfsic_block *block,
@@ -1204,7 +1284,7 @@ static int btrfsic_create_link_to_next_block(
        if (0 == *num_copiesp) {
                *num_copiesp =
                    btrfs_num_copies(&state->root->fs_info->mapping_tree,
-                                    next_bytenr, PAGE_SIZE);
+                                    next_bytenr, state->metablock_size);
                if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
                        printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
                               (unsigned long long)next_bytenr, *num_copiesp);
@@ -1219,7 +1299,7 @@ static int btrfsic_create_link_to_next_block(
                       "btrfsic_create_link_to_next_block(mirror_num=%d)\n",
                       *mirror_nump);
        ret = btrfsic_map_block(state, next_bytenr,
-                               BTRFSIC_BLOCK_SIZE,
+                               state->metablock_size,
                                next_block_ctx, *mirror_nump);
        if (ret) {
                printk(KERN_INFO
@@ -1314,7 +1394,7 @@ static int btrfsic_create_link_to_next_block(
 
        if (limit_nesting > 0 && did_alloc_block_link) {
                ret = btrfsic_read_block(state, next_block_ctx);
-               if (ret < (int)BTRFSIC_BLOCK_SIZE) {
+               if (ret < (int)next_block_ctx->len) {
                        printk(KERN_INFO
                               "btrfsic: read block @logical %llu failed!\n",
                               (unsigned long long)next_bytenr);
@@ -1339,43 +1419,74 @@ static int btrfsic_handle_extent_data(
                u32 item_offset, int force_iodone_flag)
 {
        int ret;
-       struct btrfs_file_extent_item *file_extent_item =
-           (struct btrfs_file_extent_item *)(block_ctx->data +
-                                             offsetof(struct btrfs_leaf,
-                                                      items) + item_offset);
-       u64 next_bytenr =
-           le64_to_cpu(file_extent_item->disk_bytenr) +
-           le64_to_cpu(file_extent_item->offset);
-       u64 num_bytes = le64_to_cpu(file_extent_item->num_bytes);
-       u64 generation = le64_to_cpu(file_extent_item->generation);
+       struct btrfs_file_extent_item file_extent_item;
+       u64 file_extent_item_offset;
+       u64 next_bytenr;
+       u64 num_bytes;
+       u64 generation;
        struct btrfsic_block_link *l;
 
+       file_extent_item_offset = offsetof(struct btrfs_leaf, items) +
+                                 item_offset;
+       if (file_extent_item_offset +
+           offsetof(struct btrfs_file_extent_item, disk_num_bytes) >
+           block_ctx->len) {
+               printk(KERN_INFO
+                      "btrfsic: file item out of bounce at logical %llu, dev %s\n",
+                      block_ctx->start, block_ctx->dev->name);
+               return -1;
+       }
+
+       btrfsic_read_from_block_data(block_ctx, &file_extent_item,
+               file_extent_item_offset,
+               offsetof(struct btrfs_file_extent_item, disk_num_bytes));
+       if (BTRFS_FILE_EXTENT_REG != file_extent_item.type ||
+           ((u64)0) == le64_to_cpu(file_extent_item.disk_bytenr)) {
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
+                       printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu\n",
+                              file_extent_item.type,
+                              (unsigned long long)
+                              le64_to_cpu(file_extent_item.disk_bytenr));
+               return 0;
+       }
+
+       if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) >
+           block_ctx->len) {
+               printk(KERN_INFO
+                      "btrfsic: file item out of bounce at logical %llu, dev %s\n",
+                      block_ctx->start, block_ctx->dev->name);
+               return -1;
+       }
+       btrfsic_read_from_block_data(block_ctx, &file_extent_item,
+                                    file_extent_item_offset,
+                                    sizeof(struct btrfs_file_extent_item));
+       next_bytenr = le64_to_cpu(file_extent_item.disk_bytenr) +
+                     le64_to_cpu(file_extent_item.offset);
+       generation = le64_to_cpu(file_extent_item.generation);
+       num_bytes = le64_to_cpu(file_extent_item.num_bytes);
+
+
        if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
                printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu,"
                       " offset = %llu, num_bytes = %llu\n",
-                      file_extent_item->type,
+                      file_extent_item.type,
                       (unsigned long long)
-                      le64_to_cpu(file_extent_item->disk_bytenr),
-                      (unsigned long long)
-                      le64_to_cpu(file_extent_item->offset),
-                      (unsigned long long)
-                      le64_to_cpu(file_extent_item->num_bytes));
-       if (BTRFS_FILE_EXTENT_REG != file_extent_item->type ||
-           ((u64)0) == le64_to_cpu(file_extent_item->disk_bytenr))
-               return 0;
+                      le64_to_cpu(file_extent_item.disk_bytenr),
+                      (unsigned long long)le64_to_cpu(file_extent_item.offset),
+                      (unsigned long long)num_bytes);
        while (num_bytes > 0) {
                u32 chunk_len;
                int num_copies;
                int mirror_num;
 
-               if (num_bytes > BTRFSIC_BLOCK_SIZE)
-                       chunk_len = BTRFSIC_BLOCK_SIZE;
+               if (num_bytes > state->datablock_size)
+                       chunk_len = state->datablock_size;
                else
                        chunk_len = num_bytes;
 
                num_copies =
                    btrfs_num_copies(&state->root->fs_info->mapping_tree,
-                                    next_bytenr, PAGE_SIZE);
+                                    next_bytenr, state->datablock_size);
                if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
                        printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
                               (unsigned long long)next_bytenr, num_copies);
@@ -1475,8 +1586,9 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
        block_ctx_out->dev_bytenr = multi->stripes[0].physical;
        block_ctx_out->start = bytenr;
        block_ctx_out->len = len;
-       block_ctx_out->data = NULL;
-       block_ctx_out->bh = NULL;
+       block_ctx_out->datav = NULL;
+       block_ctx_out->pagev = NULL;
+       block_ctx_out->mem_to_free = NULL;
 
        if (0 == ret)
                kfree(multi);
@@ -1496,8 +1608,9 @@ static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
        block_ctx_out->dev_bytenr = bytenr;
        block_ctx_out->start = bytenr;
        block_ctx_out->len = len;
-       block_ctx_out->data = NULL;
-       block_ctx_out->bh = NULL;
+       block_ctx_out->datav = NULL;
+       block_ctx_out->pagev = NULL;
+       block_ctx_out->mem_to_free = NULL;
        if (NULL != block_ctx_out->dev) {
                return 0;
        } else {
@@ -1508,38 +1621,127 @@ static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
 
 static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
 {
-       if (NULL != block_ctx->bh) {
-               brelse(block_ctx->bh);
-               block_ctx->bh = NULL;
+       if (block_ctx->mem_to_free) {
+               unsigned int num_pages;
+
+               BUG_ON(!block_ctx->datav);
+               BUG_ON(!block_ctx->pagev);
+               num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
+                           PAGE_CACHE_SHIFT;
+               while (num_pages > 0) {
+                       num_pages--;
+                       if (block_ctx->datav[num_pages]) {
+                               kunmap(block_ctx->pagev[num_pages]);
+                               block_ctx->datav[num_pages] = NULL;
+                       }
+                       if (block_ctx->pagev[num_pages]) {
+                               __free_page(block_ctx->pagev[num_pages]);
+                               block_ctx->pagev[num_pages] = NULL;
+                       }
+               }
+
+               kfree(block_ctx->mem_to_free);
+               block_ctx->mem_to_free = NULL;
+               block_ctx->pagev = NULL;
+               block_ctx->datav = NULL;
        }
 }
 
 static int btrfsic_read_block(struct btrfsic_state *state,
                              struct btrfsic_block_data_ctx *block_ctx)
 {
-       block_ctx->bh = NULL;
-       if (block_ctx->dev_bytenr & 4095) {
+       unsigned int num_pages;
+       unsigned int i;
+       u64 dev_bytenr;
+       int ret;
+
+       BUG_ON(block_ctx->datav);
+       BUG_ON(block_ctx->pagev);
+       BUG_ON(block_ctx->mem_to_free);
+       if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) {
                printk(KERN_INFO
                       "btrfsic: read_block() with unaligned bytenr %llu\n",
                       (unsigned long long)block_ctx->dev_bytenr);
                return -1;
        }
-       if (block_ctx->len > 4096) {
-               printk(KERN_INFO
-                      "btrfsic: read_block() with too huge size %d\n",
-                      block_ctx->len);
+
+       num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
+                   PAGE_CACHE_SHIFT;
+       block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
+                                         sizeof(*block_ctx->pagev)) *
+                                        num_pages, GFP_NOFS);
+       if (!block_ctx->mem_to_free)
                return -1;
+       block_ctx->datav = block_ctx->mem_to_free;
+       block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
+       for (i = 0; i < num_pages; i++) {
+               block_ctx->pagev[i] = alloc_page(GFP_NOFS);
+               if (!block_ctx->pagev[i])
+                       return -1;
        }
 
-       block_ctx->bh = __bread(block_ctx->dev->bdev,
-                               block_ctx->dev_bytenr >> 12, 4096);
-       if (NULL == block_ctx->bh)
-               return -1;
-       block_ctx->data = block_ctx->bh->b_data;
+       dev_bytenr = block_ctx->dev_bytenr;
+       for (i = 0; i < num_pages;) {
+               struct bio *bio;
+               unsigned int j;
+               DECLARE_COMPLETION_ONSTACK(complete);
+
+               bio = bio_alloc(GFP_NOFS, num_pages - i);
+               if (!bio) {
+                       printk(KERN_INFO
+                              "btrfsic: bio_alloc() for %u pages failed!\n",
+                              num_pages - i);
+                       return -1;
+               }
+               bio->bi_bdev = block_ctx->dev->bdev;
+               bio->bi_sector = dev_bytenr >> 9;
+               bio->bi_end_io = btrfsic_complete_bio_end_io;
+               bio->bi_private = &complete;
+
+               for (j = i; j < num_pages; j++) {
+                       ret = bio_add_page(bio, block_ctx->pagev[j],
+                                          PAGE_CACHE_SIZE, 0);
+                       if (PAGE_CACHE_SIZE != ret)
+                               break;
+               }
+               if (j == i) {
+                       printk(KERN_INFO
+                              "btrfsic: error, failed to add a single page!\n");
+                       return -1;
+               }
+               submit_bio(READ, bio);
+
+               /* this will also unplug the queue */
+               wait_for_completion(&complete);
+
+               if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+                       printk(KERN_INFO
+                              "btrfsic: read error at logical %llu dev %s!\n",
+                              block_ctx->start, block_ctx->dev->name);
+                       bio_put(bio);
+                       return -1;
+               }
+               bio_put(bio);
+               dev_bytenr += (j - i) * PAGE_CACHE_SIZE;
+               i = j;
+       }
+       for (i = 0; i < num_pages; i++) {
+               block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
+               if (!block_ctx->datav[i]) {
+                       printk(KERN_INFO "btrfsic: kmap() failed (dev %s)!\n",
+                              block_ctx->dev->name);
+                       return -1;
+               }
+       }
 
        return block_ctx->len;
 }
 
+static void btrfsic_complete_bio_end_io(struct bio *bio, int err)
+{
+       complete((struct completion *)bio->bi_private);
+}
+
 static void btrfsic_dump_database(struct btrfsic_state *state)
 {
        struct list_head *elem_all;
@@ -1617,32 +1819,39 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
  * (note that this test fails for the super block)
  */
 static int btrfsic_test_for_metadata(struct btrfsic_state *state,
-                                    const u8 *data, unsigned int size)
+                                    char **datav, unsigned int num_pages)
 {
        struct btrfs_header *h;
        u8 csum[BTRFS_CSUM_SIZE];
        u32 crc = ~(u32)0;
-       int fail = 0;
-       int crc_fail = 0;
+       unsigned int i;
 
-       h = (struct btrfs_header *)data;
+       if (num_pages * PAGE_CACHE_SIZE < state->metablock_size)
+               return 1; /* not metadata */
+       num_pages = state->metablock_size >> PAGE_CACHE_SHIFT;
+       h = (struct btrfs_header *)datav[0];
 
        if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
-               fail++;
+               return 1;
+
+       for (i = 0; i < num_pages; i++) {
+               u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
+               size_t sublen = i ? PAGE_CACHE_SIZE :
+                                   (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);
 
-       crc = crc32c(crc, data + BTRFS_CSUM_SIZE, PAGE_SIZE - BTRFS_CSUM_SIZE);
+               crc = crc32c(crc, data, sublen);
+       }
        btrfs_csum_final(crc, csum);
        if (memcmp(csum, h->csum, state->csum_size))
-               crc_fail++;
+               return 1;
 
-       return fail || crc_fail;
+       return 0; /* is metadata */
 }
 
 static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
-                                         u64 dev_bytenr,
-                                         u8 *mapped_data, unsigned int len,
-                                         struct bio *bio,
-                                         int *bio_is_patched,
+                                         u64 dev_bytenr, char **mapped_datav,
+                                         unsigned int num_pages,
+                                         struct bio *bio, int *bio_is_patched,
                                          struct buffer_head *bh,
                                          int submit_bio_bh_rw)
 {
@@ -1652,12 +1861,19 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
        int ret;
        struct btrfsic_state *state = dev_state->state;
        struct block_device *bdev = dev_state->bdev;
+       unsigned int processed_len;
 
-       WARN_ON(len > PAGE_SIZE);
-       is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_data, len));
        if (NULL != bio_is_patched)
                *bio_is_patched = 0;
 
+again:
+       if (num_pages == 0)
+               return;
+
+       processed_len = 0;
+       is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_datav,
+                                                     num_pages));
+
        block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
                                               &state->block_hashtable);
        if (NULL != block) {
@@ -1667,8 +1883,16 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
 
                if (block->is_superblock) {
                        bytenr = le64_to_cpu(((struct btrfs_super_block *)
-                                             mapped_data)->bytenr);
+                                             mapped_datav[0])->bytenr);
+                       if (num_pages * PAGE_CACHE_SIZE <
+                           BTRFS_SUPER_INFO_SIZE) {
+                               printk(KERN_INFO
+                                      "btrfsic: cannot work with too short bios!\n");
+                               return;
+                       }
                        is_metadata = 1;
+                       BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_CACHE_SIZE - 1));
+                       processed_len = BTRFS_SUPER_INFO_SIZE;
                        if (state->print_mask &
                            BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
                                printk(KERN_INFO
@@ -1678,12 +1902,18 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                }
                if (is_metadata) {
                        if (!block->is_superblock) {
+                               if (num_pages * PAGE_CACHE_SIZE <
+                                   state->metablock_size) {
+                                       printk(KERN_INFO
+                                              "btrfsic: cannot work with too short bios!\n");
+                                       return;
+                               }
+                               processed_len = state->metablock_size;
                                bytenr = le64_to_cpu(((struct btrfs_header *)
-                                                     mapped_data)->bytenr);
+                                                     mapped_datav[0])->bytenr);
                                btrfsic_cmp_log_and_dev_bytenr(state, bytenr,
                                                               dev_state,
-                                                              dev_bytenr,
-                                                              mapped_data);
+                                                              dev_bytenr);
                        }
                        if (block->logical_bytenr != bytenr) {
                                printk(KERN_INFO
@@ -1710,6 +1940,13 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                                       block->mirror_num,
                                       btrfsic_get_block_type(state, block));
                } else {
+                       if (num_pages * PAGE_CACHE_SIZE <
+                           state->datablock_size) {
+                               printk(KERN_INFO
+                                      "btrfsic: cannot work with too short bios!\n");
+                               return;
+                       }
+                       processed_len = state->datablock_size;
                        bytenr = block->logical_bytenr;
                        if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
                                printk(KERN_INFO
@@ -1747,7 +1984,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                               le64_to_cpu(block->disk_key.offset),
                               (unsigned long long)
                               le64_to_cpu(((struct btrfs_header *)
-                                           mapped_data)->generation),
+                                           mapped_datav[0])->generation),
                               (unsigned long long)
                               state->max_superblock_generation);
                        btrfsic_dump_tree(state);
@@ -1765,10 +2002,10 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                               (unsigned long long)block->generation,
                               (unsigned long long)
                               le64_to_cpu(((struct btrfs_header *)
-                                           mapped_data)->generation));
+                                           mapped_datav[0])->generation));
                        /* it would not be safe to go on */
                        btrfsic_dump_tree(state);
-                       return;
+                       goto continue_loop;
                }
 
                /*
@@ -1796,18 +2033,19 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                }
 
                if (block->is_superblock)
-                       ret = btrfsic_map_superblock(state, bytenr, len,
+                       ret = btrfsic_map_superblock(state, bytenr,
+                                                    processed_len,
                                                     bdev, &block_ctx);
                else
-                       ret = btrfsic_map_block(state, bytenr, len,
+                       ret = btrfsic_map_block(state, bytenr, processed_len,
                                                &block_ctx, 0);
                if (ret) {
                        printk(KERN_INFO
                               "btrfsic: btrfsic_map_block(root @%llu)"
                               " failed!\n", (unsigned long long)bytenr);
-                       return;
+                       goto continue_loop;
                }
-               block_ctx.data = mapped_data;
+               block_ctx.datav = mapped_datav;
                /* the following is required in case of writes to mirrors,
                 * use the same that was used for the lookup */
                block_ctx.dev = dev_state;
@@ -1863,11 +2101,13 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                        block->logical_bytenr = bytenr;
                        block->is_metadata = 1;
                        if (block->is_superblock) {
+                               BUG_ON(PAGE_CACHE_SIZE !=
+                                      BTRFS_SUPER_INFO_SIZE);
                                ret = btrfsic_process_written_superblock(
                                                state,
                                                block,
                                                (struct btrfs_super_block *)
-                                               mapped_data);
+                                               mapped_datav[0]);
                                if (state->print_mask &
                                    BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
                                        printk(KERN_INFO
@@ -1880,8 +2120,6 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                                                state,
                                                block,
                                                &block_ctx,
-                                               (struct btrfs_header *)
-                                               block_ctx.data,
                                                0, 0);
                        }
                        if (ret)
@@ -1912,26 +2150,30 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                u64 bytenr;
 
                if (!is_metadata) {
+                       processed_len = state->datablock_size;
                        if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
                                printk(KERN_INFO "Written block (%s/%llu/?)"
                                       " !found in hash table, D.\n",
                                       dev_state->name,
                                       (unsigned long long)dev_bytenr);
-                       if (!state->include_extent_data)
-                               return; /* ignore that written D block */
+                       if (!state->include_extent_data) {
+                               /* ignore that written D block */
+                               goto continue_loop;
+                       }
 
                        /* this is getting ugly for the
                         * include_extent_data case... */
                        bytenr = 0;     /* unknown */
                        block_ctx.start = bytenr;
-                       block_ctx.len = len;
-                       block_ctx.bh = NULL;
+                       block_ctx.len = processed_len;
+                       block_ctx.mem_to_free = NULL;
+                       block_ctx.pagev = NULL;
                } else {
+                       processed_len = state->metablock_size;
                        bytenr = le64_to_cpu(((struct btrfs_header *)
-                                             mapped_data)->bytenr);
+                                             mapped_datav[0])->bytenr);
                        btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
-                                                      dev_bytenr,
-                                                      mapped_data);
+                                                      dev_bytenr);
                        if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
                                printk(KERN_INFO
                                       "Written block @%llu (%s/%llu/?)"
@@ -1940,17 +2182,17 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                                       dev_state->name,
                                       (unsigned long long)dev_bytenr);
 
-                       ret = btrfsic_map_block(state, bytenr, len, &block_ctx,
-                                               0);
+                       ret = btrfsic_map_block(state, bytenr, processed_len,
+                                               &block_ctx, 0);
                        if (ret) {
                                printk(KERN_INFO
                                       "btrfsic: btrfsic_map_block(root @%llu)"
                                       " failed!\n",
                                       (unsigned long long)dev_bytenr);
-                               return;
+                               goto continue_loop;
                        }
                }
-               block_ctx.data = mapped_data;
+               block_ctx.datav = mapped_datav;
                /* the following is required in case of writes to mirrors,
                 * use the same that was used for the lookup */
                block_ctx.dev = dev_state;
@@ -1960,7 +2202,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                if (NULL == block) {
                        printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
                        btrfsic_release_block_ctx(&block_ctx);
-                       return;
+                       goto continue_loop;
                }
                block->dev_state = dev_state;
                block->dev_bytenr = dev_bytenr;
@@ -2020,9 +2262,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
 
                if (is_metadata) {
                        ret = btrfsic_process_metablock(state, block,
-                                                       &block_ctx,
-                                                       (struct btrfs_header *)
-                                                       block_ctx.data, 0, 0);
+                                                       &block_ctx, 0, 0);
                        if (ret)
                                printk(KERN_INFO
                                       "btrfsic: process_metablock(root @%llu)"
@@ -2031,6 +2271,13 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                }
                btrfsic_release_block_ctx(&block_ctx);
        }
+
+continue_loop:
+       BUG_ON(!processed_len);
+       dev_bytenr += processed_len;
+       mapped_datav += processed_len >> PAGE_CACHE_SHIFT;
+       num_pages -= processed_len >> PAGE_CACHE_SHIFT;
+       goto again;
 }
 
 static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status)
@@ -2213,7 +2460,7 @@ static int btrfsic_process_written_superblock(
 
                num_copies =
                    btrfs_num_copies(&state->root->fs_info->mapping_tree,
-                                    next_bytenr, PAGE_SIZE);
+                                    next_bytenr, BTRFS_SUPER_INFO_SIZE);
                if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
                        printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
                               (unsigned long long)next_bytenr, num_copies);
@@ -2224,7 +2471,8 @@ static int btrfsic_process_written_superblock(
                                printk(KERN_INFO
                                       "btrfsic_process_written_superblock("
                                       "mirror_num=%d)\n", mirror_num);
-                       ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
+                       ret = btrfsic_map_block(state, next_bytenr,
+                                               BTRFS_SUPER_INFO_SIZE,
                                                &tmp_next_block_ctx,
                                                mirror_num);
                        if (ret) {
@@ -2689,7 +2937,7 @@ static struct btrfsic_block *btrfsic_block_lookup_or_add(
 static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
                                           u64 bytenr,
                                           struct btrfsic_dev_state *dev_state,
-                                          u64 dev_bytenr, char *data)
+                                          u64 dev_bytenr)
 {
        int num_copies;
        int mirror_num;
@@ -2698,10 +2946,10 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
        int match = 0;
 
        num_copies = btrfs_num_copies(&state->root->fs_info->mapping_tree,
-                                     bytenr, PAGE_SIZE);
+                                     bytenr, state->metablock_size);
 
        for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
-               ret = btrfsic_map_block(state, bytenr, PAGE_SIZE,
+               ret = btrfsic_map_block(state, bytenr, state->metablock_size,
                                        &block_ctx, mirror_num);
                if (ret) {
                        printk(KERN_INFO "btrfsic:"
@@ -2727,7 +2975,8 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
                       (unsigned long long)bytenr, dev_state->name,
                       (unsigned long long)dev_bytenr);
                for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
-                       ret = btrfsic_map_block(state, bytenr, PAGE_SIZE,
+                       ret = btrfsic_map_block(state, bytenr,
+                                               state->metablock_size,
                                                &block_ctx, mirror_num);
                        if (ret)
                                continue;
@@ -2781,13 +3030,13 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
                               (unsigned long)bh->b_size, bh->b_data,
                               bh->b_bdev);
                btrfsic_process_written_block(dev_state, dev_bytenr,
-                                             bh->b_data, bh->b_size, NULL,
+                                             &bh->b_data, 1, NULL,
                                              NULL, bh, rw);
        } else if (NULL != dev_state && (rw & REQ_FLUSH)) {
                if (dev_state->state->print_mask &
                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
                        printk(KERN_INFO
-                              "submit_bh(rw=0x%x) FLUSH, bdev=%p)\n",
+                              "submit_bh(rw=0x%x FLUSH, bdev=%p)\n",
                               rw, bh->b_bdev);
                if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
                        if ((dev_state->state->print_mask &
@@ -2836,6 +3085,7 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
                unsigned int i;
                u64 dev_bytenr;
                int bio_is_patched;
+               char **mapped_datav;
 
                dev_bytenr = 512 * bio->bi_sector;
                bio_is_patched = 0;
@@ -2848,35 +3098,46 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
                               (unsigned long long)dev_bytenr,
                               bio->bi_bdev);
 
+               mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
+                                      GFP_NOFS);
+               if (!mapped_datav)
+                       goto leave;
                for (i = 0; i < bio->bi_vcnt; i++) {
-                       u8 *mapped_data;
-
-                       mapped_data = kmap(bio->bi_io_vec[i].bv_page);
+                       BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE);
+                       mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
+                       if (!mapped_datav[i]) {
+                               while (i > 0) {
+                                       i--;
+                                       kunmap(bio->bi_io_vec[i].bv_page);
+                               }
+                               kfree(mapped_datav);
+                               goto leave;
+                       }
                        if ((BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
                             BTRFSIC_PRINT_MASK_VERBOSE) ==
                            (dev_state->state->print_mask &
                             (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
                              BTRFSIC_PRINT_MASK_VERBOSE)))
                                printk(KERN_INFO
-                                      "#%u: page=%p, mapped=%p, len=%u,"
-                                      " offset=%u\n",
+                                      "#%u: page=%p, len=%u, offset=%u\n",
                                       i, bio->bi_io_vec[i].bv_page,
-                                      mapped_data,
                                       bio->bi_io_vec[i].bv_len,
                                       bio->bi_io_vec[i].bv_offset);
-                       btrfsic_process_written_block(dev_state, dev_bytenr,
-                                                     mapped_data,
-                                                     bio->bi_io_vec[i].bv_len,
-                                                     bio, &bio_is_patched,
-                                                     NULL, rw);
+               }
+               btrfsic_process_written_block(dev_state, dev_bytenr,
+                                             mapped_datav, bio->bi_vcnt,
+                                             bio, &bio_is_patched,
+                                             NULL, rw);
+               while (i > 0) {
+                       i--;
                        kunmap(bio->bi_io_vec[i].bv_page);
-                       dev_bytenr += bio->bi_io_vec[i].bv_len;
                }
+               kfree(mapped_datav);
        } else if (NULL != dev_state && (rw & REQ_FLUSH)) {
                if (dev_state->state->print_mask &
                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
                        printk(KERN_INFO
-                              "submit_bio(rw=0x%x) FLUSH, bdev=%p)\n",
+                              "submit_bio(rw=0x%x FLUSH, bdev=%p)\n",
                               rw, bio->bi_bdev);
                if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
                        if ((dev_state->state->print_mask &
@@ -2903,6 +3164,7 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
                        bio->bi_end_io = btrfsic_bio_end_io;
                }
        }
+leave:
        mutex_unlock(&btrfsic_mutex);
 
        submit_bio(rw, bio);
@@ -2917,6 +3179,30 @@ int btrfsic_mount(struct btrfs_root *root,
        struct list_head *dev_head = &fs_devices->devices;
        struct btrfs_device *device;
 
+       if (root->nodesize != root->leafsize) {
+               printk(KERN_INFO
+                      "btrfsic: cannot handle nodesize %d != leafsize %d!\n",
+                      root->nodesize, root->leafsize);
+               return -1;
+       }
+       if (root->nodesize & ((u64)PAGE_CACHE_SIZE - 1)) {
+               printk(KERN_INFO
+                      "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
+                      root->nodesize, (unsigned long)PAGE_CACHE_SIZE);
+               return -1;
+       }
+       if (root->leafsize & ((u64)PAGE_CACHE_SIZE - 1)) {
+               printk(KERN_INFO
+                      "btrfsic: cannot handle leafsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
+                      root->leafsize, (unsigned long)PAGE_CACHE_SIZE);
+               return -1;
+       }
+       if (root->sectorsize & ((u64)PAGE_CACHE_SIZE - 1)) {
+               printk(KERN_INFO
+                      "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
+                      root->sectorsize, (unsigned long)PAGE_CACHE_SIZE);
+               return -1;
+       }
        state = kzalloc(sizeof(*state), GFP_NOFS);
        if (NULL == state) {
                printk(KERN_INFO "btrfs check-integrity: kmalloc() failed!\n");
@@ -2933,6 +3219,8 @@ int btrfsic_mount(struct btrfs_root *root,
        state->print_mask = print_mask;
        state->include_extent_data = including_extent_data;
        state->csum_size = 0;
+       state->metablock_size = root->nodesize;
+       state->datablock_size = root->sectorsize;
        INIT_LIST_HEAD(&state->all_blocks_list);
        btrfsic_block_hashtable_init(&state->block_hashtable);
        btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
@@ -3049,7 +3337,7 @@ void btrfsic_unmount(struct btrfs_root *root,
                                btrfsic_block_link_free(l);
                }
 
-               if (b_all->is_iodone)
+               if (b_all->is_iodone || b_all->never_written)
                        btrfsic_block_free(b_all);
                else
                        printk(KERN_INFO "btrfs: attempt to free %c-block"
index 4106264fbc655ac79b26efa1177384ea92b72988..d7a96cfdc50ae6a2d8afef1dad7ca3642248bbb8 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/rbtree.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -37,7 +38,16 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
                              struct extent_buffer *dst_buf,
                              struct extent_buffer *src_buf);
 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                  struct btrfs_path *path, int level, int slot);
+                   struct btrfs_path *path, int level, int slot,
+                   int tree_mod_log);
+static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
+                                struct extent_buffer *eb);
+struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
+                                         u32 blocksize, u64 parent_transid,
+                                         u64 time_seq);
+struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
+                                               u64 bytenr, u32 blocksize,
+                                               u64 time_seq);
 
 struct btrfs_path *btrfs_alloc_path(void)
 {
@@ -255,7 +265,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 
        cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
                                     new_root_objectid, &disk_key, level,
-                                    buf->start, 0, 1);
+                                    buf->start, 0);
        if (IS_ERR(cow))
                return PTR_ERR(cow);
 
@@ -288,6 +298,434 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+enum mod_log_op {
+       MOD_LOG_KEY_REPLACE,
+       MOD_LOG_KEY_ADD,
+       MOD_LOG_KEY_REMOVE,
+       MOD_LOG_KEY_REMOVE_WHILE_FREEING,
+       MOD_LOG_KEY_REMOVE_WHILE_MOVING,
+       MOD_LOG_MOVE_KEYS,
+       MOD_LOG_ROOT_REPLACE,
+};
+
+struct tree_mod_move {
+       int dst_slot;
+       int nr_items;
+};
+
+struct tree_mod_root {
+       u64 logical;
+       u8 level;
+};
+
+struct tree_mod_elem {
+       struct rb_node node;
+       u64 index;              /* shifted logical */
+       struct seq_list elem;
+       enum mod_log_op op;
+
+       /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
+       int slot;
+
+       /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
+       u64 generation;
+
+       /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
+       struct btrfs_disk_key key;
+       u64 blockptr;
+
+       /* this is used for op == MOD_LOG_MOVE_KEYS */
+       struct tree_mod_move move;
+
+       /* this is used for op == MOD_LOG_ROOT_REPLACE */
+       struct tree_mod_root old_root;
+};
+
+static inline void
+__get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem)
+{
+       elem->seq = atomic_inc_return(&fs_info->tree_mod_seq);
+       list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
+}
+
+void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
+                           struct seq_list *elem)
+{
+       elem->flags = 1;
+       spin_lock(&fs_info->tree_mod_seq_lock);
+       __get_tree_mod_seq(fs_info, elem);
+       spin_unlock(&fs_info->tree_mod_seq_lock);
+}
+
+void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
+                           struct seq_list *elem)
+{
+       struct rb_root *tm_root;
+       struct rb_node *node;
+       struct rb_node *next;
+       struct seq_list *cur_elem;
+       struct tree_mod_elem *tm;
+       u64 min_seq = (u64)-1;
+       u64 seq_putting = elem->seq;
+
+       if (!seq_putting)
+               return;
+
+       BUG_ON(!(elem->flags & 1));
+       spin_lock(&fs_info->tree_mod_seq_lock);
+       list_del(&elem->list);
+
+       list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
+               if ((cur_elem->flags & 1) && cur_elem->seq < min_seq) {
+                       if (seq_putting > cur_elem->seq) {
+                               /*
+                                * blocker with lower sequence number exists, we
+                                * cannot remove anything from the log
+                                */
+                               goto out;
+                       }
+                       min_seq = cur_elem->seq;
+               }
+       }
+
+       /*
+        * anything that's lower than the lowest existing (read: blocked)
+        * sequence number can be removed from the tree.
+        */
+       write_lock(&fs_info->tree_mod_log_lock);
+       tm_root = &fs_info->tree_mod_log;
+       for (node = rb_first(tm_root); node; node = next) {
+               next = rb_next(node);
+               tm = container_of(node, struct tree_mod_elem, node);
+               if (tm->elem.seq > min_seq)
+                       continue;
+               rb_erase(node, tm_root);
+               list_del(&tm->elem.list);
+               kfree(tm);
+       }
+       write_unlock(&fs_info->tree_mod_log_lock);
+out:
+       spin_unlock(&fs_info->tree_mod_seq_lock);
+}
+
+/*
+ * key order of the log:
+ *       index -> sequence
+ *
+ * the index is the shifted logical of the *new* root node for root replace
+ * operations, or the shifted logical of the affected block for all other
+ * operations.
+ */
+static noinline int
+__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
+{
+       struct rb_root *tm_root;
+       struct rb_node **new;
+       struct rb_node *parent = NULL;
+       struct tree_mod_elem *cur;
+       int ret = 0;
+
+       BUG_ON(!tm || !tm->elem.seq);
+
+       write_lock(&fs_info->tree_mod_log_lock);
+       tm_root = &fs_info->tree_mod_log;
+       new = &tm_root->rb_node;
+       while (*new) {
+               cur = container_of(*new, struct tree_mod_elem, node);
+               parent = *new;
+               if (cur->index < tm->index)
+                       new = &((*new)->rb_left);
+               else if (cur->index > tm->index)
+                       new = &((*new)->rb_right);
+               else if (cur->elem.seq < tm->elem.seq)
+                       new = &((*new)->rb_left);
+               else if (cur->elem.seq > tm->elem.seq)
+                       new = &((*new)->rb_right);
+               else {
+                       kfree(tm);
+                       ret = -EEXIST;
+                       goto unlock;
+               }
+       }
+
+       rb_link_node(&tm->node, parent, new);
+       rb_insert_color(&tm->node, tm_root);
+unlock:
+       write_unlock(&fs_info->tree_mod_log_lock);
+       return ret;
+}
+
+static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
+                                   struct extent_buffer *eb) {
+       smp_mb();
+       if (list_empty(&(fs_info)->tree_mod_seq_list))
+               return 1;
+       if (!eb)
+               return 0;
+       if (btrfs_header_level(eb) == 0)
+               return 1;
+       return 0;
+}
+
+static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
+                                struct tree_mod_elem **tm_ret)
+{
+       struct tree_mod_elem *tm;
+       int seq;
+
+       if (tree_mod_dont_log(fs_info, NULL))
+               return 0;
+
+       tm = *tm_ret = kzalloc(sizeof(*tm), flags);
+       if (!tm)
+               return -ENOMEM;
+
+       tm->elem.flags = 0;
+       spin_lock(&fs_info->tree_mod_seq_lock);
+       if (list_empty(&fs_info->tree_mod_seq_list)) {
+               /*
+                * someone emptied the list while we were waiting for the lock.
+                * we must not add to the list, because no blocker exists. items
+                * are removed from the list only when the existing blocker is
+                * removed from the list.
+                */
+               kfree(tm);
+               seq = 0;
+       } else {
+               __get_tree_mod_seq(fs_info, &tm->elem);
+               seq = tm->elem.seq;
+       }
+       spin_unlock(&fs_info->tree_mod_seq_lock);
+
+       return seq;
+}
+
+static noinline int
+tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
+                            struct extent_buffer *eb, int slot,
+                            enum mod_log_op op, gfp_t flags)
+{
+       struct tree_mod_elem *tm;
+       int ret;
+
+       ret = tree_mod_alloc(fs_info, flags, &tm);
+       if (ret <= 0)
+               return ret;
+
+       tm->index = eb->start >> PAGE_CACHE_SHIFT;
+       if (op != MOD_LOG_KEY_ADD) {
+               btrfs_node_key(eb, &tm->key, slot);
+               tm->blockptr = btrfs_node_blockptr(eb, slot);
+       }
+       tm->op = op;
+       tm->slot = slot;
+       tm->generation = btrfs_node_ptr_generation(eb, slot);
+
+       return __tree_mod_log_insert(fs_info, tm);
+}
+
+static noinline int
+tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
+                       int slot, enum mod_log_op op)
+{
+       return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
+}
+
+static noinline int
+tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
+                        struct extent_buffer *eb, int dst_slot, int src_slot,
+                        int nr_items, gfp_t flags)
+{
+       struct tree_mod_elem *tm;
+       int ret;
+       int i;
+
+       if (tree_mod_dont_log(fs_info, eb))
+               return 0;
+
+       for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
+               ret = tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
+                                             MOD_LOG_KEY_REMOVE_WHILE_MOVING);
+               BUG_ON(ret < 0);
+       }
+
+       ret = tree_mod_alloc(fs_info, flags, &tm);
+       if (ret <= 0)
+               return ret;
+
+       tm->index = eb->start >> PAGE_CACHE_SHIFT;
+       tm->slot = src_slot;
+       tm->move.dst_slot = dst_slot;
+       tm->move.nr_items = nr_items;
+       tm->op = MOD_LOG_MOVE_KEYS;
+
+       return __tree_mod_log_insert(fs_info, tm);
+}
+
+static noinline int
+tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
+                        struct extent_buffer *old_root,
+                        struct extent_buffer *new_root, gfp_t flags)
+{
+       struct tree_mod_elem *tm;
+       int ret;
+
+       ret = tree_mod_alloc(fs_info, flags, &tm);
+       if (ret <= 0)
+               return ret;
+
+       tm->index = new_root->start >> PAGE_CACHE_SHIFT;
+       tm->old_root.logical = old_root->start;
+       tm->old_root.level = btrfs_header_level(old_root);
+       tm->generation = btrfs_header_generation(old_root);
+       tm->op = MOD_LOG_ROOT_REPLACE;
+
+       return __tree_mod_log_insert(fs_info, tm);
+}
+
+static struct tree_mod_elem *
+__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
+                     int smallest)
+{
+       struct rb_root *tm_root;
+       struct rb_node *node;
+       struct tree_mod_elem *cur = NULL;
+       struct tree_mod_elem *found = NULL;
+       u64 index = start >> PAGE_CACHE_SHIFT;
+
+       read_lock(&fs_info->tree_mod_log_lock);
+       tm_root = &fs_info->tree_mod_log;
+       node = tm_root->rb_node;
+       while (node) {
+               cur = container_of(node, struct tree_mod_elem, node);
+               if (cur->index < index) {
+                       node = node->rb_left;
+               } else if (cur->index > index) {
+                       node = node->rb_right;
+               } else if (cur->elem.seq < min_seq) {
+                       node = node->rb_left;
+               } else if (!smallest) {
+                       /* we want the node with the highest seq */
+                       if (found)
+                               BUG_ON(found->elem.seq > cur->elem.seq);
+                       found = cur;
+                       node = node->rb_left;
+               } else if (cur->elem.seq > min_seq) {
+                       /* we want the node with the smallest seq */
+                       if (found)
+                               BUG_ON(found->elem.seq < cur->elem.seq);
+                       found = cur;
+                       node = node->rb_right;
+               } else {
+                       found = cur;
+                       break;
+               }
+       }
+       read_unlock(&fs_info->tree_mod_log_lock);
+
+       return found;
+}
+
+/*
+ * this returns the element from the log with the smallest time sequence
+ * value that's in the log (the oldest log item). any element with a time
+ * sequence lower than min_seq will be ignored.
+ */
+static struct tree_mod_elem *
+tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
+                          u64 min_seq)
+{
+       return __tree_mod_log_search(fs_info, start, min_seq, 1);
+}
+
+/*
+ * this returns the element from the log with the largest time sequence
+ * value that's in the log (the most recent log item). any element with
+ * a time sequence lower than min_seq will be ignored.
+ */
+static struct tree_mod_elem *
+tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
+{
+       return __tree_mod_log_search(fs_info, start, min_seq, 0);
+}
+
+static inline void
+tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
+                    struct extent_buffer *src, unsigned long dst_offset,
+                    unsigned long src_offset, int nr_items)
+{
+       int ret;
+       int i;
+
+       if (tree_mod_dont_log(fs_info, NULL))
+               return;
+
+       if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
+               return;
+
+       /* speed this up by single seq for all operations? */
+       for (i = 0; i < nr_items; i++) {
+               ret = tree_mod_log_insert_key(fs_info, src, i + src_offset,
+                                             MOD_LOG_KEY_REMOVE);
+               BUG_ON(ret < 0);
+               ret = tree_mod_log_insert_key(fs_info, dst, i + dst_offset,
+                                             MOD_LOG_KEY_ADD);
+               BUG_ON(ret < 0);
+       }
+}
+
+static inline void
+tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
+                    int dst_offset, int src_offset, int nr_items)
+{
+       int ret;
+       ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
+                                      nr_items, GFP_NOFS);
+       BUG_ON(ret < 0);
+}
+
+static inline void
+tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
+                         struct extent_buffer *eb,
+                         struct btrfs_disk_key *disk_key, int slot, int atomic)
+{
+       int ret;
+
+       ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
+                                          MOD_LOG_KEY_REPLACE,
+                                          atomic ? GFP_ATOMIC : GFP_NOFS);
+       BUG_ON(ret < 0);
+}
+
+static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
+                                struct extent_buffer *eb)
+{
+       int i;
+       int ret;
+       u32 nritems;
+
+       if (tree_mod_dont_log(fs_info, eb))
+               return;
+
+       nritems = btrfs_header_nritems(eb);
+       for (i = nritems - 1; i >= 0; i--) {
+               ret = tree_mod_log_insert_key(fs_info, eb, i,
+                                             MOD_LOG_KEY_REMOVE_WHILE_FREEING);
+               BUG_ON(ret < 0);
+       }
+}
+
+static inline void
+tree_mod_log_set_root_pointer(struct btrfs_root *root,
+                             struct extent_buffer *new_root_node)
+{
+       int ret;
+       tree_mod_log_free_eb(root->fs_info, root->node);
+       ret = tree_mod_log_insert_root(root->fs_info, root->node,
+                                      new_root_node, GFP_NOFS);
+       BUG_ON(ret < 0);
+}
+
 /*
  * check if the tree block can be shared by multiple trees
  */
@@ -409,6 +847,12 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                        ret = btrfs_dec_ref(trans, root, buf, 1, 1);
                        BUG_ON(ret); /* -ENOMEM */
                }
+               /*
+                * don't log freeing in case we're freeing the root node, this
+                * is done by tree_mod_log_set_root_pointer later
+                */
+               if (buf != root->node && btrfs_header_level(buf) != 0)
+                       tree_mod_log_free_eb(root->fs_info, buf);
                clean_tree_block(trans, root, buf);
                *last_ref = 1;
        }
@@ -467,7 +911,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 
        cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
                                     root->root_key.objectid, &disk_key,
-                                    level, search_start, empty_size, 1);
+                                    level, search_start, empty_size);
        if (IS_ERR(cow))
                return PTR_ERR(cow);
 
@@ -506,10 +950,11 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                        parent_start = 0;
 
                extent_buffer_get(cow);
+               tree_mod_log_set_root_pointer(root, cow);
                rcu_assign_pointer(root->node, cow);
 
                btrfs_free_tree_block(trans, root, buf, parent_start,
-                                     last_ref, 1);
+                                     last_ref);
                free_extent_buffer(buf);
                add_root_to_dirty_list(root);
        } else {
@@ -519,13 +964,15 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                        parent_start = 0;
 
                WARN_ON(trans->transid != btrfs_header_generation(parent));
+               tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
+                                       MOD_LOG_KEY_REPLACE);
                btrfs_set_node_blockptr(parent, parent_slot,
                                        cow->start);
                btrfs_set_node_ptr_generation(parent, parent_slot,
                                              trans->transid);
                btrfs_mark_buffer_dirty(parent);
                btrfs_free_tree_block(trans, root, buf, parent_start,
-                                     last_ref, 1);
+                                     last_ref);
        }
        if (unlock_orig)
                btrfs_tree_unlock(buf);
@@ -535,6 +982,210 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+/*
+ * returns the logical address of the oldest predecessor of the given root.
+ * entries older than time_seq are ignored.
+ */
+static struct tree_mod_elem *
+__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
+                          struct btrfs_root *root, u64 time_seq)
+{
+       struct tree_mod_elem *tm;
+       struct tree_mod_elem *found = NULL;
+       u64 root_logical = root->node->start;
+       int looped = 0;
+
+       if (!time_seq)
+               return 0;
+
+       /*
+        * the very last operation that's logged for a root is the replacement
+        * operation (if it is replaced at all). this has the index of the *new*
+        * root, making it the very first operation that's logged for this root.
+        */
+       while (1) {
+               tm = tree_mod_log_search_oldest(fs_info, root_logical,
+                                               time_seq);
+               if (!looped && !tm)
+                       return 0;
+               /*
+                * we must have key remove operations in the log before the
+                * replace operation.
+                */
+               BUG_ON(!tm);
+
+               if (tm->op != MOD_LOG_ROOT_REPLACE)
+                       break;
+
+               found = tm;
+               root_logical = tm->old_root.logical;
+               BUG_ON(root_logical == root->node->start);
+               looped = 1;
+       }
+
+       return found;
+}
+
+/*
+ * tm is a pointer to the first operation to rewind within eb. then, all
+ * previous operations will be rewound (until we reach something older than
+ * time_seq).
+ */
+static void
+__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
+                     struct tree_mod_elem *first_tm)
+{
+       u32 n;
+       struct rb_node *next;
+       struct tree_mod_elem *tm = first_tm;
+       unsigned long o_dst;
+       unsigned long o_src;
+       unsigned long p_size = sizeof(struct btrfs_key_ptr);
+
+       n = btrfs_header_nritems(eb);
+       while (tm && tm->elem.seq >= time_seq) {
+               /*
+                * all the operations are recorded with the operator used for
+                * the modification. as we're going backwards, we do the
+                * opposite of each operation here.
+                */
+               switch (tm->op) {
+               case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
+                       BUG_ON(tm->slot < n);
+               case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
+               case MOD_LOG_KEY_REMOVE:
+                       btrfs_set_node_key(eb, &tm->key, tm->slot);
+                       btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
+                       btrfs_set_node_ptr_generation(eb, tm->slot,
+                                                     tm->generation);
+                       n++;
+                       break;
+               case MOD_LOG_KEY_REPLACE:
+                       BUG_ON(tm->slot >= n);
+                       btrfs_set_node_key(eb, &tm->key, tm->slot);
+                       btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
+                       btrfs_set_node_ptr_generation(eb, tm->slot,
+                                                     tm->generation);
+                       break;
+               case MOD_LOG_KEY_ADD:
+                       if (tm->slot != n - 1) {
+                               o_dst = btrfs_node_key_ptr_offset(tm->slot);
+                               o_src = btrfs_node_key_ptr_offset(tm->slot + 1);
+                               memmove_extent_buffer(eb, o_dst, o_src, p_size);
+                       }
+                       n--;
+                       break;
+               case MOD_LOG_MOVE_KEYS:
+                       o_dst = btrfs_node_key_ptr_offset(tm->slot);
+                       o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
+                       memmove_extent_buffer(eb, o_dst, o_src,
+                                             tm->move.nr_items * p_size);
+                       break;
+               case MOD_LOG_ROOT_REPLACE:
+                       /*
+                        * this operation is special. for roots, this must be
+                        * handled explicitly before rewinding.
+                        * for non-roots, this operation may exist if the node
+                        * was a root: root A -> child B; then A gets empty and
+                        * B is promoted to the new root. in the mod log, we'll
+                        * have a root-replace operation for B, a tree block
+                        * that is not a root. we simply ignore that operation.
+                        */
+                       break;
+               }
+               next = rb_next(&tm->node);
+               if (!next)
+                       break;
+               tm = container_of(next, struct tree_mod_elem, node);
+               if (tm->index != first_tm->index)
+                       break;
+       }
+       btrfs_set_header_nritems(eb, n);
+}
+
+static struct extent_buffer *
+tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
+                   u64 time_seq)
+{
+       struct extent_buffer *eb_rewin;
+       struct tree_mod_elem *tm;
+
+       if (!time_seq)
+               return eb;
+
+       if (btrfs_header_level(eb) == 0)
+               return eb;
+
+       tm = tree_mod_log_search(fs_info, eb->start, time_seq);
+       if (!tm)
+               return eb;
+
+       if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
+               BUG_ON(tm->slot != 0);
+               eb_rewin = alloc_dummy_extent_buffer(eb->start,
+                                               fs_info->tree_root->nodesize);
+               BUG_ON(!eb_rewin);
+               btrfs_set_header_bytenr(eb_rewin, eb->start);
+               btrfs_set_header_backref_rev(eb_rewin,
+                                            btrfs_header_backref_rev(eb));
+               btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
+               btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
+       } else {
+               eb_rewin = btrfs_clone_extent_buffer(eb);
+               BUG_ON(!eb_rewin);
+       }
+
+       extent_buffer_get(eb_rewin);
+       free_extent_buffer(eb);
+
+       __tree_mod_log_rewind(eb_rewin, time_seq, tm);
+
+       return eb_rewin;
+}
+
+static inline struct extent_buffer *
+get_old_root(struct btrfs_root *root, u64 time_seq)
+{
+       struct tree_mod_elem *tm;
+       struct extent_buffer *eb;
+       struct tree_mod_root *old_root;
+       u64 old_generation;
+
+       tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
+       if (!tm)
+               return root->node;
+
+       old_root = &tm->old_root;
+       old_generation = tm->generation;
+
+       tm = tree_mod_log_search(root->fs_info, old_root->logical, time_seq);
+       /*
+        * there was an item in the log when __tree_mod_log_oldest_root
+        * returned. this one must not go away, because the time_seq passed to
+        * us must be blocking its removal.
+        */
+       BUG_ON(!tm);
+
+       if (old_root->logical == root->node->start) {
+               /* there are logged operations for the current root */
+               eb = btrfs_clone_extent_buffer(root->node);
+       } else {
+               /* there's a root replace operation for the current root */
+               eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT,
+                                              root->nodesize);
+               btrfs_set_header_bytenr(eb, eb->start);
+               btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
+               btrfs_set_header_owner(eb, root->root_key.objectid);
+       }
+       if (!eb)
+               return NULL;
+       btrfs_set_header_level(eb, old_root->level);
+       btrfs_set_header_generation(eb, old_generation);
+       __tree_mod_log_rewind(eb, time_seq, tm);
+
+       return eb;
+}
+
 static inline int should_cow_block(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct extent_buffer *buf)
@@ -739,7 +1390,11 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
                                if (!cur)
                                        return -EIO;
                        } else if (!uptodate) {
-                               btrfs_read_buffer(cur, gen);
+                               err = btrfs_read_buffer(cur, gen);
+                               if (err) {
+                                       free_extent_buffer(cur);
+                                       return err;
+                               }
                        }
                }
                if (search_start == 0)
@@ -854,20 +1509,18 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
                      int level, int *slot)
 {
-       if (level == 0) {
+       if (level == 0)
                return generic_bin_search(eb,
                                          offsetof(struct btrfs_leaf, items),
                                          sizeof(struct btrfs_item),
                                          key, btrfs_header_nritems(eb),
                                          slot);
-       } else {
+       else
                return generic_bin_search(eb,
                                          offsetof(struct btrfs_node, ptrs),
                                          sizeof(struct btrfs_key_ptr),
                                          key, btrfs_header_nritems(eb),
                                          slot);
-       }
-       return -1;
 }
 
 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
@@ -974,6 +1627,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                        goto enospc;
                }
 
+               tree_mod_log_set_root_pointer(root, child);
                rcu_assign_pointer(root->node, child);
 
                add_root_to_dirty_list(root);
@@ -987,7 +1641,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                free_extent_buffer(mid);
 
                root_sub_used(root, mid->len);
-               btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
+               btrfs_free_tree_block(trans, root, mid, 0, 1);
                /* once for the root ptr */
                free_extent_buffer_stale(mid);
                return 0;
@@ -1040,14 +1694,16 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                if (btrfs_header_nritems(right) == 0) {
                        clean_tree_block(trans, root, right);
                        btrfs_tree_unlock(right);
-                       del_ptr(trans, root, path, level + 1, pslot + 1);
+                       del_ptr(trans, root, path, level + 1, pslot + 1, 1);
                        root_sub_used(root, right->len);
-                       btrfs_free_tree_block(trans, root, right, 0, 1, 0);
+                       btrfs_free_tree_block(trans, root, right, 0, 1);
                        free_extent_buffer_stale(right);
                        right = NULL;
                } else {
                        struct btrfs_disk_key right_key;
                        btrfs_node_key(right, &right_key, 0);
+                       tree_mod_log_set_node_key(root->fs_info, parent,
+                                                 &right_key, pslot + 1, 0);
                        btrfs_set_node_key(parent, &right_key, pslot + 1);
                        btrfs_mark_buffer_dirty(parent);
                }
@@ -1082,15 +1738,17 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
        if (btrfs_header_nritems(mid) == 0) {
                clean_tree_block(trans, root, mid);
                btrfs_tree_unlock(mid);
-               del_ptr(trans, root, path, level + 1, pslot);
+               del_ptr(trans, root, path, level + 1, pslot, 1);
                root_sub_used(root, mid->len);
-               btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
+               btrfs_free_tree_block(trans, root, mid, 0, 1);
                free_extent_buffer_stale(mid);
                mid = NULL;
        } else {
                /* update the parent key to reflect our changes */
                struct btrfs_disk_key mid_key;
                btrfs_node_key(mid, &mid_key, 0);
+               tree_mod_log_set_node_key(root->fs_info, parent, &mid_key,
+                                         pslot, 0);
                btrfs_set_node_key(parent, &mid_key, pslot);
                btrfs_mark_buffer_dirty(parent);
        }
@@ -1188,6 +1846,8 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
                        struct btrfs_disk_key disk_key;
                        orig_slot += left_nr;
                        btrfs_node_key(mid, &disk_key, 0);
+                       tree_mod_log_set_node_key(root->fs_info, parent,
+                                                 &disk_key, pslot, 0);
                        btrfs_set_node_key(parent, &disk_key, pslot);
                        btrfs_mark_buffer_dirty(parent);
                        if (btrfs_header_nritems(left) > orig_slot) {
@@ -1239,6 +1899,8 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
                        struct btrfs_disk_key disk_key;
 
                        btrfs_node_key(right, &disk_key, 0);
+                       tree_mod_log_set_node_key(root->fs_info, parent,
+                                                 &disk_key, pslot + 1, 0);
                        btrfs_set_node_key(parent, &disk_key, pslot + 1);
                        btrfs_mark_buffer_dirty(parent);
 
@@ -1496,7 +2158,7 @@ static int
 read_block_for_search(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct btrfs_path *p,
                       struct extent_buffer **eb_ret, int level, int slot,
-                      struct btrfs_key *key)
+                      struct btrfs_key *key, u64 time_seq)
 {
        u64 blocknr;
        u64 gen;
@@ -1850,7 +2512,7 @@ cow_done:
                        }
 
                        err = read_block_for_search(trans, root, p,
-                                                   &b, level, slot, key);
+                                                   &b, level, slot, key, 0);
                        if (err == -EAGAIN)
                                goto again;
                        if (err) {
@@ -1921,6 +2583,115 @@ done:
        return ret;
 }
 
+/*
+ * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
+ * current state of the tree together with the operations recorded in the tree
+ * modification log to search for the key in a previous version of this tree, as
+ * denoted by the time_seq parameter.
+ *
+ * Naturally, there is no support for insert, delete or cow operations.
+ *
+ * The resulting path and return value will be set up as if we called
+ * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
+ */
+int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
+                         struct btrfs_path *p, u64 time_seq)
+{
+       struct extent_buffer *b;
+       int slot;
+       int ret;
+       int err;
+       int level;
+       int lowest_unlock = 1;
+       u8 lowest_level = 0;
+
+       lowest_level = p->lowest_level;
+       WARN_ON(p->nodes[0] != NULL);
+
+       if (p->search_commit_root) {
+               BUG_ON(time_seq);
+               return btrfs_search_slot(NULL, root, key, p, 0, 0);
+       }
+
+again:
+       b = get_old_root(root, time_seq);
+       extent_buffer_get(b);
+       level = btrfs_header_level(b);
+       btrfs_tree_read_lock(b);
+       p->locks[level] = BTRFS_READ_LOCK;
+
+       while (b) {
+               level = btrfs_header_level(b);
+               p->nodes[level] = b;
+               btrfs_clear_path_blocking(p, NULL, 0);
+
+               /*
+                * we have a lock on b and as long as we aren't changing
+                * the tree, there is no way to for the items in b to change.
+                * It is safe to drop the lock on our parent before we
+                * go through the expensive btree search on b.
+                */
+               btrfs_unlock_up_safe(p, level + 1);
+
+               ret = bin_search(b, key, level, &slot);
+
+               if (level != 0) {
+                       int dec = 0;
+                       if (ret && slot > 0) {
+                               dec = 1;
+                               slot -= 1;
+                       }
+                       p->slots[level] = slot;
+                       unlock_up(p, level, lowest_unlock, 0, NULL);
+
+                       if (level == lowest_level) {
+                               if (dec)
+                                       p->slots[level]++;
+                               goto done;
+                       }
+
+                       err = read_block_for_search(NULL, root, p, &b, level,
+                                                   slot, key, time_seq);
+                       if (err == -EAGAIN)
+                               goto again;
+                       if (err) {
+                               ret = err;
+                               goto done;
+                       }
+
+                       level = btrfs_header_level(b);
+                       err = btrfs_try_tree_read_lock(b);
+                       if (!err) {
+                               btrfs_set_path_blocking(p);
+                               btrfs_tree_read_lock(b);
+                               btrfs_clear_path_blocking(p, b,
+                                                         BTRFS_READ_LOCK);
+                       }
+                       p->locks[level] = BTRFS_READ_LOCK;
+                       p->nodes[level] = b;
+                       b = tree_mod_log_rewind(root->fs_info, b, time_seq);
+                       if (b != p->nodes[level]) {
+                               btrfs_tree_unlock_rw(p->nodes[level],
+                                                    p->locks[level]);
+                               p->locks[level] = 0;
+                               p->nodes[level] = b;
+                       }
+               } else {
+                       p->slots[level] = slot;
+                       unlock_up(p, level, lowest_unlock, 0, NULL);
+                       goto done;
+               }
+       }
+       ret = 1;
+done:
+       if (!p->leave_spinning)
+               btrfs_set_path_blocking(p);
+       if (ret < 0)
+               btrfs_release_path(p);
+
+       return ret;
+}
+
 /*
  * adjust the pointers going up the tree, starting at level
  * making sure the right key of each node is points to 'key'.
@@ -1941,6 +2712,7 @@ static void fixup_low_keys(struct btrfs_trans_handle *trans,
                if (!path->nodes[i])
                        break;
                t = path->nodes[i];
+               tree_mod_log_set_node_key(root->fs_info, t, key, tslot, 1);
                btrfs_set_node_key(t, key, tslot);
                btrfs_mark_buffer_dirty(path->nodes[i]);
                if (tslot != 0)
@@ -2023,12 +2795,16 @@ static int push_node_left(struct btrfs_trans_handle *trans,
        } else
                push_items = min(src_nritems - 8, push_items);
 
+       tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
+                            push_items);
        copy_extent_buffer(dst, src,
                           btrfs_node_key_ptr_offset(dst_nritems),
                           btrfs_node_key_ptr_offset(0),
                           push_items * sizeof(struct btrfs_key_ptr));
 
        if (push_items < src_nritems) {
+               tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
+                                    src_nritems - push_items);
                memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
                                      btrfs_node_key_ptr_offset(push_items),
                                      (src_nritems - push_items) *
@@ -2082,11 +2858,14 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
        if (max_push < push_items)
                push_items = max_push;
 
+       tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
        memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
                                      btrfs_node_key_ptr_offset(0),
                                      (dst_nritems) *
                                      sizeof(struct btrfs_key_ptr));
 
+       tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
+                            src_nritems - push_items, push_items);
        copy_extent_buffer(dst, src,
                           btrfs_node_key_ptr_offset(0),
                           btrfs_node_key_ptr_offset(src_nritems - push_items),
@@ -2129,7 +2908,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 
        c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
                                   root->root_key.objectid, &lower_key,
-                                  level, root->node->start, 0, 0);
+                                  level, root->node->start, 0);
        if (IS_ERR(c))
                return PTR_ERR(c);
 
@@ -2161,6 +2940,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(c);
 
        old = root->node;
+       tree_mod_log_set_root_pointer(root, c);
        rcu_assign_pointer(root->node, c);
 
        /* the super has an extra ref to root->node */
@@ -2184,10 +2964,11 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 static void insert_ptr(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct btrfs_path *path,
                       struct btrfs_disk_key *key, u64 bytenr,
-                      int slot, int level)
+                      int slot, int level, int tree_mod_log)
 {
        struct extent_buffer *lower;
        int nritems;
+       int ret;
 
        BUG_ON(!path->nodes[level]);
        btrfs_assert_tree_locked(path->nodes[level]);
@@ -2196,11 +2977,19 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
        BUG_ON(slot > nritems);
        BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
        if (slot != nritems) {
+               if (tree_mod_log && level)
+                       tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
+                                            slot, nritems - slot);
                memmove_extent_buffer(lower,
                              btrfs_node_key_ptr_offset(slot + 1),
                              btrfs_node_key_ptr_offset(slot),
                              (nritems - slot) * sizeof(struct btrfs_key_ptr));
        }
+       if (tree_mod_log && level) {
+               ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
+                                             MOD_LOG_KEY_ADD);
+               BUG_ON(ret < 0);
+       }
        btrfs_set_node_key(lower, key, slot);
        btrfs_set_node_blockptr(lower, slot, bytenr);
        WARN_ON(trans->transid == 0);
@@ -2252,7 +3041,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 
        split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
                                        root->root_key.objectid,
-                                       &disk_key, level, c->start, 0, 0);
+                                       &disk_key, level, c->start, 0);
        if (IS_ERR(split))
                return PTR_ERR(split);
 
@@ -2271,7 +3060,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
                            (unsigned long)btrfs_header_chunk_tree_uuid(split),
                            BTRFS_UUID_SIZE);
 
-
+       tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
        copy_extent_buffer(split, c,
                           btrfs_node_key_ptr_offset(0),
                           btrfs_node_key_ptr_offset(mid),
@@ -2284,7 +3073,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(split);
 
        insert_ptr(trans, root, path, &disk_key, split->start,
-                  path->slots[level + 1] + 1, level + 1);
+                  path->slots[level + 1] + 1, level + 1, 1);
 
        if (path->slots[level] >= mid) {
                path->slots[level] -= mid;
@@ -2821,7 +3610,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
        btrfs_set_header_nritems(l, mid);
        btrfs_item_key(right, &disk_key, 0);
        insert_ptr(trans, root, path, &disk_key, right->start,
-                  path->slots[1] + 1, 1);
+                  path->slots[1] + 1, 1, 0);
 
        btrfs_mark_buffer_dirty(right);
        btrfs_mark_buffer_dirty(l);
@@ -3004,7 +3793,7 @@ again:
 
        right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
                                        root->root_key.objectid,
-                                       &disk_key, 0, l->start, 0, 0);
+                                       &disk_key, 0, l->start, 0);
        if (IS_ERR(right))
                return PTR_ERR(right);
 
@@ -3028,7 +3817,7 @@ again:
                if (mid <= slot) {
                        btrfs_set_header_nritems(right, 0);
                        insert_ptr(trans, root, path, &disk_key, right->start,
-                                  path->slots[1] + 1, 1);
+                                  path->slots[1] + 1, 1, 0);
                        btrfs_tree_unlock(path->nodes[0]);
                        free_extent_buffer(path->nodes[0]);
                        path->nodes[0] = right;
@@ -3037,7 +3826,7 @@ again:
                } else {
                        btrfs_set_header_nritems(right, 0);
                        insert_ptr(trans, root, path, &disk_key, right->start,
-                                         path->slots[1], 1);
+                                         path->slots[1], 1, 0);
                        btrfs_tree_unlock(path->nodes[0]);
                        free_extent_buffer(path->nodes[0]);
                        path->nodes[0] = right;
@@ -3749,19 +4538,29 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
  * empty a node.
  */
 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                   struct btrfs_path *path, int level, int slot)
+                   struct btrfs_path *path, int level, int slot,
+                   int tree_mod_log)
 {
        struct extent_buffer *parent = path->nodes[level];
        u32 nritems;
+       int ret;
 
        nritems = btrfs_header_nritems(parent);
        if (slot != nritems - 1) {
+               if (tree_mod_log && level)
+                       tree_mod_log_eb_move(root->fs_info, parent, slot,
+                                            slot + 1, nritems - slot - 1);
                memmove_extent_buffer(parent,
                              btrfs_node_key_ptr_offset(slot),
                              btrfs_node_key_ptr_offset(slot + 1),
                              sizeof(struct btrfs_key_ptr) *
                              (nritems - slot - 1));
+       } else if (tree_mod_log && level) {
+               ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
+                                             MOD_LOG_KEY_REMOVE);
+               BUG_ON(ret < 0);
        }
+
        nritems--;
        btrfs_set_header_nritems(parent, nritems);
        if (nritems == 0 && parent == root->node) {
@@ -3793,7 +4592,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
                                    struct extent_buffer *leaf)
 {
        WARN_ON(btrfs_header_generation(leaf) != trans->transid);
-       del_ptr(trans, root, path, 1, path->slots[1]);
+       del_ptr(trans, root, path, 1, path->slots[1], 1);
 
        /*
         * btrfs_free_extent is expensive, we want to make sure we
@@ -3804,7 +4603,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
        root_sub_used(root, leaf->len);
 
        extent_buffer_get(leaf);
-       btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);
+       btrfs_free_tree_block(trans, root, leaf, 0, 1);
        free_extent_buffer_stale(leaf);
 }
 /*
@@ -4271,7 +5070,7 @@ again:
                next = c;
                next_rw_lock = path->locks[level];
                ret = read_block_for_search(NULL, root, path, &next, level,
-                                           slot, &key);
+                                           slot, &key, 0);
                if (ret == -EAGAIN)
                        goto again;
 
@@ -4308,7 +5107,7 @@ again:
                        break;
 
                ret = read_block_for_search(NULL, root, path, &next, level,
-                                           0, &key);
+                                           0, &key, 0);
                if (ret == -EAGAIN)
                        goto again;
 
index 8fd72331d6008c100e48db1c808566eb382187b2..0236d03c6732569a48a561049ea5a861d473da65 100644 (file)
@@ -173,6 +173,9 @@ static int btrfs_csum_sizes[] = { 4, 0 };
 #define BTRFS_FT_XATTR         8
 #define BTRFS_FT_MAX           9
 
+/* ioprio of readahead is set to idle */
+#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))
+
 /*
  * The key defines the order in the tree, and so it also defines (optimal)
  * block layout.
@@ -823,6 +826,14 @@ struct btrfs_csum_item {
        u8 csum;
 } __attribute__ ((__packed__));
 
+struct btrfs_dev_stats_item {
+       /*
+        * grow this item struct at the end for future enhancements and keep
+        * the existing values unchanged
+        */
+       __le64 values[BTRFS_DEV_STAT_VALUES_MAX];
+} __attribute__ ((__packed__));
+
 /* different types of block groups (and chunks) */
 #define BTRFS_BLOCK_GROUP_DATA         (1ULL << 0)
 #define BTRFS_BLOCK_GROUP_SYSTEM       (1ULL << 1)
@@ -1129,6 +1140,15 @@ struct btrfs_fs_info {
        spinlock_t delayed_iput_lock;
        struct list_head delayed_iputs;
 
+       /* this protects tree_mod_seq_list */
+       spinlock_t tree_mod_seq_lock;
+       atomic_t tree_mod_seq;
+       struct list_head tree_mod_seq_list;
+
+       /* this protects tree_mod_log */
+       rwlock_t tree_mod_log_lock;
+       struct rb_root tree_mod_log;
+
        atomic_t nr_async_submits;
        atomic_t async_submit_draining;
        atomic_t nr_async_bios;
@@ -1375,7 +1395,7 @@ struct btrfs_root {
        struct list_head root_list;
 
        spinlock_t orphan_lock;
-       struct list_head orphan_list;
+       atomic_t orphan_inodes;
        struct btrfs_block_rsv *orphan_block_rsv;
        int orphan_item_inserted;
        int orphan_cleanup_state;
@@ -1507,6 +1527,12 @@ struct btrfs_ioctl_defrag_range_args {
 
 #define BTRFS_BALANCE_ITEM_KEY 248
 
+/*
+ * Persistently stores the io stats in the device tree.
+ * One key for all stats, (0, BTRFS_DEV_STATS_KEY, devid).
+ */
+#define BTRFS_DEV_STATS_KEY    249
+
 /*
  * string items are for debugging.  They just store a short string of
  * data in the FS
@@ -2415,6 +2441,30 @@ static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
        return btrfs_item_size(eb, e) - offset;
 }
 
+/* btrfs_dev_stats_item */
+static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb,
+                                       struct btrfs_dev_stats_item *ptr,
+                                       int index)
+{
+       u64 val;
+
+       read_extent_buffer(eb, &val,
+                          offsetof(struct btrfs_dev_stats_item, values) +
+                           ((unsigned long)ptr) + (index * sizeof(u64)),
+                          sizeof(val));
+       return val;
+}
+
+static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb,
+                                            struct btrfs_dev_stats_item *ptr,
+                                            int index, u64 val)
+{
+       write_extent_buffer(eb, &val,
+                           offsetof(struct btrfs_dev_stats_item, values) +
+                            ((unsigned long)ptr) + (index * sizeof(u64)),
+                           sizeof(val));
+}
+
 static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
 {
        return sb->s_fs_info;
@@ -2496,11 +2546,11 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root, u32 blocksize,
                                        u64 parent, u64 root_objectid,
                                        struct btrfs_disk_key *key, int level,
-                                       u64 hint, u64 empty_size, int for_cow);
+                                       u64 hint, u64 empty_size);
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
-                          u64 parent, int last_ref, int for_cow);
+                          u64 parent, int last_ref);
 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
                                            struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize,
@@ -2659,6 +2709,8 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, struct btrfs_key *key, struct btrfs_path *p, int
                      ins_len, int cow);
+int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
+                         struct btrfs_path *p, u64 time_seq);
 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct extent_buffer *parent,
                       int start_slot, int cache_only, u64 *last_ret,
@@ -2922,7 +2974,6 @@ int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
 int btrfs_dirty_inode(struct inode *inode);
-int btrfs_update_time(struct file *file);
 struct inode *btrfs_alloc_inode(struct super_block *sb);
 void btrfs_destroy_inode(struct inode *inode);
 int btrfs_drop_inode(struct inode *inode);
@@ -3098,4 +3149,23 @@ void btrfs_reada_detach(void *handle);
 int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
                         u64 start, int err);
 
+/* delayed seq elem */
+struct seq_list {
+       struct list_head list;
+       u64 seq;
+       u32 flags;
+};
+
+void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
+                           struct seq_list *elem);
+void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
+                           struct seq_list *elem);
+
+static inline int is_fstree(u64 rootid)
+{
+       if (rootid == BTRFS_FS_TREE_OBJECTID ||
+           (s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
+               return 1;
+       return 0;
+}
 #endif
index 03e3748d84d02407c19c6d46648667a56f13ba3e..c18d0442ae6daa69a564ebba400f9ad09573ea1d 100644 (file)
@@ -669,8 +669,8 @@ static int btrfs_delayed_inode_reserve_metadata(
                return ret;
        } else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
                spin_lock(&BTRFS_I(inode)->lock);
-               if (BTRFS_I(inode)->delalloc_meta_reserved) {
-                       BTRFS_I(inode)->delalloc_meta_reserved = 0;
+               if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+                                      &BTRFS_I(inode)->runtime_flags)) {
                        spin_unlock(&BTRFS_I(inode)->lock);
                        release = true;
                        goto migrate;
@@ -1706,7 +1706,7 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
        btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
        btrfs_set_stack_inode_generation(inode_item,
                                         BTRFS_I(inode)->generation);
-       btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
+       btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
        btrfs_set_stack_inode_transid(inode_item, trans->transid);
        btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
        btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
@@ -1754,7 +1754,7 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
        set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
        inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
        BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
-       BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
+       inode->i_version = btrfs_stack_inode_sequence(inode_item);
        inode->i_rdev = 0;
        *rdev = btrfs_stack_inode_rdev(inode_item);
        BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
index 69f22e3ab3bc307974b5cae14f99310a498b54cf..13ae7b04790eaff72e8c23fb145fca8bfae88175 100644 (file)
@@ -525,7 +525,7 @@ static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
        ref->is_head = 0;
        ref->in_tree = 1;
 
-       if (need_ref_seq(for_cow, ref_root))
+       if (is_fstree(ref_root))
                seq = inc_delayed_seq(delayed_refs);
        ref->seq = seq;
 
@@ -584,7 +584,7 @@ static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
        ref->is_head = 0;
        ref->in_tree = 1;
 
-       if (need_ref_seq(for_cow, ref_root))
+       if (is_fstree(ref_root))
                seq = inc_delayed_seq(delayed_refs);
        ref->seq = seq;
 
@@ -658,10 +658,11 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
        add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
                                   num_bytes, parent, ref_root, level, action,
                                   for_cow);
-       if (!need_ref_seq(for_cow, ref_root) &&
+       if (!is_fstree(ref_root) &&
            waitqueue_active(&delayed_refs->seq_wait))
                wake_up(&delayed_refs->seq_wait);
        spin_unlock(&delayed_refs->lock);
+
        return 0;
 }
 
@@ -706,10 +707,11 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
        add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
                                   num_bytes, parent, ref_root, owner, offset,
                                   action, for_cow);
-       if (!need_ref_seq(for_cow, ref_root) &&
+       if (!is_fstree(ref_root) &&
            waitqueue_active(&delayed_refs->seq_wait))
                wake_up(&delayed_refs->seq_wait);
        spin_unlock(&delayed_refs->lock);
+
        return 0;
 }
 
index d8f244d9492511e3b108b26bcf4da1bc9fbf6826..413927fb9957e41fdcfb82511e63d416b8a36c76 100644 (file)
@@ -195,11 +195,6 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                           struct list_head *cluster, u64 search_start);
 
-struct seq_list {
-       struct list_head list;
-       u64 seq;
-};
-
 static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
 {
        assert_spin_locked(&delayed_refs->lock);
@@ -229,25 +224,6 @@ btrfs_put_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
 int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq);
 
-/*
- * delayed refs with a ref_seq > 0 must be held back during backref walking.
- * this only applies to items in one of the fs-trees. for_cow items never need
- * to be held back, so they won't get a ref_seq number.
- */
-static inline int need_ref_seq(int for_cow, u64 rootid)
-{
-       if (for_cow)
-               return 0;
-
-       if (rootid == BTRFS_FS_TREE_OBJECTID)
-               return 1;
-
-       if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
-               return 1;
-
-       return 0;
-}
-
 /*
  * a node might live in a head or a regular ref, this lets you
  * test for the proper type to use.
index e1fe74a2ce16e6a4e0b38129160f484e642c42fa..7ae51decf6d3d0fb5c3d44bb7791f843c74aa376 100644 (file)
@@ -1153,7 +1153,6 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
        root->orphan_block_rsv = NULL;
 
        INIT_LIST_HEAD(&root->dirty_list);
-       INIT_LIST_HEAD(&root->orphan_list);
        INIT_LIST_HEAD(&root->root_list);
        spin_lock_init(&root->orphan_lock);
        spin_lock_init(&root->inode_lock);
@@ -1166,6 +1165,7 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
        atomic_set(&root->log_commit[0], 0);
        atomic_set(&root->log_commit[1], 0);
        atomic_set(&root->log_writers, 0);
+       atomic_set(&root->orphan_inodes, 0);
        root->log_batch = 0;
        root->log_transid = 0;
        root->last_log_commit = 0;
@@ -1252,7 +1252,7 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
 
        leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
                                      BTRFS_TREE_LOG_OBJECTID, NULL,
-                                     0, 0, 0, 0);
+                                     0, 0, 0);
        if (IS_ERR(leaf)) {
                kfree(root);
                return ERR_CAST(leaf);
@@ -1914,11 +1914,14 @@ int open_ctree(struct super_block *sb,
        spin_lock_init(&fs_info->delayed_iput_lock);
        spin_lock_init(&fs_info->defrag_inodes_lock);
        spin_lock_init(&fs_info->free_chunk_lock);
+       spin_lock_init(&fs_info->tree_mod_seq_lock);
+       rwlock_init(&fs_info->tree_mod_log_lock);
        mutex_init(&fs_info->reloc_mutex);
 
        init_completion(&fs_info->kobj_unregister);
        INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
        INIT_LIST_HEAD(&fs_info->space_info);
+       INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
        btrfs_mapping_init(&fs_info->mapping_tree);
        btrfs_init_block_rsv(&fs_info->global_block_rsv);
        btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
@@ -1931,12 +1934,14 @@ int open_ctree(struct super_block *sb,
        atomic_set(&fs_info->async_submit_draining, 0);
        atomic_set(&fs_info->nr_async_bios, 0);
        atomic_set(&fs_info->defrag_running, 0);
+       atomic_set(&fs_info->tree_mod_seq, 0);
        fs_info->sb = sb;
        fs_info->max_inline = 8192 * 1024;
        fs_info->metadata_ratio = 0;
        fs_info->defrag_inodes = RB_ROOT;
        fs_info->trans_no_join = 0;
        fs_info->free_chunk_space = 0;
+       fs_info->tree_mod_log = RB_ROOT;
 
        /* readahead state */
        INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
@@ -2001,7 +2006,8 @@ int open_ctree(struct super_block *sb,
        BTRFS_I(fs_info->btree_inode)->root = tree_root;
        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
               sizeof(struct btrfs_key));
-       BTRFS_I(fs_info->btree_inode)->dummy_inode = 1;
+       set_bit(BTRFS_INODE_DUMMY,
+               &BTRFS_I(fs_info->btree_inode)->runtime_flags);
        insert_inode_hash(fs_info->btree_inode);
 
        spin_lock_init(&fs_info->block_group_cache_lock);
@@ -2353,6 +2359,13 @@ retry_root_backup:
        fs_info->generation = generation;
        fs_info->last_trans_committed = generation;
 
+       ret = btrfs_init_dev_stats(fs_info);
+       if (ret) {
+               printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
+                      ret);
+               goto fail_block_groups;
+       }
+
        ret = btrfs_init_space_info(fs_info);
        if (ret) {
                printk(KERN_ERR "Failed to initial space info: %d\n", ret);
@@ -2556,18 +2569,19 @@ recovery_tree_root:
 
 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 {
-       char b[BDEVNAME_SIZE];
-
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
+               struct btrfs_device *device = (struct btrfs_device *)
+                       bh->b_private;
+
                printk_ratelimited(KERN_WARNING "lost page write due to "
-                                       "I/O error on %s\n",
-                                      bdevname(bh->b_bdev, b));
+                                  "I/O error on %s\n", device->name);
                /* note, we dont' set_buffer_write_io_error because we have
                 * our own ways of dealing with the IO errors
                 */
                clear_buffer_uptodate(bh);
+               btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
        }
        unlock_buffer(bh);
        put_bh(bh);
@@ -2682,6 +2696,7 @@ static int write_dev_supers(struct btrfs_device *device,
                        set_buffer_uptodate(bh);
                        lock_buffer(bh);
                        bh->b_end_io = btrfs_end_buffer_write_sync;
+                       bh->b_private = device;
                }
 
                /*
@@ -2740,6 +2755,9 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
                }
                if (!bio_flagged(bio, BIO_UPTODATE)) {
                        ret = -EIO;
+                       if (!bio_flagged(bio, BIO_EOPNOTSUPP))
+                               btrfs_dev_stat_inc_and_print(device,
+                                       BTRFS_DEV_STAT_FLUSH_ERRS);
                }
 
                /* drop the reference from the wait == 0 run */
@@ -2902,19 +2920,6 @@ int write_ctree_super(struct btrfs_trans_handle *trans,
        return ret;
 }
 
-/* Kill all outstanding I/O */
-void btrfs_abort_devices(struct btrfs_root *root)
-{
-       struct list_head *head;
-       struct btrfs_device *dev;
-       mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
-       head = &root->fs_info->fs_devices->devices;
-       list_for_each_entry_rcu(dev, head, dev_list) {
-               blk_abort_queue(dev->bdev->bd_disk->queue);
-       }
-       mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
-}
-
 void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
 {
        spin_lock(&fs_info->fs_roots_radix_lock);
@@ -3671,17 +3676,6 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
        return 0;
 }
 
-static int btree_writepage_io_failed_hook(struct bio *bio, struct page *page,
-                                         u64 start, u64 end,
-                                         struct extent_state *state)
-{
-       struct super_block *sb = page->mapping->host->i_sb;
-       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
-       btrfs_error(fs_info, -EIO,
-                   "Error occured while writing out btree at %llu", start);
-       return -EIO;
-}
-
 static struct extent_io_ops btree_extent_io_ops = {
        .write_cache_pages_lock_hook = btree_lock_page_hook,
        .readpage_end_io_hook = btree_readpage_end_io_hook,
@@ -3689,5 +3683,4 @@ static struct extent_io_ops btree_extent_io_ops = {
        .submit_bio_hook = btree_submit_bio_hook,
        /* note we're sharing with inode.c for the merge bio hook */
        .merge_bio_hook = btrfs_merge_bio_hook,
-       .writepage_io_failed_hook = btree_writepage_io_failed_hook,
 };
index ab1830aaf0edbffba6a0cef86d13e9b3f2742cda..05b3fab39f7e814fc8c958e125f5a14c7e39d7f9 100644 (file)
@@ -89,7 +89,6 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 int btrfs_cleanup_transaction(struct btrfs_root *root);
 void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
                                  struct btrfs_root *root);
-void btrfs_abort_devices(struct btrfs_root *root);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void btrfs_init_lockdep(void);
index e887ee62b6d4ba0a98f7e2437323eecfca88bf23..614f34a899c2db468792f1ef8406c5a366739258 100644 (file)
                                             parent_root_objectid) / 4)
 #define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid) / 4)
 
-static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
-                          int connectable)
+static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+                          struct inode *parent)
 {
        struct btrfs_fid *fid = (struct btrfs_fid *)fh;
-       struct inode *inode = dentry->d_inode;
        int len = *max_len;
        int type;
 
-       if (connectable && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
+       if (parent && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
                *max_len = BTRFS_FID_SIZE_CONNECTABLE;
                return 255;
        } else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) {
@@ -36,19 +35,13 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
        fid->root_objectid = BTRFS_I(inode)->root->objectid;
        fid->gen = inode->i_generation;
 
-       if (connectable && !S_ISDIR(inode->i_mode)) {
-               struct inode *parent;
+       if (parent) {
                u64 parent_root_id;
 
-               spin_lock(&dentry->d_lock);
-
-               parent = dentry->d_parent->d_inode;
                fid->parent_objectid = BTRFS_I(parent)->location.objectid;
                fid->parent_gen = parent->i_generation;
                parent_root_id = BTRFS_I(parent)->root->objectid;
 
-               spin_unlock(&dentry->d_lock);
-
                if (parent_root_id != fid->root_objectid) {
                        fid->parent_root_objectid = parent_root_id;
                        len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
index 49fd7b66d57b272c7aeaea7db4b1bbd0985f8aa2..4b5a1e1bdefbe095c239b464e55c9699e865175b 100644 (file)
@@ -3578,7 +3578,7 @@ again:
        space_info->chunk_alloc = 0;
        spin_unlock(&space_info->lock);
 out:
-       mutex_unlock(&extent_root->fs_info->chunk_mutex);
+       mutex_unlock(&fs_info->chunk_mutex);
        return ret;
 }
 
@@ -4355,10 +4355,9 @@ static unsigned drop_outstanding_extent(struct inode *inode)
        BTRFS_I(inode)->outstanding_extents--;
 
        if (BTRFS_I(inode)->outstanding_extents == 0 &&
-           BTRFS_I(inode)->delalloc_meta_reserved) {
+           test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+                              &BTRFS_I(inode)->runtime_flags))
                drop_inode_space = 1;
-               BTRFS_I(inode)->delalloc_meta_reserved = 0;
-       }
 
        /*
         * If we have more or the same amount of outsanding extents than we have
@@ -4465,7 +4464,8 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
         * Add an item to reserve for updating the inode when we complete the
         * delalloc io.
         */
-       if (!BTRFS_I(inode)->delalloc_meta_reserved) {
+       if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+                     &BTRFS_I(inode)->runtime_flags)) {
                nr_extents++;
                extra_reserve = 1;
        }
@@ -4511,7 +4511,8 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
        spin_lock(&BTRFS_I(inode)->lock);
        if (extra_reserve) {
-               BTRFS_I(inode)->delalloc_meta_reserved = 1;
+               set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+                       &BTRFS_I(inode)->runtime_flags);
                nr_extents--;
        }
        BTRFS_I(inode)->reserved_extents += nr_extents;
@@ -5217,7 +5218,7 @@ out:
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
-                          u64 parent, int last_ref, int for_cow)
+                          u64 parent, int last_ref)
 {
        struct btrfs_block_group_cache *cache = NULL;
        int ret;
@@ -5227,7 +5228,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                                        buf->start, buf->len,
                                        parent, root->root_key.objectid,
                                        btrfs_header_level(buf),
-                                       BTRFS_DROP_DELAYED_REF, NULL, for_cow);
+                                       BTRFS_DROP_DELAYED_REF, NULL, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
 
@@ -6249,7 +6250,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root, u32 blocksize,
                                        u64 parent, u64 root_objectid,
                                        struct btrfs_disk_key *key, int level,
-                                       u64 hint, u64 empty_size, int for_cow)
+                                       u64 hint, u64 empty_size)
 {
        struct btrfs_key ins;
        struct btrfs_block_rsv *block_rsv;
@@ -6297,7 +6298,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                        ins.objectid,
                                        ins.offset, parent, root_objectid,
                                        level, BTRFS_ADD_DELAYED_EXTENT,
-                                       extent_op, for_cow);
+                                       extent_op, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        return buf;
@@ -6715,7 +6716,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                               btrfs_header_owner(path->nodes[level + 1]));
        }
 
-       btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1, 0);
+       btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
 out:
        wc->refs[level] = 0;
        wc->flags[level] = 0;
index c9018a05036e943a52ad91d81019bb4b934b6b9a..2c8f7b2046173954f720125a6e53e96de3c7727e 100644 (file)
@@ -186,7 +186,6 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
                        return parent;
        }
 
-       entry = rb_entry(node, struct tree_entry, rb_node);
        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
@@ -413,7 +412,7 @@ static struct extent_state *next_state(struct extent_state *state)
 
 /*
  * utility function to clear some bits in an extent state struct.
- * it will optionally wake up any one waiting on this state (wake == 1)
+ * it will optionally wake up anyone waiting on this state (wake == 1).
  *
  * If no bits are set on the state struct after clearing things, the
  * struct is freed and removed from the tree
@@ -570,10 +569,8 @@ hit_next:
                if (err)
                        goto out;
                if (state->end <= end) {
-                       clear_state_bit(tree, state, &bits, wake);
-                       if (last_end == (u64)-1)
-                               goto out;
-                       start = last_end + 1;
+                       state = clear_state_bit(tree, state, &bits, wake);
+                       goto next;
                }
                goto search_again;
        }
@@ -781,7 +778,6 @@ hit_next:
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
-               struct rb_node *next_node;
                if (state->state & exclusive_bits) {
                        *failed_start = state->start;
                        err = -EEXIST;
@@ -789,20 +785,15 @@ hit_next:
                }
 
                set_state_bits(tree, state, &bits);
-
                cache_state(state, cached_state);
                merge_state(tree, state);
                if (last_end == (u64)-1)
                        goto out;
-
                start = last_end + 1;
-               next_node = rb_next(&state->rb_node);
-               if (next_node && start < end && prealloc && !need_resched()) {
-                       state = rb_entry(next_node, struct extent_state,
-                                        rb_node);
-                       if (state->start == start)
-                               goto hit_next;
-               }
+               state = next_state(state);
+               if (start < end && state && state->start == start &&
+                   !need_resched())
+                       goto hit_next;
                goto search_again;
        }
 
@@ -845,6 +836,10 @@ hit_next:
                        if (last_end == (u64)-1)
                                goto out;
                        start = last_end + 1;
+                       state = next_state(state);
+                       if (start < end && state && state->start == start &&
+                           !need_resched())
+                               goto hit_next;
                }
                goto search_again;
        }
@@ -994,21 +989,14 @@ hit_next:
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
-               struct rb_node *next_node;
-
                set_state_bits(tree, state, &bits);
-               clear_state_bit(tree, state, &clear_bits, 0);
+               state = clear_state_bit(tree, state, &clear_bits, 0);
                if (last_end == (u64)-1)
                        goto out;
-
                start = last_end + 1;
-               next_node = rb_next(&state->rb_node);
-               if (next_node && start < end && prealloc && !need_resched()) {
-                       state = rb_entry(next_node, struct extent_state,
-                                        rb_node);
-                       if (state->start == start)
-                               goto hit_next;
-               }
+               if (start < end && state && state->start == start &&
+                   !need_resched())
+                       goto hit_next;
                goto search_again;
        }
 
@@ -1042,10 +1030,13 @@ hit_next:
                        goto out;
                if (state->end <= end) {
                        set_state_bits(tree, state, &bits);
-                       clear_state_bit(tree, state, &clear_bits, 0);
+                       state = clear_state_bit(tree, state, &clear_bits, 0);
                        if (last_end == (u64)-1)
                                goto out;
                        start = last_end + 1;
+                       if (start < end && state && state->start == start &&
+                           !need_resched())
+                               goto hit_next;
                }
                goto search_again;
        }
@@ -1173,9 +1164,8 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                              cached_state, mask);
 }
 
-static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
-                                u64 end, struct extent_state **cached_state,
-                                gfp_t mask)
+int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
+                         struct extent_state **cached_state, gfp_t mask)
 {
        return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
                                cached_state, mask);
@@ -1293,7 +1283,7 @@ out:
  * returned if we find something, and *start_ret and *end_ret are
  * set to reflect the state struct that was found.
  *
- * If nothing was found, 1 is returned, < 0 on error
+ * If nothing was found, 1 is returned. If something was found, 0 is returned.
  */
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
                          u64 *start_ret, u64 *end_ret, int bits)
@@ -1923,6 +1913,7 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
        if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
                /* try to remap that extent elsewhere? */
                bio_put(bio);
+               btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
                return -EIO;
        }
 
@@ -2222,17 +2213,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
                        uptodate = 0;
        }
 
-       if (!uptodate && tree->ops &&
-           tree->ops->writepage_io_failed_hook) {
-               ret = tree->ops->writepage_io_failed_hook(NULL, page,
-                                                start, end, NULL);
-               /* Writeback already completed */
-               if (ret == 0)
-                       return 1;
-       }
-
        if (!uptodate) {
-               clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
                ClearPageUptodate(page);
                SetPageError(page);
        }
@@ -2347,10 +2328,23 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
                        ret = tree->ops->readpage_end_io_hook(page, start, end,
                                                              state, mirror);
-                       if (ret)
+                       if (ret) {
+                               /* no IO indicated but software detected errors
+                                * in the block, either checksum errors or
+                                * issues with the contents */
+                               struct btrfs_root *root =
+                                       BTRFS_I(page->mapping->host)->root;
+                               struct btrfs_device *device;
+
                                uptodate = 0;
-                       else
+                               device = btrfs_find_device_for_logical(
+                                               root, start, mirror);
+                               if (device)
+                                       btrfs_dev_stat_inc_and_print(device,
+                                               BTRFS_DEV_STAT_CORRUPTION_ERRS);
+                       } else {
                                clean_io_failure(start, page);
+                       }
                }
 
                if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
@@ -3164,7 +3158,7 @@ static int write_one_eb(struct extent_buffer *eb,
        u64 offset = eb->start;
        unsigned long i, num_pages;
        int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
-       int ret;
+       int ret = 0;
 
        clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
        num_pages = num_extent_pages(eb->start, eb->len);
@@ -3930,6 +3924,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
        eb->start = start;
        eb->len = len;
        eb->tree = tree;
+       eb->bflags = 0;
        rwlock_init(&eb->lock);
        atomic_set(&eb->write_locks, 0);
        atomic_set(&eb->read_locks, 0);
@@ -3967,6 +3962,60 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
        return eb;
 }
 
+struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
+{
+       unsigned long i;
+       struct page *p;
+       struct extent_buffer *new;
+       unsigned long num_pages = num_extent_pages(src->start, src->len);
+
+       new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
+       if (new == NULL)
+               return NULL;
+
+       for (i = 0; i < num_pages; i++) {
+               p = alloc_page(GFP_ATOMIC);
+               BUG_ON(!p);
+               attach_extent_buffer_page(new, p);
+               WARN_ON(PageDirty(p));
+               SetPageUptodate(p);
+               new->pages[i] = p;
+       }
+
+       copy_extent_buffer(new, src, 0, 0, src->len);
+       set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
+       set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
+
+       return new;
+}
+
+struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
+{
+       struct extent_buffer *eb;
+       unsigned long num_pages = num_extent_pages(0, len);
+       unsigned long i;
+
+       eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
+       if (!eb)
+               return NULL;
+
+       for (i = 0; i < num_pages; i++) {
+               eb->pages[i] = alloc_page(GFP_ATOMIC);
+               if (!eb->pages[i])
+                       goto err;
+       }
+       set_extent_buffer_uptodate(eb);
+       btrfs_set_header_nritems(eb, 0);
+       set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
+
+       return eb;
+err:
+       for (i--; i > 0; i--)
+               __free_page(eb->pages[i]);
+       __free_extent_buffer(eb);
+       return NULL;
+}
+
 static int extent_buffer_under_io(struct extent_buffer *eb)
 {
        return (atomic_read(&eb->io_pages) ||
@@ -3981,18 +4030,21 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
                                                unsigned long start_idx)
 {
        unsigned long index;
+       unsigned long num_pages;
        struct page *page;
+       int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
 
        BUG_ON(extent_buffer_under_io(eb));
 
-       index = num_extent_pages(eb->start, eb->len);
+       num_pages = num_extent_pages(eb->start, eb->len);
+       index = start_idx + num_pages;
        if (start_idx >= index)
                return;
 
        do {
                index--;
                page = extent_buffer_page(eb, index);
-               if (page) {
+               if (page && mapped) {
                        spin_lock(&page->mapping->private_lock);
                        /*
                         * We do this since we'll remove the pages after we've
@@ -4017,6 +4069,8 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
                        }
                        spin_unlock(&page->mapping->private_lock);
 
+               }
+               if (page) {
                        /* One for when we alloced the page */
                        page_cache_release(page);
                }
@@ -4235,14 +4289,18 @@ static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
 {
        WARN_ON(atomic_read(&eb->refs) == 0);
        if (atomic_dec_and_test(&eb->refs)) {
-               struct extent_io_tree *tree = eb->tree;
+               if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
+                       spin_unlock(&eb->refs_lock);
+               } else {
+                       struct extent_io_tree *tree = eb->tree;
 
-               spin_unlock(&eb->refs_lock);
+                       spin_unlock(&eb->refs_lock);
 
-               spin_lock(&tree->buffer_lock);
-               radix_tree_delete(&tree->buffer,
-                                 eb->start >> PAGE_CACHE_SHIFT);
-               spin_unlock(&tree->buffer_lock);
+                       spin_lock(&tree->buffer_lock);
+                       radix_tree_delete(&tree->buffer,
+                                         eb->start >> PAGE_CACHE_SHIFT);
+                       spin_unlock(&tree->buffer_lock);
+               }
 
                /* Should be safe to release our pages at this point */
                btrfs_release_extent_buffer_page(eb, 0);
@@ -4259,6 +4317,10 @@ void free_extent_buffer(struct extent_buffer *eb)
                return;
 
        spin_lock(&eb->refs_lock);
+       if (atomic_read(&eb->refs) == 2 &&
+           test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
+               atomic_dec(&eb->refs);
+
        if (atomic_read(&eb->refs) == 2 &&
            test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
            !extent_buffer_under_io(eb) &&
index b516c3b8dec68d825e380a1930976f34c8a3e1a4..25900af5b15d43e6bdfe0cef7c865ac2aa81bd36 100644 (file)
@@ -39,6 +39,7 @@
 #define EXTENT_BUFFER_STALE 6
 #define EXTENT_BUFFER_WRITEBACK 7
 #define EXTENT_BUFFER_IOERR 8
+#define EXTENT_BUFFER_DUMMY 9
 
 /* these are flags for extent_clear_unlock_delalloc */
 #define EXTENT_CLEAR_UNLOCK_PAGE 0x1
@@ -75,9 +76,6 @@ struct extent_io_ops {
                              unsigned long bio_flags);
        int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
        int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
-       int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
-                                       u64 start, u64 end,
-                                      struct extent_state *state);
        int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
                                    struct extent_state *state, int mirror);
        int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
@@ -225,6 +223,8 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                   struct extent_state **cached_state, gfp_t mask);
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                        struct extent_state **cached_state, gfp_t mask);
+int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
+                         struct extent_state **cached_state, gfp_t mask);
 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                   gfp_t mask);
 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
@@ -265,6 +265,8 @@ void set_page_extent_mapped(struct page *page);
 
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
                                          u64 start, unsigned long len);
+struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len);
+struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
                                         u64 start, unsigned long len);
 void free_extent_buffer(struct extent_buffer *eb);
index 53bf2d764bbc4f5814db04710d3123d03c3779ba..70dc8ca73e257bc3a1e7a96ea48009bff093af9a 100644 (file)
@@ -65,6 +65,21 @@ struct inode_defrag {
        int cycled;
 };
 
+static int __compare_inode_defrag(struct inode_defrag *defrag1,
+                                 struct inode_defrag *defrag2)
+{
+       if (defrag1->root > defrag2->root)
+               return 1;
+       else if (defrag1->root < defrag2->root)
+               return -1;
+       else if (defrag1->ino > defrag2->ino)
+               return 1;
+       else if (defrag1->ino < defrag2->ino)
+               return -1;
+       else
+               return 0;
+}
+
 /* pop a record for an inode into the defrag tree.  The lock
  * must be held already
  *
@@ -81,15 +96,17 @@ static void __btrfs_add_inode_defrag(struct inode *inode,
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;
+       int ret;
 
        p = &root->fs_info->defrag_inodes.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);
 
-               if (defrag->ino < entry->ino)
+               ret = __compare_inode_defrag(defrag, entry);
+               if (ret < 0)
                        p = &parent->rb_left;
-               else if (defrag->ino > entry->ino)
+               else if (ret > 0)
                        p = &parent->rb_right;
                else {
                        /* if we're reinserting an entry for
@@ -103,7 +120,7 @@ static void __btrfs_add_inode_defrag(struct inode *inode,
                        goto exists;
                }
        }
-       BTRFS_I(inode)->in_defrag = 1;
+       set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
        return;
@@ -131,7 +148,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
        if (btrfs_fs_closing(root->fs_info))
                return 0;
 
-       if (BTRFS_I(inode)->in_defrag)
+       if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                return 0;
 
        if (trans)
@@ -148,7 +165,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
        defrag->root = root->root_key.objectid;
 
        spin_lock(&root->fs_info->defrag_inodes_lock);
-       if (!BTRFS_I(inode)->in_defrag)
+       if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                __btrfs_add_inode_defrag(inode, defrag);
        else
                kfree(defrag);
@@ -159,28 +176,35 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 /*
  * must be called with the defrag_inodes lock held
  */
-struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, u64 ino,
+struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
+                                            u64 root, u64 ino,
                                             struct rb_node **next)
 {
        struct inode_defrag *entry = NULL;
+       struct inode_defrag tmp;
        struct rb_node *p;
        struct rb_node *parent = NULL;
+       int ret;
+
+       tmp.ino = ino;
+       tmp.root = root;
 
        p = info->defrag_inodes.rb_node;
        while (p) {
                parent = p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);
 
-               if (ino < entry->ino)
+               ret = __compare_inode_defrag(&tmp, entry);
+               if (ret < 0)
                        p = parent->rb_left;
-               else if (ino > entry->ino)
+               else if (ret > 0)
                        p = parent->rb_right;
                else
                        return entry;
        }
 
        if (next) {
-               while (parent && ino > entry->ino) {
+               while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
                        parent = rb_next(parent);
                        entry = rb_entry(parent, struct inode_defrag, rb_node);
                }
@@ -202,6 +226,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
        struct btrfs_key key;
        struct btrfs_ioctl_defrag_range_args range;
        u64 first_ino = 0;
+       u64 root_objectid = 0;
        int num_defrag;
        int defrag_batch = 1024;
 
@@ -214,11 +239,14 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
                n = NULL;
 
                /* find an inode to defrag */
-               defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n);
+               defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
+                                                first_ino, &n);
                if (!defrag) {
-                       if (n)
-                               defrag = rb_entry(n, struct inode_defrag, rb_node);
-                       else if (first_ino) {
+                       if (n) {
+                               defrag = rb_entry(n, struct inode_defrag,
+                                                 rb_node);
+                       } else if (root_objectid || first_ino) {
+                               root_objectid = 0;
                                first_ino = 0;
                                continue;
                        } else {
@@ -228,6 +256,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 
                /* remove it from the rbtree */
                first_ino = defrag->ino + 1;
+               root_objectid = defrag->root;
                rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
 
                if (btrfs_fs_closing(fs_info))
@@ -252,7 +281,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
                        goto next;
 
                /* do a chunk of defrag */
-               BTRFS_I(inode)->in_defrag = 0;
+               clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
                range.start = defrag->last_offset;
                num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                               defrag_batch);
@@ -1404,12 +1433,11 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                goto out;
        }
 
-       err = btrfs_update_time(file);
+       err = file_update_time(file);
        if (err) {
                mutex_unlock(&inode->i_mutex);
                goto out;
        }
-       BTRFS_I(inode)->sequence++;
 
        start_pos = round_down(pos, root->sectorsize);
        if (start_pos > i_size_read(inode)) {
@@ -1466,8 +1494,8 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
         * flush down new bytes that may have been written if the
         * application were using truncate to replace a file in place.
         */
-       if (BTRFS_I(inode)->ordered_data_close) {
-               BTRFS_I(inode)->ordered_data_close = 0;
+       if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
+                              &BTRFS_I(inode)->runtime_flags)) {
                btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
                if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
                        filemap_flush(inode->i_mapping);
@@ -1498,14 +1526,15 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        trace_btrfs_sync_file(file, datasync);
 
-       ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
-       if (ret)
-               return ret;
        mutex_lock(&inode->i_mutex);
 
-       /* we wait first, since the writeback may change the inode */
+       /*
+        * we wait first, since the writeback may change the inode; also,
+        * btrfs_wait_ordered_range does a filemap_write_and_wait_range, which
+        * is why we don't do it above like other file systems.
+        */
        root->log_batch++;
-       btrfs_wait_ordered_range(inode, 0, (u64)-1);
+       btrfs_wait_ordered_range(inode, start, end);
        root->log_batch++;
 
        /*
@@ -1523,7 +1552,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
         * syncing
         */
        smp_mb();
-       if (BTRFS_I(inode)->last_trans <=
+       if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
+           BTRFS_I(inode)->last_trans <=
            root->fs_info->last_trans_committed) {
                BTRFS_I(inode)->last_trans = 0;
                mutex_unlock(&inode->i_mutex);
index 202008ec367d4c4c2cfcf73f7289692dd910b25c..81296c57405a5d53a27dba626a4d6201829bd578 100644 (file)
@@ -33,6 +33,8 @@
 
 static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info);
+static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
+                             struct btrfs_free_space *info);
 
 static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
                                               struct btrfs_path *path,
@@ -75,7 +77,8 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
                return ERR_PTR(-ENOENT);
        }
 
-       inode->i_mapping->flags &= ~__GFP_FS;
+       mapping_set_gfp_mask(inode->i_mapping,
+                       mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 
        return inode;
 }
@@ -365,7 +368,7 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
 
 static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
 {
-       u64 *val;
+       __le64 *val;
 
        io_ctl_map_page(io_ctl, 1);
 
@@ -388,7 +391,7 @@ static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
 
 static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
 {
-       u64 *gen;
+       __le64 *gen;
 
        /*
         * Skip the crc area.  If we don't check crcs then we just have a 64bit
@@ -584,6 +587,44 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
        return 0;
 }
 
+/*
+ * Since we attach pinned extents after the fact we can have contiguous sections
+ * of free space that are split up in entries.  This poses a problem with the
+ * tree logging stuff since it could have allocated across what appears to be 2
+ * entries since we would have merged the entries when adding the pinned extents
+ * back to the free space cache.  So run through the space cache that we just
+ * loaded and merge contiguous entries.  This will make the log replay stuff not
+ * blow up and it will make for nicer allocator behavior.
+ */
+static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
+{
+       struct btrfs_free_space *e, *prev = NULL;
+       struct rb_node *n;
+
+again:
+       spin_lock(&ctl->tree_lock);
+       for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
+               e = rb_entry(n, struct btrfs_free_space, offset_index);
+               if (!prev)
+                       goto next;
+               if (e->bitmap || prev->bitmap)
+                       goto next;
+               if (prev->offset + prev->bytes == e->offset) {
+                       unlink_free_space(ctl, prev);
+                       unlink_free_space(ctl, e);
+                       prev->bytes += e->bytes;
+                       kmem_cache_free(btrfs_free_space_cachep, e);
+                       link_free_space(ctl, prev);
+                       prev = NULL;
+                       spin_unlock(&ctl->tree_lock);
+                       goto again;
+               }
+next:
+               prev = e;
+       }
+       spin_unlock(&ctl->tree_lock);
+}
+
 int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
                            struct btrfs_free_space_ctl *ctl,
                            struct btrfs_path *path, u64 offset)
@@ -726,6 +767,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
        }
 
        io_ctl_drop_pages(&io_ctl);
+       merge_space_tree(ctl);
        ret = 1;
 out:
        io_ctl_free(&io_ctl);
@@ -972,9 +1014,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                goto out;
 
 
-       ret = filemap_write_and_wait(inode->i_mapping);
-       if (ret)
-               goto out;
+       btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
index 61b16c641ce0975fcbd302fc6156820233c15c93..f6ab6f5e635a39b18ddb7f259bf5f0edd25d10a0 100644 (file)
@@ -89,7 +89,7 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
 
 static int btrfs_setsize(struct inode *inode, loff_t newsize);
 static int btrfs_truncate(struct inode *inode);
-static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
+static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
 static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
@@ -257,10 +257,13 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
        ret = insert_inline_extent(trans, root, inode, start,
                                   inline_len, compressed_size,
                                   compress_type, compressed_pages);
-       if (ret) {
+       if (ret && ret != -ENOSPC) {
                btrfs_abort_transaction(trans, root, ret);
                return ret;
+       } else if (ret == -ENOSPC) {
+               return 1;
        }
+
        btrfs_delalloc_release_metadata(inode, end + 1 - start);
        btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
        return 0;
@@ -1572,11 +1575,11 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
        if (btrfs_is_free_space_inode(root, inode))
                metadata = 2;
 
-       ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
-       if (ret)
-               return ret;
-
        if (!(rw & REQ_WRITE)) {
+               ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
+               if (ret)
+                       return ret;
+
                if (bio_flags & EXTENT_BIO_COMPRESSED) {
                        return btrfs_submit_compressed_read(inode, bio,
                                                    mirror_num, bio_flags);
@@ -1815,25 +1818,24 @@ out:
  * an ordered extent if the range of bytes in the file it covers are
  * fully written.
  */
-static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
+static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 {
+       struct inode *inode = ordered_extent->inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans = NULL;
-       struct btrfs_ordered_extent *ordered_extent = NULL;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct extent_state *cached_state = NULL;
        int compress_type = 0;
        int ret;
        bool nolock;
 
-       ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
-                                            end - start + 1);
-       if (!ret)
-               return 0;
-       BUG_ON(!ordered_extent); /* Logic error */
-
        nolock = btrfs_is_free_space_inode(root, inode);
 
+       if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
+               ret = -EIO;
+               goto out;
+       }
+
        if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
                BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
                ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
@@ -1889,12 +1891,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                                   ordered_extent->file_offset,
                                   ordered_extent->len);
        }
-       unlock_extent_cached(io_tree, ordered_extent->file_offset,
-                            ordered_extent->file_offset +
-                            ordered_extent->len - 1, &cached_state, GFP_NOFS);
+
        if (ret < 0) {
                btrfs_abort_transaction(trans, root, ret);
-               goto out;
+               goto out_unlock;
        }
 
        add_pending_csums(trans, inode, ordered_extent->file_offset,
@@ -1905,10 +1905,14 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                ret = btrfs_update_inode_fallback(trans, root, inode);
                if (ret) { /* -ENOMEM or corruption */
                        btrfs_abort_transaction(trans, root, ret);
-                       goto out;
+                       goto out_unlock;
                }
        }
        ret = 0;
+out_unlock:
+       unlock_extent_cached(io_tree, ordered_extent->file_offset,
+                            ordered_extent->file_offset +
+                            ordered_extent->len - 1, &cached_state, GFP_NOFS);
 out:
        if (root != root->fs_info->tree_root)
                btrfs_delalloc_release_metadata(inode, ordered_extent->len);
@@ -1919,26 +1923,57 @@ out:
                        btrfs_end_transaction(trans, root);
        }
 
+       if (ret)
+               clear_extent_uptodate(io_tree, ordered_extent->file_offset,
+                                     ordered_extent->file_offset +
+                                     ordered_extent->len - 1, NULL, GFP_NOFS);
+
+       /*
+        * This needs to be done to make sure anybody waiting knows we are done
+        * updating everything for this ordered extent.
+        */
+       btrfs_remove_ordered_extent(inode, ordered_extent);
+
        /* once for us */
        btrfs_put_ordered_extent(ordered_extent);
        /* once for the tree */
        btrfs_put_ordered_extent(ordered_extent);
 
-       return 0;
-out_unlock:
-       unlock_extent_cached(io_tree, ordered_extent->file_offset,
-                            ordered_extent->file_offset +
-                            ordered_extent->len - 1, &cached_state, GFP_NOFS);
-       goto out;
+       return ret;
+}
+
+static void finish_ordered_fn(struct btrfs_work *work)
+{
+       struct btrfs_ordered_extent *ordered_extent;
+       ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
+       btrfs_finish_ordered_io(ordered_extent);
 }
 
 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
                                struct extent_state *state, int uptodate)
 {
+       struct inode *inode = page->mapping->host;
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_ordered_extent *ordered_extent = NULL;
+       struct btrfs_workers *workers;
+
        trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
 
        ClearPagePrivate2(page);
-       return btrfs_finish_ordered_io(page->mapping->host, start, end);
+       if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
+                                           end - start + 1, uptodate))
+               return 0;
+
+       ordered_extent->work.func = finish_ordered_fn;
+       ordered_extent->work.flags = 0;
+
+       if (btrfs_is_free_space_inode(root, inode))
+               workers = &root->fs_info->endio_freespace_worker;
+       else
+               workers = &root->fs_info->endio_write_workers;
+       btrfs_queue_worker(workers, &ordered_extent->work);
+
+       return 0;
 }
 
 /*
@@ -2072,12 +2107,12 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
        struct btrfs_block_rsv *block_rsv;
        int ret;
 
-       if (!list_empty(&root->orphan_list) ||
+       if (atomic_read(&root->orphan_inodes) ||
            root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
                return;
 
        spin_lock(&root->orphan_lock);
-       if (!list_empty(&root->orphan_list)) {
+       if (atomic_read(&root->orphan_inodes)) {
                spin_unlock(&root->orphan_lock);
                return;
        }
@@ -2134,8 +2169,8 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
                block_rsv = NULL;
        }
 
-       if (list_empty(&BTRFS_I(inode)->i_orphan)) {
-               list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
+       if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+                             &BTRFS_I(inode)->runtime_flags)) {
 #if 0
                /*
                 * For proper ENOSPC handling, we should do orphan
@@ -2148,12 +2183,12 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
                        insert = 1;
 #endif
                insert = 1;
+               atomic_dec(&root->orphan_inodes);
        }
 
-       if (!BTRFS_I(inode)->orphan_meta_reserved) {
-               BTRFS_I(inode)->orphan_meta_reserved = 1;
+       if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
+                             &BTRFS_I(inode)->runtime_flags))
                reserve = 1;
-       }
        spin_unlock(&root->orphan_lock);
 
        /* grab metadata reservation from transaction handle */
@@ -2166,6 +2201,8 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
        if (insert >= 1) {
                ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
                if (ret && ret != -EEXIST) {
+                       clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+                                 &BTRFS_I(inode)->runtime_flags);
                        btrfs_abort_transaction(trans, root, ret);
                        return ret;
                }
@@ -2196,15 +2233,13 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
        int ret = 0;
 
        spin_lock(&root->orphan_lock);
-       if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
-               list_del_init(&BTRFS_I(inode)->i_orphan);
+       if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+                              &BTRFS_I(inode)->runtime_flags))
                delete_item = 1;
-       }
 
-       if (BTRFS_I(inode)->orphan_meta_reserved) {
-               BTRFS_I(inode)->orphan_meta_reserved = 0;
+       if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
+                              &BTRFS_I(inode)->runtime_flags))
                release_rsv = 1;
-       }
        spin_unlock(&root->orphan_lock);
 
        if (trans && delete_item) {
@@ -2212,8 +2247,10 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
                BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
        }
 
-       if (release_rsv)
+       if (release_rsv) {
                btrfs_orphan_release_metadata(inode);
+               atomic_dec(&root->orphan_inodes);
+       }
 
        return 0;
 }
@@ -2341,6 +2378,8 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                                ret = PTR_ERR(trans);
                                goto out;
                        }
+                       printk(KERN_ERR "auto deleting %Lu\n",
+                              found_key.objectid);
                        ret = btrfs_del_orphan_item(trans, root,
                                                    found_key.objectid);
                        BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
@@ -2352,9 +2391,8 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                 * add this inode to the orphan list so btrfs_orphan_del does
                 * the proper thing when we hit it
                 */
-               spin_lock(&root->orphan_lock);
-               list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
-               spin_unlock(&root->orphan_lock);
+               set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+                       &BTRFS_I(inode)->runtime_flags);
 
                /* if we have links, this was a truncate, lets do that */
                if (inode->i_nlink) {
@@ -2510,7 +2548,7 @@ static void btrfs_read_locked_inode(struct inode *inode)
 
        inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
        BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
-       BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
+       inode->i_version = btrfs_inode_sequence(leaf, inode_item);
        inode->i_generation = BTRFS_I(inode)->generation;
        inode->i_rdev = 0;
        rdev = btrfs_inode_rdev(leaf, inode_item);
@@ -2594,7 +2632,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
 
        btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
        btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
-       btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
+       btrfs_set_inode_sequence(leaf, item, inode->i_version);
        btrfs_set_inode_transid(leaf, item, trans->transid);
        btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
        btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
@@ -2752,6 +2790,8 @@ err:
                goto out;
 
        btrfs_i_size_write(dir, dir->i_size - name_len * 2);
+       inode_inc_iversion(inode);
+       inode_inc_iversion(dir);
        inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
        btrfs_update_inode(trans, root, dir);
 out:
@@ -3089,6 +3129,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
        }
 
        btrfs_i_size_write(dir, dir->i_size - name_len * 2);
+       inode_inc_iversion(dir);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, dir);
        if (ret)
@@ -3607,7 +3648,8 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
                 * any new writes get down to disk quickly.
                 */
                if (newsize == 0)
-                       BTRFS_I(inode)->ordered_data_close = 1;
+                       set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
+                               &BTRFS_I(inode)->runtime_flags);
 
                /* we don't support swapfiles, so vmtruncate shouldn't fail */
                truncate_setsize(inode, newsize);
@@ -3638,6 +3680,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
 
        if (attr->ia_valid) {
                setattr_copy(inode, attr);
+               inode_inc_iversion(inode);
                err = btrfs_dirty_inode(inode);
 
                if (!err && attr->ia_valid & ATTR_MODE)
@@ -3671,7 +3714,8 @@ void btrfs_evict_inode(struct inode *inode)
        btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
        if (root->fs_info->log_root_recovering) {
-               BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
+               BUG_ON(!test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+                                &BTRFS_I(inode)->runtime_flags));
                goto no_delete;
        }
 
@@ -3756,7 +3800,7 @@ void btrfs_evict_inode(struct inode *inode)
        btrfs_end_transaction(trans, root);
        btrfs_btree_balance_dirty(root, nr);
 no_delete:
-       end_writeback(inode);
+       clear_inode(inode);
        return;
 }
 
@@ -4066,7 +4110,7 @@ static struct inode *new_simple_dir(struct super_block *s,
 
        BTRFS_I(inode)->root = root;
        memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
-       BTRFS_I(inode)->dummy_inode = 1;
+       set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
 
        inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
        inode->i_op = &btrfs_dir_ro_inode_operations;
@@ -4370,7 +4414,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
        int ret = 0;
        bool nolock = false;
 
-       if (BTRFS_I(inode)->dummy_inode)
+       if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
                return 0;
 
        if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode))
@@ -4403,7 +4447,7 @@ int btrfs_dirty_inode(struct inode *inode)
        struct btrfs_trans_handle *trans;
        int ret;
 
-       if (BTRFS_I(inode)->dummy_inode)
+       if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
                return 0;
 
        trans = btrfs_join_transaction(root);
@@ -4431,46 +4475,18 @@ int btrfs_dirty_inode(struct inode *inode)
  * This is a copy of file_update_time.  We need this so we can return error on
  * ENOSPC for updating the inode in the case of file write and mmap writes.
  */
-int btrfs_update_time(struct file *file)
+static int btrfs_update_time(struct inode *inode, struct timespec *now,
+                            int flags)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
-       struct timespec now;
-       int ret;
-       enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
-
-       /* First try to exhaust all avenues to not sync */
-       if (IS_NOCMTIME(inode))
-               return 0;
-
-       now = current_fs_time(inode->i_sb);
-       if (!timespec_equal(&inode->i_mtime, &now))
-               sync_it = S_MTIME;
-
-       if (!timespec_equal(&inode->i_ctime, &now))
-               sync_it |= S_CTIME;
-
-       if (IS_I_VERSION(inode))
-               sync_it |= S_VERSION;
-
-       if (!sync_it)
-               return 0;
-
-       /* Finally allowed to write? Takes lock. */
-       if (mnt_want_write_file(file))
-               return 0;
-
-       /* Only change inode inside the lock region */
-       if (sync_it & S_VERSION)
+       if (flags & S_VERSION)
                inode_inc_iversion(inode);
-       if (sync_it & S_CTIME)
-               inode->i_ctime = now;
-       if (sync_it & S_MTIME)
-               inode->i_mtime = now;
-       ret = btrfs_dirty_inode(inode);
-       if (!ret)
-               mark_inode_dirty_sync(inode);
-       mnt_drop_write(file->f_path.mnt);
-       return ret;
+       if (flags & S_CTIME)
+               inode->i_ctime = *now;
+       if (flags & S_MTIME)
+               inode->i_mtime = *now;
+       if (flags & S_ATIME)
+               inode->i_atime = *now;
+       return btrfs_dirty_inode(inode);
 }
 
 /*
@@ -4730,6 +4746,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
 
        btrfs_i_size_write(parent_inode, parent_inode->i_size +
                           name_len * 2);
+       inode_inc_iversion(parent_inode);
        parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, parent_inode);
        if (ret)
@@ -4937,6 +4954,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        }
 
        btrfs_inc_nlink(inode);
+       inode_inc_iversion(inode);
        inode->i_ctime = CURRENT_TIME;
        ihold(inode);
 
@@ -5903,9 +5921,7 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
        struct btrfs_dio_private *dip = bio->bi_private;
        struct inode *inode = dip->inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
-       struct btrfs_trans_handle *trans;
        struct btrfs_ordered_extent *ordered = NULL;
-       struct extent_state *cached_state = NULL;
        u64 ordered_offset = dip->logical_offset;
        u64 ordered_bytes = dip->bytes;
        int ret;
@@ -5915,73 +5931,14 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
 again:
        ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
                                                   &ordered_offset,
-                                                  ordered_bytes);
+                                                  ordered_bytes, !err);
        if (!ret)
                goto out_test;
 
-       BUG_ON(!ordered);
-
-       trans = btrfs_join_transaction(root);
-       if (IS_ERR(trans)) {
-               err = -ENOMEM;
-               goto out;
-       }
-       trans->block_rsv = &root->fs_info->delalloc_block_rsv;
-
-       if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
-               ret = btrfs_ordered_update_i_size(inode, 0, ordered);
-               if (!ret)
-                       err = btrfs_update_inode_fallback(trans, root, inode);
-               goto out;
-       }
-
-       lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
-                        ordered->file_offset + ordered->len - 1, 0,
-                        &cached_state);
-
-       if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
-               ret = btrfs_mark_extent_written(trans, inode,
-                                               ordered->file_offset,
-                                               ordered->file_offset +
-                                               ordered->len);
-               if (ret) {
-                       err = ret;
-                       goto out_unlock;
-               }
-       } else {
-               ret = insert_reserved_file_extent(trans, inode,
-                                                 ordered->file_offset,
-                                                 ordered->start,
-                                                 ordered->disk_len,
-                                                 ordered->len,
-                                                 ordered->len,
-                                                 0, 0, 0,
-                                                 BTRFS_FILE_EXTENT_REG);
-               unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
-                                  ordered->file_offset, ordered->len);
-               if (ret) {
-                       err = ret;
-                       WARN_ON(1);
-                       goto out_unlock;
-               }
-       }
-
-       add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
-       ret = btrfs_ordered_update_i_size(inode, 0, ordered);
-       if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
-               btrfs_update_inode_fallback(trans, root, inode);
-       ret = 0;
-out_unlock:
-       unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
-                            ordered->file_offset + ordered->len - 1,
-                            &cached_state, GFP_NOFS);
-out:
-       btrfs_delalloc_release_metadata(inode, ordered->len);
-       btrfs_end_transaction(trans, root);
-       ordered_offset = ordered->file_offset + ordered->len;
-       btrfs_put_ordered_extent(ordered);
-       btrfs_put_ordered_extent(ordered);
-
+       ordered->work.func = finish_ordered_fn;
+       ordered->work.flags = 0;
+       btrfs_queue_worker(&root->fs_info->endio_write_workers,
+                          &ordered->work);
 out_test:
        /*
         * our bio might span multiple ordered extents.  If we haven't
@@ -5990,12 +5947,12 @@ out_test:
        if (ordered_offset < dip->logical_offset + dip->bytes) {
                ordered_bytes = dip->logical_offset + dip->bytes -
                        ordered_offset;
+               ordered = NULL;
                goto again;
        }
 out_done:
        bio->bi_private = dip->private;
 
-       kfree(dip->csums);
        kfree(dip);
 
        /* If we had an error make sure to clear the uptodate flag */
@@ -6063,9 +6020,12 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
        int ret;
 
        bio_get(bio);
-       ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
-       if (ret)
-               goto err;
+
+       if (!write) {
+               ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
+               if (ret)
+                       goto err;
+       }
 
        if (skip_sum)
                goto map;
@@ -6485,13 +6445,13 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
 
 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 {
+       struct inode *inode = page->mapping->host;
        struct extent_io_tree *tree;
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cached_state = NULL;
        u64 page_start = page_offset(page);
        u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
 
-
        /*
         * we have the page locked, so new writeback can't start,
         * and the dirty bit won't be cleared while we are here.
@@ -6501,13 +6461,13 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
         */
        wait_on_page_writeback(page);
 
-       tree = &BTRFS_I(page->mapping->host)->io_tree;
+       tree = &BTRFS_I(inode)->io_tree;
        if (offset) {
                btrfs_releasepage(page, GFP_NOFS);
                return;
        }
        lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
-       ordered = btrfs_lookup_ordered_extent(page->mapping->host,
+       ordered = btrfs_lookup_ordered_extent(inode,
                                           page_offset(page));
        if (ordered) {
                /*
@@ -6522,9 +6482,10 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
                 * whoever cleared the private bit is responsible
                 * for the finish_ordered_io
                 */
-               if (TestClearPagePrivate2(page)) {
-                       btrfs_finish_ordered_io(page->mapping->host,
-                                               page_start, page_end);
+               if (TestClearPagePrivate2(page) &&
+                   btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
+                                                  PAGE_CACHE_SIZE, 1)) {
+                       btrfs_finish_ordered_io(ordered);
                }
                btrfs_put_ordered_extent(ordered);
                cached_state = NULL;
@@ -6576,7 +6537,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
        if (!ret) {
-               ret = btrfs_update_time(vma->vm_file);
+               ret = file_update_time(vma->vm_file);
                reserved = 1;
        }
        if (ret) {
@@ -6771,7 +6732,8 @@ static int btrfs_truncate(struct inode *inode)
         * using truncate to replace the contents of the file will
         * end up with a zero length file after a crash.
         */
-       if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
+       if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
+                                          &BTRFS_I(inode)->runtime_flags))
                btrfs_add_ordered_operation(trans, root, inode);
 
        while (1) {
@@ -6894,7 +6856,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        ei->root = NULL;
        ei->space_info = NULL;
        ei->generation = 0;
-       ei->sequence = 0;
        ei->last_trans = 0;
        ei->last_sub_trans = 0;
        ei->logged_trans = 0;
@@ -6909,11 +6870,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        ei->outstanding_extents = 0;
        ei->reserved_extents = 0;
 
-       ei->ordered_data_close = 0;
-       ei->orphan_meta_reserved = 0;
-       ei->dummy_inode = 0;
-       ei->in_defrag = 0;
-       ei->delalloc_meta_reserved = 0;
+       ei->runtime_flags = 0;
        ei->force_compress = BTRFS_COMPRESS_NONE;
 
        ei->delayed_node = NULL;
@@ -6927,7 +6884,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        mutex_init(&ei->log_mutex);
        mutex_init(&ei->delalloc_mutex);
        btrfs_ordered_inode_tree_init(&ei->ordered_tree);
-       INIT_LIST_HEAD(&ei->i_orphan);
        INIT_LIST_HEAD(&ei->delalloc_inodes);
        INIT_LIST_HEAD(&ei->ordered_operations);
        RB_CLEAR_NODE(&ei->rb_node);
@@ -6972,13 +6928,12 @@ void btrfs_destroy_inode(struct inode *inode)
                spin_unlock(&root->fs_info->ordered_extent_lock);
        }
 
-       spin_lock(&root->orphan_lock);
-       if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
+       if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+                    &BTRFS_I(inode)->runtime_flags)) {
                printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
                       (unsigned long long)btrfs_ino(inode));
-               list_del_init(&BTRFS_I(inode)->i_orphan);
+               atomic_dec(&root->orphan_inodes);
        }
-       spin_unlock(&root->orphan_lock);
 
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
@@ -7193,6 +7148,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
                btrfs_add_ordered_operation(trans, root, old_inode);
 
+       inode_inc_iversion(old_dir);
+       inode_inc_iversion(new_dir);
+       inode_inc_iversion(old_inode);
        old_dir->i_ctime = old_dir->i_mtime = ctime;
        new_dir->i_ctime = new_dir->i_mtime = ctime;
        old_inode->i_ctime = ctime;
@@ -7219,6 +7177,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        }
 
        if (new_inode) {
+               inode_inc_iversion(new_inode);
                new_inode->i_ctime = CURRENT_TIME;
                if (unlikely(btrfs_ino(new_inode) ==
                             BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
@@ -7490,6 +7449,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
                cur_offset += ins.offset;
                *alloc_hint = ins.objectid + ins.offset;
 
+               inode_inc_iversion(inode);
                inode->i_ctime = CURRENT_TIME;
                BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
                if (!(mode & FALLOC_FL_KEEP_SIZE) &&
@@ -7647,6 +7607,7 @@ static const struct inode_operations btrfs_file_inode_operations = {
        .permission     = btrfs_permission,
        .fiemap         = btrfs_fiemap,
        .get_acl        = btrfs_get_acl,
+       .update_time    = btrfs_update_time,
 };
 static const struct inode_operations btrfs_special_inode_operations = {
        .getattr        = btrfs_getattr,
@@ -7657,6 +7618,7 @@ static const struct inode_operations btrfs_special_inode_operations = {
        .listxattr      = btrfs_listxattr,
        .removexattr    = btrfs_removexattr,
        .get_acl        = btrfs_get_acl,
+       .update_time    = btrfs_update_time,
 };
 static const struct inode_operations btrfs_symlink_inode_operations = {
        .readlink       = generic_readlink,
@@ -7670,6 +7632,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
        .listxattr      = btrfs_listxattr,
        .removexattr    = btrfs_removexattr,
        .get_acl        = btrfs_get_acl,
+       .update_time    = btrfs_update_time,
 };
 
 const struct dentry_operations btrfs_dentry_operations = {
index 14f8e1faa46ee0478ebb83d6f82d205d25c1dc51..24b776c08d99f7bbb621076f68500464b6829435 100644 (file)
@@ -261,6 +261,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
        }
 
        btrfs_update_iflags(inode);
+       inode_inc_iversion(inode);
        inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, inode);
 
@@ -367,7 +368,7 @@ static noinline int create_subvol(struct btrfs_root *root,
                return PTR_ERR(trans);
 
        leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
-                                     0, objectid, NULL, 0, 0, 0, 0);
+                                     0, objectid, NULL, 0, 0, 0);
        if (IS_ERR(leaf)) {
                ret = PTR_ERR(leaf);
                goto fail;
@@ -2262,10 +2263,12 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
        di_args->bytes_used = dev->bytes_used;
        di_args->total_bytes = dev->total_bytes;
        memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
-       if (dev->name)
+       if (dev->name) {
                strncpy(di_args->path, dev->name, sizeof(di_args->path));
-       else
+               di_args->path[sizeof(di_args->path) - 1] = 0;
+       } else {
                di_args->path[0] = '\0';
+       }
 
 out:
        if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
@@ -2622,6 +2625,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                        btrfs_mark_buffer_dirty(leaf);
                        btrfs_release_path(path);
 
+                       inode_inc_iversion(inode);
                        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 
                        /*
@@ -2914,7 +2918,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
                up_read(&info->groups_sem);
        }
 
-       user_dest = (struct btrfs_ioctl_space_info *)
+       user_dest = (struct btrfs_ioctl_space_info __user *)
                (arg + sizeof(struct btrfs_ioctl_space_args));
 
        if (copy_to_user(user_dest, dest_orig, alloc_size))
@@ -3042,6 +3046,28 @@ static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
        return ret;
 }
 
+static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
+                                     void __user *arg, int reset_after_read)
+{
+       struct btrfs_ioctl_get_dev_stats *sa;
+       int ret;
+
+       if (reset_after_read && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       sa = memdup_user(arg, sizeof(*sa));
+       if (IS_ERR(sa))
+               return PTR_ERR(sa);
+
+       ret = btrfs_get_dev_stats(root, sa, reset_after_read);
+
+       if (copy_to_user(arg, sa, sizeof(*sa)))
+               ret = -EFAULT;
+
+       kfree(sa);
+       return ret;
+}
+
 static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
 {
        int ret = 0;
@@ -3212,8 +3238,9 @@ void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
        }
 }
 
-static long btrfs_ioctl_balance(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_balance(struct file *file, void __user *arg)
 {
+       struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_ioctl_balance_args *bargs;
        struct btrfs_balance_control *bctl;
@@ -3225,6 +3252,10 @@ static long btrfs_ioctl_balance(struct btrfs_root *root, void __user *arg)
        if (fs_info->sb->s_flags & MS_RDONLY)
                return -EROFS;
 
+       ret = mnt_want_write(file->f_path.mnt);
+       if (ret)
+               return ret;
+
        mutex_lock(&fs_info->volume_mutex);
        mutex_lock(&fs_info->balance_mutex);
 
@@ -3291,6 +3322,7 @@ out_bargs:
 out:
        mutex_unlock(&fs_info->balance_mutex);
        mutex_unlock(&fs_info->volume_mutex);
+       mnt_drop_write(file->f_path.mnt);
        return ret;
 }
 
@@ -3386,7 +3418,7 @@ long btrfs_ioctl(struct file *file, unsigned int
        case BTRFS_IOC_DEV_INFO:
                return btrfs_ioctl_dev_info(root, argp);
        case BTRFS_IOC_BALANCE:
-               return btrfs_ioctl_balance(root, NULL);
+               return btrfs_ioctl_balance(file, NULL);
        case BTRFS_IOC_CLONE:
                return btrfs_ioctl_clone(file, arg, 0, 0, 0);
        case BTRFS_IOC_CLONE_RANGE:
@@ -3419,11 +3451,15 @@ long btrfs_ioctl(struct file *file, unsigned int
        case BTRFS_IOC_SCRUB_PROGRESS:
                return btrfs_ioctl_scrub_progress(root, argp);
        case BTRFS_IOC_BALANCE_V2:
-               return btrfs_ioctl_balance(root, argp);
+               return btrfs_ioctl_balance(file, argp);
        case BTRFS_IOC_BALANCE_CTL:
                return btrfs_ioctl_balance_ctl(root, arg);
        case BTRFS_IOC_BALANCE_PROGRESS:
                return btrfs_ioctl_balance_progress(root, argp);
+       case BTRFS_IOC_GET_DEV_STATS:
+               return btrfs_ioctl_get_dev_stats(root, argp, 0);
+       case BTRFS_IOC_GET_AND_RESET_DEV_STATS:
+               return btrfs_ioctl_get_dev_stats(root, argp, 1);
        }
 
        return -ENOTTY;
index 086e6bdae1c4482b93b6dda4d16b1c5af288f2eb..497c530724cf6b7a50296d2c6660fef7f4066cb9 100644 (file)
@@ -266,6 +266,35 @@ struct btrfs_ioctl_logical_ino_args {
        __u64                           inodes;
 };
 
+enum btrfs_dev_stat_values {
+       /* disk I/O failure stats */
+       BTRFS_DEV_STAT_WRITE_ERRS, /* EIO or EREMOTEIO from lower layers */
+       BTRFS_DEV_STAT_READ_ERRS, /* EIO or EREMOTEIO from lower layers */
+       BTRFS_DEV_STAT_FLUSH_ERRS, /* EIO or EREMOTEIO from lower layers */
+
+       /* stats for indirect indications for I/O failures */
+       BTRFS_DEV_STAT_CORRUPTION_ERRS, /* checksum error, bytenr error or
+                                        * contents is illegal: this is an
+                                        * indication that the block was damaged
+                                        * during read or write, or written to
+                                        * wrong location or read from wrong
+                                        * location */
+       BTRFS_DEV_STAT_GENERATION_ERRS, /* an indication that blocks have not
+                                        * been written */
+
+       BTRFS_DEV_STAT_VALUES_MAX
+};
+
+struct btrfs_ioctl_get_dev_stats {
+       __u64 devid;                            /* in */
+       __u64 nr_items;                         /* in/out */
+
+       /* out values: */
+       __u64 values[BTRFS_DEV_STAT_VALUES_MAX];
+
+       __u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; /* pad to 1k */
+};
+
 #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
                                   struct btrfs_ioctl_vol_args)
 #define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
@@ -330,5 +359,9 @@ struct btrfs_ioctl_logical_ino_args {
                                        struct btrfs_ioctl_ino_path_args)
 #define BTRFS_IOC_LOGICAL_INO _IOWR(BTRFS_IOCTL_MAGIC, 36, \
                                        struct btrfs_ioctl_ino_path_args)
+#define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \
+                                     struct btrfs_ioctl_get_dev_stats)
+#define BTRFS_IOC_GET_AND_RESET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 53, \
+                                       struct btrfs_ioctl_get_dev_stats)
 
 #endif
index bbf6d0d9aebe9b68f0ea8e5c121783d81733f7d7..9e138cdc36c5eb7d66bf80dfc37829878eeaa6e2 100644 (file)
@@ -196,7 +196,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
        entry->len = len;
        entry->disk_len = disk_len;
        entry->bytes_left = len;
-       entry->inode = inode;
+       entry->inode = igrab(inode);
        entry->compress_type = compress_type;
        if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
                set_bit(type, &entry->flags);
@@ -212,12 +212,12 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 
        trace_btrfs_ordered_extent_add(inode, entry);
 
-       spin_lock(&tree->lock);
+       spin_lock_irq(&tree->lock);
        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        if (node)
                ordered_data_tree_panic(inode, -EEXIST, file_offset);
-       spin_unlock(&tree->lock);
+       spin_unlock_irq(&tree->lock);
 
        spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
@@ -264,9 +264,9 @@ void btrfs_add_ordered_sum(struct inode *inode,
        struct btrfs_ordered_inode_tree *tree;
 
        tree = &BTRFS_I(inode)->ordered_tree;
-       spin_lock(&tree->lock);
+       spin_lock_irq(&tree->lock);
        list_add_tail(&sum->list, &entry->list);
-       spin_unlock(&tree->lock);
+       spin_unlock_irq(&tree->lock);
 }
 
 /*
@@ -283,18 +283,19 @@ void btrfs_add_ordered_sum(struct inode *inode,
  */
 int btrfs_dec_test_first_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
-                                  u64 *file_offset, u64 io_size)
+                                  u64 *file_offset, u64 io_size, int uptodate)
 {
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        int ret;
+       unsigned long flags;
        u64 dec_end;
        u64 dec_start;
        u64 to_dec;
 
        tree = &BTRFS_I(inode)->ordered_tree;
-       spin_lock(&tree->lock);
+       spin_lock_irqsave(&tree->lock, flags);
        node = tree_search(tree, *file_offset);
        if (!node) {
                ret = 1;
@@ -323,6 +324,9 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
                       (unsigned long long)to_dec);
        }
        entry->bytes_left -= to_dec;
+       if (!uptodate)
+               set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
+
        if (entry->bytes_left == 0)
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
        else
@@ -332,7 +336,7 @@ out:
                *cached = entry;
                atomic_inc(&entry->refs);
        }
-       spin_unlock(&tree->lock);
+       spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
 }
 
@@ -347,15 +351,21 @@ out:
  */
 int btrfs_dec_test_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
-                                  u64 file_offset, u64 io_size)
+                                  u64 file_offset, u64 io_size, int uptodate)
 {
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
+       unsigned long flags;
        int ret;
 
        tree = &BTRFS_I(inode)->ordered_tree;
-       spin_lock(&tree->lock);
+       spin_lock_irqsave(&tree->lock, flags);
+       if (cached && *cached) {
+               entry = *cached;
+               goto have_entry;
+       }
+
        node = tree_search(tree, file_offset);
        if (!node) {
                ret = 1;
@@ -363,6 +373,7 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
        }
 
        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+have_entry:
        if (!offset_in_entry(entry, file_offset)) {
                ret = 1;
                goto out;
@@ -374,6 +385,9 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
                       (unsigned long long)io_size);
        }
        entry->bytes_left -= io_size;
+       if (!uptodate)
+               set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
+
        if (entry->bytes_left == 0)
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
        else
@@ -383,7 +397,7 @@ out:
                *cached = entry;
                atomic_inc(&entry->refs);
        }
-       spin_unlock(&tree->lock);
+       spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
 }
 
@@ -399,6 +413,8 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
        trace_btrfs_ordered_extent_put(entry->inode, entry);
 
        if (atomic_dec_and_test(&entry->refs)) {
+               if (entry->inode)
+                       btrfs_add_delayed_iput(entry->inode);
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
@@ -411,21 +427,22 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 
 /*
  * remove an ordered extent from the tree.  No references are dropped
- * and you must wake_up entry->wait.  You must hold the tree lock
- * while you call this function.
+ * and waiters are woken up.
  */
-static void __btrfs_remove_ordered_extent(struct inode *inode,
-                                         struct btrfs_ordered_extent *entry)
+void btrfs_remove_ordered_extent(struct inode *inode,
+                                struct btrfs_ordered_extent *entry)
 {
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct rb_node *node;
 
        tree = &BTRFS_I(inode)->ordered_tree;
+       spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
+       spin_unlock_irq(&tree->lock);
 
        spin_lock(&root->fs_info->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);
@@ -442,21 +459,6 @@ static void __btrfs_remove_ordered_extent(struct inode *inode,
                list_del_init(&BTRFS_I(inode)->ordered_operations);
        }
        spin_unlock(&root->fs_info->ordered_extent_lock);
-}
-
-/*
- * remove an ordered extent from the tree.  No references are dropped
- * but any waiters are woken.
- */
-void btrfs_remove_ordered_extent(struct inode *inode,
-                                struct btrfs_ordered_extent *entry)
-{
-       struct btrfs_ordered_inode_tree *tree;
-
-       tree = &BTRFS_I(inode)->ordered_tree;
-       spin_lock(&tree->lock);
-       __btrfs_remove_ordered_extent(inode, entry);
-       spin_unlock(&tree->lock);
        wake_up(&entry->wait);
 }
 
@@ -621,19 +623,11 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
                if (orig_end > INT_LIMIT(loff_t))
                        orig_end = INT_LIMIT(loff_t);
        }
-again:
+
        /* start IO across the range first to instantiate any delalloc
         * extents
         */
-       filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
-
-       /* The compression code will leave pages locked but return from
-        * writepage without setting the page writeback.  Starting again
-        * with WB_SYNC_ALL will end up waiting for the IO to actually start.
-        */
-       filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
-
-       filemap_fdatawait_range(inode->i_mapping, start, orig_end);
+       filemap_write_and_wait_range(inode->i_mapping, start, orig_end);
 
        end = orig_end;
        found = 0;
@@ -657,11 +651,6 @@ again:
                        break;
                end--;
        }
-       if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
-                          EXTENT_DELALLOC, 0, NULL)) {
-               schedule_timeout(1);
-               goto again;
-       }
 }
 
 /*
@@ -676,7 +665,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
        struct btrfs_ordered_extent *entry = NULL;
 
        tree = &BTRFS_I(inode)->ordered_tree;
-       spin_lock(&tree->lock);
+       spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;
@@ -687,7 +676,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
        if (entry)
                atomic_inc(&entry->refs);
 out:
-       spin_unlock(&tree->lock);
+       spin_unlock_irq(&tree->lock);
        return entry;
 }
 
@@ -703,7 +692,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
        struct btrfs_ordered_extent *entry = NULL;
 
        tree = &BTRFS_I(inode)->ordered_tree;
-       spin_lock(&tree->lock);
+       spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node) {
                node = tree_search(tree, file_offset + len);
@@ -728,7 +717,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
 out:
        if (entry)
                atomic_inc(&entry->refs);
-       spin_unlock(&tree->lock);
+       spin_unlock_irq(&tree->lock);
        return entry;
 }
 
@@ -744,7 +733,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
        struct btrfs_ordered_extent *entry = NULL;
 
        tree = &BTRFS_I(inode)->ordered_tree;
-       spin_lock(&tree->lock);
+       spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;
@@ -752,7 +741,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        atomic_inc(&entry->refs);
 out:
-       spin_unlock(&tree->lock);
+       spin_unlock_irq(&tree->lock);
        return entry;
 }
 
@@ -764,7 +753,6 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                                struct btrfs_ordered_extent *ordered)
 {
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
-       struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        u64 disk_i_size;
        u64 new_i_size;
        u64 i_size_test;
@@ -779,7 +767,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
        else
                offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
 
-       spin_lock(&tree->lock);
+       spin_lock_irq(&tree->lock);
        disk_i_size = BTRFS_I(inode)->disk_i_size;
 
        /* truncate file */
@@ -797,14 +785,6 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                goto out;
        }
 
-       /*
-        * we can't update the disk_isize if there are delalloc bytes
-        * between disk_i_size and  this ordered extent
-        */
-       if (test_range_bit(io_tree, disk_i_size, offset - 1,
-                          EXTENT_DELALLOC, 0, NULL)) {
-               goto out;
-       }
        /*
         * walk backward from this ordered extent to disk_i_size.
         * if we find an ordered extent then we can't update disk i_size
@@ -825,15 +805,18 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                }
                node = prev;
        }
-       while (node) {
+       for (; node; node = rb_prev(node)) {
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+
+               /* We treat this entry as if it doesnt exist */
+               if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
+                       continue;
                if (test->file_offset + test->len <= disk_i_size)
                        break;
                if (test->file_offset >= i_size)
                        break;
                if (test->file_offset >= disk_i_size)
                        goto out;
-               node = rb_prev(node);
        }
        new_i_size = min_t(u64, offset, i_size);
 
@@ -851,43 +834,49 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                else
                        node = rb_first(&tree->tree);
        }
-       i_size_test = 0;
-       if (node) {
-               /*
-                * do we have an area where IO might have finished
-                * between our ordered extent and the next one.
-                */
+
+       /*
+        * We are looking for an area between our current extent and the next
+        * ordered extent to update the i_size to.  There are 3 cases here
+        *
+        * 1) We don't actually have anything and we can update to i_size.
+        * 2) We have stuff but they already did their i_size update so again we
+        * can just update to i_size.
+        * 3) We have an outstanding ordered extent so the most we can update
+        * our disk_i_size to is the start of the next offset.
+        */
+       i_size_test = i_size;
+       for (; node; node = rb_next(node)) {
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
-               if (test->file_offset > offset)
+
+               if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
+                       continue;
+               if (test->file_offset > offset) {
                        i_size_test = test->file_offset;
-       } else {
-               i_size_test = i_size;
+                       break;
+               }
        }
 
        /*
         * i_size_test is the end of a region after this ordered
-        * extent where there are no ordered extents.  As long as there
-        * are no delalloc bytes in this area, it is safe to update
-        * disk_i_size to the end of the region.
+        * extent where there are no ordered extents, we can safely set
+        * disk_i_size to this.
         */
-       if (i_size_test > offset &&
-           !test_range_bit(io_tree, offset, i_size_test - 1,
-                           EXTENT_DELALLOC, 0, NULL)) {
+       if (i_size_test > offset)
                new_i_size = min_t(u64, i_size_test, i_size);
-       }
        BTRFS_I(inode)->disk_i_size = new_i_size;
        ret = 0;
 out:
        /*
-        * we need to remove the ordered extent with the tree lock held
-        * so that other people calling this function don't find our fully
-        * processed ordered entry and skip updating the i_size
+        * We need to do this because we can't remove ordered extents until
+        * after the i_disk_size has been updated and then the inode has been
+        * updated to reflect the change, so we need to tell anybody who finds
+        * this ordered extent that we've already done all the real work, we
+        * just haven't completed all the other work.
         */
        if (ordered)
-               __btrfs_remove_ordered_extent(inode, ordered);
-       spin_unlock(&tree->lock);
-       if (ordered)
-               wake_up(&ordered->wait);
+               set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
+       spin_unlock_irq(&tree->lock);
        return ret;
 }
 
@@ -912,7 +901,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
        if (!ordered)
                return 1;
 
-       spin_lock(&tree->lock);
+       spin_lock_irq(&tree->lock);
        list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
                if (disk_bytenr >= ordered_sum->bytenr) {
                        num_sectors = ordered_sum->len / sectorsize;
@@ -927,7 +916,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
                }
        }
 out:
-       spin_unlock(&tree->lock);
+       spin_unlock_irq(&tree->lock);
        btrfs_put_ordered_extent(ordered);
        return ret;
 }
index c355ad4dc1a66962d30557e9bbdc08ca9fc25da8..e03c560d299732cfe2114fe41d049b691a949e61 100644 (file)
@@ -74,6 +74,12 @@ struct btrfs_ordered_sum {
 
 #define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */
 
+#define BTRFS_ORDERED_IOERR 6 /* We had an io error when writing this out */
+
+#define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates wether this ordered extent
+                                      * has done its due diligence in updating
+                                      * the isize. */
+
 struct btrfs_ordered_extent {
        /* logical offset in the file */
        u64 file_offset;
@@ -113,6 +119,8 @@ struct btrfs_ordered_extent {
 
        /* a per root list of all the pending ordered extents */
        struct list_head root_extent_list;
+
+       struct btrfs_work work;
 };
 
 
@@ -143,10 +151,11 @@ void btrfs_remove_ordered_extent(struct inode *inode,
                                struct btrfs_ordered_extent *entry);
 int btrfs_dec_test_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
-                                  u64 file_offset, u64 io_size);
+                                  u64 file_offset, u64 io_size, int uptodate);
 int btrfs_dec_test_first_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
-                                  u64 *file_offset, u64 io_size);
+                                  u64 *file_offset, u64 io_size,
+                                  int uptodate);
 int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                             u64 start, u64 len, u64 disk_len, int type);
 int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
index f38e452486b8d12ba36589248579dc158981c3be..5e23684887eb8eb401594af69b1be7372f7188aa 100644 (file)
@@ -294,6 +294,9 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
                               btrfs_dev_extent_chunk_offset(l, dev_extent),
                               (unsigned long long)
                               btrfs_dev_extent_length(l, dev_extent));
+               case BTRFS_DEV_STATS_KEY:
+                       printk(KERN_INFO "\t\tdevice stats\n");
+                       break;
                };
        }
 }
index ac5d010858848d007e380d529476ad9eb4f6fb31..48a4882d8ad5955eaa0be2b940e35f0b3b2a7f6f 100644 (file)
@@ -718,13 +718,18 @@ static void reada_start_machine_worker(struct btrfs_work *work)
 {
        struct reada_machine_work *rmw;
        struct btrfs_fs_info *fs_info;
+       int old_ioprio;
 
        rmw = container_of(work, struct reada_machine_work, work);
        fs_info = rmw->fs_info;
 
        kfree(rmw);
 
+       old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
+                                      task_nice_ioprio(current));
+       set_task_ioprio(current, BTRFS_IOPRIO_READA);
        __reada_start_machine(fs_info);
+       set_task_ioprio(current, old_ioprio);
 }
 
 static void __reada_start_machine(struct btrfs_fs_info *fs_info)
index 2f3d6f917fb3373c02335b6912fcba1006f5fabe..a38cfa4f251ec1065410f561188c4adf5868cea3 100644 (file)
@@ -50,7 +50,7 @@ struct scrub_dev;
 struct scrub_page {
        struct scrub_block      *sblock;
        struct page             *page;
-       struct block_device     *bdev;
+       struct btrfs_device     *dev;
        u64                     flags;  /* extent flags */
        u64                     generation;
        u64                     logical;
@@ -86,6 +86,7 @@ struct scrub_block {
                unsigned int    header_error:1;
                unsigned int    checksum_error:1;
                unsigned int    no_io_error_seen:1;
+               unsigned int    generation_error:1; /* also sets header_error */
        };
 };
 
@@ -675,6 +676,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                sdev->stat.read_errors++;
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
+               btrfs_dev_stat_inc_and_print(sdev->dev,
+                                            BTRFS_DEV_STAT_READ_ERRS);
                goto out;
        }
 
@@ -686,6 +689,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                sdev->stat.read_errors++;
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
+               btrfs_dev_stat_inc_and_print(sdev->dev,
+                                            BTRFS_DEV_STAT_READ_ERRS);
                goto out;
        }
        BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
@@ -699,6 +704,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                sdev->stat.read_errors++;
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
+               btrfs_dev_stat_inc_and_print(sdev->dev,
+                                            BTRFS_DEV_STAT_READ_ERRS);
                goto out;
        }
 
@@ -725,12 +732,16 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                spin_unlock(&sdev->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("i/o error", sblock_to_check);
+               btrfs_dev_stat_inc_and_print(sdev->dev,
+                                            BTRFS_DEV_STAT_READ_ERRS);
        } else if (sblock_bad->checksum_error) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.csum_errors++;
                spin_unlock(&sdev->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum error", sblock_to_check);
+               btrfs_dev_stat_inc_and_print(sdev->dev,
+                                            BTRFS_DEV_STAT_CORRUPTION_ERRS);
        } else if (sblock_bad->header_error) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.verify_errors++;
@@ -738,6 +749,12 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum/header error",
                                            sblock_to_check);
+               if (sblock_bad->generation_error)
+                       btrfs_dev_stat_inc_and_print(sdev->dev,
+                               BTRFS_DEV_STAT_GENERATION_ERRS);
+               else
+                       btrfs_dev_stat_inc_and_print(sdev->dev,
+                               BTRFS_DEV_STAT_CORRUPTION_ERRS);
        }
 
        if (sdev->readonly)
@@ -998,8 +1015,8 @@ static int scrub_setup_recheck_block(struct scrub_dev *sdev,
                        page = sblock->pagev + page_index;
                        page->logical = logical;
                        page->physical = bbio->stripes[mirror_index].physical;
-                       /* for missing devices, bdev is NULL */
-                       page->bdev = bbio->stripes[mirror_index].dev->bdev;
+                       /* for missing devices, dev->bdev is NULL */
+                       page->dev = bbio->stripes[mirror_index].dev;
                        page->mirror_num = mirror_index + 1;
                        page->page = alloc_page(GFP_NOFS);
                        if (!page->page) {
@@ -1043,7 +1060,7 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
                struct scrub_page *page = sblock->pagev + page_num;
                DECLARE_COMPLETION_ONSTACK(complete);
 
-               if (page->bdev == NULL) {
+               if (page->dev->bdev == NULL) {
                        page->io_error = 1;
                        sblock->no_io_error_seen = 0;
                        continue;
@@ -1053,7 +1070,7 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
                bio = bio_alloc(GFP_NOFS, 1);
                if (!bio)
                        return -EIO;
-               bio->bi_bdev = page->bdev;
+               bio->bi_bdev = page->dev->bdev;
                bio->bi_sector = page->physical >> 9;
                bio->bi_end_io = scrub_complete_bio_end_io;
                bio->bi_private = &complete;
@@ -1102,11 +1119,14 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
                h = (struct btrfs_header *)mapped_buffer;
 
                if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
-                   generation != le64_to_cpu(h->generation) ||
                    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
                    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
-                          BTRFS_UUID_SIZE))
+                          BTRFS_UUID_SIZE)) {
                        sblock->header_error = 1;
+               } else if (generation != le64_to_cpu(h->generation)) {
+                       sblock->header_error = 1;
+                       sblock->generation_error = 1;
+               }
                csum = h->csum;
        } else {
                if (!have_csum)
@@ -1182,7 +1202,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                bio = bio_alloc(GFP_NOFS, 1);
                if (!bio)
                        return -EIO;
-               bio->bi_bdev = page_bad->bdev;
+               bio->bi_bdev = page_bad->dev->bdev;
                bio->bi_sector = page_bad->physical >> 9;
                bio->bi_end_io = scrub_complete_bio_end_io;
                bio->bi_private = &complete;
@@ -1196,6 +1216,12 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 
                /* this will also unplug the queue */
                wait_for_completion(&complete);
+               if (!bio_flagged(bio, BIO_UPTODATE)) {
+                       btrfs_dev_stat_inc_and_print(page_bad->dev,
+                               BTRFS_DEV_STAT_WRITE_ERRS);
+                       bio_put(bio);
+                       return -EIO;
+               }
                bio_put(bio);
        }
 
@@ -1352,7 +1378,8 @@ static int scrub_checksum_super(struct scrub_block *sblock)
        u64 mapped_size;
        void *p;
        u32 crc = ~(u32)0;
-       int fail = 0;
+       int fail_gen = 0;
+       int fail_cor = 0;
        u64 len;
        int index;
 
@@ -1363,13 +1390,13 @@ static int scrub_checksum_super(struct scrub_block *sblock)
        memcpy(on_disk_csum, s->csum, sdev->csum_size);
 
        if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
-               ++fail;
+               ++fail_cor;
 
        if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
-               ++fail;
+               ++fail_gen;
 
        if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
-               ++fail;
+               ++fail_cor;
 
        len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
        mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
@@ -1394,9 +1421,9 @@ static int scrub_checksum_super(struct scrub_block *sblock)
 
        btrfs_csum_final(crc, calculated_csum);
        if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
-               ++fail;
+               ++fail_cor;
 
-       if (fail) {
+       if (fail_cor + fail_gen) {
                /*
                 * if we find an error in a super block, we just report it.
                 * They will get written with the next transaction commit
@@ -1405,9 +1432,15 @@ static int scrub_checksum_super(struct scrub_block *sblock)
                spin_lock(&sdev->stat_lock);
                ++sdev->stat.super_errors;
                spin_unlock(&sdev->stat_lock);
+               if (fail_cor)
+                       btrfs_dev_stat_inc_and_print(sdev->dev,
+                               BTRFS_DEV_STAT_CORRUPTION_ERRS);
+               else
+                       btrfs_dev_stat_inc_and_print(sdev->dev,
+                               BTRFS_DEV_STAT_GENERATION_ERRS);
        }
 
-       return fail;
+       return fail_cor + fail_gen;
 }
 
 static void scrub_block_get(struct scrub_block *sblock)
@@ -1551,7 +1584,7 @@ static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
                        return -ENOMEM;
                }
                spage->sblock = sblock;
-               spage->bdev = sdev->dev->bdev;
+               spage->dev = sdev->dev;
                spage->flags = flags;
                spage->generation = gen;
                spage->logical = logical;
index c5f8fca4195fca9eb3806ebfbccf52d03049691e..96eb9fef7bd279584cf4dd8b6ed42cc09e425c1d 100644 (file)
@@ -188,7 +188,8 @@ void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...)
        va_start(args, fmt);
 
        if (fmt[0] == '<' && isdigit(fmt[1]) && fmt[2] == '>') {
-               strncpy(lvl, fmt, 3);
+               memcpy(lvl, fmt, 3);
+               lvl[3] = '\0';
                fmt += 3;
                type = logtypes[fmt[1] - '0'];
        } else
@@ -435,11 +436,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                case Opt_thread_pool:
                        intarg = 0;
                        match_int(&args[0], &intarg);
-                       if (intarg) {
+                       if (intarg)
                                info->thread_pool_size = intarg;
-                               printk(KERN_INFO "btrfs: thread pool %d\n",
-                                      info->thread_pool_size);
-                       }
                        break;
                case Opt_max_inline:
                        num = match_strdup(&args[0]);
@@ -769,7 +767,7 @@ static int btrfs_fill_super(struct super_block *sb,
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
        sb->s_flags |= MS_POSIXACL;
 #endif
-
+       sb->s_flags |= MS_I_VERSION;
        err = open_ctree(sb, fs_devices, (char *)data);
        if (err) {
                printk("btrfs: open_ctree failed\n");
@@ -925,63 +923,48 @@ static inline int is_subvolume_inode(struct inode *inode)
  */
 static char *setup_root_args(char *args)
 {
-       unsigned copied = 0;
-       unsigned len = strlen(args) + 2;
-       char *pos;
-       char *ret;
+       unsigned len = strlen(args) + 2 + 1;
+       char *src, *dst, *buf;
 
        /*
-        * We need the same args as before, but minus
-        *
-        * subvol=a
-        *
-        * and add
-        *
-        * subvolid=0
+        * We need the same args as before, but with this substitution:
+        * s!subvol=[^,]+!subvolid=0!
         *
-        * which is a difference of 2 characters, so we allocate strlen(args) +
-        * 2 characters.
+        * Since the replacement string is up to 2 bytes longer than the
+        * original, allocate strlen(args) + 2 + 1 bytes.
         */
-       ret = kzalloc(len * sizeof(char), GFP_NOFS);
-       if (!ret)
-               return NULL;
-       pos = strstr(args, "subvol=");
 
+       src = strstr(args, "subvol=");
        /* This shouldn't happen, but just in case.. */
-       if (!pos) {
-               kfree(ret);
+       if (!src)
+               return NULL;
+
+       buf = dst = kmalloc(len, GFP_NOFS);
+       if (!buf)
                return NULL;
-       }
 
        /*
-        * The subvol=<> arg is not at the front of the string, copy everybody
-        * up to that into ret.
+        * If the subvol= arg is not at the start of the string,
+        * copy whatever precedes it into buf.
         */
-       if (pos != args) {
-               *pos = '\0';
-               strcpy(ret, args);
-               copied += strlen(args);
-               pos++;
+       if (src != args) {
+               *src++ = '\0';
+               strcpy(buf, args);
+               dst += strlen(args);
        }
 
-       strncpy(ret + copied, "subvolid=0", len - copied);
-
-       /* Length of subvolid=0 */
-       copied += 10;
+       strcpy(dst, "subvolid=0");
+       dst += strlen("subvolid=0");
 
        /*
-        * If there is no , after the subvol= option then we know there's no
-        * other options and we can just return.
+        * If there is a "," after the original subvol=... string,
+        * copy that suffix into our buffer.  Otherwise, we're done.
         */
-       pos = strchr(pos, ',');
-       if (!pos)
-               return ret;
+       src = strchr(src, ',');
+       if (src)
+               strcpy(dst, src);
 
-       /* Copy the rest of the arguments into our buffer */
-       strncpy(ret + copied, pos, len - copied);
-       copied += strlen(pos);
-
-       return ret;
+       return buf;
 }
 
 static struct dentry *mount_subvol(const char *subvol_name, int flags,
@@ -1118,6 +1101,40 @@ error_fs_info:
        return ERR_PTR(error);
 }
 
+static void btrfs_set_max_workers(struct btrfs_workers *workers, int new_limit)
+{
+       spin_lock_irq(&workers->lock);
+       workers->max_workers = new_limit;
+       spin_unlock_irq(&workers->lock);
+}
+
+static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
+                                    int new_pool_size, int old_pool_size)
+{
+       if (new_pool_size == old_pool_size)
+               return;
+
+       fs_info->thread_pool_size = new_pool_size;
+
+       printk(KERN_INFO "btrfs: resize thread pool %d -> %d\n",
+              old_pool_size, new_pool_size);
+
+       btrfs_set_max_workers(&fs_info->generic_worker, new_pool_size);
+       btrfs_set_max_workers(&fs_info->workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->delalloc_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->submit_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->caching_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->fixup_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->endio_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->endio_meta_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->endio_meta_write_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->endio_write_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->endio_freespace_worker, new_pool_size);
+       btrfs_set_max_workers(&fs_info->delayed_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->readahead_workers, new_pool_size);
+       btrfs_set_max_workers(&fs_info->scrub_workers, new_pool_size);
+}
+
 static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
@@ -1137,6 +1154,9 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                goto restore;
        }
 
+       btrfs_resize_thread_pool(fs_info,
+               fs_info->thread_pool_size, old_thread_pool_size);
+
        if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
                return 0;
 
@@ -1180,7 +1200,8 @@ restore:
        fs_info->compress_type = old_compress_type;
        fs_info->max_inline = old_max_inline;
        fs_info->alloc_start = old_alloc_start;
-       fs_info->thread_pool_size = old_thread_pool_size;
+       btrfs_resize_thread_pool(fs_info,
+               old_thread_pool_size, fs_info->thread_pool_size);
        fs_info->metadata_ratio = old_metadata_ratio;
        return ret;
 }
index 36422254ef6765c14290a2373fa6d83cf2d364d5..1791c6e3d83487d82c9ffe80ab0239976cfd1c96 100644 (file)
@@ -28,6 +28,7 @@
 #include "locking.h"
 #include "tree-log.h"
 #include "inode-map.h"
+#include "volumes.h"
 
 #define BTRFS_ROOT_TRANS_TAG 0
 
@@ -55,48 +56,49 @@ static noinline void switch_commit_root(struct btrfs_root *root)
 static noinline int join_transaction(struct btrfs_root *root, int nofail)
 {
        struct btrfs_transaction *cur_trans;
+       struct btrfs_fs_info *fs_info = root->fs_info;
 
-       spin_lock(&root->fs_info->trans_lock);
+       spin_lock(&fs_info->trans_lock);
 loop:
        /* The file system has been taken offline. No new transactions. */
-       if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
-               spin_unlock(&root->fs_info->trans_lock);
+       if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+               spin_unlock(&fs_info->trans_lock);
                return -EROFS;
        }
 
-       if (root->fs_info->trans_no_join) {
+       if (fs_info->trans_no_join) {
                if (!nofail) {
-                       spin_unlock(&root->fs_info->trans_lock);
+                       spin_unlock(&fs_info->trans_lock);
                        return -EBUSY;
                }
        }
 
-       cur_trans = root->fs_info->running_transaction;
+       cur_trans = fs_info->running_transaction;
        if (cur_trans) {
                if (cur_trans->aborted) {
-                       spin_unlock(&root->fs_info->trans_lock);
+                       spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
                atomic_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                cur_trans->num_joined++;
-               spin_unlock(&root->fs_info->trans_lock);
+               spin_unlock(&fs_info->trans_lock);
                return 0;
        }
-       spin_unlock(&root->fs_info->trans_lock);
+       spin_unlock(&fs_info->trans_lock);
 
        cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
        if (!cur_trans)
                return -ENOMEM;
 
-       spin_lock(&root->fs_info->trans_lock);
-       if (root->fs_info->running_transaction) {
+       spin_lock(&fs_info->trans_lock);
+       if (fs_info->running_transaction) {
                /*
                 * someone started a transaction after we unlocked.  Make sure
                 * to redo the trans_no_join checks above
                 */
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
-               cur_trans = root->fs_info->running_transaction;
+               cur_trans = fs_info->running_transaction;
                goto loop;
        }
 
@@ -121,20 +123,38 @@ loop:
        cur_trans->delayed_refs.flushing = 0;
        cur_trans->delayed_refs.run_delayed_start = 0;
        cur_trans->delayed_refs.seq = 1;
+
+       /*
+        * although the tree mod log is per file system and not per transaction,
+        * the log must never go across transaction boundaries.
+        */
+       smp_mb();
+       if (!list_empty(&fs_info->tree_mod_seq_list)) {
+               printk(KERN_ERR "btrfs: tree_mod_seq_list not empty when "
+                       "creating a fresh transaction\n");
+               WARN_ON(1);
+       }
+       if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) {
+               printk(KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
+                       "creating a fresh transaction\n");
+               WARN_ON(1);
+       }
+       atomic_set(&fs_info->tree_mod_seq, 0);
+
        init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
        spin_lock_init(&cur_trans->commit_lock);
        spin_lock_init(&cur_trans->delayed_refs.lock);
        INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);
 
        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
-       list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
+       list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
-                            root->fs_info->btree_inode->i_mapping);
-       root->fs_info->generation++;
-       cur_trans->transid = root->fs_info->generation;
-       root->fs_info->running_transaction = cur_trans;
+                            fs_info->btree_inode->i_mapping);
+       fs_info->generation++;
+       cur_trans->transid = fs_info->generation;
+       fs_info->running_transaction = cur_trans;
        cur_trans->aborted = 0;
-       spin_unlock(&root->fs_info->trans_lock);
+       spin_unlock(&fs_info->trans_lock);
 
        return 0;
 }
@@ -758,6 +778,9 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
        if (ret)
                return ret;
 
+       ret = btrfs_run_dev_stats(trans, root->fs_info);
+       BUG_ON(ret);
+
        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
index eb1ae908582cc51162a61798c80f3ed38e7ab6e8..2017d0ff511ca3304dad46e85045ad2ab28d4e75 100644 (file)
@@ -1628,7 +1628,9 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
        int i;
        int ret;
 
-       btrfs_read_buffer(eb, gen);
+       ret = btrfs_read_buffer(eb, gen);
+       if (ret)
+               return ret;
 
        level = btrfs_header_level(eb);
 
@@ -1749,7 +1751,11 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
 
                        path->slots[*level]++;
                        if (wc->free) {
-                               btrfs_read_buffer(next, ptr_gen);
+                               ret = btrfs_read_buffer(next, ptr_gen);
+                               if (ret) {
+                                       free_extent_buffer(next);
+                                       return ret;
+                               }
 
                                btrfs_tree_lock(next);
                                btrfs_set_lock_blocking(next);
@@ -1766,7 +1772,11 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
                        free_extent_buffer(next);
                        continue;
                }
-               btrfs_read_buffer(next, ptr_gen);
+               ret = btrfs_read_buffer(next, ptr_gen);
+               if (ret) {
+                       free_extent_buffer(next);
+                       return ret;
+               }
 
                WARN_ON(*level <= 0);
                if (path->nodes[*level-1])
@@ -2657,6 +2667,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
                btrfs_release_path(path);
        }
        btrfs_release_path(path);
+       if (ret > 0)
+               ret = 0;
        return ret;
 }
 
@@ -3028,21 +3040,6 @@ out:
        return ret;
 }
 
-static int inode_in_log(struct btrfs_trans_handle *trans,
-                struct inode *inode)
-{
-       struct btrfs_root *root = BTRFS_I(inode)->root;
-       int ret = 0;
-
-       mutex_lock(&root->log_mutex);
-       if (BTRFS_I(inode)->logged_trans == trans->transid &&
-           BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
-               ret = 1;
-       mutex_unlock(&root->log_mutex);
-       return ret;
-}
-
-
 /*
  * helper function around btrfs_log_inode to make sure newly created
  * parent directories also end up in the log.  A minimal inode and backref
@@ -3083,7 +3080,7 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
        if (ret)
                goto end_no_trans;
 
-       if (inode_in_log(trans, inode)) {
+       if (btrfs_inode_in_log(inode, trans->transid)) {
                ret = BTRFS_NO_LOG_SYNC;
                goto end_no_trans;
        }
index 12f5147bd2b1ae2a6016e7283c72ceccb44283b7..ab942f46b3dd81e06348c4950901f3e4eef87016 100644 (file)
@@ -23,9 +23,9 @@
  *
  * ulist = ulist_alloc();
  * ulist_add(ulist, root);
- * elem = NULL;
+ * ULIST_ITER_INIT(&uiter);
  *
- * while ((elem = ulist_next(ulist, elem)) {
+ * while ((elem = ulist_next(ulist, &uiter)) {
  *     for (all child nodes n in elem)
  *             ulist_add(ulist, n);
  *     do something useful with the node;
@@ -95,7 +95,7 @@ EXPORT_SYMBOL(ulist_reinit);
  *
  * The allocated ulist will be returned in an initialized state.
  */
-struct ulist *ulist_alloc(unsigned long gfp_mask)
+struct ulist *ulist_alloc(gfp_t gfp_mask)
 {
        struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
 
@@ -144,13 +144,22 @@ EXPORT_SYMBOL(ulist_free);
  * unaltered.
  */
 int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
-             unsigned long gfp_mask)
+             gfp_t gfp_mask)
+{
+       return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
+}
+
+int ulist_add_merge(struct ulist *ulist, u64 val, unsigned long aux,
+                   unsigned long *old_aux, gfp_t gfp_mask)
 {
        int i;
 
        for (i = 0; i < ulist->nnodes; ++i) {
-               if (ulist->nodes[i].val == val)
+               if (ulist->nodes[i].val == val) {
+                       if (old_aux)
+                               *old_aux = ulist->nodes[i].aux;
                        return 0;
+               }
        }
 
        if (ulist->nnodes >= ulist->nodes_alloced) {
@@ -188,33 +197,26 @@ EXPORT_SYMBOL(ulist_add);
 /**
  * ulist_next - iterate ulist
  * @ulist:     ulist to iterate
- * @prev:      previously returned element or %NULL to start iteration
+ * @uiter:     iterator variable, initialized with ULIST_ITER_INIT(&iterator)
  *
  * Note: locking must be provided by the caller. In case of rwlocks only read
  *       locking is needed
  *
- * This function is used to iterate an ulist. The iteration is started with
- * @prev = %NULL. It returns the next element from the ulist or %NULL when the
+ * This function is used to iterate an ulist.
+ * It returns the next element from the ulist or %NULL when the
  * end is reached. No guarantee is made with respect to the order in which
  * the elements are returned. They might neither be returned in order of
  * addition nor in ascending order.
  * It is allowed to call ulist_add during an enumeration. Newly added items
  * are guaranteed to show up in the running enumeration.
  */
-struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev)
+struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
 {
-       int next;
-
        if (ulist->nnodes == 0)
                return NULL;
-
-       if (!prev)
-               return &ulist->nodes[0];
-
-       next = (prev - ulist->nodes) + 1;
-       if (next < 0 || next >= ulist->nnodes)
+       if (uiter->i < 0 || uiter->i >= ulist->nnodes)
                return NULL;
 
-       return &ulist->nodes[next];
+       return &ulist->nodes[uiter->i++];
 }
 EXPORT_SYMBOL(ulist_next);
index 2e25dec58ec0e56251fbca880d27cc927aac95dc..21bdc8ec813046ac56e3c7db0739bcdba7ac188a 100644 (file)
  */
 #define ULIST_SIZE 16
 
+struct ulist_iterator {
+       int i;
+};
+
 /*
  * element of the list
  */
@@ -59,10 +63,15 @@ struct ulist {
 void ulist_init(struct ulist *ulist);
 void ulist_fini(struct ulist *ulist);
 void ulist_reinit(struct ulist *ulist);
-struct ulist *ulist_alloc(unsigned long gfp_mask);
+struct ulist *ulist_alloc(gfp_t gfp_mask);
 void ulist_free(struct ulist *ulist);
 int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
-             unsigned long gfp_mask);
-struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev);
+             gfp_t gfp_mask);
+int ulist_add_merge(struct ulist *ulist, u64 val, unsigned long aux,
+                   unsigned long *old_aux, gfp_t gfp_mask);
+struct ulist_node *ulist_next(struct ulist *ulist,
+                             struct ulist_iterator *uiter);
+
+#define ULIST_ITER_INIT(uiter) ((uiter)->i = 0)
 
 #endif
index 1411b99555a4c1f138a6a3bf699842849d2b3e08..7782020996feccd4b7103528a4c2989230f79b71 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/random.h>
 #include <linux/iocontext.h>
 #include <linux/capability.h>
+#include <linux/ratelimit.h>
 #include <linux/kthread.h>
 #include <asm/div64.h>
 #include "compat.h"
@@ -39,6 +40,8 @@ static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_device *device);
 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
+static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
+static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
 
 static DEFINE_MUTEX(uuid_mutex);
 static LIST_HEAD(fs_uuids);
@@ -361,6 +364,7 @@ static noinline int device_list_add(const char *path,
                        return -ENOMEM;
                }
                device->devid = devid;
+               device->dev_stats_valid = 0;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
@@ -1633,7 +1637,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        int ret = 0;
 
        if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
-               return -EINVAL;
+               return -EROFS;
 
        bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
                                  root->fs_info->bdev_holder);
@@ -4001,13 +4005,58 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
        return 0;
 }
 
+static void *merge_stripe_index_into_bio_private(void *bi_private,
+                                                unsigned int stripe_index)
+{
+       /*
+        * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
+        * at most 1.
+        * The alternative solution (instead of stealing bits from the
+        * pointer) would be to allocate an intermediate structure
+        * that contains the old private pointer plus the stripe_index.
+        */
+       BUG_ON((((uintptr_t)bi_private) & 3) != 0);
+       BUG_ON(stripe_index > 3);
+       return (void *)(((uintptr_t)bi_private) | stripe_index);
+}
+
+static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
+{
+       return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
+}
+
+static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
+{
+       return (unsigned int)((uintptr_t)bi_private) & 3;
+}
+
 static void btrfs_end_bio(struct bio *bio, int err)
 {
-       struct btrfs_bio *bbio = bio->bi_private;
+       struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
        int is_orig_bio = 0;
 
-       if (err)
+       if (err) {
                atomic_inc(&bbio->error);
+               if (err == -EIO || err == -EREMOTEIO) {
+                       unsigned int stripe_index =
+                               extract_stripe_index_from_bio_private(
+                                       bio->bi_private);
+                       struct btrfs_device *dev;
+
+                       BUG_ON(stripe_index >= bbio->num_stripes);
+                       dev = bbio->stripes[stripe_index].dev;
+                       if (bio->bi_rw & WRITE)
+                               btrfs_dev_stat_inc(dev,
+                                                  BTRFS_DEV_STAT_WRITE_ERRS);
+                       else
+                               btrfs_dev_stat_inc(dev,
+                                                  BTRFS_DEV_STAT_READ_ERRS);
+                       if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
+                               btrfs_dev_stat_inc(dev,
+                                                  BTRFS_DEV_STAT_FLUSH_ERRS);
+                       btrfs_dev_stat_print_on_error(dev);
+               }
+       }
 
        if (bio == bbio->orig_bio)
                is_orig_bio = 1;
@@ -4149,6 +4198,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                        bio = first_bio;
                }
                bio->bi_private = bbio;
+               bio->bi_private = merge_stripe_index_into_bio_private(
+                               bio->bi_private, (unsigned int)dev_nr);
                bio->bi_end_io = btrfs_end_bio;
                bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
                dev = bbio->stripes[dev_nr].dev;
@@ -4509,6 +4560,28 @@ int btrfs_read_sys_array(struct btrfs_root *root)
        return ret;
 }
 
+struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
+                                                  u64 logical, int mirror_num)
+{
+       struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+       int ret;
+       u64 map_length = 0;
+       struct btrfs_bio *bbio = NULL;
+       struct btrfs_device *device;
+
+       BUG_ON(mirror_num == 0);
+       ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio,
+                             mirror_num);
+       if (ret) {
+               BUG_ON(bbio != NULL);
+               return NULL;
+       }
+       BUG_ON(mirror_num != bbio->mirror_num);
+       device = bbio->stripes[mirror_num - 1].dev;
+       kfree(bbio);
+       return device;
+}
+
 int btrfs_read_chunk_tree(struct btrfs_root *root)
 {
        struct btrfs_path *path;
@@ -4583,3 +4656,230 @@ error:
        btrfs_free_path(path);
        return ret;
 }
+
+static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
+{
+       int i;
+
+       for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+               btrfs_dev_stat_reset(dev, i);
+}
+
+int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
+{
+       struct btrfs_key key;
+       struct btrfs_key found_key;
+       struct btrfs_root *dev_root = fs_info->dev_root;
+       struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+       struct extent_buffer *eb;
+       int slot;
+       int ret = 0;
+       struct btrfs_device *device;
+       struct btrfs_path *path = NULL;
+       int i;
+
+       path = btrfs_alloc_path();
+       if (!path) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       mutex_lock(&fs_devices->device_list_mutex);
+       list_for_each_entry(device, &fs_devices->devices, dev_list) {
+               int item_size;
+               struct btrfs_dev_stats_item *ptr;
+
+               key.objectid = 0;
+               key.type = BTRFS_DEV_STATS_KEY;
+               key.offset = device->devid;
+               ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
+               if (ret) {
+                       printk(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n",
+                              device->name, (unsigned long long)device->devid);
+                       __btrfs_reset_dev_stats(device);
+                       device->dev_stats_valid = 1;
+                       btrfs_release_path(path);
+                       continue;
+               }
+               slot = path->slots[0];
+               eb = path->nodes[0];
+               btrfs_item_key_to_cpu(eb, &found_key, slot);
+               item_size = btrfs_item_size_nr(eb, slot);
+
+               ptr = btrfs_item_ptr(eb, slot,
+                                    struct btrfs_dev_stats_item);
+
+               for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
+                       if (item_size >= (1 + i) * sizeof(__le64))
+                               btrfs_dev_stat_set(device, i,
+                                       btrfs_dev_stats_value(eb, ptr, i));
+                       else
+                               btrfs_dev_stat_reset(device, i);
+               }
+
+               device->dev_stats_valid = 1;
+               btrfs_dev_stat_print_on_load(device);
+               btrfs_release_path(path);
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
+
+out:
+       btrfs_free_path(path);
+       return ret < 0 ? ret : 0;
+}
+
+static int update_dev_stat_item(struct btrfs_trans_handle *trans,
+                               struct btrfs_root *dev_root,
+                               struct btrfs_device *device)
+{
+       struct btrfs_path *path;
+       struct btrfs_key key;
+       struct extent_buffer *eb;
+       struct btrfs_dev_stats_item *ptr;
+       int ret;
+       int i;
+
+       key.objectid = 0;
+       key.type = BTRFS_DEV_STATS_KEY;
+       key.offset = device->devid;
+
+       path = btrfs_alloc_path();
+       BUG_ON(!path);
+       ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
+       if (ret < 0) {
+               printk(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
+                      ret, device->name);
+               goto out;
+       }
+
+       if (ret == 0 &&
+           btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
+               /* need to delete old one and insert a new one */
+               ret = btrfs_del_item(trans, dev_root, path);
+               if (ret != 0) {
+                       printk(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
+                              device->name, ret);
+                       goto out;
+               }
+               ret = 1;
+       }
+
+       if (ret == 1) {
+               /* need to insert a new item */
+               btrfs_release_path(path);
+               ret = btrfs_insert_empty_item(trans, dev_root, path,
+                                             &key, sizeof(*ptr));
+               if (ret < 0) {
+                       printk(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
+                              device->name, ret);
+                       goto out;
+               }
+       }
+
+       eb = path->nodes[0];
+       ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
+       for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+               btrfs_set_dev_stats_value(eb, ptr, i,
+                                         btrfs_dev_stat_read(device, i));
+       btrfs_mark_buffer_dirty(eb);
+
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
+/*
+ * called from commit_transaction. Writes all changed device stats to disk.
+ */
+int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
+                       struct btrfs_fs_info *fs_info)
+{
+       struct btrfs_root *dev_root = fs_info->dev_root;
+       struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+       struct btrfs_device *device;
+       int ret = 0;
+
+       mutex_lock(&fs_devices->device_list_mutex);
+       list_for_each_entry(device, &fs_devices->devices, dev_list) {
+               if (!device->dev_stats_valid || !device->dev_stats_dirty)
+                       continue;
+
+               ret = update_dev_stat_item(trans, dev_root, device);
+               if (!ret)
+                       device->dev_stats_dirty = 0;
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
+
+       return ret;
+}
+
+void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
+{
+       btrfs_dev_stat_inc(dev, index);
+       btrfs_dev_stat_print_on_error(dev);
+}
+
+void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
+{
+       if (!dev->dev_stats_valid)
+               return;
+       printk_ratelimited(KERN_ERR
+                          "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
+                          dev->name,
+                          btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
+                          btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
+                          btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
+                          btrfs_dev_stat_read(dev,
+                                              BTRFS_DEV_STAT_CORRUPTION_ERRS),
+                          btrfs_dev_stat_read(dev,
+                                              BTRFS_DEV_STAT_GENERATION_ERRS));
+}
+
+static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
+{
+       printk(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
+              dev->name,
+              btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
+              btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
+              btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
+              btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
+              btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
+}
+
+int btrfs_get_dev_stats(struct btrfs_root *root,
+                       struct btrfs_ioctl_get_dev_stats *stats,
+                       int reset_after_read)
+{
+       struct btrfs_device *dev;
+       struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+       int i;
+
+       mutex_lock(&fs_devices->device_list_mutex);
+       dev = btrfs_find_device(root, stats->devid, NULL, NULL);
+       mutex_unlock(&fs_devices->device_list_mutex);
+
+       if (!dev) {
+               printk(KERN_WARNING
+                      "btrfs: get dev_stats failed, device not found\n");
+               return -ENODEV;
+       } else if (!dev->dev_stats_valid) {
+               printk(KERN_WARNING
+                      "btrfs: get dev_stats failed, not yet valid\n");
+               return -ENODEV;
+       } else if (reset_after_read) {
+               for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
+                       if (stats->nr_items > i)
+                               stats->values[i] =
+                                       btrfs_dev_stat_read_and_reset(dev, i);
+                       else
+                               btrfs_dev_stat_reset(dev, i);
+               }
+       } else {
+               for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+                       if (stats->nr_items > i)
+                               stats->values[i] = btrfs_dev_stat_read(dev, i);
+       }
+       if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
+               stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
+       return 0;
+}
index bb6b03f97aaa089793d667fae93335373773a7eb..3406a88ca83e023429b8af19f2d6aa64d4cac6f8 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/bio.h>
 #include <linux/sort.h>
 #include "async-thread.h"
+#include "ioctl.h"
 
 #define BTRFS_STRIPE_LEN       (64 * 1024)
 
@@ -106,6 +107,11 @@ struct btrfs_device {
        struct completion flush_wait;
        int nobarriers;
 
+       /* disk I/O failure stats. For detailed description refer to
+        * enum btrfs_dev_stat_values in ioctl.h */
+       int dev_stats_valid;
+       int dev_stats_dirty; /* counters need to be written to disk */
+       atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];
 };
 
 struct btrfs_fs_devices {
@@ -281,4 +287,50 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
                         u64 *start, u64 *max_avail);
+struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
+                                                  u64 logical, int mirror_num);
+void btrfs_dev_stat_print_on_error(struct btrfs_device *device);
+void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
+int btrfs_get_dev_stats(struct btrfs_root *root,
+                       struct btrfs_ioctl_get_dev_stats *stats,
+                       int reset_after_read);
+int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
+int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
+                       struct btrfs_fs_info *fs_info);
+
+static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
+                                     int index)
+{
+       atomic_inc(dev->dev_stat_values + index);
+       dev->dev_stats_dirty = 1;
+}
+
+static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
+                                     int index)
+{
+       return atomic_read(dev->dev_stat_values + index);
+}
+
+static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
+                                               int index)
+{
+       int ret;
+
+       ret = atomic_xchg(dev->dev_stat_values + index, 0);
+       dev->dev_stats_dirty = 1;
+       return ret;
+}
+
+static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
+                                     int index, unsigned long val)
+{
+       atomic_set(dev->dev_stat_values + index, val);
+       dev->dev_stats_dirty = 1;
+}
+
+static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
+                                       int index)
+{
+       btrfs_dev_stat_set(dev, index, 0);
+}
 #endif
index e7a5659087e66f93769bc750562d21294c9bd2b6..3f4e2d69e83a13cb66f3f3a56024f53f5299f5c4 100644 (file)
@@ -196,6 +196,7 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
        if (ret)
                goto out;
 
+       inode_inc_iversion(inode);
        inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, inode);
        BUG_ON(ret);
index ad5938ca357c270ace08388401176f22a6343571..838a9cf246bd0fa561ab66295f9bb3df77e0c6a2 100644 (file)
@@ -3152,7 +3152,7 @@ SYSCALL_DEFINE2(bdflush, int, func, long, data)
 /*
  * Buffer-head allocation
  */
-static struct kmem_cache *bh_cachep;
+static struct kmem_cache *bh_cachep __read_mostly;
 
 /*
  * Once the number of bh's in the machine exceeds this level, we start
index fbb2a643ef10a1f75c4918f165c9e3a22a603a86..8e1b60e557b65bea0df86a881376456658a9cffd 100644 (file)
@@ -40,38 +40,49 @@ struct ceph_nfs_confh {
        u32 parent_name_hash;
 } __attribute__ ((packed));
 
-static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
-                         int connectable)
+/*
+ * The presence of @parent_inode here tells us whether NFS wants a
+ * connectable file handle.  However, we want to make a connectionable
+ * file handle unconditionally so that the MDS gets as much of a hint
+ * as possible.  That means we only use @parent_dentry to indicate
+ * whether nfsd wants a connectable fh, and whether we should indicate
+ * failure from a too-small @max_len.
+ */
+static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
+                         struct inode *parent_inode)
 {
        int type;
        struct ceph_nfs_fh *fh = (void *)rawfh;
        struct ceph_nfs_confh *cfh = (void *)rawfh;
-       struct dentry *parent;
-       struct inode *inode = dentry->d_inode;
        int connected_handle_length = sizeof(*cfh)/4;
        int handle_length = sizeof(*fh)/4;
+       struct dentry *dentry = d_find_alias(inode);
+       struct dentry *parent;
 
        /* don't re-export snaps */
        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EINVAL;
 
-       spin_lock(&dentry->d_lock);
-       parent = dentry->d_parent;
-       if (*max_len >= connected_handle_length) {
+       /* if we found an alias, generate a connectable fh */
+       if (*max_len >= connected_handle_length && dentry) {
                dout("encode_fh %p connectable\n", dentry);
-               cfh->ino = ceph_ino(dentry->d_inode);
+               spin_lock(&dentry->d_lock);
+               parent = dentry->d_parent;
+               cfh->ino = ceph_ino(inode);
                cfh->parent_ino = ceph_ino(parent->d_inode);
                cfh->parent_name_hash = ceph_dentry_hash(parent->d_inode,
                                                         dentry);
                *max_len = connected_handle_length;
                type = 2;
+               spin_unlock(&dentry->d_lock);
        } else if (*max_len >= handle_length) {
-               if (connectable) {
+               if (parent_inode) {
+                       /* nfsd wants connectable */
                        *max_len = connected_handle_length;
                        type = 255;
                } else {
                        dout("encode_fh %p\n", dentry);
-                       fh->ino = ceph_ino(dentry->d_inode);
+                       fh->ino = ceph_ino(inode);
                        *max_len = handle_length;
                        type = 1;
                }
@@ -79,7 +90,6 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
                *max_len = handle_length;
                type = 255;
        }
-       spin_unlock(&dentry->d_lock);
        return type;
 }
 
index ed72428d9c75c80a6744ccd6a996b83c1a20d333..988d4f302e4880281a2b5e04c9f44dd7870202d2 100644 (file)
@@ -54,7 +54,6 @@ prepare_open_request(struct super_block *sb, int flags, int create_mode)
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = cpu_to_le32(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
-       req->r_args.open.preferred = cpu_to_le32(-1);
 out:
        return req;
 }
index 790914a598dd5d68b8f40b851c2faff2e790e4af..8e3fb69fbe62e60cd3698c07d9fa8e53d6d2b184 100644 (file)
@@ -26,8 +26,7 @@ static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
                l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
                l.object_size = ceph_file_layout_object_size(ci->i_layout);
                l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
-               l.preferred_osd =
-                       (s32)le32_to_cpu(ci->i_layout.fl_pg_preferred);
+               l.preferred_osd = (s32)-1;
                if (copy_to_user(arg, &l, sizeof(l)))
                        return -EFAULT;
        }
@@ -35,6 +34,32 @@ static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
        return err;
 }
 
+static long __validate_layout(struct ceph_mds_client *mdsc,
+                             struct ceph_ioctl_layout *l)
+{
+       int i, err;
+
+       /* validate striping parameters */
+       if ((l->object_size & ~PAGE_MASK) ||
+           (l->stripe_unit & ~PAGE_MASK) ||
+           ((unsigned)l->object_size % (unsigned)l->stripe_unit))
+               return -EINVAL;
+
+       /* make sure it's a valid data pool */
+       mutex_lock(&mdsc->mutex);
+       err = -EINVAL;
+       for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
+               if (mdsc->mdsmap->m_data_pg_pools[i] == l->data_pool) {
+                       err = 0;
+                       break;
+               }
+       mutex_unlock(&mdsc->mutex);
+       if (err)
+               return err;
+
+       return 0;
+}
+
 static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
 {
        struct inode *inode = file->f_dentry->d_inode;
@@ -44,52 +69,40 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
        struct ceph_ioctl_layout l;
        struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode);
        struct ceph_ioctl_layout nl;
-       int err, i;
+       int err;
 
        if (copy_from_user(&l, arg, sizeof(l)))
                return -EFAULT;
 
        /* validate changed params against current layout */
        err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT);
-       if (!err) {
-               nl.stripe_unit = ceph_file_layout_su(ci->i_layout);
-               nl.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
-               nl.object_size = ceph_file_layout_object_size(ci->i_layout);
-               nl.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
-               nl.preferred_osd =
-                               (s32)le32_to_cpu(ci->i_layout.fl_pg_preferred);
-       } else
+       if (err)
                return err;
 
+       memset(&nl, 0, sizeof(nl));
        if (l.stripe_count)
                nl.stripe_count = l.stripe_count;
+       else
+               nl.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
        if (l.stripe_unit)
                nl.stripe_unit = l.stripe_unit;
+       else
+               nl.stripe_unit = ceph_file_layout_su(ci->i_layout);
        if (l.object_size)
                nl.object_size = l.object_size;
+       else
+               nl.object_size = ceph_file_layout_object_size(ci->i_layout);
        if (l.data_pool)
                nl.data_pool = l.data_pool;
-       if (l.preferred_osd)
-               nl.preferred_osd = l.preferred_osd;
+       else
+               nl.data_pool = ceph_file_layout_pg_pool(ci->i_layout);
 
-       if ((nl.object_size & ~PAGE_MASK) ||
-           (nl.stripe_unit & ~PAGE_MASK) ||
-           ((unsigned)nl.object_size % (unsigned)nl.stripe_unit))
-               return -EINVAL;
+       /* this is obsolete, and always -1 */
+       nl.preferred_osd = le64_to_cpu(-1);
 
-       /* make sure it's a valid data pool */
-       if (l.data_pool > 0) {
-               mutex_lock(&mdsc->mutex);
-               err = -EINVAL;
-               for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
-                       if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
-                               err = 0;
-                               break;
-                       }
-               mutex_unlock(&mdsc->mutex);
-               if (err)
-                       return err;
-       }
+       err = __validate_layout(mdsc, &nl);
+       if (err)
+               return err;
 
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT,
                                       USE_AUTH_MDS);
@@ -106,8 +119,6 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
        req->r_args.setlayout.layout.fl_object_size =
                cpu_to_le32(l.object_size);
        req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool);
-       req->r_args.setlayout.layout.fl_pg_preferred =
-               cpu_to_le32(l.preferred_osd);
 
        parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
        err = ceph_mdsc_do_request(mdsc, parent_inode, req);
@@ -127,33 +138,16 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_mds_request *req;
        struct ceph_ioctl_layout l;
-       int err, i;
+       int err;
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
 
        /* copy and validate */
        if (copy_from_user(&l, arg, sizeof(l)))
                return -EFAULT;
 
-       if ((l.object_size & ~PAGE_MASK) ||
-           (l.stripe_unit & ~PAGE_MASK) ||
-           !l.stripe_unit ||
-           (l.object_size &&
-               (unsigned)l.object_size % (unsigned)l.stripe_unit))
-               return -EINVAL;
-
-       /* make sure it's a valid data pool */
-       if (l.data_pool > 0) {
-               mutex_lock(&mdsc->mutex);
-               err = -EINVAL;
-               for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
-                       if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
-                               err = 0;
-                               break;
-                       }
-               mutex_unlock(&mdsc->mutex);
-               if (err)
-                       return err;
-       }
+       err = __validate_layout(mdsc, &l);
+       if (err)
+               return err;
 
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETDIRLAYOUT,
                                       USE_AUTH_MDS);
@@ -171,8 +165,6 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
                        cpu_to_le32(l.object_size);
        req->r_args.setlayout.layout.fl_pg_pool =
                        cpu_to_le32(l.data_pool);
-       req->r_args.setlayout.layout.fl_pg_preferred =
-                       cpu_to_le32(l.preferred_osd);
 
        err = ceph_mdsc_do_request(mdsc, inode, req);
        ceph_mdsc_put_request(req);
index be4a604873331dc7c547950650a899b514f56d26..c77028afb1e1e6b52315a73d013a26fb5c9c7f5b 100644 (file)
@@ -34,6 +34,8 @@
 struct ceph_ioctl_layout {
        __u64 stripe_unit, stripe_count, object_size;
        __u64 data_pool;
+
+       /* obsolete.  new values ignored, always return -1 */
        __s64 preferred_osd;
 };
 
index 89971e137aab80454fed8a51a105d7df903b3101..200bc87eceb1cc417a1caa1a73e264cde79dda78 100644 (file)
@@ -334,10 +334,10 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
        dout("mdsc put_session %p %d -> %d\n", s,
             atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
        if (atomic_dec_and_test(&s->s_ref)) {
-               if (s->s_authorizer)
+               if (s->s_auth.authorizer)
                     s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer(
                             s->s_mdsc->fsc->client->monc.auth,
-                            s->s_authorizer);
+                            s->s_auth.authorizer);
                kfree(s);
        }
 }
@@ -3395,39 +3395,33 @@ out:
 /*
  * authentication
  */
-static int get_authorizer(struct ceph_connection *con,
-                         void **buf, int *len, int *proto,
-                         void **reply_buf, int *reply_len, int force_new)
+
+/*
+ * Note: returned pointer is the address of a structure that's
+ * managed separately.  Caller must *not* attempt to free it.
+ */
+static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
+                                       int *proto, int force_new)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
-       int ret = 0;
-
-       if (force_new && s->s_authorizer) {
-               ac->ops->destroy_authorizer(ac, s->s_authorizer);
-               s->s_authorizer = NULL;
-       }
-       if (s->s_authorizer == NULL) {
-               if (ac->ops->create_authorizer) {
-                       ret = ac->ops->create_authorizer(
-                               ac, CEPH_ENTITY_TYPE_MDS,
-                               &s->s_authorizer,
-                               &s->s_authorizer_buf,
-                               &s->s_authorizer_buf_len,
-                               &s->s_authorizer_reply_buf,
-                               &s->s_authorizer_reply_buf_len);
-                       if (ret)
-                               return ret;
-               }
-       }
+       struct ceph_auth_handshake *auth = &s->s_auth;
 
+       if (force_new && auth->authorizer) {
+               if (ac->ops && ac->ops->destroy_authorizer)
+                       ac->ops->destroy_authorizer(ac, auth->authorizer);
+               auth->authorizer = NULL;
+       }
+       if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
+               int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
+                                                       auth);
+               if (ret)
+                       return ERR_PTR(ret);
+       }
        *proto = ac->protocol;
-       *buf = s->s_authorizer_buf;
-       *len = s->s_authorizer_buf_len;
-       *reply_buf = s->s_authorizer_reply_buf;
-       *reply_len = s->s_authorizer_reply_buf_len;
-       return 0;
+
+       return auth;
 }
 
 
@@ -3437,7 +3431,7 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len)
        struct ceph_mds_client *mdsc = s->s_mdsc;
        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
 
-       return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
+       return ac->ops->verify_authorizer_reply(ac, s->s_auth.authorizer, len);
 }
 
 static int invalidate_authorizer(struct ceph_connection *con)
index 8c7c04ebb595a1a8bd2e9c1b177890f8ba234b9a..dd26846dd71de4267146b7deb43c7a5d7b9b976f 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/ceph/types.h>
 #include <linux/ceph/messenger.h>
 #include <linux/ceph/mdsmap.h>
+#include <linux/ceph/auth.h>
 
 /*
  * Some lock dependencies:
@@ -113,9 +114,7 @@ struct ceph_mds_session {
 
        struct ceph_connection s_con;
 
-       struct ceph_authorizer *s_authorizer;
-       void             *s_authorizer_buf, *s_authorizer_reply_buf;
-       size_t            s_authorizer_buf_len, s_authorizer_reply_buf_len;
+       struct ceph_auth_handshake s_auth;
 
        /* protected by s_gen_ttl_lock */
        spinlock_t        s_gen_ttl_lock;
index f04c0961f9937eb6f553978f10942800818528ba..e5206fc765620f3b550c2457e1f271cb9d81c8c8 100644 (file)
@@ -331,7 +331,7 @@ static int build_snap_context(struct ceph_snap_realm *realm)
 
        /* alloc new snap context */
        err = -ENOMEM;
-       if (num > (ULONG_MAX - sizeof(*snapc)) / sizeof(u64))
+       if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
                goto fail;
        snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS);
        if (!snapc)
index 35b86331d8a5ce84c311e9eb2730757f80149179..785cb3057c95a436c344da2d76ee24b86cb7954b 100644 (file)
@@ -118,15 +118,6 @@ static size_t ceph_vxattrcb_file_layout(struct ceph_inode_info *ci, char *val,
                (unsigned long long)ceph_file_layout_su(ci->i_layout),
                (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
                (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
-
-       if (ceph_file_layout_pg_preferred(ci->i_layout) >= 0) {
-               val += ret;
-               size -= ret;
-               ret += snprintf(val, size, "preferred_osd=%lld\n",
-                           (unsigned long long)ceph_file_layout_pg_preferred(
-                                   ci->i_layout));
-       }
-
        return ret;
 }
 
index 2b243af70aa325b3c6049ded6121473f98cfbe8e..a08306a8bec911ac72dfc76bf6fbbf79760d8db1 100644 (file)
@@ -158,3 +158,23 @@ config CIFS_NFSD_EXPORT
          depends on CIFS && EXPERIMENTAL && BROKEN
          help
           Allows NFS server to export a CIFS mounted share (nfsd over cifs)
+
+config CIFS_SMB2
+       bool "SMB2 network file system support (EXPERIMENTAL)"
+       depends on EXPERIMENTAL && INET && BROKEN
+       select NLS
+       select KEYS
+       select FSCACHE
+       select DNS_RESOLVER
+
+       help
+         This enables experimental support for the SMB2 (Server Message Block
+         version 2) protocol. The SMB2 protocol is the successor to the
+         popular CIFS and SMB network file sharing protocols. SMB2 is the
+         native file sharing mechanism for recent versions of Windows
+         operating systems (since Vista).  SMB2 enablement will eventually
+         allow users better performance, security and features, than would be
+         possible with cifs. Note that smb2 mount options also are simpler
+         (compared to cifs) due to protocol improvements.
+
+         Unless you are a developer or tester, say N.
index 005d524c3a4ae0c390c6f9b001249423f1dd3b09..4b4127544349290d4c86ded91eb377a9b87c33e1 100644 (file)
@@ -6,7 +6,7 @@ obj-$(CONFIG_CIFS) += cifs.o
 cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \
          link.o misc.o netmisc.o smbencrypt.o transport.o asn1.o \
          cifs_unicode.o nterr.o xattr.o cifsencrypt.o \
-         readdir.o ioctl.o sess.o export.o
+         readdir.o ioctl.o sess.o export.o smb1ops.o
 
 cifs-$(CONFIG_CIFS_ACL) += cifsacl.o
 
@@ -15,3 +15,5 @@ cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
 cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o
 
 cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o
+
+cifs-$(CONFIG_CIFS_SMB2) += smb2ops.o
index b7d782bab79731694adc8d55489d8e258aace167..22ab7b5b8da7eda6bf38d19aff3415eade104c5c 100644 (file)
@@ -608,11 +608,6 @@ Stats                      Lists summary resource usage information as well as per
                        in the kernel configuration.
 
 Configuration pseudo-files:
-MultiuserMount         If set to one, more than one CIFS session to 
-                       the same server ip address can be established
-                       if more than one uid accesses the same mount
-                       point and if the uids user/password mapping
-                       information is available. (default is 0)
 PacketSigningEnabled   If set to one, cifs packet signing is enabled
                        and will be used if the server requires 
                        it.  If set to two, cifs packet signing is
index 2704646294166bec7edf9c006c98d5229a1eaab9..e8140528ca5c426b2fe7b4b150592e70de19cdca 100644 (file)
@@ -57,19 +57,21 @@ cifs_dump_mem(char *label, void *data, int length)
        }
 }
 
-#ifdef CONFIG_CIFS_DEBUG2
 void cifs_dump_detail(void *buf)
 {
+#ifdef CONFIG_CIFS_DEBUG2
        struct smb_hdr *smb = (struct smb_hdr *)buf;
 
        cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
                  smb->Command, smb->Status.CifsError,
                  smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
        cERROR(1, "smb buf %p len %d", smb, smbCalcSize(smb));
+#endif /* CONFIG_CIFS_DEBUG2 */
 }
 
 void cifs_dump_mids(struct TCP_Server_Info *server)
 {
+#ifdef CONFIG_CIFS_DEBUG2
        struct list_head *tmp;
        struct mid_q_entry *mid_entry;
 
@@ -102,8 +104,8 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
                }
        }
        spin_unlock(&GlobalMid_Lock);
-}
 #endif /* CONFIG_CIFS_DEBUG2 */
+}
 
 #ifdef CONFIG_PROC_FS
 static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
@@ -420,7 +422,6 @@ static struct proc_dir_entry *proc_fs_cifs;
 static const struct file_operations cifsFYI_proc_fops;
 static const struct file_operations cifs_lookup_cache_proc_fops;
 static const struct file_operations traceSMB_proc_fops;
-static const struct file_operations cifs_multiuser_mount_proc_fops;
 static const struct file_operations cifs_security_flags_proc_fops;
 static const struct file_operations cifs_linux_ext_proc_fops;
 
@@ -440,8 +441,6 @@ cifs_proc_init(void)
        proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
        proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
                    &cifs_linux_ext_proc_fops);
-       proc_create("MultiuserMount", 0, proc_fs_cifs,
-                   &cifs_multiuser_mount_proc_fops);
        proc_create("SecurityFlags", 0, proc_fs_cifs,
                    &cifs_security_flags_proc_fops);
        proc_create("LookupCacheEnabled", 0, proc_fs_cifs,
@@ -460,7 +459,6 @@ cifs_proc_clean(void)
 #ifdef CONFIG_CIFS_STATS
        remove_proc_entry("Stats", proc_fs_cifs);
 #endif
-       remove_proc_entry("MultiuserMount", proc_fs_cifs);
        remove_proc_entry("SecurityFlags", proc_fs_cifs);
        remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
        remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
@@ -617,52 +615,6 @@ static const struct file_operations traceSMB_proc_fops = {
        .write          = traceSMB_proc_write,
 };
 
-static int cifs_multiuser_mount_proc_show(struct seq_file *m, void *v)
-{
-       seq_printf(m, "%d\n", multiuser_mount);
-       return 0;
-}
-
-static int cifs_multiuser_mount_proc_open(struct inode *inode, struct file *fh)
-{
-       return single_open(fh, cifs_multiuser_mount_proc_show, NULL);
-}
-
-static ssize_t cifs_multiuser_mount_proc_write(struct file *file,
-               const char __user *buffer, size_t count, loff_t *ppos)
-{
-       char c;
-       int rc;
-       static bool warned;
-
-       rc = get_user(c, buffer);
-       if (rc)
-               return rc;
-       if (c == '0' || c == 'n' || c == 'N')
-               multiuser_mount = 0;
-       else if (c == '1' || c == 'y' || c == 'Y') {
-               multiuser_mount = 1;
-               if (!warned) {
-                       warned = true;
-                       printk(KERN_WARNING "CIFS VFS: The legacy multiuser "
-                               "mount code is scheduled to be deprecated in "
-                               "3.5. Please switch to using the multiuser "
-                               "mount option.");
-               }
-       }
-
-       return count;
-}
-
-static const struct file_operations cifs_multiuser_mount_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = cifs_multiuser_mount_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = cifs_multiuser_mount_proc_write,
-};
-
 static int cifs_security_flags_proc_show(struct seq_file *m, void *v)
 {
        seq_printf(m, "0x%x\n", global_secflags);
index 566e0ae8dc2cb64d8e10822da691caed0dc28a9d..c0c68bb492d7d98e43257bd4d433bf0676b06530 100644 (file)
 #define _H_CIFS_DEBUG
 
 void cifs_dump_mem(char *label, void *data, int length);
-#ifdef CONFIG_CIFS_DEBUG2
-#define DBG2 2
 void cifs_dump_detail(void *);
 void cifs_dump_mids(struct TCP_Server_Info *);
+#ifdef CONFIG_CIFS_DEBUG2
+#define DBG2 2
 #else
 #define DBG2 0
 #endif
index 541ef81f6ae8ffe99ebcfa9c303f9c15975dd8c7..8b6e344eb0ba3a483780f0b32cf0961ec258ca4c 100644 (file)
@@ -56,7 +56,6 @@ int traceSMB = 0;
 bool enable_oplocks = true;
 unsigned int linuxExtEnabled = 1;
 unsigned int lookupCacheEnabled = 1;
-unsigned int multiuser_mount = 0;
 unsigned int global_secflags = CIFSSEC_DEF;
 /* unsigned int ntlmv2_support = 0; */
 unsigned int sign_CIFS_PDUs = 1;
@@ -125,7 +124,7 @@ cifs_read_super(struct super_block *sb)
                goto out_no_root;
        }
 
-       /* do that *after* d_alloc_root() - we want NULL ->d_op for root here */
+       /* do that *after* d_make_root() - we want NULL ->d_op for root here */
        if (cifs_sb_master_tcon(cifs_sb)->nocase)
                sb->s_d_op = &cifs_ci_dentry_ops;
        else
@@ -272,7 +271,7 @@ static void
 cifs_evict_inode(struct inode *inode)
 {
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
        cifs_fscache_release_inode_cookie(inode);
 }
 
@@ -329,6 +328,19 @@ cifs_show_security(struct seq_file *s, struct TCP_Server_Info *server)
                seq_printf(s, "i");
 }
 
+static void
+cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
+{
+       seq_printf(s, ",cache=");
+
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
+               seq_printf(s, "strict");
+       else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
+               seq_printf(s, "none");
+       else
+               seq_printf(s, "loose");
+}
+
 /*
  * cifs_show_options() is for displaying mount options in /proc/mounts.
  * Not all settable options are displayed but most of the important
@@ -342,7 +354,9 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
        struct sockaddr *srcaddr;
        srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
 
+       seq_printf(s, ",vers=%s", tcon->ses->server->vals->version_string);
        cifs_show_security(s, tcon->ses->server);
+       cifs_show_cache_flavor(s, cifs_sb);
 
        seq_printf(s, ",unc=%s", tcon->treeName);
 
@@ -408,8 +422,6 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
                seq_printf(s, ",rwpidforward");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
                seq_printf(s, ",forcemand");
-       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
-               seq_printf(s, ",directio");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
                seq_printf(s, ",nouser_xattr");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
@@ -432,8 +444,6 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
                seq_printf(s, ",nostrictsync");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
                seq_printf(s, ",noperm");
-       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
-               seq_printf(s, ",strictcache");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
                seq_printf(s, ",backupuid=%u", cifs_sb->mnt_backupuid);
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
@@ -945,7 +955,6 @@ cifs_init_once(void *inode)
        struct cifsInodeInfo *cifsi = inode;
 
        inode_init_once(&cifsi->vfs_inode);
-       INIT_LIST_HEAD(&cifsi->llist);
        mutex_init(&cifsi->lock_mutex);
 }
 
index 4ff6313f0a9158958fbf0f63e4b21464b281c996..20350a93ed99105062743e1123e441af52b59a39 100644 (file)
@@ -43,6 +43,7 @@
 
 #define CIFS_MIN_RCV_POOL 4
 
+#define MAX_REOPEN_ATT 5 /* these many maximum attempts to reopen a file */
 /*
  * default attribute cache timeout (jiffies)
  */
@@ -150,6 +151,57 @@ struct cifs_cred {
  *****************************************************************
  */
 
+enum smb_version {
+       Smb_1 = 1,
+       Smb_21,
+};
+
+struct mid_q_entry;
+struct TCP_Server_Info;
+struct cifsFileInfo;
+struct cifs_ses;
+
+struct smb_version_operations {
+       int (*send_cancel)(struct TCP_Server_Info *, void *,
+                          struct mid_q_entry *);
+       bool (*compare_fids)(struct cifsFileInfo *, struct cifsFileInfo *);
+       /* setup request: allocate mid, sign message */
+       int (*setup_request)(struct cifs_ses *, struct kvec *, unsigned int,
+                            struct mid_q_entry **);
+       /* check response: verify signature, map error */
+       int (*check_receive)(struct mid_q_entry *, struct TCP_Server_Info *,
+                            bool);
+       void (*add_credits)(struct TCP_Server_Info *, const unsigned int);
+       void (*set_credits)(struct TCP_Server_Info *, const int);
+       int * (*get_credits_field)(struct TCP_Server_Info *);
+       /* data offset from read response message */
+       unsigned int (*read_data_offset)(char *);
+       /* data length from read response message */
+       unsigned int (*read_data_length)(char *);
+       /* map smb to linux error */
+       int (*map_error)(char *, bool);
+       /* find mid corresponding to the response message */
+       struct mid_q_entry * (*find_mid)(struct TCP_Server_Info *, char *);
+       void (*dump_detail)(void *);
+       /* verify the message */
+       int (*check_message)(char *, unsigned int);
+       bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+};
+
+struct smb_version_values {
+       char            *version_string;
+       __u32           large_lock_type;
+       __u32           exclusive_lock_type;
+       __u32           shared_lock_type;
+       __u32           unlock_lock_type;
+       size_t          header_size;
+       size_t          max_header_size;
+       size_t          read_rsp_size;
+};
+
+#define HEADER_SIZE(server) (server->vals->header_size)
+#define MAX_HEADER_SIZE(server) (server->vals->max_header_size)
+
 struct smb_vol {
        char *username;
        char *password;
@@ -205,6 +257,8 @@ struct smb_vol {
        bool sockopt_tcp_nodelay:1;
        unsigned short int port;
        unsigned long actimeo; /* attribute cache timeout (jiffies) */
+       struct smb_version_operations *ops;
+       struct smb_version_values *vals;
        char *prepath;
        struct sockaddr_storage srcaddr; /* allow binding to a local IP */
        struct nls_table *local_nls;
@@ -242,6 +296,8 @@ struct TCP_Server_Info {
        int srv_count; /* reference counter */
        /* 15 character server name + 0x20 16th byte indicating type = srv */
        char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
+       struct smb_version_operations   *ops;
+       struct smb_version_values       *vals;
        enum statusEnum tcpStatus; /* what we think the status is */
        char *hostname; /* hostname portion of UNC string */
        struct socket *ssocket;
@@ -321,16 +377,6 @@ in_flight(struct TCP_Server_Info *server)
        return num;
 }
 
-static inline int*
-get_credits_field(struct TCP_Server_Info *server)
-{
-       /*
-        * This will change to switch statement when we reserve slots for echos
-        * and oplock breaks.
-        */
-       return &server->credits;
-}
-
 static inline bool
 has_credits(struct TCP_Server_Info *server, int *credits)
 {
@@ -341,16 +387,16 @@ has_credits(struct TCP_Server_Info *server, int *credits)
        return num > 0;
 }
 
-static inline size_t
-header_size(void)
+static inline void
+add_credits(struct TCP_Server_Info *server, const unsigned int add)
 {
-       return sizeof(struct smb_hdr);
+       server->ops->add_credits(server, add);
 }
 
-static inline size_t
-max_header_size(void)
+static inline void
+set_credits(struct TCP_Server_Info *server, const int val)
 {
-       return MAX_CIFS_HDR_SIZE;
+       server->ops->set_credits(server, val);
 }
 
 /*
@@ -547,8 +593,7 @@ struct cifsLockInfo {
        __u64 offset;
        __u64 length;
        __u32 pid;
-       __u8 type;
-       __u16 netfid;
+       __u32 type;
 };
 
 /*
@@ -573,6 +618,10 @@ struct cifs_search_info {
 struct cifsFileInfo {
        struct list_head tlist; /* pointer to next fid owned by tcon */
        struct list_head flist; /* next fid (file instance) for this inode */
+       struct list_head llist; /*
+                                * brlocks held by this fid, protected by
+                                * lock_mutex from cifsInodeInfo structure
+                                */
        unsigned int uid;       /* allows finding which FileInfo structure */
        __u32 pid;              /* process id who opened file */
        __u16 netfid;           /* file id from remote */
@@ -615,9 +664,12 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
  */
 
 struct cifsInodeInfo {
-       struct list_head llist;         /* brlocks for this inode */
        bool can_cache_brlcks;
-       struct mutex lock_mutex;        /* protect two fields above */
+       struct mutex lock_mutex;        /*
+                                        * protect the field above and llist
+                                        * from every cifsFileInfo structure
+                                        * from openFileList
+                                        */
        /* BB add in lists for dirty pages i.e. write caching info for oplock */
        struct list_head openFileList;
        __u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
@@ -703,7 +755,6 @@ static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
 
 #endif
 
-struct mid_q_entry;
 
 /*
  * This is the prototype for the mid receive function. This function is for
@@ -1042,12 +1093,7 @@ GLOBAL_EXTERN atomic_t smBufAllocCount;
 GLOBAL_EXTERN atomic_t midCount;
 
 /* Misc globals */
-GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
-                               to be established on existing mount if we
-                               have the uid/password or Kerberos credential
-                               or equivalent for current user */
-/* enable or disable oplocks */
-GLOBAL_EXTERN bool enable_oplocks;
+GLOBAL_EXTERN bool enable_oplocks; /* enable or disable oplocks */
 GLOBAL_EXTERN unsigned int lookupCacheEnabled;
 GLOBAL_EXTERN unsigned int global_secflags;    /* if on, session setup sent
                                with more secure ntlmssp2 challenge/resp */
@@ -1074,4 +1120,11 @@ void cifs_oplock_break(struct work_struct *work);
 extern const struct slow_work_ops cifs_oplock_break_ops;
 extern struct workqueue_struct *cifsiod_wq;
 
+/* Operations for different SMB versions */
+#define SMB1_VERSION_STRING    "1.0"
+extern struct smb_version_operations smb1_operations;
+extern struct smb_version_values smb1_values;
+#define SMB21_VERSION_STRING   "2.1"
+extern struct smb_version_operations smb21_operations;
+extern struct smb_version_values smb21_values;
 #endif /* _CIFS_GLOB_H */
index 96192c1e380afb9475048f5d932e886fb4338d61..5ec21ecf7980e98a2d51ed9d2edf41c720952d4d 100644 (file)
@@ -78,6 +78,8 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
                        int * /* bytes returned */ , const int long_op);
 extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                            char *in_buf, int flags);
+extern int cifs_setup_request(struct cifs_ses *, struct kvec *, unsigned int,
+                             struct mid_q_entry **);
 extern int cifs_check_receive(struct mid_q_entry *mid,
                        struct TCP_Server_Info *server, bool log_error);
 extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
@@ -88,9 +90,6 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
                        struct smb_hdr *in_buf ,
                        struct smb_hdr *out_buf,
                        int *bytes_returned);
-extern void cifs_add_credits(struct TCP_Server_Info *server,
-                            const unsigned int add);
-extern void cifs_set_credits(struct TCP_Server_Info *server, const int val);
 extern int checkSMB(char *buf, unsigned int length);
 extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
 extern bool backup_cred(struct cifs_sb_info *);
@@ -192,11 +191,13 @@ extern int CIFSTCon(unsigned int xid, struct cifs_ses *ses,
 
 extern int CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
                const char *searchName, const struct nls_table *nls_codepage,
-               __u16 *searchHandle, struct cifs_search_info *psrch_inf,
+               __u16 *searchHandle, __u16 search_flags,
+               struct cifs_search_info *psrch_inf,
                int map, const char dirsep);
 
 extern int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
-               __u16 searchHandle, struct cifs_search_info *psrch_inf);
+               __u16 searchHandle, __u16 search_flags,
+               struct cifs_search_info *psrch_inf);
 
 extern int CIFSFindClose(const int, struct cifs_tcon *tcon,
                        const __u16 search_handle);
@@ -464,6 +465,9 @@ extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8,
 
 /* asynchronous read support */
 struct cifs_readdata {
+       struct kref                     refcount;
+       struct list_head                list;
+       struct completion               done;
        struct cifsFileInfo             *cfile;
        struct address_space            *mapping;
        __u64                           offset;
@@ -472,12 +476,13 @@ struct cifs_readdata {
        int                             result;
        struct list_head                pages;
        struct work_struct              work;
+       int (*marshal_iov) (struct cifs_readdata *rdata,
+                           unsigned int remaining);
        unsigned int                    nr_iov;
        struct kvec                     iov[1];
 };
 
-struct cifs_readdata *cifs_readdata_alloc(unsigned int nr_pages);
-void cifs_readdata_free(struct cifs_readdata *rdata);
+void cifs_readdata_release(struct kref *refcount);
 int cifs_async_readv(struct cifs_readdata *rdata);
 
 /* asynchronous write support */
index da2f5446fa7ae3d3bbba6bb1b7a92cd1cc8743e4..b5ad716b2642138ebebdc18a975718f9a09855c8 100644 (file)
@@ -87,7 +87,6 @@ static struct {
 #endif /* CIFS_POSIX */
 
 /* Forward declarations */
-static void cifs_readv_complete(struct work_struct *work);
 
 /* Mark as invalid, all open files on tree connections since they
    were closed when session to server was lost */
@@ -461,7 +460,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
                server->maxReq = min_t(unsigned int,
                                       le16_to_cpu(rsp->MaxMpxCount),
                                       cifs_max_pending);
-               cifs_set_credits(server, server->maxReq);
+               set_credits(server, server->maxReq);
                server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
                server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
                /* even though we do not use raw we might as well set this
@@ -569,7 +568,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
           little endian */
        server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount),
                               cifs_max_pending);
-       cifs_set_credits(server, server->maxReq);
+       set_credits(server, server->maxReq);
        /* probably no need to store and check maxvcs */
        server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
        server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
@@ -721,7 +720,7 @@ cifs_echo_callback(struct mid_q_entry *mid)
        struct TCP_Server_Info *server = mid->callback_data;
 
        DeleteMidQEntry(mid);
-       cifs_add_credits(server, 1);
+       add_credits(server, 1);
 }
 
 int
@@ -1385,28 +1384,6 @@ openRetry:
        return rc;
 }
 
-struct cifs_readdata *
-cifs_readdata_alloc(unsigned int nr_pages)
-{
-       struct cifs_readdata *rdata;
-
-       /* readdata + 1 kvec for each page */
-       rdata = kzalloc(sizeof(*rdata) +
-                       sizeof(struct kvec) * nr_pages, GFP_KERNEL);
-       if (rdata != NULL) {
-               INIT_WORK(&rdata->work, cifs_readv_complete);
-               INIT_LIST_HEAD(&rdata->pages);
-       }
-       return rdata;
-}
-
-void
-cifs_readdata_free(struct cifs_readdata *rdata)
-{
-       cifsFileInfo_put(rdata->cfile);
-       kfree(rdata);
-}
-
 /*
  * Discard any remaining data in the current SMB. To do this, we borrow the
  * current bigbuf.
@@ -1423,7 +1400,7 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 
                length = cifs_read_from_socket(server, server->bigbuf,
                                min_t(unsigned int, remaining,
-                                       CIFSMaxBufSize + max_header_size()));
+                                   CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
                if (length < 0)
                        return length;
                server->total_read += length;
@@ -1434,38 +1411,14 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        return 0;
 }
 
-static inline size_t
-read_rsp_size(void)
-{
-       return sizeof(READ_RSP);
-}
-
-static inline unsigned int
-read_data_offset(char *buf)
-{
-       READ_RSP *rsp = (READ_RSP *)buf;
-       return le16_to_cpu(rsp->DataOffset);
-}
-
-static inline unsigned int
-read_data_length(char *buf)
-{
-       READ_RSP *rsp = (READ_RSP *)buf;
-       return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
-              le16_to_cpu(rsp->DataLength);
-}
-
 static int
 cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 {
        int length, len;
-       unsigned int data_offset, remaining, data_len;
+       unsigned int data_offset, data_len;
        struct cifs_readdata *rdata = mid->callback_data;
        char *buf = server->smallbuf;
        unsigned int buflen = get_rfc1002_length(buf) + 4;
-       u64 eof;
-       pgoff_t eof_index;
-       struct page *page, *tpage;
 
        cFYI(1, "%s: mid=%llu offset=%llu bytes=%u", __func__,
                mid->mid, rdata->offset, rdata->bytes);
@@ -1475,9 +1428,10 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
         * can if there's not enough data. At this point, we've read down to
         * the Mid.
         */
-       len = min_t(unsigned int, buflen, read_rsp_size()) - header_size() + 1;
+       len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
+                                                       HEADER_SIZE(server) + 1;
 
-       rdata->iov[0].iov_base = buf + header_size() - 1;
+       rdata->iov[0].iov_base = buf + HEADER_SIZE(server) - 1;
        rdata->iov[0].iov_len = len;
 
        length = cifs_readv_from_socket(server, rdata->iov, 1, len);
@@ -1486,7 +1440,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        server->total_read += length;
 
        /* Was the SMB read successful? */
-       rdata->result = map_smb_to_linux_error(buf, false);
+       rdata->result = server->ops->map_error(buf, false);
        if (rdata->result != 0) {
                cFYI(1, "%s: server returned error %d", __func__,
                        rdata->result);
@@ -1494,14 +1448,15 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        }
 
        /* Is there enough to get to the rest of the READ_RSP header? */
-       if (server->total_read < read_rsp_size()) {
+       if (server->total_read < server->vals->read_rsp_size) {
                cFYI(1, "%s: server returned short header. got=%u expected=%zu",
-                       __func__, server->total_read, read_rsp_size());
+                       __func__, server->total_read,
+                       server->vals->read_rsp_size);
                rdata->result = -EIO;
                return cifs_readv_discard(server, mid);
        }
 
-       data_offset = read_data_offset(buf) + 4;
+       data_offset = server->ops->read_data_offset(buf) + 4;
        if (data_offset < server->total_read) {
                /*
                 * win2k8 sometimes sends an offset of 0 when the read
@@ -1540,7 +1495,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
                rdata->iov[0].iov_base, rdata->iov[0].iov_len);
 
        /* how much data is in the response? */
-       data_len = read_data_length(buf);
+       data_len = server->ops->read_data_length(buf);
        if (data_offset + data_len > buflen) {
                /* data_len is corrupt -- discard frame */
                rdata->result = -EIO;
@@ -1548,64 +1503,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        }
 
        /* marshal up the page array */
-       len = 0;
-       remaining = data_len;
-       rdata->nr_iov = 1;
-
-       /* determine the eof that the server (probably) has */
-       eof = CIFS_I(rdata->mapping->host)->server_eof;
-       eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
-       cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
-
-       list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
-               if (remaining >= PAGE_CACHE_SIZE) {
-                       /* enough data to fill the page */
-                       rdata->iov[rdata->nr_iov].iov_base = kmap(page);
-                       rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
-                       cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
-                               rdata->nr_iov, page->index,
-                               rdata->iov[rdata->nr_iov].iov_base,
-                               rdata->iov[rdata->nr_iov].iov_len);
-                       ++rdata->nr_iov;
-                       len += PAGE_CACHE_SIZE;
-                       remaining -= PAGE_CACHE_SIZE;
-               } else if (remaining > 0) {
-                       /* enough for partial page, fill and zero the rest */
-                       rdata->iov[rdata->nr_iov].iov_base = kmap(page);
-                       rdata->iov[rdata->nr_iov].iov_len = remaining;
-                       cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
-                               rdata->nr_iov, page->index,
-                               rdata->iov[rdata->nr_iov].iov_base,
-                               rdata->iov[rdata->nr_iov].iov_len);
-                       memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
-                               '\0', PAGE_CACHE_SIZE - remaining);
-                       ++rdata->nr_iov;
-                       len += remaining;
-                       remaining = 0;
-               } else if (page->index > eof_index) {
-                       /*
-                        * The VFS will not try to do readahead past the
-                        * i_size, but it's possible that we have outstanding
-                        * writes with gaps in the middle and the i_size hasn't
-                        * caught up yet. Populate those with zeroed out pages
-                        * to prevent the VFS from repeatedly attempting to
-                        * fill them until the writes are flushed.
-                        */
-                       zero_user(page, 0, PAGE_CACHE_SIZE);
-                       list_del(&page->lru);
-                       lru_cache_add_file(page);
-                       flush_dcache_page(page);
-                       SetPageUptodate(page);
-                       unlock_page(page);
-                       page_cache_release(page);
-               } else {
-                       /* no need to hold page hostage */
-                       list_del(&page->lru);
-                       lru_cache_add_file(page);
-                       unlock_page(page);
-                       page_cache_release(page);
-               }
-       }
+       len = rdata->marshal_iov(rdata, data_len);
+       data_len -= len;
 
        /* issue the read if we have any iovecs left to fill */
        if (rdata->nr_iov > 1) {
@@ -1621,7 +1520,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        rdata->bytes = length;
 
        cFYI(1, "total_read=%u buflen=%u remaining=%u", server->total_read,
-               buflen, remaining);
+               buflen, data_len);
 
        /* discard anything left over */
        if (server->total_read < buflen)
@@ -1631,33 +1530,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        return length;
 }
 
-static void
-cifs_readv_complete(struct work_struct *work)
-{
-       struct cifs_readdata *rdata = container_of(work,
-                                               struct cifs_readdata, work);
-       struct page *page, *tpage;
-
-       list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
-               list_del(&page->lru);
-               lru_cache_add_file(page);
-
-               if (rdata->result == 0) {
-                       kunmap(page);
-                       flush_dcache_page(page);
-                       SetPageUptodate(page);
-               }
-
-               unlock_page(page);
-
-               if (rdata->result == 0)
-                       cifs_readpage_to_fscache(rdata->mapping->host, page);
-
-               page_cache_release(page);
-       }
-       cifs_readdata_free(rdata);
-}
-
 static void
 cifs_readv_callback(struct mid_q_entry *mid)
 {
@@ -1691,7 +1563,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
 
        queue_work(cifsiod_wq, &rdata->work);
        DeleteMidQEntry(mid);
-       cifs_add_credits(server, 1);
+       add_credits(server, 1);
 }
 
 /* cifs_async_readv - send an async write, and set up mid to handle result */
@@ -1744,12 +1616,15 @@ cifs_async_readv(struct cifs_readdata *rdata)
        rdata->iov[0].iov_base = smb;
        rdata->iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
 
+       kref_get(&rdata->refcount);
        rc = cifs_call_async(tcon->ses->server, rdata->iov, 1,
                             cifs_readv_receive, cifs_readv_callback,
                             rdata, false);
 
        if (rc == 0)
                cifs_stats_inc(&tcon->num_reads);
+       else
+               kref_put(&rdata->refcount, cifs_readdata_release);
 
        cifs_small_buf_release(smb);
        return rc;
@@ -2135,7 +2010,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
 
        queue_work(cifsiod_wq, &wdata->work);
        DeleteMidQEntry(mid);
-       cifs_add_credits(tcon->ses->server, 1);
+       add_credits(tcon->ses->server, 1);
 }
 
 /* cifs_async_writev - send an async write, and set up mid to handle result */
@@ -4344,7 +4219,7 @@ int
 CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
              const char *searchName,
              const struct nls_table *nls_codepage,
-             __u16 *pnetfid,
+             __u16 *pnetfid, __u16 search_flags,
              struct cifs_search_info *psrch_inf, int remap, const char dirsep)
 {
 /* level 257 SMB_ */
@@ -4416,8 +4291,7 @@ findFirstRetry:
            cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
                        ATTR_DIRECTORY);
        pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
-       pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END |
-               CIFS_SEARCH_RETURN_RESUME);
+       pSMB->SearchFlags = cpu_to_le16(search_flags);
        pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
 
        /* BB what should we set StorageType to? Does it matter? BB */
@@ -4487,8 +4361,8 @@ findFirstRetry:
        return rc;
 }
 
-int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
-                __u16 searchHandle, struct cifs_search_info *psrch_inf)
+int CIFSFindNext(const int xid, struct cifs_tcon *tcon, __u16 searchHandle,
+                __u16 search_flags, struct cifs_search_info *psrch_inf)
 {
        TRANSACTION2_FNEXT_REQ *pSMB = NULL;
        TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
@@ -4531,8 +4405,7 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
                cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO));
        pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
        pSMB->ResumeKey = psrch_inf->resume_key;
-       pSMB->SearchFlags =
-             cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME);
+       pSMB->SearchFlags = cpu_to_le16(search_flags);
 
        name_len = psrch_inf->resume_name_len;
        params += name_len;
index e0b56d7a19c561be0b173a670d18f6d8435a12ed..ccafdedd0dbc4df14e04c0087d28b1dfa453be1f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/connect.c
  *
- *   Copyright (C) International Business Machines  Corp., 2002,2009
+ *   Copyright (C) International Business Machines  Corp., 2002,2011
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -102,7 +102,7 @@ enum {
        Opt_srcaddr, Opt_prefixpath,
        Opt_iocharset, Opt_sockopt,
        Opt_netbiosname, Opt_servern,
-       Opt_ver, Opt_sec,
+       Opt_ver, Opt_vers, Opt_sec, Opt_cache,
 
        /* Mount options to be ignored */
        Opt_ignore,
@@ -210,9 +210,9 @@ static const match_table_t cifs_mount_option_tokens = {
        { Opt_netbiosname, "netbiosname=%s" },
        { Opt_servern, "servern=%s" },
        { Opt_ver, "ver=%s" },
-       { Opt_ver, "vers=%s" },
-       { Opt_ver, "version=%s" },
+       { Opt_vers, "vers=%s" },
        { Opt_sec, "sec=%s" },
+       { Opt_cache, "cache=%s" },
 
        { Opt_ignore, "cred" },
        { Opt_ignore, "credentials" },
@@ -261,6 +261,26 @@ static const match_table_t cifs_secflavor_tokens = {
        { Opt_sec_err, NULL }
 };
 
+/* cache flavors */
+enum {
+       Opt_cache_loose,
+       Opt_cache_strict,
+       Opt_cache_none,
+       Opt_cache_err
+};
+
+static const match_table_t cifs_cacheflavor_tokens = {
+       { Opt_cache_loose, "loose" },
+       { Opt_cache_strict, "strict" },
+       { Opt_cache_none, "none" },
+       { Opt_cache_err, NULL }
+};
+
+static const match_table_t cifs_smb_version_tokens = {
+       { Smb_1, SMB1_VERSION_STRING },
+       { Smb_21, SMB21_VERSION_STRING },
+};
+
 static int ip_connect(struct TCP_Server_Info *server);
 static int generic_ip_connect(struct TCP_Server_Info *server);
 static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
@@ -549,7 +569,7 @@ allocate_buffers(struct TCP_Server_Info *server)
                }
        } else if (server->large_buf) {
                /* we are reusing a dirty large buf, clear its start */
-               memset(server->bigbuf, 0, header_size());
+               memset(server->bigbuf, 0, HEADER_SIZE(server));
        }
 
        if (!server->smallbuf) {
@@ -563,7 +583,7 @@ allocate_buffers(struct TCP_Server_Info *server)
                /* beginning of smb buffer is cleared in our buf_get */
        } else {
                /* if existing small buf clear beginning */
-               memset(server->smallbuf, 0, header_size());
+               memset(server->smallbuf, 0, HEADER_SIZE(server));
        }
 
        return true;
@@ -764,25 +784,6 @@ is_smb_response(struct TCP_Server_Info *server, unsigned char type)
        return false;
 }
 
-static struct mid_q_entry *
-find_mid(struct TCP_Server_Info *server, char *buffer)
-{
-       struct smb_hdr *buf = (struct smb_hdr *)buffer;
-       struct mid_q_entry *mid;
-
-       spin_lock(&GlobalMid_Lock);
-       list_for_each_entry(mid, &server->pending_mid_q, qhead) {
-               if (mid->mid == buf->Mid &&
-                   mid->mid_state == MID_REQUEST_SUBMITTED &&
-                   le16_to_cpu(mid->command) == buf->Command) {
-                       spin_unlock(&GlobalMid_Lock);
-                       return mid;
-               }
-       }
-       spin_unlock(&GlobalMid_Lock);
-       return NULL;
-}
-
 void
 dequeue_mid(struct mid_q_entry *mid, bool malformed)
 {
@@ -934,7 +935,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        unsigned int pdu_length = get_rfc1002_length(buf);
 
        /* make sure this will fit in a large buffer */
-       if (pdu_length > CIFSMaxBufSize + max_header_size() - 4) {
+       if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) - 4) {
                cERROR(1, "SMB response too long (%u bytes)",
                        pdu_length);
                cifs_reconnect(server);
@@ -950,8 +951,8 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        }
 
        /* now read the rest */
-       length = cifs_read_from_socket(server, buf + header_size() - 1,
-                                      pdu_length - header_size() + 1 + 4);
+       length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
+                               pdu_length - HEADER_SIZE(server) + 1 + 4);
        if (length < 0)
                return length;
        server->total_read += length;
@@ -967,7 +968,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
         * 48 bytes is enough to display the header and a little bit
         * into the payload for debugging purposes.
         */
-       length = checkSMB(buf, server->total_read);
+       length = server->ops->check_message(buf, server->total_read);
        if (length != 0)
                cifs_dump_mem("Bad SMB: ", buf,
                        min_t(unsigned int, server->total_read, 48));
@@ -1025,7 +1026,7 @@ cifs_demultiplex_thread(void *p)
                        continue;
 
                /* make sure we have enough to get to the MID */
-               if (pdu_length < header_size() - 1 - 4) {
+               if (pdu_length < HEADER_SIZE(server) - 1 - 4) {
                        cERROR(1, "SMB response too short (%u bytes)",
                                pdu_length);
                        cifs_reconnect(server);
@@ -1035,12 +1036,12 @@ cifs_demultiplex_thread(void *p)
 
                /* read down to the MID */
                length = cifs_read_from_socket(server, buf + 4,
-                                              header_size() - 1 - 4);
+                                              HEADER_SIZE(server) - 1 - 4);
                if (length < 0)
                        continue;
                server->total_read += length;
 
-               mid_entry = find_mid(server, buf);
+               mid_entry = server->ops->find_mid(server, buf);
 
                if (!mid_entry || !mid_entry->receive)
                        length = standard_receive3(server, mid_entry);
@@ -1057,12 +1058,13 @@ cifs_demultiplex_thread(void *p)
                if (mid_entry != NULL) {
                        if (!mid_entry->multiRsp || mid_entry->multiEnd)
                                mid_entry->callback(mid_entry);
-               } else if (!is_valid_oplock_break(buf, server)) {
+               } else if (!server->ops->is_oplock_break(buf, server)) {
                        cERROR(1, "No task to wake, unknown frame received! "
                                   "NumMids %d", atomic_read(&midCount));
-                       cifs_dump_mem("Received Data is: ", buf, header_size());
+                       cifs_dump_mem("Received Data is: ", buf,
+                                     HEADER_SIZE(server));
 #ifdef CONFIG_CIFS_DEBUG2
-                       cifs_dump_detail(buf);
+                       server->ops->dump_detail(buf);
                        cifs_dump_mids(server);
 #endif /* CIFS_DEBUG2 */
 
@@ -1185,6 +1187,54 @@ static int cifs_parse_security_flavors(char *value,
        return 0;
 }
 
+static int
+cifs_parse_cache_flavor(char *value, struct smb_vol *vol)
+{
+       substring_t args[MAX_OPT_ARGS];
+
+       switch (match_token(value, cifs_cacheflavor_tokens, args)) {
+       case Opt_cache_loose:
+               vol->direct_io = false;
+               vol->strict_io = false;
+               break;
+       case Opt_cache_strict:
+               vol->direct_io = false;
+               vol->strict_io = true;
+               break;
+       case Opt_cache_none:
+               vol->direct_io = true;
+               vol->strict_io = false;
+               break;
+       default:
+               cERROR(1, "bad cache= option: %s", value);
+               return 1;
+       }
+       return 0;
+}
+
+static int
+cifs_parse_smb_version(char *value, struct smb_vol *vol)
+{
+       substring_t args[MAX_OPT_ARGS];
+
+       switch (match_token(value, cifs_smb_version_tokens, args)) {
+       case Smb_1:
+               vol->ops = &smb1_operations;
+               vol->vals = &smb1_values;
+               break;
+#ifdef CONFIG_CIFS_SMB2
+       case Smb_21:
+               vol->ops = &smb21_operations;
+               vol->vals = &smb21_values;
+               break;
+#endif
+       default:
+               cERROR(1, "Unknown vers= option specified: %s", value);
+               return 1;
+       }
+       return 0;
+}
+
 static int
 cifs_parse_mount_options(const char *mountdata, const char *devname,
                         struct smb_vol *vol)
@@ -1203,6 +1253,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
        char *string = NULL;
        char *tmp_end, *value;
        char delim;
+       bool cache_specified = false;
+       static bool cache_warned = false;
 
        separator[0] = ',';
        separator[1] = 0;
@@ -1236,6 +1288,10 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
 
        vol->actimeo = CIFS_DEF_ACTIMEO;
 
+       /* FIXME: add autonegotiation -- for now, SMB1 is default */
+       vol->ops = &smb1_operations;
+       vol->vals = &smb1_values;
+
        if (!mountdata)
                goto cifs_parse_mount_err;
 
@@ -1414,10 +1470,20 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        vol->seal = 1;
                        break;
                case Opt_direct:
-                       vol->direct_io = 1;
+                       cache_specified = true;
+                       vol->direct_io = true;
+                       vol->strict_io = false;
+                       cERROR(1, "The \"directio\" option will be removed in "
+                                 "3.7. Please switch to the \"cache=none\" "
+                                 "option.");
                        break;
                case Opt_strictcache:
-                       vol->strict_io = 1;
+                       cache_specified = true;
+                       vol->direct_io = false;
+                       vol->strict_io = true;
+                       cERROR(1, "The \"strictcache\" option will be removed "
+                               "in 3.7. Please switch to the \"cache=strict\" "
+                               "option.");
                        break;
                case Opt_noac:
                        printk(KERN_WARNING "CIFS: Mount option noac not "
@@ -1821,8 +1887,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        if (string == NULL)
                                goto out_nomem;
 
-                       if (strnicmp(string, "cifs", 4) == 0 ||
-                           strnicmp(string, "1", 1) == 0) {
+                       if (strnicmp(string, "1", 1) == 0) {
                                /* This is the default */
                                break;
                        }
@@ -1830,6 +1895,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        printk(KERN_WARNING "CIFS: Invalid version"
                                            " specified\n");
                        goto cifs_parse_mount_err;
+               case Opt_vers:
+                       string = match_strdup(args);
+                       if (string == NULL)
+                               goto out_nomem;
+
+                       if (cifs_parse_smb_version(string, vol) != 0)
+                               goto cifs_parse_mount_err;
+                       break;
                case Opt_sec:
                        string = match_strdup(args);
                        if (string == NULL)
@@ -1838,6 +1911,15 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        if (cifs_parse_security_flavors(string, vol) != 0)
                                goto cifs_parse_mount_err;
                        break;
+               case Opt_cache:
+                       cache_specified = true;
+                       string = match_strdup(args);
+                       if (string == NULL)
+                               goto out_nomem;
+
+                       if (cifs_parse_cache_flavor(string, vol) != 0)
+                               goto cifs_parse_mount_err;
+                       break;
                default:
                        /*
                         * An option we don't recognize. Save it off for later
@@ -1881,6 +1963,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                printk(KERN_NOTICE "CIFS: ignoring forcegid mount option "
                                   "specified with no gid= option.\n");
 
+       /* FIXME: remove this block in 3.7 */
+       if (!cache_specified && !cache_warned) {
+               cache_warned = true;
+               printk(KERN_NOTICE "CIFS: no cache= option specified, using "
+                                  "\"cache=loose\". This default will change "
+                                  "to \"cache=strict\" in 3.7.\n");
+       }
+
        kfree(mountdata_copy);
        return 0;
 
@@ -2041,6 +2131,9 @@ match_security(struct TCP_Server_Info *server, struct smb_vol *vol)
 static int match_server(struct TCP_Server_Info *server, struct sockaddr *addr,
                         struct smb_vol *vol)
 {
+       if ((server->vals != vol->vals) || (server->ops != vol->ops))
+               return 0;
+
        if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
                return 0;
 
@@ -2163,6 +2256,8 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
                goto out_err;
        }
 
+       tcp_ses->ops = volume_info->ops;
+       tcp_ses->vals = volume_info->vals;
        cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
        tcp_ses->hostname = extract_hostname(volume_info->UNC);
        if (IS_ERR(tcp_ses->hostname)) {
@@ -3569,6 +3664,7 @@ cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
        if (cifs_parse_mount_options(mount_data, devname, volume_info))
                return -EINVAL;
 
+
        if (volume_info->nullauth) {
                cFYI(1, "Anonymous login");
                kfree(volume_info->username);
@@ -4010,11 +4106,11 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
        if (server->maxBuf != 0)
                return 0;
 
-       cifs_set_credits(server, 1);
+       set_credits(server, 1);
        rc = CIFSSMBNegotiate(xid, ses);
        if (rc == -EAGAIN) {
                /* retry only once on 1st time connection */
-               cifs_set_credits(server, 1);
+               set_credits(server, 1);
                rc = CIFSSMBNegotiate(xid, ses);
                if (rc == -EAGAIN)
                        rc = -EHOSTDOWN;
index 81725e9286e911f501e4a78d1d7c28768753d118..253170dfa71650704c8c2f6cdb69942635488404 100644 (file)
@@ -264,6 +264,7 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
        pCifsFile->tlink = cifs_get_tlink(tlink);
        mutex_init(&pCifsFile->fh_mutex);
        INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
+       INIT_LIST_HEAD(&pCifsFile->llist);
 
        spin_lock(&cifs_file_list_lock);
        list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
@@ -334,9 +335,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
         * is closed anyway.
         */
        mutex_lock(&cifsi->lock_mutex);
-       list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
-               if (li->netfid != cifs_file->netfid)
-                       continue;
+       list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
                kfree(li);
@@ -645,7 +644,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
 }
 
 static struct cifsLockInfo *
-cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
+cifs_lock_init(__u64 offset, __u64 length, __u8 type)
 {
        struct cifsLockInfo *lock =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
@@ -654,7 +653,6 @@ cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
        lock->offset = offset;
        lock->length = length;
        lock->type = type;
-       lock->netfid = netfid;
        lock->pid = current->tgid;
        INIT_LIST_HEAD(&lock->blist);
        init_waitqueue_head(&lock->block_q);
@@ -672,19 +670,20 @@ cifs_del_lock_waiters(struct cifsLockInfo *lock)
 }
 
 static bool
-__cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
-                       __u64 length, __u8 type, __u16 netfid,
-                       struct cifsLockInfo **conf_lock)
+cifs_find_fid_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
+                           __u64 length, __u8 type, struct cifsFileInfo *cur,
+                           struct cifsLockInfo **conf_lock)
 {
-       struct cifsLockInfo *li, *tmp;
+       struct cifsLockInfo *li;
+       struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
 
-       list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
+       list_for_each_entry(li, &cfile->llist, llist) {
                if (offset + length <= li->offset ||
                    offset >= li->offset + li->length)
                        continue;
-               else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
-                        ((netfid == li->netfid && current->tgid == li->pid) ||
-                         type == li->type))
+               else if ((type & server->vals->shared_lock_type) &&
+                        ((server->ops->compare_fids(cur, cfile) &&
+                          current->tgid == li->pid) || type == li->type))
                        continue;
                else {
                        *conf_lock = li;
@@ -695,11 +694,23 @@ __cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
 }
 
 static bool
-cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
-                       struct cifsLockInfo **conf_lock)
+cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
+                       __u8 type, struct cifsLockInfo **conf_lock)
 {
-       return __cifs_find_lock_conflict(cinode, lock->offset, lock->length,
-                                        lock->type, lock->netfid, conf_lock);
+       bool rc = false;
+       struct cifsFileInfo *fid, *tmp;
+       struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+
+       spin_lock(&cifs_file_list_lock);
+       list_for_each_entry_safe(fid, tmp, &cinode->openFileList, flist) {
+               rc = cifs_find_fid_lock_conflict(fid, offset, length, type,
+                                                cfile, conf_lock);
+               if (rc)
+                       break;
+       }
+       spin_unlock(&cifs_file_list_lock);
+
+       return rc;
 }
 
 /*
@@ -710,22 +721,24 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
  * the server or 1 otherwise.
  */
 static int
-cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
-              __u8 type, __u16 netfid, struct file_lock *flock)
+cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
+              __u8 type, struct file_lock *flock)
 {
        int rc = 0;
        struct cifsLockInfo *conf_lock;
+       struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+       struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
        bool exist;
 
        mutex_lock(&cinode->lock_mutex);
 
-       exist = __cifs_find_lock_conflict(cinode, offset, length, type, netfid,
-                                         &conf_lock);
+       exist = cifs_find_lock_conflict(cfile, offset, length, type,
+                                       &conf_lock);
        if (exist) {
                flock->fl_start = conf_lock->offset;
                flock->fl_end = conf_lock->offset + conf_lock->length - 1;
                flock->fl_pid = conf_lock->pid;
-               if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
+               if (conf_lock->type & server->vals->shared_lock_type)
                        flock->fl_type = F_RDLCK;
                else
                        flock->fl_type = F_WRLCK;
@@ -739,10 +752,11 @@ cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
 }
 
 static void
-cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
+cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
 {
+       struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        mutex_lock(&cinode->lock_mutex);
-       list_add_tail(&lock->llist, &cinode->llist);
+       list_add_tail(&lock->llist, &cfile->llist);
        mutex_unlock(&cinode->lock_mutex);
 }
 
@@ -753,10 +767,11 @@ cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
  * 3) -EACCESS, if there is a lock that prevents us and wait is false.
  */
 static int
-cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
+cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
                 bool wait)
 {
        struct cifsLockInfo *conf_lock;
+       struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        bool exist;
        int rc = 0;
 
@@ -764,9 +779,10 @@ try_again:
        exist = false;
        mutex_lock(&cinode->lock_mutex);
 
-       exist = cifs_find_lock_conflict(cinode, lock, &conf_lock);
+       exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
+                                       lock->type, &conf_lock);
        if (!exist && cinode->can_cache_brlcks) {
-               list_add_tail(&lock->llist, &cinode->llist);
+               list_add_tail(&lock->llist, &cfile->llist);
                mutex_unlock(&cinode->lock_mutex);
                return rc;
        }
@@ -888,7 +904,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
-               list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
+               list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
                        if (li->type != types[i])
                                continue;
                        cur->Pid = cpu_to_le16(li->pid);
@@ -898,7 +914,8 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
                        cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
                        if (++num == max_num) {
                                stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
-                                                      li->type, 0, num, buf);
+                                                      (__u8)li->type, 0, num,
+                                                      buf);
                                if (stored_rc)
                                        rc = stored_rc;
                                cur = buf;
@@ -909,7 +926,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
 
                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
-                                              types[i], 0, num, buf);
+                                              (__u8)types[i], 0, num, buf);
                        if (stored_rc)
                                rc = stored_rc;
                }
@@ -1053,8 +1070,8 @@ cifs_push_locks(struct cifsFileInfo *cfile)
 }
 
 static void
-cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
-               bool *wait_flag)
+cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
+               bool *wait_flag, struct TCP_Server_Info *server)
 {
        if (flock->fl_flags & FL_POSIX)
                cFYI(1, "Posix");
@@ -1073,38 +1090,50 @@ cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
            (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
                cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
 
-       *type = LOCKING_ANDX_LARGE_FILES;
+       *type = server->vals->large_lock_type;
        if (flock->fl_type == F_WRLCK) {
                cFYI(1, "F_WRLCK ");
+               *type |= server->vals->exclusive_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_UNLCK) {
                cFYI(1, "F_UNLCK");
+               *type |= server->vals->unlock_lock_type;
                *unlock = 1;
                /* Check if unlock includes more than one lock range */
        } else if (flock->fl_type == F_RDLCK) {
                cFYI(1, "F_RDLCK");
-               *type |= LOCKING_ANDX_SHARED_LOCK;
+               *type |= server->vals->shared_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_EXLCK) {
                cFYI(1, "F_EXLCK");
+               *type |= server->vals->exclusive_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_SHLCK) {
                cFYI(1, "F_SHLCK");
-               *type |= LOCKING_ANDX_SHARED_LOCK;
+               *type |= server->vals->shared_lock_type;
                *lock = 1;
        } else
                cFYI(1, "Unknown type of lock");
 }
 
 static int
-cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
+cifs_mandatory_lock(int xid, struct cifsFileInfo *cfile, __u64 offset,
+                   __u64 length, __u32 type, int lock, int unlock, bool wait)
+{
+       return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->netfid,
+                          current->tgid, length, offset, unlock, lock,
+                          (__u8)type, wait, 0);
+}
+
+static int
+cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
           bool wait_flag, bool posix_lck, int xid)
 {
        int rc = 0;
        __u64 length = 1 + flock->fl_end - flock->fl_start;
        struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
-       struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+       struct TCP_Server_Info *server = tcon->ses->server;
        __u16 netfid = cfile->netfid;
 
        if (posix_lck) {
@@ -1114,7 +1143,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
                if (!rc)
                        return rc;
 
-               if (type & LOCKING_ANDX_SHARED_LOCK)
+               if (type & server->vals->shared_lock_type)
                        posix_lock_type = CIFS_RDLCK;
                else
                        posix_lock_type = CIFS_WRLCK;
@@ -1124,38 +1153,35 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
                return rc;
        }
 
-       rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
-                           flock);
+       rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
        if (!rc)
                return rc;
 
        /* BB we could chain these into one lock request BB */
-       rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
-                        flock->fl_start, 0, 1, type, 0, 0);
+       rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length, type,
+                                1, 0, false);
        if (rc == 0) {
-               rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
-                                length, flock->fl_start, 1, 0,
-                                type, 0, 0);
+               rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+                                        type, 0, 1, false);
                flock->fl_type = F_UNLCK;
                if (rc != 0)
                        cERROR(1, "Error unlocking previously locked "
-                                  "range %d during test of lock", rc);
+                                 "range %d during test of lock", rc);
                return 0;
        }
 
-       if (type & LOCKING_ANDX_SHARED_LOCK) {
+       if (type & server->vals->shared_lock_type) {
                flock->fl_type = F_WRLCK;
                return 0;
        }
 
-       rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
-                        flock->fl_start, 0, 1,
-                        type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
+       rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+                                type | server->vals->shared_lock_type, 1, 0,
+                                false);
        if (rc == 0) {
-               rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
-                                length, flock->fl_start, 1, 0,
-                                type | LOCKING_ANDX_SHARED_LOCK,
-                                0, 0);
+               rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+                                        type | server->vals->shared_lock_type,
+                                        0, 1, false);
                flock->fl_type = F_RDLCK;
                if (rc != 0)
                        cERROR(1, "Error unlocking previously locked "
@@ -1212,15 +1238,13 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
-               list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
+               list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
                        if (flock->fl_start > li->offset ||
                            (flock->fl_start + length) <
                            (li->offset + li->length))
                                continue;
                        if (current->tgid != li->pid)
                                continue;
-                       if (cfile->netfid != li->netfid)
-                               continue;
                        if (types[i] != li->type)
                                continue;
                        if (!cinode->can_cache_brlcks) {
@@ -1233,7 +1257,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
                                        cpu_to_le32((u32)(li->offset>>32));
                                /*
                                 * We need to save a lock here to let us add
-                                * it again to the inode list if the unlock
+                                * it again to the file's list if the unlock
                                 * range request fails on the server.
                                 */
                                list_move(&li->llist, &tmp_llist);
@@ -1247,10 +1271,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
                                                 * We failed on the unlock range
                                                 * request - add all locks from
                                                 * the tmp list to the head of
-                                                * the inode list.
+                                                * the file's list.
                                                 */
                                                cifs_move_llist(&tmp_llist,
-                                                               &cinode->llist);
+                                                               &cfile->llist);
                                                rc = stored_rc;
                                        } else
                                                /*
@@ -1265,7 +1289,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
                        } else {
                                /*
                                 * We can cache brlock requests - simply remove
-                                * a lock from the inode list.
+                                * a lock from the file's list.
                                 */
                                list_del(&li->llist);
                                cifs_del_lock_waiters(li);
@@ -1276,7 +1300,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
                        stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
                                               types[i], num, 0, buf);
                        if (stored_rc) {
-                               cifs_move_llist(&tmp_llist, &cinode->llist);
+                               cifs_move_llist(&tmp_llist, &cfile->llist);
                                rc = stored_rc;
                        } else
                                cifs_free_llist(&tmp_llist);
@@ -1289,14 +1313,14 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
 }
 
 static int
-cifs_setlk(struct file *file,  struct file_lock *flock, __u8 type,
+cifs_setlk(struct file *file,  struct file_lock *flock, __u32 type,
           bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
 {
        int rc = 0;
        __u64 length = 1 + flock->fl_end - flock->fl_start;
        struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
-       struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
+       struct TCP_Server_Info *server = tcon->ses->server;
        __u16 netfid = cfile->netfid;
 
        if (posix_lck) {
@@ -1306,7 +1330,7 @@ cifs_setlk(struct file *file,  struct file_lock *flock, __u8 type,
                if (!rc || rc < 0)
                        return rc;
 
-               if (type & LOCKING_ANDX_SHARED_LOCK)
+               if (type & server->vals->shared_lock_type)
                        posix_lock_type = CIFS_RDLCK;
                else
                        posix_lock_type = CIFS_WRLCK;
@@ -1323,24 +1347,24 @@ cifs_setlk(struct file *file,  struct file_lock *flock, __u8 type,
        if (lock) {
                struct cifsLockInfo *lock;
 
-               lock = cifs_lock_init(flock->fl_start, length, type, netfid);
+               lock = cifs_lock_init(flock->fl_start, length, type);
                if (!lock)
                        return -ENOMEM;
 
-               rc = cifs_lock_add_if(cinode, lock, wait_flag);
+               rc = cifs_lock_add_if(cfile, lock, wait_flag);
                if (rc < 0)
                        kfree(lock);
                if (rc <= 0)
                        goto out;
 
-               rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
-                                flock->fl_start, 0, 1, type, wait_flag, 0);
+               rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+                                        type, 1, 0, wait_flag);
                if (rc) {
                        kfree(lock);
                        goto out;
                }
 
-               cifs_lock_add(cinode, lock);
+               cifs_lock_add(cfile, lock);
        } else if (unlock)
                rc = cifs_unlock_range(cfile, flock, xid);
 
@@ -1361,7 +1385,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
        struct cifsInodeInfo *cinode;
        struct cifsFileInfo *cfile;
        __u16 netfid;
-       __u8 type;
+       __u32 type;
 
        rc = -EACCES;
        xid = GetXid();
@@ -1370,11 +1394,13 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
                "end: %lld", cmd, flock->fl_flags, flock->fl_type,
                flock->fl_start, flock->fl_end);
 
-       cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);
-
-       cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        cfile = (struct cifsFileInfo *)file->private_data;
        tcon = tlink_tcon(cfile->tlink);
+
+       cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
+                       tcon->ses->server);
+
+       cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        netfid = cfile->netfid;
        cinode = CIFS_I(file->f_path.dentry->d_inode);
 
@@ -1539,10 +1565,11 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
                                        bool fsuid_only)
 {
-       struct cifsFileInfo *open_file;
+       struct cifsFileInfo *open_file, *inv_file = NULL;
        struct cifs_sb_info *cifs_sb;
        bool any_available = false;
        int rc;
+       unsigned int refind = 0;
 
        /* Having a null inode here (because mapping->host was set to zero by
        the VFS or MM) should not happen but we had reports of on oops (due to
@@ -1562,40 +1589,25 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 
        spin_lock(&cifs_file_list_lock);
 refind_writable:
+       if (refind > MAX_REOPEN_ATT) {
+               spin_unlock(&cifs_file_list_lock);
+               return NULL;
+       }
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (!any_available && open_file->pid != current->tgid)
                        continue;
                if (fsuid_only && open_file->uid != current_fsuid())
                        continue;
                if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
-                       cifsFileInfo_get(open_file);
-
                        if (!open_file->invalidHandle) {
                                /* found a good writable file */
+                               cifsFileInfo_get(open_file);
                                spin_unlock(&cifs_file_list_lock);
                                return open_file;
+                       } else {
+                               if (!inv_file)
+                                       inv_file = open_file;
                        }
-
-                       spin_unlock(&cifs_file_list_lock);
-
-                       /* Had to unlock since following call can block */
-                       rc = cifs_reopen_file(open_file, false);
-                       if (!rc)
-                               return open_file;
-
-                       /* if it fails, try another handle if possible */
-                       cFYI(1, "wp failed on reopen file");
-                       cifsFileInfo_put(open_file);
-
-                       spin_lock(&cifs_file_list_lock);
-
-                       /* else we simply continue to the next entry. Thus
-                          we do not loop on reopen errors.  If we
-                          can not reopen the file, for example if we
-                          reconnected to a server with another client
-                          racing to delete or lock the file we would not
-                          make progress if we restarted before the beginning
-                          of the loop here. */
                }
        }
        /* couldn't find useable FH with same pid, try any available */
@@ -1603,7 +1615,30 @@ refind_writable:
                any_available = true;
                goto refind_writable;
        }
+
+       if (inv_file) {
+               any_available = false;
+               cifsFileInfo_get(inv_file);
+       }
+
        spin_unlock(&cifs_file_list_lock);
+
+       if (inv_file) {
+               rc = cifs_reopen_file(inv_file, false);
+               if (!rc)
+                       return inv_file;
+               else {
+                       spin_lock(&cifs_file_list_lock);
+                       list_move_tail(&inv_file->flist,
+                                       &cifs_inode->openFileList);
+                       spin_unlock(&cifs_file_list_lock);
+                       cifsFileInfo_put(inv_file);
+                       spin_lock(&cifs_file_list_lock);
+                       ++refind;
+                       goto refind_writable;
+               }
+       }
+
        return NULL;
 }
 
@@ -2339,24 +2374,224 @@ ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
        return cifs_user_writev(iocb, iov, nr_segs, pos);
 }
 
+static struct cifs_readdata *
+cifs_readdata_alloc(unsigned int nr_vecs, work_func_t complete)
+{
+       struct cifs_readdata *rdata;
+
+       rdata = kzalloc(sizeof(*rdata) +
+                       sizeof(struct kvec) * nr_vecs, GFP_KERNEL);
+       if (rdata != NULL) {
+               kref_init(&rdata->refcount);
+               INIT_LIST_HEAD(&rdata->list);
+               init_completion(&rdata->done);
+               INIT_WORK(&rdata->work, complete);
+               INIT_LIST_HEAD(&rdata->pages);
+       }
+       return rdata;
+}
+
+void
+cifs_readdata_release(struct kref *refcount)
+{
+       struct cifs_readdata *rdata = container_of(refcount,
+                                       struct cifs_readdata, refcount);
+
+       if (rdata->cfile)
+               cifsFileInfo_put(rdata->cfile);
+
+       kfree(rdata);
+}
+
+static int
+cifs_read_allocate_pages(struct list_head *list, unsigned int npages)
+{
+       int rc = 0;
+       struct page *page, *tpage;
+       unsigned int i;
+
+       for (i = 0; i < npages; i++) {
+               page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
+               if (!page) {
+                       rc = -ENOMEM;
+                       break;
+               }
+               list_add(&page->lru, list);
+       }
+
+       if (rc) {
+               list_for_each_entry_safe(page, tpage, list, lru) {
+                       list_del(&page->lru);
+                       put_page(page);
+               }
+       }
+       return rc;
+}
+
+static void
+cifs_uncached_readdata_release(struct kref *refcount)
+{
+       struct page *page, *tpage;
+       struct cifs_readdata *rdata = container_of(refcount,
+                                       struct cifs_readdata, refcount);
+
+       list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+               list_del(&page->lru);
+               put_page(page);
+       }
+       cifs_readdata_release(refcount);
+}
+
+static int
+cifs_retry_async_readv(struct cifs_readdata *rdata)
+{
+       int rc;
+
+       do {
+               if (rdata->cfile->invalidHandle) {
+                       rc = cifs_reopen_file(rdata->cfile, true);
+                       if (rc != 0)
+                               continue;
+               }
+               rc = cifs_async_readv(rdata);
+       } while (rc == -EAGAIN);
+
+       return rc;
+}
+
+/**
+ * cifs_readdata_to_iov - copy data from pages in response to an iovec
+ * @rdata:     the readdata response with list of pages holding data
+ * @iov:       vector in which we should copy the data
+ * @nr_segs:   number of segments in vector
+ * @offset:    offset into file of the first iovec
+ * @copied:    used to return the amount of data copied to the iov
+ *
+ * This function copies data from a list of pages in a readdata response into
+ * an array of iovecs. It will first calculate where the data should go
+ * based on the info in the readdata and then copy the data into that spot.
+ */
+static ssize_t
+cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
+                       unsigned long nr_segs, loff_t offset, ssize_t *copied)
+{
+       int rc = 0;
+       struct iov_iter ii;
+       size_t pos = rdata->offset - offset;
+       struct page *page, *tpage;
+       ssize_t remaining = rdata->bytes;
+       unsigned char *pdata;
+
+       /* set up iov_iter and advance to the correct offset */
+       iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
+       iov_iter_advance(&ii, pos);
+
+       *copied = 0;
+       list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+               ssize_t copy;
+
+               /* copy a whole page or whatever's left */
+               copy = min_t(ssize_t, remaining, PAGE_SIZE);
+
+               /* ...but limit it to whatever space is left in the iov */
+               copy = min_t(ssize_t, copy, iov_iter_count(&ii));
+
+               /* go while there's data to be copied and no errors */
+               if (copy && !rc) {
+                       pdata = kmap(page);
+                       rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
+                                               (int)copy);
+                       kunmap(page);
+                       if (!rc) {
+                               *copied += copy;
+                               remaining -= copy;
+                               iov_iter_advance(&ii, copy);
+                       }
+               }
+
+               list_del(&page->lru);
+               put_page(page);
+       }
+
+       return rc;
+}
+
+static void
+cifs_uncached_readv_complete(struct work_struct *work)
+{
+       struct cifs_readdata *rdata = container_of(work,
+                                               struct cifs_readdata, work);
+
+       /* if the result is non-zero then the pages weren't kmapped */
+       if (rdata->result == 0) {
+               struct page *page;
+
+               list_for_each_entry(page, &rdata->pages, lru)
+                       kunmap(page);
+       }
+
+       complete(&rdata->done);
+       kref_put(&rdata->refcount, cifs_uncached_readdata_release);
+}
+
+static int
+cifs_uncached_read_marshal_iov(struct cifs_readdata *rdata,
+                               unsigned int remaining)
+{
+       int len = 0;
+       struct page *page, *tpage;
+
+       rdata->nr_iov = 1;
+       list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+               if (remaining >= PAGE_SIZE) {
+                       /* enough data to fill the page */
+                       rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+                       rdata->iov[rdata->nr_iov].iov_len = PAGE_SIZE;
+                       cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+                               rdata->nr_iov, page->index,
+                               rdata->iov[rdata->nr_iov].iov_base,
+                               rdata->iov[rdata->nr_iov].iov_len);
+                       ++rdata->nr_iov;
+                       len += PAGE_SIZE;
+                       remaining -= PAGE_SIZE;
+               } else if (remaining > 0) {
+                       /* enough for partial page, fill and zero the rest */
+                       rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+                       rdata->iov[rdata->nr_iov].iov_len = remaining;
+                       cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+                               rdata->nr_iov, page->index,
+                               rdata->iov[rdata->nr_iov].iov_base,
+                               rdata->iov[rdata->nr_iov].iov_len);
+                       memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
+                               '\0', PAGE_SIZE - remaining);
+                       ++rdata->nr_iov;
+                       len += remaining;
+                       remaining = 0;
+               } else {
+                       /* no need to hold page hostage */
+                       list_del(&page->lru);
+                       put_page(page);
+               }
+       }
+
+       return len;
+}
+
 static ssize_t
 cifs_iovec_read(struct file *file, const struct iovec *iov,
                 unsigned long nr_segs, loff_t *poffset)
 {
-       int rc;
-       int xid;
-       ssize_t total_read;
-       unsigned int bytes_read = 0;
+       ssize_t rc;
        size_t len, cur_len;
-       int iov_offset = 0;
+       ssize_t total_read = 0;
+       loff_t offset = *poffset;
+       unsigned int npages;
        struct cifs_sb_info *cifs_sb;
-       struct cifs_tcon *pTcon;
+       struct cifs_tcon *tcon;
        struct cifsFileInfo *open_file;
-       struct smb_com_read_rsp *pSMBr;
-       struct cifs_io_parms io_parms;
-       char *read_data;
-       unsigned int rsize;
-       __u32 pid;
+       struct cifs_readdata *rdata, *tmp;
+       struct list_head rdata_list;
+       pid_t pid;
 
        if (!nr_segs)
                return 0;
@@ -2365,14 +2600,10 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
        if (!len)
                return 0;
 
-       xid = GetXid();
+       INIT_LIST_HEAD(&rdata_list);
        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
-
-       /* FIXME: set up handlers for larger reads and/or convert to async */
-       rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
-
        open_file = file->private_data;
-       pTcon = tlink_tcon(open_file->tlink);
+       tcon = tlink_tcon(open_file->tlink);
 
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
                pid = open_file->pid;
@@ -2382,56 +2613,78 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
        if ((file->f_flags & O_ACCMODE) == O_WRONLY)
                cFYI(1, "attempting read on write only file instance");
 
-       for (total_read = 0; total_read < len; total_read += bytes_read) {
-               cur_len = min_t(const size_t, len - total_read, rsize);
-               rc = -EAGAIN;
-               read_data = NULL;
+       do {
+               cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
+               npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
 
-               while (rc == -EAGAIN) {
-                       int buf_type = CIFS_NO_BUFFER;
-                       if (open_file->invalidHandle) {
-                               rc = cifs_reopen_file(open_file, true);
-                               if (rc != 0)
-                                       break;
-                       }
-                       io_parms.netfid = open_file->netfid;
-                       io_parms.pid = pid;
-                       io_parms.tcon = pTcon;
-                       io_parms.offset = *poffset;
-                       io_parms.length = cur_len;
-                       rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
-                                        &read_data, &buf_type);
-                       pSMBr = (struct smb_com_read_rsp *)read_data;
-                       if (read_data) {
-                               char *data_offset = read_data + 4 +
-                                               le16_to_cpu(pSMBr->DataOffset);
-                               if (memcpy_toiovecend(iov, data_offset,
-                                                     iov_offset, bytes_read))
-                                       rc = -EFAULT;
-                               if (buf_type == CIFS_SMALL_BUFFER)
-                                       cifs_small_buf_release(read_data);
-                               else if (buf_type == CIFS_LARGE_BUFFER)
-                                       cifs_buf_release(read_data);
-                               read_data = NULL;
-                               iov_offset += bytes_read;
-                       }
+               /* allocate a readdata struct */
+               rdata = cifs_readdata_alloc(npages,
+                                           cifs_uncached_readv_complete);
+               if (!rdata) {
+                       rc = -ENOMEM;
+                       goto error;
                }
 
-               if (rc || (bytes_read == 0)) {
-                       if (total_read) {
-                               break;
-                       } else {
-                               FreeXid(xid);
-                               return rc;
+               rc = cifs_read_allocate_pages(&rdata->pages, npages);
+               if (rc)
+                       goto error;
+
+               rdata->cfile = cifsFileInfo_get(open_file);
+               rdata->offset = offset;
+               rdata->bytes = cur_len;
+               rdata->pid = pid;
+               rdata->marshal_iov = cifs_uncached_read_marshal_iov;
+
+               rc = cifs_retry_async_readv(rdata);
+error:
+               if (rc) {
+                       kref_put(&rdata->refcount,
+                                cifs_uncached_readdata_release);
+                       break;
+               }
+
+               list_add_tail(&rdata->list, &rdata_list);
+               offset += cur_len;
+               len -= cur_len;
+       } while (len > 0);
+
+       /* if at least one read request send succeeded, then reset rc */
+       if (!list_empty(&rdata_list))
+               rc = 0;
+
+       /* the loop below should proceed in the order of increasing offsets */
+restart_loop:
+       list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
+               if (!rc) {
+                       ssize_t copied;
+
+                       /* FIXME: freezable sleep too? */
+                       rc = wait_for_completion_killable(&rdata->done);
+                       if (rc)
+                               rc = -EINTR;
+                       else if (rdata->result)
+                               rc = rdata->result;
+                       else {
+                               rc = cifs_readdata_to_iov(rdata, iov,
+                                                       nr_segs, *poffset,
+                                                       &copied);
+                               total_read += copied;
+                       }
+
+                       /* resend call if it's a retryable error */
+                       if (rc == -EAGAIN) {
+                               rc = cifs_retry_async_readv(rdata);
+                               goto restart_loop;
                        }
-               } else {
-                       cifs_stats_bytes_read(pTcon, bytes_read);
-                       *poffset += bytes_read;
                }
+               list_del_init(&rdata->list);
+               kref_put(&rdata->refcount, cifs_uncached_readdata_release);
        }
 
-       FreeXid(xid);
-       return total_read;
+       cifs_stats_bytes_read(tcon, total_read);
+       *poffset += total_read;
+
+       return total_read ? total_read : rc;
 }
 
 ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
@@ -2606,6 +2859,100 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
        return rc;
 }
 
+/*
+ * Work-queue completion handler for an async read (rdata->work).
+ *
+ * Walks every page attached to the readdata: each page is removed from
+ * rdata->pages, added to the file LRU, marked uptodate only when the
+ * read as a whole succeeded (rdata->result == 0), unlocked, and its
+ * page-cache reference dropped.  Finally the work item's reference on
+ * the readdata itself is released.
+ */
+static void
+cifs_readv_complete(struct work_struct *work)
+{
+       struct cifs_readdata *rdata = container_of(work,
+                                               struct cifs_readdata, work);
+       struct page *page, *tpage;
+
+       list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+               list_del(&page->lru);
+               lru_cache_add_file(page);
+
+               if (rdata->result == 0) {
+                       /* drop the kmap taken when the iov was marshalled,
+                        * then publish the freshly-read data */
+                       kunmap(page);
+                       flush_dcache_page(page);
+                       SetPageUptodate(page);
+               }
+
+               unlock_page(page);
+
+               /* only cache pages that actually hold valid data */
+               if (rdata->result == 0)
+                       cifs_readpage_to_fscache(rdata->mapping->host, page);
+
+               page_cache_release(page);
+       }
+       /* release the reference held by this work item */
+       kref_put(&rdata->refcount, cifs_readdata_release);
+}
+
+/*
+ * Map the pages attached to @rdata into rdata->iov so the response data
+ * can be received directly into the page cache.
+ *
+ * @remaining is the number of data bytes the server says it returned.
+ * Full pages are kmap'ed into successive iov slots; the last partial
+ * page is mapped and its tail zeroed.  Pages beyond the data — and
+ * beyond the server's eof — get no iov slot and are released here.
+ *
+ * Returns the number of bytes covered by the iovecs that were set up.
+ *
+ * NOTE(review): nr_iov starts at 1 — slot 0 is presumably reserved for
+ * the response header by the async read path; confirm against
+ * cifs_async_readv before changing.
+ */
+static int
+cifs_readpages_marshal_iov(struct cifs_readdata *rdata, unsigned int remaining)
+{
+       int len = 0;
+       struct page *page, *tpage;
+       u64 eof;
+       pgoff_t eof_index;
+
+       /* determine the eof that the server (probably) has */
+       eof = CIFS_I(rdata->mapping->host)->server_eof;
+       eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
+       cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
+
+       rdata->nr_iov = 1;
+       list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+               if (remaining >= PAGE_CACHE_SIZE) {
+                       /* enough data to fill the page */
+                       rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+                       rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
+                       cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+                               rdata->nr_iov, page->index,
+                               rdata->iov[rdata->nr_iov].iov_base,
+                               rdata->iov[rdata->nr_iov].iov_len);
+                       ++rdata->nr_iov;
+                       len += PAGE_CACHE_SIZE;
+                       remaining -= PAGE_CACHE_SIZE;
+               } else if (remaining > 0) {
+                       /* enough for partial page, fill and zero the rest */
+                       rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+                       rdata->iov[rdata->nr_iov].iov_len = remaining;
+                       cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+                               rdata->nr_iov, page->index,
+                               rdata->iov[rdata->nr_iov].iov_base,
+                               rdata->iov[rdata->nr_iov].iov_len);
+                       /* zero the tail so no stale data leaks to userspace */
+                       memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
+                               '\0', PAGE_CACHE_SIZE - remaining);
+                       ++rdata->nr_iov;
+                       len += remaining;
+                       remaining = 0;
+               } else if (page->index > eof_index) {
+                       /*
+                        * The VFS will not try to do readahead past the
+                        * i_size, but it's possible that we have outstanding
+                        * writes with gaps in the middle and the i_size hasn't
+                        * caught up yet. Populate those with zeroed out pages
+                        * to prevent the VFS from repeatedly attempting to
+                        * fill them until the writes are flushed.
+                        */
+                       zero_user(page, 0, PAGE_CACHE_SIZE);
+                       list_del(&page->lru);
+                       lru_cache_add_file(page);
+                       flush_dcache_page(page);
+                       SetPageUptodate(page);
+                       unlock_page(page);
+                       page_cache_release(page);
+               } else {
+                       /* no need to hold page hostage */
+                       list_del(&page->lru);
+                       lru_cache_add_file(page);
+                       unlock_page(page);
+                       page_cache_release(page);
+               }
+       }
+
+       return len;
+}
+
 static int cifs_readpages(struct file *file, struct address_space *mapping,
        struct list_head *page_list, unsigned num_pages)
 {
@@ -2708,7 +3055,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                        nr_pages++;
                }
 
-               rdata = cifs_readdata_alloc(nr_pages);
+               rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
                if (!rdata) {
                        /* best to give up if we're out of mem */
                        list_for_each_entry_safe(page, tpage, &tmplist, lru) {
@@ -2722,24 +3069,16 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                }
 
                spin_lock(&cifs_file_list_lock);
-               cifsFileInfo_get(open_file);
                spin_unlock(&cifs_file_list_lock);
-               rdata->cfile = open_file;
+               rdata->cfile = cifsFileInfo_get(open_file);
                rdata->mapping = mapping;
                rdata->offset = offset;
                rdata->bytes = bytes;
                rdata->pid = pid;
+               rdata->marshal_iov = cifs_readpages_marshal_iov;
                list_splice_init(&tmplist, &rdata->pages);
 
-               do {
-                       if (open_file->invalidHandle) {
-                               rc = cifs_reopen_file(open_file, true);
-                               if (rc != 0)
-                                       continue;
-                       }
-                       rc = cifs_async_readv(rdata);
-               } while (rc == -EAGAIN);
-
+               rc = cifs_retry_async_readv(rdata);
                if (rc != 0) {
                        list_for_each_entry_safe(page, tpage, &rdata->pages,
                                                 lru) {
@@ -2748,9 +3087,11 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                                unlock_page(page);
                                page_cache_release(page);
                        }
-                       cifs_readdata_free(rdata);
+                       kref_put(&rdata->refcount, cifs_readdata_release);
                        break;
                }
+
+               kref_put(&rdata->refcount, cifs_readdata_release);
        }
 
        return rc;
index 4221b5e48a426af74b540105ec255026291e8c05..6d2667f0c98c3801d41960b784ff64f2899f2e33 100644 (file)
@@ -51,7 +51,15 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
        cifs_sb = CIFS_SB(inode->i_sb);
 
        switch (command) {
+               static bool warned = false;
                case CIFS_IOC_CHECKUMOUNT:
+                       if (!warned) {
+                               warned = true;
+                               cERROR(1, "the CIFS_IOC_CHECKMOUNT ioctl will "
+                                         "be deprecated in 3.7. Please "
+                                         "migrate away from the use of "
+                                         "umount.cifs");
+                       }
                        cFYI(1, "User unmount attempted");
                        if (cifs_sb->mnt_uid == current_uid())
                                rc = 0;
index c29d1aa2c54f30a76c25aa6ba843008b567ca96f..e2552d2b2e42c551fde38d91e3ead83c6cfccd47 100644 (file)
@@ -306,8 +306,6 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
                const struct cifs_tcon *treeCon, int word_count
                /* length of fixed section (word count) in two byte units  */)
 {
-       struct list_head *temp_item;
-       struct cifs_ses *ses;
        char *temp = (char *) buffer;
 
        memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
@@ -337,51 +335,6 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
                        /* Uid is not converted */
                        buffer->Uid = treeCon->ses->Suid;
                        buffer->Mid = GetNextMid(treeCon->ses->server);
-                       if (multiuser_mount != 0) {
-               /* For the multiuser case, there are few obvious technically  */
-               /* possible mechanisms to match the local linux user (uid)    */
-               /* to a valid remote smb user (smb_uid):                      */
-               /*      1) Query Winbind (or other local pam/nss daemon       */
-               /*        for userid/password/logon_domain or credential      */
-               /*      2) Query Winbind for uid to sid to username mapping   */
-               /*         and see if we have a matching password for existing*/
-               /*         session for that user perhas getting password by   */
-               /*         adding a new pam_cifs module that stores passwords */
-               /*         so that the cifs vfs can get at that for all logged*/
-               /*         on users                                           */
-               /*      3) (Which is the mechanism we have chosen)            */
-               /*         Search through sessions to the same server for a   */
-               /*         a match on the uid that was passed in on mount     */
-               /*         with the current processes uid (or euid?) and use  */
-               /*         that smb uid.   If no existing smb session for     */
-               /*         that uid found, use the default smb session ie     */
-               /*         the smb session for the volume mounted which is    */
-               /*         the same as would be used if the multiuser mount   */
-               /*         flag were disabled.  */
-
-               /*  BB Add support for establishing new tCon and SMB Session  */
-               /*      with userid/password pairs found on the smb session   */
-               /*      for other target tcp/ip addresses               BB    */
-                               if (current_fsuid() != treeCon->ses->linux_uid) {
-                                       cFYI(1, "Multiuser mode and UID "
-                                                "did not match tcon uid");
-                                       spin_lock(&cifs_tcp_ses_lock);
-                                       list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
-                                               ses = list_entry(temp_item, struct cifs_ses, smb_ses_list);
-                                               if (ses->linux_uid == current_fsuid()) {
-                                                       if (ses->server == treeCon->ses->server) {
-                                                               cFYI(1, "found matching uid substitute right smb_uid");
-                                                               buffer->Uid = ses->Suid;
-                                                               break;
-                                                       } else {
-                               /* BB eventually call cifs_setup_session here */
-                                                               cFYI(1, "local UID found but no smb sess with this server exists");
-                                                       }
-                                               }
-                                       }
-                                       spin_unlock(&cifs_tcp_ses_lock);
-                               }
-                       }
                }
                if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
                        buffer->Flags2 |= SMBFLG2_DFS;
@@ -700,22 +653,3 @@ backup_cred(struct cifs_sb_info *cifs_sb)
 
        return false;
 }
-
-void
-cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add)
-{
-       spin_lock(&server->req_lock);
-       server->credits += add;
-       server->in_flight--;
-       spin_unlock(&server->req_lock);
-       wake_up(&server->request_q);
-}
-
-void
-cifs_set_credits(struct TCP_Server_Info *server, const int val)
-{
-       spin_lock(&server->req_lock);
-       server->credits = val;
-       server->oplocks = val > 1 ? enable_oplocks : false;
-       spin_unlock(&server->req_lock);
-}
index e2bbc683e0184a736509b7a41337b2fda401e4a3..0a8224d1c4c5f2df8545f2c84f9e668feba2e0e9 100644 (file)
@@ -219,6 +219,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
 
 static int initiate_cifs_search(const int xid, struct file *file)
 {
+       __u16 search_flags;
        int rc = 0;
        char *full_path = NULL;
        struct cifsFileInfo *cifsFile;
@@ -270,8 +271,12 @@ ffirst_retry:
                cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
        }
 
+       search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
+       if (backup_cred(cifs_sb))
+               search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
+
        rc = CIFSFindFirst(xid, pTcon, full_path, cifs_sb->local_nls,
-               &cifsFile->netfid, &cifsFile->srch_inf,
+               &cifsFile->netfid, search_flags, &cifsFile->srch_inf,
                cifs_sb->mnt_cifs_flags &
                        CIFS_MOUNT_MAP_SPECIAL_CHR, CIFS_DIR_SEP(cifs_sb));
        if (rc == 0)
@@ -502,11 +507,13 @@ static int cifs_save_resume_key(const char *current_entry,
 static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
        struct file *file, char **ppCurrentEntry, int *num_to_ret)
 {
+       __u16 search_flags;
        int rc = 0;
        int pos_in_buf = 0;
        loff_t first_entry_in_buffer;
        loff_t index_to_find = file->f_pos;
        struct cifsFileInfo *cifsFile = file->private_data;
+       struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        /* check if index in the buffer */
 
        if ((cifsFile == NULL) || (ppCurrentEntry == NULL) ||
@@ -560,10 +567,14 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
                                                cifsFile);
        }
 
+       search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
+       if (backup_cred(cifs_sb))
+               search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
+
        while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
              (rc == 0) && !cifsFile->srch_inf.endOfSearch) {
                cFYI(1, "calling findnext2");
-               rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
+               rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, search_flags,
                                  &cifsFile->srch_inf);
                /* FindFirst/Next set last_entry to NULL on malformed reply */
                if (cifsFile->srch_inf.last_entry)
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
new file mode 100644 (file)
index 0000000..d9d615f
--- /dev/null
@@ -0,0 +1,154 @@
+/*
+ *  SMB1 (CIFS) version specific operations
+ *
+ *  Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
+ *
+ *  This library is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License v2 as published
+ *  by the Free Software Foundation.
+ *
+ *  This library is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *  the GNU Lesser General Public License for more details.
+ *
+ *  You should have received a copy of the GNU Lesser General Public License
+ *  along with this library; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+#include "cifspdu.h"
+
+/*
+ * An NT cancel request header looks just like the original request except:
+ *
+ * The Command is SMB_COM_NT_CANCEL
+ * The WordCount is zeroed out
+ * The ByteCount is zeroed out
+ *
+ * This function mangles an existing request buffer into a
+ * SMB_COM_NT_CANCEL request and then sends it.
+ */
+/*
+ * Mangle @buf (the original request, in place) into an SMB_COM_NT_CANCEL
+ * for @mid and send it.  The buffer is re-signed and transmitted under
+ * srv_mutex.  Returns 0 on success or a negative error from signing or
+ * the send.
+ */
+static int
+send_nt_cancel(struct TCP_Server_Info *server, void *buf,
+              struct mid_q_entry *mid)
+{
+       int rc = 0;
+       struct smb_hdr *in_buf = (struct smb_hdr *)buf;
+
+       /* -4 for RFC1001 length and +2 for BCC field */
+       in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4  + 2);
+       in_buf->Command = SMB_COM_NT_CANCEL;
+       in_buf->WordCount = 0;
+       put_bcc(0, in_buf);
+
+       mutex_lock(&server->srv_mutex);
+       /* re-sign: the cancel must carry a valid sequence number */
+       rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+       if (rc) {
+               mutex_unlock(&server->srv_mutex);
+               return rc;
+       }
+       rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
+       mutex_unlock(&server->srv_mutex);
+
+       cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
+               in_buf->Mid, rc);
+
+       return rc;
+}
+
+/* Two SMB1 open files refer to the same handle iff their netfids match. */
+static bool
+cifs_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
+{
+       return ob1->netfid == ob2->netfid;
+}
+
+/* Offset of the data portion within an SMB1 READ response buffer. */
+static unsigned int
+cifs_read_data_offset(char *buf)
+{
+       READ_RSP *rsp = (READ_RSP *)buf;
+       return le16_to_cpu(rsp->DataOffset);
+}
+
+/*
+ * Length of the data in an SMB1 READ response: the 32-bit value is split
+ * across the DataLengthHigh and DataLength fields on the wire.
+ */
+static unsigned int
+cifs_read_data_length(char *buf)
+{
+       READ_RSP *rsp = (READ_RSP *)buf;
+       return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
+              le16_to_cpu(rsp->DataLength);
+}
+
+/*
+ * Find the pending mid that matches a received SMB1 response @buffer:
+ * same Mid, same Command, and still in MID_REQUEST_SUBMITTED state.
+ *
+ * Returns the entry (still on pending_mid_q — it is not removed here)
+ * or NULL if no match.  The GlobalMid_Lock is only held for the scan.
+ */
+static struct mid_q_entry *
+cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
+{
+       struct smb_hdr *buf = (struct smb_hdr *)buffer;
+       struct mid_q_entry *mid;
+
+       spin_lock(&GlobalMid_Lock);
+       list_for_each_entry(mid, &server->pending_mid_q, qhead) {
+               if (mid->mid == buf->Mid &&
+                   mid->mid_state == MID_REQUEST_SUBMITTED &&
+                   le16_to_cpu(mid->command) == buf->Command) {
+                       spin_unlock(&GlobalMid_Lock);
+                       return mid;
+               }
+       }
+       spin_unlock(&GlobalMid_Lock);
+       return NULL;
+}
+
+/*
+ * Return @add credits to the server's pool and account one request as no
+ * longer in flight, then wake anyone sleeping on the request queue.
+ */
+static void
+cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add)
+{
+       spin_lock(&server->req_lock);
+       server->credits += add;
+       server->in_flight--;
+       spin_unlock(&server->req_lock);
+       wake_up(&server->request_q);
+}
+
+/*
+ * Set the server's credit count to @val.  Oplocks are only enabled when
+ * more than one credit is available (and the global enable_oplocks
+ * setting allows them).
+ */
+static void
+cifs_set_credits(struct TCP_Server_Info *server, const int val)
+{
+       spin_lock(&server->req_lock);
+       server->credits = val;
+       server->oplocks = val > 1 ? enable_oplocks : false;
+       spin_unlock(&server->req_lock);
+}
+
+/* For SMB1 there is a single credit pool; expose its counter directly. */
+static int *
+cifs_get_credits_field(struct TCP_Server_Info *server)
+{
+       return &server->credits;
+}
+
+/* Dispatch table of protocol-specific operations for the SMB1 dialect. */
+struct smb_version_operations smb1_operations = {
+       .send_cancel = send_nt_cancel,
+       .compare_fids = cifs_compare_fids,
+       .setup_request = cifs_setup_request,
+       .check_receive = cifs_check_receive,
+       .add_credits = cifs_add_credits,
+       .set_credits = cifs_set_credits,
+       .get_credits_field = cifs_get_credits_field,
+       .read_data_offset = cifs_read_data_offset,
+       .read_data_length = cifs_read_data_length,
+       .map_error = map_smb_to_linux_error,
+       .find_mid = cifs_find_mid,
+       .check_message = checkSMB,
+       .dump_detail = cifs_dump_detail,
+       .is_oplock_break = is_valid_oplock_break,
+};
+
+/* Protocol constants (lock types, header sizes) for the SMB1 dialect. */
+struct smb_version_values smb1_values = {
+       .version_string = SMB1_VERSION_STRING,
+       .large_lock_type = LOCKING_ANDX_LARGE_FILES,
+       .exclusive_lock_type = 0,
+       .shared_lock_type = LOCKING_ANDX_SHARED_LOCK,
+       .unlock_lock_type = 0,
+       .header_size = sizeof(struct smb_hdr),
+       .max_header_size = MAX_CIFS_HDR_SIZE,
+       .read_rsp_size = sizeof(READ_RSP),
+};
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
new file mode 100644 (file)
index 0000000..f065e89
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ *  SMB2 version specific operations
+ *
+ *  Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
+ *
+ *  This library is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License v2 as published
+ *  by the Free Software Foundation.
+ *
+ *  This library is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *  the GNU Lesser General Public License for more details.
+ *
+ *  You should have received a copy of the GNU Lesser General Public License
+ *  along with this library; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "cifsglob.h"
+
+/* SMB2.1 dispatch table — intentionally empty placeholder for now. */
+struct smb_version_operations smb21_operations = {
+};
+
+/* SMB2.1 protocol constants — only the version string is filled in yet. */
+struct smb_version_values smb21_values = {
+       .version_string = SMB21_VERSION_STRING,
+};
index 0961336513d5334d99084eee82addeb1b582ffbf..1b36ffe6a47b5f6888519fea797aebdbe3a9da97 100644 (file)
@@ -304,7 +304,8 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int optype,
 static int
 wait_for_free_request(struct TCP_Server_Info *server, const int optype)
 {
-       return wait_for_free_credits(server, optype, get_credits_field(server));
+       return wait_for_free_credits(server, optype,
+                                    server->ops->get_credits_field(server));
 }
 
 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
@@ -396,7 +397,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
        rc = cifs_setup_async_request(server, iov, nvec, &mid);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
-               cifs_add_credits(server, 1);
+               add_credits(server, 1);
                wake_up(&server->request_q);
                return rc;
        }
@@ -418,7 +419,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
        return rc;
 out_err:
        delete_mid(mid);
-       cifs_add_credits(server, 1);
+       add_credits(server, 1);
        wake_up(&server->request_q);
        return rc;
 }
@@ -483,41 +484,11 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
        return rc;
 }
 
-/*
- * An NT cancel request header looks just like the original request except:
- *
- * The Command is SMB_COM_NT_CANCEL
- * The WordCount is zeroed out
- * The ByteCount is zeroed out
- *
- * This function mangles an existing request buffer into a
- * SMB_COM_NT_CANCEL request and then sends it.
- */
-static int
-send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
-               struct mid_q_entry *mid)
+static inline int
+send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
 {
-       int rc = 0;
-
-       /* -4 for RFC1001 length and +2 for BCC field */
-       in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4  + 2);
-       in_buf->Command = SMB_COM_NT_CANCEL;
-       in_buf->WordCount = 0;
-       put_bcc(0, in_buf);
-
-       mutex_lock(&server->srv_mutex);
-       rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
-       if (rc) {
-               mutex_unlock(&server->srv_mutex);
-               return rc;
-       }
-       rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
-       mutex_unlock(&server->srv_mutex);
-
-       cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
-               in_buf->Mid, rc);
-
-       return rc;
+       return server->ops->send_cancel ?
+                               server->ops->send_cancel(server, buf, mid) : 0;
 }
 
 int
@@ -544,7 +515,7 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
        return map_smb_to_linux_error(mid->resp_buf, log_error);
 }
 
-static int
+int
 cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
                   unsigned int nvec, struct mid_q_entry **ret_mid)
 {
@@ -607,12 +578,12 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
 
        mutex_lock(&ses->server->srv_mutex);
 
-       rc = cifs_setup_request(ses, iov, n_vec, &midQ);
+       rc = ses->server->ops->setup_request(ses, iov, n_vec, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                cifs_small_buf_release(buf);
                /* Update # of requests on wire to server */
-               cifs_add_credits(ses->server, 1);
+               add_credits(ses->server, 1);
                return rc;
        }
 
@@ -636,13 +607,13 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
 
        rc = wait_for_response(ses->server, midQ);
        if (rc != 0) {
-               send_nt_cancel(ses->server, (struct smb_hdr *)buf, midQ);
+               send_cancel(ses->server, buf, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        cifs_small_buf_release(buf);
-                       cifs_add_credits(ses->server, 1);
+                       add_credits(ses->server, 1);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
@@ -652,7 +623,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
 
        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
-               cifs_add_credits(ses->server, 1);
+               add_credits(ses->server, 1);
                return rc;
        }
 
@@ -670,14 +641,15 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
        else
                *pRespBufType = CIFS_SMALL_BUFFER;
 
-       rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);
+       rc = ses->server->ops->check_receive(midQ, ses->server,
+                                            flags & CIFS_LOG_ERROR);
 
        /* mark it so buf will not be freed by delete_mid */
        if ((flags & CIFS_NO_RESP) == 0)
                midQ->resp_buf = NULL;
 out:
        delete_mid(midQ);
-       cifs_add_credits(ses->server, 1);
+       add_credits(ses->server, 1);
 
        return rc;
 }
@@ -727,7 +699,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                /* Update # of requests on wire to server */
-               cifs_add_credits(ses->server, 1);
+               add_credits(ses->server, 1);
                return rc;
        }
 
@@ -753,13 +725,13 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
 
        rc = wait_for_response(ses->server, midQ);
        if (rc != 0) {
-               send_nt_cancel(ses->server, in_buf, midQ);
+               send_cancel(ses->server, in_buf, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
-                       cifs_add_credits(ses->server, 1);
+                       add_credits(ses->server, 1);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
@@ -767,7 +739,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
 
        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
-               cifs_add_credits(ses->server, 1);
+               add_credits(ses->server, 1);
                return rc;
        }
 
@@ -783,7 +755,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
        rc = cifs_check_receive(midQ, ses->server, 0);
 out:
        delete_mid(midQ);
-       cifs_add_credits(ses->server, 1);
+       add_credits(ses->server, 1);
 
        return rc;
 }
@@ -898,7 +870,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /* POSIX lock. We send a NT_CANCEL SMB to cause the
                           blocking lock to return. */
-                       rc = send_nt_cancel(ses->server, in_buf, midQ);
+                       rc = send_cancel(ses->server, in_buf, midQ);
                        if (rc) {
                                delete_mid(midQ);
                                return rc;
@@ -919,7 +891,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
 
                rc = wait_for_response(ses->server, midQ);
                if (rc) {
-                       send_nt_cancel(ses->server, in_buf, midQ);
+                       send_cancel(ses->server, in_buf, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
index 2870597b5c9d37e416c03d2e894c2556713b3339..f1813120d753e23ce153e700dd62d4d0d871d779 100644 (file)
@@ -244,7 +244,7 @@ static void coda_put_super(struct super_block *sb)
 static void coda_evict_inode(struct inode *inode)
 {
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
        coda_cache_clear_inode(inode);
 }
 
index 0781e619a62a48babf8969fa5453994d784ba13c..6161255fac45648efdfe437d9d880d390268d14f 100644 (file)
@@ -532,7 +532,7 @@ out:
 ssize_t compat_rw_copy_check_uvector(int type,
                const struct compat_iovec __user *uvector, unsigned long nr_segs,
                unsigned long fast_segs, struct iovec *fast_pointer,
-               struct iovec **ret_pointer, int check_access)
+               struct iovec **ret_pointer)
 {
        compat_ssize_t tot_len;
        struct iovec *iov = *ret_pointer = fast_pointer;
@@ -579,7 +579,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
                }
                if (len < 0)    /* size_t not fitting in compat_ssize_t .. */
                        goto out;
-               if (check_access &&
+               if (type >= 0 &&
                    !access_ok(vrfy_dir(type), compat_ptr(buf), len)) {
                        ret = -EFAULT;
                        goto out;
@@ -871,12 +871,12 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
 {
        int error;
        struct file *file;
+       int fput_needed;
        struct compat_readdir_callback buf;
 
-       error = -EBADF;
-       file = fget(fd);
+       file = fget_light(fd, &fput_needed);
        if (!file)
-               goto out;
+               return -EBADF;
 
        buf.result = 0;
        buf.dirent = dirent;
@@ -885,8 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
        if (buf.result)
                error = buf.result;
 
-       fput(file);
-out:
+       fput_light(file, fput_needed);
        return error;
 }
 
@@ -953,16 +952,15 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
        struct file * file;
        struct compat_linux_dirent __user * lastdirent;
        struct compat_getdents_callback buf;
+       int fput_needed;
        int error;
 
-       error = -EFAULT;
        if (!access_ok(VERIFY_WRITE, dirent, count))
-               goto out;
+               return -EFAULT;
 
-       error = -EBADF;
-       file = fget(fd);
+       file = fget_light(fd, &fput_needed);
        if (!file)
-               goto out;
+               return -EBADF;
 
        buf.current_dir = dirent;
        buf.previous = NULL;
@@ -979,8 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
                else
                        error = count - buf.count;
        }
-       fput(file);
-out:
+       fput_light(file, fput_needed);
        return error;
 }
 
@@ -1041,16 +1038,15 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
        struct file * file;
        struct linux_dirent64 __user * lastdirent;
        struct compat_getdents_callback64 buf;
+       int fput_needed;
        int error;
 
-       error = -EFAULT;
        if (!access_ok(VERIFY_WRITE, dirent, count))
-               goto out;
+               return -EFAULT;
 
-       error = -EBADF;
-       file = fget(fd);
+       file = fget_light(fd, &fput_needed);
        if (!file)
-               goto out;
+               return -EBADF;
 
        buf.current_dir = dirent;
        buf.previous = NULL;
@@ -1068,8 +1064,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
                else
                        error = count - buf.count;
        }
-       fput(file);
-out:
+       fput_light(file, fput_needed);
        return error;
 }
 #endif /* ! __ARCH_OMIT_COMPAT_SYS_GETDENTS64 */
@@ -1094,7 +1089,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
                goto out;
 
        tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs,
-                                              UIO_FASTIOV, iovstack, &iov, 1);
+                                              UIO_FASTIOV, iovstack, &iov);
        if (tot_len == 0) {
                ret = 0;
                goto out;
@@ -1547,7 +1542,6 @@ asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg)
                                 compat_ptr(a.exp), compat_ptr(a.tvp));
 }
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
 static long do_compat_pselect(int n, compat_ulong_t __user *inp,
        compat_ulong_t __user *outp, compat_ulong_t __user *exp,
        struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
@@ -1670,11 +1664,9 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
 
        return ret;
 }
-#endif /* HAVE_SET_RESTORE_SIGMASK */
 
 #ifdef CONFIG_EPOLL
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
 asmlinkage long compat_sys_epoll_pwait(int epfd,
                        struct compat_epoll_event __user *events,
                        int maxevents, int timeout,
@@ -1718,7 +1710,6 @@ asmlinkage long compat_sys_epoll_pwait(int epfd,
 
        return err;
 }
-#endif /* HAVE_SET_RESTORE_SIGMASK */
 
 #endif /* CONFIG_EPOLL */
 
index 4435d8b329044da3b48c83dfe555409464797d0d..85c9e2bff8e65126eaca755e14d1ee4a15a27170 100644 (file)
@@ -683,8 +683,6 @@ EXPORT_SYMBOL(dget_parent);
 /**
  * d_find_alias - grab a hashed alias of inode
  * @inode: inode in question
- * @want_discon:  flag, used by d_splice_alias, to request
- *          that only a DISCONNECTED alias be returned.
  *
  * If inode has a hashed alias, or is a directory and has any alias,
  * acquire the reference to alias and return it. Otherwise return NULL.
@@ -693,10 +691,9 @@ EXPORT_SYMBOL(dget_parent);
  * of a filesystem.
  *
  * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
- * any other hashed alias over that one unless @want_discon is set,
- * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
+ * any other hashed alias over that.
  */
-static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
+static struct dentry *__d_find_alias(struct inode *inode)
 {
        struct dentry *alias, *discon_alias;
 
@@ -708,7 +705,7 @@ again:
                        if (IS_ROOT(alias) &&
                            (alias->d_flags & DCACHE_DISCONNECTED)) {
                                discon_alias = alias;
-                       } else if (!want_discon) {
+                       } else {
                                __dget_dlock(alias);
                                spin_unlock(&alias->d_lock);
                                return alias;
@@ -739,7 +736,7 @@ struct dentry *d_find_alias(struct inode *inode)
 
        if (!list_empty(&inode->i_dentry)) {
                spin_lock(&inode->i_lock);
-               de = __d_find_alias(inode, 0);
+               de = __d_find_alias(inode);
                spin_unlock(&inode->i_lock);
        }
        return de;
@@ -1650,9 +1647,8 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
 
        if (inode && S_ISDIR(inode->i_mode)) {
                spin_lock(&inode->i_lock);
-               new = __d_find_alias(inode, 1);
+               new = __d_find_any_alias(inode);
                if (new) {
-                       BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
                        spin_unlock(&inode->i_lock);
                        security_d_instantiate(new, inode);
                        d_move(new, dentry);
@@ -2482,7 +2478,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
                struct dentry *alias;
 
                /* Does an aliased dentry already exist? */
-               alias = __d_find_alias(inode, 0);
+               alias = __d_find_alias(inode);
                if (alias) {
                        actual = alias;
                        write_seqlock(&rename_lock);
@@ -2575,7 +2571,7 @@ static int prepend_path(const struct path *path,
        bool slash = false;
        int error = 0;
 
-       br_read_lock(vfsmount_lock);
+       br_read_lock(&vfsmount_lock);
        while (dentry != root->dentry || vfsmnt != root->mnt) {
                struct dentry * parent;
 
@@ -2606,7 +2602,7 @@ static int prepend_path(const struct path *path,
                error = prepend(buffer, buflen, "/", 1);
 
 out:
-       br_read_unlock(vfsmount_lock);
+       br_read_unlock(&vfsmount_lock);
        return error;
 
 global_root:
index 5dfafdd1dbd3cbdd553ef3f88fce4d0ff2fe54a5..2340f6978d6e29e01c9b90ddaa9abd33beb0ee11 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/namei.h>
 #include <linux/debugfs.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 
 static ssize_t default_read_file(struct file *file, char __user *buf,
                                 size_t count, loff_t *ppos)
@@ -520,6 +521,133 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
 }
 EXPORT_SYMBOL_GPL(debugfs_create_blob);
 
+struct array_data {
+       void *array;
+       u32 elements;
+};
+
+static int u32_array_open(struct inode *inode, struct file *file)
+{
+       file->private_data = NULL;
+       return nonseekable_open(inode, file);
+}
+
+static size_t format_array(char *buf, size_t bufsize, const char *fmt,
+                          u32 *array, u32 array_size)
+{
+       size_t ret = 0;
+       u32 i;
+
+       for (i = 0; i < array_size; i++) {
+               size_t len;
+
+               len = snprintf(buf, bufsize, fmt, array[i]);
+               len++;  /* ' ' or '\n' */
+               ret += len;
+
+               if (buf) {
+                       buf += len;
+                       bufsize -= len;
+                       buf[-1] = (i == array_size-1) ? '\n' : ' ';
+               }
+       }
+
+       ret++;          /* \0 */
+       if (buf)
+               *buf = '\0';
+
+       return ret;
+}
+
+static char *format_array_alloc(const char *fmt, u32 *array,
+                                               u32 array_size)
+{
+       size_t len = format_array(NULL, 0, fmt, array, array_size);
+       char *ret;
+
+       ret = kmalloc(len, GFP_KERNEL);
+       if (ret == NULL)
+               return NULL;
+
+       format_array(ret, len, fmt, array, array_size);
+       return ret;
+}
+
+static ssize_t u32_array_read(struct file *file, char __user *buf, size_t len,
+                             loff_t *ppos)
+{
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct array_data *data = inode->i_private;
+       size_t size;
+
+       if (*ppos == 0) {
+               if (file->private_data) {
+                       kfree(file->private_data);
+                       file->private_data = NULL;
+               }
+
+               file->private_data = format_array_alloc("%u", data->array,
+                                                             data->elements);
+       }
+
+       size = 0;
+       if (file->private_data)
+               size = strlen(file->private_data);
+
+       return simple_read_from_buffer(buf, len, ppos,
+                                       file->private_data, size);
+}
+
+static int u32_array_release(struct inode *inode, struct file *file)
+{
+       kfree(file->private_data);
+
+       return 0;
+}
+
+static const struct file_operations u32_array_fops = {
+       .owner   = THIS_MODULE,
+       .open    = u32_array_open,
+       .release = u32_array_release,
+       .read    = u32_array_read,
+       .llseek  = no_llseek,
+};
+
+/**
+ * debugfs_create_u32_array - create a debugfs file that is used to read u32
+ * array.
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have.
+ * @parent: a pointer to the parent dentry for this file.  This should be a
+ *          directory dentry if set.  If this parameter is %NULL, then the
+ *          file will be created in the root of the debugfs filesystem.
+ * @array: u32 array that provides data.
+ * @elements: total number of elements in the array.
+ *
+ * This function creates a file in debugfs with the given name that exports
+ * @array as data. If the @mode variable is so set it can be read from.
+ * Writing is not supported. Seek within the file is also not supported.
+ * Once array is created its size can not be changed.
+ *
+ * The function returns a pointer to dentry on success. If debugfs is not
+ * enabled in the kernel, the value -%ENODEV will be returned.
+ */
+struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
+                                           struct dentry *parent,
+                                           u32 *array, u32 elements)
+{
+       struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
+
+       if (data == NULL)
+               return NULL;
+
+       data->array = array;
+       data->elements = elements;
+
+       return debugfs_create_file(name, mode, parent, data, &u32_array_fops);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_u32_array);
+
 #ifdef CONFIG_HAS_IOMEM
 
 /*
index f4aadd15b61376ee97a4a7bababe2eb659850820..0c85fae37666db4b18fb2bf9ebf692ae04bcbf40 100644 (file)
@@ -145,50 +145,6 @@ struct dio {
 
 static struct kmem_cache *dio_cache __read_mostly;
 
-static void __inode_dio_wait(struct inode *inode)
-{
-       wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
-       DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
-
-       do {
-               prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
-               if (atomic_read(&inode->i_dio_count))
-                       schedule();
-       } while (atomic_read(&inode->i_dio_count));
-       finish_wait(wq, &q.wait);
-}
-
-/**
- * inode_dio_wait - wait for outstanding DIO requests to finish
- * @inode: inode to wait for
- *
- * Waits for all pending direct I/O requests to finish so that we can
- * proceed with a truncate or equivalent operation.
- *
- * Must be called under a lock that serializes taking new references
- * to i_dio_count, usually by inode->i_mutex.
- */
-void inode_dio_wait(struct inode *inode)
-{
-       if (atomic_read(&inode->i_dio_count))
-               __inode_dio_wait(inode);
-}
-EXPORT_SYMBOL(inode_dio_wait);
-
-/*
- * inode_dio_done - signal finish of a direct I/O requests
- * @inode: inode the direct I/O happens on
- *
- * This is called once we've finished processing a direct I/O request,
- * and is used to wake up callers waiting for direct I/O to be quiesced.
- */
-void inode_dio_done(struct inode *inode)
-{
-       if (atomic_dec_and_test(&inode->i_dio_count))
-               wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
-}
-EXPORT_SYMBOL(inode_dio_done);
-
 /*
  * How many pages are in the queue?
  */
index ab35b113003b900ad3592217d64e6cef82fe8f9d..a07441a0a8789a9ee1e43f5be0d2b43ec3ee04e8 100644 (file)
@@ -660,11 +660,10 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
 {
        struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
        char *lower_buf;
-       size_t lower_bufsiz = PATH_MAX;
        mm_segment_t old_fs;
        int rc;
 
-       lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL);
+       lower_buf = kmalloc(PATH_MAX, GFP_KERNEL);
        if (!lower_buf) {
                rc = -ENOMEM;
                goto out;
@@ -673,58 +672,29 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
        set_fs(get_ds());
        rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
                                                   (char __user *)lower_buf,
-                                                  lower_bufsiz);
+                                                  PATH_MAX);
        set_fs(old_fs);
        if (rc < 0)
                goto out;
-       lower_bufsiz = rc;
        rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry,
-                                                 lower_buf, lower_bufsiz);
+                                                 lower_buf, rc);
 out:
        kfree(lower_buf);
        return rc;
 }
 
-static int
-ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
-       char *kbuf;
-       size_t kbufsiz, copied;
+       char *buf;
+       size_t len = PATH_MAX;
        int rc;
 
-       rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz);
+       rc = ecryptfs_readlink_lower(dentry, &buf, &len);
        if (rc)
                goto out;
-       copied = min_t(size_t, bufsiz, kbufsiz);
-       rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
-       kfree(kbuf);
        fsstack_copy_attr_atime(dentry->d_inode,
                                ecryptfs_dentry_to_lower(dentry)->d_inode);
-out:
-       return rc;
-}
-
-static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       char *buf;
-       int len = PAGE_SIZE, rc;
-       mm_segment_t old_fs;
-
-       /* Released in ecryptfs_put_link(); only release here on error */
-       buf = kmalloc(len, GFP_KERNEL);
-       if (!buf) {
-               buf = ERR_PTR(-ENOMEM);
-               goto out;
-       }
-       old_fs = get_fs();
-       set_fs(get_ds());
-       rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
-       set_fs(old_fs);
-       if (rc < 0) {
-               kfree(buf);
-               buf = ERR_PTR(rc);
-       } else
-               buf[rc] = '\0';
+       buf[len] = '\0';
 out:
        nd_set_link(nd, buf);
        return NULL;
@@ -1153,7 +1123,7 @@ out:
 }
 
 const struct inode_operations ecryptfs_symlink_iops = {
-       .readlink = ecryptfs_readlink,
+       .readlink = generic_readlink,
        .follow_link = ecryptfs_follow_link,
        .put_link = ecryptfs_put_link,
        .permission = ecryptfs_permission,
index 2dd946b636d276df37edbcc21f79b7314b4184c4..e879cf8ff0b172798badbe8945a3c8b51a3dca09 100644 (file)
@@ -133,7 +133,7 @@ static int ecryptfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 static void ecryptfs_evict_inode(struct inode *inode)
 {
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
        iput(ecryptfs_inode_to_lower(inode));
 }
 
index dba15fecf23e376f12ad2cb367424cf202d3a819..d81b9f654086d1cdb3899767cf6e1e5bf05e9f6e 100644 (file)
@@ -46,20 +46,16 @@ struct eventfd_ctx {
  * value, and we signal this as overflow condition by returining a POLLERR
  * to poll(2).
  *
- * Returns @n in case of success, a non-negative number lower than @n in case
- * of overflow, or the following error codes:
- *
- * -EINVAL    : The value of @n is negative.
+ * Returns the amount by which the counter was incremented.  This will be less
+ * than @n if the counter has overflowed.
  */
-int eventfd_signal(struct eventfd_ctx *ctx, int n)
+__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 {
        unsigned long flags;
 
-       if (n < 0)
-               return -EINVAL;
        spin_lock_irqsave(&ctx->wqh.lock, flags);
        if (ULLONG_MAX - ctx->count < n)
-               n = (int) (ULLONG_MAX - ctx->count);
+               n = ULLONG_MAX - ctx->count;
        ctx->count += n;
        if (waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, POLLIN);
index 079d1be65ba9e61e6f0452ecffc0cd23a6c3987d..74598f67efebb85e71204a832c929dd20ea96771 100644 (file)
@@ -1853,8 +1853,6 @@ error_return:
        return error;
 }
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
-
 /*
  * Implement the event wait interface for the eventpoll file. It is the kernel
  * part of the user space epoll_pwait(2).
@@ -1899,8 +1897,6 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
        return error;
 }
 
-#endif /* HAVE_SET_RESTORE_SIGMASK */
-
 static int __init eventpoll_init(void)
 {
        struct sysinfo si;
index 52c9e2ff6e6bd8b6f763e56ceafda431731cea9b..a79786a8d2c88d5b6c580859ef12496f43b4b0f4 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -280,10 +280,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        INIT_LIST_HEAD(&vma->anon_vma_chain);
 
-       err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
-       if (err)
-               goto err;
-
        err = insert_vm_struct(mm, vma);
        if (err)
                goto err;
index 352ba149d23ed51261d46f55fd4c8babf066b06c..389ba8312d5d182ea0682bd719366a305f1e2d1b 100644 (file)
@@ -16,5 +16,5 @@
 libore-y := ore.o ore_raid.o
 obj-$(CONFIG_ORE) += libore.o
 
-exofs-y := inode.o file.o symlink.o namei.o dir.o super.o
+exofs-y := inode.o file.o symlink.o namei.o dir.o super.o sys.o
 obj-$(CONFIG_EXOFS_FS) += exofs.o
index ca9d49665ef6bc5a2b6ae285b8dbde36e0d0bc31..fffe86fd7a4260cc3bce3b6910f930ee7f32f5d6 100644 (file)
@@ -56,6 +56,9 @@
 struct exofs_dev {
        struct ore_dev ored;
        unsigned did;
+       unsigned urilen;
+       uint8_t *uri;
+       struct kobject ed_kobj;
 };
 /*
  * our extension to the in-memory superblock
@@ -73,6 +76,7 @@ struct exofs_sb_info {
        struct ore_layout       layout;         /* Default files layout       */
        struct ore_comp one_comp;               /* id & cred of partition id=0*/
        struct ore_components oc;               /* comps for the partition    */
+       struct kobject  s_kobj;                 /* holds per-sbi kobject      */
 };
 
 /*
@@ -176,6 +180,16 @@ void exofs_make_credential(u8 cred_a[OSD_CAP_LEN],
                           const struct osd_obj_id *obj);
 int exofs_sbi_write_stats(struct exofs_sb_info *sbi);
 
+/* sys.c                 */
+int exofs_sysfs_init(void);
+void exofs_sysfs_uninit(void);
+int exofs_sysfs_sb_add(struct exofs_sb_info *sbi,
+                      struct exofs_dt_device_info *dt_dev);
+void exofs_sysfs_sb_del(struct exofs_sb_info *sbi);
+int exofs_sysfs_odev_add(struct exofs_dev *edev,
+                        struct exofs_sb_info *sbi);
+void exofs_sysfs_dbg_print(void);
+
 /*********************
  * operation vectors *
  *********************/
index ea5e1f97806a7dc284e11cb1f1185981361d4e08..5badb0c039de404b24208c8e6c2041b3331ae5bb 100644 (file)
@@ -1473,7 +1473,7 @@ void exofs_evict_inode(struct inode *inode)
                goto no_delete;
 
        inode->i_size = 0;
-       end_writeback(inode);
+       clear_inode(inode);
 
        /* if we are deleting an obj that hasn't been created yet, wait.
         * This also makes sure that create_done cannot be called with an
@@ -1503,5 +1503,5 @@ void exofs_evict_inode(struct inode *inode)
        return;
 
 no_delete:
-       end_writeback(inode);
+       clear_inode(inode);
 }
index 735ca06430ac9df3aadb6d470f4a16a3ea0e5f89..433783624d107d1d29fd16ffb1c4890ecd78c7cd 100644 (file)
@@ -472,6 +472,7 @@ static void exofs_put_super(struct super_block *sb)
        _exofs_print_device("Unmounting", NULL, ore_comp_dev(&sbi->oc, 0),
                            sbi->one_comp.obj.partition);
 
+       exofs_sysfs_sb_del(sbi);
        bdi_destroy(&sbi->bdi);
        exofs_free_sbi(sbi);
        sb->s_fs_info = NULL;
@@ -632,6 +633,12 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
        memcpy(&sbi->oc.ods[numdevs], &sbi->oc.ods[0],
                (numdevs - 1) * sizeof(sbi->oc.ods[0]));
 
+       /* create sysfs subdir under which we put the device table
+        * And cluster layout. A Superblock is identified by the string:
+        *      "dev[0].osdname"_"pid"
+        */
+       exofs_sysfs_sb_add(sbi, &dt->dt_dev_table[0]);
+
        for (i = 0; i < numdevs; i++) {
                struct exofs_fscb fscb;
                struct osd_dev_info odi;
@@ -657,6 +664,7 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
                        eds[i].ored.od = fscb_od;
                        ++sbi->oc.numdevs;
                        fscb_od = NULL;
+                       exofs_sysfs_odev_add(&eds[i], sbi);
                        continue;
                }
 
@@ -682,6 +690,7 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
                                  odi.osdname);
                        goto out;
                }
+               exofs_sysfs_odev_add(&eds[i], sbi);
 
                /* TODO: verify other information is correct and FS-uuid
                 *       matches. Benny what did you say about device table
@@ -745,7 +754,6 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
        sbi->one_comp.obj.partition = opts->pid;
        sbi->one_comp.obj.id = 0;
        exofs_make_credential(sbi->one_comp.cred, &sbi->one_comp.obj);
-       sbi->oc.numdevs = 1;
        sbi->oc.single_comp = EC_SINGLE_COMP;
        sbi->oc.comps = &sbi->one_comp;
 
@@ -804,6 +812,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
                        goto free_sbi;
 
                ore_comp_set_dev(&sbi->oc, 0, od);
+               sbi->oc.numdevs = 1;
        }
 
        __sbi_read_stats(sbi);
@@ -844,6 +853,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
                goto free_sbi;
        }
 
+       exofs_sysfs_dbg_print();
        _exofs_print_device("Mounting", opts->dev_name,
                            ore_comp_dev(&sbi->oc, 0),
                            sbi->one_comp.obj.partition);
@@ -1023,6 +1033,9 @@ static int __init init_exofs(void)
        if (err)
                goto out_d;
 
+       /* We don't fail if sysfs creation failed */
+       exofs_sysfs_init();
+
        return 0;
 out_d:
        destroy_inodecache();
@@ -1032,6 +1045,7 @@ out:
 
 static void __exit exit_exofs(void)
 {
+       exofs_sysfs_uninit();
        unregister_filesystem(&exofs_type);
        destroy_inodecache();
 }
diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c
new file mode 100644 (file)
index 0000000..e32bc91
--- /dev/null
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2012
+ * Sachin Bhamare <sbhamare@panasas.com>
+ * Boaz Harrosh <bharrosh@panasas.com>
+ *
+ * This file is part of exofs.
+ *
+ * exofs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published by
+ * the Free Software Foundation.
+ *
+ * exofs is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with exofs; if not, write to the:
+ *     Free Software Foundation <licensing@fsf.org>
+ */
+
+#include <linux/kobject.h>
+#include <linux/device.h>
+
+#include "exofs.h"
+
+struct odev_attr {
+       struct attribute attr;
+       ssize_t (*show)(struct exofs_dev *, char *);
+       ssize_t (*store)(struct exofs_dev *, const char *, size_t);
+};
+
+static ssize_t odev_attr_show(struct kobject *kobj, struct attribute *attr,
+               char *buf)
+{
+       struct exofs_dev *edp = container_of(kobj, struct exofs_dev, ed_kobj);
+       struct odev_attr *a = container_of(attr, struct odev_attr, attr);
+
+       return a->show ? a->show(edp, buf) : 0;
+}
+
+static ssize_t odev_attr_store(struct kobject *kobj, struct attribute *attr,
+               const char *buf, size_t len)
+{
+       struct exofs_dev *edp = container_of(kobj, struct exofs_dev, ed_kobj);
+       struct odev_attr *a = container_of(attr, struct odev_attr, attr);
+
+       return a->store ? a->store(edp, buf, len) : len;
+}
+
+static const struct sysfs_ops odev_attr_ops = {
+       .show  = odev_attr_show,
+       .store = odev_attr_store,
+};
+
+
+static struct kset *exofs_kset;
+
+static ssize_t osdname_show(struct exofs_dev *edp, char *buf)
+{
+       struct osd_dev *odev = edp->ored.od;
+       const struct osd_dev_info *odi = osduld_device_info(odev);
+
+       return snprintf(buf, odi->osdname_len + 1, "%s", odi->osdname);
+}
+
+static ssize_t systemid_show(struct exofs_dev *edp, char *buf)
+{
+       struct osd_dev *odev = edp->ored.od;
+       const struct osd_dev_info *odi = osduld_device_info(odev);
+
+       memcpy(buf, odi->systemid, odi->systemid_len);
+       return odi->systemid_len;
+}
+
+static ssize_t uri_show(struct exofs_dev *edp, char *buf)
+{
+       return snprintf(buf, edp->urilen, "%s", edp->uri);
+}
+
+static ssize_t uri_store(struct exofs_dev *edp, const char *buf, size_t len)
+{
+       edp->urilen = strlen(buf) + 1;
+       edp->uri = krealloc(edp->uri, edp->urilen, GFP_KERNEL);
+       strncpy(edp->uri, buf, edp->urilen);
+       return edp->urilen;
+}
+
+#define OSD_ATTR(name, mode, show, store) \
+       static struct odev_attr odev_attr_##name = \
+                                       __ATTR(name, mode, show, store)
+
+OSD_ATTR(osdname, S_IRUGO, osdname_show, NULL);
+OSD_ATTR(systemid, S_IRUGO, systemid_show, NULL);
+OSD_ATTR(uri, S_IRWXU, uri_show, uri_store);
+
+static struct attribute *odev_attrs[] = {
+       &odev_attr_osdname.attr,
+       &odev_attr_systemid.attr,
+       &odev_attr_uri.attr,
+       NULL,
+};
+
+static struct kobj_type odev_ktype = {
+       .default_attrs  = odev_attrs,
+       .sysfs_ops      = &odev_attr_ops,
+};
+
+static struct kobj_type uuid_ktype = {
+};
+
+void exofs_sysfs_dbg_print()
+{
+#ifdef CONFIG_EXOFS_DEBUG
+       struct kobject *k_name, *k_tmp;
+
+       list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) {
+               printk(KERN_INFO "%s: name %s ref %d\n",
+                       __func__, kobject_name(k_name),
+                       (int)atomic_read(&k_name->kref.refcount));
+       }
+#endif
+}
+/*
+ * This function removes all kobjects under exofs_kset
+ * At the end of it, exofs_kset kobject will have a refcount
+ * of 1 which gets decremented only on exofs module unload
+ */
+void exofs_sysfs_sb_del(struct exofs_sb_info *sbi)
+{
+       struct kobject *k_name, *k_tmp;
+       struct kobject *s_kobj = &sbi->s_kobj;
+
+       list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) {
+               /* Remove all that are children of this SBI */
+               if (k_name->parent == s_kobj)
+                       kobject_put(k_name);
+       }
+       kobject_put(s_kobj);
+}
+
+/*
+ * This function creates sysfs entries to hold the current exofs cluster
+ * instance (uniquely identified by osdname,pid tuple).
+ * This function gets called once per exofs mount instance.
+ */
+int exofs_sysfs_sb_add(struct exofs_sb_info *sbi,
+                      struct exofs_dt_device_info *dt_dev)
+{
+       struct kobject *s_kobj;
+       int retval = 0;
+       uint64_t pid = sbi->one_comp.obj.partition;
+
+       /* allocate new uuid dirent */
+       s_kobj = &sbi->s_kobj;
+       s_kobj->kset = exofs_kset;
+       retval = kobject_init_and_add(s_kobj, &uuid_ktype,
+                       &exofs_kset->kobj,  "%s_%llx", dt_dev->osdname, pid);
+       if (retval) {
+               EXOFS_ERR("ERROR: Failed to create sysfs entry for "
+                         "uuid-%s_%llx => %d\n", dt_dev->osdname, pid, retval);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+int exofs_sysfs_odev_add(struct exofs_dev *edev, struct exofs_sb_info *sbi)
+{
+       struct kobject *d_kobj;
+       int retval = 0;
+
+       /* create osd device group which contains following attributes
+        * osdname, systemid & uri
+        */
+       d_kobj = &edev->ed_kobj;
+       d_kobj->kset = exofs_kset;
+       retval = kobject_init_and_add(d_kobj, &odev_ktype,
+                       &sbi->s_kobj, "dev%u", edev->did);
+       if (retval) {
+               EXOFS_ERR("ERROR: Failed to create sysfs entry for "
+                               "device dev%u\n", edev->did);
+               return retval;
+       }
+       return 0;
+}
+
+int exofs_sysfs_init(void)
+{
+       exofs_kset = kset_create_and_add("exofs", NULL, fs_kobj);
+       if (!exofs_kset) {
+               EXOFS_ERR("ERROR: kset_create_and_add exofs failed\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+void exofs_sysfs_uninit(void)
+{
+       kset_unregister(exofs_kset);
+}
index b05acb7961355dfb680e49f3145a11065f6ac851..b0201ca6e9c6e0b7837917420bb3dfe1dc06b88f 100644 (file)
@@ -304,24 +304,23 @@ out:
 
 /**
  * export_encode_fh - default export_operations->encode_fh function
- * @dentry:  the dentry to encode
+ * @inode:   the object to encode
  * @fh:      where to store the file handle fragment
  * @max_len: maximum length to store there
- * @connectable: whether to store parent information
+ * @parent:  parent directory inode, if wanted
  *
  * This default encode_fh function assumes that the 32 inode number
  * is suitable for locating an inode, and that the generation number
  * can be used to check that it is still valid.  It places them in the
  * filehandle fragment where export_decode_fh expects to find them.
  */
-static int export_encode_fh(struct dentry *dentry, struct fid *fid,
-               int *max_len, int connectable)
+static int export_encode_fh(struct inode *inode, struct fid *fid,
+               int *max_len, struct inode *parent)
 {
-       struct inode * inode = dentry->d_inode;
        int len = *max_len;
        int type = FILEID_INO32_GEN;
 
-       if (connectable && (len < 4)) {
+       if (parent && (len < 4)) {
                *max_len = 4;
                return 255;
        } else if (len < 2) {
@@ -332,14 +331,9 @@ static int export_encode_fh(struct dentry *dentry, struct fid *fid,
        len = 2;
        fid->i32.ino = inode->i_ino;
        fid->i32.gen = inode->i_generation;
-       if (connectable && !S_ISDIR(inode->i_mode)) {
-               struct inode *parent;
-
-               spin_lock(&dentry->d_lock);
-               parent = dentry->d_parent->d_inode;
+       if (parent) {
                fid->i32.parent_ino = parent->i_ino;
                fid->i32.parent_gen = parent->i_generation;
-               spin_unlock(&dentry->d_lock);
                len = 4;
                type = FILEID_INO32_GEN_PARENT;
        }
@@ -352,11 +346,22 @@ int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len,
 {
        const struct export_operations *nop = dentry->d_sb->s_export_op;
        int error;
+       struct dentry *p = NULL;
+       struct inode *inode = dentry->d_inode, *parent = NULL;
 
+       if (connectable && !S_ISDIR(inode->i_mode)) {
+               p = dget_parent(dentry);
+               /*
+                * note that while p might've ceased to be our parent already,
+                * it's still pinned by and still positive.
+                */
+               parent = p->d_inode;
+       }
        if (nop->encode_fh)
-               error = nop->encode_fh(dentry, fid->raw, max_len, connectable);
+               error = nop->encode_fh(inode, fid->raw, max_len, parent);
        else
-               error = export_encode_fh(dentry, fid, max_len, connectable);
+               error = export_encode_fh(inode, fid, max_len, parent);
+       dput(p);
 
        return error;
 }
index 030c6d277e146feb18382a0958fae788e3e78dc0..1c361399886249070044987d8c2a93033202f445 100644 (file)
@@ -165,7 +165,6 @@ static void release_blocks(struct super_block *sb, int count)
                struct ext2_sb_info *sbi = EXT2_SB(sb);
 
                percpu_counter_add(&sbi->s_freeblocks_counter, count);
-               sb->s_dirt = 1;
        }
 }
 
@@ -180,7 +179,6 @@ static void group_adjust_blocks(struct super_block *sb, int group_no,
                free_blocks = le16_to_cpu(desc->bg_free_blocks_count);
                desc->bg_free_blocks_count = cpu_to_le16(free_blocks + count);
                spin_unlock(sb_bgl_lock(sbi, group_no));
-               sb->s_dirt = 1;
                mark_buffer_dirty(bh);
        }
 }
@@ -479,7 +477,7 @@ void ext2_discard_reservation(struct inode *inode)
 }
 
 /**
- * ext2_free_blocks_sb() -- Free given blocks and update quota and i_blocks
+ * ext2_free_blocks() -- Free given blocks and update quota and i_blocks
  * @inode:             inode
  * @block:             start physcial block to free
  * @count:             number of blocks to free
index 8b15cf8cef37bfa29360444204eacfd5b722b952..c13eb7b91a1100fa108a33275905d31ae35bfb02 100644 (file)
@@ -81,7 +81,6 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir)
        spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));
        if (dir)
                percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter);
-       sb->s_dirt = 1;
        mark_buffer_dirty(bh);
 }
 
@@ -543,7 +542,6 @@ got:
        }
        spin_unlock(sb_bgl_lock(sbi, group));
 
-       sb->s_dirt = 1;
        mark_buffer_dirty(bh2);
        if (test_opt(sb, GRPID)) {
                inode->i_mode = mode;
index f9fa95f8443d79275b5cdbf73dda982f40b4b11d..264d315f6c4753d6bee17f28ea86c6497b560449 100644 (file)
@@ -90,7 +90,7 @@ void ext2_evict_inode(struct inode * inode)
        }
 
        invalidate_inode_buffers(inode);
-       end_writeback(inode);
+       clear_inode(inode);
 
        ext2_discard_reservation(inode);
        rsv = EXT2_I(inode)->i_block_alloc_info;
index 38f816071ddb7e7dcda1ee1fa3332c4cb3fe55f2..b3621cb7ea31d6959943c7a2ec37e7ae2b362293 100644 (file)
@@ -130,9 +130,6 @@ static void ext2_put_super (struct super_block * sb)
 
        dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 
-       if (sb->s_dirt)
-               ext2_write_super(sb);
-
        ext2_xattr_put_super(sb);
        if (!(sb->s_flags & MS_RDONLY)) {
                struct ext2_super_block *es = sbi->s_es;
@@ -307,7 +304,6 @@ static const struct super_operations ext2_sops = {
        .write_inode    = ext2_write_inode,
        .evict_inode    = ext2_evict_inode,
        .put_super      = ext2_put_super,
-       .write_super    = ext2_write_super,
        .sync_fs        = ext2_sync_fs,
        .statfs         = ext2_statfs,
        .remount_fs     = ext2_remount,
@@ -358,11 +354,6 @@ static struct dentry *ext2_fh_to_parent(struct super_block *sb, struct fid *fid,
                                    ext2_nfs_get_inode);
 }
 
-/* Yes, most of these are left as NULL!!
- * A NULL value implies the default, which works with ext2-like file
- * systems, but can be improved upon.
- * Currently only get_parent is required.
- */
 static const struct export_operations ext2_export_ops = {
        .fh_to_dentry = ext2_fh_to_dentry,
        .fh_to_parent = ext2_fh_to_parent,
@@ -1176,7 +1167,6 @@ static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
        mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
        if (wait)
                sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
-       sb->s_dirt = 0;
 }
 
 /*
@@ -1209,8 +1199,6 @@ void ext2_write_super(struct super_block *sb)
 {
        if (!(sb->s_flags & MS_RDONLY))
                ext2_sync_fs(sb, 1);
-       else
-               sb->s_dirt = 0;
 }
 
 static int ext2_remount (struct super_block * sb, int * flags, char * data)
@@ -1456,7 +1444,6 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
        struct buffer_head tmp_bh;
        struct buffer_head *bh;
 
-       mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
        while (towrite > 0) {
                tocopy = sb->s_blocksize - offset < towrite ?
                                sb->s_blocksize - offset : towrite;
@@ -1486,16 +1473,13 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
                blk++;
        }
 out:
-       if (len == towrite) {
-               mutex_unlock(&inode->i_mutex);
+       if (len == towrite)
                return err;
-       }
        if (inode->i_size < off+len-towrite)
                i_size_write(inode, off+len-towrite);
        inode->i_version++;
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        mark_inode_dirty(inode);
-       mutex_unlock(&inode->i_mutex);
        return len - towrite;
 }
 
index 6dcafc7efdfdb26c989f802c646a8b943adb93a3..b6754dbbce3c5e4942d8f46273d24c1236f0588b 100644 (file)
@@ -339,7 +339,6 @@ static void ext2_xattr_update_super_block(struct super_block *sb)
        spin_lock(&EXT2_SB(sb)->s_lock);
        EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
        spin_unlock(&EXT2_SB(sb)->s_lock);
-       sb->s_dirt = 1;
        mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
 }
 
index cc761ad8fa571541ae6aede1924b9178a342ab5f..92490e9f85ca0fc61a23f8142c0bb197758b1f1f 100644 (file)
  *
  */
 
+#include <linux/compat.h>
 #include "ext3.h"
 
 static unsigned char ext3_filetype_table[] = {
        DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
 };
 
-static int ext3_readdir(struct file *, void *, filldir_t);
 static int ext3_dx_readdir(struct file * filp,
                           void * dirent, filldir_t filldir);
-static int ext3_release_dir (struct inode * inode,
-                               struct file * filp);
-
-const struct file_operations ext3_dir_operations = {
-       .llseek         = generic_file_llseek,
-       .read           = generic_read_dir,
-       .readdir        = ext3_readdir,         /* we take BKL. needed?*/
-       .unlocked_ioctl = ext3_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl   = ext3_compat_ioctl,
-#endif
-       .fsync          = ext3_sync_file,       /* BKL held */
-       .release        = ext3_release_dir,
-};
-
 
 static unsigned char get_dtype(struct super_block *sb, int filetype)
 {
@@ -55,6 +40,25 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
        return (ext3_filetype_table[filetype]);
 }
 
+/**
+ * Check if the given dir-inode refers to an htree-indexed directory
+ * (or a directory which could potentially get converted to use htree
+ * indexing).
+ *
+ * Return 1 if it is a dx dir, 0 if not
+ */
+static int is_dx_dir(struct inode *inode)
+{
+       struct super_block *sb = inode->i_sb;
+
+       if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
+                    EXT3_FEATURE_COMPAT_DIR_INDEX) &&
+           ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
+            ((inode->i_size >> sb->s_blocksize_bits) == 1)))
+               return 1;
+
+       return 0;
+}
 
 int ext3_check_dir_entry (const char * function, struct inode * dir,
                          struct ext3_dir_entry_2 * de,
@@ -94,18 +98,13 @@ static int ext3_readdir(struct file * filp,
        unsigned long offset;
        int i, stored;
        struct ext3_dir_entry_2 *de;
-       struct super_block *sb;
        int err;
        struct inode *inode = filp->f_path.dentry->d_inode;
+       struct super_block *sb = inode->i_sb;
        int ret = 0;
        int dir_has_error = 0;
 
-       sb = inode->i_sb;
-
-       if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
-                                   EXT3_FEATURE_COMPAT_DIR_INDEX) &&
-           ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
-            ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
+       if (is_dx_dir(inode)) {
                err = ext3_dx_readdir(filp, dirent, filldir);
                if (err != ERR_BAD_DX_DIR) {
                        ret = err;
@@ -227,22 +226,87 @@ out:
        return ret;
 }
 
+static inline int is_32bit_api(void)
+{
+#ifdef CONFIG_COMPAT
+       return is_compat_task();
+#else
+       return (BITS_PER_LONG == 32);
+#endif
+}
+
 /*
  * These functions convert from the major/minor hash to an f_pos
- * value.
+ * value for dx directories
  *
- * Currently we only use major hash numer.  This is unfortunate, but
- * on 32-bit machines, the same VFS interface is used for lseek and
- * llseek, so if we use the 64 bit offset, then the 32-bit versions of
- * lseek/telldir/seekdir will blow out spectacularly, and from within
- * the ext2 low-level routine, we don't know if we're being called by
- * a 64-bit version of the system call or the 32-bit version of the
- * system call.  Worse yet, NFSv2 only allows for a 32-bit readdir
- * cookie.  Sigh.
+ * Upper layer (for example NFS) should specify FMODE_32BITHASH or
+ * FMODE_64BITHASH explicitly. On the other hand, we allow ext3 to be mounted
+ * directly on both 32-bit and 64-bit nodes, under such case, neither
+ * FMODE_32BITHASH nor FMODE_64BITHASH is specified.
  */
-#define hash2pos(major, minor) (major >> 1)
-#define pos2maj_hash(pos)      ((pos << 1) & 0xffffffff)
-#define pos2min_hash(pos)      (0)
+static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
+{
+       if ((filp->f_mode & FMODE_32BITHASH) ||
+           (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+               return major >> 1;
+       else
+               return ((__u64)(major >> 1) << 32) | (__u64)minor;
+}
+
+static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
+{
+       if ((filp->f_mode & FMODE_32BITHASH) ||
+           (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+               return (pos << 1) & 0xffffffff;
+       else
+               return ((pos >> 32) << 1) & 0xffffffff;
+}
+
+static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
+{
+       if ((filp->f_mode & FMODE_32BITHASH) ||
+           (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+               return 0;
+       else
+               return pos & 0xffffffff;
+}
+
+/*
+ * Return 32- or 64-bit end-of-file for dx directories
+ */
+static inline loff_t ext3_get_htree_eof(struct file *filp)
+{
+       if ((filp->f_mode & FMODE_32BITHASH) ||
+           (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+               return EXT3_HTREE_EOF_32BIT;
+       else
+               return EXT3_HTREE_EOF_64BIT;
+}
+
+
+/*
+ * ext3_dir_llseek() calls generic_file_llseek[_size]() to handle both
+ * non-htree and htree directories, where the "offset" is in terms
+ * of the filename hash value instead of the byte offset.
+ *
+ * Because we may return a 64-bit hash that is well beyond s_maxbytes,
+ * we need to pass the max hash as the maximum allowable offset in
+ * the htree directory case.
+ *
+ * NOTE: offsets obtained *before* ext3_set_inode_flag(dir, EXT3_INODE_INDEX)
+ *       will be invalid once the directory was converted into a dx directory
+ */
+loff_t ext3_dir_llseek(struct file *file, loff_t offset, int origin)
+{
+       struct inode *inode = file->f_mapping->host;
+       int dx_dir = is_dx_dir(inode);
+
+       if (likely(dx_dir))
+               return generic_file_llseek_size(file, offset, origin,
+                                               ext3_get_htree_eof(file));
+       else
+               return generic_file_llseek(file, offset, origin);
+}
 
 /*
  * This structure holds the nodes of the red-black tree used to store
@@ -303,15 +367,16 @@ static void free_rb_tree_fname(struct rb_root *root)
 }
 
 
-static struct dir_private_info *ext3_htree_create_dir_info(loff_t pos)
+static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp,
+                                                          loff_t pos)
 {
        struct dir_private_info *p;
 
        p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
        if (!p)
                return NULL;
-       p->curr_hash = pos2maj_hash(pos);
-       p->curr_minor_hash = pos2min_hash(pos);
+       p->curr_hash = pos2maj_hash(filp, pos);
+       p->curr_minor_hash = pos2min_hash(filp, pos);
        return p;
 }
 
@@ -401,7 +466,7 @@ static int call_filldir(struct file * filp, void * dirent,
                printk("call_filldir: called with null fname?!?\n");
                return 0;
        }
-       curr_pos = hash2pos(fname->hash, fname->minor_hash);
+       curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
        while (fname) {
                error = filldir(dirent, fname->name,
                                fname->name_len, curr_pos,
@@ -426,13 +491,13 @@ static int ext3_dx_readdir(struct file * filp,
        int     ret;
 
        if (!info) {
-               info = ext3_htree_create_dir_info(filp->f_pos);
+               info = ext3_htree_create_dir_info(filp, filp->f_pos);
                if (!info)
                        return -ENOMEM;
                filp->private_data = info;
        }
 
-       if (filp->f_pos == EXT3_HTREE_EOF)
+       if (filp->f_pos == ext3_get_htree_eof(filp))
                return 0;       /* EOF */
 
        /* Some one has messed with f_pos; reset the world */
@@ -440,8 +505,8 @@ static int ext3_dx_readdir(struct file * filp,
                free_rb_tree_fname(&info->root);
                info->curr_node = NULL;
                info->extra_fname = NULL;
-               info->curr_hash = pos2maj_hash(filp->f_pos);
-               info->curr_minor_hash = pos2min_hash(filp->f_pos);
+               info->curr_hash = pos2maj_hash(filp, filp->f_pos);
+               info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
        }
 
        /*
@@ -473,7 +538,7 @@ static int ext3_dx_readdir(struct file * filp,
                        if (ret < 0)
                                return ret;
                        if (ret == 0) {
-                               filp->f_pos = EXT3_HTREE_EOF;
+                               filp->f_pos = ext3_get_htree_eof(filp);
                                break;
                        }
                        info->curr_node = rb_first(&info->root);
@@ -493,7 +558,7 @@ static int ext3_dx_readdir(struct file * filp,
                        info->curr_minor_hash = fname->minor_hash;
                } else {
                        if (info->next_hash == ~0) {
-                               filp->f_pos = EXT3_HTREE_EOF;
+                               filp->f_pos = ext3_get_htree_eof(filp);
                                break;
                        }
                        info->curr_hash = info->next_hash;
@@ -512,3 +577,15 @@ static int ext3_release_dir (struct inode * inode, struct file * filp)
 
        return 0;
 }
+
+const struct file_operations ext3_dir_operations = {
+       .llseek         = ext3_dir_llseek,
+       .read           = generic_read_dir,
+       .readdir        = ext3_readdir,
+       .unlocked_ioctl = ext3_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = ext3_compat_ioctl,
+#endif
+       .fsync          = ext3_sync_file,
+       .release        = ext3_release_dir,
+};
index 7977973a24f0d83bc71a09b04b75d53e39959373..e85ff15a060e7dcb220c4170f8455f7d1c96cba2 100644 (file)
@@ -920,7 +920,11 @@ struct dx_hash_info
        u32             *seed;
 };
 
-#define EXT3_HTREE_EOF 0x7fffffff
+
+/* 32 and 64 bit signed EOF for dx directories */
+#define EXT3_HTREE_EOF_32BIT   ((1UL  << (32 - 1)) - 1)
+#define EXT3_HTREE_EOF_64BIT   ((1ULL << (64 - 1)) - 1)
+
 
 /*
  * Control parameters used by ext3_htree_next_block
index d10231ddcf8aa9058d6849a66c855c7fa901b46e..ede315cdf12614ba0520500aac39583083a2034e 100644 (file)
@@ -198,8 +198,8 @@ int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
                return -1;
        }
        hash = hash & ~1;
-       if (hash == (EXT3_HTREE_EOF << 1))
-               hash = (EXT3_HTREE_EOF-1) << 1;
+       if (hash == (EXT3_HTREE_EOF_32BIT << 1))
+               hash = (EXT3_HTREE_EOF_32BIT - 1) << 1;
        hinfo->hash = hash;
        hinfo->minor_hash = minor_hash;
        return 0;
index e3c39e4cec1943e0fb172e999cdaf51990ab7dc7..082afd78b10788ed8b1645f0f26c76bf3ea91b0e 100644 (file)
@@ -180,8 +180,7 @@ error_return:
  * It's OK to put directory into a group unless
  * it has too many directories already (max_dirs) or
  * it has too few free inodes left (min_inodes) or
- * it has too few free blocks left (min_blocks) or
- * it's already running too large debt (max_debt).
+ * it has too few free blocks left (min_blocks).
  * Parent's group is preferred, if it doesn't satisfy these
  * conditions we search cyclically through the rest. If none
  * of the groups look good we just look for a group with more
@@ -191,21 +190,16 @@ error_return:
  * when we allocate an inode, within 0--255.
  */
 
-#define INODE_COST 64
-#define BLOCK_COST 256
-
 static int find_group_orlov(struct super_block *sb, struct inode *parent)
 {
        int parent_group = EXT3_I(parent)->i_block_group;
        struct ext3_sb_info *sbi = EXT3_SB(sb);
-       struct ext3_super_block *es = sbi->s_es;
        int ngroups = sbi->s_groups_count;
        int inodes_per_group = EXT3_INODES_PER_GROUP(sb);
        unsigned int freei, avefreei;
        ext3_fsblk_t freeb, avefreeb;
-       ext3_fsblk_t blocks_per_dir;
        unsigned int ndirs;
-       int max_debt, max_dirs, min_inodes;
+       int max_dirs, min_inodes;
        ext3_grpblk_t min_blocks;
        int group = -1, i;
        struct ext3_group_desc *desc;
@@ -242,20 +236,10 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
                goto fallback;
        }
 
-       blocks_per_dir = (le32_to_cpu(es->s_blocks_count) - freeb) / ndirs;
-
        max_dirs = ndirs / ngroups + inodes_per_group / 16;
        min_inodes = avefreei - inodes_per_group / 4;
        min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4;
 
-       max_debt = EXT3_BLOCKS_PER_GROUP(sb) / max(blocks_per_dir, (ext3_fsblk_t)BLOCK_COST);
-       if (max_debt * INODE_COST > inodes_per_group)
-               max_debt = inodes_per_group / INODE_COST;
-       if (max_debt > 255)
-               max_debt = 255;
-       if (max_debt == 0)
-               max_debt = 1;
-
        for (i = 0; i < ngroups; i++) {
                group = (parent_group + i) % ngroups;
                desc = ext3_get_group_desc (sb, group, NULL);
index a09790a412b1a1e2313410eba141971992c866fe..9a4a5c48b1c99f6a60ff02d35cda4ef1ced9bd50 100644 (file)
@@ -272,18 +272,18 @@ void ext3_evict_inode (struct inode *inode)
        if (ext3_mark_inode_dirty(handle, inode)) {
                /* If that failed, just dquot_drop() and be done with that */
                dquot_drop(inode);
-               end_writeback(inode);
+               clear_inode(inode);
        } else {
                ext3_xattr_delete_inode(handle, inode);
                dquot_free_inode(inode);
                dquot_drop(inode);
-               end_writeback(inode);
+               clear_inode(inode);
                ext3_free_inode(handle, inode);
        }
        ext3_journal_stop(handle);
        return;
 no_delete:
-       end_writeback(inode);
+       clear_inode(inode);
        dquot_drop(inode);
 }
 
index 94ef7e6161292a596ad3b7d7f9bb6dc4c39caac7..8c3a44b7c375247f54ac0d0e61be3be99e0b1d6a 100644 (file)
@@ -3015,7 +3015,6 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
                        (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }
-       mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
        bh = ext3_bread(handle, inode, blk, 1, &err);
        if (!bh)
                goto out;
@@ -3039,10 +3038,8 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
        }
        brelse(bh);
 out:
-       if (err) {
-               mutex_unlock(&inode->i_mutex);
+       if (err)
                return err;
-       }
        if (inode->i_size < off + len) {
                i_size_write(inode, off + len);
                EXT3_I(inode)->i_disksize = inode->i_size;
@@ -3050,7 +3047,6 @@ out:
        inode->i_version++;
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        ext3_mark_inode_dirty(handle, inode);
-       mutex_unlock(&inode->i_mutex);
        return len;
 }
 
index 9ed1bb1f319f381b700a6386a4d8d068d04e0fdf..c22f17021b6eee7ca942a3525eb9f4fd23de6011 100644 (file)
@@ -2,6 +2,8 @@ config EXT4_FS
        tristate "The Extended 4 (ext4) filesystem"
        select JBD2
        select CRC16
+       select CRYPTO
+       select CRYPTO_CRC32C
        help
          This is the next generation of the ext3 filesystem.
 
index c45c41129a35b7346463e0f18e847b78a52e0426..99b6324290db916466d8b5c0633e9fa216d21798 100644 (file)
@@ -168,12 +168,14 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
 
        /* If checksum is bad mark all blocks used to prevent allocation
         * essentially implementing a per-group read-only flag. */
-       if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
+       if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
                ext4_error(sb, "Checksum bad for group %u", block_group);
                ext4_free_group_clusters_set(sb, gdp, 0);
                ext4_free_inodes_set(sb, gdp, 0);
                ext4_itable_unused_set(sb, gdp, 0);
                memset(bh->b_data, 0xff, sb->s_blocksize);
+               ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
+                                          EXT4_BLOCKS_PER_GROUP(sb) / 8);
                return;
        }
        memset(bh->b_data, 0, sb->s_blocksize);
@@ -210,6 +212,9 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
         */
        ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
                             sb->s_blocksize * 8, bh->b_data);
+       ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
+                                  EXT4_BLOCKS_PER_GROUP(sb) / 8);
+       ext4_group_desc_csum_set(sb, block_group, gdp);
 }
 
 /* Return the number of free blocks in a block group.  It is used when
@@ -276,9 +281,9 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
 }
 
 static int ext4_valid_block_bitmap(struct super_block *sb,
-                                       struct ext4_group_desc *desc,
-                                       unsigned int block_group,
-                                       struct buffer_head *bh)
+                                  struct ext4_group_desc *desc,
+                                  unsigned int block_group,
+                                  struct buffer_head *bh)
 {
        ext4_grpblk_t offset;
        ext4_grpblk_t next_zero_bit;
@@ -325,6 +330,23 @@ err_out:
                        block_group, bitmap_blk);
        return 0;
 }
+
+void ext4_validate_block_bitmap(struct super_block *sb,
+                              struct ext4_group_desc *desc,
+                              unsigned int block_group,
+                              struct buffer_head *bh)
+{
+       if (buffer_verified(bh))
+               return;
+
+       ext4_lock_group(sb, block_group);
+       if (ext4_valid_block_bitmap(sb, desc, block_group, bh) &&
+           ext4_block_bitmap_csum_verify(sb, block_group, desc, bh,
+                                         EXT4_BLOCKS_PER_GROUP(sb) / 8))
+               set_buffer_verified(bh);
+       ext4_unlock_group(sb, block_group);
+}
+
 /**
  * ext4_read_block_bitmap()
  * @sb:                        super block
@@ -355,12 +377,12 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
        }
 
        if (bitmap_uptodate(bh))
-               return bh;
+               goto verify;
 
        lock_buffer(bh);
        if (bitmap_uptodate(bh)) {
                unlock_buffer(bh);
-               return bh;
+               goto verify;
        }
        ext4_lock_group(sb, block_group);
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
@@ -379,7 +401,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
                 */
                set_bitmap_uptodate(bh);
                unlock_buffer(bh);
-               return bh;
+               goto verify;
        }
        /*
         * submit the buffer_head for reading
@@ -390,6 +412,9 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
        get_bh(bh);
        submit_bh(READ, bh);
        return bh;
+verify:
+       ext4_validate_block_bitmap(sb, desc, block_group, bh);
+       return bh;
 }
 
 /* Returns 0 on success, 1 on error */
@@ -412,7 +437,7 @@ int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
        }
        clear_buffer_new(bh);
        /* Panic or remount fs read-only if block bitmap is invalid */
-       ext4_valid_block_bitmap(sb, desc, block_group, bh);
+       ext4_validate_block_bitmap(sb, desc, block_group, bh);
        return 0;
 }
 
index fa3af81ac565c16dba6237edc89c3c7d70c5fc61..b319721da26ae32010adcd46db7e2d98ec50887a 100644 (file)
@@ -29,3 +29,86 @@ unsigned int ext4_count_free(struct buffer_head *map, unsigned int numchars)
 
 #endif  /*  EXT4FS_DEBUG  */
 
+int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+                                 struct ext4_group_desc *gdp,
+                                 struct buffer_head *bh, int sz)
+{
+       __u32 hi;
+       __u32 provided, calculated;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
+       calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+       if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) {
+               hi = le16_to_cpu(gdp->bg_inode_bitmap_csum_hi);
+               provided |= (hi << 16);
+       } else
+               calculated &= 0xFFFF;
+
+       return provided == calculated;
+}
+
+void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+                               struct ext4_group_desc *gdp,
+                               struct buffer_head *bh, int sz)
+{
+       __u32 csum;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+       gdp->bg_inode_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
+       if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END)
+               gdp->bg_inode_bitmap_csum_hi = cpu_to_le16(csum >> 16);
+}
+
+int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+                                 struct ext4_group_desc *gdp,
+                                 struct buffer_head *bh, int sz)
+{
+       __u32 hi;
+       __u32 provided, calculated;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo);
+       calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+       if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END) {
+               hi = le16_to_cpu(gdp->bg_block_bitmap_csum_hi);
+               provided |= (hi << 16);
+       } else
+               calculated &= 0xFFFF;
+
+       if (provided == calculated)
+               return 1;
+
+       ext4_error(sb, "Bad block bitmap checksum: block_group = %u", group);
+       return 0;
+}
+
+void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+                               struct ext4_group_desc *gdp,
+                               struct buffer_head *bh, int sz)
+{
+       __u32 csum;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+       gdp->bg_block_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
+       if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END)
+               gdp->bg_block_bitmap_csum_hi = cpu_to_le16(csum >> 16);
+}
index b86786202643bdd8044ee85fb72a0a21bc2c9bef..aa39e600d15954244aead38f7aed30513ce86d65 100644 (file)
@@ -179,6 +179,18 @@ static int ext4_readdir(struct file *filp,
                        continue;
                }
 
+               /* Check the checksum */
+               if (!buffer_verified(bh) &&
+                   !ext4_dirent_csum_verify(inode,
+                               (struct ext4_dir_entry *)bh->b_data)) {
+                       EXT4_ERROR_FILE(filp, 0, "directory fails checksum "
+                                       "at offset %llu",
+                                       (unsigned long long)filp->f_pos);
+                       filp->f_pos += sb->s_blocksize - offset;
+                       continue;
+               }
+               set_buffer_verified(bh);
+
 revalidate:
                /* If the dir block has changed since the last call to
                 * readdir(2), then we might be pointing to an invalid
index c21b1de51afbb42191adea4fc4a357e3906c8489..cfc4e01b3c8370c642681824ef55b13a66683c0d 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/wait.h>
 #include <linux/blockgroup_lock.h>
 #include <linux/percpu_counter.h>
+#include <crypto/hash.h>
 #ifdef __KERNEL__
 #include <linux/compat.h>
 #endif
@@ -298,7 +299,9 @@ struct ext4_group_desc
        __le16  bg_free_inodes_count_lo;/* Free inodes count */
        __le16  bg_used_dirs_count_lo;  /* Directories count */
        __le16  bg_flags;               /* EXT4_BG_flags (INODE_UNINIT, etc) */
-       __u32   bg_reserved[2];         /* Likely block/inode bitmap checksum */
+       __le32  bg_exclude_bitmap_lo;   /* Exclude bitmap for snapshots */
+       __le16  bg_block_bitmap_csum_lo;/* crc32c(s_uuid+grp_num+bbitmap) LE */
+       __le16  bg_inode_bitmap_csum_lo;/* crc32c(s_uuid+grp_num+ibitmap) LE */
        __le16  bg_itable_unused_lo;    /* Unused inodes count */
        __le16  bg_checksum;            /* crc16(sb_uuid+group+desc) */
        __le32  bg_block_bitmap_hi;     /* Blocks bitmap block MSB */
@@ -308,9 +311,19 @@ struct ext4_group_desc
        __le16  bg_free_inodes_count_hi;/* Free inodes count MSB */
        __le16  bg_used_dirs_count_hi;  /* Directories count MSB */
        __le16  bg_itable_unused_hi;    /* Unused inodes count MSB */
-       __u32   bg_reserved2[3];
+       __le32  bg_exclude_bitmap_hi;   /* Exclude bitmap block MSB */
+       __le16  bg_block_bitmap_csum_hi;/* crc32c(s_uuid+grp_num+bbitmap) BE */
+       __le16  bg_inode_bitmap_csum_hi;/* crc32c(s_uuid+grp_num+ibitmap) BE */
+       __u32   bg_reserved;
 };
 
+#define EXT4_BG_INODE_BITMAP_CSUM_HI_END       \
+       (offsetof(struct ext4_group_desc, bg_inode_bitmap_csum_hi) + \
+        sizeof(__le16))
+#define EXT4_BG_BLOCK_BITMAP_CSUM_HI_END       \
+       (offsetof(struct ext4_group_desc, bg_block_bitmap_csum_hi) + \
+        sizeof(__le16))
+
 /*
  * Structure of a flex block group info
  */
@@ -650,7 +663,8 @@ struct ext4_inode {
                        __le16  l_i_file_acl_high;
                        __le16  l_i_uid_high;   /* these 2 fields */
                        __le16  l_i_gid_high;   /* were reserved2[0] */
-                       __u32   l_i_reserved2;
+                       __le16  l_i_checksum_lo;/* crc32c(uuid+inum+inode) LE */
+                       __le16  l_i_reserved;
                } linux2;
                struct {
                        __le16  h_i_reserved1;  /* Obsoleted fragment number/size which are removed in ext4 */
@@ -666,7 +680,7 @@ struct ext4_inode {
                } masix2;
        } osd2;                         /* OS dependent 2 */
        __le16  i_extra_isize;
-       __le16  i_pad1;
+       __le16  i_checksum_hi;  /* crc32c(uuid+inum+inode) BE */
        __le32  i_ctime_extra;  /* extra Change time      (nsec << 2 | epoch) */
        __le32  i_mtime_extra;  /* extra Modification time(nsec << 2 | epoch) */
        __le32  i_atime_extra;  /* extra Access time      (nsec << 2 | epoch) */
@@ -768,7 +782,7 @@ do {                                                                               \
 #define i_gid_low      i_gid
 #define i_uid_high     osd2.linux2.l_i_uid_high
 #define i_gid_high     osd2.linux2.l_i_gid_high
-#define i_reserved2    osd2.linux2.l_i_reserved2
+#define i_checksum_lo  osd2.linux2.l_i_checksum_lo
 
 #elif defined(__GNU__)
 
@@ -908,6 +922,9 @@ struct ext4_inode_info {
         */
        tid_t i_sync_tid;
        tid_t i_datasync_tid;
+
+       /* Precomputed uuid+inum+igen checksum for seeding inode checksums */
+       __u32 i_csum_seed;
 };
 
 /*
@@ -1001,6 +1018,9 @@ extern void ext4_set_bits(void *bm, int cur, int len);
 #define EXT4_ERRORS_PANIC              3       /* Panic */
 #define EXT4_ERRORS_DEFAULT            EXT4_ERRORS_CONTINUE
 
+/* Metadata checksum algorithm codes */
+#define EXT4_CRC32C_CHKSUM             1
+
 /*
  * Structure of the super block
  */
@@ -1087,7 +1107,7 @@ struct ext4_super_block {
        __le64  s_mmp_block;            /* Block for multi-mount protection */
        __le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
        __u8    s_log_groups_per_flex;  /* FLEX_BG group size */
-       __u8    s_reserved_char_pad;
+       __u8    s_checksum_type;        /* metadata checksum algorithm used */
        __le16  s_reserved_pad;
        __le64  s_kbytes_written;       /* nr of lifetime kilobytes written */
        __le32  s_snapshot_inum;        /* Inode number of active snapshot */
@@ -1113,7 +1133,8 @@ struct ext4_super_block {
        __le32  s_usr_quota_inum;       /* inode for tracking user quota */
        __le32  s_grp_quota_inum;       /* inode for tracking group quota */
        __le32  s_overhead_clusters;    /* overhead blocks/clusters in fs */
-       __le32  s_reserved[109];        /* Padding to the end of the block */
+       __le32  s_reserved[108];        /* Padding to the end of the block */
+       __le32  s_checksum;             /* crc32c(superblock) */
 };
 
 #define EXT4_S_ERR_LEN (EXT4_S_ERR_END - EXT4_S_ERR_START)
@@ -1176,6 +1197,7 @@ struct ext4_sb_info {
        struct proc_dir_entry *s_proc;
        struct kobject s_kobj;
        struct completion s_kobj_unregister;
+       struct super_block *s_sb;
 
        /* Journaling */
        struct journal_s *s_journal;
@@ -1266,6 +1288,12 @@ struct ext4_sb_info {
 
        /* record the last minlen when FITRIM is called. */
        atomic_t s_last_trim_minblks;
+
+       /* Reference to checksum algorithm driver via cryptoapi */
+       struct crypto_shash *s_chksum_driver;
+
+       /* Precomputed FS UUID checksum for seeding other checksums */
+       __u32 s_csum_seed;
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1414,6 +1442,12 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
 #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE     0x0040
 #define EXT4_FEATURE_RO_COMPAT_QUOTA           0x0100
 #define EXT4_FEATURE_RO_COMPAT_BIGALLOC                0x0200
+/*
+ * METADATA_CSUM also enables group descriptor checksums (GDT_CSUM).  When
+ * METADATA_CSUM is set, group descriptor checksums use the same algorithm as
+ * all other data structures' checksums.  However, the METADATA_CSUM and
+ * GDT_CSUM bits are mutually exclusive.
+ */
 #define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM   0x0400
 
 #define EXT4_FEATURE_INCOMPAT_COMPRESSION      0x0001
@@ -1461,7 +1495,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
                                         EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
                                         EXT4_FEATURE_RO_COMPAT_BTREE_DIR |\
                                         EXT4_FEATURE_RO_COMPAT_HUGE_FILE |\
-                                        EXT4_FEATURE_RO_COMPAT_BIGALLOC)
+                                        EXT4_FEATURE_RO_COMPAT_BIGALLOC |\
+                                        EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)
 
 /*
  * Default values for user and/or group using reserved blocks
@@ -1526,6 +1561,18 @@ struct ext4_dir_entry_2 {
        char    name[EXT4_NAME_LEN];    /* File name */
 };
 
+/*
+ * This is a bogus directory entry at the end of each leaf block that
+ * records checksums.
+ */
+struct ext4_dir_entry_tail {
+       __le32  det_reserved_zero1;     /* Pretend to be unused */
+       __le16  det_rec_len;            /* 12 */
+       __u8    det_reserved_zero2;     /* Zero name length */
+       __u8    det_reserved_ft;        /* 0xDE, fake file type */
+       __le32  det_checksum;           /* crc32c(uuid+inum+dirblock) */
+};
+
 /*
  * Ext4 directory file types.  Only the low 3 bits are used.  The
  * other bits are reserved for now.
@@ -1541,6 +1588,8 @@ struct ext4_dir_entry_2 {
 
 #define EXT4_FT_MAX            8
 
+#define EXT4_FT_DIR_CSUM       0xDE
+
 /*
  * EXT4_DIR_PAD defines the directory entries boundaries
  *
@@ -1609,6 +1658,25 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
 #define DX_HASH_HALF_MD4_UNSIGNED      4
 #define DX_HASH_TEA_UNSIGNED           5
 
+static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc,
+                             const void *address, unsigned int length)
+{
+       struct {
+               struct shash_desc shash;
+               char ctx[crypto_shash_descsize(sbi->s_chksum_driver)];
+       } desc;
+       int err;
+
+       desc.shash.tfm = sbi->s_chksum_driver;
+       desc.shash.flags = 0;
+       *(u32 *)desc.ctx = crc;
+
+       err = crypto_shash_update(&desc.shash, address, length);
+       BUG_ON(err);
+
+       return *(u32 *)desc.ctx;
+}
+
 #ifdef __KERNEL__
 
 /* hash info structure used by the directory hash */
@@ -1741,7 +1809,8 @@ struct mmp_struct {
        __le16  mmp_check_interval;
 
        __le16  mmp_pad1;
-       __le32  mmp_pad2[227];
+       __le32  mmp_pad2[226];
+       __le32  mmp_checksum;           /* crc32c(uuid+mmp_block) */
 };
 
 /* arguments passed to the mmp thread */
@@ -1784,8 +1853,24 @@ struct mmpd_data {
 
 /* bitmap.c */
 extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
+void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+                               struct ext4_group_desc *gdp,
+                               struct buffer_head *bh, int sz);
+int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+                                 struct ext4_group_desc *gdp,
+                                 struct buffer_head *bh, int sz);
+void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+                               struct ext4_group_desc *gdp,
+                               struct buffer_head *bh, int sz);
+int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+                                 struct ext4_group_desc *gdp,
+                                 struct buffer_head *bh, int sz);
 
 /* balloc.c */
+extern void ext4_validate_block_bitmap(struct super_block *sb,
+                                      struct ext4_group_desc *desc,
+                                      unsigned int block_group,
+                                      struct buffer_head *bh);
 extern unsigned int ext4_block_group(struct super_block *sb,
                        ext4_fsblk_t blocknr);
 extern ext4_grpblk_t ext4_block_group_offset(struct super_block *sb,
@@ -1864,7 +1949,7 @@ extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate);
 /* mballoc.c */
 extern long ext4_mb_stats;
 extern long ext4_mb_max_to_scan;
-extern int ext4_mb_init(struct super_block *, int);
+extern int ext4_mb_init(struct super_block *);
 extern int ext4_mb_release(struct super_block *);
 extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *,
                                struct ext4_allocation_request *, int *);
@@ -1936,6 +2021,8 @@ extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
 extern int ext4_ext_migrate(struct inode *);
 
 /* namei.c */
+extern int ext4_dirent_csum_verify(struct inode *inode,
+                                  struct ext4_dir_entry *dirent);
 extern int ext4_orphan_add(handle_t *, struct inode *);
 extern int ext4_orphan_del(handle_t *, struct inode *);
 extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
@@ -1950,6 +2037,10 @@ extern int ext4_group_extend(struct super_block *sb,
 extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
 
 /* super.c */
+extern int ext4_superblock_csum_verify(struct super_block *sb,
+                                      struct ext4_super_block *es);
+extern void ext4_superblock_csum_set(struct super_block *sb,
+                                    struct ext4_super_block *es);
 extern void *ext4_kvmalloc(size_t size, gfp_t flags);
 extern void *ext4_kvzalloc(size_t size, gfp_t flags);
 extern void ext4_kvfree(void *ptr);
@@ -2025,10 +2116,17 @@ extern void ext4_used_dirs_set(struct super_block *sb,
                                struct ext4_group_desc *bg, __u32 count);
 extern void ext4_itable_unused_set(struct super_block *sb,
                                   struct ext4_group_desc *bg, __u32 count);
-extern __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 group,
-                                  struct ext4_group_desc *gdp);
-extern int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 group,
+extern int ext4_group_desc_csum_verify(struct super_block *sb, __u32 group,
                                       struct ext4_group_desc *gdp);
+extern void ext4_group_desc_csum_set(struct super_block *sb, __u32 group,
+                                    struct ext4_group_desc *gdp);
+
+static inline int ext4_has_group_desc_csum(struct super_block *sb)
+{
+       return EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                         EXT4_FEATURE_RO_COMPAT_GDT_CSUM |
+                                         EXT4_FEATURE_RO_COMPAT_METADATA_CSUM);
+}
 
 static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
 {
@@ -2225,6 +2323,9 @@ static inline void ext4_unlock_group(struct super_block *sb,
 
 static inline void ext4_mark_super_dirty(struct super_block *sb)
 {
+       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+       ext4_superblock_csum_set(sb, es);
        if (EXT4_SB(sb)->s_journal == NULL)
                sb->s_dirt = 1;
 }
@@ -2314,6 +2415,9 @@ extern int ext4_bio_write_page(struct ext4_io_submit *io,
 
 /* mmp.c */
 extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
+extern void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp);
+extern int ext4_mmp_csum_verify(struct super_block *sb,
+                               struct mmp_struct *mmp);
 
 /* BH_Uninit flag: blocks are allocated but uninitialized on disk */
 enum ext4_state_bits {
index 0f58b86e3a0206e19626f361f453aa80b2838857..cb1b2c919963290fd10d09ba12f6d8c53ace9fa6 100644 (file)
  * ext4_inode has i_block array (60 bytes total).
  * The first 12 bytes store ext4_extent_header;
  * the remainder stores an array of ext4_extent.
+ * For non-inode extent blocks, ext4_extent_tail
+ * follows the array.
  */
 
+/*
+ * This is the extent tail on-disk structure.
+ * All other extent structures are 12 bytes long.  It turns out that
+ * block_size % 12 >= 4 for at least all powers of 2 greater than 512, which
+ * covers all valid ext4 block sizes.  Therefore, this tail structure can be
+ * crammed into the end of the block without having to rebalance the tree.
+ */
+struct ext4_extent_tail {
+       __le32  et_checksum;    /* crc32c(uuid+inum+extent_block) */
+};
+
 /*
  * This is the extent on-disk structure.
  * It's used at the bottom of the tree.
@@ -101,6 +114,17 @@ struct ext4_extent_header {
 
 #define EXT4_EXT_MAGIC         cpu_to_le16(0xf30a)
 
+#define EXT4_EXTENT_TAIL_OFFSET(hdr) \
+       (sizeof(struct ext4_extent_header) + \
+        (sizeof(struct ext4_extent) * le16_to_cpu((hdr)->eh_max)))
+
+static inline struct ext4_extent_tail *
+find_ext4_extent_tail(struct ext4_extent_header *eh)
+{
+       return (struct ext4_extent_tail *)(((void *)eh) +
+                                          EXT4_EXTENT_TAIL_OFFSET(eh));
+}
+
 /*
  * Array of ext4_ext_path contains path to some extent.
  * Creation/lookup routines use it for traversal/splitting/etc.
index aca17901758249d4329d780714e328ef42851e35..90f7c2e84db1bef3fdb90931f9124cfe052705dd 100644 (file)
@@ -138,16 +138,23 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
 }
 
 int __ext4_handle_dirty_super(const char *where, unsigned int line,
-                             handle_t *handle, struct super_block *sb)
+                             handle_t *handle, struct super_block *sb,
+                             int now)
 {
        struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
        int err = 0;
 
        if (ext4_handle_valid(handle)) {
+               ext4_superblock_csum_set(sb,
+                               (struct ext4_super_block *)bh->b_data);
                err = jbd2_journal_dirty_metadata(handle, bh);
                if (err)
                        ext4_journal_abort_handle(where, line, __func__,
                                                  bh, handle, err);
+       } else if (now) {
+               ext4_superblock_csum_set(sb,
+                               (struct ext4_super_block *)bh->b_data);
+               mark_buffer_dirty(bh);
        } else
                sb->s_dirt = 1;
        return err;
index 83b20fcf9400b11b28185470f8feef309faa8252..f440e8f1841f4e2521486bd94ae19ed83aa896ab 100644 (file)
@@ -213,7 +213,8 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
                                 struct buffer_head *bh);
 
 int __ext4_handle_dirty_super(const char *where, unsigned int line,
-                             handle_t *handle, struct super_block *sb);
+                             handle_t *handle, struct super_block *sb,
+                             int now);
 
 #define ext4_journal_get_write_access(handle, bh) \
        __ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh))
@@ -225,8 +226,10 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
 #define ext4_handle_dirty_metadata(handle, inode, bh) \
        __ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \
                                     (bh))
+#define ext4_handle_dirty_super_now(handle, sb) \
+       __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb), 1)
 #define ext4_handle_dirty_super(handle, sb) \
-       __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
+       __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb), 0)
 
 handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks);
 int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);
index abcdeab67f5232b66d4aa5a6cbb88838094f6247..91341ec6e06a94f2f400d10039a64585cd17ed2e 100644 (file)
 #define EXT4_EXT_MARK_UNINIT1  0x2  /* mark first half uninitialized */
 #define EXT4_EXT_MARK_UNINIT2  0x4  /* mark second half uninitialized */
 
+static __le32 ext4_extent_block_csum(struct inode *inode,
+                                    struct ext4_extent_header *eh)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       __u32 csum;
+
+       csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
+                          EXT4_EXTENT_TAIL_OFFSET(eh));
+       return cpu_to_le32(csum);
+}
+
+static int ext4_extent_block_csum_verify(struct inode *inode,
+                                        struct ext4_extent_header *eh)
+{
+       struct ext4_extent_tail *et;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       et = find_ext4_extent_tail(eh);
+       if (et->et_checksum != ext4_extent_block_csum(inode, eh))
+               return 0;
+       return 1;
+}
+
+static void ext4_extent_block_csum_set(struct inode *inode,
+                                      struct ext4_extent_header *eh)
+{
+       struct ext4_extent_tail *et;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       et = find_ext4_extent_tail(eh);
+       et->et_checksum = ext4_extent_block_csum(inode, eh);
+}
+
 static int ext4_split_extent(handle_t *handle,
                                struct inode *inode,
                                struct ext4_ext_path *path,
@@ -117,6 +157,7 @@ static int __ext4_ext_dirty(const char *where, unsigned int line,
 {
        int err;
        if (path->p_bh) {
+               ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
                /* path points to block */
                err = __ext4_handle_dirty_metadata(where, line, handle,
                                                   inode, path->p_bh);
@@ -391,6 +432,12 @@ static int __ext4_ext_check(const char *function, unsigned int line,
                error_msg = "invalid extent entries";
                goto corrupted;
        }
+       /* Verify checksum on non-root extent tree nodes */
+       if (ext_depth(inode) != depth &&
+           !ext4_extent_block_csum_verify(inode, eh)) {
+               error_msg = "extent tree corrupted";
+               goto corrupted;
+       }
        return 0;
 
 corrupted:
@@ -412,6 +459,26 @@ int ext4_ext_check_inode(struct inode *inode)
        return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
 }
 
+static int __ext4_ext_check_block(const char *function, unsigned int line,
+                                 struct inode *inode,
+                                 struct ext4_extent_header *eh,
+                                 int depth,
+                                 struct buffer_head *bh)
+{
+       int ret;
+
+       if (buffer_verified(bh))
+               return 0;
+       ret = ext4_ext_check(inode, eh, depth);
+       if (ret)
+               return ret;
+       set_buffer_verified(bh);
+       return ret;
+}
+
+#define ext4_ext_check_block(inode, eh, depth, bh)     \
+       __ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)
+
 #ifdef EXT_DEBUG
 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
 {
@@ -536,7 +603,7 @@ ext4_ext_binsearch_idx(struct inode *inode,
        }
 
        path->p_idx = l - 1;
-       ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
+       ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
                  ext4_idx_pblock(path->p_idx));
 
 #ifdef CHECK_BINSEARCH
@@ -668,8 +735,6 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
        i = depth;
        /* walk through the tree */
        while (i) {
-               int need_to_validate = 0;
-
                ext_debug("depth %d: num %d, max %d\n",
                          ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
 
@@ -688,8 +753,6 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                                put_bh(bh);
                                goto err;
                        }
-                       /* validate the extent entries */
-                       need_to_validate = 1;
                }
                eh = ext_block_hdr(bh);
                ppos++;
@@ -703,7 +766,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                path[ppos].p_hdr = eh;
                i--;
 
-               if (need_to_validate && ext4_ext_check(inode, eh, i))
+               if (ext4_ext_check_block(inode, eh, i, bh))
                        goto err;
        }
 
@@ -914,6 +977,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                le16_add_cpu(&neh->eh_entries, m);
        }
 
+       ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
 
@@ -992,6 +1056,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                                sizeof(struct ext4_extent_idx) * m);
                        le16_add_cpu(&neh->eh_entries, m);
                }
+               ext4_extent_block_csum_set(inode, neh);
                set_buffer_uptodate(bh);
                unlock_buffer(bh);
 
@@ -1089,6 +1154,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
        else
                neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
+       ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
 
@@ -1344,7 +1410,8 @@ got_index:
                        return -EIO;
                eh = ext_block_hdr(bh);
                /* subtract from p_depth to get proper eh_depth */
-               if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
+               if (ext4_ext_check_block(inode, eh,
+                                        path->p_depth - depth, bh)) {
                        put_bh(bh);
                        return -EIO;
                }
@@ -1357,7 +1424,7 @@ got_index:
        if (bh == NULL)
                return -EIO;
        eh = ext_block_hdr(bh);
-       if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
+       if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
                put_bh(bh);
                return -EIO;
        }
@@ -2644,8 +2711,8 @@ cont:
                                err = -EIO;
                                break;
                        }
-                       if (ext4_ext_check(inode, ext_block_hdr(bh),
-                                                       depth - i - 1)) {
+                       if (ext4_ext_check_block(inode, ext_block_hdr(bh),
+                                                       depth - i - 1, bh)) {
                                err = -EIO;
                                break;
                        }
@@ -4722,8 +4789,8 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
 
        /* Now release the pages */
        if (last_page_offset > first_page_offset) {
-               truncate_inode_pages_range(mapping, first_page_offset,
-                                          last_page_offset-1);
+               truncate_pagecache_range(inode, first_page_offset,
+                                        last_page_offset - 1);
        }
 
        /* finish any pending end_io work */
index cb70f1812a70f5ca8452e98776cd309ad6638055..8c7642a00054fd1ddf649e733e4b6efb5a0eb14b 100644 (file)
@@ -95,7 +95,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
 {
        struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
        int unaligned_aio = 0;
-       int ret;
+       ssize_t ret;
 
        /*
         * If we have encountered a bitmap-format file, the size limit
index 9f9acac6c43f4ac8006e363b5567b4a88dd56615..d48e8b14928cf993c50c33fe9b18a90203c2c492 100644 (file)
@@ -70,24 +70,27 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb,
                                       ext4_group_t block_group,
                                       struct ext4_group_desc *gdp)
 {
-       struct ext4_sb_info *sbi = EXT4_SB(sb);
-
        J_ASSERT_BH(bh, buffer_locked(bh));
 
        /* If checksum is bad mark all blocks and inodes use to prevent
         * allocation, essentially implementing a per-group read-only flag. */
-       if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
+       if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
                ext4_error(sb, "Checksum bad for group %u", block_group);
                ext4_free_group_clusters_set(sb, gdp, 0);
                ext4_free_inodes_set(sb, gdp, 0);
                ext4_itable_unused_set(sb, gdp, 0);
                memset(bh->b_data, 0xff, sb->s_blocksize);
+               ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
+                                          EXT4_INODES_PER_GROUP(sb) / 8);
                return 0;
        }
 
        memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
        ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
                        bh->b_data);
+       ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
+                                  EXT4_INODES_PER_GROUP(sb) / 8);
+       ext4_group_desc_csum_set(sb, block_group, gdp);
 
        return EXT4_INODES_PER_GROUP(sb);
 }
@@ -128,12 +131,12 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
                return NULL;
        }
        if (bitmap_uptodate(bh))
-               return bh;
+               goto verify;
 
        lock_buffer(bh);
        if (bitmap_uptodate(bh)) {
                unlock_buffer(bh);
-               return bh;
+               goto verify;
        }
 
        ext4_lock_group(sb, block_group);
@@ -141,6 +144,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
                ext4_init_inode_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
+               set_buffer_verified(bh);
                ext4_unlock_group(sb, block_group);
                unlock_buffer(bh);
                return bh;
@@ -154,7 +158,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
                 */
                set_bitmap_uptodate(bh);
                unlock_buffer(bh);
-               return bh;
+               goto verify;
        }
        /*
         * submit the buffer_head for reading
@@ -171,6 +175,20 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
                           block_group, bitmap_blk);
                return NULL;
        }
+
+verify:
+       ext4_lock_group(sb, block_group);
+       if (!buffer_verified(bh) &&
+           !ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
+                                          EXT4_INODES_PER_GROUP(sb) / 8)) {
+               ext4_unlock_group(sb, block_group);
+               put_bh(bh);
+               ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
+                          "inode_bitmap = %llu", block_group, bitmap_blk);
+               return NULL;
+       }
+       ext4_unlock_group(sb, block_group);
+       set_buffer_verified(bh);
        return bh;
 }
 
@@ -276,7 +294,9 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
                ext4_used_dirs_set(sb, gdp, count);
                percpu_counter_dec(&sbi->s_dirs_counter);
        }
-       gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+       ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
+                                  EXT4_INODES_PER_GROUP(sb) / 8);
+       ext4_group_desc_csum_set(sb, block_group, gdp);
        ext4_unlock_group(sb, block_group);
 
        percpu_counter_inc(&sbi->s_freeinodes_counter);
@@ -488,10 +508,12 @@ fallback_retry:
        for (i = 0; i < ngroups; i++) {
                grp = (parent_group + i) % ngroups;
                desc = ext4_get_group_desc(sb, grp, NULL);
-               grp_free = ext4_free_inodes_count(sb, desc);
-               if (desc && grp_free && grp_free >= avefreei) {
-                       *group = grp;
-                       return 0;
+               if (desc) {
+                       grp_free = ext4_free_inodes_count(sb, desc);
+                       if (grp_free && grp_free >= avefreei) {
+                               *group = grp;
+                               return 0;
+                       }
                }
        }
 
@@ -709,7 +731,7 @@ repeat_in_this_group:
 
 got:
        /* We may have to initialize the block bitmap if it isn't already */
-       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
+       if (ext4_has_group_desc_csum(sb) &&
            gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                struct buffer_head *block_bitmap_bh;
 
@@ -731,8 +753,11 @@ got:
                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                        ext4_free_group_clusters_set(sb, gdp,
                                ext4_free_clusters_after_init(sb, group, gdp));
-                       gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
-                                                               gdp);
+                       ext4_block_bitmap_csum_set(sb, group, gdp,
+                                                  block_bitmap_bh,
+                                                  EXT4_BLOCKS_PER_GROUP(sb) /
+                                                  8);
+                       ext4_group_desc_csum_set(sb, group, gdp);
                }
                ext4_unlock_group(sb, group);
 
@@ -751,7 +776,7 @@ got:
                goto fail;
 
        /* Update the relevant bg descriptor fields */
-       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
+       if (ext4_has_group_desc_csum(sb)) {
                int free;
                struct ext4_group_info *grp = ext4_get_group_info(sb, group);
 
@@ -772,7 +797,10 @@ got:
                        ext4_itable_unused_set(sb, gdp,
                                        (EXT4_INODES_PER_GROUP(sb) - ino));
                up_read(&grp->alloc_sem);
+       } else {
+               ext4_lock_group(sb, group);
        }
+
        ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
        if (S_ISDIR(mode)) {
                ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
@@ -782,10 +810,12 @@ got:
                        atomic_inc(&sbi->s_flex_groups[f].used_dirs);
                }
        }
-       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
-               gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
-               ext4_unlock_group(sb, group);
+       if (ext4_has_group_desc_csum(sb)) {
+               ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
+                                          EXT4_INODES_PER_GROUP(sb) / 8);
+               ext4_group_desc_csum_set(sb, group, gdp);
        }
+       ext4_unlock_group(sb, group);
 
        BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
        err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
@@ -850,6 +880,19 @@ got:
        inode->i_generation = sbi->s_next_generation++;
        spin_unlock(&sbi->s_next_gen_lock);
 
+       /* Precompute checksum seed for inode metadata */
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+               __u32 csum;
+               struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+               __le32 inum = cpu_to_le32(inode->i_ino);
+               __le32 gen = cpu_to_le32(inode->i_generation);
+               csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
+                                  sizeof(inum));
+               ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
+                                             sizeof(gen));
+       }
+
        ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
        ext4_set_inode_state(inode, EXT4_STATE_NEW);
 
@@ -1140,7 +1183,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
 skip_zeroout:
        ext4_lock_group(sb, group);
        gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
-       gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+       ext4_group_desc_csum_set(sb, group, gdp);
        ext4_unlock_group(sb, group);
 
        BUFFER_TRACE(group_desc_bh,
index 07eaf565fdcb2ad4fba4f92c6fe55a01b2fea17b..02bc8cbe7281b3d47c3449a1c4b8e4220685ba52 100644 (file)
 
 #define MPAGE_DA_EXTENT_TAIL 0x01
 
+static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
+                             struct ext4_inode_info *ei)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       __u16 csum_lo;
+       __u16 csum_hi = 0;
+       __u32 csum;
+
+       csum_lo = raw->i_checksum_lo;
+       raw->i_checksum_lo = 0;
+       if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+           EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
+               csum_hi = raw->i_checksum_hi;
+               raw->i_checksum_hi = 0;
+       }
+
+       csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
+                          EXT4_INODE_SIZE(inode->i_sb));
+
+       raw->i_checksum_lo = csum_lo;
+       if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+           EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
+               raw->i_checksum_hi = csum_hi;
+
+       return csum;
+}
+
+static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
+                                 struct ext4_inode_info *ei)
+{
+       __u32 provided, calculated;
+
+       if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
+           cpu_to_le32(EXT4_OS_LINUX) ||
+           !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       provided = le16_to_cpu(raw->i_checksum_lo);
+       calculated = ext4_inode_csum(inode, raw, ei);
+       if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+           EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
+               provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
+       else
+               calculated &= 0xFFFF;
+
+       return provided == calculated;
+}
+
+static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
+                               struct ext4_inode_info *ei)
+{
+       __u32 csum;
+
+       if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
+           cpu_to_le32(EXT4_OS_LINUX) ||
+           !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       csum = ext4_inode_csum(inode, raw, ei);
+       raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
+       if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+           EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
+               raw->i_checksum_hi = cpu_to_le16(csum >> 16);
+}
+
 static inline int ext4_begin_ordered_truncate(struct inode *inode,
                                              loff_t new_size)
 {
@@ -3517,8 +3584,7 @@ make_io:
                                b = table;
                        end = b + EXT4_SB(sb)->s_inode_readahead_blks;
                        num = EXT4_INODES_PER_GROUP(sb);
-                       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
-                                      EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+                       if (ext4_has_group_desc_csum(sb))
                                num -= ext4_itable_unused_count(sb, gdp);
                        table += num / inodes_per_block;
                        if (end > table)
@@ -3646,6 +3712,39 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
        if (ret < 0)
                goto bad_inode;
        raw_inode = ext4_raw_inode(&iloc);
+
+       if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
+               ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
+               if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
+                   EXT4_INODE_SIZE(inode->i_sb)) {
+                       EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
+                               EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
+                               EXT4_INODE_SIZE(inode->i_sb));
+                       ret = -EIO;
+                       goto bad_inode;
+               }
+       } else
+               ei->i_extra_isize = 0;
+
+       /* Precompute checksum seed for inode metadata */
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+               struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+               __u32 csum;
+               __le32 inum = cpu_to_le32(inode->i_ino);
+               __le32 gen = raw_inode->i_generation;
+               csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
+                                  sizeof(inum));
+               ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
+                                             sizeof(gen));
+       }
+
+       if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
+               EXT4_ERROR_INODE(inode, "checksum invalid");
+               ret = -EIO;
+               goto bad_inode;
+       }
+
        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
        i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
@@ -3725,12 +3824,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
        }
 
        if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
-               ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
-               if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
-                   EXT4_INODE_SIZE(inode->i_sb)) {
-                       ret = -EIO;
-                       goto bad_inode;
-               }
                if (ei->i_extra_isize == 0) {
                        /* The extra space is currently unused. Use it. */
                        ei->i_extra_isize = sizeof(struct ext4_inode) -
@@ -3742,8 +3835,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                        if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
                                ext4_set_inode_state(inode, EXT4_STATE_XATTR);
                }
-       } else
-               ei->i_extra_isize = 0;
+       }
 
        EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
        EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
@@ -3942,7 +4034,7 @@ static int ext4_do_update_inode(handle_t *handle,
                        EXT4_SET_RO_COMPAT_FEATURE(sb,
                                        EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
                        ext4_handle_sync(handle);
-                       err = ext4_handle_dirty_super(handle, sb);
+                       err = ext4_handle_dirty_super_now(handle, sb);
                }
        }
        raw_inode->i_generation = cpu_to_le32(inode->i_generation);
@@ -3969,6 +4061,8 @@ static int ext4_do_update_inode(handle_t *handle,
                raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
        }
 
+       ext4_inode_csum_set(inode, raw_inode, ei);
+
        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
        rc = ext4_handle_dirty_metadata(handle, NULL, bh);
        if (!err)
@@ -4213,7 +4307,8 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
         * will return the blocks that include the delayed allocation
         * blocks for this file.
         */
-       delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+       delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
+                               EXT4_I(inode)->i_reserved_data_blocks);
 
        stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
        return 0;
index 6eee25591b8159bc96d35a16f94f94c0855a35b9..8ad112ae0ade2f21a953ccc03b687939b0b81310 100644 (file)
@@ -38,7 +38,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                handle_t *handle = NULL;
                int err, migrate = 0;
                struct ext4_iloc iloc;
-               unsigned int oldflags;
+               unsigned int oldflags, mask, i;
                unsigned int jflag;
 
                if (!inode_owner_or_capable(inode))
@@ -115,8 +115,14 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                if (err)
                        goto flags_err;
 
-               flags = flags & EXT4_FL_USER_MODIFIABLE;
-               flags |= oldflags & ~EXT4_FL_USER_MODIFIABLE;
+               for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
+                       if (!(mask & EXT4_FL_USER_MODIFIABLE))
+                               continue;
+                       if (mask & flags)
+                               ext4_set_inode_flag(inode, i);
+                       else
+                               ext4_clear_inode_flag(inode, i);
+               }
                ei->i_flags = flags;
 
                ext4_set_inode_flags(inode);
@@ -152,6 +158,13 @@ flags_out:
                if (!inode_owner_or_capable(inode))
                        return -EPERM;
 
+               if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+                       ext4_warning(sb, "Setting inode version is not "
+                                    "supported with metadata_csum enabled.");
+                       return -ENOTTY;
+               }
+
                err = mnt_want_write_file(filp);
                if (err)
                        return err;
index 99ab428bcfa089822e74b433aee7b1bf4076e34d..1cd6994fc446008b74dc9b77863edf0f24e14c33 100644 (file)
@@ -788,7 +788,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
        int first_block;
        struct super_block *sb;
        struct buffer_head *bhs;
-       struct buffer_head **bh;
+       struct buffer_head **bh = NULL;
        struct inode *inode;
        char *data;
        char *bitmap;
@@ -2375,7 +2375,7 @@ static int ext4_groupinfo_create_slab(size_t size)
        return 0;
 }
 
-int ext4_mb_init(struct super_block *sb, int needs_recovery)
+int ext4_mb_init(struct super_block *sb)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        unsigned i, j;
@@ -2517,6 +2517,9 @@ int ext4_mb_release(struct super_block *sb)
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
 
+       if (sbi->s_proc)
+               remove_proc_entry("mb_groups", sbi->s_proc);
+
        if (sbi->s_group_info) {
                for (i = 0; i < ngroups; i++) {
                        grinfo = ext4_get_group_info(sb, i);
@@ -2564,8 +2567,6 @@ int ext4_mb_release(struct super_block *sb)
        }
 
        free_percpu(sbi->s_locality_groups);
-       if (sbi->s_proc)
-               remove_proc_entry("mb_groups", sbi->s_proc);
 
        return 0;
 }
@@ -2797,7 +2798,9 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
        }
        len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
        ext4_free_group_clusters_set(sb, gdp, len);
-       gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
+       ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh,
+                                  EXT4_BLOCKS_PER_GROUP(sb) / 8);
+       ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
 
        ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
        percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
@@ -3071,13 +3074,9 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
 {
        struct ext4_prealloc_space *pa = ac->ac_pa;
-       int len;
-
-       if (pa && pa->pa_type == MB_INODE_PA) {
-               len = ac->ac_b_ex.fe_len;
-               pa->pa_free += len;
-       }
 
+       if (pa && pa->pa_type == MB_INODE_PA)
+               pa->pa_free += ac->ac_b_ex.fe_len;
 }
 
 /*
@@ -4636,6 +4635,7 @@ do_more:
                 */
                new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
                if (!new_entry) {
+                       ext4_mb_unload_buddy(&e4b);
                        err = -ENOMEM;
                        goto error_return;
                }
@@ -4659,7 +4659,9 @@ do_more:
 
        ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
        ext4_free_group_clusters_set(sb, gdp, ret);
-       gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+       ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
+                                  EXT4_BLOCKS_PER_GROUP(sb) / 8);
+       ext4_group_desc_csum_set(sb, block_group, gdp);
        ext4_unlock_group(sb, block_group);
        percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
 
@@ -4803,7 +4805,9 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
        mb_free_blocks(NULL, &e4b, bit, count);
        blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
        ext4_free_group_clusters_set(sb, desc, blk_free_count);
-       desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
+       ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh,
+                                  EXT4_BLOCKS_PER_GROUP(sb) / 8);
+       ext4_group_desc_csum_set(sb, block_group, desc);
        ext4_unlock_group(sb, block_group);
        percpu_counter_add(&sbi->s_freeclusters_counter,
                           EXT4_B2C(sbi, blocks_freed));
index ed6548d89165e1d9c31118aca21d3e89a3772ab2..f99a1311e84765296b0a0a04534e0be0536915bc 100644 (file)
@@ -6,12 +6,45 @@
 
 #include "ext4.h"
 
+/* Checksumming functions */
+static __u32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       int offset = offsetof(struct mmp_struct, mmp_checksum);
+       __u32 csum;
+
+       csum = ext4_chksum(sbi, sbi->s_csum_seed, (char *)mmp, offset);
+
+       return cpu_to_le32(csum);
+}
+
+int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
+{
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
+}
+
+void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
+{
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
+}
+
 /*
  * Write the MMP block using WRITE_SYNC to try to get the block on-disk
  * faster.
  */
-static int write_mmp_block(struct buffer_head *bh)
+static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
 {
+       struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);
+
+       ext4_mmp_csum_set(sb, mmp);
        mark_buffer_dirty(bh);
        lock_buffer(bh);
        bh->b_end_io = end_buffer_write_sync;
@@ -59,7 +92,8 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
        }
 
        mmp = (struct mmp_struct *)((*bh)->b_data);
-       if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC)
+       if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC ||
+           !ext4_mmp_csum_verify(sb, mmp))
                return -EINVAL;
 
        return 0;
@@ -120,7 +154,7 @@ static int kmmpd(void *data)
                mmp->mmp_time = cpu_to_le64(get_seconds());
                last_update_time = jiffies;
 
-               retval = write_mmp_block(bh);
+               retval = write_mmp_block(sb, bh);
                /*
                 * Don't spew too many error messages. Print one every
                 * (s_mmp_update_interval * 60) seconds.
@@ -200,7 +234,7 @@ static int kmmpd(void *data)
        mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
        mmp->mmp_time = cpu_to_le64(get_seconds());
 
-       retval = write_mmp_block(bh);
+       retval = write_mmp_block(sb, bh);
 
 failed:
        kfree(data);
@@ -299,7 +333,7 @@ skip:
        seq = mmp_new_seq();
        mmp->mmp_seq = cpu_to_le32(seq);
 
-       retval = write_mmp_block(bh);
+       retval = write_mmp_block(sb, bh);
        if (retval)
                goto failed;
 
index e2a3f4b0ff78d6f81fbf2228f12f201e6ab1a024..5845cd97bf8b094b0fc01082279e8d65ee73f241 100644 (file)
@@ -145,6 +145,14 @@ struct dx_map_entry
        u16 size;
 };
 
+/*
+ * This goes at the end of each htree block.
+ */
+struct dx_tail {
+       u32 dt_reserved;
+       __le32 dt_checksum;     /* crc32c(uuid+inum+dirblock) */
+};
+
 static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
 static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
 static inline unsigned dx_get_hash(struct dx_entry *entry);
@@ -180,6 +188,230 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
                             struct inode *inode);
 
+/* checksumming functions */
+#define EXT4_DIRENT_TAIL(block, blocksize) \
+       ((struct ext4_dir_entry_tail *)(((void *)(block)) + \
+                                       ((blocksize) - \
+                                        sizeof(struct ext4_dir_entry_tail))))
+
+static void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
+                                  unsigned int blocksize)
+{
+       memset(t, 0, sizeof(struct ext4_dir_entry_tail));
+       t->det_rec_len = ext4_rec_len_to_disk(
+                       sizeof(struct ext4_dir_entry_tail), blocksize);
+       t->det_reserved_ft = EXT4_FT_DIR_CSUM;
+}
+
+/* Walk through a dirent block to find a checksum "dirent" at the tail */
+static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
+                                                  struct ext4_dir_entry *de)
+{
+       struct ext4_dir_entry_tail *t;
+
+#ifdef PARANOID
+       struct ext4_dir_entry *d, *top;
+
+       d = de;
+       top = (struct ext4_dir_entry *)(((void *)de) +
+               (EXT4_BLOCK_SIZE(inode->i_sb) -
+               sizeof(struct ext4_dir_entry_tail)));
+       while (d < top && d->rec_len)
+               d = (struct ext4_dir_entry *)(((void *)d) +
+                   le16_to_cpu(d->rec_len));
+
+       if (d != top)
+               return NULL;
+
+       t = (struct ext4_dir_entry_tail *)d;
+#else
+       t = EXT4_DIRENT_TAIL(de, EXT4_BLOCK_SIZE(inode->i_sb));
+#endif
+
+       if (t->det_reserved_zero1 ||
+           le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
+           t->det_reserved_zero2 ||
+           t->det_reserved_ft != EXT4_FT_DIR_CSUM)
+               return NULL;
+
+       return t;
+}
+
+static __le32 ext4_dirent_csum(struct inode *inode,
+                              struct ext4_dir_entry *dirent, int size)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       __u32 csum;
+
+       csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
+       return cpu_to_le32(csum);
+}
+
+int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
+{
+       struct ext4_dir_entry_tail *t;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       t = get_dirent_tail(inode, dirent);
+       if (!t) {
+               EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir "
+                                "leaf for checksum.  Please run e2fsck -D.");
+               return 0;
+       }
+
+       if (t->det_checksum != ext4_dirent_csum(inode, dirent,
+                                               (void *)t - (void *)dirent))
+               return 0;
+
+       return 1;
+}
+
+static void ext4_dirent_csum_set(struct inode *inode,
+                                struct ext4_dir_entry *dirent)
+{
+       struct ext4_dir_entry_tail *t;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       t = get_dirent_tail(inode, dirent);
+       if (!t) {
+               EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir "
+                                "leaf for checksum.  Please run e2fsck -D.");
+               return;
+       }
+
+       t->det_checksum = ext4_dirent_csum(inode, dirent,
+                                          (void *)t - (void *)dirent);
+}
+
+static inline int ext4_handle_dirty_dirent_node(handle_t *handle,
+                                               struct inode *inode,
+                                               struct buffer_head *bh)
+{
+       ext4_dirent_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
+       return ext4_handle_dirty_metadata(handle, inode, bh);
+}
+
+static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
+                                              struct ext4_dir_entry *dirent,
+                                              int *offset)
+{
+       struct ext4_dir_entry *dp;
+       struct dx_root_info *root;
+       int count_offset;
+
+       if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
+               count_offset = 8;
+       else if (le16_to_cpu(dirent->rec_len) == 12) {
+               dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
+               if (le16_to_cpu(dp->rec_len) !=
+                   EXT4_BLOCK_SIZE(inode->i_sb) - 12)
+                       return NULL;
+               root = (struct dx_root_info *)(((void *)dp + 12));
+               if (root->reserved_zero ||
+                   root->info_length != sizeof(struct dx_root_info))
+                       return NULL;
+               count_offset = 32;
+       } else
+               return NULL;
+
+       if (offset)
+               *offset = count_offset;
+       return (struct dx_countlimit *)(((void *)dirent) + count_offset);
+}
+
+static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
+                          int count_offset, int count, struct dx_tail *t)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       __u32 csum, old_csum;
+       int size;
+
+       size = count_offset + (count * sizeof(struct dx_entry));
+       old_csum = t->dt_checksum;
+       t->dt_checksum = 0;
+       csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
+       csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail));
+       t->dt_checksum = old_csum;
+
+       return cpu_to_le32(csum);
+}
+
+static int ext4_dx_csum_verify(struct inode *inode,
+                              struct ext4_dir_entry *dirent)
+{
+       struct dx_countlimit *c;
+       struct dx_tail *t;
+       int count_offset, limit, count;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       c = get_dx_countlimit(inode, dirent, &count_offset);
+       if (!c) {
+               EXT4_ERROR_INODE(inode, "dir seems corrupt?  Run e2fsck -D.");
+               return 1;
+       }
+       limit = le16_to_cpu(c->limit);
+       count = le16_to_cpu(c->count);
+       if (count_offset + (limit * sizeof(struct dx_entry)) >
+           EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
+               EXT4_ERROR_INODE(inode, "metadata_csum set but no space for "
+                                "tree checksum found.  Run e2fsck -D.");
+               return 1;
+       }
+       t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
+
+       if (t->dt_checksum != ext4_dx_csum(inode, dirent, count_offset,
+                                           count, t))
+               return 0;
+       return 1;
+}
+
+static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
+{
+       struct dx_countlimit *c;
+       struct dx_tail *t;
+       int count_offset, limit, count;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       c = get_dx_countlimit(inode, dirent, &count_offset);
+       if (!c) {
+               EXT4_ERROR_INODE(inode, "dir seems corrupt?  Run e2fsck -D.");
+               return;
+       }
+       limit = le16_to_cpu(c->limit);
+       count = le16_to_cpu(c->count);
+       if (count_offset + (limit * sizeof(struct dx_entry)) >
+           EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
+               EXT4_ERROR_INODE(inode, "metadata_csum set but no space for "
+                                "tree checksum.  Run e2fsck -D.");
+               return;
+       }
+       t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
+
+       t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t);
+}
+
+static inline int ext4_handle_dirty_dx_node(handle_t *handle,
+                                           struct inode *inode,
+                                           struct buffer_head *bh)
+{
+       ext4_dx_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
+       return ext4_handle_dirty_metadata(handle, inode, bh);
+}
+
 /*
  * p is at least 6 bytes before the end of page
  */
@@ -239,12 +471,20 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
 {
        unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
                EXT4_DIR_REC_LEN(2) - infosize;
+
+       if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               entry_space -= sizeof(struct dx_tail);
        return entry_space / sizeof(struct dx_entry);
 }
 
 static inline unsigned dx_node_limit(struct inode *dir)
 {
        unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
+
+       if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               entry_space -= sizeof(struct dx_tail);
        return entry_space / sizeof(struct dx_entry);
 }
 
@@ -390,6 +630,15 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
                goto fail;
        }
 
+       if (!buffer_verified(bh) &&
+           !ext4_dx_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) {
+               ext4_warning(dir->i_sb, "Root failed checksum");
+               brelse(bh);
+               *err = ERR_BAD_DX_DIR;
+               goto fail;
+       }
+       set_buffer_verified(bh);
+
        entries = (struct dx_entry *) (((char *)&root->info) +
                                       root->info.info_length);
 
@@ -450,6 +699,17 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
                if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
                        goto fail2;
                at = entries = ((struct dx_node *) bh->b_data)->entries;
+
+               if (!buffer_verified(bh) &&
+                   !ext4_dx_csum_verify(dir,
+                                        (struct ext4_dir_entry *)bh->b_data)) {
+                       ext4_warning(dir->i_sb, "Node failed checksum");
+                       brelse(bh);
+                       *err = ERR_BAD_DX_DIR;
+                       goto fail;
+               }
+               set_buffer_verified(bh);
+
                if (dx_get_limit(entries) != dx_node_limit (dir)) {
                        ext4_warning(dir->i_sb,
                                     "dx entry: limit != node limit");
@@ -549,6 +809,15 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
                if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
                                      0, &err)))
                        return err; /* Failure */
+
+               if (!buffer_verified(bh) &&
+                   !ext4_dx_csum_verify(dir,
+                                        (struct ext4_dir_entry *)bh->b_data)) {
+                       ext4_warning(dir->i_sb, "Node failed checksum");
+                       return -EIO;
+               }
+               set_buffer_verified(bh);
+
                p++;
                brelse(p->bh);
                p->bh = bh;
@@ -577,6 +846,11 @@ static int htree_dirblock_to_tree(struct file *dir_file,
        if (!(bh = ext4_bread (NULL, dir, block, 0, &err)))
                return err;
 
+       if (!buffer_verified(bh) &&
+           !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
+               return -EIO;
+       set_buffer_verified(bh);
+
        de = (struct ext4_dir_entry_2 *) bh->b_data;
        top = (struct ext4_dir_entry_2 *) ((char *) de +
                                           dir->i_sb->s_blocksize -
@@ -936,6 +1210,15 @@ restart:
                        brelse(bh);
                        goto next;
                }
+               if (!buffer_verified(bh) &&
+                   !ext4_dirent_csum_verify(dir,
+                               (struct ext4_dir_entry *)bh->b_data)) {
+                       EXT4_ERROR_INODE(dir, "checksumming directory "
+                                        "block %lu", (unsigned long)block);
+                       brelse(bh);
+                       goto next;
+               }
+               set_buffer_verified(bh);
                i = search_dirblock(bh, dir, d_name,
                            block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
                if (i == 1) {
@@ -987,6 +1270,16 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
                if (!(bh = ext4_bread(NULL, dir, block, 0, err)))
                        goto errout;
 
+               if (!buffer_verified(bh) &&
+                   !ext4_dirent_csum_verify(dir,
+                               (struct ext4_dir_entry *)bh->b_data)) {
+                       EXT4_ERROR_INODE(dir, "checksumming directory "
+                                        "block %lu", (unsigned long)block);
+                       brelse(bh);
+                       *err = -EIO;
+                       goto errout;
+               }
+               set_buffer_verified(bh);
                retval = search_dirblock(bh, dir, d_name,
                                         block << EXT4_BLOCK_SIZE_BITS(sb),
                                         res_dir);
@@ -1037,6 +1330,12 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
                        EXT4_ERROR_INODE(dir, "bad inode number: %u", ino);
                        return ERR_PTR(-EIO);
                }
+               if (unlikely(ino == dir->i_ino)) {
+                       EXT4_ERROR_INODE(dir, "'%.*s' linked to parent dir",
+                                        dentry->d_name.len,
+                                        dentry->d_name.name);
+                       return ERR_PTR(-EIO);
+               }
                inode = ext4_iget(dir->i_sb, ino);
                if (inode == ERR_PTR(-ESTALE)) {
                        EXT4_ERROR_INODE(dir,
@@ -1156,8 +1455,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
        char *data1 = (*bh)->b_data, *data2;
        unsigned split, move, size;
        struct ext4_dir_entry_2 *de = NULL, *de2;
+       struct ext4_dir_entry_tail *t;
+       int     csum_size = 0;
        int     err = 0, i;
 
+       if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
+
        bh2 = ext4_append (handle, dir, &newblock, &err);
        if (!(bh2)) {
                brelse(*bh);
@@ -1204,10 +1509,20 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
        /* Fancy dance to stay within two buffers */
        de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
        de = dx_pack_dirents(data1, blocksize);
-       de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
+       de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
+                                          (char *) de,
                                           blocksize);
-       de2->rec_len = ext4_rec_len_to_disk(data2 + blocksize - (char *) de2,
+       de2->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) -
+                                           (char *) de2,
                                            blocksize);
+       if (csum_size) {
+               t = EXT4_DIRENT_TAIL(data2, blocksize);
+               initialize_dirent_tail(t, blocksize);
+
+               t = EXT4_DIRENT_TAIL(data1, blocksize);
+               initialize_dirent_tail(t, blocksize);
+       }
+
        dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
        dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
 
@@ -1218,10 +1533,10 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
                de = de2;
        }
        dx_insert_block(frame, hash2 + continued, newblock);
-       err = ext4_handle_dirty_metadata(handle, dir, bh2);
+       err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
        if (err)
                goto journal_error;
-       err = ext4_handle_dirty_metadata(handle, dir, frame->bh);
+       err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
        if (err)
                goto journal_error;
        brelse(bh2);
@@ -1258,11 +1573,16 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
        unsigned short  reclen;
        int             nlen, rlen, err;
        char            *top;
+       int             csum_size = 0;
+
+       if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
 
        reclen = EXT4_DIR_REC_LEN(namelen);
        if (!de) {
                de = (struct ext4_dir_entry_2 *)bh->b_data;
-               top = bh->b_data + blocksize - reclen;
+               top = bh->b_data + (blocksize - csum_size) - reclen;
                while ((char *) de <= top) {
                        if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
                                return -EIO;
@@ -1295,11 +1615,8 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
                de = de1;
        }
        de->file_type = EXT4_FT_UNKNOWN;
-       if (inode) {
-               de->inode = cpu_to_le32(inode->i_ino);
-               ext4_set_de_type(dir->i_sb, de, inode->i_mode);
-       } else
-               de->inode = 0;
+       de->inode = cpu_to_le32(inode->i_ino);
+       ext4_set_de_type(dir->i_sb, de, inode->i_mode);
        de->name_len = namelen;
        memcpy(de->name, name, namelen);
        /*
@@ -1318,7 +1635,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
        dir->i_version++;
        ext4_mark_inode_dirty(handle, dir);
        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-       err = ext4_handle_dirty_metadata(handle, dir, bh);
+       err = ext4_handle_dirty_dirent_node(handle, dir, bh);
        if (err)
                ext4_std_error(dir->i_sb, err);
        return 0;
@@ -1339,6 +1656,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
        struct dx_frame frames[2], *frame;
        struct dx_entry *entries;
        struct ext4_dir_entry_2 *de, *de2;
+       struct ext4_dir_entry_tail *t;
        char            *data1, *top;
        unsigned        len;
        int             retval;
@@ -1346,6 +1664,11 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
        struct dx_hash_info hinfo;
        ext4_lblk_t  block;
        struct fake_dirent *fde;
+       int             csum_size = 0;
+
+       if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
 
        blocksize =  dir->i_sb->s_blocksize;
        dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
@@ -1366,7 +1689,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
                brelse(bh);
                return -EIO;
        }
-       len = ((char *) root) + blocksize - (char *) de;
+       len = ((char *) root) + (blocksize - csum_size) - (char *) de;
 
        /* Allocate new block for the 0th block's dirents */
        bh2 = ext4_append(handle, dir, &block, &retval);
@@ -1382,8 +1705,15 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
        top = data1 + len;
        while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top)
                de = de2;
-       de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de,
+       de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
+                                          (char *) de,
                                           blocksize);
+
+       if (csum_size) {
+               t = EXT4_DIRENT_TAIL(data1, blocksize);
+               initialize_dirent_tail(t, blocksize);
+       }
+
        /* Initialize the root; the dot dirents already exist */
        de = (struct ext4_dir_entry_2 *) (&root->dotdot);
        de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
@@ -1408,8 +1738,8 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
        frame->bh = bh;
        bh = bh2;
 
-       ext4_handle_dirty_metadata(handle, dir, frame->bh);
-       ext4_handle_dirty_metadata(handle, dir, bh);
+       ext4_handle_dirty_dx_node(handle, dir, frame->bh);
+       ext4_handle_dirty_dirent_node(handle, dir, bh);
 
        de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
        if (!de) {
@@ -1445,11 +1775,17 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
        struct inode *dir = dentry->d_parent->d_inode;
        struct buffer_head *bh;
        struct ext4_dir_entry_2 *de;
+       struct ext4_dir_entry_tail *t;
        struct super_block *sb;
        int     retval;
        int     dx_fallback=0;
        unsigned blocksize;
        ext4_lblk_t block, blocks;
+       int     csum_size = 0;
+
+       if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
 
        sb = dir->i_sb;
        blocksize = sb->s_blocksize;
@@ -1468,6 +1804,11 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
                bh = ext4_bread(handle, dir, block, 0, &retval);
                if(!bh)
                        return retval;
+               if (!buffer_verified(bh) &&
+                   !ext4_dirent_csum_verify(dir,
+                               (struct ext4_dir_entry *)bh->b_data))
+                       return -EIO;
+               set_buffer_verified(bh);
                retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
                if (retval != -ENOSPC) {
                        brelse(bh);
@@ -1484,7 +1825,13 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
                return retval;
        de = (struct ext4_dir_entry_2 *) bh->b_data;
        de->inode = 0;
-       de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize);
+       de->rec_len = ext4_rec_len_to_disk(blocksize - csum_size, blocksize);
+
+       if (csum_size) {
+               t = EXT4_DIRENT_TAIL(bh->b_data, blocksize);
+               initialize_dirent_tail(t, blocksize);
+       }
+
        retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
        brelse(bh);
        if (retval == 0)
@@ -1516,6 +1863,11 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
        if (!(bh = ext4_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
                goto cleanup;
 
+       if (!buffer_verified(bh) &&
+           !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
+               goto journal_error;
+       set_buffer_verified(bh);
+
        BUFFER_TRACE(bh, "get_write_access");
        err = ext4_journal_get_write_access(handle, bh);
        if (err)
@@ -1583,7 +1935,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
                        dxtrace(dx_show_index("node", frames[1].entries));
                        dxtrace(dx_show_index("node",
                               ((struct dx_node *) bh2->b_data)->entries));
-                       err = ext4_handle_dirty_metadata(handle, dir, bh2);
+                       err = ext4_handle_dirty_dx_node(handle, dir, bh2);
                        if (err)
                                goto journal_error;
                        brelse (bh2);
@@ -1609,7 +1961,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
                        if (err)
                                goto journal_error;
                }
-               err = ext4_handle_dirty_metadata(handle, dir, frames[0].bh);
+               err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh);
                if (err) {
                        ext4_std_error(inode->i_sb, err);
                        goto cleanup;
@@ -1641,12 +1993,17 @@ static int ext4_delete_entry(handle_t *handle,
 {
        struct ext4_dir_entry_2 *de, *pde;
        unsigned int blocksize = dir->i_sb->s_blocksize;
+       int csum_size = 0;
        int i, err;
 
+       if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
+
        i = 0;
        pde = NULL;
        de = (struct ext4_dir_entry_2 *) bh->b_data;
-       while (i < bh->b_size) {
+       while (i < bh->b_size - csum_size) {
                if (ext4_check_dir_entry(dir, NULL, de, bh, i))
                        return -EIO;
                if (de == de_del)  {
@@ -1667,7 +2024,7 @@ static int ext4_delete_entry(handle_t *handle,
                                de->inode = 0;
                        dir->i_version++;
                        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-                       err = ext4_handle_dirty_metadata(handle, dir, bh);
+                       err = ext4_handle_dirty_dirent_node(handle, dir, bh);
                        if (unlikely(err)) {
                                ext4_std_error(dir->i_sb, err);
                                return err;
@@ -1809,9 +2166,15 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        struct inode *inode;
        struct buffer_head *dir_block = NULL;
        struct ext4_dir_entry_2 *de;
+       struct ext4_dir_entry_tail *t;
        unsigned int blocksize = dir->i_sb->s_blocksize;
+       int csum_size = 0;
        int err, retries = 0;
 
+       if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
+
        if (EXT4_DIR_LINK_MAX(dir))
                return -EMLINK;
 
@@ -1852,16 +2215,24 @@ retry:
        ext4_set_de_type(dir->i_sb, de, S_IFDIR);
        de = ext4_next_entry(de, blocksize);
        de->inode = cpu_to_le32(dir->i_ino);
-       de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(1),
+       de->rec_len = ext4_rec_len_to_disk(blocksize -
+                                          (csum_size + EXT4_DIR_REC_LEN(1)),
                                           blocksize);
        de->name_len = 2;
        strcpy(de->name, "..");
        ext4_set_de_type(dir->i_sb, de, S_IFDIR);
        set_nlink(inode, 2);
+
+       if (csum_size) {
+               t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
+               initialize_dirent_tail(t, blocksize);
+       }
+
        BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
-       err = ext4_handle_dirty_metadata(handle, inode, dir_block);
+       err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
        if (err)
                goto out_clear_inode;
+       set_buffer_verified(dir_block);
        err = ext4_mark_inode_dirty(handle, inode);
        if (!err)
                err = ext4_add_entry(handle, dentry, inode);
@@ -1911,6 +2282,14 @@ static int empty_dir(struct inode *inode)
                                     inode->i_ino);
                return 1;
        }
+       if (!buffer_verified(bh) &&
+           !ext4_dirent_csum_verify(inode,
+                       (struct ext4_dir_entry *)bh->b_data)) {
+               EXT4_ERROR_INODE(inode, "checksum error reading directory "
+                                "lblock 0");
+               return -EIO;
+       }
+       set_buffer_verified(bh);
        de = (struct ext4_dir_entry_2 *) bh->b_data;
        de1 = ext4_next_entry(de, sb->s_blocksize);
        if (le32_to_cpu(de->inode) != inode->i_ino ||
@@ -1942,6 +2321,14 @@ static int empty_dir(struct inode *inode)
                                offset += sb->s_blocksize;
                                continue;
                        }
+                       if (!buffer_verified(bh) &&
+                           !ext4_dirent_csum_verify(inode,
+                                       (struct ext4_dir_entry *)bh->b_data)) {
+                               EXT4_ERROR_INODE(inode, "checksum error "
+                                                "reading directory lblock 0");
+                               return -EIO;
+                       }
+                       set_buffer_verified(bh);
                        de = (struct ext4_dir_entry_2 *) bh->b_data;
                }
                if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) {
@@ -2010,7 +2397,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
        /* Insert this inode at the head of the on-disk orphan list... */
        NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
        EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
-       err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+       err = ext4_handle_dirty_super_now(handle, sb);
        rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
        if (!err)
                err = rc;
@@ -2083,7 +2470,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
                if (err)
                        goto out_brelse;
                sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
-               err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
+               err = ext4_handle_dirty_super_now(handle, inode->i_sb);
        } else {
                struct ext4_iloc iloc2;
                struct inode *i_prev =
@@ -2442,6 +2829,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                dir_bh = ext4_bread(handle, old_inode, 0, 0, &retval);
                if (!dir_bh)
                        goto end_rename;
+               if (!buffer_verified(dir_bh) &&
+                   !ext4_dirent_csum_verify(old_inode,
+                               (struct ext4_dir_entry *)dir_bh->b_data))
+                       goto end_rename;
+               set_buffer_verified(dir_bh);
                if (le32_to_cpu(PARENT_INO(dir_bh->b_data,
                                old_dir->i_sb->s_blocksize)) != old_dir->i_ino)
                        goto end_rename;
@@ -2472,7 +2864,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                                        ext4_current_time(new_dir);
                ext4_mark_inode_dirty(handle, new_dir);
                BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
-               retval = ext4_handle_dirty_metadata(handle, new_dir, new_bh);
+               retval = ext4_handle_dirty_dirent_node(handle, new_dir, new_bh);
                if (unlikely(retval)) {
                        ext4_std_error(new_dir->i_sb, retval);
                        goto end_rename;
@@ -2526,7 +2918,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
                                                cpu_to_le32(new_dir->i_ino);
                BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
-               retval = ext4_handle_dirty_metadata(handle, old_inode, dir_bh);
+               retval = ext4_handle_dirty_dirent_node(handle, old_inode,
+                                                      dir_bh);
                if (retval) {
                        ext4_std_error(old_dir->i_sb, retval);
                        goto end_rename;
index 59fa0be272516adf6cbbc94384106690bf710c65..7ea6cbb44121952bf0d4f81f914950ab284dba6b 100644 (file)
@@ -161,6 +161,8 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
        if (flex_gd == NULL)
                goto out3;
 
+       if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
+               goto out2;
        flex_gd->count = flexbg_size;
 
        flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) *
@@ -796,7 +798,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
        ext4_kvfree(o_group_desc);
 
        le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-       err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+       err = ext4_handle_dirty_super_now(handle, sb);
        if (err)
                ext4_std_error(sb, err);
 
@@ -968,6 +970,8 @@ static void update_backups(struct super_block *sb,
                goto exit_err;
        }
 
+       ext4_superblock_csum_set(sb, (struct ext4_super_block *)data);
+
        while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last) {
                struct buffer_head *bh;
 
@@ -1067,6 +1071,54 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
        return err;
 }
 
+static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
+{
+       struct buffer_head *bh = sb_getblk(sb, block);
+       if (!bh)
+               return NULL;
+
+       if (bitmap_uptodate(bh))
+               return bh;
+
+       lock_buffer(bh);
+       if (bh_submit_read(bh) < 0) {
+               unlock_buffer(bh);
+               brelse(bh);
+               return NULL;
+       }
+       unlock_buffer(bh);
+
+       return bh;
+}
+
+static int ext4_set_bitmap_checksums(struct super_block *sb,
+                                    ext4_group_t group,
+                                    struct ext4_group_desc *gdp,
+                                    struct ext4_new_group_data *group_data)
+{
+       struct buffer_head *bh;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 0;
+
+       bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
+       if (!bh)
+               return -EIO;
+       ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
+                                  EXT4_INODES_PER_GROUP(sb) / 8);
+       brelse(bh);
+
+       bh = ext4_get_bitmap(sb, group_data->block_bitmap);
+       if (!bh)
+               return -EIO;
+       ext4_block_bitmap_csum_set(sb, group, gdp, bh,
+                                  EXT4_BLOCKS_PER_GROUP(sb) / 8);
+       brelse(bh);
+
+       return 0;
+}
+
 /*
  * ext4_setup_new_descs() will set up the group descriptor descriptors of a flex bg
  */
@@ -1093,18 +1145,24 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
                 */
                gdb_bh = sbi->s_group_desc[gdb_num];
                /* Update group descriptor block for new group */
-               gdp = (struct ext4_group_desc *)((char *)gdb_bh->b_data +
+               gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
                                                 gdb_off * EXT4_DESC_SIZE(sb));
 
                memset(gdp, 0, EXT4_DESC_SIZE(sb));
                ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
                ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
+               err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
+               if (err) {
+                       ext4_std_error(sb, err);
+                       break;
+               }
+
                ext4_inode_table_set(sb, gdp, group_data->inode_table);
                ext4_free_group_clusters_set(sb, gdp,
                                             EXT4_B2C(sbi, group_data->free_blocks_count));
                ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
                gdp->bg_flags = cpu_to_le16(*bg_flags);
-               gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+               ext4_group_desc_csum_set(sb, group, gdp);
 
                err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
                if (unlikely(err)) {
@@ -1343,17 +1401,14 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
                           (1 + ext4_bg_num_gdb(sb, group + i) +
                            le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
                group_data[i].free_blocks_count = blocks_per_group - overhead;
-               if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
-                                              EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+               if (ext4_has_group_desc_csum(sb))
                        flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
                                               EXT4_BG_INODE_UNINIT;
                else
                        flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
        }
 
-       if (last_group == n_group &&
-           EXT4_HAS_RO_COMPAT_FEATURE(sb,
-                                      EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+       if (last_group == n_group && ext4_has_group_desc_csum(sb))
                /* We need to initialize block bitmap of last group. */
                flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
 
index 436b4223df66a889dc4e8f685cc89195afec8650..eb7aa3e4ef05caf136f24e0565a28e6d1e0a1539 100644 (file)
@@ -112,6 +112,48 @@ static struct file_system_type ext3_fs_type = {
 #define IS_EXT3_SB(sb) (0)
 #endif
 
+static int ext4_verify_csum_type(struct super_block *sb,
+                                struct ext4_super_block *es)
+{
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
+}
+
+static __le32 ext4_superblock_csum(struct super_block *sb,
+                                  struct ext4_super_block *es)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       int offset = offsetof(struct ext4_super_block, s_checksum);
+       __u32 csum;
+
+       csum = ext4_chksum(sbi, ~0, (char *)es, offset);
+
+       return cpu_to_le32(csum);
+}
+
+int ext4_superblock_csum_verify(struct super_block *sb,
+                               struct ext4_super_block *es)
+{
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return 1;
+
+       return es->s_checksum == ext4_superblock_csum(sb, es);
+}
+
+void ext4_superblock_csum_set(struct super_block *sb,
+                             struct ext4_super_block *es)
+{
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       es->s_checksum = ext4_superblock_csum(sb, es);
+}
+
 void *ext4_kvmalloc(size_t size, gfp_t flags)
 {
        void *ret;
@@ -497,6 +539,7 @@ void __ext4_error(struct super_block *sb, const char *function,
        printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
               sb->s_id, function, line, current->comm, &vaf);
        va_end(args);
+       save_error_info(sb, function, line);
 
        ext4_handle_error(sb);
 }
@@ -905,6 +948,8 @@ static void ext4_put_super(struct super_block *sb)
        unlock_super(sb);
        kobject_put(&sbi->s_kobj);
        wait_for_completion(&sbi->s_kobj_unregister);
+       if (sbi->s_chksum_driver)
+               crypto_free_shash(sbi->s_chksum_driver);
        kfree(sbi->s_blockgroup_lock);
        kfree(sbi);
 }
@@ -1007,7 +1052,7 @@ static void destroy_inodecache(void)
 void ext4_clear_inode(struct inode *inode)
 {
        invalidate_inode_buffers(inode);
-       end_writeback(inode);
+       clear_inode(inode);
        dquot_drop(inode);
        ext4_discard_preallocations(inode);
        if (EXT4_I(inode)->jinode) {
@@ -1922,43 +1967,69 @@ failed:
        return 0;
 }
 
-__le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
-                           struct ext4_group_desc *gdp)
+static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
+                                  struct ext4_group_desc *gdp)
 {
+       int offset;
        __u16 crc = 0;
+       __le32 le_group = cpu_to_le32(block_group);
 
-       if (sbi->s_es->s_feature_ro_compat &
-           cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
-               int offset = offsetof(struct ext4_group_desc, bg_checksum);
-               __le32 le_group = cpu_to_le32(block_group);
-
-               crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
-               crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
-               crc = crc16(crc, (__u8 *)gdp, offset);
-               offset += sizeof(gdp->bg_checksum); /* skip checksum */
-               /* for checksum of struct ext4_group_desc do the rest...*/
-               if ((sbi->s_es->s_feature_incompat &
-                    cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) &&
-                   offset < le16_to_cpu(sbi->s_es->s_desc_size))
-                       crc = crc16(crc, (__u8 *)gdp + offset,
-                                   le16_to_cpu(sbi->s_es->s_desc_size) -
-                                       offset);
+       if ((sbi->s_es->s_feature_ro_compat &
+            cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) {
+               /* Use new metadata_csum algorithm */
+               __u16 old_csum;
+               __u32 csum32;
+
+               old_csum = gdp->bg_checksum;
+               gdp->bg_checksum = 0;
+               csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
+                                    sizeof(le_group));
+               csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp,
+                                    sbi->s_desc_size);
+               gdp->bg_checksum = old_csum;
+
+               crc = csum32 & 0xFFFF;
+               goto out;
        }
 
+       /* old crc16 code */
+       offset = offsetof(struct ext4_group_desc, bg_checksum);
+
+       crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
+       crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
+       crc = crc16(crc, (__u8 *)gdp, offset);
+       offset += sizeof(gdp->bg_checksum); /* skip checksum */
+       /* for checksum of struct ext4_group_desc do the rest...*/
+       if ((sbi->s_es->s_feature_incompat &
+            cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) &&
+           offset < le16_to_cpu(sbi->s_es->s_desc_size))
+               crc = crc16(crc, (__u8 *)gdp + offset,
+                           le16_to_cpu(sbi->s_es->s_desc_size) -
+                               offset);
+
+out:
        return cpu_to_le16(crc);
 }
 
-int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group,
+int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
                                struct ext4_group_desc *gdp)
 {
-       if ((sbi->s_es->s_feature_ro_compat &
-            cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) &&
-           (gdp->bg_checksum != ext4_group_desc_csum(sbi, block_group, gdp)))
+       if (ext4_has_group_desc_csum(sb) &&
+           (gdp->bg_checksum != ext4_group_desc_csum(EXT4_SB(sb),
+                                                     block_group, gdp)))
                return 0;
 
        return 1;
 }
 
+void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
+                             struct ext4_group_desc *gdp)
+{
+       if (!ext4_has_group_desc_csum(sb))
+               return;
+       gdp->bg_checksum = ext4_group_desc_csum(EXT4_SB(sb), block_group, gdp);
+}
+
 /* Called at mount-time, super-block is locked */
 static int ext4_check_descriptors(struct super_block *sb,
                                  ext4_group_t *first_not_zeroed)
@@ -2013,7 +2084,7 @@ static int ext4_check_descriptors(struct super_block *sb,
                        return 0;
                }
                ext4_lock_group(sb, i);
-               if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
+               if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                                 "Checksum for group %u failed (%u!=%u)",
                                 i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
@@ -2417,6 +2488,23 @@ static ssize_t sbi_ui_store(struct ext4_attr *a,
        return count;
 }
 
+static ssize_t trigger_test_error(struct ext4_attr *a,
+                                 struct ext4_sb_info *sbi,
+                                 const char *buf, size_t count)
+{
+       int len = count;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (len && buf[len-1] == '\n')
+               len--;
+
+       if (len)
+               ext4_error(sbi->s_sb, "%.*s", len, buf);
+       return count;
+}
+
 #define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \
 static struct ext4_attr ext4_attr_##_name = {                  \
        .attr = {.name = __stringify(_name), .mode = _mode },   \
@@ -2447,6 +2535,7 @@ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
 EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
 EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
 EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
+EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error);
 
 static struct attribute *ext4_attrs[] = {
        ATTR_LIST(delayed_allocation_blocks),
@@ -2461,6 +2550,7 @@ static struct attribute *ext4_attrs[] = {
        ATTR_LIST(mb_stream_req),
        ATTR_LIST(mb_group_prealloc),
        ATTR_LIST(max_writeback_mb_bump),
+       ATTR_LIST(trigger_fs_error),
        NULL,
 };
 
@@ -2957,6 +3047,44 @@ static void ext4_destroy_lazyinit_thread(void)
        kthread_stop(ext4_lazyinit_task);
 }
 
+static int set_journal_csum_feature_set(struct super_block *sb)
+{
+       int ret = 1;
+       int compat, incompat;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+               /* journal checksum v2 */
+               compat = 0;
+               incompat = JBD2_FEATURE_INCOMPAT_CSUM_V2;
+       } else {
+               /* journal checksum v1 */
+               compat = JBD2_FEATURE_COMPAT_CHECKSUM;
+               incompat = 0;
+       }
+
+       if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
+               ret = jbd2_journal_set_features(sbi->s_journal,
+                               compat, 0,
+                               JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
+                               incompat);
+       } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
+               ret = jbd2_journal_set_features(sbi->s_journal,
+                               compat, 0,
+                               incompat);
+               jbd2_journal_clear_features(sbi->s_journal, 0, 0,
+                               JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
+       } else {
+               jbd2_journal_clear_features(sbi->s_journal,
+                               JBD2_FEATURE_COMPAT_CHECKSUM, 0,
+                               JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
+                               JBD2_FEATURE_INCOMPAT_CSUM_V2);
+       }
+
+       return ret;
+}
+
 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 {
        char *orig_data = kstrdup(data, GFP_KERNEL);
@@ -2993,6 +3121,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                goto out_free_orig;
        }
        sb->s_fs_info = sbi;
+       sbi->s_sb = sb;
        sbi->s_mount_opt = 0;
        sbi->s_resuid = make_kuid(&init_user_ns, EXT4_DEF_RESUID);
        sbi->s_resgid = make_kgid(&init_user_ns, EXT4_DEF_RESGID);
@@ -3032,13 +3161,54 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
         * Note: s_es must be initialized as soon as possible because
         *       some ext4 macro-instructions depend on its value
         */
-       es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
+       es = (struct ext4_super_block *) (bh->b_data + offset);
        sbi->s_es = es;
        sb->s_magic = le16_to_cpu(es->s_magic);
        if (sb->s_magic != EXT4_SUPER_MAGIC)
                goto cantfind_ext4;
        sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
 
+       /* Warn if metadata_csum and gdt_csum are both set. */
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
+           EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+               ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are "
+                            "redundant flags; please run fsck.");
+
+       /* Check for a known checksum algorithm */
+       if (!ext4_verify_csum_type(sb, es)) {
+               ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
+                        "unknown checksum algorithm.");
+               silent = 1;
+               goto cantfind_ext4;
+       }
+
+       /* Load the checksum driver */
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+               sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
+               if (IS_ERR(sbi->s_chksum_driver)) {
+                       ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
+                       ret = PTR_ERR(sbi->s_chksum_driver);
+                       sbi->s_chksum_driver = NULL;
+                       goto failed_mount;
+               }
+       }
+
+       /* Check superblock checksum */
+       if (!ext4_superblock_csum_verify(sb, es)) {
+               ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
+                        "invalid superblock checksum.  Run e2fsck?");
+               silent = 1;
+               goto cantfind_ext4;
+       }
+
+       /* Precompute checksum seed for all metadata */
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
+                                              sizeof(es->s_uuid));
+
        /* Set defaults before we parse the mount options */
        def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
        set_opt(sb, INIT_INODE_TABLE);
@@ -3200,7 +3370,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                               "Can't read superblock on 2nd try");
                        goto failed_mount;
                }
-               es = (struct ext4_super_block *)(((char *)bh->b_data) + offset);
+               es = (struct ext4_super_block *)(bh->b_data + offset);
                sbi->s_es = es;
                if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
                        ext4_msg(sb, KERN_ERR,
@@ -3392,6 +3562,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                                          GFP_KERNEL);
        if (sbi->s_group_desc == NULL) {
                ext4_msg(sb, KERN_ERR, "not enough memory");
+               ret = -ENOMEM;
                goto failed_mount;
        }
 
@@ -3449,6 +3620,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        }
        if (err) {
                ext4_msg(sb, KERN_ERR, "insufficient memory");
+               ret = err;
                goto failed_mount3;
        }
 
@@ -3506,26 +3678,17 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                goto no_journal;
        }
 
-       if (ext4_blocks_count(es) > 0xffffffffULL &&
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT) &&
            !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
                                       JBD2_FEATURE_INCOMPAT_64BIT)) {
                ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
                goto failed_mount_wq;
        }
 
-       if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
-               jbd2_journal_set_features(sbi->s_journal,
-                               JBD2_FEATURE_COMPAT_CHECKSUM, 0,
-                               JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
-       } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
-               jbd2_journal_set_features(sbi->s_journal,
-                               JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0);
-               jbd2_journal_clear_features(sbi->s_journal, 0, 0,
-                               JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
-       } else {
-               jbd2_journal_clear_features(sbi->s_journal,
-                               JBD2_FEATURE_COMPAT_CHECKSUM, 0,
-                               JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
+       if (!set_journal_csum_feature_set(sb)) {
+               ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
+                        "feature set");
+               goto failed_mount_wq;
        }
 
        /* We have now updated the journal if required, so we can
@@ -3606,7 +3769,8 @@ no_journal:
                goto failed_mount4;
        }
 
-       ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY);
+       if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
+               sb->s_flags |= MS_RDONLY;
 
        /* determine the minimum size of new large inodes, if present */
        if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
@@ -3641,7 +3805,7 @@ no_journal:
        }
 
        ext4_ext_init(sb);
-       err = ext4_mb_init(sb, needs_recovery);
+       err = ext4_mb_init(sb);
        if (err) {
                ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
                         err);
@@ -3724,6 +3888,8 @@ failed_mount2:
                brelse(sbi->s_group_desc[i]);
        ext4_kvfree(sbi->s_group_desc);
 failed_mount:
+       if (sbi->s_chksum_driver)
+               crypto_free_shash(sbi->s_chksum_driver);
        if (sbi->s_proc) {
                remove_proc_entry("options", sbi->s_proc);
                remove_proc_entry(sb->s_id, ext4_proc_root);
@@ -3847,7 +4013,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
                goto out_bdev;
        }
 
-       es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
+       es = (struct ext4_super_block *) (bh->b_data + offset);
        if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
            !(le32_to_cpu(es->s_feature_incompat) &
              EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
@@ -4039,6 +4205,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
                                &EXT4_SB(sb)->s_freeinodes_counter));
        sb->s_dirt = 0;
        BUFFER_TRACE(sbh, "marking dirty");
+       ext4_superblock_csum_set(sb, es);
        mark_buffer_dirty(sbh);
        if (sync) {
                error = sync_dirty_buffer(sbh);
@@ -4333,7 +4500,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                                struct ext4_group_desc *gdp =
                                        ext4_get_group_desc(sb, g, NULL);
 
-                               if (!ext4_group_desc_csum_verify(sbi, g, gdp)) {
+                               if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
                                        ext4_msg(sb, KERN_ERR,
               "ext4_remount: Checksum for group %u failed (%u!=%u)",
                g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)),
@@ -4758,7 +4925,6 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
                return -EIO;
        }
 
-       mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
        bh = ext4_bread(handle, inode, blk, 1, &err);
        if (!bh)
                goto out;
@@ -4774,16 +4940,13 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
        err = ext4_handle_dirty_metadata(handle, NULL, bh);
        brelse(bh);
 out:
-       if (err) {
-               mutex_unlock(&inode->i_mutex);
+       if (err)
                return err;
-       }
        if (inode->i_size < off + len) {
                i_size_write(inode, off + len);
                EXT4_I(inode)->i_disksize = inode->i_size;
                ext4_mark_inode_dirty(handle, inode);
        }
-       mutex_unlock(&inode->i_mutex);
        return len;
 }
 
index e88748e55c0f246e90ca21c2094303719f83df07..e56c9ed7d6e30d523b7f8e4b638f9190427cf50d 100644 (file)
@@ -122,6 +122,58 @@ const struct xattr_handler *ext4_xattr_handlers[] = {
        NULL
 };
 
+static __le32 ext4_xattr_block_csum(struct inode *inode,
+                                   sector_t block_nr,
+                                   struct ext4_xattr_header *hdr)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       __u32 csum, old;
+
+       old = hdr->h_checksum;
+       hdr->h_checksum = 0;
+       if (le32_to_cpu(hdr->h_refcount) != 1) {
+               block_nr = cpu_to_le64(block_nr);
+               csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&block_nr,
+                                  sizeof(block_nr));
+       } else
+               csum = ei->i_csum_seed;
+       csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
+                          EXT4_BLOCK_SIZE(inode->i_sb));
+       hdr->h_checksum = old;
+       return cpu_to_le32(csum);
+}
+
+static int ext4_xattr_block_csum_verify(struct inode *inode,
+                                       sector_t block_nr,
+                                       struct ext4_xattr_header *hdr)
+{
+       if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
+           (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
+               return 0;
+       return 1;
+}
+
+static void ext4_xattr_block_csum_set(struct inode *inode,
+                                     sector_t block_nr,
+                                     struct ext4_xattr_header *hdr)
+{
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+               EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               return;
+
+       hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
+}
+
+static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
+                                               struct inode *inode,
+                                               struct buffer_head *bh)
+{
+       ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
+       return ext4_handle_dirty_metadata(handle, inode, bh);
+}
+
 static inline const struct xattr_handler *
 ext4_xattr_handler(int name_index)
 {
@@ -156,12 +208,22 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
 }
 
 static inline int
-ext4_xattr_check_block(struct buffer_head *bh)
+ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
 {
+       int error;
+
+       if (buffer_verified(bh))
+               return 0;
+
        if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
            BHDR(bh)->h_blocks != cpu_to_le32(1))
                return -EIO;
-       return ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
+       if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
+               return -EIO;
+       error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
+       if (!error)
+               set_buffer_verified(bh);
+       return error;
 }
 
 static inline int
@@ -224,7 +286,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
                goto cleanup;
        ea_bdebug(bh, "b_count=%d, refcount=%d",
                atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
-       if (ext4_xattr_check_block(bh)) {
+       if (ext4_xattr_check_block(inode, bh)) {
 bad_block:
                EXT4_ERROR_INODE(inode, "bad block %llu",
                                 EXT4_I(inode)->i_file_acl);
@@ -369,7 +431,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
                goto cleanup;
        ea_bdebug(bh, "b_count=%d, refcount=%d",
                atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
-       if (ext4_xattr_check_block(bh)) {
+       if (ext4_xattr_check_block(inode, bh)) {
                EXT4_ERROR_INODE(inode, "bad block %llu",
                                 EXT4_I(inode)->i_file_acl);
                error = -EIO;
@@ -492,7 +554,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
                if (ce)
                        mb_cache_entry_release(ce);
                unlock_buffer(bh);
-               error = ext4_handle_dirty_metadata(handle, inode, bh);
+               error = ext4_handle_dirty_xattr_block(handle, inode, bh);
                if (IS_SYNC(inode))
                        ext4_handle_sync(handle);
                dquot_free_block(inode, 1);
@@ -662,7 +724,7 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
                ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
                        atomic_read(&(bs->bh->b_count)),
                        le32_to_cpu(BHDR(bs->bh)->h_refcount));
-               if (ext4_xattr_check_block(bs->bh)) {
+               if (ext4_xattr_check_block(inode, bs->bh)) {
                        EXT4_ERROR_INODE(inode, "bad block %llu",
                                         EXT4_I(inode)->i_file_acl);
                        error = -EIO;
@@ -725,9 +787,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                        if (error == -EIO)
                                goto bad_block;
                        if (!error)
-                               error = ext4_handle_dirty_metadata(handle,
-                                                                  inode,
-                                                                  bs->bh);
+                               error = ext4_handle_dirty_xattr_block(handle,
+                                                                     inode,
+                                                                     bs->bh);
                        if (error)
                                goto cleanup;
                        goto inserted;
@@ -796,9 +858,9 @@ inserted:
                                ea_bdebug(new_bh, "reusing; refcount now=%d",
                                        le32_to_cpu(BHDR(new_bh)->h_refcount));
                                unlock_buffer(new_bh);
-                               error = ext4_handle_dirty_metadata(handle,
-                                                                  inode,
-                                                                  new_bh);
+                               error = ext4_handle_dirty_xattr_block(handle,
+                                                                     inode,
+                                                                     new_bh);
                                if (error)
                                        goto cleanup_dquot;
                        }
@@ -855,8 +917,8 @@ getblk_failed:
                        set_buffer_uptodate(new_bh);
                        unlock_buffer(new_bh);
                        ext4_xattr_cache_insert(new_bh);
-                       error = ext4_handle_dirty_metadata(handle,
-                                                          inode, new_bh);
+                       error = ext4_handle_dirty_xattr_block(handle,
+                                                             inode, new_bh);
                        if (error)
                                goto cleanup;
                }
@@ -1193,7 +1255,7 @@ retry:
                error = -EIO;
                if (!bh)
                        goto cleanup;
-               if (ext4_xattr_check_block(bh)) {
+               if (ext4_xattr_check_block(inode, bh)) {
                        EXT4_ERROR_INODE(inode, "bad block %llu",
                                         EXT4_I(inode)->i_file_acl);
                        error = -EIO;
index 25b7387ff183f880cdb9ccaf2529ca8c0f218a7b..91f31ca7d9af9df24a965c64bb0271c43a4d4b09 100644 (file)
@@ -27,7 +27,9 @@ struct ext4_xattr_header {
        __le32  h_refcount;     /* reference count */
        __le32  h_blocks;       /* number of disk blocks used */
        __le32  h_hash;         /* hash value of all attributes */
-       __u32   h_reserved[4];  /* zero right now */
+       __le32  h_checksum;     /* crc32c(uuid+id+xattrblock) */
+                               /* id = inum if refcount=1, blknum otherwise */
+       __u32   h_reserved[3];  /* zero right now */
 };
 
 struct ext4_xattr_ibody_header {
index aca191bd5f8fa66bcef77c549b647fce81e28b0f..6eaa28c98ad1e9038dd939f2960d31034d8663a3 100644 (file)
@@ -98,8 +98,8 @@ next:
 
        *bh = sb_bread(sb, phys);
        if (*bh == NULL) {
-               fat_msg(sb, KERN_ERR, "Directory bread(block %llu) failed",
-                      (llu)phys);
+               fat_msg_ratelimit(sb, KERN_ERR,
+                       "Directory bread(block %llu) failed", (llu)phys);
                /* skip this block */
                *pos = (iblock + 1) << sb->s_blocksize_bits;
                goto next;
index 66994f316e18d11f052c85b97922cc1407347a70..fc35c5c69136e805b41ffa6102dc1878d68f7a3f 100644 (file)
@@ -82,6 +82,7 @@ struct msdos_sb_info {
        int fatent_shift;
        struct fatent_operations *fatent_ops;
        struct inode *fat_inode;
+       struct inode *fsinfo_inode;
 
        struct ratelimit_state ratelimit;
 
@@ -334,6 +335,11 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...);
        __fat_fs_error(sb, __ratelimit(&MSDOS_SB(sb)->ratelimit), fmt , ## args)
 __printf(3, 4) __cold
 void fat_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+#define fat_msg_ratelimit(sb, level, fmt, args...)     \
+       do {    \
+                       if (__ratelimit(&MSDOS_SB(sb)->ratelimit))      \
+                               fat_msg(sb, level, fmt, ## args);       \
+        } while (0)
 extern int fat_clusters_flush(struct super_block *sb);
 extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster);
 extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
index 2e81ac0df7e2eae4b30ad9b2e1db360798f92b2c..31f08ab62c562d1926a75183c802793642cd390c 100644 (file)
@@ -308,6 +308,16 @@ void fat_ent_access_init(struct super_block *sb)
        }
 }
 
+static void mark_fsinfo_dirty(struct super_block *sb)
+{
+       struct msdos_sb_info *sbi = MSDOS_SB(sb);
+
+       if (sb->s_flags & MS_RDONLY || sbi->fat_bits != 32)
+               return;
+
+       __mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
+}
+
 static inline int fat_ent_update_ptr(struct super_block *sb,
                                     struct fat_entry *fatent,
                                     int offset, sector_t blocknr)
@@ -498,7 +508,6 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
                                sbi->prev_free = entry;
                                if (sbi->free_clusters != -1)
                                        sbi->free_clusters--;
-                               sb->s_dirt = 1;
 
                                cluster[idx_clus] = entry;
                                idx_clus++;
@@ -520,11 +529,11 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
        /* Couldn't allocate the free entries */
        sbi->free_clusters = 0;
        sbi->free_clus_valid = 1;
-       sb->s_dirt = 1;
        err = -ENOSPC;
 
 out:
        unlock_fat(sbi);
+       mark_fsinfo_dirty(sb);
        fatent_brelse(&fatent);
        if (!err) {
                if (inode_needs_sync(inode))
@@ -549,7 +558,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
        struct fat_entry fatent;
        struct buffer_head *bhs[MAX_BUF_PER_PAGE];
        int i, err, nr_bhs;
-       int first_cl = cluster;
+       int first_cl = cluster, dirty_fsinfo = 0;
 
        nr_bhs = 0;
        fatent_init(&fatent);
@@ -587,7 +596,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
                ops->ent_put(&fatent, FAT_ENT_FREE);
                if (sbi->free_clusters != -1) {
                        sbi->free_clusters++;
-                       sb->s_dirt = 1;
+                       dirty_fsinfo = 1;
                }
 
                if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
@@ -617,6 +626,8 @@ error:
        for (i = 0; i < nr_bhs; i++)
                brelse(bhs[i]);
        unlock_fat(sbi);
+       if (dirty_fsinfo)
+               mark_fsinfo_dirty(sb);
 
        return err;
 }
@@ -677,7 +688,7 @@ int fat_count_free_clusters(struct super_block *sb)
        }
        sbi->free_clusters = free;
        sbi->free_clus_valid = 1;
-       sb->s_dirt = 1;
+       mark_fsinfo_dirty(sb);
        fatent_brelse(&fatent);
 out:
        unlock_fat(sbi);
index 21687e31acc03624a2934b98c3394b20a972b7a9..a3d81ebf6d864a8c2189147e5771c435473b2e42 100644 (file)
@@ -454,42 +454,16 @@ static void fat_evict_inode(struct inode *inode)
                fat_truncate_blocks(inode, 0);
        }
        invalidate_inode_buffers(inode);
-       end_writeback(inode);
+       clear_inode(inode);
        fat_cache_inval_inode(inode);
        fat_detach(inode);
 }
 
-static void fat_write_super(struct super_block *sb)
-{
-       lock_super(sb);
-       sb->s_dirt = 0;
-
-       if (!(sb->s_flags & MS_RDONLY))
-               fat_clusters_flush(sb);
-       unlock_super(sb);
-}
-
-static int fat_sync_fs(struct super_block *sb, int wait)
-{
-       int err = 0;
-
-       if (sb->s_dirt) {
-               lock_super(sb);
-               sb->s_dirt = 0;
-               err = fat_clusters_flush(sb);
-               unlock_super(sb);
-       }
-
-       return err;
-}
-
 static void fat_put_super(struct super_block *sb)
 {
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
 
-       if (sb->s_dirt)
-               fat_write_super(sb);
-
+       iput(sbi->fsinfo_inode);
        iput(sbi->fat_inode);
 
        unload_nls(sbi->nls_disk);
@@ -661,7 +635,18 @@ retry:
 
 static int fat_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
-       return __fat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+       int err;
+
+       if (inode->i_ino == MSDOS_FSINFO_INO) {
+               struct super_block *sb = inode->i_sb;
+
+               lock_super(sb);
+               err = fat_clusters_flush(sb);
+               unlock_super(sb);
+       } else
+               err = __fat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+
+       return err;
 }
 
 int fat_sync_inode(struct inode *inode)
@@ -678,8 +663,6 @@ static const struct super_operations fat_sops = {
        .write_inode    = fat_write_inode,
        .evict_inode    = fat_evict_inode,
        .put_super      = fat_put_super,
-       .write_super    = fat_write_super,
-       .sync_fs        = fat_sync_fs,
        .statfs         = fat_statfs,
        .remount_fs     = fat_remount,
 
@@ -752,10 +735,9 @@ static struct dentry *fat_fh_to_dentry(struct super_block *sb,
 }
 
 static int
-fat_encode_fh(struct dentry *de, __u32 *fh, int *lenp, int connectable)
+fat_encode_fh(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent)
 {
        int len = *lenp;
-       struct inode *inode =  de->d_inode;
        u32 ipos_h, ipos_m, ipos_l;
 
        if (len < 5) {
@@ -771,9 +753,9 @@ fat_encode_fh(struct dentry *de, __u32 *fh, int *lenp, int connectable)
        fh[1] = inode->i_generation;
        fh[2] = ipos_h;
        fh[3] = ipos_m | MSDOS_I(inode)->i_logstart;
-       spin_lock(&de->d_lock);
-       fh[4] = ipos_l | MSDOS_I(de->d_parent->d_inode)->i_logstart;
-       spin_unlock(&de->d_lock);
+       fh[4] = ipos_l;
+       if (parent)
+               fh[4] |= MSDOS_I(parent)->i_logstart;
        return 3;
 }
 
@@ -1244,6 +1226,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
                   void (*setup)(struct super_block *))
 {
        struct inode *root_inode = NULL, *fat_inode = NULL;
+       struct inode *fsinfo_inode = NULL;
        struct buffer_head *bh;
        struct fat_boot_sector *b;
        struct msdos_sb_info *sbi;
@@ -1490,6 +1473,14 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
                goto out_fail;
        MSDOS_I(fat_inode)->i_pos = 0;
        sbi->fat_inode = fat_inode;
+
+       fsinfo_inode = new_inode(sb);
+       if (!fsinfo_inode)
+               goto out_fail;
+       fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
+       sbi->fsinfo_inode = fsinfo_inode;
+       insert_inode_hash(fsinfo_inode);
+
        root_inode = new_inode(sb);
        if (!root_inode)
                goto out_fail;
@@ -1516,6 +1507,8 @@ out_invalid:
                fat_msg(sb, KERN_INFO, "Can't find a valid FAT filesystem");
 
 out_fail:
+       if (fsinfo_inode)
+               iput(fsinfo_inode);
        if (fat_inode)
                iput(fat_inode);
        unload_nls(sbi->nls_io);
index d078b75572a75eb9117092ee5bb752c84e1b38b8..81b70e665bf000412f73aa300890a53823db36f0 100644 (file)
@@ -442,28 +442,24 @@ static int check_fcntl_cmd(unsigned cmd)
 SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
 {      
        struct file *filp;
+       int fput_needed;
        long err = -EBADF;
 
-       filp = fget_raw(fd);
+       filp = fget_raw_light(fd, &fput_needed);
        if (!filp)
                goto out;
 
        if (unlikely(filp->f_mode & FMODE_PATH)) {
-               if (!check_fcntl_cmd(cmd)) {
-                       fput(filp);
-                       goto out;
-               }
+               if (!check_fcntl_cmd(cmd))
+                       goto out1;
        }
 
        err = security_file_fcntl(filp, cmd, arg);
-       if (err) {
-               fput(filp);
-               return err;
-       }
+       if (!err)
+               err = do_fcntl(fd, cmd, arg, filp);
 
-       err = do_fcntl(fd, cmd, arg, filp);
-
-       fput(filp);
+out1:
+       fput_light(filp, fput_needed);
 out:
        return err;
 }
@@ -473,26 +469,21 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
                unsigned long, arg)
 {      
        struct file * filp;
-       long err;
+       long err = -EBADF;
+       int fput_needed;
 
-       err = -EBADF;
-       filp = fget_raw(fd);
+       filp = fget_raw_light(fd, &fput_needed);
        if (!filp)
                goto out;
 
        if (unlikely(filp->f_mode & FMODE_PATH)) {
-               if (!check_fcntl_cmd(cmd)) {
-                       fput(filp);
-                       goto out;
-               }
+               if (!check_fcntl_cmd(cmd))
+                       goto out1;
        }
 
        err = security_file_fcntl(filp, cmd, arg);
-       if (err) {
-               fput(filp);
-               return err;
-       }
-       err = -EBADF;
+       if (err)
+               goto out1;
        
        switch (cmd) {
                case F_GETLK64:
@@ -507,7 +498,8 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
                        err = do_fcntl(fd, cmd, arg, filp);
                        break;
        }
-       fput(filp);
+out1:
+       fput_light(filp, fput_needed);
 out:
        return err;
 }
index 70f2a0fd6aec62b28724d46e356dc0ff871f88b8..a305d9e2d1b2aac05dcd456bdd23885652272439 100644 (file)
@@ -34,7 +34,6 @@ struct files_stat_struct files_stat = {
        .max_files = NR_FILE
 };
 
-DECLARE_LGLOCK(files_lglock);
 DEFINE_LGLOCK(files_lglock);
 
 /* SLAB cache for file structures */
@@ -421,9 +420,9 @@ static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
  */
 void file_sb_list_add(struct file *file, struct super_block *sb)
 {
-       lg_local_lock(files_lglock);
+       lg_local_lock(&files_lglock);
        __file_sb_list_add(file, sb);
-       lg_local_unlock(files_lglock);
+       lg_local_unlock(&files_lglock);
 }
 
 /**
@@ -436,9 +435,9 @@ void file_sb_list_add(struct file *file, struct super_block *sb)
 void file_sb_list_del(struct file *file)
 {
        if (!list_empty(&file->f_u.fu_list)) {
-               lg_local_lock_cpu(files_lglock, file_list_cpu(file));
+               lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
                list_del_init(&file->f_u.fu_list);
-               lg_local_unlock_cpu(files_lglock, file_list_cpu(file));
+               lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
        }
 }
 
@@ -485,7 +484,7 @@ void mark_files_ro(struct super_block *sb)
        struct file *f;
 
 retry:
-       lg_global_lock(files_lglock);
+       lg_global_lock(&files_lglock);
        do_file_list_for_each_entry(sb, f) {
                struct vfsmount *mnt;
                if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
@@ -502,12 +501,12 @@ retry:
                file_release_write(f);
                mnt = mntget(f->f_path.mnt);
                /* This can sleep, so we can't hold the spinlock. */
-               lg_global_unlock(files_lglock);
+               lg_global_unlock(&files_lglock);
                mnt_drop_write(mnt);
                mntput(mnt);
                goto retry;
        } while_file_list_for_each_entry;
-       lg_global_unlock(files_lglock);
+       lg_global_unlock(&files_lglock);
 }
 
 void __init files_init(unsigned long mempages)
@@ -525,6 +524,6 @@ void __init files_init(unsigned long mempages)
        n = (mempages * (PAGE_SIZE / 1024)) / 10;
        files_stat.max_files = max_t(unsigned long, n, NR_FILE);
        files_defer_init();
-       lg_lock_init(files_lglock);
+       lg_lock_init(&files_lglock, "files_lglock");
        percpu_counter_init(&nr_files, 0);
 } 
index cf9ef918a2a96bfe053e44f5c37be566830d6302..ef67c95f12d42511cf91892b8afcf5caddea678c 100644 (file)
@@ -355,6 +355,6 @@ void
 vxfs_evict_inode(struct inode *ip)
 {
        truncate_inode_pages(&ip->i_data, 0);
-       end_writeback(ip);
+       clear_inode(ip);
        call_rcu(&ip->i_rcu, vxfs_i_callback);
 }
index 539f36cf3e4a3b574f6bb977bcbdf387571675b3..8d2fb8c88cf36a196c47f473bcc729510ad89d8e 100644 (file)
@@ -231,11 +231,8 @@ static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
 
 static void inode_sync_complete(struct inode *inode)
 {
-       /*
-        * Prevent speculative execution through
-        * spin_unlock(&wb->list_lock);
-        */
-
+       inode->i_state &= ~I_SYNC;
+       /* Waiters must see I_SYNC cleared before being woken up */
        smp_mb();
        wake_up_bit(&inode->i_state, __I_SYNC);
 }
@@ -329,10 +326,12 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc)
 }
 
 /*
- * Wait for writeback on an inode to complete.
+ * Wait for writeback on an inode to complete. Called with i_lock held.
+ * Caller must make sure inode cannot go away when we drop i_lock.
  */
-static void inode_wait_for_writeback(struct inode *inode,
-                                    struct bdi_writeback *wb)
+static void __inode_wait_for_writeback(struct inode *inode)
+       __releases(inode->i_lock)
+       __acquires(inode->i_lock)
 {
        DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
        wait_queue_head_t *wqh;
@@ -340,70 +339,119 @@ static void inode_wait_for_writeback(struct inode *inode,
        wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
        while (inode->i_state & I_SYNC) {
                spin_unlock(&inode->i_lock);
-               spin_unlock(&wb->list_lock);
                __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
-               spin_lock(&wb->list_lock);
                spin_lock(&inode->i_lock);
        }
 }
 
 /*
- * Write out an inode's dirty pages.  Called under wb->list_lock and
- * inode->i_lock.  Either the caller has an active reference on the inode or
- * the inode has I_WILL_FREE set.
- *
- * If `wait' is set, wait on the writeout.
- *
- * The whole writeout design is quite complex and fragile.  We want to avoid
- * starvation of particular inodes when others are being redirtied, prevent
- * livelocks, etc.
+ * Wait for writeback on an inode to complete. Caller must have inode pinned.
  */
-static int
-writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
-                      struct writeback_control *wbc)
+void inode_wait_for_writeback(struct inode *inode)
 {
-       struct address_space *mapping = inode->i_mapping;
-       long nr_to_write = wbc->nr_to_write;
-       unsigned dirty;
-       int ret;
+       spin_lock(&inode->i_lock);
+       __inode_wait_for_writeback(inode);
+       spin_unlock(&inode->i_lock);
+}
 
-       assert_spin_locked(&wb->list_lock);
-       assert_spin_locked(&inode->i_lock);
+/*
+ * Sleep until I_SYNC is cleared. This function must be called with i_lock
+ * held and drops it. It is aimed for callers not holding any inode reference
+ * so once i_lock is dropped, inode can go away.
+ */
+static void inode_sleep_on_writeback(struct inode *inode)
+       __releases(inode->i_lock)
+{
+       DEFINE_WAIT(wait);
+       wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
+       int sleep;
 
-       if (!atomic_read(&inode->i_count))
-               WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
-       else
-               WARN_ON(inode->i_state & I_WILL_FREE);
+       prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+       sleep = inode->i_state & I_SYNC;
+       spin_unlock(&inode->i_lock);
+       if (sleep)
+               schedule();
+       finish_wait(wqh, &wait);
+}
 
-       if (inode->i_state & I_SYNC) {
+/*
+ * Find proper writeback list for the inode depending on its current state and
+ * possibly also change of its state while we were doing writeback.  Here we
+ * handle things such as livelock prevention or fairness of writeback among
+ * inodes. This function can be called only by flusher thread - noone else
+ * processes all inodes in writeback lists and requeueing inodes behind flusher
+ * thread's back can have unexpected consequences.
+ */
+static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
+                         struct writeback_control *wbc)
+{
+       if (inode->i_state & I_FREEING)
+               return;
+
+       /*
+        * Sync livelock prevention. Each inode is tagged and synced in one
+        * shot. If still dirty, it will be redirty_tail()'ed below.  Update
+        * the dirty time to prevent enqueue and sync it again.
+        */
+       if ((inode->i_state & I_DIRTY) &&
+           (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
+               inode->dirtied_when = jiffies;
+
+       if (wbc->pages_skipped) {
                /*
-                * If this inode is locked for writeback and we are not doing
-                * writeback-for-data-integrity, move it to b_more_io so that
-                * writeback can proceed with the other inodes on s_io.
-                *
-                * We'll have another go at writing back this inode when we
-                * completed a full scan of b_io.
+                * writeback is not making progress due to locked
+                * buffers. Skip this inode for now.
                 */
-               if (wbc->sync_mode != WB_SYNC_ALL) {
+               redirty_tail(inode, wb);
+               return;
+       }
+
+       if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
+               /*
+                * We didn't write back all the pages.  nfs_writepages()
+                * sometimes bales out without doing anything.
+                */
+               if (wbc->nr_to_write <= 0) {
+                       /* Slice used up. Queue for next turn. */
                        requeue_io(inode, wb);
-                       trace_writeback_single_inode_requeue(inode, wbc,
-                                                            nr_to_write);
-                       return 0;
+               } else {
+                       /*
+                        * Writeback blocked by something other than
+                        * congestion. Delay the inode for some time to
+                        * avoid spinning on the CPU (100% iowait)
+                        * retrying writeback of the dirty page/inode
+                        * that cannot be performed immediately.
+                        */
+                       redirty_tail(inode, wb);
                }
-
+       } else if (inode->i_state & I_DIRTY) {
                /*
-                * It's a data-integrity sync.  We must wait.
+                * Filesystems can dirty the inode during writeback operations,
+                * such as delayed allocation during submission or metadata
+                * updates after data IO completion.
                 */
-               inode_wait_for_writeback(inode, wb);
+               redirty_tail(inode, wb);
+       } else {
+               /* The inode is clean. Remove from writeback lists. */
+               list_del_init(&inode->i_wb_list);
        }
+}
 
-       BUG_ON(inode->i_state & I_SYNC);
+/*
+ * Write out an inode and its dirty pages. Do not update the writeback list
+ * linkage. That is left to the caller. The caller is also responsible for
+ * setting I_SYNC flag and calling inode_sync_complete() to clear it.
+ */
+static int
+__writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+                        struct writeback_control *wbc)
+{
+       struct address_space *mapping = inode->i_mapping;
+       long nr_to_write = wbc->nr_to_write;
+       unsigned dirty;
+       int ret;
 
-       /* Set I_SYNC, reset I_DIRTY_PAGES */
-       inode->i_state |= I_SYNC;
-       inode->i_state &= ~I_DIRTY_PAGES;
-       spin_unlock(&inode->i_lock);
-       spin_unlock(&wb->list_lock);
+       WARN_ON(!(inode->i_state & I_SYNC));
 
        ret = do_writepages(mapping, wbc);
 
@@ -424,6 +472,9 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
         * write_inode()
         */
        spin_lock(&inode->i_lock);
+       /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
+       if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+               inode->i_state &= ~I_DIRTY_PAGES;
        dirty = inode->i_state & I_DIRTY;
        inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
        spin_unlock(&inode->i_lock);
@@ -433,60 +484,67 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
                if (ret == 0)
                        ret = err;
        }
+       trace_writeback_single_inode(inode, wbc, nr_to_write);
+       return ret;
+}
+
+/*
+ * Write out an inode's dirty pages. Either the caller has an active reference
+ * on the inode or the inode has I_WILL_FREE set.
+ *
+ * This function is designed to be called for writing back one inode which
+ * we go e.g. from filesystem. Flusher thread uses __writeback_single_inode()
+ * and does more profound writeback list handling in writeback_sb_inodes().
+ */
+static int
+writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+                      struct writeback_control *wbc)
+{
+       int ret = 0;
 
-       spin_lock(&wb->list_lock);
        spin_lock(&inode->i_lock);
-       inode->i_state &= ~I_SYNC;
-       if (!(inode->i_state & I_FREEING)) {
+       if (!atomic_read(&inode->i_count))
+               WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
+       else
+               WARN_ON(inode->i_state & I_WILL_FREE);
+
+       if (inode->i_state & I_SYNC) {
+               if (wbc->sync_mode != WB_SYNC_ALL)
+                       goto out;
                /*
-                * Sync livelock prevention. Each inode is tagged and synced in
-                * one shot. If still dirty, it will be redirty_tail()'ed below.
-                * Update the dirty time to prevent enqueue and sync it again.
+                * It's a data-integrity sync. We must wait. Since callers hold
+                * inode reference or inode has I_WILL_FREE set, it cannot go
+                * away under us.
                 */
-               if ((inode->i_state & I_DIRTY) &&
-                   (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
-                       inode->dirtied_when = jiffies;
-
-               if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
-                       /*
-                        * We didn't write back all the pages.  nfs_writepages()
-                        * sometimes bales out without doing anything.
-                        */
-                       inode->i_state |= I_DIRTY_PAGES;
-                       if (wbc->nr_to_write <= 0) {
-                               /*
-                                * slice used up: queue for next turn
-                                */
-                               requeue_io(inode, wb);
-                       } else {
-                               /*
-                                * Writeback blocked by something other than
-                                * congestion. Delay the inode for some time to
-                                * avoid spinning on the CPU (100% iowait)
-                                * retrying writeback of the dirty page/inode
-                                * that cannot be performed immediately.
-                                */
-                               redirty_tail(inode, wb);
-                       }
-               } else if (inode->i_state & I_DIRTY) {
-                       /*
-                        * Filesystems can dirty the inode during writeback
-                        * operations, such as delayed allocation during
-                        * submission or metadata updates after data IO
-                        * completion.
-                        */
-                       redirty_tail(inode, wb);
-               } else {
-                       /*
-                        * The inode is clean.  At this point we either have
-                        * a reference to the inode or it's on it's way out.
-                        * No need to add it back to the LRU.
-                        */
-                       list_del_init(&inode->i_wb_list);
-               }
+               __inode_wait_for_writeback(inode);
        }
+       WARN_ON(inode->i_state & I_SYNC);
+       /*
+        * Skip inode if it is clean. We don't want to mess with writeback
+        * lists in this function since flusher thread may be doing for example
+        * sync in parallel and if we move the inode, it could get skipped. So
+        * here we make sure inode is on some writeback list and leave it there
+        * unless we have completely cleaned the inode.
+        */
+       if (!(inode->i_state & I_DIRTY))
+               goto out;
+       inode->i_state |= I_SYNC;
+       spin_unlock(&inode->i_lock);
+
+       ret = __writeback_single_inode(inode, wb, wbc);
+
+       spin_lock(&wb->list_lock);
+       spin_lock(&inode->i_lock);
+       /*
+        * If inode is clean, remove it from writeback lists. Otherwise don't
+        * touch it. See comment above for explanation.
+        */
+       if (!(inode->i_state & I_DIRTY))
+               list_del_init(&inode->i_wb_list);
+       spin_unlock(&wb->list_lock);
        inode_sync_complete(inode);
-       trace_writeback_single_inode(inode, wbc, nr_to_write);
+out:
+       spin_unlock(&inode->i_lock);
        return ret;
 }
 
@@ -580,29 +638,57 @@ static long writeback_sb_inodes(struct super_block *sb,
                        redirty_tail(inode, wb);
                        continue;
                }
-               __iget(inode);
+               if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
+                       /*
+                        * If this inode is locked for writeback and we are not
+                        * doing writeback-for-data-integrity, move it to
+                        * b_more_io so that writeback can proceed with the
+                        * other inodes on s_io.
+                        *
+                        * We'll have another go at writing back this inode
+                        * when we completed a full scan of b_io.
+                        */
+                       spin_unlock(&inode->i_lock);
+                       requeue_io(inode, wb);
+                       trace_writeback_sb_inodes_requeue(inode);
+                       continue;
+               }
+               spin_unlock(&wb->list_lock);
+
+               /*
+                * We already requeued the inode if it had I_SYNC set and we
+                * are doing WB_SYNC_NONE writeback. So this catches only the
+                * WB_SYNC_ALL case.
+                */
+               if (inode->i_state & I_SYNC) {
+                       /* Wait for I_SYNC. This function drops i_lock... */
+                       inode_sleep_on_writeback(inode);
+                       /* Inode may be gone, start again */
+                       continue;
+               }
+               inode->i_state |= I_SYNC;
+               spin_unlock(&inode->i_lock);
+
                write_chunk = writeback_chunk_size(wb->bdi, work);
                wbc.nr_to_write = write_chunk;
                wbc.pages_skipped = 0;
 
-               writeback_single_inode(inode, wb, &wbc);
+               /*
+                * We use I_SYNC to pin the inode in memory. While it is set
+                * evict_inode() will wait so the inode cannot be freed.
+                */
+               __writeback_single_inode(inode, wb, &wbc);
 
                work->nr_pages -= write_chunk - wbc.nr_to_write;
                wrote += write_chunk - wbc.nr_to_write;
+               spin_lock(&wb->list_lock);
+               spin_lock(&inode->i_lock);
                if (!(inode->i_state & I_DIRTY))
                        wrote++;
-               if (wbc.pages_skipped) {
-                       /*
-                        * writeback is not making progress due to locked
-                        * buffers.  Skip this inode for now.
-                        */
-                       redirty_tail(inode, wb);
-               }
+               requeue_inode(inode, wb, &wbc);
+               inode_sync_complete(inode);
                spin_unlock(&inode->i_lock);
-               spin_unlock(&wb->list_lock);
-               iput(inode);
-               cond_resched();
-               spin_lock(&wb->list_lock);
+               cond_resched_lock(&wb->list_lock);
                /*
                 * bail out to wb_writeback() often enough to check
                 * background threshold and other termination conditions.
@@ -796,8 +882,10 @@ static long wb_writeback(struct bdi_writeback *wb,
                        trace_writeback_wait(wb->bdi, work);
                        inode = wb_inode(wb->b_more_io.prev);
                        spin_lock(&inode->i_lock);
-                       inode_wait_for_writeback(inode, wb);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&wb->list_lock);
+                       /* This function drops i_lock... */
+                       inode_sleep_on_writeback(inode);
+                       spin_lock(&wb->list_lock);
                }
        }
        spin_unlock(&wb->list_lock);
@@ -1331,7 +1419,6 @@ EXPORT_SYMBOL(sync_inodes_sb);
 int write_inode_now(struct inode *inode, int sync)
 {
        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
-       int ret;
        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
                .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
@@ -1343,12 +1430,7 @@ int write_inode_now(struct inode *inode, int sync)
                wbc.nr_to_write = 0;
 
        might_sleep();
-       spin_lock(&wb->list_lock);
-       spin_lock(&inode->i_lock);
-       ret = writeback_single_inode(inode, wb, &wbc);
-       spin_unlock(&inode->i_lock);
-       spin_unlock(&wb->list_lock);
-       return ret;
+       return writeback_single_inode(inode, wb, &wbc);
 }
 EXPORT_SYMBOL(write_inode_now);
 
@@ -1365,15 +1447,7 @@ EXPORT_SYMBOL(write_inode_now);
  */
 int sync_inode(struct inode *inode, struct writeback_control *wbc)
 {
-       struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
-       int ret;
-
-       spin_lock(&wb->list_lock);
-       spin_lock(&inode->i_lock);
-       ret = writeback_single_inode(inode, wb, wbc);
-       spin_unlock(&inode->i_lock);
-       spin_unlock(&wb->list_lock);
-       return ret;
+       return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
 }
 EXPORT_SYMBOL(sync_inode);
 
index 504e61b7fd7515f8aafe7e3b9edd2c9fa42fd91d..9562109d3a879b3dab50ee27d989f3ae89c8b833 100644 (file)
@@ -962,7 +962,9 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        if (err)
                goto out;
 
-       file_update_time(file);
+       err = file_update_time(file);
+       if (err)
+               goto out;
 
        if (file->f_flags & O_DIRECT) {
                written = generic_file_direct_write(iocb, iov, &nr_segs,
index 26783eb2b1fc9465602cc0f452c49aa3e72d32b5..42678a33b7bb6297ced300f7fbb61696d37628c9 100644 (file)
@@ -122,7 +122,7 @@ static void fuse_destroy_inode(struct inode *inode)
 static void fuse_evict_inode(struct inode *inode)
 {
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
        if (inode->i_sb->s_flags & MS_ACTIVE) {
                struct fuse_conn *fc = get_fuse_conn(inode);
                struct fuse_inode *fi = get_fuse_inode(inode);
@@ -627,12 +627,10 @@ static struct dentry *fuse_get_dentry(struct super_block *sb,
        return ERR_PTR(err);
 }
 
-static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
-                          int connectable)
+static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+                          struct inode *parent)
 {
-       struct inode *inode = dentry->d_inode;
-       bool encode_parent = connectable && !S_ISDIR(inode->i_mode);
-       int len = encode_parent ? 6 : 3;
+       int len = parent ? 6 : 3;
        u64 nodeid;
        u32 generation;
 
@@ -648,14 +646,9 @@ static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
        fh[1] = (u32)(nodeid & 0xffffffff);
        fh[2] = generation;
 
-       if (encode_parent) {
-               struct inode *parent;
-
-               spin_lock(&dentry->d_lock);
-               parent = dentry->d_parent->d_inode;
+       if (parent) {
                nodeid = get_fuse_inode(parent)->nodeid;
                generation = parent->i_generation;
-               spin_unlock(&dentry->d_lock);
 
                fh[3] = (u32)(nodeid >> 32);
                fh[4] = (u32)(nodeid & 0xffffffff);
@@ -663,7 +656,7 @@ static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
        }
 
        *max_len = len;
-       return encode_parent ? 0x82 : 0x81;
+       return parent ? 0x82 : 0x81;
 }
 
 static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
index 70ba891654f8ce3582c456e208feda6d56e90a1a..e8ed6d4a6181132ff47960dc118cd6fb60c1b81c 100644 (file)
 #define GFS2_LARGE_FH_SIZE 8
 #define GFS2_OLD_FH_SIZE 10
 
-static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
-                         int connectable)
+static int gfs2_encode_fh(struct inode *inode, __u32 *p, int *len,
+                         struct inode *parent)
 {
        __be32 *fh = (__force __be32 *)p;
-       struct inode *inode = dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        struct gfs2_inode *ip = GFS2_I(inode);
 
-       if (connectable && (*len < GFS2_LARGE_FH_SIZE)) {
+       if (parent && (*len < GFS2_LARGE_FH_SIZE)) {
                *len = GFS2_LARGE_FH_SIZE;
                return 255;
        } else if (*len < GFS2_SMALL_FH_SIZE) {
@@ -50,14 +49,10 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
        fh[3] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF);
        *len = GFS2_SMALL_FH_SIZE;
 
-       if (!connectable || inode == sb->s_root->d_inode)
+       if (!parent || inode == sb->s_root->d_inode)
                return *len;
 
-       spin_lock(&dentry->d_lock);
-       inode = dentry->d_parent->d_inode;
-       ip = GFS2_I(inode);
-       igrab(inode);
-       spin_unlock(&dentry->d_lock);
+       ip = GFS2_I(parent);
 
        fh[4] = cpu_to_be32(ip->i_no_formal_ino >> 32);
        fh[5] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF);
@@ -65,8 +60,6 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
        fh[7] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF);
        *len = GFS2_LARGE_FH_SIZE;
 
-       iput(inode);
-
        return *len;
 }
 
index 6172fa77ad59acf938d7a82f12c6bd1b78735df0..713e621c240b9e6989ea50b7fa0a0c4f3b5f111d 100644 (file)
@@ -1554,7 +1554,7 @@ out_unlock:
 out:
        /* Case 3 starts here */
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
        gfs2_dir_hash_inval(ip);
        ip->i_gl->gl_object = NULL;
        flush_delayed_work_sync(&ip->i_gl->gl_work);
index 737dbeb64320744aa85f63e117298e6b33362618..761ec06354b4719df8731e098caaca14bbcf8127 100644 (file)
@@ -532,7 +532,7 @@ out:
 void hfs_evict_inode(struct inode *inode)
 {
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
        if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) {
                HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
                iput(HFS_I(inode)->rsrc_inode);
index ceb1c281eefb0f3f5fa17e7358fd51fb6c69e6e5..a9bca4b8768be786346ec1c832ed73e2c3699df3 100644 (file)
@@ -154,7 +154,7 @@ static void hfsplus_evict_inode(struct inode *inode)
 {
        dprint(DBG_INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
        if (HFSPLUS_IS_RSRC(inode)) {
                HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
                iput(HFSPLUS_I(inode)->rsrc_inode);
index 07c516bfea7671853513d9a80f61f81d5f674f91..2afa5bbccf9baf9cc4389d88940cb21516bf8bb8 100644 (file)
@@ -240,7 +240,7 @@ static struct inode *hostfs_alloc_inode(struct super_block *sb)
 static void hostfs_evict_inode(struct inode *inode)
 {
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
        if (HOSTFS_I(inode)->fd != -1) {
                close_file(&HOSTFS_I(inode)->fd);
                HOSTFS_I(inode)->fd = -1;
index 7a5eb2c718c854206d6db419abe4ce7bc61d12c7..cdb84a8380682b5f341138cb6f75e2754434e073 100644 (file)
@@ -16,9 +16,9 @@
 static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
 {
        struct quad_buffer_head qbh;
-       u32 *bmp;
+       __le32 *bmp;
        if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail;
-       if ((cpu_to_le32(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) {
+       if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) {
                hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec);
                goto fail1;
        }
@@ -62,7 +62,7 @@ int hpfs_chk_sectors(struct super_block *s, secno start, int len, char *msg)
 static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward)
 {
        struct quad_buffer_head qbh;
-       unsigned *bmp;
+       __le32 *bmp;
        unsigned bs = near & ~0x3fff;
        unsigned nr = (near & 0x3fff) & ~(n - 1);
        /*unsigned mnr;*/
@@ -236,7 +236,7 @@ static secno alloc_in_dirband(struct super_block *s, secno near)
 int hpfs_alloc_if_possible(struct super_block *s, secno sec)
 {
        struct quad_buffer_head qbh;
-       u32 *bmp;
+       __le32 *bmp;
        if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end;
        if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) {
                bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f)));
@@ -254,7 +254,7 @@ int hpfs_alloc_if_possible(struct super_block *s, secno sec)
 void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
 {
        struct quad_buffer_head qbh;
-       u32 *bmp;
+       __le32 *bmp;
        struct hpfs_sb_info *sbi = hpfs_sb(s);
        /*printk("2 - ");*/
        if (!n) return;
@@ -299,7 +299,7 @@ int hpfs_check_free_dnodes(struct super_block *s, int n)
        int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14;
        int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff;
        int i, j;
-       u32 *bmp;
+       __le32 *bmp;
        struct quad_buffer_head qbh;
        if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
                for (j = 0; j < 512; j++) {
@@ -351,7 +351,7 @@ void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
                hpfs_free_sectors(s, dno, 4);
        } else {
                struct quad_buffer_head qbh;
-               u32 *bmp;
+               __le32 *bmp;
                unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4;
                if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
                        return;
index 08b503e8ed29ec610a098cb9658e1a2ecaa1779c..4bae4a4a60b1936eba70d17d18e7d4a016ed54b9 100644 (file)
@@ -20,7 +20,7 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
        int c1, c2 = 0;
        go_down:
        if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
-       if (btree->internal) {
+       if (bp_internal(btree)) {
                for (i = 0; i < btree->n_used_nodes; i++)
                        if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
                                a = le32_to_cpu(btree->u.internal[i].down);
@@ -82,7 +82,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
                brelse(bh);
                return -1;
        }
-       if (btree->internal) {
+       if (bp_internal(btree)) {
                a = le32_to_cpu(btree->u.internal[n].down);
                btree->u.internal[n].file_secno = cpu_to_le32(-1);
                mark_buffer_dirty(bh);
@@ -129,12 +129,12 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
                }
                if (a == node && fnod) {
                        anode->up = cpu_to_le32(node);
-                       anode->btree.fnode_parent = 1;
+                       anode->btree.flags |= BP_fnode_parent;
                        anode->btree.n_used_nodes = btree->n_used_nodes;
                        anode->btree.first_free = btree->first_free;
                        anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes;
                        memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12);
-                       btree->internal = 1;
+                       btree->flags |= BP_internal;
                        btree->n_free_nodes = 11;
                        btree->n_used_nodes = 1;
                        btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
@@ -184,7 +184,10 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
                        hpfs_free_sectors(s, ra, 1);
                        if ((anode = hpfs_map_anode(s, na, &bh))) {
                                anode->up = cpu_to_le32(up);
-                               anode->btree.fnode_parent = up == node && fnod;
+                               if (up == node && fnod)
+                                       anode->btree.flags |= BP_fnode_parent;
+                               else
+                                       anode->btree.flags &= ~BP_fnode_parent;
                                mark_buffer_dirty(bh);
                                brelse(bh);
                        }
@@ -198,7 +201,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
                if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
                        anode = new_anode;
                        /*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
-                       anode->btree.internal = 1;
+                       anode->btree.flags |= BP_internal;
                        anode->btree.n_used_nodes = 1;
                        anode->btree.n_free_nodes = 59;
                        anode->btree.first_free = cpu_to_le16(16);
@@ -215,7 +218,8 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
        }
        if ((anode = hpfs_map_anode(s, na, &bh))) {
                anode->up = cpu_to_le32(node);
-               if (fnod) anode->btree.fnode_parent = 1;
+               if (fnod)
+                       anode->btree.flags |= BP_fnode_parent;
                mark_buffer_dirty(bh);
                brelse(bh);
        }
@@ -234,18 +238,19 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
        }
        ranode->up = cpu_to_le32(node);
        memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
-       if (fnod) ranode->btree.fnode_parent = 1;
-       ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes;
-       if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
+       if (fnod)
+               ranode->btree.flags |= BP_fnode_parent;
+       ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes;
+       if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
                struct anode *unode;
                if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
                        unode->up = cpu_to_le32(ra);
-                       unode->btree.fnode_parent = 0;
+                       unode->btree.flags &= ~BP_fnode_parent;
                        mark_buffer_dirty(bh1);
                        brelse(bh1);
                }
        }
-       btree->internal = 1;
+       btree->flags |= BP_internal;
        btree->n_free_nodes = fnod ? 10 : 58;
        btree->n_used_nodes = 2;
        btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
@@ -278,7 +283,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
        int d1, d2;
        go_down:
        d2 = 0;
-       while (btree1->internal) {
+       while (bp_internal(btree1)) {
                ano = le32_to_cpu(btree1->u.internal[pos].down);
                if (level) brelse(bh);
                if (hpfs_sb(s)->sb_chk)
@@ -412,13 +417,13 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
                        btree->n_free_nodes = 8;
                        btree->n_used_nodes = 0;
                        btree->first_free = cpu_to_le16(8);
-                       btree->internal = 0;
+                       btree->flags &= ~BP_internal;
                        mark_buffer_dirty(bh);
                } else hpfs_free_sectors(s, f, 1);
                brelse(bh);
                return;
        }
-       while (btree->internal) {
+       while (bp_internal(btree)) {
                nodes = btree->n_used_nodes + btree->n_free_nodes;
                for (i = 0; i < btree->n_used_nodes; i++)
                        if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
@@ -479,13 +484,13 @@ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
        struct extended_attribute *ea;
        struct extended_attribute *ea_end;
        if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
-       if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree);
+       if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree);
        else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
        ea_end = fnode_end_ea(fnode);
        for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
-               if (ea->indirect)
-                       hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
-       hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l));
+               if (ea_indirect(ea))
+                       hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
+       hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l));
        brelse(bh);
        hpfs_free_sectors(s, fno, 1);
 }
index 9ecde27d1e297ed3d42a742352f469f1bf2dfb9a..f49d1498aa2e98d3a1bc46fff59d67ca45f04a4e 100644 (file)
@@ -156,7 +156,6 @@ void hpfs_brelse4(struct quad_buffer_head *qbh)
 
 void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
 {
-       PRINTK(("hpfs_mark_4buffers_dirty\n"));
        memcpy(qbh->bh[0]->b_data, qbh->data, 512);
        memcpy(qbh->bh[1]->b_data, qbh->data + 512, 512);
        memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
index 2fa0089a02a8ec2934cda55cbbae18e50c34a4ea..b8472f803f4e54ea5039b85ac36cfdf33a48925b 100644 (file)
@@ -87,7 +87,7 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        ret = -EIOERROR;
                        goto out;
                }
-               if (!fno->dirflag) {
+               if (!fnode_is_dir(fno)) {
                        e = 1;
                        hpfs_error(inode->i_sb, "not a directory, fnode %08lx",
                                        (unsigned long)inode->i_ino);
index 1e0e2ac30fd3be93f8e5b7a97618f19a52220ec4..3228c524ebe56f948d8896cec23ca6b1284f6303 100644 (file)
@@ -153,7 +153,7 @@ static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno
                }
                de->length = cpu_to_le16(36);
                de->down = 1;
-               *(dnode_secno *)((char *)de + 32) = cpu_to_le32(ptr);
+               *(__le32 *)((char *)de + 32) = cpu_to_le32(ptr);
        }
 }
 
@@ -177,7 +177,7 @@ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d,
        memmove((char *)de + d_size, de, (char *)de_end - (char *)de);
        memset(de, 0, d_size);
        if (down_ptr) {
-               *(dnode_secno *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr);
+               *(__le32 *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr);
                de->down = 1;
        }
        de->length = cpu_to_le16(d_size);
@@ -656,7 +656,7 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
                                del->down = 0;
                                d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) - 4);
                        } else if (down)
-                               *(dnode_secno *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down);
+                               *(__le32 *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down);
                } else goto endm;
                if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) {
                        printk("HPFS: out of memory for dtree balancing\n");
@@ -672,7 +672,7 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
                        de_prev->down = 1;
                        dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) + 4);
                }
-               *(dnode_secno *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown);
+               *(__le32 *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown);
                hpfs_mark_4buffers_dirty(&qbh);
                hpfs_brelse4(&qbh);
                for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4);
@@ -1015,7 +1015,7 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
                kfree(name2);
                return NULL;
        }       
-       if (!upf->dirflag) {
+       if (!fnode_is_dir(upf)) {
                brelse(bh);
                hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up));
                kfree(name2);
index d8b84d113c891bbcfd8416d3f35153983b0549a7..bcaafcd2666ac275d02c2f054023cc537ebd7644 100644 (file)
@@ -23,15 +23,15 @@ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len)
                        return;
                }
                if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return;
-               if (ea->indirect) {
+               if (ea_indirect(ea)) {
                        if (ea_valuelen(ea) != 8) {
-                               hpfs_error(s, "ea->indirect set while ea->valuelen!=8, %s %08x, pos %08x",
+                               hpfs_error(s, "ea_indirect(ea) set while ea->valuelen!=8, %s %08x, pos %08x",
                                        ano ? "anode" : "sectors", a, pos);
                                return;
                        }
                        if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 9, ex+4))
                                return;
-                       hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
+                       hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
                }
                pos += ea->namelen + ea_valuelen(ea) + 5;
        }
@@ -81,7 +81,7 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
        struct extended_attribute *ea_end = fnode_end_ea(fnode);
        for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
                if (!strcmp(ea->name, key)) {
-                       if (ea->indirect)
+                       if (ea_indirect(ea))
                                goto indirect;
                        if (ea_valuelen(ea) >= size)
                                return -EINVAL;
@@ -91,7 +91,7 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
                }
        a = le32_to_cpu(fnode->ea_secno);
        len = le32_to_cpu(fnode->ea_size_l);
-       ano = fnode->ea_anode;
+       ano = fnode_in_anode(fnode);
        pos = 0;
        while (pos < len) {
                ea = (struct extended_attribute *)ex;
@@ -101,10 +101,10 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
                        return -EIO;
                }
                if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return -EIO;
-               if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4))
+               if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4))
                        return -EIO;
                if (!strcmp(ea->name, key)) {
-                       if (ea->indirect)
+                       if (ea_indirect(ea))
                                goto indirect;
                        if (ea_valuelen(ea) >= size)
                                return -EINVAL;
@@ -119,7 +119,7 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
 indirect:
        if (ea_len(ea) >= size)
                return -EINVAL;
-       if (hpfs_ea_read(s, ea_sec(ea), ea->anode, 0, ea_len(ea), buf))
+       if (hpfs_ea_read(s, ea_sec(ea), ea_in_anode(ea), 0, ea_len(ea), buf))
                return -EIO;
        buf[ea_len(ea)] = 0;
        return 0;
@@ -136,8 +136,8 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
        struct extended_attribute *ea_end = fnode_end_ea(fnode);
        for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
                if (!strcmp(ea->name, key)) {
-                       if (ea->indirect)
-                               return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea));
+                       if (ea_indirect(ea))
+                               return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea));
                        if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) {
                                printk("HPFS: out of memory for EA\n");
                                return NULL;
@@ -148,7 +148,7 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
                }
        a = le32_to_cpu(fnode->ea_secno);
        len = le32_to_cpu(fnode->ea_size_l);
-       ano = fnode->ea_anode;
+       ano = fnode_in_anode(fnode);
        pos = 0;
        while (pos < len) {
                char ex[4 + 255 + 1 + 8];
@@ -159,11 +159,11 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
                        return NULL;
                }
                if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return NULL;
-               if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4))
+               if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4))
                        return NULL;
                if (!strcmp(ea->name, key)) {
-                       if (ea->indirect)
-                               return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea));
+                       if (ea_indirect(ea))
+                               return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea));
                        if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) {
                                printk("HPFS: out of memory for EA\n");
                                return NULL;
@@ -199,9 +199,9 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
        struct extended_attribute *ea_end = fnode_end_ea(fnode);
        for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
                if (!strcmp(ea->name, key)) {
-                       if (ea->indirect) {
+                       if (ea_indirect(ea)) {
                                if (ea_len(ea) == size)
-                                       set_indirect_ea(s, ea->anode, ea_sec(ea), data, size);
+                                       set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size);
                        } else if (ea_valuelen(ea) == size) {
                                memcpy(ea_data(ea), data, size);
                        }
@@ -209,7 +209,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                }
        a = le32_to_cpu(fnode->ea_secno);
        len = le32_to_cpu(fnode->ea_size_l);
-       ano = fnode->ea_anode;
+       ano = fnode_in_anode(fnode);
        pos = 0;
        while (pos < len) {
                char ex[4 + 255 + 1 + 8];
@@ -220,12 +220,12 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                        return;
                }
                if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return;
-               if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4))
+               if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4))
                        return;
                if (!strcmp(ea->name, key)) {
-                       if (ea->indirect) {
+                       if (ea_indirect(ea)) {
                                if (ea_len(ea) == size)
-                                       set_indirect_ea(s, ea->anode, ea_sec(ea), data, size);
+                                       set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size);
                        }
                        else {
                                if (ea_valuelen(ea) == size)
@@ -246,7 +246,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
        if (le16_to_cpu(fnode->ea_offs) < 0xc4 || le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200) {
                hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x",
                        (unsigned long)inode->i_ino,
-                       le32_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
+                       le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
                return;
        }
        if ((le16_to_cpu(fnode->ea_size_s) || !le32_to_cpu(fnode->ea_size_l)) &&
@@ -276,7 +276,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                fnode->ea_size_l = cpu_to_le32(le16_to_cpu(fnode->ea_size_s));
                fnode->ea_size_s = cpu_to_le16(0);
                fnode->ea_secno = cpu_to_le32(n);
-               fnode->ea_anode = cpu_to_le32(0);
+               fnode->flags &= ~FNODE_anode;
                mark_buffer_dirty(bh);
                brelse(bh);
        }
@@ -288,9 +288,9 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                        secno q = hpfs_alloc_sector(s, fno, 1, 0);
                        if (!q) goto bail;
                        fnode->ea_secno = cpu_to_le32(q);
-                       fnode->ea_anode = 0;
+                       fnode->flags &= ~FNODE_anode;
                        len++;
-               } else if (!fnode->ea_anode) {
+               } else if (!fnode_in_anode(fnode)) {
                        if (hpfs_alloc_if_possible(s, le32_to_cpu(fnode->ea_secno) + len)) {
                                len++;
                        } else {
@@ -310,7 +310,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                                anode->u.external[0].length = cpu_to_le32(len);
                                mark_buffer_dirty(bh);
                                brelse(bh);
-                               fnode->ea_anode = 1;
+                               fnode->flags |= FNODE_anode;
                                fnode->ea_secno = cpu_to_le32(a_s);*/
                                secno new_sec;
                                int i;
@@ -338,7 +338,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                                len = (pos + 511) >> 9;
                        }
                }
-               if (fnode->ea_anode) {
+               if (fnode_in_anode(fnode)) {
                        if (hpfs_add_sector_to_btree(s, le32_to_cpu(fnode->ea_secno),
                                                     0, len) != -1) {
                                len++;
@@ -351,16 +351,16 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
        h[1] = strlen(key);
        h[2] = size & 0xff;
        h[3] = size >> 8;
-       if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail;
-       if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail;
-       if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail;
+       if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail;
+       if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail;
+       if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail;
        fnode->ea_size_l = cpu_to_le32(pos);
        ret:
        hpfs_i(inode)->i_ea_size += 5 + strlen(key) + size;
        return;
        bail:
        if (le32_to_cpu(fnode->ea_secno))
-               if (fnode->ea_anode) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9);
+               if (fnode_in_anode(fnode)) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9);
                else hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno) + ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9), len - ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9));
        else fnode->ea_secno = fnode->ea_size_l = cpu_to_le32(0);
 }
index 8b0650aae32812bac9abbb581439b592d64949d2..cce025aff1b19b86f824cd47bfb6c9457d583068 100644 (file)
@@ -51,11 +51,11 @@ struct hpfs_boot_block
   u8 n_rootdir_entries[2];
   u8 n_sectors_s[2];
   u8 media_byte;
-  u16 sectors_per_fat;
-  u16 sectors_per_track;
-  u16 heads_per_cyl;
-  u32 n_hidden_sectors;
-  u32 n_sectors_l;             /* size of partition */
+  __le16 sectors_per_fat;
+  __le16 sectors_per_track;
+  __le16 heads_per_cyl;
+  __le32 n_hidden_sectors;
+  __le32 n_sectors_l;          /* size of partition */
   u8 drive_number;
   u8 mbz;
   u8 sig_28h;                  /* 28h */
@@ -63,7 +63,7 @@ struct hpfs_boot_block
   u8 vol_label[11];
   u8 sig_hpfs[8];              /* "HPFS    " */
   u8 pad[448];
-  u16 magic;                   /* aa55 */
+  __le16 magic;                        /* aa55 */
 };
 
 
@@ -75,28 +75,28 @@ struct hpfs_boot_block
 
 struct hpfs_super_block
 {
-  u32 magic;                           /* f995 e849 */
-  u32 magic1;                          /* fa53 e9c5, more magic? */
+  __le32 magic;                                /* f995 e849 */
+  __le32 magic1;                       /* fa53 e9c5, more magic? */
   u8 version;                          /* version of a filesystem  usually 2 */
   u8 funcversion;                      /* functional version - oldest version
                                           of filesystem that can understand
                                           this disk */
-  u16 zero;                            /* 0 */
-  fnode_secno root;                    /* fnode of root directory */
-  secno n_sectors;                     /* size of filesystem */
-  u32 n_badblocks;                     /* number of bad blocks */
-  secno bitmaps;                       /* pointers to free space bit maps */
-  u32 zero1;                           /* 0 */
-  secno badblocks;                     /* bad block list */
-  u32 zero3;                           /* 0 */
-  time32_t last_chkdsk;                        /* date last checked, 0 if never */
-  time32_t last_optimize;              /* date last optimized, 0 if never */
-  secno n_dir_band;                    /* number of sectors in dir band */
-  secno dir_band_start;                        /* first sector in dir band */
-  secno dir_band_end;                  /* last sector in dir band */
-  secno dir_band_bitmap;               /* free space map, 1 dnode per bit */
+  __le16 zero;                         /* 0 */
+  __le32 root;                         /* fnode of root directory */
+  __le32 n_sectors;                    /* size of filesystem */
+  __le32 n_badblocks;                  /* number of bad blocks */
+  __le32 bitmaps;                      /* pointers to free space bit maps */
+  __le32 zero1;                                /* 0 */
+  __le32 badblocks;                    /* bad block list */
+  __le32 zero3;                                /* 0 */
+  __le32 last_chkdsk;                  /* date last checked, 0 if never */
+  __le32 last_optimize;                        /* date last optimized, 0 if never */
+  __le32 n_dir_band;                   /* number of sectors in dir band */
+  __le32 dir_band_start;                       /* first sector in dir band */
+  __le32 dir_band_end;                 /* last sector in dir band */
+  __le32 dir_band_bitmap;              /* free space map, 1 dnode per bit */
   u8 volume_name[32];                  /* not used */
-  secno user_id_table;                 /* 8 preallocated sectors - user id */
+  __le32 user_id_table;                        /* 8 preallocated sectors - user id */
   u32 zero6[103];                      /* 0 */
 };
 
@@ -109,8 +109,8 @@ struct hpfs_super_block
 
 struct hpfs_spare_block
 {
-  u32 magic;                           /* f991 1849 */
-  u32 magic1;                          /* fa52 29c5, more magic? */
+  __le32 magic;                                /* f991 1849 */
+  __le32 magic1;                               /* fa52 29c5, more magic? */
 
 #ifdef __LITTLE_ENDIAN
   u8 dirty: 1;                         /* 0 clean, 1 "improperly stopped" */
@@ -153,21 +153,21 @@ struct hpfs_spare_block
   u8 mm_contlgulty;
   u8 unused;
 
-  secno hotfix_map;                    /* info about remapped bad sectors */
-  u32 n_spares_used;                   /* number of hotfixes */
-  u32 n_spares;                                /* number of spares in hotfix map */
-  u32 n_dnode_spares_free;             /* spare dnodes unused */
-  u32 n_dnode_spares;                  /* length of spare_dnodes[] list,
+  __le32 hotfix_map;                   /* info about remapped bad sectors */
+  __le32 n_spares_used;                        /* number of hotfixes */
+  __le32 n_spares;                     /* number of spares in hotfix map */
+  __le32 n_dnode_spares_free;          /* spare dnodes unused */
+  __le32 n_dnode_spares;               /* length of spare_dnodes[] list,
                                           follows in this block*/
-  secno code_page_dir;                 /* code page directory block */
-  u32 n_code_pages;                    /* number of code pages */
-  u32 super_crc;                       /* on HPFS386 and LAN Server this is
+  __le32 code_page_dir;                        /* code page directory block */
+  __le32 n_code_pages;                 /* number of code pages */
+  __le32 super_crc;                    /* on HPFS386 and LAN Server this is
                                           checksum of superblock, on normal
                                           OS/2 unused */
-  u32 spare_crc;                       /* on HPFS386 checksum of spareblock */
-  u32 zero1[15];                       /* unused */
-  dnode_secno spare_dnodes[100];       /* emergency free dnode list */
-  u32 zero2[1];                                /* room for more? */
+  __le32 spare_crc;                    /* on HPFS386 checksum of spareblock */
+  __le32 zero1[15];                    /* unused */
+  __le32 spare_dnodes[100];            /* emergency free dnode list */
+  __le32 zero2[1];                     /* room for more? */
 };
 
 /* The bad block list is 4 sectors long.  The first word must be zero,
@@ -202,18 +202,18 @@ struct hpfs_spare_block
 
 struct code_page_directory
 {
-  u32 magic;                           /* 4945 21f7 */
-  u32 n_code_pages;                    /* number of pointers following */
-  u32 zero1[2];
+  __le32 magic;                                /* 4945 21f7 */
+  __le32 n_code_pages;                 /* number of pointers following */
+  __le32 zero1[2];
   struct {
-    u16 ix;                            /* index */
-    u16 code_page_number;              /* code page number */
-    u32 bounds;                                /* matches corresponding word
+    __le16 ix;                         /* index */
+    __le16 code_page_number;           /* code page number */
+    __le32 bounds;                     /* matches corresponding word
                                           in data block */
-    secno code_page_data;              /* sector number of a code_page_data
+    __le32 code_page_data;             /* sector number of a code_page_data
                                           containing c.p. array */
-    u16 index;                         /* index in c.p. array in that sector*/
-    u16 unknown;                       /* some unknown value; usually 0;
+    __le16 index;                      /* index in c.p. array in that sector*/
+    __le16 unknown;                    /* some unknown value; usually 0;
                                           2 in Japanese version */
   } array[31];                         /* unknown length */
 };
@@ -224,19 +224,19 @@ struct code_page_directory
 
 struct code_page_data
 {
-  u32 magic;                           /* 8945 21f7 */
-  u32 n_used;                          /* # elements used in c_p_data[] */
-  u32 bounds[3];                       /* looks a bit like
+  __le32 magic;                                /* 8945 21f7 */
+  __le32 n_used;                       /* # elements used in c_p_data[] */
+  __le32 bounds[3];                    /* looks a bit like
                                             (beg1,end1), (beg2,end2)
                                           one byte each */
-  u16 offs[3];                         /* offsets from start of sector
+  __le16 offs[3];                      /* offsets from start of sector
                                           to start of c_p_data[ix] */
   struct {
-    u16 ix;                            /* index */
-    u16 code_page_number;              /* code page number */
-    u16 unknown;                       /* the same as in cp directory */
+    __le16 ix;                         /* index */
+    __le16 code_page_number;           /* code page number */
+    __le16 unknown;                    /* the same as in cp directory */
     u8 map[128];                       /* upcase table for chars 80..ff */
-    u16 zero2;
+    __le16 zero2;
   } code_page[3];
   u8 incognita[78];
 };
@@ -278,8 +278,8 @@ struct code_page_data
 #define DNODE_MAGIC   0x77e40aae
 
 struct dnode {
-  u32 magic;                           /* 77e4 0aae */
-  u32 first_free;                      /* offset from start of dnode to
+  __le32 magic;                                /* 77e4 0aae */
+  __le32 first_free;                   /* offset from start of dnode to
                                           first free dir entry */
 #ifdef __LITTLE_ENDIAN
   u8 root_dnode: 1;                    /* Is it root dnode? */
@@ -293,14 +293,14 @@ struct dnode {
   u8 root_dnode: 1;                    /* Is it root dnode? */
 #endif
   u8 increment_me2[3];
-  secno up;                            /* (root dnode) directory's fnode
+  __le32 up;                           /* (root dnode) directory's fnode
                                           (nonroot) parent dnode */
-  dnode_secno self;                    /* pointer to this dnode */
+  __le32 self;                 /* pointer to this dnode */
   u8 dirent[2028];                     /* one or more dirents */
 };
 
 struct hpfs_dirent {
-  u16 length;                          /* offset to next dirent */
+  __le16 length;                       /* offset to next dirent */
 
 #ifdef __LITTLE_ENDIAN
   u8 first: 1;                         /* set on phony ^A^A (".") entry */
@@ -346,12 +346,12 @@ struct hpfs_dirent {
   u8 read_only: 1;                     /* dos attrib */
 #endif
 
-  fnode_secno fnode;                   /* fnode giving allocation info */
-  time32_t write_date;                 /* mtime */
-  u32 file_size;                       /* file length, bytes */
-  time32_t read_date;                  /* atime */
-  time32_t creation_date;                      /* ctime */
-  u32 ea_size;                         /* total EA length, bytes */
+  __le32 fnode;                                /* fnode giving allocation info */
+  __le32 write_date;                   /* mtime */
+  __le32 file_size;                    /* file length, bytes */
+  __le32 read_date;                    /* atime */
+  __le32 creation_date;                        /* ctime */
+  __le32 ea_size;                      /* total EA length, bytes */
   u8 no_of_acls;                       /* number of ACL's (low 3 bits) */
   u8 ix;                               /* code page index (of filename), see
                                           struct code_page_data */
@@ -375,50 +375,36 @@ struct hpfs_dirent {
 
 struct bplus_leaf_node
 {
-  u32 file_secno;                      /* first file sector in extent */
-  u32 length;                          /* length, sectors */
-  secno disk_secno;                    /* first corresponding disk sector */
+  __le32 file_secno;                   /* first file sector in extent */
+  __le32 length;                       /* length, sectors */
+  __le32 disk_secno;                   /* first corresponding disk sector */
 };
 
 struct bplus_internal_node
 {
-  u32 file_secno;                      /* subtree maps sectors < this  */
-  anode_secno down;                    /* pointer to subtree */
+  __le32 file_secno;                   /* subtree maps sectors < this  */
+  __le32 down;                         /* pointer to subtree */
 };
 
+enum {
+       BP_hbff = 1,
+       BP_fnode_parent = 0x20,
+       BP_binary_search = 0x40,
+       BP_internal = 0x80
+};
 struct bplus_header
 {
-#ifdef __LITTLE_ENDIAN
-  u8 hbff: 1;                  /* high bit of first free entry offset */
-  u8 flag1234: 4;
-  u8 fnode_parent: 1;                  /* ? we're pointed to by an fnode,
-                                          the data btree or some ea or the
-                                          main ea bootage pointer ea_secno */
-                                       /* also can get set in fnodes, which
-                                          may be a chkdsk glitch or may mean
-                                          this bit is irrelevant in fnodes,
-                                          or this interpretation is all wet */
-  u8 binary_search: 1;                 /* suggest binary search (unused) */
-  u8 internal: 1;                      /* 1 -> (internal) tree of anodes
-                                          0 -> (leaf) list of extents */
-#else
-  u8 internal: 1;                      /* 1 -> (internal) tree of anodes
-                                          0 -> (leaf) list of extents */
-  u8 binary_search: 1;                 /* suggest binary search (unused) */
-  u8 fnode_parent: 1;                  /* ? we're pointed to by an fnode,
+  u8 flags;                            /* bit 0 - high bit of first free entry offset
+                                          bit 5 - we're pointed to by an fnode,
                                           the data btree or some ea or the
-                                          main ea bootage pointer ea_secno */
-                                       /* also can get set in fnodes, which
-                                          may be a chkdsk glitch or may mean
-                                          this bit is irrelevant in fnodes,
-                                          or this interpretation is all wet */
-  u8 flag1234: 4;
-  u8 hbff: 1;                  /* high bit of first free entry offset */
-#endif
+                                          main ea bootage pointer ea_secno
+                                          bit 6 - suggest binary search (unused)
+                                          bit 7 - 1 -> (internal) tree of anodes
+                                                  0 -> (leaf) list of extents */
   u8 fill[3];
   u8 n_free_nodes;                     /* free nodes in following array */
   u8 n_used_nodes;                     /* used nodes in following array */
-  u16 first_free;                      /* offset from start of header to
+  __le16 first_free;                   /* offset from start of header to
                                           first free node in array */
   union {
     struct bplus_internal_node internal[0]; /* (internal) 2-word entries giving
@@ -428,6 +414,16 @@ struct bplus_header
   } u;
 };
 
+static inline bool bp_internal(struct bplus_header *bp)
+{
+       return bp->flags & BP_internal;
+}
+
+static inline bool bp_fnode_parent(struct bplus_header *bp)
+{
+       return bp->flags & BP_fnode_parent;
+}
+
 /* fnode: root of allocation b+ tree, and EA's */
 
 /* Every file and every directory has one fnode, pointed to by the directory
@@ -436,62 +432,56 @@ struct bplus_header
 
 #define FNODE_MAGIC 0xf7e40aae
 
+enum {FNODE_anode = cpu_to_le16(2), FNODE_dir = cpu_to_le16(256)};
 struct fnode
 {
-  u32 magic;                           /* f7e4 0aae */
-  u32 zero1[2];                                /* read history */
+  __le32 magic;                                /* f7e4 0aae */
+  __le32 zero1[2];                     /* read history */
   u8 len, name[15];                    /* true length, truncated name */
-  fnode_secno up;                      /* pointer to file's directory fnode */
-  secno acl_size_l;
-  secno acl_secno;
-  u16 acl_size_s;
+  __le32 up;                           /* pointer to file's directory fnode */
+  __le32 acl_size_l;
+  __le32 acl_secno;
+  __le16 acl_size_s;
   u8 acl_anode;
   u8 zero2;                            /* history bit count */
-  u32 ea_size_l;                       /* length of disk-resident ea's */
-  secno ea_secno;                      /* first sector of disk-resident ea's*/
-  u16 ea_size_s;                       /* length of fnode-resident ea's */
-
-#ifdef __LITTLE_ENDIAN
-  u8 flag0: 1;
-  u8 ea_anode: 1;                      /* 1 -> ea_secno is an anode */
-  u8 flag234567: 6;
-#else
-  u8 flag234567: 6;
-  u8 ea_anode: 1;                      /* 1 -> ea_secno is an anode */
-  u8 flag0: 1;
-#endif
+  __le32 ea_size_l;                    /* length of disk-resident ea's */
+  __le32 ea_secno;                     /* first sector of disk-resident ea's*/
+  __le16 ea_size_s;                    /* length of fnode-resident ea's */
 
-#ifdef __LITTLE_ENDIAN
-  u8 dirflag: 1;                       /* 1 -> directory.  first & only extent
-                                          points to dnode. */
-  u8 flag9012345: 7;
-#else
-  u8 flag9012345: 7;
-  u8 dirflag: 1;                       /* 1 -> directory.  first & only extent
+  __le16 flags;                                /* bit 1 set -> ea_secno is an anode */
+                                       /* bit 8 set -> directory.  first & only extent
                                           points to dnode. */
-#endif
-
   struct bplus_header btree;           /* b+ tree, 8 extents or 12 subtrees */
   union {
     struct bplus_leaf_node external[8];
     struct bplus_internal_node internal[12];
   } u;
 
-  u32 file_size;                       /* file length, bytes */
-  u32 n_needea;                                /* number of EA's with NEEDEA set */
+  __le32 file_size;                    /* file length, bytes */
+  __le32 n_needea;                     /* number of EA's with NEEDEA set */
   u8 user_id[16];                      /* unused */
-  u16 ea_offs;                         /* offset from start of fnode
+  __le16 ea_offs;                      /* offset from start of fnode
                                           to first fnode-resident ea */
   u8 dasd_limit_treshhold;
   u8 dasd_limit_delta;
-  u32 dasd_limit;
-  u32 dasd_usage;
+  __le32 dasd_limit;
+  __le32 dasd_usage;
   u8 ea[316];                          /* zero or more EA's, packed together
                                           with no alignment padding.
                                           (Do not use this name, get here
                                           via fnode + ea_offs. I think.) */
 };
 
+static inline bool fnode_in_anode(struct fnode *p)
+{
+       return (p->flags & FNODE_anode) != 0;
+}
+
+static inline bool fnode_is_dir(struct fnode *p)
+{
+       return (p->flags & FNODE_dir) != 0;
+}
+
 
 /* anode: 99.44% pure allocation tree */
 
@@ -499,9 +489,9 @@ struct fnode
 
 struct anode
 {
-  u32 magic;                           /* 37e4 0aae */
-  anode_secno self;                    /* pointer to this anode */
-  secno up;                            /* parent anode or fnode */
+  __le32 magic;                                /* 37e4 0aae */
+  __le32 self;                         /* pointer to this anode */
+  __le32 up;                           /* parent anode or fnode */
 
   struct bplus_header btree;           /* b+tree, 40 extents or 60 subtrees */
   union {
@@ -509,7 +499,7 @@ struct anode
     struct bplus_internal_node internal[60];
   } u;
 
-  u32 fill[3];                         /* unused */
+  __le32 fill[3];                      /* unused */
 };
 
 
@@ -528,32 +518,23 @@ struct anode
    run, or in multiple runs.  Flags in the fnode tell whether the EA list
    is immediate, in a single run, or in multiple runs. */
 
+enum {EA_indirect = 1, EA_anode = 2, EA_needea = 128 };
 struct extended_attribute
 {
-#ifdef __LITTLE_ENDIAN
-  u8 indirect: 1;                      /* 1 -> value gives sector number
+  u8 flags;                            /* bit 0 set -> value gives sector number
                                           where real value starts */
-  u8 anode: 1;                         /* 1 -> sector is an anode
+                                       /* bit 1 set -> sector is an anode
                                           that points to fragmented value */
-  u8 flag23456: 5;
-  u8 needea: 1;                                /* required ea */
-#else
-  u8 needea: 1;                                /* required ea */
-  u8 flag23456: 5;
-  u8 anode: 1;                         /* 1 -> sector is an anode
-                                          that points to fragmented value */
-  u8 indirect: 1;                      /* 1 -> value gives sector number
-                                          where real value starts */
-#endif
+                                       /* bit 7 set -> required ea */
   u8 namelen;                          /* length of name, bytes */
   u8 valuelen_lo;                      /* length of value, bytes */
   u8 valuelen_hi;                      /* length of value, bytes */
-  u8 name[0];
+  u8 name[];
   /*
     u8 name[namelen];                  ascii attrib name
     u8 nul;                            terminating '\0', not counted
     u8 value[valuelen];                        value, arbitrary
-      if this.indirect, valuelen is 8 and the value is
+      if this.flags & 1, valuelen is 8 and the value is
         u32 length;                    real length of value, bytes
         secno secno;                   sector address where it starts
       if this.anode, the above sector number is the root of an anode tree
@@ -561,6 +542,16 @@ struct extended_attribute
   */
 };
 
+static inline bool ea_indirect(struct extended_attribute *ea)
+{
+       return ea->flags & EA_indirect;
+}
+
+static inline bool ea_in_anode(struct extended_attribute *ea)
+{
+       return ea->flags & EA_anode;
+}
+
 /*
    Local Variables:
    comment-column: 40
index de946170ebb1092937a1efd5f8dbd104f064d170..c07ef1f1ced60a0cf295772a218575d9c78e58d1 100644 (file)
 
 #define CHKCOND(x,y) if (!(x)) printk y
 
-#ifdef DBG
-#define PRINTK(x) printk x
-#else
-#undef PRINTK
-#define PRINTK(x)
-#endif
-
 struct hpfs_inode_info {
        loff_t mmu_private;
        ino_t i_parent_dir;     /* (directories) gives fnode of parent dir */
@@ -82,7 +75,7 @@ struct hpfs_sb_info {
        unsigned char *sb_cp_table;     /* code page tables: */
                                        /*      128 bytes uppercasing table & */
                                        /*      128 bytes lowercasing table */
-       unsigned *sb_bmp_dir;           /* main bitmap directory */
+       __le32 *sb_bmp_dir;             /* main bitmap directory */
        unsigned sb_c_bitmap;           /* current bitmap */
        unsigned sb_max_fwd_alloc;      /* max forwad allocation */
        int sb_timeshift;
@@ -100,7 +93,7 @@ struct quad_buffer_head {
 static inline dnode_secno de_down_pointer (struct hpfs_dirent *de)
 {
   CHKCOND(de->down,("HPFS: de_down_pointer: !de->down\n"));
-  return le32_to_cpu(*(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4));
+  return le32_to_cpu(*(__le32 *) ((void *) de + le16_to_cpu(de->length) - 4));
 }
 
 /* The first dir entry in a dnode */
@@ -148,12 +141,12 @@ static inline struct extended_attribute *next_ea(struct extended_attribute *ea)
 
 static inline secno ea_sec(struct extended_attribute *ea)
 {
-       return le32_to_cpu(get_unaligned((secno *)((char *)ea + 9 + ea->namelen)));
+       return le32_to_cpu(get_unaligned((__le32 *)((char *)ea + 9 + ea->namelen)));
 }
 
 static inline secno ea_len(struct extended_attribute *ea)
 {
-       return le32_to_cpu(get_unaligned((secno *)((char *)ea + 5 + ea->namelen)));
+       return le32_to_cpu(get_unaligned((__le32 *)((char *)ea + 5 + ea->namelen)));
 }
 
 static inline char *ea_data(struct extended_attribute *ea)
@@ -178,7 +171,7 @@ static inline void copy_de(struct hpfs_dirent *dst, struct hpfs_dirent *src)
        dst->not_8x3 = n;
 }
 
-static inline unsigned tstbits(u32 *bmp, unsigned b, unsigned n)
+static inline unsigned tstbits(__le32 *bmp, unsigned b, unsigned n)
 {
        int i;
        if ((b >= 0x4000) || (b + n - 1 >= 0x4000)) return n;
@@ -275,10 +268,10 @@ void hpfs_evict_inode(struct inode *);
 
 /* map.c */
 
-unsigned *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *);
-unsigned *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *);
+__le32 *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *);
+__le32 *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *);
 unsigned char *hpfs_load_code_page(struct super_block *, secno);
-secno *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
+__le32 *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
 struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **);
 struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **);
 struct dnode *hpfs_map_dnode(struct super_block *s, dnode_secno, struct quad_buffer_head *);
index 3b2cec29972b167359359de4bd1891975ffd9cc0..ed671e0ea78443b35bb6d1dd3eabc64bd7559c1f 100644 (file)
@@ -110,7 +110,7 @@ void hpfs_read_inode(struct inode *i)
                        }
                }
        }
-       if (fnode->dirflag) {
+       if (fnode_is_dir(fnode)) {
                int n_dnodes, n_subdirs;
                i->i_mode |= S_IFDIR;
                i->i_op = &hpfs_dir_iops;
@@ -299,7 +299,7 @@ void hpfs_write_if_changed(struct inode *inode)
 void hpfs_evict_inode(struct inode *inode)
 {
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
        if (!inode->i_nlink) {
                hpfs_lock(inode->i_sb);
                hpfs_remove_fnode(inode->i_sb, inode->i_ino);
index a790821366a7f045d068fe47df517dc479b0ecce..4acb19d78359d4bec83f90b854680dc3962905cf 100644 (file)
@@ -8,12 +8,12 @@
 
 #include "hpfs_fn.h"
 
-unsigned *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh)
+__le32 *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh)
 {
        return hpfs_map_4sectors(s, hpfs_sb(s)->sb_dmap, qbh, 0);
 }
 
-unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
+__le32 *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
                         struct quad_buffer_head *qbh, char *id)
 {
        secno sec;
@@ -89,18 +89,18 @@ unsigned char *hpfs_load_code_page(struct super_block *s, secno cps)
        return cp_table;
 }
 
-secno *hpfs_load_bitmap_directory(struct super_block *s, secno bmp)
+__le32 *hpfs_load_bitmap_directory(struct super_block *s, secno bmp)
 {
        struct buffer_head *bh;
        int n = (hpfs_sb(s)->sb_fs_size + 0x200000 - 1) >> 21;
        int i;
-       secno *b;
+       __le32 *b;
        if (!(b = kmalloc(n * 512, GFP_KERNEL))) {
                printk("HPFS: can't allocate memory for bitmap directory\n");
                return NULL;
        }       
        for (i=0;i<n;i++) {
-               secno *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1);
+               __le32 *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1);
                if (!d) {
                        kfree(b);
                        return NULL;
@@ -130,16 +130,16 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea
                                        (unsigned long)ino);
                                goto bail;
                        }
-                       if (!fnode->dirflag) {
+                       if (!fnode_is_dir(fnode)) {
                                if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
-                                   (fnode->btree.internal ? 12 : 8)) {
+                                   (bp_internal(&fnode->btree) ? 12 : 8)) {
                                        hpfs_error(s,
                                           "bad number of nodes in fnode %08lx",
                                            (unsigned long)ino);
                                        goto bail;
                                }
                                if (le16_to_cpu(fnode->btree.first_free) !=
-                                   8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) {
+                                   8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) {
                                        hpfs_error(s,
                                            "bad first_free pointer in fnode %08lx",
                                            (unsigned long)ino);
@@ -187,12 +187,12 @@ struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buff
                                goto bail;
                        }
                        if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=
-                           (anode->btree.internal ? 60 : 40)) {
+                           (bp_internal(&anode->btree) ? 60 : 40)) {
                                hpfs_error(s, "bad number of nodes in anode %08x", ano);
                                goto bail;
                        }
                        if (le16_to_cpu(anode->btree.first_free) !=
-                           8 + anode->btree.n_used_nodes * (anode->btree.internal ? 8 : 12)) {
+                           8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) {
                                hpfs_error(s, "bad first_free pointer in anode %08x", ano);
                                goto bail;
                        }
index 30dd7b10b507a077877d58a2bb4d5ada18ee3101..9083ef8af58c162f7fd207f7ef37263b1f35de4f 100644 (file)
@@ -70,7 +70,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        fnode->len = len;
        memcpy(fnode->name, name, len > 15 ? 15 : len);
        fnode->up = cpu_to_le32(dir->i_ino);
-       fnode->dirflag = 1;
+       fnode->flags |= FNODE_dir;
        fnode->btree.n_free_nodes = 7;
        fnode->btree.n_used_nodes = 1;
        fnode->btree.first_free = cpu_to_le16(0x14);
index 54f6eccb79d9ed8c67f7ada5a96867ad4c61b37c..706a12c083ea726a7a268d647ae266b02a3a2ca7 100644 (file)
@@ -572,7 +572,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
                mark_buffer_dirty(bh2);
        }
 
-       if (le32_to_cpu(spareblock->hotfixes_used) || le32_to_cpu(spareblock->n_spares_used)) {
+       if (spareblock->hotfixes_used || spareblock->n_spares_used) {
                if (errs >= 2) {
                        printk("HPFS: Hotfixes not supported here, try chkdsk\n");
                        mark_dirty(s, 0);
@@ -645,7 +645,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
                root->i_mtime.tv_nsec = 0;
                root->i_ctime.tv_sec = local_to_gmt(s, le32_to_cpu(de->creation_date));
                root->i_ctime.tv_nsec = 0;
-               hpfs_i(root)->i_ea_size = le16_to_cpu(de->ea_size);
+               hpfs_i(root)->i_ea_size = le32_to_cpu(de->ea_size);
                hpfs_i(root)->i_parent_dir = root->i_ino;
                if (root->i_size == -1)
                        root->i_size = 2048;
index a80e45a690ac9a07176319abefa3a8d83c906d3d..d4f93b52cec512723447fb1390a9c9b6cca8b45f 100644 (file)
@@ -614,7 +614,7 @@ static struct inode *hppfs_alloc_inode(struct super_block *sb)
 
 void hppfs_evict_inode(struct inode *ino)
 {
-       end_writeback(ino);
+       clear_inode(ino);
        dput(HPPFS_I(ino)->proc_dentry);
        mntput(ino->i_sb->s_fs_info);
 }
index 001ef01d2fe2705a6767075032ea9f9f9ed0ec04..cc9281b6c62893a94cedcb36128dcae427861fcd 100644 (file)
@@ -393,7 +393,7 @@ static void truncate_hugepages(struct inode *inode, loff_t lstart)
 static void hugetlbfs_evict_inode(struct inode *inode)
 {
        truncate_hugepages(inode, 0);
-       end_writeback(inode);
+       clear_inode(inode);
 }
 
 static inline void
index da93f7d160d4f432a144966809c75f6011f97498..c99163b1b31036ef68974c0c5dbc192f8f73f4da 100644 (file)
@@ -486,7 +486,7 @@ void __remove_inode_hash(struct inode *inode)
 }
 EXPORT_SYMBOL(__remove_inode_hash);
 
-void end_writeback(struct inode *inode)
+void clear_inode(struct inode *inode)
 {
        might_sleep();
        /*
@@ -500,11 +500,10 @@ void end_writeback(struct inode *inode)
        BUG_ON(!list_empty(&inode->i_data.private_list));
        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(inode->i_state & I_CLEAR);
-       inode_sync_wait(inode);
        /* don't need i_lock here, no concurrent mods to i_state */
        inode->i_state = I_FREEING | I_CLEAR;
 }
-EXPORT_SYMBOL(end_writeback);
+EXPORT_SYMBOL(clear_inode);
 
 /*
  * Free the inode passed in, removing it from the lists it is still connected
@@ -531,12 +530,20 @@ static void evict(struct inode *inode)
 
        inode_sb_list_del(inode);
 
+       /*
+        * Wait for flusher thread to be done with the inode so that filesystem
+        * does not start destroying it while writeback is still running. Since
+        * the inode has I_FREEING set, flusher thread won't start new work on
+        * the inode.  We just have to wait for running writeback to finish.
+        */
+       inode_wait_for_writeback(inode);
+
        if (op->evict_inode) {
                op->evict_inode(inode);
        } else {
                if (inode->i_data.nrpages)
                        truncate_inode_pages(&inode->i_data, 0);
-               end_writeback(inode);
+               clear_inode(inode);
        }
        if (S_ISBLK(inode->i_mode) && inode->i_bdev)
                bd_forget(inode);
@@ -1480,10 +1487,30 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
        return 0;
 }
 
+/*
+ * This does the actual work of updating an inodes time or version.  Must have
+ * had called mnt_want_write() before calling this.
+ */
+static int update_time(struct inode *inode, struct timespec *time, int flags)
+{
+       if (inode->i_op->update_time)
+               return inode->i_op->update_time(inode, time, flags);
+
+       if (flags & S_ATIME)
+               inode->i_atime = *time;
+       if (flags & S_VERSION)
+               inode_inc_iversion(inode);
+       if (flags & S_CTIME)
+               inode->i_ctime = *time;
+       if (flags & S_MTIME)
+               inode->i_mtime = *time;
+       mark_inode_dirty_sync(inode);
+       return 0;
+}
+
 /**
  *     touch_atime     -       update the access time
- *     @mnt: mount the inode is accessed on
- *     @dentry: dentry accessed
+ *     @path: the &struct path to update
  *
  *     Update the accessed time on an inode and mark it for writeback.
  *     This function automatically handles read only file systems and media,
@@ -1518,12 +1545,83 @@ void touch_atime(struct path *path)
        if (mnt_want_write(mnt))
                return;
 
-       inode->i_atime = now;
-       mark_inode_dirty_sync(inode);
+       /*
+        * File systems can error out when updating inodes if they need to
+        * allocate new space to modify an inode (such is the case for
+        * Btrfs), but since we touch atime while walking down the path we
+        * really don't care if we failed to update the atime of the file,
+        * so just ignore the return value.
+        */
+       update_time(inode, &now, S_ATIME);
        mnt_drop_write(mnt);
 }
 EXPORT_SYMBOL(touch_atime);
 
+/*
+ * The logic we want is
+ *
+ *     if suid or (sgid and xgrp)
+ *             remove privs
+ */
+int should_remove_suid(struct dentry *dentry)
+{
+       umode_t mode = dentry->d_inode->i_mode;
+       int kill = 0;
+
+       /* suid always must be killed */
+       if (unlikely(mode & S_ISUID))
+               kill = ATTR_KILL_SUID;
+
+       /*
+        * sgid without any exec bits is just a mandatory locking mark; leave
+        * it alone.  If some exec bits are set, it's a real sgid; kill it.
+        */
+       if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
+               kill |= ATTR_KILL_SGID;
+
+       if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
+               return kill;
+
+       return 0;
+}
+EXPORT_SYMBOL(should_remove_suid);
+
+static int __remove_suid(struct dentry *dentry, int kill)
+{
+       struct iattr newattrs;
+
+       newattrs.ia_valid = ATTR_FORCE | kill;
+       return notify_change(dentry, &newattrs);
+}
+
+int file_remove_suid(struct file *file)
+{
+       struct dentry *dentry = file->f_path.dentry;
+       struct inode *inode = dentry->d_inode;
+       int killsuid;
+       int killpriv;
+       int error = 0;
+
+       /* Fast path for nothing security related */
+       if (IS_NOSEC(inode))
+               return 0;
+
+       killsuid = should_remove_suid(dentry);
+       killpriv = security_inode_need_killpriv(dentry);
+
+       if (killpriv < 0)
+               return killpriv;
+       if (killpriv)
+               error = security_inode_killpriv(dentry);
+       if (!error && killsuid)
+               error = __remove_suid(dentry, killsuid);
+       if (!error && (inode->i_sb->s_flags & MS_NOSEC))
+               inode->i_flags |= S_NOSEC;
+
+       return error;
+}
+EXPORT_SYMBOL(file_remove_suid);
+
 /**
  *     file_update_time        -       update mtime and ctime time
  *     @file: file accessed
@@ -1533,18 +1631,20 @@ EXPORT_SYMBOL(touch_atime);
  *     usage in the file write path of filesystems, and filesystems may
  *     choose to explicitly ignore update via this function with the
  *     S_NOCMTIME inode flag, e.g. for network filesystem where these
- *     timestamps are handled by the server.
+ *     timestamps are handled by the server.  This can return an error for
+ *     file systems who need to allocate space in order to update an inode.
  */
 
-void file_update_time(struct file *file)
+int file_update_time(struct file *file)
 {
        struct inode *inode = file->f_path.dentry->d_inode;
        struct timespec now;
-       enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
+       int sync_it = 0;
+       int ret;
 
        /* First try to exhaust all avenues to not sync */
        if (IS_NOCMTIME(inode))
-               return;
+               return 0;
 
        now = current_fs_time(inode->i_sb);
        if (!timespec_equal(&inode->i_mtime, &now))
@@ -1557,21 +1657,16 @@ void file_update_time(struct file *file)
                sync_it |= S_VERSION;
 
        if (!sync_it)
-               return;
+               return 0;
 
        /* Finally allowed to write? Takes lock. */
        if (mnt_want_write_file(file))
-               return;
+               return 0;
 
-       /* Only change inode inside the lock region */
-       if (sync_it & S_VERSION)
-               inode_inc_iversion(inode);
-       if (sync_it & S_CTIME)
-               inode->i_ctime = now;
-       if (sync_it & S_MTIME)
-               inode->i_mtime = now;
-       mark_inode_dirty_sync(inode);
+       ret = update_time(inode, &now, sync_it);
        mnt_drop_write_file(file);
+
+       return ret;
 }
 EXPORT_SYMBOL(file_update_time);
 
@@ -1741,3 +1836,50 @@ bool inode_owner_or_capable(const struct inode *inode)
        return false;
 }
 EXPORT_SYMBOL(inode_owner_or_capable);
+
+/*
+ * Direct i/o helper functions
+ */
+static void __inode_dio_wait(struct inode *inode)
+{
+       wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
+       DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
+
+       do {
+               prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
+               if (atomic_read(&inode->i_dio_count))
+                       schedule();
+       } while (atomic_read(&inode->i_dio_count));
+       finish_wait(wq, &q.wait);
+}
+
+/**
+ * inode_dio_wait - wait for outstanding DIO requests to finish
+ * @inode: inode to wait for
+ *
+ * Waits for all pending direct I/O requests to finish so that we can
+ * proceed with a truncate or equivalent operation.
+ *
+ * Must be called under a lock that serializes taking new references
+ * to i_dio_count, usually by inode->i_mutex.
+ */
+void inode_dio_wait(struct inode *inode)
+{
+       if (atomic_read(&inode->i_dio_count))
+               __inode_dio_wait(inode);
+}
+EXPORT_SYMBOL(inode_dio_wait);
+
+/*
+ * inode_dio_done - signal finish of a direct I/O requests
+ * @inode: inode the direct I/O happens on
+ *
+ * This is called once we've finished processing a direct I/O request,
+ * and is used to wake up callers waiting for direct I/O to be quiesced.
+ */
+void inode_dio_done(struct inode *inode)
+{
+       if (atomic_dec_and_test(&inode->i_dio_count))
+               wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
+}
+EXPORT_SYMBOL(inode_dio_done);
index 9962c59ba280b1c75d78adc55b8491733075a5e0..18bc216ea09d95ecff126ef96987ff786b5cbcb1 100644 (file)
@@ -56,7 +56,7 @@ extern int sb_prepare_remount_readonly(struct super_block *);
 
 extern void __init mnt_init(void);
 
-DECLARE_BRLOCK(vfsmount_lock);
+extern struct lglock vfsmount_lock;
 
 
 /*
@@ -100,6 +100,7 @@ extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
 
 extern long do_handle_open(int mountdirfd,
                           struct file_handle __user *ufh, int open_flag);
+extern int open_check_o_direct(struct file *f);
 
 /*
  * inode.c
index 5e6dbe8958fc7b8fa650ec23d4ebe67de06f13a3..e50170ca7c33f446acc16e29a0d0097828919c30 100644 (file)
@@ -50,7 +50,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
 
        ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
        if (ioc) {
-               ioc_ioprio_changed(ioc, ioprio);
+               ioc->ioprio = ioprio;
                put_io_context(ioc);
        }
 
index dd4687ff30d09900a14f113aec870007cfcfb7f0..aa4356d09eeeb03167bcf506a7fe8ad98efaba39 100644 (file)
@@ -107,12 +107,11 @@ static struct dentry *isofs_export_get_parent(struct dentry *child)
 }
 
 static int
-isofs_export_encode_fh(struct dentry *dentry,
+isofs_export_encode_fh(struct inode *inode,
                       __u32 *fh32,
                       int *max_len,
-                      int connectable)
+                      struct inode *parent)
 {
-       struct inode * inode = dentry->d_inode;
        struct iso_inode_info * ei = ISOFS_I(inode);
        int len = *max_len;
        int type = 1;
@@ -124,7 +123,7 @@ isofs_export_encode_fh(struct dentry *dentry,
         * offset of the inode and the upper 16 bits of fh32[1] to
         * hold the offset of the parent.
         */
-       if (connectable && (len < 5)) {
+       if (parent && (len < 5)) {
                *max_len = 5;
                return 255;
        } else if (len < 3) {
@@ -136,16 +135,12 @@ isofs_export_encode_fh(struct dentry *dentry,
        fh32[0] = ei->i_iget5_block;
        fh16[2] = (__u16)ei->i_iget5_offset;  /* fh16 [sic] */
        fh32[2] = inode->i_generation;
-       if (connectable && !S_ISDIR(inode->i_mode)) {
-               struct inode *parent;
+       if (parent) {
                struct iso_inode_info *eparent;
-               spin_lock(&dentry->d_lock);
-               parent = dentry->d_parent->d_inode;
                eparent = ISOFS_I(parent);
                fh32[3] = eparent->i_iget5_block;
                fh16[3] = (__u16)eparent->i_iget5_offset;  /* fh16 [sic] */
                fh32[4] = parent->i_generation;
-               spin_unlock(&dentry->d_lock);
                len = 5;
                type = 2;
        }
index 05f0754f2b466f1446b8501331bc5b698f34f8a3..08c03044abddbc216d5c8904295631d11c4b6c1b 100644 (file)
@@ -508,20 +508,19 @@ int cleanup_journal_tail(journal_t *journal)
        /*
         * We need to make sure that any blocks that were recently written out
         * --- perhaps by log_do_checkpoint() --- are flushed out before we
-        * drop the transactions from the journal. It's unlikely this will be
-        * necessary, especially with an appropriately sized journal, but we
-        * need this to guarantee correctness.  Fortunately
-        * cleanup_journal_tail() doesn't get called all that often.
+        * drop the transactions from the journal. Similarly we need to be sure
+        * superblock makes it to disk before next transaction starts reusing
+        * freed space (otherwise we could replay some blocks of the new
+        * transaction thinking they belong to the old one). So we use
+        * WRITE_FLUSH_FUA. It's unlikely this will be necessary, especially
+        * with an appropriately sized journal, but we need this to guarantee
+        * correctness.  Fortunately cleanup_journal_tail() doesn't get called
+        * all that often.
         */
-       if (journal->j_flags & JFS_BARRIER)
-               blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+       journal_update_sb_log_tail(journal, first_tid, blocknr,
+                                  WRITE_FLUSH_FUA);
 
        spin_lock(&journal->j_state_lock);
-       if (!tid_gt(first_tid, journal->j_tail_sequence)) {
-               spin_unlock(&journal->j_state_lock);
-               /* Someone else cleaned up journal so return 0 */
-               return 0;
-       }
        /* OK, update the superblock to recover the freed space.
         * Physical blocks come first: have we wrapped beyond the end of
         * the log?  */
@@ -539,8 +538,6 @@ int cleanup_journal_tail(journal_t *journal)
        journal->j_tail_sequence = first_tid;
        journal->j_tail = blocknr;
        spin_unlock(&journal->j_state_lock);
-       if (!(journal->j_flags & JFS_ABORT))
-               journal_update_superblock(journal, 1);
        return 0;
 }
 
index f2b9a571f4cf51f8a61995c43b131851c504f567..52c15c776029098546cc8c4d1309d4a156f651cf 100644 (file)
@@ -298,6 +298,7 @@ void journal_commit_transaction(journal_t *journal)
        int tag_flag;
        int i;
        struct blk_plug plug;
+       int write_op = WRITE;
 
        /*
         * First job: lock down the current transaction and wait for
@@ -307,7 +308,16 @@ void journal_commit_transaction(journal_t *journal)
        /* Do we need to erase the effects of a prior journal_flush? */
        if (journal->j_flags & JFS_FLUSHED) {
                jbd_debug(3, "super block updated\n");
-               journal_update_superblock(journal, 1);
+               mutex_lock(&journal->j_checkpoint_mutex);
+               /*
+                * We hold j_checkpoint_mutex so tail cannot change under us.
+                * We don't need any special data guarantees for writing sb
+                * since journal is empty and it is ok for write to be
+                * flushed only with transaction commit.
+                */
+               journal_update_sb_log_tail(journal, journal->j_tail_sequence,
+                                          journal->j_tail, WRITE_SYNC);
+               mutex_unlock(&journal->j_checkpoint_mutex);
        } else {
                jbd_debug(3, "superblock not updated\n");
        }
@@ -413,13 +423,16 @@ void journal_commit_transaction(journal_t *journal)
 
        jbd_debug (3, "JBD: commit phase 2\n");
 
+       if (tid_geq(journal->j_commit_waited, commit_transaction->t_tid))
+               write_op = WRITE_SYNC;
+
        /*
         * Now start flushing things to disk, in the order they appear
         * on the transaction lists.  Data blocks go first.
         */
        blk_start_plug(&plug);
        err = journal_submit_data_buffers(journal, commit_transaction,
-                                         WRITE_SYNC);
+                                         write_op);
        blk_finish_plug(&plug);
 
        /*
@@ -478,7 +491,7 @@ void journal_commit_transaction(journal_t *journal)
 
        blk_start_plug(&plug);
 
-       journal_write_revoke_records(journal, commit_transaction, WRITE_SYNC);
+       journal_write_revoke_records(journal, commit_transaction, write_op);
 
        /*
         * If we found any dirty or locked buffers, then we should have
@@ -649,7 +662,7 @@ start_journal_io:
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
                                bh->b_end_io = journal_end_buffer_io_sync;
-                               submit_bh(WRITE_SYNC, bh);
+                               submit_bh(write_op, bh);
                        }
                        cond_resched();
 
index 0971e9217808829a67f5fd340a1972a63a982930..425c2f2cf1700a3f5d9db8fc4e93dcde0450d998 100644 (file)
@@ -563,6 +563,8 @@ int log_wait_commit(journal_t *journal, tid_t tid)
        spin_unlock(&journal->j_state_lock);
 #endif
        spin_lock(&journal->j_state_lock);
+       if (!tid_geq(journal->j_commit_waited, tid))
+               journal->j_commit_waited = tid;
        while (tid_gt(tid, journal->j_commit_sequence)) {
                jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n",
                                  tid, journal->j_commit_sequence);
@@ -921,8 +923,33 @@ static int journal_reset(journal_t *journal)
 
        journal->j_max_transaction_buffers = journal->j_maxlen / 4;
 
-       /* Add the dynamic fields and write it to disk. */
-       journal_update_superblock(journal, 1);
+       /*
+        * As a special case, if the on-disk copy is already marked as needing
+        * no recovery (s_start == 0), then we can safely defer the superblock
+        * update until the next commit by setting JFS_FLUSHED.  This avoids
+        * attempting a write to a potential-readonly device.
+        */
+       if (sb->s_start == 0) {
+               jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
+                       "(start %u, seq %d, errno %d)\n",
+                       journal->j_tail, journal->j_tail_sequence,
+                       journal->j_errno);
+               journal->j_flags |= JFS_FLUSHED;
+       } else {
+               /* Lock here to make assertions happy... */
+               mutex_lock(&journal->j_checkpoint_mutex);
+               /*
+                * Update log tail information. We use WRITE_FUA since new
+                * transaction will start reusing journal space and so we
+                * must make sure information about current log tail is on
+                * disk before that.
+                */
+               journal_update_sb_log_tail(journal,
+                                          journal->j_tail_sequence,
+                                          journal->j_tail,
+                                          WRITE_FUA);
+               mutex_unlock(&journal->j_checkpoint_mutex);
+       }
        return journal_start_thread(journal);
 }
 
@@ -999,35 +1026,15 @@ int journal_create(journal_t *journal)
        return journal_reset(journal);
 }
 
-/**
- * void journal_update_superblock() - Update journal sb on disk.
- * @journal: The journal to update.
- * @wait: Set to '0' if you don't want to wait for IO completion.
- *
- * Update a journal's dynamic superblock fields and write it to disk,
- * optionally waiting for the IO to complete.
- */
-void journal_update_superblock(journal_t *journal, int wait)
+static void journal_write_superblock(journal_t *journal, int write_op)
 {
-       journal_superblock_t *sb = journal->j_superblock;
        struct buffer_head *bh = journal->j_sb_buffer;
+       int ret;
 
-       /*
-        * As a special case, if the on-disk copy is already marked as needing
-        * no recovery (s_start == 0) and there are no outstanding transactions
-        * in the filesystem, then we can safely defer the superblock update
-        * until the next commit by setting JFS_FLUSHED.  This avoids
-        * attempting a write to a potential-readonly device.
-        */
-       if (sb->s_start == 0 && journal->j_tail_sequence ==
-                               journal->j_transaction_sequence) {
-               jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
-                       "(start %u, seq %d, errno %d)\n",
-                       journal->j_tail, journal->j_tail_sequence,
-                       journal->j_errno);
-               goto out;
-       }
-
+       trace_journal_write_superblock(journal, write_op);
+       if (!(journal->j_flags & JFS_BARRIER))
+               write_op &= ~(REQ_FUA | REQ_FLUSH);
+       lock_buffer(bh);
        if (buffer_write_io_error(bh)) {
                char b[BDEVNAME_SIZE];
                /*
@@ -1045,42 +1052,100 @@ void journal_update_superblock(journal_t *journal, int wait)
                set_buffer_uptodate(bh);
        }
 
+       get_bh(bh);
+       bh->b_end_io = end_buffer_write_sync;
+       ret = submit_bh(write_op, bh);
+       wait_on_buffer(bh);
+       if (buffer_write_io_error(bh)) {
+               clear_buffer_write_io_error(bh);
+               set_buffer_uptodate(bh);
+               ret = -EIO;
+       }
+       if (ret) {
+               char b[BDEVNAME_SIZE];
+               printk(KERN_ERR "JBD: Error %d detected "
+                      "when updating journal superblock for %s.\n",
+                      ret, journal_dev_name(journal, b));
+       }
+}
+
+/**
+ * journal_update_sb_log_tail() - Update log tail in journal sb on disk.
+ * @journal: The journal to update.
+ * @tail_tid: TID of the new transaction at the tail of the log
+ * @tail_block: The first block of the transaction at the tail of the log
+ * @write_op: With which operation should we write the journal sb
+ *
+ * Update a journal's superblock information about log tail and write it to
+ * disk, waiting for the IO to complete.
+ */
+void journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
+                               unsigned int tail_block, int write_op)
+{
+       journal_superblock_t *sb = journal->j_superblock;
+
+       BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
+       jbd_debug(1,"JBD: updating superblock (start %u, seq %u)\n",
+                 tail_block, tail_tid);
+
+       sb->s_sequence = cpu_to_be32(tail_tid);
+       sb->s_start    = cpu_to_be32(tail_block);
+
+       journal_write_superblock(journal, write_op);
+
+       /* Log is no longer empty */
+       spin_lock(&journal->j_state_lock);
+       WARN_ON(!sb->s_sequence);
+       journal->j_flags &= ~JFS_FLUSHED;
+       spin_unlock(&journal->j_state_lock);
+}
+
+/**
+ * mark_journal_empty() - Mark on disk journal as empty.
+ * @journal: The journal to update.
+ *
+ * Update a journal's dynamic superblock fields to show that journal is empty.
+ * Write updated superblock to disk waiting for IO to complete.
+ */
+static void mark_journal_empty(journal_t *journal)
+{
+       journal_superblock_t *sb = journal->j_superblock;
+
+       BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
        spin_lock(&journal->j_state_lock);
-       jbd_debug(1,"JBD: updating superblock (start %u, seq %d, errno %d)\n",
-                 journal->j_tail, journal->j_tail_sequence, journal->j_errno);
+       jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n",
+                 journal->j_tail_sequence);
 
        sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
-       sb->s_start    = cpu_to_be32(journal->j_tail);
-       sb->s_errno    = cpu_to_be32(journal->j_errno);
+       sb->s_start    = cpu_to_be32(0);
        spin_unlock(&journal->j_state_lock);
 
-       BUFFER_TRACE(bh, "marking dirty");
-       mark_buffer_dirty(bh);
-       if (wait) {
-               sync_dirty_buffer(bh);
-               if (buffer_write_io_error(bh)) {
-                       char b[BDEVNAME_SIZE];
-                       printk(KERN_ERR "JBD: I/O error detected "
-                              "when updating journal superblock for %s.\n",
-                              journal_dev_name(journal, b));
-                       clear_buffer_write_io_error(bh);
-                       set_buffer_uptodate(bh);
-               }
-       } else
-               write_dirty_buffer(bh, WRITE);
+       journal_write_superblock(journal, WRITE_FUA);
 
-       trace_jbd_update_superblock_end(journal, wait);
-out:
-       /* If we have just flushed the log (by marking s_start==0), then
-        * any future commit will have to be careful to update the
-        * superblock again to re-record the true start of the log. */
+       spin_lock(&journal->j_state_lock);
+       /* Log is empty */
+       journal->j_flags |= JFS_FLUSHED;
+       spin_unlock(&journal->j_state_lock);
+}
+
+/**
+ * journal_update_sb_errno() - Update error in the journal.
+ * @journal: The journal to update.
+ *
+ * Update a journal's errno.  Write updated superblock to disk waiting for IO
+ * to complete.
+ */
+static void journal_update_sb_errno(journal_t *journal)
+{
+       journal_superblock_t *sb = journal->j_superblock;
 
        spin_lock(&journal->j_state_lock);
-       if (sb->s_start)
-               journal->j_flags &= ~JFS_FLUSHED;
-       else
-               journal->j_flags |= JFS_FLUSHED;
+       jbd_debug(1, "JBD: updating superblock error (errno %d)\n",
+                 journal->j_errno);
+       sb->s_errno = cpu_to_be32(journal->j_errno);
        spin_unlock(&journal->j_state_lock);
+
+       journal_write_superblock(journal, WRITE_SYNC);
 }
 
 /*
@@ -1251,6 +1316,8 @@ int journal_destroy(journal_t *journal)
 
        /* Force any old transactions to disk */
 
+       /* We cannot race with anybody but must keep assertions happy */
+       mutex_lock(&journal->j_checkpoint_mutex);
        /* Totally anal locking here... */
        spin_lock(&journal->j_list_lock);
        while (journal->j_checkpoint_transactions != NULL) {
@@ -1266,16 +1333,14 @@ int journal_destroy(journal_t *journal)
 
        if (journal->j_sb_buffer) {
                if (!is_journal_aborted(journal)) {
-                       /* We can now mark the journal as empty. */
-                       journal->j_tail = 0;
                        journal->j_tail_sequence =
                                ++journal->j_transaction_sequence;
-                       journal_update_superblock(journal, 1);
-               } else {
+                       mark_journal_empty(journal);
+               } else
                        err = -EIO;
-               }
                brelse(journal->j_sb_buffer);
        }
+       mutex_unlock(&journal->j_checkpoint_mutex);
 
        if (journal->j_inode)
                iput(journal->j_inode);
@@ -1455,7 +1520,6 @@ int journal_flush(journal_t *journal)
 {
        int err = 0;
        transaction_t *transaction = NULL;
-       unsigned int old_tail;
 
        spin_lock(&journal->j_state_lock);
 
@@ -1490,6 +1554,7 @@ int journal_flush(journal_t *journal)
        if (is_journal_aborted(journal))
                return -EIO;
 
+       mutex_lock(&journal->j_checkpoint_mutex);
        cleanup_journal_tail(journal);
 
        /* Finally, mark the journal as really needing no recovery.
@@ -1497,14 +1562,9 @@ int journal_flush(journal_t *journal)
         * the magic code for a fully-recovered superblock.  Any future
         * commits of data to the journal will restore the current
         * s_start value. */
+       mark_journal_empty(journal);
+       mutex_unlock(&journal->j_checkpoint_mutex);
        spin_lock(&journal->j_state_lock);
-       old_tail = journal->j_tail;
-       journal->j_tail = 0;
-       spin_unlock(&journal->j_state_lock);
-       journal_update_superblock(journal, 1);
-       spin_lock(&journal->j_state_lock);
-       journal->j_tail = old_tail;
-
        J_ASSERT(!journal->j_running_transaction);
        J_ASSERT(!journal->j_committing_transaction);
        J_ASSERT(!journal->j_checkpoint_transactions);
@@ -1544,8 +1604,12 @@ int journal_wipe(journal_t *journal, int write)
                write ? "Clearing" : "Ignoring");
 
        err = journal_skip_recovery(journal);
-       if (write)
-               journal_update_superblock(journal, 1);
+       if (write) {
+               /* Lock to make assertions happy... */
+               mutex_lock(&journal->j_checkpoint_mutex);
+               mark_journal_empty(journal);
+               mutex_unlock(&journal->j_checkpoint_mutex);
+       }
 
  no_recovery:
        return err;
@@ -1613,7 +1677,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
        __journal_abort_hard(journal);
 
        if (errno)
-               journal_update_superblock(journal, 1);
+               journal_update_sb_errno(journal);
 }
 
 /**
index b2a7e5244e394d4bf88bcd4d8156d108d5d772e8..febc10db5cedb8d26e40d2a894105f0af3547735 100644 (file)
@@ -1433,8 +1433,6 @@ int journal_stop(handle_t *handle)
                }
        }
 
-       if (handle->h_sync)
-               transaction->t_synchronous_commit = 1;
        current->journal_info = NULL;
        spin_lock(&journal->j_state_lock);
        spin_lock(&transaction->t_handle_lock);
index f32f346f4b0a521a5b6bbaedc7b0a5a7750c4b1e..69a48c2944da682c8a133fe75183c086ef08813b 100644 (file)
@@ -1,6 +1,8 @@
 config JBD2
        tristate
        select CRC32
+       select CRYPTO
+       select CRYPTO_CRC32C
        help
          This is a generic journaling layer for block devices that support
          both 32-bit and 64-bit block numbers.  It is currently used by
index 840f70f507924a0ac4db70a9d729f715783b49be..216f4299f65e7e2f1e26859c8e1247cdf71c55df 100644 (file)
@@ -85,6 +85,24 @@ nope:
        __brelse(bh);
 }
 
+static void jbd2_commit_block_csum_set(journal_t *j,
+                                      struct journal_head *descriptor)
+{
+       struct commit_header *h;
+       __u32 csum;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return;
+
+       h = (struct commit_header *)(jh2bh(descriptor)->b_data);
+       h->h_chksum_type = 0;
+       h->h_chksum_size = 0;
+       h->h_chksum[0] = 0;
+       csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
+                          j->j_blocksize);
+       h->h_chksum[0] = cpu_to_be32(csum);
+}
+
 /*
  * Done it all: now submit the commit record.  We should have
  * cleaned up our previous buffers by now, so if we are in abort
@@ -128,6 +146,7 @@ static int journal_submit_commit_record(journal_t *journal,
                tmp->h_chksum_size      = JBD2_CRC32_CHKSUM_SIZE;
                tmp->h_chksum[0]        = cpu_to_be32(crc32_sum);
        }
+       jbd2_commit_block_csum_set(journal, descriptor);
 
        JBUFFER_TRACE(descriptor, "submit commit block");
        lock_buffer(bh);
@@ -301,6 +320,44 @@ static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
                tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
 }
 
+static void jbd2_descr_block_csum_set(journal_t *j,
+                                     struct journal_head *descriptor)
+{
+       struct jbd2_journal_block_tail *tail;
+       __u32 csum;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return;
+
+       tail = (struct jbd2_journal_block_tail *)
+                       (jh2bh(descriptor)->b_data + j->j_blocksize -
+                       sizeof(struct jbd2_journal_block_tail));
+       tail->t_checksum = 0;
+       csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
+                          j->j_blocksize);
+       tail->t_checksum = cpu_to_be32(csum);
+}
+
+static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
+                                   struct buffer_head *bh, __u32 sequence)
+{
+       struct page *page = bh->b_page;
+       __u8 *addr;
+       __u32 csum;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return;
+
+       sequence = cpu_to_be32(sequence);
+       addr = kmap_atomic(page, KM_USER0);
+       csum = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
+                         sizeof(sequence));
+       csum = jbd2_chksum(j, csum, addr + offset_in_page(bh->b_data),
+                         bh->b_size);
+       kunmap_atomic(addr, KM_USER0);
+
+       tag->t_checksum = cpu_to_be32(csum);
+}
 /*
  * jbd2_journal_commit_transaction
  *
@@ -334,6 +391,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        unsigned long first_block;
        tid_t first_tid;
        int update_tail;
+       int csum_size = 0;
+
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               csum_size = sizeof(struct jbd2_journal_block_tail);
 
        /*
         * First job: lock down the current transaction and wait for
@@ -627,7 +688,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 
                tag = (journal_block_tag_t *) tagp;
                write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
-               tag->t_flags = cpu_to_be32(tag_flag);
+               tag->t_flags = cpu_to_be16(tag_flag);
+               jbd2_block_tag_csum_set(journal, tag, jh2bh(new_jh),
+                                       commit_transaction->t_tid);
                tagp += tag_bytes;
                space_left -= tag_bytes;
 
@@ -643,7 +706,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 
                if (bufs == journal->j_wbufsize ||
                    commit_transaction->t_buffers == NULL ||
-                   space_left < tag_bytes + 16) {
+                   space_left < tag_bytes + 16 + csum_size) {
 
                        jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);
 
@@ -651,8 +714,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
                            submitting the IOs.  "tag" still points to
                            the last tag we set up. */
 
-                       tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);
+                       tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
 
+                       jbd2_descr_block_csum_set(journal, descriptor);
 start_journal_io:
                        for (i = 0; i < bufs; i++) {
                                struct buffer_head *bh = wbuf[i];
index 1afb701622b0b17748b4cd7f7d171df139bcc9cc..e9a3c4c85594e30aca1ed1f14d5667ba0595160a 100644 (file)
@@ -97,6 +97,43 @@ EXPORT_SYMBOL(jbd2_inode_cache);
 static void __journal_abort_soft (journal_t *journal, int errno);
 static int jbd2_journal_create_slab(size_t slab_size);
 
+/* Checksumming functions */
+int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
+{
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return 1;
+
+       return sb->s_checksum_type == JBD2_CRC32C_CHKSUM;
+}
+
+static __u32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
+{
+       __u32 csum, old_csum;
+
+       old_csum = sb->s_checksum;
+       sb->s_checksum = 0;
+       csum = jbd2_chksum(j, ~0, (char *)sb, sizeof(journal_superblock_t));
+       sb->s_checksum = old_csum;
+
+       return cpu_to_be32(csum);
+}
+
+int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
+{
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return 1;
+
+       return sb->s_checksum == jbd2_superblock_csum(j, sb);
+}
+
+void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
+{
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return;
+
+       sb->s_checksum = jbd2_superblock_csum(j, sb);
+}
+
 /*
  * Helper function used to manage commit timeouts
  */
@@ -1348,6 +1385,7 @@ static void jbd2_journal_update_sb_errno(journal_t *journal)
        jbd_debug(1, "JBD2: updating superblock error (errno %d)\n",
                  journal->j_errno);
        sb->s_errno    = cpu_to_be32(journal->j_errno);
+       jbd2_superblock_csum_set(journal, sb);
        read_unlock(&journal->j_state_lock);
 
        jbd2_write_superblock(journal, WRITE_SYNC);
@@ -1376,6 +1414,9 @@ static int journal_get_superblock(journal_t *journal)
                }
        }
 
+       if (buffer_verified(bh))
+               return 0;
+
        sb = journal->j_superblock;
 
        err = -EINVAL;
@@ -1413,6 +1454,43 @@ static int journal_get_superblock(journal_t *journal)
                goto out;
        }
 
+       if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
+           JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
+               /* Can't have checksum v1 and v2 on at the same time! */
+               printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 "
+                      "at the same time!\n");
+               goto out;
+       }
+
+       if (!jbd2_verify_csum_type(journal, sb)) {
+               printk(KERN_ERR "JBD: Unknown checksum type\n");
+               goto out;
+       }
+
+       /* Load the checksum driver */
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
+               journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
+               if (IS_ERR(journal->j_chksum_driver)) {
+                       printk(KERN_ERR "JBD: Cannot load crc32c driver.\n");
+                       err = PTR_ERR(journal->j_chksum_driver);
+                       journal->j_chksum_driver = NULL;
+                       goto out;
+               }
+       }
+
+       /* Check superblock checksum */
+       if (!jbd2_superblock_csum_verify(journal, sb)) {
+               printk(KERN_ERR "JBD: journal checksum error\n");
+               goto out;
+       }
+
+       /* Precompute checksum seed for all metadata */
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
+                                                  sizeof(sb->s_uuid));
+
+       set_buffer_verified(bh);
+
        return 0;
 
 out:
@@ -1564,6 +1642,8 @@ int jbd2_journal_destroy(journal_t *journal)
                iput(journal->j_inode);
        if (journal->j_revoke)
                jbd2_journal_destroy_revoke(journal);
+       if (journal->j_chksum_driver)
+               crypto_free_shash(journal->j_chksum_driver);
        kfree(journal->j_wbuf);
        kfree(journal);
 
@@ -1653,6 +1733,10 @@ int jbd2_journal_check_available_features (journal_t *journal, unsigned long com
 int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
                          unsigned long ro, unsigned long incompat)
 {
+#define INCOMPAT_FEATURE_ON(f) \
+               ((incompat & (f)) && !(sb->s_feature_incompat & cpu_to_be32(f)))
+#define COMPAT_FEATURE_ON(f) \
+               ((compat & (f)) && !(sb->s_feature_compat & cpu_to_be32(f)))
        journal_superblock_t *sb;
 
        if (jbd2_journal_check_used_features(journal, compat, ro, incompat))
@@ -1661,16 +1745,54 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
        if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
                return 0;
 
+       /* Asking for checksumming v2 and v1?  Only give them v2. */
+       if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2 &&
+           compat & JBD2_FEATURE_COMPAT_CHECKSUM)
+               compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM;
+
        jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
                  compat, ro, incompat);
 
        sb = journal->j_superblock;
 
+       /* If enabling v2 checksums, update superblock */
+       if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
+               sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
+               sb->s_feature_compat &=
+                       ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
+
+               /* Load the checksum driver */
+               if (journal->j_chksum_driver == NULL) {
+                       journal->j_chksum_driver = crypto_alloc_shash("crc32c",
+                                                                     0, 0);
+                       if (IS_ERR(journal->j_chksum_driver)) {
+                               printk(KERN_ERR "JBD: Cannot load crc32c "
+                                      "driver.\n");
+                               journal->j_chksum_driver = NULL;
+                               return 0;
+                       }
+               }
+
+               /* Precompute checksum seed for all metadata */
+               if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+                                             JBD2_FEATURE_INCOMPAT_CSUM_V2))
+                       journal->j_csum_seed = jbd2_chksum(journal, ~0,
+                                                          sb->s_uuid,
+                                                          sizeof(sb->s_uuid));
+       }
+
+       /* If enabling v1 checksums, downgrade superblock */
+       if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM))
+               sb->s_feature_incompat &=
+                       ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2);
+
        sb->s_feature_compat    |= cpu_to_be32(compat);
        sb->s_feature_ro_compat |= cpu_to_be32(ro);
        sb->s_feature_incompat  |= cpu_to_be32(incompat);
 
        return 1;
+#undef COMPAT_FEATURE_ON
+#undef INCOMPAT_FEATURE_ON
 }
 
 /*
@@ -1975,10 +2097,16 @@ int jbd2_journal_blocks_per_page(struct inode *inode)
  */
 size_t journal_tag_bytes(journal_t *journal)
 {
+       journal_block_tag_t tag;
+       size_t x = 0;
+
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               x += sizeof(tag.t_checksum);
+
        if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
-               return JBD2_TAG_SIZE64;
+               return x + JBD2_TAG_SIZE64;
        else
-               return JBD2_TAG_SIZE32;
+               return x + JBD2_TAG_SIZE32;
 }
 
 /*
index c1a03354a22ff1b5a787251b422afcb5225ca2c9..0131e4362534c4d5b83273130ee292463ec49f07 100644 (file)
@@ -174,6 +174,25 @@ static int jread(struct buffer_head **bhp, journal_t *journal,
        return 0;
 }
 
+static int jbd2_descr_block_csum_verify(journal_t *j,
+                                       void *buf)
+{
+       struct jbd2_journal_block_tail *tail;
+       __u32 provided, calculated;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return 1;
+
+       tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize -
+                       sizeof(struct jbd2_journal_block_tail));
+       provided = tail->t_checksum;
+       tail->t_checksum = 0;
+       calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
+       tail->t_checksum = provided;
+
+       provided = be32_to_cpu(provided);
+       return provided == calculated;
+}
 
 /*
  * Count the number of in-use tags in a journal descriptor block.
@@ -186,6 +205,9 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
        int                     nr = 0, size = journal->j_blocksize;
        int                     tag_bytes = journal_tag_bytes(journal);
 
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               size -= sizeof(struct jbd2_journal_block_tail);
+
        tagp = &bh->b_data[sizeof(journal_header_t)];
 
        while ((tagp - bh->b_data + tag_bytes) <= size) {
@@ -193,10 +215,10 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
 
                nr++;
                tagp += tag_bytes;
-               if (!(tag->t_flags & cpu_to_be32(JBD2_FLAG_SAME_UUID)))
+               if (!(tag->t_flags & cpu_to_be16(JBD2_FLAG_SAME_UUID)))
                        tagp += 16;
 
-               if (tag->t_flags & cpu_to_be32(JBD2_FLAG_LAST_TAG))
+               if (tag->t_flags & cpu_to_be16(JBD2_FLAG_LAST_TAG))
                        break;
        }
 
@@ -353,6 +375,41 @@ static int calc_chksums(journal_t *journal, struct buffer_head *bh,
        return 0;
 }
 
+static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
+{
+       struct commit_header *h;
+       __u32 provided, calculated;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return 1;
+
+       h = buf;
+       provided = h->h_chksum[0];
+       h->h_chksum[0] = 0;
+       calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
+       h->h_chksum[0] = provided;
+
+       provided = be32_to_cpu(provided);
+       return provided == calculated;
+}
+
+static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
+                                     void *buf, __u32 sequence)
+{
+       __u32 provided, calculated;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return 1;
+
+       sequence = cpu_to_be32(sequence);
+       calculated = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
+                                sizeof(sequence));
+       calculated = jbd2_chksum(j, calculated, buf, j->j_blocksize);
+       provided = be32_to_cpu(tag->t_checksum);
+
+       return provided == cpu_to_be32(calculated);
+}
+
 static int do_one_pass(journal_t *journal,
                        struct recovery_info *info, enum passtype pass)
 {
@@ -366,6 +423,7 @@ static int do_one_pass(journal_t *journal,
        int                     blocktype;
        int                     tag_bytes = journal_tag_bytes(journal);
        __u32                   crc32_sum = ~0; /* Transactional Checksums */
+       int                     descr_csum_size = 0;
 
        /*
         * First thing is to establish what we expect to find in the log
@@ -451,6 +509,18 @@ static int do_one_pass(journal_t *journal,
 
                switch(blocktype) {
                case JBD2_DESCRIPTOR_BLOCK:
+                       /* Verify checksum first */
+                       if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+                                       JBD2_FEATURE_INCOMPAT_CSUM_V2))
+                               descr_csum_size =
+                                       sizeof(struct jbd2_journal_block_tail);
+                       if (descr_csum_size > 0 &&
+                           !jbd2_descr_block_csum_verify(journal,
+                                                         bh->b_data)) {
+                               err = -EIO;
+                               goto failed;
+                       }
+
                        /* If it is a valid descriptor block, replay it
                         * in pass REPLAY; if journal_checksums enabled, then
                         * calculate checksums in PASS_SCAN, otherwise,
@@ -481,11 +551,11 @@ static int do_one_pass(journal_t *journal,
 
                        tagp = &bh->b_data[sizeof(journal_header_t)];
                        while ((tagp - bh->b_data + tag_bytes)
-                              <= journal->j_blocksize) {
+                              <= journal->j_blocksize - descr_csum_size) {
                                unsigned long io_block;
 
                                tag = (journal_block_tag_t *) tagp;
-                               flags = be32_to_cpu(tag->t_flags);
+                               flags = be16_to_cpu(tag->t_flags);
 
                                io_block = next_log_block++;
                                wrap(journal, next_log_block);
@@ -516,6 +586,19 @@ static int do_one_pass(journal_t *journal,
                                                goto skip_write;
                                        }
 
+                                       /* Look for block corruption */
+                                       if (!jbd2_block_tag_csum_verify(
+                                               journal, tag, obh->b_data,
+                                               be32_to_cpu(tmp->h_sequence))) {
+                                               brelse(obh);
+                                               success = -EIO;
+                                               printk(KERN_ERR "JBD: Invalid "
+                                                      "checksum recovering "
+                                                      "block %llu in log\n",
+                                                      blocknr);
+                                               continue;
+                                       }
+
                                        /* Find a buffer for the new
                                         * data being restored */
                                        nbh = __getblk(journal->j_fs_dev,
@@ -650,6 +733,19 @@ static int do_one_pass(journal_t *journal,
                                }
                                crc32_sum = ~0;
                        }
+                       if (pass == PASS_SCAN &&
+                           !jbd2_commit_block_csum_verify(journal,
+                                                          bh->b_data)) {
+                               info->end_transaction = next_commit_ID;
+
+                               if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
+                                    JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
+                                       journal->j_failed_commit =
+                                               next_commit_ID;
+                                       brelse(bh);
+                                       break;
+                               }
+                       }
                        brelse(bh);
                        next_commit_ID++;
                        continue;
@@ -706,6 +802,25 @@ static int do_one_pass(journal_t *journal,
        return err;
 }
 
+static int jbd2_revoke_block_csum_verify(journal_t *j,
+                                        void *buf)
+{
+       struct jbd2_journal_revoke_tail *tail;
+       __u32 provided, calculated;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return 1;
+
+       tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize -
+                       sizeof(struct jbd2_journal_revoke_tail));
+       provided = tail->r_checksum;
+       tail->r_checksum = 0;
+       calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
+       tail->r_checksum = provided;
+
+       provided = be32_to_cpu(provided);
+       return provided == calculated;
+}
 
 /* Scan a revoke record, marking all blocks mentioned as revoked. */
 
@@ -720,6 +835,9 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
        offset = sizeof(jbd2_journal_revoke_header_t);
        max = be32_to_cpu(header->r_count);
 
+       if (!jbd2_revoke_block_csum_verify(journal, header))
+               return -EINVAL;
+
        if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
                record_len = 8;
 
index 6973705d6a3d9db1c96ed67f55c97c8a13ee2f6d..f30b80b4ce8bef98cab621bf731e13682661ca6d 100644 (file)
@@ -578,6 +578,7 @@ static void write_one_revoke_record(journal_t *journal,
                                    struct jbd2_revoke_record_s *record,
                                    int write_op)
 {
+       int csum_size = 0;
        struct journal_head *descriptor;
        int offset;
        journal_header_t *header;
@@ -592,9 +593,13 @@ static void write_one_revoke_record(journal_t *journal,
        descriptor = *descriptorp;
        offset = *offsetp;
 
+       /* Do we need to leave space at the end for a checksum? */
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               csum_size = sizeof(struct jbd2_journal_revoke_tail);
+
        /* Make sure we have a descriptor with space left for the record */
        if (descriptor) {
-               if (offset == journal->j_blocksize) {
+               if (offset >= journal->j_blocksize - csum_size) {
                        flush_descriptor(journal, descriptor, offset, write_op);
                        descriptor = NULL;
                }
@@ -631,6 +636,24 @@ static void write_one_revoke_record(journal_t *journal,
        *offsetp = offset;
 }
 
+static void jbd2_revoke_csum_set(journal_t *j,
+                                struct journal_head *descriptor)
+{
+       struct jbd2_journal_revoke_tail *tail;
+       __u32 csum;
+
+       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               return;
+
+       tail = (struct jbd2_journal_revoke_tail *)
+                       (jh2bh(descriptor)->b_data + j->j_blocksize -
+                       sizeof(struct jbd2_journal_revoke_tail));
+       tail->r_checksum = 0;
+       csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
+                          j->j_blocksize);
+       tail->r_checksum = cpu_to_be32(csum);
+}
+
 /*
  * Flush a revoke descriptor out to the journal.  If we are aborting,
  * this is a noop; otherwise we are generating a buffer which needs to
@@ -652,6 +675,8 @@ static void flush_descriptor(journal_t *journal,
 
        header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data;
        header->r_count = cpu_to_be32(offset);
+       jbd2_revoke_csum_set(journal, descriptor);
+
        set_buffer_jwrite(bh);
        BUFFER_TRACE(bh, "write");
        set_buffer_dirty(bh);
index ddcd3549c6c26cbc9cb9dd46831b189ed3c0441e..fb1ab9533b67277a557cd5f8ea9f7216b8284d4e 100644 (file)
@@ -162,8 +162,8 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
 
 alloc_transaction:
        if (!journal->j_running_transaction) {
-               new_transaction = kmem_cache_alloc(transaction_cache,
-                                                  gfp_mask | __GFP_ZERO);
+               new_transaction = kmem_cache_zalloc(transaction_cache,
+                                                   gfp_mask);
                if (!new_transaction) {
                        /*
                         * If __GFP_FS is not present, then we may be
index bb6f993ebca924dd39471163b586ded206096f62..3d3092eda8119faa47818fded504aa6a328511e7 100644 (file)
@@ -240,7 +240,7 @@ void jffs2_evict_inode (struct inode *inode)
        jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
                  __func__, inode->i_ino, inode->i_mode);
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
        jffs2_do_clear_inode(c, f);
 }
 
index 55a0c1dceadfddcf990b8fdbcfec015fc75fab32..413ef89c2d1ba32fe8507f29355d11c5873bc7d0 100644 (file)
@@ -32,6 +32,13 @@ struct jffs2_inodirty;
 struct jffs2_mount_opts {
        bool override_compr;
        unsigned int compr;
+
+       /* The size of the reserved pool. The reserved pool is the JFFS2 flash
+        * space which may only be used by root cannot be used by the other
+        * users. This is implemented simply by means of not allowing the
+        * latter users to write to the file system if the amount if the
+        * available space is less then 'rp_size'. */
+       unsigned int rp_size;
 };
 
 /* A struct for the overall file system control.  Pointers to
@@ -126,6 +133,10 @@ struct jffs2_sb_info {
        struct jffs2_inodirty *wbuf_inodes;
        struct rw_semaphore wbuf_sem;   /* Protects the write buffer */
 
+       struct delayed_work wbuf_dwork; /* write-buffer write-out work */
+       int wbuf_queued;                /* non-zero delayed work is queued */
+       spinlock_t wbuf_dwork_lock;     /* protects wbuf_dwork and and wbuf_queued */
+
        unsigned char *oobbuf;
        int oobavail; /* How many bytes are available for JFFS2 in OOB */
 #endif
index 6784d1e7a7eb3440b7e7707a4659f79e8cec7433..0c96eb52c79783057862a2c8f66106fc2a424113 100644 (file)
 #include "nodelist.h"
 #include "debug.h"
 
+/*
+ * Check whether the user is allowed to write.
+ */
+static int jffs2_rp_can_write(struct jffs2_sb_info *c)
+{
+       uint32_t avail;
+       struct jffs2_mount_opts *opts = &c->mount_opts;
+
+       avail = c->dirty_size + c->free_size + c->unchecked_size +
+               c->erasing_size - c->resv_blocks_write * c->sector_size
+               - c->nospc_dirty_size;
+
+       if (avail < 2 * opts->rp_size)
+               jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, "
+                         "erasing_size %u, unchecked_size %u, "
+                         "nr_erasing_blocks %u, avail %u, resrv %u\n",
+                         opts->rp_size, c->dirty_size, c->free_size,
+                         c->erasing_size, c->unchecked_size,
+                         c->nr_erasing_blocks, avail, c->nospc_dirty_size);
+
+       if (avail > opts->rp_size)
+               return 1;
+
+       /* Always allow root */
+       if (capable(CAP_SYS_RESOURCE))
+               return 1;
+
+       jffs2_dbg(1, "forbid writing\n");
+       return 0;
+}
+
 /**
  *     jffs2_reserve_space - request physical space to write nodes to flash
  *     @c: superblock info
@@ -55,6 +86,15 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 
        spin_lock(&c->erase_completion_lock);
 
+       /*
+        * Check if the free space is greater then size of the reserved pool.
+        * If not, only allow root to proceed with writing.
+        */
+       if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) {
+               ret = -ENOSPC;
+               goto out;
+       }
+
        /* this needs a little more thought (true <tglx> :)) */
        while(ret == -EAGAIN) {
                while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
@@ -158,6 +198,8 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
                        jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
                }
        }
+
+out:
        spin_unlock(&c->erase_completion_lock);
        if (!ret)
                ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
index 1cd3aec9d9ae282dd31226d0717aaf69a55f414d..bcd983d7e7f99e7e295decc1d26092d464a14d9f 100644 (file)
@@ -95,6 +95,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
 #define jffs2_ubivol(c) (0)
 #define jffs2_ubivol_setup(c) (0)
 #define jffs2_ubivol_cleanup(c) do {} while (0)
+#define jffs2_dirty_trigger(c) do {} while (0)
 
 #else /* NAND and/or ECC'd NOR support present */
 
@@ -135,14 +136,10 @@ void jffs2_ubivol_cleanup(struct jffs2_sb_info *c);
 #define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && ! (c->mtd->flags & MTD_BIT_WRITEABLE))
 int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c);
 void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c);
+void jffs2_dirty_trigger(struct jffs2_sb_info *c);
 
 #endif /* WRITEBUFFER */
 
-static inline void jffs2_dirty_trigger(struct jffs2_sb_info *c)
-{
-       OFNI_BS_2SFFJ(c)->s_dirt = 1;
-}
-
 /* background.c */
 int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c);
 void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c);
index dc0437e8476322aaff40dc01737dcc2cabdc6976..1ea349fff68b625389c5647f03094ef56f5e4262 100644 (file)
@@ -1266,19 +1266,25 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
                        /* Symlink's inode data is the target path. Read it and
                         * keep in RAM to facilitate quick follow symlink
                         * operation. */
-                       f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
+                       uint32_t csize = je32_to_cpu(latest_node->csize);
+                       if (csize > JFFS2_MAX_NAME_LEN) {
+                               mutex_unlock(&f->sem);
+                               jffs2_do_clear_inode(c, f);
+                               return -ENAMETOOLONG;
+                       }
+                       f->target = kmalloc(csize + 1, GFP_KERNEL);
                        if (!f->target) {
-                               JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
+                               JFFS2_ERROR("can't allocate %u bytes of memory for the symlink target path cache\n", csize);
                                mutex_unlock(&f->sem);
                                jffs2_do_clear_inode(c, f);
                                return -ENOMEM;
                        }
 
                        ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node),
-                                               je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);
+                                              csize, &retlen, (char *)f->target);
 
-                       if (ret  || retlen != je32_to_cpu(latest_node->csize)) {
-                               if (retlen != je32_to_cpu(latest_node->csize))
+                       if (ret || retlen != csize) {
+                               if (retlen != csize)
                                        ret = -EIO;
                                kfree(f->target);
                                f->target = NULL;
@@ -1287,7 +1293,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
                                return ret;
                        }
 
-                       f->target[je32_to_cpu(latest_node->csize)] = '\0';
+                       f->target[csize] = '\0';
                        dbg_readinode("symlink's target '%s' cached\n", f->target);
                }
 
@@ -1415,6 +1421,7 @@ int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *i
                mutex_unlock(&f->sem);
                jffs2_do_clear_inode(c, f);
        }
+       jffs2_xattr_do_crccheck_inode(c, ic);
        kfree (f);
        return ret;
 }
index f9916f312bd81e3590fde1c92a025458cb64ab11..61ea41389f90d91d8b3ab6a6a39cd580720f1950 100644 (file)
@@ -63,21 +63,6 @@ static void jffs2_i_init_once(void *foo)
        inode_init_once(&f->vfs_inode);
 }
 
-static void jffs2_write_super(struct super_block *sb)
-{
-       struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
-
-       lock_super(sb);
-       sb->s_dirt = 0;
-
-       if (!(sb->s_flags & MS_RDONLY)) {
-               jffs2_dbg(1, "%s()\n", __func__);
-               jffs2_flush_wbuf_gc(c, 0);
-       }
-
-       unlock_super(sb);
-}
-
 static const char *jffs2_compr_name(unsigned int compr)
 {
        switch (compr) {
@@ -105,6 +90,8 @@ static int jffs2_show_options(struct seq_file *s, struct dentry *root)
 
        if (opts->override_compr)
                seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr));
+       if (opts->rp_size)
+               seq_printf(s, ",rp_size=%u", opts->rp_size / 1024);
 
        return 0;
 }
@@ -113,8 +100,6 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
 {
        struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
 
-       jffs2_write_super(sb);
-
        mutex_lock(&c->alloc_sem);
        jffs2_flush_wbuf_pad(c);
        mutex_unlock(&c->alloc_sem);
@@ -171,15 +156,18 @@ static const struct export_operations jffs2_export_ops = {
  * JFFS2 mount options.
  *
  * Opt_override_compr: override default compressor
+ * Opt_rp_size: size of reserved pool in KiB
  * Opt_err: just end of array marker
  */
 enum {
        Opt_override_compr,
+       Opt_rp_size,
        Opt_err,
 };
 
 static const match_table_t tokens = {
        {Opt_override_compr, "compr=%s"},
+       {Opt_rp_size, "rp_size=%u"},
        {Opt_err, NULL},
 };
 
@@ -187,6 +175,7 @@ static int jffs2_parse_options(struct jffs2_sb_info *c, char *data)
 {
        substring_t args[MAX_OPT_ARGS];
        char *p, *name;
+       unsigned int opt;
 
        if (!data)
                return 0;
@@ -224,6 +213,17 @@ static int jffs2_parse_options(struct jffs2_sb_info *c, char *data)
                        kfree(name);
                        c->mount_opts.override_compr = true;
                        break;
+               case Opt_rp_size:
+                       if (match_int(&args[0], &opt))
+                               return -EINVAL;
+                       opt *= 1024;
+                       if (opt > c->mtd->size) {
+                               pr_warn("Too large reserve pool specified, max "
+                                       "is %llu KB\n", c->mtd->size / 1024);
+                               return -EINVAL;
+                       }
+                       c->mount_opts.rp_size = opt;
+                       break;
                default:
                        pr_err("Error: unrecognized mount option '%s' or missing value\n",
                               p);
@@ -251,7 +251,6 @@ static const struct super_operations jffs2_super_operations =
        .alloc_inode =  jffs2_alloc_inode,
        .destroy_inode =jffs2_destroy_inode,
        .put_super =    jffs2_put_super,
-       .write_super =  jffs2_write_super,
        .statfs =       jffs2_statfs,
        .remount_fs =   jffs2_remount_fs,
        .evict_inode =  jffs2_evict_inode,
@@ -319,9 +318,6 @@ static void jffs2_put_super (struct super_block *sb)
 
        jffs2_dbg(2, "%s()\n", __func__);
 
-       if (sb->s_dirt)
-               jffs2_write_super(sb);
-
        mutex_lock(&c->alloc_sem);
        jffs2_flush_wbuf_pad(c);
        mutex_unlock(&c->alloc_sem);
index 74d9be19df3f1fff1d7defdc7824c90240a302f6..6f4529d3697fd3f97d5b018dbe9f5c0362cee034 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/mtd/nand.h>
 #include <linux/jiffies.h>
 #include <linux/sched.h>
+#include <linux/writeback.h>
 
 #include "nodelist.h"
 
@@ -85,7 +86,7 @@ static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
 {
        struct jffs2_inodirty *new;
 
-       /* Mark the superblock dirty so that kupdated will flush... */
+       /* Schedule delayed write-buffer write-out */
        jffs2_dirty_trigger(c);
 
        if (jffs2_wbuf_pending_for_ino(c, ino))
@@ -1148,6 +1149,47 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *
        return 1;
 }
 
+static struct jffs2_sb_info *work_to_sb(struct work_struct *work)
+{
+       struct delayed_work *dwork;
+
+       dwork = container_of(work, struct delayed_work, work);
+       return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
+}
+
+static void delayed_wbuf_sync(struct work_struct *work)
+{
+       struct jffs2_sb_info *c = work_to_sb(work);
+       struct super_block *sb = OFNI_BS_2SFFJ(c);
+
+       spin_lock(&c->wbuf_dwork_lock);
+       c->wbuf_queued = 0;
+       spin_unlock(&c->wbuf_dwork_lock);
+
+       if (!(sb->s_flags & MS_RDONLY)) {
+               jffs2_dbg(1, "%s()\n", __func__);
+               jffs2_flush_wbuf_gc(c, 0);
+       }
+}
+
+void jffs2_dirty_trigger(struct jffs2_sb_info *c)
+{
+       struct super_block *sb = OFNI_BS_2SFFJ(c);
+       unsigned long delay;
+
+       if (sb->s_flags & MS_RDONLY)
+               return;
+
+       spin_lock(&c->wbuf_dwork_lock);
+       if (!c->wbuf_queued) {
+               jffs2_dbg(1, "%s()\n", __func__);
+               delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+               queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay);
+               c->wbuf_queued = 1;
+       }
+       spin_unlock(&c->wbuf_dwork_lock);
+}
+
 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
 {
        struct nand_ecclayout *oinfo = c->mtd->ecclayout;
@@ -1169,6 +1211,8 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
 
        /* Initialise write buffer */
        init_rwsem(&c->wbuf_sem);
+       spin_lock_init(&c->wbuf_dwork_lock);
+       INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
        c->wbuf_pagesize = c->mtd->writesize;
        c->wbuf_ofs = 0xFFFFFFFF;
 
@@ -1207,8 +1251,8 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
 
        /* Initialize write buffer */
        init_rwsem(&c->wbuf_sem);
-
-
+       spin_lock_init(&c->wbuf_dwork_lock);
+       INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
        c->wbuf_pagesize =  c->mtd->erasesize;
 
        /* Find a suitable c->sector_size
@@ -1267,6 +1311,9 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
 
        /* Initialize write buffer */
        init_rwsem(&c->wbuf_sem);
+       spin_lock_init(&c->wbuf_dwork_lock);
+       INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+
        c->wbuf_pagesize = c->mtd->writesize;
        c->wbuf_ofs = 0xFFFFFFFF;
 
@@ -1299,6 +1346,8 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
                return 0;
 
        init_rwsem(&c->wbuf_sem);
+       spin_lock_init(&c->wbuf_dwork_lock);
+       INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
 
        c->wbuf_pagesize =  c->mtd->writesize;
        c->wbuf_ofs = 0xFFFFFFFF;
index b55b803eddcb92081908aa1f53da50cbdfc6babb..3034e970eb9a130cea79d7d413aa2463070408ff 100644 (file)
@@ -11,6 +11,8 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#define JFFS2_XATTR_IS_CORRUPTED       1
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
@@ -153,7 +155,7 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat
                JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
                            offset, je32_to_cpu(rx.hdr_crc), crc);
                xd->flags |= JFFS2_XFLAGS_INVALID;
-               return -EIO;
+               return JFFS2_XATTR_IS_CORRUPTED;
        }
        totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len));
        if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK
@@ -169,7 +171,7 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat
                            je32_to_cpu(rx.xid), xd->xid,
                            je32_to_cpu(rx.version), xd->version);
                xd->flags |= JFFS2_XFLAGS_INVALID;
-               return -EIO;
+               return JFFS2_XATTR_IS_CORRUPTED;
        }
        xd->xprefix = rx.xprefix;
        xd->name_len = rx.name_len;
@@ -227,12 +229,12 @@ static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum
        data[xd->name_len] = '\0';
        crc = crc32(0, data, length);
        if (crc != xd->data_crc) {
-               JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XREF)"
+               JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XATTR)"
                              " at %#08x, read: 0x%08x calculated: 0x%08x\n",
                              ref_offset(xd->node), xd->data_crc, crc);
                kfree(data);
                xd->flags |= JFFS2_XFLAGS_INVALID;
-               return -EIO;
+               return JFFS2_XATTR_IS_CORRUPTED;
        }
 
        xd->flags |= JFFS2_XFLAGS_HOT;
@@ -270,7 +272,7 @@ static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
        if (xd->xname)
                return 0;
        if (xd->flags & JFFS2_XFLAGS_INVALID)
-               return -EIO;
+               return JFFS2_XATTR_IS_CORRUPTED;
        if (unlikely(is_xattr_datum_unchecked(c, xd)))
                rc = do_verify_xattr_datum(c, xd);
        if (!rc)
@@ -435,6 +437,8 @@ static void unrefer_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datu
  *   is called to release xattr related objects when unmounting. 
  * check_xattr_ref_inode(c, ic)
  *   is used to confirm inode does not have duplicate xattr name/value pair.
+ * jffs2_xattr_do_crccheck_inode(c, ic)
+ *   is used to force xattr data integrity check during the initial gc scan.
  * -------------------------------------------------- */
 static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
 {
@@ -462,7 +466,7 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref
        if (crc != je32_to_cpu(rr.node_crc)) {
                JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
                            offset, je32_to_cpu(rr.node_crc), crc);
-               return -EIO;
+               return JFFS2_XATTR_IS_CORRUPTED;
        }
        if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK
            || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF
@@ -472,7 +476,7 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref
                            offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
                            je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF,
                            je32_to_cpu(rr.totlen), PAD(sizeof(rr)));
-               return -EIO;
+               return JFFS2_XATTR_IS_CORRUPTED;
        }
        ref->ino = je32_to_cpu(rr.ino);
        ref->xid = je32_to_cpu(rr.xid);
@@ -682,6 +686,11 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac
        return rc;
 }
 
+void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+{
+       check_xattr_ref_inode(c, ic);
+}
+
 /* -------- xattr subsystem functions ---------------
  * jffs2_init_xattr_subsystem(c)
  *   is used to initialize semaphore and list_head, and some variables.
index 7be4beb306f3ff06eb6df2a8427ca31cb2027e6f..467ff376ee265041b40d94d30ee73c6b7edae4aa 100644 (file)
@@ -77,6 +77,7 @@ extern void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c);
 extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
                                                         uint32_t xid, uint32_t version);
 
+extern void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
 extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
 extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
 
@@ -108,6 +109,7 @@ extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t);
 #define jffs2_build_xattr_subsystem(c)
 #define jffs2_clear_xattr_subsystem(c)
 
+#define jffs2_xattr_do_crccheck_inode(c, ic)
 #define jffs2_xattr_delete_inode(c, ic)
 #define jffs2_xattr_free_inode(c, ic)
 #define jffs2_verify_xattr(c)                  (1)
index 77b69b27f825e877f45bb3a97f9f21887d92d764..4692bf3ca8cbcbaff9db883759b77d3d725cbc0d 100644 (file)
@@ -169,7 +169,7 @@ void jfs_evict_inode(struct inode *inode)
        } else {
                truncate_inode_pages(&inode->i_data, 0);
        }
-       end_writeback(inode);
+       clear_inode(inode);
        dquot_drop(inode);
 }
 
index ba1dc2eebd1ef8413d0593abfde9e14229169ab3..ca0a08001449a999e7070253ba51f7607291285e 100644 (file)
@@ -56,7 +56,7 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
        u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4;
        int status;
 
-       status = lockd_up();
+       status = lockd_up(nlm_init->net);
        if (status < 0)
                return ERR_PTR(status);
 
@@ -65,7 +65,7 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
                                   nlm_init->hostname, nlm_init->noresvport,
                                   nlm_init->net);
        if (host == NULL) {
-               lockd_down();
+               lockd_down(nlm_init->net);
                return ERR_PTR(-ENOLCK);
        }
 
@@ -80,8 +80,10 @@ EXPORT_SYMBOL_GPL(nlmclnt_init);
  */
 void nlmclnt_done(struct nlm_host *host)
 {
+       struct net *net = host->net;
+
        nlmclnt_release_host(host);
-       lockd_down();
+       lockd_down(net);
 }
 EXPORT_SYMBOL_GPL(nlmclnt_done);
 
@@ -220,11 +222,12 @@ reclaimer(void *ptr)
        struct nlm_wait   *block;
        struct file_lock *fl, *next;
        u32 nsmstate;
+       struct net *net = host->net;
 
        allow_signal(SIGKILL);
 
        down_write(&host->h_rwsem);
-       lockd_up();     /* note: this cannot fail as lockd is already running */
+       lockd_up(net);  /* note: this cannot fail as lockd is already running */
 
        dprintk("lockd: reclaiming locks for host %s\n", host->h_name);
 
@@ -275,6 +278,6 @@ restart:
 
        /* Release host handle after use */
        nlmclnt_release_host(host);
-       lockd_down();
+       lockd_down(net);
        return 0;
 }
index f49b9afc443690a2377db100ed7da33452ef98db..80938fda67e0e6fde67999d3556820b87b6acd33 100644 (file)
@@ -251,39 +251,40 @@ out_err:
        return err;
 }
 
-static int lockd_up_net(struct net *net)
+static int lockd_up_net(struct svc_serv *serv, struct net *net)
 {
        struct lockd_net *ln = net_generic(net, lockd_net_id);
-       struct svc_serv *serv = nlmsvc_rqst->rq_server;
        int error;
 
-       if (ln->nlmsvc_users)
+       if (ln->nlmsvc_users++)
                return 0;
 
-       error = svc_rpcb_setup(serv, net);
+       error = svc_bind(serv, net);
        if (error)
-               goto err_rpcb;
+               goto err_bind;
 
        error = make_socks(serv, net);
        if (error < 0)
                goto err_socks;
+       dprintk("lockd_up_net: per-net data created; net=%p\n", net);
        return 0;
 
 err_socks:
        svc_rpcb_cleanup(serv, net);
-err_rpcb:
+err_bind:
+       ln->nlmsvc_users--;
        return error;
 }
 
-static void lockd_down_net(struct net *net)
+static void lockd_down_net(struct svc_serv *serv, struct net *net)
 {
        struct lockd_net *ln = net_generic(net, lockd_net_id);
-       struct svc_serv *serv = nlmsvc_rqst->rq_server;
 
        if (ln->nlmsvc_users) {
                if (--ln->nlmsvc_users == 0) {
                        nlm_shutdown_hosts_net(net);
                        svc_shutdown_net(serv, net);
+                       dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
                }
        } else {
                printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n",
@@ -292,22 +293,60 @@ static void lockd_down_net(struct net *net)
        }
 }
 
-/*
- * Bring up the lockd process if it's not already up.
- */
-int lockd_up(void)
+static int lockd_start_svc(struct svc_serv *serv)
+{
+       int error;
+
+       if (nlmsvc_rqst)
+               return 0;
+
+       /*
+        * Create the kernel thread and wait for it to start.
+        */
+       nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
+       if (IS_ERR(nlmsvc_rqst)) {
+               error = PTR_ERR(nlmsvc_rqst);
+               printk(KERN_WARNING
+                       "lockd_up: svc_rqst allocation failed, error=%d\n",
+                       error);
+               goto out_rqst;
+       }
+
+       svc_sock_update_bufs(serv);
+       serv->sv_maxconn = nlm_max_connections;
+
+       nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
+       if (IS_ERR(nlmsvc_task)) {
+               error = PTR_ERR(nlmsvc_task);
+               printk(KERN_WARNING
+                       "lockd_up: kthread_run failed, error=%d\n", error);
+               goto out_task;
+       }
+       dprintk("lockd_up: service started\n");
+       return 0;
+
+out_task:
+       svc_exit_thread(nlmsvc_rqst);
+       nlmsvc_task = NULL;
+out_rqst:
+       nlmsvc_rqst = NULL;
+       return error;
+}
+
+static struct svc_serv *lockd_create_svc(void)
 {
        struct svc_serv *serv;
-       int             error = 0;
-       struct net *net = current->nsproxy->net_ns;
 
-       mutex_lock(&nlmsvc_mutex);
        /*
         * Check whether we're already up and running.
         */
        if (nlmsvc_rqst) {
-               error = lockd_up_net(net);
-               goto out;
+               /*
+                * Note: increase service usage, because later in case of error
+                * svc_destroy() will be called.
+                */
+               svc_get(nlmsvc_rqst->rq_server);
+               return nlmsvc_rqst->rq_server;
        }
 
        /*
@@ -318,59 +357,53 @@ int lockd_up(void)
                printk(KERN_WARNING
                        "lockd_up: no pid, %d users??\n", nlmsvc_users);
 
-       error = -ENOMEM;
        serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
        if (!serv) {
                printk(KERN_WARNING "lockd_up: create service failed\n");
-               goto out;
+               return ERR_PTR(-ENOMEM);
        }
+       dprintk("lockd_up: service created\n");
+       return serv;
+}
 
-       error = make_socks(serv, net);
-       if (error < 0)
-               goto destroy_and_out;
+/*
+ * Bring up the lockd process if it's not already up.
+ */
+int lockd_up(struct net *net)
+{
+       struct svc_serv *serv;
+       int error;
 
-       /*
-        * Create the kernel thread and wait for it to start.
-        */
-       nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
-       if (IS_ERR(nlmsvc_rqst)) {
-               error = PTR_ERR(nlmsvc_rqst);
-               nlmsvc_rqst = NULL;
-               printk(KERN_WARNING
-                       "lockd_up: svc_rqst allocation failed, error=%d\n",
-                       error);
-               goto destroy_and_out;
+       mutex_lock(&nlmsvc_mutex);
+
+       serv = lockd_create_svc();
+       if (IS_ERR(serv)) {
+               error = PTR_ERR(serv);
+               goto err_create;
        }
 
-       svc_sock_update_bufs(serv);
-       serv->sv_maxconn = nlm_max_connections;
+       error = lockd_up_net(serv, net);
+       if (error < 0)
+               goto err_net;
 
-       nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
-       if (IS_ERR(nlmsvc_task)) {
-               error = PTR_ERR(nlmsvc_task);
-               svc_exit_thread(nlmsvc_rqst);
-               nlmsvc_task = NULL;
-               nlmsvc_rqst = NULL;
-               printk(KERN_WARNING
-                       "lockd_up: kthread_run failed, error=%d\n", error);
-               goto destroy_and_out;
-       }
+       error = lockd_start_svc(serv);
+       if (error < 0)
+               goto err_start;
 
+       nlmsvc_users++;
        /*
         * Note: svc_serv structures have an initial use count of 1,
         * so we exit through here on both success and failure.
         */
-destroy_and_out:
+err_net:
        svc_destroy(serv);
-out:
-       if (!error) {
-               struct lockd_net *ln = net_generic(net, lockd_net_id);
-
-               ln->nlmsvc_users++;
-               nlmsvc_users++;
-       }
+err_create:
        mutex_unlock(&nlmsvc_mutex);
        return error;
+
+err_start:
+       lockd_down_net(serv, net);
+       goto err_net;
 }
 EXPORT_SYMBOL_GPL(lockd_up);
 
@@ -378,14 +411,13 @@ EXPORT_SYMBOL_GPL(lockd_up);
  * Decrement the user count and bring down lockd if we're the last.
  */
 void
-lockd_down(void)
+lockd_down(struct net *net)
 {
        mutex_lock(&nlmsvc_mutex);
+       lockd_down_net(nlmsvc_rqst->rq_server, net);
        if (nlmsvc_users) {
-               if (--nlmsvc_users) {
-                       lockd_down_net(current->nsproxy->net_ns);
+               if (--nlmsvc_users)
                        goto out;
-               }
        } else {
                printk(KERN_ERR "lockd_down: no users! task=%p\n",
                        nlmsvc_task);
@@ -397,7 +429,9 @@ lockd_down(void)
                BUG();
        }
        kthread_stop(nlmsvc_task);
+       dprintk("lockd_down: service stopped\n");
        svc_exit_thread(nlmsvc_rqst);
+       dprintk("lockd_down: service destroyed\n");
        nlmsvc_task = NULL;
        nlmsvc_rqst = NULL;
 out:
index 4f441e46cef47bc67b08a3e82b78f389dfbbf818..814c51d0de4739e4b89e9091e00c17f284c0e2ba 100644 (file)
@@ -1636,12 +1636,13 @@ EXPORT_SYMBOL(flock_lock_file_wait);
 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
 {
        struct file *filp;
+       int fput_needed;
        struct file_lock *lock;
        int can_sleep, unlock;
        int error;
 
        error = -EBADF;
-       filp = fget(fd);
+       filp = fget_light(fd, &fput_needed);
        if (!filp)
                goto out;
 
@@ -1674,7 +1675,7 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
        locks_free_lock(lock);
 
  out_putf:
-       fput(filp);
+       fput_light(filp, fput_needed);
  out:
        return error;
 }
index e3ab5e5a904c23a238fdda580447988efc0941ce..f1cb512c5019dacf057391ed857a9697f0b98422 100644 (file)
@@ -2175,7 +2175,7 @@ void logfs_evict_inode(struct inode *inode)
                }
        }
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
 
        /* Cheaper version of write_inode.  All changes are concealed in
         * aliases, which are moved back.  No write to the medium happens.
index fcb05d2c6b5f99cd4a26031668fe5e06de915d8c..2a503ad020d5da4bd1de5442e306f5b98a0b02d9 100644 (file)
@@ -32,7 +32,7 @@ static void minix_evict_inode(struct inode *inode)
                minix_truncate(inode);
        }
        invalidate_inode_buffers(inode);
-       end_writeback(inode);
+       clear_inode(inode);
        if (!inode->i_nlink)
                minix_free_inode(inode);
 }
index 93ff12b1a1de0e64375e38152fa9bffc24842761..7d694194024ac4d2459e7cc3d60014bdff64e3ba 100644 (file)
@@ -449,7 +449,7 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
        mntget(nd->path.mnt);
 
        rcu_read_unlock();
-       br_read_unlock(vfsmount_lock);
+       br_read_unlock(&vfsmount_lock);
        nd->flags &= ~LOOKUP_RCU;
        return 0;
 
@@ -507,14 +507,14 @@ static int complete_walk(struct nameidata *nd)
                if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
                        spin_unlock(&dentry->d_lock);
                        rcu_read_unlock();
-                       br_read_unlock(vfsmount_lock);
+                       br_read_unlock(&vfsmount_lock);
                        return -ECHILD;
                }
                BUG_ON(nd->inode != dentry->d_inode);
                spin_unlock(&dentry->d_lock);
                mntget(nd->path.mnt);
                rcu_read_unlock();
-               br_read_unlock(vfsmount_lock);
+               br_read_unlock(&vfsmount_lock);
        }
 
        if (likely(!(nd->flags & LOOKUP_JUMPED)))
@@ -681,15 +681,15 @@ int follow_up(struct path *path)
        struct mount *parent;
        struct dentry *mountpoint;
 
-       br_read_lock(vfsmount_lock);
+       br_read_lock(&vfsmount_lock);
        parent = mnt->mnt_parent;
        if (&parent->mnt == path->mnt) {
-               br_read_unlock(vfsmount_lock);
+               br_read_unlock(&vfsmount_lock);
                return 0;
        }
        mntget(&parent->mnt);
        mountpoint = dget(mnt->mnt_mountpoint);
-       br_read_unlock(vfsmount_lock);
+       br_read_unlock(&vfsmount_lock);
        dput(path->dentry);
        path->dentry = mountpoint;
        mntput(path->mnt);
@@ -947,7 +947,7 @@ failed:
        if (!(nd->flags & LOOKUP_ROOT))
                nd->root.mnt = NULL;
        rcu_read_unlock();
-       br_read_unlock(vfsmount_lock);
+       br_read_unlock(&vfsmount_lock);
        return -ECHILD;
 }
 
@@ -1125,8 +1125,8 @@ static struct dentry *__lookup_hash(struct qstr *name,
  *  small and for now I'd prefer to have fast path as straight as possible.
  *  It _is_ time-critical.
  */
-static int do_lookup(struct nameidata *nd, struct qstr *name,
-                       struct path *path, struct inode **inode)
+static int lookup_fast(struct nameidata *nd, struct qstr *name,
+                      struct path *path, struct inode **inode)
 {
        struct vfsmount *mnt = nd->path.mnt;
        struct dentry *dentry, *parent = nd->path.dentry;
@@ -1208,7 +1208,7 @@ unlazy:
                        goto need_lookup;
                }
        }
-done:
+
        path->mnt = mnt;
        path->dentry = dentry;
        err = follow_managed(path, nd->flags);
@@ -1222,6 +1222,17 @@ done:
        return 0;
 
 need_lookup:
+       return 1;
+}
+
+/* Fast lookup failed, do it the slow way */
+static int lookup_slow(struct nameidata *nd, struct qstr *name,
+                      struct path *path)
+{
+       struct dentry *dentry, *parent;
+       int err;
+
+       parent = nd->path.dentry;
        BUG_ON(nd->inode != parent->d_inode);
 
        mutex_lock(&parent->d_inode->i_mutex);
@@ -1229,7 +1240,16 @@ need_lookup:
        mutex_unlock(&parent->d_inode->i_mutex);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);
-       goto done;
+       path->mnt = nd->path.mnt;
+       path->dentry = dentry;
+       err = follow_managed(path, nd->flags);
+       if (unlikely(err < 0)) {
+               path_put_conditional(path, nd);
+               return err;
+       }
+       if (err)
+               nd->flags |= LOOKUP_JUMPED;
+       return 0;
 }
 
 static inline int may_lookup(struct nameidata *nd)
@@ -1265,7 +1285,7 @@ static void terminate_walk(struct nameidata *nd)
                if (!(nd->flags & LOOKUP_ROOT))
                        nd->root.mnt = NULL;
                rcu_read_unlock();
-               br_read_unlock(vfsmount_lock);
+               br_read_unlock(&vfsmount_lock);
        }
 }
 
@@ -1301,21 +1321,26 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
         */
        if (unlikely(type != LAST_NORM))
                return handle_dots(nd, type);
-       err = do_lookup(nd, name, path, &inode);
+       err = lookup_fast(nd, name, path, &inode);
        if (unlikely(err)) {
-               terminate_walk(nd);
-               return err;
-       }
-       if (!inode) {
-               path_to_nameidata(path, nd);
-               terminate_walk(nd);
-               return -ENOENT;
+               if (err < 0)
+                       goto out_err;
+
+               err = lookup_slow(nd, name, path);
+               if (err < 0)
+                       goto out_err;
+
+               inode = path->dentry->d_inode;
        }
+       err = -ENOENT;
+       if (!inode)
+               goto out_path_put;
+
        if (should_follow_link(inode, follow)) {
                if (nd->flags & LOOKUP_RCU) {
                        if (unlikely(unlazy_walk(nd, path->dentry))) {
-                               terminate_walk(nd);
-                               return -ECHILD;
+                               err = -ECHILD;
+                               goto out_err;
                        }
                }
                BUG_ON(inode != path->dentry->d_inode);
@@ -1324,6 +1349,12 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
        path_to_nameidata(path, nd);
        nd->inode = inode;
        return 0;
+
+out_path_put:
+       path_to_nameidata(path, nd);
+out_err:
+       terminate_walk(nd);
+       return err;
 }
 
 /*
@@ -1452,7 +1483,8 @@ EXPORT_SYMBOL(full_name_hash);
  */
 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
 {
-       unsigned long a, mask, hash, len;
+       unsigned long a, b, adata, bdata, mask, hash, len;
+       const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 
        hash = a = 0;
        len = -sizeof(unsigned long);
@@ -1460,17 +1492,18 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp)
                hash = (hash + a) * 9;
                len += sizeof(unsigned long);
                a = load_unaligned_zeropad(name+len);
-               /* Do we have any NUL or '/' bytes in this word? */
-               mask = has_zero(a) | has_zero(a ^ REPEAT_BYTE('/'));
-       } while (!mask);
-
-       /* The mask *below* the first high bit set */
-       mask = (mask - 1) & ~mask;
-       mask >>= 7;
-       hash += a & mask;
+               b = a ^ REPEAT_BYTE('/');
+       } while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
+
+       adata = prep_zero_mask(a, adata, &constants);
+       bdata = prep_zero_mask(b, bdata, &constants);
+
+       mask = create_zero_mask(adata | bdata);
+
+       hash += a & zero_bytemask(mask);
        *hashp = fold_hash(hash);
 
-       return len + count_masked_bytes(mask);
+       return len + find_zero(mask);
 }
 
 #else
@@ -1618,7 +1651,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
                nd->path = nd->root;
                nd->inode = inode;
                if (flags & LOOKUP_RCU) {
-                       br_read_lock(vfsmount_lock);
+                       br_read_lock(&vfsmount_lock);
                        rcu_read_lock();
                        nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
                } else {
@@ -1631,7 +1664,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
 
        if (*name=='/') {
                if (flags & LOOKUP_RCU) {
-                       br_read_lock(vfsmount_lock);
+                       br_read_lock(&vfsmount_lock);
                        rcu_read_lock();
                        set_root_rcu(nd);
                } else {
@@ -1644,7 +1677,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
                        struct fs_struct *fs = current->fs;
                        unsigned seq;
 
-                       br_read_lock(vfsmount_lock);
+                       br_read_lock(&vfsmount_lock);
                        rcu_read_lock();
 
                        do {
@@ -1680,7 +1713,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
                        if (fput_needed)
                                *fp = file;
                        nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
-                       br_read_lock(vfsmount_lock);
+                       br_read_lock(&vfsmount_lock);
                        rcu_read_lock();
                } else {
                        path_get(&file->f_path);
@@ -2167,6 +2200,10 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
        int want_write = 0;
        int acc_mode = op->acc_mode;
        struct file *filp;
+       struct inode *inode;
+       int symlink_ok = 0;
+       struct path save_parent = { .dentry = NULL, .mnt = NULL };
+       bool retried = false;
        int error;
 
        nd->flags &= ~LOOKUP_PARENT;
@@ -2198,30 +2235,23 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
        }
 
        if (!(open_flag & O_CREAT)) {
-               int symlink_ok = 0;
                if (nd->last.name[nd->last.len])
                        nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
                if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
                        symlink_ok = 1;
                /* we _can_ be in RCU mode here */
-               error = walk_component(nd, path, &nd->last, LAST_NORM,
-                                       !symlink_ok);
-               if (error < 0)
-                       return ERR_PTR(error);
-               if (error) /* symlink */
-                       return NULL;
-               /* sayonara */
-               error = complete_walk(nd);
-               if (error)
-                       return ERR_PTR(error);
+               error = lookup_fast(nd, &nd->last, path, &inode);
+               if (unlikely(error)) {
+                       if (error < 0)
+                               goto exit;
 
-               error = -ENOTDIR;
-               if (nd->flags & LOOKUP_DIRECTORY) {
-                       if (!nd->inode->i_op->lookup)
+                       error = lookup_slow(nd, &nd->last, path);
+                       if (error < 0)
                                goto exit;
+
+                       inode = path->dentry->d_inode;
                }
-               audit_inode(pathname, nd->path.dentry);
-               goto ok;
+               goto finish_lookup;
        }
 
        /* create side of things */
@@ -2239,6 +2269,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
        if (nd->last.name[nd->last.len])
                goto exit;
 
+retry_lookup:
        mutex_lock(&dir->d_inode->i_mutex);
 
        dentry = lookup_hash(nd);
@@ -2300,22 +2331,49 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
        if (error)
                nd->flags |= LOOKUP_JUMPED;
 
+       BUG_ON(nd->flags & LOOKUP_RCU);
+       inode = path->dentry->d_inode;
+finish_lookup:
+       /* we _can_ be in RCU mode here */
        error = -ENOENT;
-       if (!path->dentry->d_inode)
-               goto exit_dput;
+       if (!inode) {
+               path_to_nameidata(path, nd);
+               goto exit;
+       }
 
-       if (path->dentry->d_inode->i_op->follow_link)
+       if (should_follow_link(inode, !symlink_ok)) {
+               if (nd->flags & LOOKUP_RCU) {
+                       if (unlikely(unlazy_walk(nd, path->dentry))) {
+                               error = -ECHILD;
+                               goto exit;
+                       }
+               }
+               BUG_ON(inode != path->dentry->d_inode);
                return NULL;
+       }
 
-       path_to_nameidata(path, nd);
-       nd->inode = path->dentry->d_inode;
+       if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path->mnt) {
+               path_to_nameidata(path, nd);
+       } else {
+               save_parent.dentry = nd->path.dentry;
+               save_parent.mnt = mntget(path->mnt);
+               nd->path.dentry = path->dentry;
+
+       }
+       nd->inode = inode;
        /* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
        error = complete_walk(nd);
-       if (error)
+       if (error) {
+               path_put(&save_parent);
                return ERR_PTR(error);
+       }
        error = -EISDIR;
-       if (S_ISDIR(nd->inode->i_mode))
+       if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
+               goto exit;
+       error = -ENOTDIR;
+       if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup)
                goto exit;
+       audit_inode(pathname, nd->path.dentry);
 ok:
        if (!S_ISREG(nd->inode->i_mode))
                will_truncate = 0;
@@ -2331,6 +2389,20 @@ common:
        if (error)
                goto exit;
        filp = nameidata_to_filp(nd);
+       if (filp == ERR_PTR(-EOPENSTALE) && save_parent.dentry && !retried) {
+               BUG_ON(save_parent.dentry != dir);
+               path_put(&nd->path);
+               nd->path = save_parent;
+               nd->inode = dir->d_inode;
+               save_parent.mnt = NULL;
+               save_parent.dentry = NULL;
+               if (want_write) {
+                       mnt_drop_write(nd->path.mnt);
+                       want_write = 0;
+               }
+               retried = true;
+               goto retry_lookup;
+       }
        if (!IS_ERR(filp)) {
                error = ima_file_check(filp, op->acc_mode);
                if (error) {
@@ -2350,7 +2422,8 @@ common:
 out:
        if (want_write)
                mnt_drop_write(nd->path.mnt);
-       path_put(&nd->path);
+       path_put(&save_parent);
+       terminate_walk(nd);
        return filp;
 
 exit_mutex_unlock:
@@ -2413,6 +2486,12 @@ out:
        if (base)
                fput(base);
        release_open_intent(nd);
+       if (filp == ERR_PTR(-EOPENSTALE)) {
+               if (flags & LOOKUP_RCU)
+                       filp = ERR_PTR(-ECHILD);
+               else
+                       filp = ERR_PTR(-ESTALE);
+       }
        return filp;
 
 out_filp:
index e6081996c9a2f9d26525740545445630c4737583..1e4a5fe3d7b7f789d66839f37b1f917c1fa3e2ba 100644 (file)
@@ -397,7 +397,7 @@ static int mnt_make_readonly(struct mount *mnt)
 {
        int ret = 0;
 
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
        /*
         * After storing MNT_WRITE_HOLD, we'll read the counters. This store
@@ -431,15 +431,15 @@ static int mnt_make_readonly(struct mount *mnt)
         */
        smp_wmb();
        mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        return ret;
 }
 
 static void __mnt_unmake_readonly(struct mount *mnt)
 {
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        mnt->mnt.mnt_flags &= ~MNT_READONLY;
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 }
 
 int sb_prepare_remount_readonly(struct super_block *sb)
@@ -451,7 +451,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
        if (atomic_long_read(&sb->s_remove_count))
                return -EBUSY;
 
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
                if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
                        mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
@@ -473,7 +473,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
                if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
                        mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
        }
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 
        return err;
 }
@@ -522,14 +522,14 @@ struct vfsmount *lookup_mnt(struct path *path)
 {
        struct mount *child_mnt;
 
-       br_read_lock(vfsmount_lock);
+       br_read_lock(&vfsmount_lock);
        child_mnt = __lookup_mnt(path->mnt, path->dentry, 1);
        if (child_mnt) {
                mnt_add_count(child_mnt, 1);
-               br_read_unlock(vfsmount_lock);
+               br_read_unlock(&vfsmount_lock);
                return &child_mnt->mnt;
        } else {
-               br_read_unlock(vfsmount_lock);
+               br_read_unlock(&vfsmount_lock);
                return NULL;
        }
 }
@@ -714,9 +714,9 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
        mnt->mnt.mnt_sb = root->d_sb;
        mnt->mnt_mountpoint = mnt->mnt.mnt_root;
        mnt->mnt_parent = mnt;
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        return &mnt->mnt;
 }
 EXPORT_SYMBOL_GPL(vfs_kern_mount);
@@ -745,9 +745,9 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
                mnt->mnt.mnt_root = dget(root);
                mnt->mnt_mountpoint = mnt->mnt.mnt_root;
                mnt->mnt_parent = mnt;
-               br_write_lock(vfsmount_lock);
+               br_write_lock(&vfsmount_lock);
                list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
 
                if (flag & CL_SLAVE) {
                        list_add(&mnt->mnt_slave, &old->mnt_slave_list);
@@ -803,35 +803,36 @@ static void mntput_no_expire(struct mount *mnt)
 {
 put_again:
 #ifdef CONFIG_SMP
-       br_read_lock(vfsmount_lock);
+       br_read_lock(&vfsmount_lock);
        if (likely(atomic_read(&mnt->mnt_longterm))) {
                mnt_add_count(mnt, -1);
-               br_read_unlock(vfsmount_lock);
+               br_read_unlock(&vfsmount_lock);
                return;
        }
-       br_read_unlock(vfsmount_lock);
+       br_read_unlock(&vfsmount_lock);
 
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        mnt_add_count(mnt, -1);
        if (mnt_get_count(mnt)) {
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
                return;
        }
 #else
        mnt_add_count(mnt, -1);
        if (likely(mnt_get_count(mnt)))
                return;
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
 #endif
        if (unlikely(mnt->mnt_pinned)) {
                mnt_add_count(mnt, mnt->mnt_pinned + 1);
                mnt->mnt_pinned = 0;
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
                acct_auto_close_mnt(&mnt->mnt);
                goto put_again;
        }
+
        list_del(&mnt->mnt_instance);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        mntfree(mnt);
 }
 
@@ -857,21 +858,21 @@ EXPORT_SYMBOL(mntget);
 
 void mnt_pin(struct vfsmount *mnt)
 {
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        real_mount(mnt)->mnt_pinned++;
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 }
 EXPORT_SYMBOL(mnt_pin);
 
 void mnt_unpin(struct vfsmount *m)
 {
        struct mount *mnt = real_mount(m);
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        if (mnt->mnt_pinned) {
                mnt_add_count(mnt, 1);
                mnt->mnt_pinned--;
        }
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 }
 EXPORT_SYMBOL(mnt_unpin);
 
@@ -988,12 +989,12 @@ int may_umount_tree(struct vfsmount *m)
        BUG_ON(!m);
 
        /* write lock needed for mnt_get_count */
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        for (p = mnt; p; p = next_mnt(p, mnt)) {
                actual_refs += mnt_get_count(p);
                minimum_refs += 2;
        }
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 
        if (actual_refs > minimum_refs)
                return 0;
@@ -1020,10 +1021,10 @@ int may_umount(struct vfsmount *mnt)
 {
        int ret = 1;
        down_read(&namespace_sem);
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        if (propagate_mount_busy(real_mount(mnt), 2))
                ret = 0;
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        up_read(&namespace_sem);
        return ret;
 }
@@ -1040,13 +1041,13 @@ void release_mounts(struct list_head *head)
                        struct dentry *dentry;
                        struct mount *m;
 
-                       br_write_lock(vfsmount_lock);
+                       br_write_lock(&vfsmount_lock);
                        dentry = mnt->mnt_mountpoint;
                        m = mnt->mnt_parent;
                        mnt->mnt_mountpoint = mnt->mnt.mnt_root;
                        mnt->mnt_parent = mnt;
                        m->mnt_ghosts--;
-                       br_write_unlock(vfsmount_lock);
+                       br_write_unlock(&vfsmount_lock);
                        dput(dentry);
                        mntput(&m->mnt);
                }
@@ -1073,8 +1074,9 @@ void umount_tree(struct mount *mnt, int propagate, struct list_head *kill)
                list_del_init(&p->mnt_expire);
                list_del_init(&p->mnt_list);
                __touch_mnt_namespace(p->mnt_ns);
+               if (p->mnt_ns)
+                       __mnt_make_shortterm(p);
                p->mnt_ns = NULL;
-               __mnt_make_shortterm(p);
                list_del_init(&p->mnt_child);
                if (mnt_has_parent(p)) {
                        p->mnt_parent->mnt_ghosts++;
@@ -1112,12 +1114,12 @@ static int do_umount(struct mount *mnt, int flags)
                 * probably don't strictly need the lock here if we examined
                 * all race cases, but it's a slowpath.
                 */
-               br_write_lock(vfsmount_lock);
+               br_write_lock(&vfsmount_lock);
                if (mnt_get_count(mnt) != 2) {
-                       br_write_unlock(vfsmount_lock);
+                       br_write_unlock(&vfsmount_lock);
                        return -EBUSY;
                }
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
 
                if (!xchg(&mnt->mnt_expiry_mark, 1))
                        return -EAGAIN;
@@ -1159,7 +1161,7 @@ static int do_umount(struct mount *mnt, int flags)
        }
 
        down_write(&namespace_sem);
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        event++;
 
        if (!(flags & MNT_DETACH))
@@ -1171,7 +1173,7 @@ static int do_umount(struct mount *mnt, int flags)
                        umount_tree(mnt, 1, &umount_list);
                retval = 0;
        }
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
        return retval;
@@ -1286,19 +1288,19 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
                        q = clone_mnt(p, p->mnt.mnt_root, flag);
                        if (!q)
                                goto Enomem;
-                       br_write_lock(vfsmount_lock);
+                       br_write_lock(&vfsmount_lock);
                        list_add_tail(&q->mnt_list, &res->mnt_list);
                        attach_mnt(q, &path);
-                       br_write_unlock(vfsmount_lock);
+                       br_write_unlock(&vfsmount_lock);
                }
        }
        return res;
 Enomem:
        if (res) {
                LIST_HEAD(umount_list);
-               br_write_lock(vfsmount_lock);
+               br_write_lock(&vfsmount_lock);
                umount_tree(res, 0, &umount_list);
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
                release_mounts(&umount_list);
        }
        return NULL;
@@ -1318,9 +1320,9 @@ void drop_collected_mounts(struct vfsmount *mnt)
 {
        LIST_HEAD(umount_list);
        down_write(&namespace_sem);
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        umount_tree(real_mount(mnt), 0, &umount_list);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
 }
@@ -1448,7 +1450,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
        if (err)
                goto out_cleanup_ids;
 
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
 
        if (IS_MNT_SHARED(dest_mnt)) {
                for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1467,7 +1469,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                list_del_init(&child->mnt_hash);
                commit_tree(child);
        }
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 
        return 0;
 
@@ -1565,10 +1567,10 @@ static int do_change_type(struct path *path, int flag)
                        goto out_unlock;
        }
 
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
                change_mnt_propagation(m, type);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 
  out_unlock:
        up_write(&namespace_sem);
@@ -1617,9 +1619,9 @@ static int do_loopback(struct path *path, char *old_name,
 
        err = graft_tree(mnt, path);
        if (err) {
-               br_write_lock(vfsmount_lock);
+               br_write_lock(&vfsmount_lock);
                umount_tree(mnt, 0, &umount_list);
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
        }
 out2:
        unlock_mount(path);
@@ -1677,16 +1679,16 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
        else
                err = do_remount_sb(sb, flags, data, 0);
        if (!err) {
-               br_write_lock(vfsmount_lock);
+               br_write_lock(&vfsmount_lock);
                mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
                mnt->mnt.mnt_flags = mnt_flags;
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
        }
        up_write(&sb->s_umount);
        if (!err) {
-               br_write_lock(vfsmount_lock);
+               br_write_lock(&vfsmount_lock);
                touch_mnt_namespace(mnt->mnt_ns);
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
        }
        return err;
 }
@@ -1893,9 +1895,9 @@ fail:
        /* remove m from any expiration list it may be on */
        if (!list_empty(&mnt->mnt_expire)) {
                down_write(&namespace_sem);
-               br_write_lock(vfsmount_lock);
+               br_write_lock(&vfsmount_lock);
                list_del_init(&mnt->mnt_expire);
-               br_write_unlock(vfsmount_lock);
+               br_write_unlock(&vfsmount_lock);
                up_write(&namespace_sem);
        }
        mntput(m);
@@ -1911,11 +1913,11 @@ fail:
 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
 {
        down_write(&namespace_sem);
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
 
        list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
 
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        up_write(&namespace_sem);
 }
 EXPORT_SYMBOL(mnt_set_expiry);
@@ -1935,7 +1937,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
                return;
 
        down_write(&namespace_sem);
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
 
        /* extract from the expiration list every vfsmount that matches the
         * following criteria:
@@ -1954,7 +1956,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
                touch_mnt_namespace(mnt->mnt_ns);
                umount_tree(mnt, 1, &umounts);
        }
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        up_write(&namespace_sem);
 
        release_mounts(&umounts);
@@ -2218,9 +2220,9 @@ void mnt_make_shortterm(struct vfsmount *m)
        struct mount *mnt = real_mount(m);
        if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
                return;
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        atomic_dec(&mnt->mnt_longterm);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 #endif
 }
 
@@ -2250,9 +2252,9 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
                return ERR_PTR(-ENOMEM);
        }
        new_ns->root = new;
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        list_add_tail(&new_ns->list, &new->mnt_list);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
 
        /*
         * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2416,9 +2418,9 @@ bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
 int path_is_under(struct path *path1, struct path *path2)
 {
        int res;
-       br_read_lock(vfsmount_lock);
+       br_read_lock(&vfsmount_lock);
        res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
-       br_read_unlock(vfsmount_lock);
+       br_read_unlock(&vfsmount_lock);
        return res;
 }
 EXPORT_SYMBOL(path_is_under);
@@ -2505,7 +2507,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
        /* make sure we can reach put_old from new_root */
        if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new))
                goto out4;
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        detach_mnt(new_mnt, &parent_path);
        detach_mnt(root_mnt, &root_parent);
        /* mount old root on put_old */
@@ -2513,7 +2515,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
        /* mount new_root on / */
        attach_mnt(new_mnt, &root_parent);
        touch_mnt_namespace(current->nsproxy->mnt_ns);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        chroot_fs_refs(&root, &new);
        error = 0;
 out4:
@@ -2576,7 +2578,7 @@ void __init mnt_init(void)
        for (u = 0; u < HASH_SIZE; u++)
                INIT_LIST_HEAD(&mount_hashtable[u]);
 
-       br_lock_init(vfsmount_lock);
+       br_lock_init(&vfsmount_lock);
 
        err = sysfs_init();
        if (err)
@@ -2596,9 +2598,9 @@ void put_mnt_ns(struct mnt_namespace *ns)
        if (!atomic_dec_and_test(&ns->count))
                return;
        down_write(&namespace_sem);
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        umount_tree(ns->root, 0, &umount_list);
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
        kfree(ns);
index 3ff5fcc1528fd21ae18a7a240ec9f2920ec30d32..122e260247f53c663550073fda567a4342b0ba63 100644 (file)
@@ -221,6 +221,10 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
 
        already_written = 0;
 
+       errno = file_update_time(file);
+       if (errno)
+               goto outrel;
+
        bouncebuffer = vmalloc(bufsize);
        if (!bouncebuffer) {
                errno = -EIO;   /* -ENOMEM */
@@ -252,8 +256,6 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
        }
        vfree(bouncebuffer);
 
-       file_update_time(file);
-
        *ppos = pos;
 
        if (pos > i_size_read(inode)) {
index 87484fb8d1773b07e42c6ec63a7ec50533fd0b60..333df07ae3bd2387e0425fd2523de5e95b05667b 100644 (file)
@@ -292,7 +292,7 @@ static void
 ncp_evict_inode(struct inode *inode)
 {
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
 
        if (S_ISDIR(inode->i_mode)) {
                DDPRINTK("ncp_evict_inode: put directory %ld\n", inode->i_ino);
index 4af803f13516c98deaf7372af2dda0499e329fe6..54cc0cdb3dcbda111e24a3a67e7953e5173dd07e 100644 (file)
@@ -23,17 +23,17 @@ struct ncp_mount_data_kernel {
        unsigned long    flags;         /* NCP_MOUNT_* flags */
        unsigned int     int_flags;     /* internal flags */
 #define NCP_IMOUNT_LOGGEDIN_POSSIBLE   0x0001
-       __kernel_uid32_t mounted_uid;   /* Who may umount() this filesystem? */
+       uid_t            mounted_uid;   /* Who may umount() this filesystem? */
        struct pid      *wdog_pid;      /* Who cares for our watchdog packets? */
        unsigned int     ncp_fd;        /* The socket to the ncp port */
        unsigned int     time_out;      /* How long should I wait after
                                           sending a NCP request? */
        unsigned int     retry_count;   /* And how often should I retry? */
        unsigned char    mounted_vol[NCP_VOLNAME_LEN + 1];
-       __kernel_uid32_t uid;
-       __kernel_gid32_t gid;
-       __kernel_mode_t  file_mode;
-       __kernel_mode_t  dir_mode;
+       uid_t            uid;
+       gid_t            gid;
+       umode_t          file_mode;
+       umode_t          dir_mode;
        int              info_fd;
 };
 
index 2a0e6c599147aac9e66a5969c9c00593aa0dd380..f90f4f5cd421dc7be9db2151ead1d8d458acf096 100644 (file)
@@ -29,9 +29,20 @@ config NFS_FS
 
          If unsure, say N.
 
+config NFS_V2
+       bool "NFS client support for NFS version 2"
+       depends on NFS_FS
+       default y
+       help
+         This option enables support for version 2 of the NFS protocol
+         (RFC 1094) in the kernel's NFS client.
+
+         If unsure, say Y.
+
 config NFS_V3
        bool "NFS client support for NFS version 3"
        depends on NFS_FS
+       default y
        help
          This option enables support for version 3 of the NFS protocol
          (RFC 1813) in the kernel's NFS client.
index b58613d0abb3f62eaa3bb29fd4b8141ebdb596d6..7ddd45d9f1707d24a7661a402d95f58aef108c03 100644 (file)
@@ -4,11 +4,12 @@
 
 obj-$(CONFIG_NFS_FS) += nfs.o
 
-nfs-y                  := client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
-                          direct.o pagelist.o proc.o read.o symlink.o unlink.o \
+nfs-y                  := client.o dir.o file.o getroot.o inode.o super.o \
+                          direct.o pagelist.o read.o symlink.o unlink.o \
                           write.o namespace.o mount_clnt.o \
                           dns_resolve.o cache_lib.o
 nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
+nfs-$(CONFIG_NFS_V2)   += proc.o nfs2xdr.o
 nfs-$(CONFIG_NFS_V3)   += nfs3proc.o nfs3xdr.o
 nfs-$(CONFIG_NFS_V3_ACL)       += nfs3acl.o
 nfs-$(CONFIG_NFS_V4)   += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
index 7f6a23f0244e7340f2861ac6fb0d3a2d20721287..7ae8a608956f60d7343fdfa9e77be7532b2db17d 100644 (file)
@@ -187,7 +187,6 @@ static void bl_end_io_read(struct bio *bio, int err)
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-       struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;
 
        do {
                struct page *page = bvec->bv_page;
@@ -198,9 +197,12 @@ static void bl_end_io_read(struct bio *bio, int err)
                        SetPageUptodate(page);
        } while (bvec >= bio->bi_io_vec);
        if (!uptodate) {
-               if (!rdata->pnfs_error)
-                       rdata->pnfs_error = -EIO;
-               pnfs_set_lo_fail(rdata->lseg);
+               struct nfs_read_data *rdata = par->data;
+               struct nfs_pgio_header *header = rdata->header;
+
+               if (!header->pnfs_error)
+                       header->pnfs_error = -EIO;
+               pnfs_set_lo_fail(header->lseg);
        }
        bio_put(bio);
        put_parallel(par);
@@ -221,7 +223,7 @@ bl_end_par_io_read(void *data, int unused)
 {
        struct nfs_read_data *rdata = data;
 
-       rdata->task.tk_status = rdata->pnfs_error;
+       rdata->task.tk_status = rdata->header->pnfs_error;
        INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
        schedule_work(&rdata->task.u.tk_work);
 }
@@ -229,6 +231,7 @@ bl_end_par_io_read(void *data, int unused)
 static enum pnfs_try_status
 bl_read_pagelist(struct nfs_read_data *rdata)
 {
+       struct nfs_pgio_header *header = rdata->header;
        int i, hole;
        struct bio *bio = NULL;
        struct pnfs_block_extent *be = NULL, *cow_read = NULL;
@@ -239,7 +242,7 @@ bl_read_pagelist(struct nfs_read_data *rdata)
        int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
 
        dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
-              rdata->npages, f_offset, (unsigned int)rdata->args.count);
+              rdata->pages.npages, f_offset, (unsigned int)rdata->args.count);
 
        par = alloc_parallel(rdata);
        if (!par)
@@ -249,17 +252,17 @@ bl_read_pagelist(struct nfs_read_data *rdata)
 
        isect = (sector_t) (f_offset >> SECTOR_SHIFT);
        /* Code assumes extents are page-aligned */
-       for (i = pg_index; i < rdata->npages; i++) {
+       for (i = pg_index; i < rdata->pages.npages; i++) {
                if (!extent_length) {
                        /* We've used up the previous extent */
                        bl_put_extent(be);
                        bl_put_extent(cow_read);
                        bio = bl_submit_bio(READ, bio);
                        /* Get the next one */
-                       be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
+                       be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
                                             isect, &cow_read);
                        if (!be) {
-                               rdata->pnfs_error = -EIO;
+                               header->pnfs_error = -EIO;
                                goto out;
                        }
                        extent_length = be->be_length -
@@ -282,11 +285,12 @@ bl_read_pagelist(struct nfs_read_data *rdata)
                        struct pnfs_block_extent *be_read;
 
                        be_read = (hole && cow_read) ? cow_read : be;
-                       bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
+                       bio = bl_add_page_to_bio(bio, rdata->pages.npages - i,
+                                                READ,
                                                 isect, pages[i], be_read,
                                                 bl_end_io_read, par);
                        if (IS_ERR(bio)) {
-                               rdata->pnfs_error = PTR_ERR(bio);
+                               header->pnfs_error = PTR_ERR(bio);
                                bio = NULL;
                                goto out;
                        }
@@ -294,9 +298,9 @@ bl_read_pagelist(struct nfs_read_data *rdata)
                isect += PAGE_CACHE_SECTORS;
                extent_length -= PAGE_CACHE_SECTORS;
        }
-       if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
+       if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
                rdata->res.eof = 1;
-               rdata->res.count = rdata->inode->i_size - f_offset;
+               rdata->res.count = header->inode->i_size - f_offset;
        } else {
                rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
        }
@@ -345,7 +349,6 @@ static void bl_end_io_write_zero(struct bio *bio, int err)
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-       struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
 
        do {
                struct page *page = bvec->bv_page;
@@ -358,9 +361,12 @@ static void bl_end_io_write_zero(struct bio *bio, int err)
        } while (bvec >= bio->bi_io_vec);
 
        if (unlikely(!uptodate)) {
-               if (!wdata->pnfs_error)
-                       wdata->pnfs_error = -EIO;
-               pnfs_set_lo_fail(wdata->lseg);
+               struct nfs_write_data *data = par->data;
+               struct nfs_pgio_header *header = data->header;
+
+               if (!header->pnfs_error)
+                       header->pnfs_error = -EIO;
+               pnfs_set_lo_fail(header->lseg);
        }
        bio_put(bio);
        put_parallel(par);
@@ -370,12 +376,13 @@ static void bl_end_io_write(struct bio *bio, int err)
 {
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
+       struct nfs_write_data *data = par->data;
+       struct nfs_pgio_header *header = data->header;
 
        if (!uptodate) {
-               if (!wdata->pnfs_error)
-                       wdata->pnfs_error = -EIO;
-               pnfs_set_lo_fail(wdata->lseg);
+               if (!header->pnfs_error)
+                       header->pnfs_error = -EIO;
+               pnfs_set_lo_fail(header->lseg);
        }
        bio_put(bio);
        put_parallel(par);
@@ -391,9 +398,9 @@ static void bl_write_cleanup(struct work_struct *work)
        dprintk("%s enter\n", __func__);
        task = container_of(work, struct rpc_task, u.tk_work);
        wdata = container_of(task, struct nfs_write_data, task);
-       if (likely(!wdata->pnfs_error)) {
+       if (likely(!wdata->header->pnfs_error)) {
                /* Marks for LAYOUTCOMMIT */
-               mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
+               mark_extents_written(BLK_LSEG2EXT(wdata->header->lseg),
                                     wdata->args.offset, wdata->args.count);
        }
        pnfs_ld_write_done(wdata);
@@ -404,12 +411,12 @@ static void bl_end_par_io_write(void *data, int num_se)
 {
        struct nfs_write_data *wdata = data;
 
-       if (unlikely(wdata->pnfs_error)) {
-               bl_free_short_extents(&BLK_LSEG2EXT(wdata->lseg)->bl_inval,
+       if (unlikely(wdata->header->pnfs_error)) {
+               bl_free_short_extents(&BLK_LSEG2EXT(wdata->header->lseg)->bl_inval,
                                        num_se);
        }
 
-       wdata->task.tk_status = wdata->pnfs_error;
+       wdata->task.tk_status = wdata->header->pnfs_error;
        wdata->verf.committed = NFS_FILE_SYNC;
        INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
        schedule_work(&wdata->task.u.tk_work);
@@ -540,6 +547,7 @@ check_page:
 static enum pnfs_try_status
 bl_write_pagelist(struct nfs_write_data *wdata, int sync)
 {
+       struct nfs_pgio_header *header = wdata->header;
        int i, ret, npg_zero, pg_index, last = 0;
        struct bio *bio = NULL;
        struct pnfs_block_extent *be = NULL, *cow_read = NULL;
@@ -552,7 +560,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
        pgoff_t index;
        u64 temp;
        int npg_per_block =
-           NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
+           NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
 
        dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
        /* At this point, wdata->pages is a (sequential) list of nfs_pages.
@@ -566,7 +574,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
        /* At this point, have to be more careful with error handling */
 
        isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
-       be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
+       be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read);
        if (!be || !is_writable(be, isect)) {
                dprintk("%s no matching extents!\n", __func__);
                goto out_mds;
@@ -597,10 +605,10 @@ fill_invalid_ext:
                        dprintk("%s zero %dth page: index %lu isect %llu\n",
                                __func__, npg_zero, index,
                                (unsigned long long)isect);
-                       page = bl_find_get_zeroing_page(wdata->inode, index,
+                       page = bl_find_get_zeroing_page(header->inode, index,
                                                        cow_read);
                        if (unlikely(IS_ERR(page))) {
-                               wdata->pnfs_error = PTR_ERR(page);
+                               header->pnfs_error = PTR_ERR(page);
                                goto out;
                        } else if (page == NULL)
                                goto next_page;
@@ -612,7 +620,7 @@ fill_invalid_ext:
                                        __func__, ret);
                                end_page_writeback(page);
                                page_cache_release(page);
-                               wdata->pnfs_error = ret;
+                               header->pnfs_error = ret;
                                goto out;
                        }
                        if (likely(!bl_push_one_short_extent(be->be_inval)))
@@ -620,11 +628,11 @@ fill_invalid_ext:
                        else {
                                end_page_writeback(page);
                                page_cache_release(page);
-                               wdata->pnfs_error = -ENOMEM;
+                               header->pnfs_error = -ENOMEM;
                                goto out;
                        }
                        /* FIXME: This should be done in bi_end_io */
-                       mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
+                       mark_extents_written(BLK_LSEG2EXT(header->lseg),
                                             page->index << PAGE_CACHE_SHIFT,
                                             PAGE_CACHE_SIZE);
 
@@ -632,7 +640,7 @@ fill_invalid_ext:
                                                 isect, page, be,
                                                 bl_end_io_write_zero, par);
                        if (IS_ERR(bio)) {
-                               wdata->pnfs_error = PTR_ERR(bio);
+                               header->pnfs_error = PTR_ERR(bio);
                                bio = NULL;
                                goto out;
                        }
@@ -647,16 +655,16 @@ next_page:
 
        /* Middle pages */
        pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
-       for (i = pg_index; i < wdata->npages; i++) {
+       for (i = pg_index; i < wdata->pages.npages; i++) {
                if (!extent_length) {
                        /* We've used up the previous extent */
                        bl_put_extent(be);
                        bio = bl_submit_bio(WRITE, bio);
                        /* Get the next one */
-                       be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
+                       be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
                                             isect, NULL);
                        if (!be || !is_writable(be, isect)) {
-                               wdata->pnfs_error = -EINVAL;
+                               header->pnfs_error = -EINVAL;
                                goto out;
                        }
                        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
@@ -664,7 +672,7 @@ next_page:
                                                                be->be_inval)))
                                        par->bse_count++;
                                else {
-                                       wdata->pnfs_error = -ENOMEM;
+                                       header->pnfs_error = -ENOMEM;
                                        goto out;
                                }
                        }
@@ -677,15 +685,15 @@ next_page:
                        if (unlikely(ret)) {
                                dprintk("%s bl_mark_sectors_init fail %d\n",
                                        __func__, ret);
-                               wdata->pnfs_error = ret;
+                               header->pnfs_error = ret;
                                goto out;
                        }
                }
-               bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
+               bio = bl_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
                                         isect, pages[i], be,
                                         bl_end_io_write, par);
                if (IS_ERR(bio)) {
-                       wdata->pnfs_error = PTR_ERR(bio);
+                       header->pnfs_error = PTR_ERR(bio);
                        bio = NULL;
                        goto out;
                }
index a5c88a554d921455256bb4dbeea7eaa498da5499..c96554245ccf7d90703c80d9a3e3f81b48fa65ee 100644 (file)
@@ -123,7 +123,7 @@ nfs4_blk_decode_device(struct nfs_server *server,
        uint8_t *dataptr;
        DECLARE_WAITQUEUE(wq, current);
        int offset, len, i, rc;
-       struct net *net = server->nfs_client->net;
+       struct net *net = server->nfs_client->cl_net;
        struct nfs_net *nn = net_generic(net, nfs_net_id);
        struct bl_dev_msg *reply = &nn->bl_mount_reply;
 
index eb95f5091c1aff93930e17a829a808023edc2e12..970659daa323865a113d25075d461c50c7f7dc7c 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/kthread.h>
 #include <linux/sunrpc/svcauth_gss.h>
 #include <linux/sunrpc/bc_xprt.h>
+#include <linux/nsproxy.h>
 
 #include <net/inet_sock.h>
 
@@ -253,6 +254,7 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
        char svc_name[12];
        int ret = 0;
        int minorversion_setup;
+       struct net *net = current->nsproxy->net_ns;
 
        mutex_lock(&nfs_callback_mutex);
        if (cb_info->users++ || cb_info->task != NULL) {
@@ -265,6 +267,12 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
                goto out_err;
        }
 
+       ret = svc_bind(serv, net);
+       if (ret < 0) {
+               printk(KERN_WARNING "NFS: bind callback service failed\n");
+               goto out_err;
+       }
+
        minorversion_setup =  nfs_minorversion_callback_svc_setup(minorversion,
                                        serv, xprt, &rqstp, &callback_svc);
        if (!minorversion_setup) {
@@ -306,6 +314,8 @@ out_err:
        dprintk("NFS: Couldn't create callback socket or server thread; "
                "err = %d\n", ret);
        cb_info->users--;
+       if (serv)
+               svc_shutdown_net(serv, net);
        goto out;
 }
 
@@ -320,6 +330,7 @@ void nfs_callback_down(int minorversion)
        cb_info->users--;
        if (cb_info->users == 0 && cb_info->task != NULL) {
                kthread_stop(cb_info->task);
+               svc_shutdown_net(cb_info->serv, current->nsproxy->net_ns);
                svc_exit_thread(cb_info->rqst);
                cb_info->serv = NULL;
                cb_info->rqst = NULL;
@@ -332,7 +343,7 @@ void nfs_callback_down(int minorversion)
 int
 check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp)
 {
-       char *p = svc_gss_principal(rqstp);
+       char *p = rqstp->rq_cred.cr_principal;
 
        if (rqstp->rq_authop->flavour != RPC_AUTH_GSS)
                return 1;
index 60f7e4ec842cf1d4fd48f21862d1d5ece26efc88..7d108753af81e9783ab1465c8bb9986452a6cf00 100644 (file)
@@ -65,7 +65,7 @@ static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq);
 static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
 {
        int ret = 0;
-       struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+       struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
 
        if (clp->rpc_ops->version != 4 || minorversion != 0)
                return ret;
@@ -90,7 +90,9 @@ static bool nfs4_disable_idmapping = true;
  * RPC cruft for NFS
  */
 static const struct rpc_version *nfs_version[5] = {
+#ifdef CONFIG_NFS_V2
        [2]                     = &nfs_version2,
+#endif
 #ifdef CONFIG_NFS_V3
        [3]                     = &nfs_version3,
 #endif
@@ -129,6 +131,7 @@ const struct rpc_program nfsacl_program = {
 #endif  /* CONFIG_NFS_V3_ACL */
 
 struct nfs_client_initdata {
+       unsigned long init_flags;
        const char *hostname;
        const struct sockaddr *addr;
        size_t addrlen;
@@ -172,7 +175,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
        clp->cl_rpcclient = ERR_PTR(-EINVAL);
 
        clp->cl_proto = cl_init->proto;
-       clp->net = get_net(cl_init->net);
+       clp->cl_net = get_net(cl_init->net);
 
 #ifdef CONFIG_NFS_V4
        err = nfs_get_cb_ident_idr(clp, cl_init->minorversion);
@@ -182,7 +185,6 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
        spin_lock_init(&clp->cl_lock);
        INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
        rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
-       clp->cl_boot_time = CURRENT_TIME;
        clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
        clp->cl_minorversion = cl_init->minorversion;
        clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
@@ -207,6 +209,7 @@ static void nfs4_shutdown_session(struct nfs_client *clp)
        if (nfs4_has_session(clp)) {
                nfs4_deviceid_purge_client(clp);
                nfs4_destroy_session(clp->cl_session);
+               nfs4_destroy_clientid(clp);
        }
 
 }
@@ -235,6 +238,9 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
                nfs_idmap_delete(clp);
 
        rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
+       kfree(clp->cl_serverowner);
+       kfree(clp->cl_serverscope);
+       kfree(clp->cl_implid);
 }
 
 /* idr_remove_all is not needed as all id's are removed by nfs_put_client */
@@ -248,7 +254,7 @@ void nfs_cleanup_cb_ident_idr(struct net *net)
 /* nfs_client_lock held */
 static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
 {
-       struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+       struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
 
        if (clp->cl_cb_ident)
                idr_remove(&nn->cb_ident_idr, clp->cl_cb_ident);
@@ -301,10 +307,8 @@ static void nfs_free_client(struct nfs_client *clp)
        if (clp->cl_machine_cred != NULL)
                put_rpccred(clp->cl_machine_cred);
 
-       put_net(clp->net);
+       put_net(clp->cl_net);
        kfree(clp->cl_hostname);
-       kfree(clp->server_scope);
-       kfree(clp->impl_id);
        kfree(clp);
 
        dprintk("<-- nfs_free_client()\n");
@@ -321,7 +325,7 @@ void nfs_put_client(struct nfs_client *clp)
                return;
 
        dprintk("--> nfs_put_client({%d})\n", atomic_read(&clp->cl_count));
-       nn = net_generic(clp->net, nfs_net_id);
+       nn = net_generic(clp->cl_net, nfs_net_id);
 
        if (atomic_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) {
                list_del(&clp->cl_share_link);
@@ -456,6 +460,8 @@ static bool nfs4_cb_match_client(const struct sockaddr *addr,
            clp->cl_cons_state == NFS_CS_SESSION_INITING))
                return false;
 
+       smp_rmb();
+
        /* Match the version and minorversion */
        if (clp->rpc_ops->version != 4 ||
            clp->cl_minorversion != minorversion)
@@ -504,6 +510,47 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
        return NULL;
 }
 
+static bool nfs_client_init_is_complete(const struct nfs_client *clp)
+{
+       return clp->cl_cons_state != NFS_CS_INITING;
+}
+
+int nfs_wait_client_init_complete(const struct nfs_client *clp)
+{
+       return wait_event_killable(nfs_client_active_wq,
+                       nfs_client_init_is_complete(clp));
+}
+
+/*
+ * Found an existing client.  Make sure it's ready before returning.
+ */
+static struct nfs_client *
+nfs_found_client(const struct nfs_client_initdata *cl_init,
+                struct nfs_client *clp)
+{
+       int error;
+
+       error = nfs_wait_client_init_complete(clp);
+       if (error < 0) {
+               nfs_put_client(clp);
+               return ERR_PTR(-ERESTARTSYS);
+       }
+
+       if (clp->cl_cons_state < NFS_CS_READY) {
+               error = clp->cl_cons_state;
+               nfs_put_client(clp);
+               return ERR_PTR(error);
+       }
+
+       smp_rmb();
+
+       BUG_ON(clp->cl_cons_state != NFS_CS_READY);
+
+       dprintk("<-- %s found nfs_client %p for %s\n",
+               __func__, clp, cl_init->hostname ?: "");
+       return clp;
+}
+
 /*
  * Look up a client by IP address and protocol version
  * - creates a new record if one doesn't yet exist
@@ -512,11 +559,9 @@ static struct nfs_client *
 nfs_get_client(const struct nfs_client_initdata *cl_init,
               const struct rpc_timeout *timeparms,
               const char *ip_addr,
-              rpc_authflavor_t authflavour,
-              int noresvport)
+              rpc_authflavor_t authflavour)
 {
        struct nfs_client *clp, *new = NULL;
-       int error;
        struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id);
 
        dprintk("--> nfs_get_client(%s,v%u)\n",
@@ -527,60 +572,29 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
                spin_lock(&nn->nfs_client_lock);
 
                clp = nfs_match_client(cl_init);
-               if (clp)
-                       goto found_client;
-               if (new)
-                       goto install_client;
+               if (clp) {
+                       spin_unlock(&nn->nfs_client_lock);
+                       if (new)
+                               nfs_free_client(new);
+                       return nfs_found_client(cl_init, clp);
+               }
+               if (new) {
+                       list_add(&new->cl_share_link, &nn->nfs_client_list);
+                       spin_unlock(&nn->nfs_client_lock);
+                       new->cl_flags = cl_init->init_flags;
+                       return cl_init->rpc_ops->init_client(new,
+                                               timeparms, ip_addr,
+                                               authflavour);
+               }
 
                spin_unlock(&nn->nfs_client_lock);
 
                new = nfs_alloc_client(cl_init);
        } while (!IS_ERR(new));
 
-       dprintk("--> nfs_get_client() = %ld [failed]\n", PTR_ERR(new));
+       dprintk("<-- nfs_get_client() Failed to find %s (%ld)\n",
+               cl_init->hostname ?: "", PTR_ERR(new));
        return new;
-
-       /* install a new client and return with it unready */
-install_client:
-       clp = new;
-       list_add(&clp->cl_share_link, &nn->nfs_client_list);
-       spin_unlock(&nn->nfs_client_lock);
-
-       error = cl_init->rpc_ops->init_client(clp, timeparms, ip_addr,
-                                             authflavour, noresvport);
-       if (error < 0) {
-               nfs_put_client(clp);
-               return ERR_PTR(error);
-       }
-       dprintk("--> nfs_get_client() = %p [new]\n", clp);
-       return clp;
-
-       /* found an existing client
-        * - make sure it's ready before returning
-        */
-found_client:
-       spin_unlock(&nn->nfs_client_lock);
-
-       if (new)
-               nfs_free_client(new);
-
-       error = wait_event_killable(nfs_client_active_wq,
-                               clp->cl_cons_state < NFS_CS_INITING);
-       if (error < 0) {
-               nfs_put_client(clp);
-               return ERR_PTR(-ERESTARTSYS);
-       }
-
-       if (clp->cl_cons_state < NFS_CS_READY) {
-               error = clp->cl_cons_state;
-               nfs_put_client(clp);
-               return ERR_PTR(error);
-       }
-
-       BUG_ON(clp->cl_cons_state != NFS_CS_READY);
-
-       dprintk("--> nfs_get_client() = %p [share]\n", clp);
-       return clp;
 }
 
 /*
@@ -588,26 +602,11 @@ found_client:
  */
 void nfs_mark_client_ready(struct nfs_client *clp, int state)
 {
+       smp_wmb();
        clp->cl_cons_state = state;
        wake_up_all(&nfs_client_active_wq);
 }
 
-/*
- * With sessions, the client is not marked ready until after a
- * successful EXCHANGE_ID and CREATE_SESSION.
- *
- * Map errors cl_cons_state errors to EPROTONOSUPPORT to indicate
- * other versions of NFS can be tried.
- */
-int nfs4_check_client_ready(struct nfs_client *clp)
-{
-       if (!nfs4_has_session(clp))
-               return 0;
-       if (clp->cl_cons_state < NFS_CS_READY)
-               return -EPROTONOSUPPORT;
-       return 0;
-}
-
 /*
  * Initialise the timeout values for a connection
  */
@@ -654,12 +653,11 @@ static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
  */
 static int nfs_create_rpc_client(struct nfs_client *clp,
                                 const struct rpc_timeout *timeparms,
-                                rpc_authflavor_t flavor,
-                                int discrtry, int noresvport)
+                                rpc_authflavor_t flavor)
 {
        struct rpc_clnt         *clnt = NULL;
        struct rpc_create_args args = {
-               .net            = clp->net,
+               .net            = clp->cl_net,
                .protocol       = clp->cl_proto,
                .address        = (struct sockaddr *)&clp->cl_addr,
                .addrsize       = clp->cl_addrlen,
@@ -670,9 +668,9 @@ static int nfs_create_rpc_client(struct nfs_client *clp,
                .authflavor     = flavor,
        };
 
-       if (discrtry)
+       if (test_bit(NFS_CS_DISCRTRY, &clp->cl_flags))
                args.flags |= RPC_CLNT_CREATE_DISCRTRY;
-       if (noresvport)
+       if (test_bit(NFS_CS_NORESVPORT, &clp->cl_flags))
                args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
 
        if (!IS_ERR(clp->cl_rpcclient))
@@ -713,7 +711,7 @@ static int nfs_start_lockd(struct nfs_server *server)
                .nfs_version    = clp->rpc_ops->version,
                .noresvport     = server->flags & NFS_MOUNT_NORESVPORT ?
                                        1 : 0,
-               .net            = clp->net,
+               .net            = clp->cl_net,
        };
 
        if (nlm_init.nfs_version > 3)
@@ -805,36 +803,43 @@ static int nfs_init_server_rpcclient(struct nfs_server *server,
        return 0;
 }
 
-/*
- * Initialise an NFS2 or NFS3 client
+/**
+ * nfs_init_client - Initialise an NFS2 or NFS3 client
+ *
+ * @clp: nfs_client to initialise
+ * @timeparms: timeout parameters for underlying RPC transport
+ * @ip_addr: IP presentation address (not used)
+ * @authflavor: authentication flavor for underlying RPC transport
+ *
+ * Returns pointer to an NFS client, or an ERR_PTR value.
  */
-int nfs_init_client(struct nfs_client *clp, const struct rpc_timeout *timeparms,
-                   const char *ip_addr, rpc_authflavor_t authflavour,
-                   int noresvport)
+struct nfs_client *nfs_init_client(struct nfs_client *clp,
+                   const struct rpc_timeout *timeparms,
+                   const char *ip_addr, rpc_authflavor_t authflavour)
 {
        int error;
 
        if (clp->cl_cons_state == NFS_CS_READY) {
                /* the client is already initialised */
                dprintk("<-- nfs_init_client() = 0 [already %p]\n", clp);
-               return 0;
+               return clp;
        }
 
        /*
         * Create a client RPC handle for doing FSSTAT with UNIX auth only
         * - RFC 2623, sec 2.3.2
         */
-       error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX,
-                                     0, noresvport);
+       error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
        if (error < 0)
                goto error;
        nfs_mark_client_ready(clp, NFS_CS_READY);
-       return 0;
+       return clp;
 
 error:
        nfs_mark_client_ready(clp, error);
+       nfs_put_client(clp);
        dprintk("<-- nfs_init_client() = xerror %d\n", error);
-       return error;
+       return ERR_PTR(error);
 }
 
 /*
@@ -847,7 +852,7 @@ static int nfs_init_server(struct nfs_server *server,
                .hostname = data->nfs_server.hostname,
                .addr = (const struct sockaddr *)&data->nfs_server.address,
                .addrlen = data->nfs_server.addrlen,
-               .rpc_ops = &nfs_v2_clientops,
+               .rpc_ops = NULL,
                .proto = data->nfs_server.protocol,
                .net = data->net,
        };
@@ -857,17 +862,28 @@ static int nfs_init_server(struct nfs_server *server,
 
        dprintk("--> nfs_init_server()\n");
 
+       switch (data->version) {
+#ifdef CONFIG_NFS_V2
+       case 2:
+               cl_init.rpc_ops = &nfs_v2_clientops;
+               break;
+#endif
 #ifdef CONFIG_NFS_V3
-       if (data->version == 3)
+       case 3:
                cl_init.rpc_ops = &nfs_v3_clientops;
+               break;
 #endif
+       default:
+               return -EPROTONOSUPPORT;
+       }
 
        nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
                        data->timeo, data->retrans);
+       if (data->flags & NFS_MOUNT_NORESVPORT)
+               set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
 
        /* Allocate or find a client reference we can use */
-       clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX,
-                            data->flags & NFS_MOUNT_NORESVPORT);
+       clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX);
        if (IS_ERR(clp)) {
                dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
                return PTR_ERR(clp);
@@ -880,7 +896,7 @@ static int nfs_init_server(struct nfs_server *server,
        server->options = data->options;
        server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
                NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
-               NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
+               NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR;
 
        if (data->rsize)
                server->rsize = nfs_block_size(data->rsize, NULL);
@@ -1048,7 +1064,7 @@ static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_serve
 static void nfs_server_insert_lists(struct nfs_server *server)
 {
        struct nfs_client *clp = server->nfs_client;
-       struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+       struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
 
        spin_lock(&nn->nfs_client_lock);
        list_add_tail_rcu(&server->client_link, &clp->cl_superblocks);
@@ -1065,7 +1081,7 @@ static void nfs_server_remove_lists(struct nfs_server *server)
 
        if (clp == NULL)
                return;
-       nn = net_generic(clp->net, nfs_net_id);
+       nn = net_generic(clp->cl_net, nfs_net_id);
        spin_lock(&nn->nfs_client_lock);
        list_del_rcu(&server->client_link);
        if (list_empty(&clp->cl_superblocks))
@@ -1333,21 +1349,27 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
                 * so that the client back channel can find the
                 * nfs_client struct
                 */
-               clp->cl_cons_state = NFS_CS_SESSION_INITING;
+               nfs_mark_client_ready(clp, NFS_CS_SESSION_INITING);
        }
 #endif /* CONFIG_NFS_V4_1 */
 
        return nfs4_init_callback(clp);
 }
 
-/*
- * Initialise an NFS4 client record
+/**
+ * nfs4_init_client - Initialise an NFS4 client record
+ *
+ * @clp: nfs_client to initialise
+ * @timeparms: timeout parameters for underlying RPC transport
+ * @ip_addr: callback IP address in presentation format
+ * @authflavor: authentication flavor for underlying RPC transport
+ *
+ * Returns pointer to an NFS client, or an ERR_PTR value.
  */
-int nfs4_init_client(struct nfs_client *clp,
-                    const struct rpc_timeout *timeparms,
-                    const char *ip_addr,
-                    rpc_authflavor_t authflavour,
-                    int noresvport)
+struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+                                   const struct rpc_timeout *timeparms,
+                                   const char *ip_addr,
+                                   rpc_authflavor_t authflavour)
 {
        char buf[INET6_ADDRSTRLEN + 1];
        int error;
@@ -1355,14 +1377,14 @@ int nfs4_init_client(struct nfs_client *clp,
        if (clp->cl_cons_state == NFS_CS_READY) {
                /* the client is initialised already */
                dprintk("<-- nfs4_init_client() = 0 [already %p]\n", clp);
-               return 0;
+               return clp;
        }
 
        /* Check NFS protocol revision and initialize RPC op vector */
        clp->rpc_ops = &nfs_v4_clientops;
 
-       error = nfs_create_rpc_client(clp, timeparms, authflavour,
-                                     1, noresvport);
+       __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+       error = nfs_create_rpc_client(clp, timeparms, authflavour);
        if (error < 0)
                goto error;
 
@@ -1395,12 +1417,13 @@ int nfs4_init_client(struct nfs_client *clp,
 
        if (!nfs4_has_session(clp))
                nfs_mark_client_ready(clp, NFS_CS_READY);
-       return 0;
+       return clp;
 
 error:
        nfs_mark_client_ready(clp, error);
+       nfs_put_client(clp);
        dprintk("<-- nfs4_init_client() = xerror %d\n", error);
-       return error;
+       return ERR_PTR(error);
 }
 
 /*
@@ -1429,9 +1452,11 @@ static int nfs4_set_client(struct nfs_server *server,
 
        dprintk("--> nfs4_set_client()\n");
 
+       if (server->flags & NFS_MOUNT_NORESVPORT)
+               set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+
        /* Allocate or find a client reference we can use */
-       clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour,
-                            server->flags & NFS_MOUNT_NORESVPORT);
+       clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour);
        if (IS_ERR(clp)) {
                error = PTR_ERR(clp);
                goto error;
@@ -1465,8 +1490,8 @@ error:
  * the MDS.
  */
 struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
-               const struct sockaddr *ds_addr,
-               int ds_addrlen, int ds_proto)
+               const struct sockaddr *ds_addr, int ds_addrlen,
+               int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans)
 {
        struct nfs_client_initdata cl_init = {
                .addr = ds_addr,
@@ -1474,14 +1499,9 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
                .rpc_ops = &nfs_v4_clientops,
                .proto = ds_proto,
                .minorversion = mds_clp->cl_minorversion,
-               .net = mds_clp->net,
-       };
-       struct rpc_timeout ds_timeout = {
-               .to_initval = 15 * HZ,
-               .to_maxval = 15 * HZ,
-               .to_retries = 1,
-               .to_exponential = 1,
+               .net = mds_clp->cl_net,
        };
+       struct rpc_timeout ds_timeout;
        struct nfs_client *clp;
 
        /*
@@ -1489,8 +1509,9 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
         * cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
         * (section 13.1 RFC 5661).
         */
+       nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
        clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
-                            mds_clp->cl_rpcclient->cl_auth->au_flavor, 0);
+                            mds_clp->cl_rpcclient->cl_auth->au_flavor);
 
        dprintk("<-- %s %p\n", __func__, clp);
        return clp;
@@ -1701,7 +1722,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
                                rpc_protocol(parent_server->client),
                                parent_server->client->cl_timeout,
                                parent_client->cl_mvops->minor_version,
-                               parent_client->net);
+                               parent_client->cl_net);
        if (error < 0)
                goto error;
 
@@ -1805,6 +1826,7 @@ void nfs_clients_init(struct net *net)
        idr_init(&nn->cb_ident_idr);
 #endif
        spin_lock_init(&nn->nfs_client_lock);
+       nn->boot_time = CURRENT_TIME;
 }
 
 #ifdef CONFIG_PROC_FS
index 89af1d269274f3a91401f704226dec43f9c02528..bd3a9601d32d9915a70e1888ab5a710671e00aac 100644 (file)
@@ -316,6 +316,10 @@ out:
  * nfs_client_return_marked_delegations - return previously marked delegations
  * @clp: nfs_client to process
  *
+ * Note that this function is designed to be called by the state
+ * manager thread. For this reason, it cannot flush the dirty data,
+ * since that could deadlock in case of a state recovery error.
+ *
  * Returns zero on success, or a negative errno value.
  */
 int nfs_client_return_marked_delegations(struct nfs_client *clp)
@@ -340,11 +344,9 @@ restart:
                                                                server);
                        rcu_read_unlock();
 
-                       if (delegation != NULL) {
-                               filemap_flush(inode->i_mapping);
+                       if (delegation != NULL)
                                err = __nfs_inode_return_delegation(inode,
                                                                delegation, 0);
-                       }
                        iput(inode);
                        if (!err)
                                goto restart;
@@ -380,6 +382,10 @@ void nfs_inode_return_delegation_noreclaim(struct inode *inode)
  * nfs_inode_return_delegation - synchronously return a delegation
  * @inode: inode to process
  *
+ * This routine will always flush any dirty data to disk on the
+ * assumption that if we need to return the delegation, then
+ * we should stop caching.
+ *
  * Returns zero on success, or a negative errno value.
  */
 int nfs_inode_return_delegation(struct inode *inode)
@@ -389,10 +395,10 @@ int nfs_inode_return_delegation(struct inode *inode)
        struct nfs_delegation *delegation;
        int err = 0;
 
+       nfs_wb_all(inode);
        if (rcu_access_pointer(nfsi->delegation) != NULL) {
                delegation = nfs_detach_delegation(nfsi, server);
                if (delegation != NULL) {
-                       nfs_wb_all(inode);
                        err = __nfs_inode_return_delegation(inode, delegation, 1);
                }
        }
@@ -538,6 +544,8 @@ int nfs_async_inode_return_delegation(struct inode *inode,
        struct nfs_client *clp = server->nfs_client;
        struct nfs_delegation *delegation;
 
+       filemap_flush(inode->i_mapping);
+
        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(inode)->delegation);
 
index cd6a7a8dadae9054e5bd5557c0226bfd8accc116..72709c4193fa28ac3afeba63f65e2f46e954eb23 100644 (file)
@@ -66,6 +66,7 @@ static inline int nfs_have_delegation(struct inode *inode, fmode_t flags)
 
 static inline int nfs_inode_return_delegation(struct inode *inode)
 {
+       nfs_wb_all(inode);
        return 0;
 }
 #endif
index eedd24d0ad2efc6a02e7ff7b552e2e707adadfbd..f430057ff3b397c2fe1f523bf5fcea4135276f8c 100644 (file)
@@ -474,6 +474,29 @@ different:
        return 0;
 }
 
+static
+bool nfs_use_readdirplus(struct inode *dir, struct file *filp)
+{
+       if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS))
+               return false;
+       if (test_and_clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags))
+               return true;
+       if (filp->f_pos == 0)
+               return true;
+       return false;
+}
+
+/*
+ * This function is called by the lookup code to request the use of
+ * readdirplus to accelerate any future lookups in the same
+ * directory.
+ */
+static
+void nfs_advise_use_readdirplus(struct inode *dir)
+{
+       set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags);
+}
+
 static
 void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
 {
@@ -871,7 +894,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
        desc->file = filp;
        desc->dir_cookie = &dir_ctx->dir_cookie;
        desc->decode = NFS_PROTO(inode)->decode_dirent;
-       desc->plus = NFS_USE_READDIRPLUS(inode);
+       desc->plus = nfs_use_readdirplus(inode, filp) ? 1 : 0;
 
        nfs_block_sillyrename(dentry);
        res = nfs_revalidate_mapping(inode, filp->f_mapping);
@@ -1111,7 +1134,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
        if (!inode) {
                if (nfs_neg_need_reval(dir, dentry, nd))
                        goto out_bad;
-               goto out_valid;
+               goto out_valid_noent;
        }
 
        if (is_bad_inode(inode)) {
@@ -1140,7 +1163,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
        if (fhandle == NULL || fattr == NULL)
                goto out_error;
 
-       error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
+       error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
        if (error)
                goto out_bad;
        if (nfs_compare_fh(NFS_FH(inode), fhandle))
@@ -1153,6 +1176,9 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
 out_set_verifier:
        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
  out_valid:
+       /* Success: notify readdir to use READDIRPLUS */
+       nfs_advise_use_readdirplus(dir);
+ out_valid_noent:
        dput(parent);
        dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is valid\n",
                        __func__, dentry->d_parent->d_name.name,
@@ -1296,7 +1322,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
        parent = dentry->d_parent;
        /* Protect against concurrent sillydeletes */
        nfs_block_sillyrename(parent);
-       error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
+       error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
        if (error == -ENOENT)
                goto no_entry;
        if (error < 0) {
@@ -1308,6 +1334,9 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
        if (IS_ERR(res))
                goto out_unblock_sillyrename;
 
+       /* Success: notify readdir to use READDIRPLUS */
+       nfs_advise_use_readdirplus(dir);
+
 no_entry:
        res = d_materialise_unique(dentry, inode);
        if (res != NULL) {
@@ -1325,10 +1354,10 @@ out:
 }
 
 #ifdef CONFIG_NFS_V4
-static int nfs_open_revalidate(struct dentry *, struct nameidata *);
+static int nfs4_lookup_revalidate(struct dentry *, struct nameidata *);
 
 const struct dentry_operations nfs4_dentry_operations = {
-       .d_revalidate   = nfs_open_revalidate,
+       .d_revalidate   = nfs4_lookup_revalidate,
        .d_delete       = nfs_dentry_delete,
        .d_iput         = nfs_dentry_iput,
        .d_automount    = nfs_d_automount,
@@ -1490,13 +1519,11 @@ no_open:
        return nfs_lookup(dir, dentry, nd);
 }
 
-static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int nfs4_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
        struct dentry *parent = NULL;
        struct inode *inode;
        struct inode *dir;
-       struct nfs_open_context *ctx;
-       struct iattr attr;
        int openflags, ret = 0;
 
        if (nd->flags & LOOKUP_RCU)
@@ -1525,57 +1552,13 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
        /* We cannot do exclusive creation on a positive dentry */
        if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
                goto no_open_dput;
-       /* We can't create new files here */
-       openflags &= ~(O_CREAT|O_EXCL);
-
-       ctx = create_nfs_open_context(dentry, openflags);
-       ret = PTR_ERR(ctx);
-       if (IS_ERR(ctx))
-               goto out;
 
-       attr.ia_valid = ATTR_OPEN;
-       if (openflags & O_TRUNC) {
-               attr.ia_valid |= ATTR_SIZE;
-               attr.ia_size = 0;
-               nfs_wb_all(inode);
-       }
+       /* Let f_op->open() actually open (and revalidate) the file */
+       ret = 1;
 
-       /*
-        * Note: we're not holding inode->i_mutex and so may be racing with
-        * operations that change the directory. We therefore save the
-        * change attribute *before* we do the RPC call.
-        */
-       inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr);
-       if (IS_ERR(inode)) {
-               ret = PTR_ERR(inode);
-               switch (ret) {
-               case -EPERM:
-               case -EACCES:
-               case -EDQUOT:
-               case -ENOSPC:
-               case -EROFS:
-                       goto out_put_ctx;
-               default:
-                       goto out_drop;
-               }
-       }
-       iput(inode);
-       if (inode != dentry->d_inode)
-               goto out_drop;
-
-       nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
-       ret = nfs_intent_set_file(nd, ctx);
-       if (ret >= 0)
-               ret = 1;
 out:
        dput(parent);
        return ret;
-out_drop:
-       d_drop(dentry);
-       ret = 0;
-out_put_ctx:
-       put_nfs_open_context(ctx);
-       goto out;
 
 no_open_dput:
        dput(parent);
@@ -1643,7 +1626,7 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
        if (dentry->d_inode)
                goto out;
        if (fhandle->size == 0) {
-               error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
+               error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
                if (error)
                        goto out_error;
        }
index 481be7f7bdd3b953179a2ab354c9f05fc4773925..ad2775d3e219b65f6efed3454d6422f8822de0e8 100644 (file)
@@ -56,6 +56,7 @@
 
 #include "internal.h"
 #include "iostat.h"
+#include "pnfs.h"
 
 #define NFSDBG_FACILITY                NFSDBG_VFS
 
@@ -81,16 +82,19 @@ struct nfs_direct_req {
        struct completion       completion;     /* wait for i/o completion */
 
        /* commit state */
-       struct list_head        rewrite_list;   /* saved nfs_write_data structs */
-       struct nfs_write_data * commit_data;    /* special write_data for commits */
+       struct nfs_mds_commit_info mds_cinfo;   /* Storage for cinfo */
+       struct pnfs_ds_commit_info ds_cinfo;    /* Storage for cinfo */
+       struct work_struct      work;
        int                     flags;
 #define NFS_ODIRECT_DO_COMMIT          (1)     /* an unstable reply was received */
 #define NFS_ODIRECT_RESCHED_WRITES     (2)     /* write verification failed */
        struct nfs_writeverf    verf;           /* unstable write verifier */
 };
 
+static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
+static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
-static const struct rpc_call_ops nfs_write_direct_ops;
+static void nfs_direct_write_schedule_work(struct work_struct *work);
 
 static inline void get_dreq(struct nfs_direct_req *dreq)
 {
@@ -124,22 +128,6 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_
        return -EINVAL;
 }
 
-static void nfs_direct_dirty_pages(struct page **pages, unsigned int pgbase, size_t count)
-{
-       unsigned int npages;
-       unsigned int i;
-
-       if (count == 0)
-               return;
-       pages += (pgbase >> PAGE_SHIFT);
-       npages = (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       for (i = 0; i < npages; i++) {
-               struct page *page = pages[i];
-               if (!PageCompound(page))
-                       set_page_dirty(page);
-       }
-}
-
 static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 {
        unsigned int i;
@@ -147,26 +135,30 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
                page_cache_release(pages[i]);
 }
 
+void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
+                             struct nfs_direct_req *dreq)
+{
+       cinfo->lock = &dreq->lock;
+       cinfo->mds = &dreq->mds_cinfo;
+       cinfo->ds = &dreq->ds_cinfo;
+       cinfo->dreq = dreq;
+       cinfo->completion_ops = &nfs_direct_commit_completion_ops;
+}
+
 static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 {
        struct nfs_direct_req *dreq;
 
-       dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
+       dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
        if (!dreq)
                return NULL;
 
        kref_init(&dreq->kref);
        kref_get(&dreq->kref);
        init_completion(&dreq->completion);
-       INIT_LIST_HEAD(&dreq->rewrite_list);
-       dreq->iocb = NULL;
-       dreq->ctx = NULL;
-       dreq->l_ctx = NULL;
+       INIT_LIST_HEAD(&dreq->mds_cinfo.list);
+       INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
        spin_lock_init(&dreq->lock);
-       atomic_set(&dreq->io_count, 0);
-       dreq->count = 0;
-       dreq->error = 0;
-       dreq->flags = 0;
 
        return dreq;
 }
@@ -226,47 +218,80 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
        nfs_direct_req_release(dreq);
 }
 
-/*
- * We must hold a reference to all the pages in this direct read request
- * until the RPCs complete.  This could be long *after* we are woken up in
- * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
- */
-static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
+static void nfs_direct_readpage_release(struct nfs_page *req)
 {
-       struct nfs_read_data *data = calldata;
-
-       nfs_readpage_result(task, data);
+       dprintk("NFS: direct read done (%s/%lld %d@%lld)\n",
+               req->wb_context->dentry->d_inode->i_sb->s_id,
+               (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+               req->wb_bytes,
+               (long long)req_offset(req));
+       nfs_release_request(req);
 }
 
-static void nfs_direct_read_release(void *calldata)
+static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
 {
+       unsigned long bytes = 0;
+       struct nfs_direct_req *dreq = hdr->dreq;
 
-       struct nfs_read_data *data = calldata;
-       struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
-       int status = data->task.tk_status;
+       if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+               goto out_put;
 
        spin_lock(&dreq->lock);
-       if (unlikely(status < 0)) {
-               dreq->error = status;
-               spin_unlock(&dreq->lock);
-       } else {
-               dreq->count += data->res.count;
-               spin_unlock(&dreq->lock);
-               nfs_direct_dirty_pages(data->pagevec,
-                               data->args.pgbase,
-                               data->res.count);
-       }
-       nfs_direct_release_pages(data->pagevec, data->npages);
+       if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
+               dreq->error = hdr->error;
+       else
+               dreq->count += hdr->good_bytes;
+       spin_unlock(&dreq->lock);
 
+       while (!list_empty(&hdr->pages)) {
+               struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+               struct page *page = req->wb_page;
+
+               if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
+                       if (bytes > hdr->good_bytes)
+                               zero_user(page, 0, PAGE_SIZE);
+                       else if (hdr->good_bytes - bytes < PAGE_SIZE)
+                               zero_user_segment(page,
+                                       hdr->good_bytes & ~PAGE_MASK,
+                                       PAGE_SIZE);
+               }
+               if (!PageCompound(page)) {
+                       if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+                               if (bytes < hdr->good_bytes)
+                                       set_page_dirty(page);
+                       } else
+                               set_page_dirty(page);
+               }
+               bytes += req->wb_bytes;
+               nfs_list_remove_request(req);
+               nfs_direct_readpage_release(req);
+       }
+out_put:
        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
-       nfs_readdata_free(data);
+       hdr->release(hdr);
+}
+
+static void nfs_read_sync_pgio_error(struct list_head *head)
+{
+       struct nfs_page *req;
+
+       while (!list_empty(head)) {
+               req = nfs_list_entry(head->next);
+               nfs_list_remove_request(req);
+               nfs_release_request(req);
+       }
 }
 
-static const struct rpc_call_ops nfs_read_direct_ops = {
-       .rpc_call_prepare = nfs_read_prepare,
-       .rpc_call_done = nfs_direct_read_result,
-       .rpc_release = nfs_direct_read_release,
+static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
+{
+       get_dreq(hdr->dreq);
+}
+
+static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
+       .error_cleanup = nfs_read_sync_pgio_error,
+       .init_hdr = nfs_direct_pgio_init,
+       .completion = nfs_direct_read_completion,
 };
 
 /*
@@ -276,107 +301,82 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
  * handled automatically by nfs_direct_read_result().  Otherwise, if
  * no requests have been sent, just return an error.
  */
-static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
+static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
                                                const struct iovec *iov,
                                                loff_t pos)
 {
+       struct nfs_direct_req *dreq = desc->pg_dreq;
        struct nfs_open_context *ctx = dreq->ctx;
        struct inode *inode = ctx->dentry->d_inode;
        unsigned long user_addr = (unsigned long)iov->iov_base;
        size_t count = iov->iov_len;
        size_t rsize = NFS_SERVER(inode)->rsize;
-       struct rpc_task *task;
-       struct rpc_message msg = {
-               .rpc_cred = ctx->cred,
-       };
-       struct rpc_task_setup task_setup_data = {
-               .rpc_client = NFS_CLIENT(inode),
-               .rpc_message = &msg,
-               .callback_ops = &nfs_read_direct_ops,
-               .workqueue = nfsiod_workqueue,
-               .flags = RPC_TASK_ASYNC,
-       };
        unsigned int pgbase;
        int result;
        ssize_t started = 0;
+       struct page **pagevec = NULL;
+       unsigned int npages;
 
        do {
-               struct nfs_read_data *data;
                size_t bytes;
+               int i;
 
                pgbase = user_addr & ~PAGE_MASK;
-               bytes = min(rsize,count);
+               bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);
 
                result = -ENOMEM;
-               data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes));
-               if (unlikely(!data))
+               npages = nfs_page_array_len(pgbase, bytes);
+               if (!pagevec)
+                       pagevec = kmalloc(npages * sizeof(struct page *),
+                                         GFP_KERNEL);
+               if (!pagevec)
                        break;
-
                down_read(&current->mm->mmap_sem);
                result = get_user_pages(current, current->mm, user_addr,
-                                       data->npages, 1, 0, data->pagevec, NULL);
+                                       npages, 1, 0, pagevec, NULL);
                up_read(&current->mm->mmap_sem);
-               if (result < 0) {
-                       nfs_readdata_free(data);
+               if (result < 0)
                        break;
-               }
-               if ((unsigned)result < data->npages) {
+               if ((unsigned)result < npages) {
                        bytes = result * PAGE_SIZE;
                        if (bytes <= pgbase) {
-                               nfs_direct_release_pages(data->pagevec, result);
-                               nfs_readdata_free(data);
+                               nfs_direct_release_pages(pagevec, result);
                                break;
                        }
                        bytes -= pgbase;
-                       data->npages = result;
+                       npages = result;
                }
 
-               get_dreq(dreq);
-
-               data->req = (struct nfs_page *) dreq;
-               data->inode = inode;
-               data->cred = msg.rpc_cred;
-               data->args.fh = NFS_FH(inode);
-               data->args.context = ctx;
-               data->args.lock_context = dreq->l_ctx;
-               data->args.offset = pos;
-               data->args.pgbase = pgbase;
-               data->args.pages = data->pagevec;
-               data->args.count = bytes;
-               data->res.fattr = &data->fattr;
-               data->res.eof = 0;
-               data->res.count = bytes;
-               nfs_fattr_init(&data->fattr);
-               msg.rpc_argp = &data->args;
-               msg.rpc_resp = &data->res;
-
-               task_setup_data.task = &data->task;
-               task_setup_data.callback_data = data;
-               NFS_PROTO(inode)->read_setup(data, &msg);
-
-               task = rpc_run_task(&task_setup_data);
-               if (IS_ERR(task))
-                       break;
-               rpc_put_task(task);
-
-               dprintk("NFS: %5u initiated direct read call "
-                       "(req %s/%Ld, %zu bytes @ offset %Lu)\n",
-                               data->task.tk_pid,
-                               inode->i_sb->s_id,
-                               (long long)NFS_FILEID(inode),
-                               bytes,
-                               (unsigned long long)data->args.offset);
-
-               started += bytes;
-               user_addr += bytes;
-               pos += bytes;
-               /* FIXME: Remove this unnecessary math from final patch */
-               pgbase += bytes;
-               pgbase &= ~PAGE_MASK;
-               BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
-
-               count -= bytes;
-       } while (count != 0);
+               for (i = 0; i < npages; i++) {
+                       struct nfs_page *req;
+                       unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
+                       /* XXX do we need to do the eof zeroing found in async_filler? */
+                       req = nfs_create_request(dreq->ctx, dreq->inode,
+                                                pagevec[i],
+                                                pgbase, req_len);
+                       if (IS_ERR(req)) {
+                               result = PTR_ERR(req);
+                               break;
+                       }
+                       req->wb_index = pos >> PAGE_SHIFT;
+                       req->wb_offset = pos & ~PAGE_MASK;
+                       if (!nfs_pageio_add_request(desc, req)) {
+                               result = desc->pg_error;
+                               nfs_release_request(req);
+                               break;
+                       }
+                       pgbase = 0;
+                       bytes -= req_len;
+                       started += req_len;
+                       user_addr += req_len;
+                       pos += req_len;
+                       count -= req_len;
+               }
+               /* The nfs_page now hold references to these pages */
+               nfs_direct_release_pages(pagevec, npages);
+       } while (count != 0 && result >= 0);
+
+       kfree(pagevec);
 
        if (started)
                return started;
@@ -388,15 +388,19 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
                                              unsigned long nr_segs,
                                              loff_t pos)
 {
+       struct nfs_pageio_descriptor desc;
        ssize_t result = -EINVAL;
        size_t requested_bytes = 0;
        unsigned long seg;
 
+       nfs_pageio_init_read(&desc, dreq->inode,
+                            &nfs_direct_read_completion_ops);
        get_dreq(dreq);
+       desc.pg_dreq = dreq;
 
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *vec = &iov[seg];
-               result = nfs_direct_read_schedule_segment(dreq, vec, pos);
+               result = nfs_direct_read_schedule_segment(&desc, vec, pos);
                if (result < 0)
                        break;
                requested_bytes += result;
@@ -405,6 +409,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
                pos += vec->iov_len;
        }
 
+       nfs_pageio_complete(&desc);
+
        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
@@ -441,104 +447,70 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
        result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
        if (!result)
                result = nfs_direct_wait(dreq);
+       NFS_I(inode)->read_io += result;
 out_release:
        nfs_direct_req_release(dreq);
 out:
        return result;
 }
 
-static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
+static void nfs_inode_dio_write_done(struct inode *inode)
 {
-       while (!list_empty(&dreq->rewrite_list)) {
-               struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
-               list_del(&data->pages);
-               nfs_direct_release_pages(data->pagevec, data->npages);
-               nfs_writedata_free(data);
-       }
+       nfs_zap_mapping(inode, inode->i_mapping);
+       inode_dio_done(inode);
 }
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 {
-       struct inode *inode = dreq->inode;
-       struct list_head *p;
-       struct nfs_write_data *data;
-       struct rpc_task *task;
-       struct rpc_message msg = {
-               .rpc_cred = dreq->ctx->cred,
-       };
-       struct rpc_task_setup task_setup_data = {
-               .rpc_client = NFS_CLIENT(inode),
-               .rpc_message = &msg,
-               .callback_ops = &nfs_write_direct_ops,
-               .workqueue = nfsiod_workqueue,
-               .flags = RPC_TASK_ASYNC,
-       };
+       struct nfs_pageio_descriptor desc;
+       struct nfs_page *req, *tmp;
+       LIST_HEAD(reqs);
+       struct nfs_commit_info cinfo;
+       LIST_HEAD(failed);
+
+       nfs_init_cinfo_from_dreq(&cinfo, dreq);
+       pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
+       spin_lock(cinfo.lock);
+       nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
+       spin_unlock(cinfo.lock);
 
        dreq->count = 0;
        get_dreq(dreq);
 
-       list_for_each(p, &dreq->rewrite_list) {
-               data = list_entry(p, struct nfs_write_data, pages);
-
-               get_dreq(dreq);
-
-               /* Use stable writes */
-               data->args.stable = NFS_FILE_SYNC;
-
-               /*
-                * Reset data->res.
-                */
-               nfs_fattr_init(&data->fattr);
-               data->res.count = data->args.count;
-               memset(&data->verf, 0, sizeof(data->verf));
-
-               /*
-                * Reuse data->task; data->args should not have changed
-                * since the original request was sent.
-                */
-               task_setup_data.task = &data->task;
-               task_setup_data.callback_data = data;
-               msg.rpc_argp = &data->args;
-               msg.rpc_resp = &data->res;
-               NFS_PROTO(inode)->write_setup(data, &msg);
-
-               /*
-                * We're called via an RPC callback, so BKL is already held.
-                */
-               task = rpc_run_task(&task_setup_data);
-               if (!IS_ERR(task))
-                       rpc_put_task(task);
-
-               dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
-                               data->task.tk_pid,
-                               inode->i_sb->s_id,
-                               (long long)NFS_FILEID(inode),
-                               data->args.count,
-                               (unsigned long long)data->args.offset);
+       nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE,
+                             &nfs_direct_write_completion_ops);
+       desc.pg_dreq = dreq;
+
+       list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
+               if (!nfs_pageio_add_request(&desc, req)) {
+                       nfs_list_add_request(req, &failed);
+                       spin_lock(cinfo.lock);
+                       dreq->flags = 0;
+                       dreq->error = -EIO;
+                       spin_unlock(cinfo.lock);
+               }
        }
+       nfs_pageio_complete(&desc);
 
-       if (put_dreq(dreq))
-               nfs_direct_write_complete(dreq, inode);
-}
-
-static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
-{
-       struct nfs_write_data *data = calldata;
+       while (!list_empty(&failed))
+               nfs_unlock_and_release_request(req);
 
-       /* Call the NFS version-specific code */
-       NFS_PROTO(data->inode)->commit_done(task, data);
+       if (put_dreq(dreq))
+               nfs_direct_write_complete(dreq, dreq->inode);
 }
 
-static void nfs_direct_commit_release(void *calldata)
+static void nfs_direct_commit_complete(struct nfs_commit_data *data)
 {
-       struct nfs_write_data *data = calldata;
-       struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+       struct nfs_direct_req *dreq = data->dreq;
+       struct nfs_commit_info cinfo;
+       struct nfs_page *req;
        int status = data->task.tk_status;
 
+       nfs_init_cinfo_from_dreq(&cinfo, dreq);
        if (status < 0) {
                dprintk("NFS: %5u commit failed with error %d.\n",
-                               data->task.tk_pid, status);
+                       data->task.tk_pid, status);
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        } else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
                dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
@@ -546,62 +518,47 @@ static void nfs_direct_commit_release(void *calldata)
        }
 
        dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
-       nfs_direct_write_complete(dreq, data->inode);
-       nfs_commit_free(data);
+       while (!list_empty(&data->pages)) {
+               req = nfs_list_entry(data->pages.next);
+               nfs_list_remove_request(req);
+               if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
+                       /* Note the rewrite will go through mds */
+                       kref_get(&req->wb_kref);
+                       nfs_mark_request_commit(req, NULL, &cinfo);
+               }
+               nfs_unlock_and_release_request(req);
+       }
+
+       if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
+               nfs_direct_write_complete(dreq, data->inode);
 }
 
-static const struct rpc_call_ops nfs_commit_direct_ops = {
-       .rpc_call_prepare = nfs_write_prepare,
-       .rpc_call_done = nfs_direct_commit_result,
-       .rpc_release = nfs_direct_commit_release,
+static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
+{
+       /* There is no lock to clear */
+}
+
+static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
+       .completion = nfs_direct_commit_complete,
+       .error_cleanup = nfs_direct_error_cleanup,
 };
 
 static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 {
-       struct nfs_write_data *data = dreq->commit_data;
-       struct rpc_task *task;
-       struct rpc_message msg = {
-               .rpc_argp = &data->args,
-               .rpc_resp = &data->res,
-               .rpc_cred = dreq->ctx->cred,
-       };
-       struct rpc_task_setup task_setup_data = {
-               .task = &data->task,
-               .rpc_client = NFS_CLIENT(dreq->inode),
-               .rpc_message = &msg,
-               .callback_ops = &nfs_commit_direct_ops,
-               .callback_data = data,
-               .workqueue = nfsiod_workqueue,
-               .flags = RPC_TASK_ASYNC,
-       };
-
-       data->inode = dreq->inode;
-       data->cred = msg.rpc_cred;
-
-       data->args.fh = NFS_FH(data->inode);
-       data->args.offset = 0;
-       data->args.count = 0;
-       data->args.context = dreq->ctx;
-       data->args.lock_context = dreq->l_ctx;
-       data->res.count = 0;
-       data->res.fattr = &data->fattr;
-       data->res.verf = &data->verf;
-       nfs_fattr_init(&data->fattr);
-
-       NFS_PROTO(data->inode)->commit_setup(data, &msg);
-
-       /* Note: task.tk_ops->rpc_release will free dreq->commit_data */
-       dreq->commit_data = NULL;
-
-       dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
-
-       task = rpc_run_task(&task_setup_data);
-       if (!IS_ERR(task))
-               rpc_put_task(task);
+       int res;
+       struct nfs_commit_info cinfo;
+       LIST_HEAD(mds_list);
+
+       nfs_init_cinfo_from_dreq(&cinfo, dreq);
+       nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
+       res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
+       if (res < 0) /* res == -ENOMEM */
+               nfs_direct_write_reschedule(dreq);
 }
 
-static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
+static void nfs_direct_write_schedule_work(struct work_struct *work)
 {
+       struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
        int flags = dreq->flags;
 
        dreq->flags = 0;
@@ -613,89 +570,32 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
                        nfs_direct_write_reschedule(dreq);
                        break;
                default:
-                       if (dreq->commit_data != NULL)
-                               nfs_commit_free(dreq->commit_data);
-                       nfs_direct_free_writedata(dreq);
-                       nfs_zap_mapping(inode, inode->i_mapping);
+                       nfs_inode_dio_write_done(dreq->inode);
                        nfs_direct_complete(dreq);
        }
 }
 
-static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
+static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
 {
-       dreq->commit_data = nfs_commitdata_alloc();
-       if (dreq->commit_data != NULL)
-               dreq->commit_data->req = (struct nfs_page *) dreq;
+       schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
 }
+
 #else
-static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
+static void nfs_direct_write_schedule_work(struct work_struct *work)
 {
-       dreq->commit_data = NULL;
 }
 
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
 {
-       nfs_direct_free_writedata(dreq);
-       nfs_zap_mapping(inode, inode->i_mapping);
+       nfs_inode_dio_write_done(inode);
        nfs_direct_complete(dreq);
 }
 #endif
 
-static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
-{
-       struct nfs_write_data *data = calldata;
-
-       nfs_writeback_done(task, data);
-}
-
 /*
  * NB: Return the value of the first error return code.  Subsequent
  *     errors after the first one are ignored.
  */
-static void nfs_direct_write_release(void *calldata)
-{
-       struct nfs_write_data *data = calldata;
-       struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
-       int status = data->task.tk_status;
-
-       spin_lock(&dreq->lock);
-
-       if (unlikely(status < 0)) {
-               /* An error has occurred, so we should not commit */
-               dreq->flags = 0;
-               dreq->error = status;
-       }
-       if (unlikely(dreq->error != 0))
-               goto out_unlock;
-
-       dreq->count += data->res.count;
-
-       if (data->res.verf->committed != NFS_FILE_SYNC) {
-               switch (dreq->flags) {
-                       case 0:
-                               memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
-                               dreq->flags = NFS_ODIRECT_DO_COMMIT;
-                               break;
-                       case NFS_ODIRECT_DO_COMMIT:
-                               if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
-                                       dprintk("NFS: %5u write verify failed\n", data->task.tk_pid);
-                                       dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
-                               }
-               }
-       }
-out_unlock:
-       spin_unlock(&dreq->lock);
-
-       if (put_dreq(dreq))
-               nfs_direct_write_complete(dreq, data->inode);
-}
-
-static const struct rpc_call_ops nfs_write_direct_ops = {
-       .rpc_call_prepare = nfs_write_prepare,
-       .rpc_call_done = nfs_direct_write_result,
-       .rpc_release = nfs_direct_write_release,
-};
-
 /*
  * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
  * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
@@ -703,132 +603,189 @@ static const struct rpc_call_ops nfs_write_direct_ops = {
  * handled automatically by nfs_direct_write_result().  Otherwise, if
  * no requests have been sent, just return an error.
  */
-static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
+static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
                                                 const struct iovec *iov,
-                                                loff_t pos, int sync)
+                                                loff_t pos)
 {
+       struct nfs_direct_req *dreq = desc->pg_dreq;
        struct nfs_open_context *ctx = dreq->ctx;
        struct inode *inode = ctx->dentry->d_inode;
        unsigned long user_addr = (unsigned long)iov->iov_base;
        size_t count = iov->iov_len;
-       struct rpc_task *task;
-       struct rpc_message msg = {
-               .rpc_cred = ctx->cred,
-       };
-       struct rpc_task_setup task_setup_data = {
-               .rpc_client = NFS_CLIENT(inode),
-               .rpc_message = &msg,
-               .callback_ops = &nfs_write_direct_ops,
-               .workqueue = nfsiod_workqueue,
-               .flags = RPC_TASK_ASYNC,
-       };
        size_t wsize = NFS_SERVER(inode)->wsize;
        unsigned int pgbase;
        int result;
        ssize_t started = 0;
+       struct page **pagevec = NULL;
+       unsigned int npages;
 
        do {
-               struct nfs_write_data *data;
                size_t bytes;
+               int i;
 
                pgbase = user_addr & ~PAGE_MASK;
-               bytes = min(wsize,count);
+               bytes = min(max_t(size_t, wsize, PAGE_SIZE), count);
 
                result = -ENOMEM;
-               data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes));
-               if (unlikely(!data))
+               npages = nfs_page_array_len(pgbase, bytes);
+               if (!pagevec)
+                       pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
+               if (!pagevec)
                        break;
 
                down_read(&current->mm->mmap_sem);
                result = get_user_pages(current, current->mm, user_addr,
-                                       data->npages, 0, 0, data->pagevec, NULL);
+                                       npages, 0, 0, pagevec, NULL);
                up_read(&current->mm->mmap_sem);
-               if (result < 0) {
-                       nfs_writedata_free(data);
+               if (result < 0)
                        break;
-               }
-               if ((unsigned)result < data->npages) {
+
+               if ((unsigned)result < npages) {
                        bytes = result * PAGE_SIZE;
                        if (bytes <= pgbase) {
-                               nfs_direct_release_pages(data->pagevec, result);
-                               nfs_writedata_free(data);
+                               nfs_direct_release_pages(pagevec, result);
                                break;
                        }
                        bytes -= pgbase;
-                       data->npages = result;
+                       npages = result;
                }
 
-               get_dreq(dreq);
-
-               list_move_tail(&data->pages, &dreq->rewrite_list);
-
-               data->req = (struct nfs_page *) dreq;
-               data->inode = inode;
-               data->cred = msg.rpc_cred;
-               data->args.fh = NFS_FH(inode);
-               data->args.context = ctx;
-               data->args.lock_context = dreq->l_ctx;
-               data->args.offset = pos;
-               data->args.pgbase = pgbase;
-               data->args.pages = data->pagevec;
-               data->args.count = bytes;
-               data->args.stable = sync;
-               data->res.fattr = &data->fattr;
-               data->res.count = bytes;
-               data->res.verf = &data->verf;
-               nfs_fattr_init(&data->fattr);
-
-               task_setup_data.task = &data->task;
-               task_setup_data.callback_data = data;
-               msg.rpc_argp = &data->args;
-               msg.rpc_resp = &data->res;
-               NFS_PROTO(inode)->write_setup(data, &msg);
-
-               task = rpc_run_task(&task_setup_data);
-               if (IS_ERR(task))
-                       break;
-               rpc_put_task(task);
-
-               dprintk("NFS: %5u initiated direct write call "
-                       "(req %s/%Ld, %zu bytes @ offset %Lu)\n",
-                               data->task.tk_pid,
-                               inode->i_sb->s_id,
-                               (long long)NFS_FILEID(inode),
-                               bytes,
-                               (unsigned long long)data->args.offset);
+               for (i = 0; i < npages; i++) {
+                       struct nfs_page *req;
+                       unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
 
-               started += bytes;
-               user_addr += bytes;
-               pos += bytes;
-
-               /* FIXME: Remove this useless math from the final patch */
-               pgbase += bytes;
-               pgbase &= ~PAGE_MASK;
-               BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
+                       req = nfs_create_request(dreq->ctx, dreq->inode,
+                                                pagevec[i],
+                                                pgbase, req_len);
+                       if (IS_ERR(req)) {
+                               result = PTR_ERR(req);
+                               break;
+                       }
+                       nfs_lock_request(req);
+                       req->wb_index = pos >> PAGE_SHIFT;
+                       req->wb_offset = pos & ~PAGE_MASK;
+                       if (!nfs_pageio_add_request(desc, req)) {
+                               result = desc->pg_error;
+                               nfs_unlock_and_release_request(req);
+                               break;
+                       }
+                       pgbase = 0;
+                       bytes -= req_len;
+                       started += req_len;
+                       user_addr += req_len;
+                       pos += req_len;
+                       count -= req_len;
+               }
+               /* The nfs_page now hold references to these pages */
+               nfs_direct_release_pages(pagevec, npages);
+       } while (count != 0 && result >= 0);
 
-               count -= bytes;
-       } while (count != 0);
+       kfree(pagevec);
 
        if (started)
                return started;
        return result < 0 ? (ssize_t) result : -EFAULT;
 }
 
+static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+{
+       struct nfs_direct_req *dreq = hdr->dreq;
+       struct nfs_commit_info cinfo;
+       int bit = -1;
+       struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+
+       if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+               goto out_put;
+
+       nfs_init_cinfo_from_dreq(&cinfo, dreq);
+
+       spin_lock(&dreq->lock);
+
+       if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+               dreq->flags = 0;
+               dreq->error = hdr->error;
+       }
+       if (dreq->error != 0)
+               bit = NFS_IOHDR_ERROR;
+       else {
+               dreq->count += hdr->good_bytes;
+               if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
+                       dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+                       bit = NFS_IOHDR_NEED_RESCHED;
+               } else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
+                       if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
+                               bit = NFS_IOHDR_NEED_RESCHED;
+                       else if (dreq->flags == 0) {
+                               memcpy(&dreq->verf, &req->wb_verf,
+                                      sizeof(dreq->verf));
+                               bit = NFS_IOHDR_NEED_COMMIT;
+                               dreq->flags = NFS_ODIRECT_DO_COMMIT;
+                       } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
+                               if (memcmp(&dreq->verf, &req->wb_verf, sizeof(dreq->verf))) {
+                                       dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+                                       bit = NFS_IOHDR_NEED_RESCHED;
+                               } else
+                                       bit = NFS_IOHDR_NEED_COMMIT;
+                       }
+               }
+       }
+       spin_unlock(&dreq->lock);
+
+       while (!list_empty(&hdr->pages)) {
+               req = nfs_list_entry(hdr->pages.next);
+               nfs_list_remove_request(req);
+               switch (bit) {
+               case NFS_IOHDR_NEED_RESCHED:
+               case NFS_IOHDR_NEED_COMMIT:
+                       kref_get(&req->wb_kref);
+                       nfs_mark_request_commit(req, hdr->lseg, &cinfo);
+               }
+               nfs_unlock_and_release_request(req);
+       }
+
+out_put:
+       if (put_dreq(dreq))
+               nfs_direct_write_complete(dreq, hdr->inode);
+       hdr->release(hdr);
+}
+
+static void nfs_write_sync_pgio_error(struct list_head *head)
+{
+       struct nfs_page *req;
+
+       while (!list_empty(head)) {
+               req = nfs_list_entry(head->next);
+               nfs_list_remove_request(req);
+               nfs_unlock_and_release_request(req);
+       }
+}
+
+static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
+       .error_cleanup = nfs_write_sync_pgio_error,
+       .init_hdr = nfs_direct_pgio_init,
+       .completion = nfs_direct_write_completion,
+};
+
 static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                                               const struct iovec *iov,
                                               unsigned long nr_segs,
-                                              loff_t pos, int sync)
+                                              loff_t pos)
 {
+       struct nfs_pageio_descriptor desc;
+       struct inode *inode = dreq->inode;
        ssize_t result = 0;
        size_t requested_bytes = 0;
        unsigned long seg;
 
+       nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE,
+                             &nfs_direct_write_completion_ops);
+       desc.pg_dreq = dreq;
        get_dreq(dreq);
+       atomic_inc(&inode->i_dio_count);
 
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *vec = &iov[seg];
-               result = nfs_direct_write_schedule_segment(dreq, vec,
-                                                          pos, sync);
+               result = nfs_direct_write_schedule_segment(&desc, vec, pos);
                if (result < 0)
                        break;
                requested_bytes += result;
@@ -836,12 +793,15 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                        break;
                pos += vec->iov_len;
        }
+       nfs_pageio_complete(&desc);
+       NFS_I(dreq->inode)->write_io += desc.pg_bytes_written;
 
        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
+               inode_dio_done(inode);
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }
@@ -858,16 +818,10 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
        ssize_t result = -ENOMEM;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct nfs_direct_req *dreq;
-       size_t wsize = NFS_SERVER(inode)->wsize;
-       int sync = NFS_UNSTABLE;
 
        dreq = nfs_direct_req_alloc();
        if (!dreq)
                goto out;
-       nfs_alloc_commit_data(dreq);
-
-       if (dreq->commit_data == NULL || count <= wsize)
-               sync = NFS_FILE_SYNC;
 
        dreq->inode = inode;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
@@ -877,7 +831,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
 
-       result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
+       result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos);
        if (!result)
                result = nfs_direct_wait(dreq);
 out_release:
@@ -997,10 +951,15 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
        task_io_account_write(count);
 
        retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
+       if (retval > 0) {
+               struct inode *inode = mapping->host;
 
-       if (retval > 0)
                iocb->ki_pos = pos + retval;
-
+               spin_lock(&inode->i_lock);
+               if (i_size_read(inode) < iocb->ki_pos)
+                       i_size_write(inode, iocb->ki_pos);
+               spin_unlock(&inode->i_lock);
+       }
 out:
        return retval;
 }
index aa9b709fd328d7178cd1c4bdb417fb6f454472af..a6708e6b438dd55f2924e5bb78c809c1575a97a9 100644 (file)
@@ -174,6 +174,13 @@ nfs_file_flush(struct file *file, fl_owner_t id)
        if ((file->f_mode & FMODE_WRITE) == 0)
                return 0;
 
+       /*
+        * If we're holding a write delegation, then just start the i/o
+        * but don't wait for completion (or send a commit).
+        */
+       if (nfs_have_delegation(inode, FMODE_WRITE))
+               return filemap_fdatawrite(file->f_mapping);
+
        /* Flush writes to the server and return any errors */
        return vfs_fsync(file, 0);
 }
@@ -417,6 +424,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
 
        if (status < 0)
                return status;
+       NFS_I(mapping->host)->write_io += copied;
        return copied;
 }
 
@@ -871,12 +879,81 @@ const struct file_operations nfs_file_operations = {
 static int
 nfs4_file_open(struct inode *inode, struct file *filp)
 {
+       struct nfs_open_context *ctx;
+       struct dentry *dentry = filp->f_path.dentry;
+       struct dentry *parent = NULL;
+       struct inode *dir;
+       unsigned openflags = filp->f_flags;
+       struct iattr attr;
+       int err;
+
+       BUG_ON(inode != dentry->d_inode);
        /*
-        * NFSv4 opens are handled in d_lookup and d_revalidate. If we get to
-        * this point, then something is very wrong
+        * If no cached dentry exists or if it's negative, NFSv4 handled the
+        * opens in ->lookup() or ->create().
+        *
+        * We only get this far for a cached positive dentry.  We skipped
+        * revalidation, so handle it here by dropping the dentry and returning
+        * -EOPENSTALE.  The VFS will retry the lookup/create/open.
         */
-       dprintk("NFS: %s called! inode=%p filp=%p\n", __func__, inode, filp);
-       return -ENOTDIR;
+
+       dprintk("NFS: open file(%s/%s)\n",
+               dentry->d_parent->d_name.name,
+               dentry->d_name.name);
+
+       if ((openflags & O_ACCMODE) == 3)
+               openflags--;
+
+       /* We can't create new files here */
+       openflags &= ~(O_CREAT|O_EXCL);
+
+       parent = dget_parent(dentry);
+       dir = parent->d_inode;
+
+       ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+       err = PTR_ERR(ctx);
+       if (IS_ERR(ctx))
+               goto out;
+
+       attr.ia_valid = ATTR_OPEN;
+       if (openflags & O_TRUNC) {
+               attr.ia_valid |= ATTR_SIZE;
+               attr.ia_size = 0;
+               nfs_wb_all(inode);
+       }
+
+       inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr);
+       if (IS_ERR(inode)) {
+               err = PTR_ERR(inode);
+               switch (err) {
+               case -EPERM:
+               case -EACCES:
+               case -EDQUOT:
+               case -ENOSPC:
+               case -EROFS:
+                       goto out_put_ctx;
+               default:
+                       goto out_drop;
+               }
+       }
+       iput(inode);
+       if (inode != dentry->d_inode)
+               goto out_drop;
+
+       nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+       nfs_file_set_open_context(filp, ctx);
+       err = 0;
+
+out_put_ctx:
+       put_nfs_open_context(ctx);
+out:
+       dput(parent);
+       return err;
+
+out_drop:
+       d_drop(dentry);
+       err = -EOPENSTALE;
+       goto out_put_ctx;
 }
 
 const struct file_operations nfs4_file_operations = {
index ae65c16b3670ebb5ed6da1d214f2cde16fd7f73e..c817787fbdb4024738a84d804a8800cd7b6dbedf 100644 (file)
@@ -64,23 +64,12 @@ void nfs_fscache_release_client_cookie(struct nfs_client *clp)
  * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
  * superblock across an automount point of some nature.
  */
-void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq,
-                                 struct nfs_clone_mount *mntdata)
+void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
 {
        struct nfs_fscache_key *key, *xkey;
        struct nfs_server *nfss = NFS_SB(sb);
        struct rb_node **p, *parent;
-       int diff, ulen;
-
-       if (uniq) {
-               ulen = strlen(uniq);
-       } else if (mntdata) {
-               struct nfs_server *mnt_s = NFS_SB(mntdata->sb);
-               if (mnt_s->fscache_key) {
-                       uniq = mnt_s->fscache_key->key.uniquifier;
-                       ulen = mnt_s->fscache_key->key.uniq_len;
-               }
-       }
+       int diff;
 
        if (!uniq) {
                uniq = "";
index b9c572d0679f8ced0aead467778d56229de9a03b..c5b11b53ff33b33d4249a88443b102ae33f00df5 100644 (file)
@@ -73,9 +73,7 @@ extern void nfs_fscache_unregister(void);
 extern void nfs_fscache_get_client_cookie(struct nfs_client *);
 extern void nfs_fscache_release_client_cookie(struct nfs_client *);
 
-extern void nfs_fscache_get_super_cookie(struct super_block *,
-                                        const char *,
-                                        struct nfs_clone_mount *);
+extern void nfs_fscache_get_super_cookie(struct super_block *, const char *, int);
 extern void nfs_fscache_release_super_cookie(struct super_block *);
 
 extern void nfs_fscache_init_inode_cookie(struct inode *);
@@ -172,12 +170,6 @@ static inline void nfs_fscache_unregister(void) {}
 static inline void nfs_fscache_get_client_cookie(struct nfs_client *clp) {}
 static inline void nfs_fscache_release_client_cookie(struct nfs_client *clp) {}
 
-static inline void nfs_fscache_get_super_cookie(
-       struct super_block *sb,
-       const char *uniq,
-       struct nfs_clone_mount *mntdata)
-{
-}
 static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
 
 static inline void nfs_fscache_init_inode_cookie(struct inode *inode) {}
index 4ca6f5c8038e02dfddb3a8a33dec71a8cb20295f..8abfb19bd3aa3b739611ce1f97025d643539b09c 100644 (file)
@@ -150,7 +150,7 @@ int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh)
                goto out;
 
        /* Start by getting the root filehandle from the server */
-       ret = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo);
+       ret = nfs4_proc_get_rootfh(server, mntfh, &fsinfo);
        if (ret < 0) {
                dprintk("nfs4_get_rootfh: getroot error = %d\n", -ret);
                goto out;
@@ -178,87 +178,4 @@ out:
        return ret;
 }
 
-/*
- * get an NFS4 root dentry from the root filehandle
- */
-struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh,
-                            const char *devname)
-{
-       struct nfs_server *server = NFS_SB(sb);
-       struct nfs_fattr *fattr = NULL;
-       struct dentry *ret;
-       struct inode *inode;
-       void *name = kstrdup(devname, GFP_KERNEL);
-       int error;
-
-       dprintk("--> nfs4_get_root()\n");
-
-       if (!name)
-               return ERR_PTR(-ENOMEM);
-
-       /* get the info about the server and filesystem */
-       error = nfs4_server_capabilities(server, mntfh);
-       if (error < 0) {
-               dprintk("nfs_get_root: getcaps error = %d\n",
-                       -error);
-               kfree(name);
-               return ERR_PTR(error);
-       }
-
-       fattr = nfs_alloc_fattr();
-       if (fattr == NULL) {
-               kfree(name);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       /* get the actual root for this mount */
-       error = server->nfs_client->rpc_ops->getattr(server, mntfh, fattr);
-       if (error < 0) {
-               dprintk("nfs_get_root: getattr error = %d\n", -error);
-               ret = ERR_PTR(error);
-               goto out;
-       }
-
-       if (fattr->valid & NFS_ATTR_FATTR_FSID &&
-           !nfs_fsid_equal(&server->fsid, &fattr->fsid))
-               memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
-
-       inode = nfs_fhget(sb, mntfh, fattr);
-       if (IS_ERR(inode)) {
-               dprintk("nfs_get_root: get root inode failed\n");
-               ret = ERR_CAST(inode);
-               goto out;
-       }
-
-       error = nfs_superblock_set_dummy_root(sb, inode);
-       if (error != 0) {
-               ret = ERR_PTR(error);
-               goto out;
-       }
-
-       /* root dentries normally start off anonymous and get spliced in later
-        * if the dentry tree reaches them; however if the dentry already
-        * exists, we'll pick it up at this point and use it as the root
-        */
-       ret = d_obtain_alias(inode);
-       if (IS_ERR(ret)) {
-               dprintk("nfs_get_root: get root dentry failed\n");
-               goto out;
-       }
-
-       security_d_instantiate(ret, inode);
-       spin_lock(&ret->d_lock);
-       if (IS_ROOT(ret) && !(ret->d_flags & DCACHE_NFSFS_RENAMED)) {
-               ret->d_fsdata = name;
-               name = NULL;
-       }
-       spin_unlock(&ret->d_lock);
-out:
-       if (name)
-               kfree(name);
-       nfs_free_fattr(fattr);
-       dprintk("<-- nfs4_get_root()\n");
-       return ret;
-}
-
 #endif /* CONFIG_NFS_V4 */
index ba3019f5934c21a610a96569b1d239b90eca0459..b5b86a05059c8c0cf157495878bad3621a25a8dc 100644 (file)
@@ -415,7 +415,7 @@ static int __nfs_idmap_register(struct dentry *dir,
 static void nfs_idmap_unregister(struct nfs_client *clp,
                                      struct rpc_pipe *pipe)
 {
-       struct net *net = clp->net;
+       struct net *net = clp->cl_net;
        struct super_block *pipefs_sb;
 
        pipefs_sb = rpc_get_sb_net(net);
@@ -429,7 +429,7 @@ static int nfs_idmap_register(struct nfs_client *clp,
                                   struct idmap *idmap,
                                   struct rpc_pipe *pipe)
 {
-       struct net *net = clp->net;
+       struct net *net = clp->cl_net;
        struct super_block *pipefs_sb;
        int err = 0;
 
@@ -530,9 +530,25 @@ static struct nfs_client *nfs_get_client_for_event(struct net *net, int event)
        struct nfs_net *nn = net_generic(net, nfs_net_id);
        struct dentry *cl_dentry;
        struct nfs_client *clp;
+       int err;
 
+restart:
        spin_lock(&nn->nfs_client_lock);
        list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
+               /* Wait for initialisation to finish */
+               if (clp->cl_cons_state == NFS_CS_INITING) {
+                       atomic_inc(&clp->cl_count);
+                       spin_unlock(&nn->nfs_client_lock);
+                       err = nfs_wait_client_init_complete(clp);
+                       nfs_put_client(clp);
+                       if (err)
+                               return NULL;
+                       goto restart;
+               }
+               /* Skip nfs_clients that failed to initialise */
+               if (clp->cl_cons_state < 0)
+                       continue;
+               smp_rmb();
                if (clp->rpc_ops != &nfs_v4_clientops)
                        continue;
                cl_dentry = clp->cl_idmap->idmap_pipe->dentry;
@@ -640,20 +656,16 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
        struct idmap_msg *im;
        struct idmap *idmap = (struct idmap *)aux;
        struct key *key = cons->key;
-       int ret;
+       int ret = -ENOMEM;
 
        /* msg and im are freed in idmap_pipe_destroy_msg */
        msg = kmalloc(sizeof(*msg), GFP_KERNEL);
-       if (IS_ERR(msg)) {
-               ret = PTR_ERR(msg);
+       if (!msg)
                goto out0;
-       }
 
        im = kmalloc(sizeof(*im), GFP_KERNEL);
-       if (IS_ERR(im)) {
-               ret = PTR_ERR(im);
+       if (!im)
                goto out1;
-       }
 
        ret = nfs_idmap_prepare_message(key->description, im, msg);
        if (ret < 0)
index e8bbfa5b35009ae6ff7b2ac576d23615dec1c1c7..e605d695dbcb7b746d633f37e5ceb509deb7b790 100644 (file)
@@ -121,7 +121,7 @@ static void nfs_clear_inode(struct inode *inode)
 void nfs_evict_inode(struct inode *inode)
 {
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
        nfs_clear_inode(inode);
 }
 
@@ -285,9 +285,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
                inode->i_mode = fattr->mode;
                if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
                                && nfs_server_capable(inode, NFS_CAP_MODE))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-                               | NFS_INO_INVALID_ACCESS
-                               | NFS_INO_INVALID_ACL;
+                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
                /* Why so? Because we want revalidate for devices/FIFOs, and
                 * that's precisely what we have in nfs_file_inode_operations.
                 */
@@ -300,8 +298,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
                        inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
                        inode->i_fop = &nfs_dir_operations;
                        inode->i_data.a_ops = &nfs_dir_aops;
-                       if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS))
-                               set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
                        /* Deal with crossing mountpoints */
                        if (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT ||
                                        fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
@@ -327,6 +323,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
                inode->i_gid = -2;
                inode->i_blocks = 0;
                memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
+               nfsi->write_io = 0;
+               nfsi->read_io = 0;
 
                nfsi->read_cache_jiffies = fattr->time_start;
                nfsi->attr_gencount = fattr->gencount;
@@ -337,24 +335,19 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
                if (fattr->valid & NFS_ATTR_FATTR_MTIME)
                        inode->i_mtime = fattr->mtime;
                else if (nfs_server_capable(inode, NFS_CAP_MTIME))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-                               | NFS_INO_INVALID_DATA;
+                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
                if (fattr->valid & NFS_ATTR_FATTR_CTIME)
                        inode->i_ctime = fattr->ctime;
                else if (nfs_server_capable(inode, NFS_CAP_CTIME))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-                               | NFS_INO_INVALID_ACCESS
-                               | NFS_INO_INVALID_ACL;
+                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
                if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
                        inode->i_version = fattr->change_attr;
                else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-                               | NFS_INO_INVALID_DATA;
+                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
                if (fattr->valid & NFS_ATTR_FATTR_SIZE)
                        inode->i_size = nfs_size_to_loff_t(fattr->size);
                else
                        nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-                               | NFS_INO_INVALID_DATA
                                | NFS_INO_REVAL_PAGECACHE;
                if (fattr->valid & NFS_ATTR_FATTR_NLINK)
                        set_nlink(inode, fattr->nlink);
@@ -363,15 +356,11 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
                if (fattr->valid & NFS_ATTR_FATTR_OWNER)
                        inode->i_uid = fattr->uid;
                else if (nfs_server_capable(inode, NFS_CAP_OWNER))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-                               | NFS_INO_INVALID_ACCESS
-                               | NFS_INO_INVALID_ACL;
+                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
                if (fattr->valid & NFS_ATTR_FATTR_GROUP)
                        inode->i_gid = fattr->gid;
                else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-                               | NFS_INO_INVALID_ACCESS
-                               | NFS_INO_INVALID_ACL;
+                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
                if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
                        inode->i_blocks = fattr->du.nfs2.blocks;
                if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
@@ -429,8 +418,10 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
                return 0;
 
        /* Write all dirty data */
-       if (S_ISREG(inode->i_mode))
+       if (S_ISREG(inode->i_mode)) {
+               nfs_inode_dio_wait(inode);
                nfs_wb_all(inode);
+       }
 
        fattr = nfs_alloc_fattr();
        if (fattr == NULL)
@@ -514,6 +505,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
 
        /* Flush out writes to the server in order to update c/mtime.  */
        if (S_ISREG(inode->i_mode)) {
+               nfs_inode_dio_wait(inode);
                err = filemap_write_and_wait(inode->i_mapping);
                if (err)
                        goto out;
@@ -654,6 +646,7 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f
        nfs_init_lock_context(&ctx->lock_context);
        ctx->lock_context.open_context = ctx;
        INIT_LIST_HEAD(&ctx->list);
+       ctx->mdsthreshold = NULL;
        return ctx;
 }
 
@@ -682,6 +675,7 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
                put_rpccred(ctx->cred);
        dput(ctx->dentry);
        nfs_sb_deactive(sb);
+       kfree(ctx->mdsthreshold);
        kfree(ctx);
 }
 
@@ -870,6 +864,15 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
        return 0;
 }
 
+static bool nfs_mapping_need_revalidate_inode(struct inode *inode)
+{
+       if (nfs_have_delegated_attributes(inode))
+               return false;
+       return (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE)
+               || nfs_attribute_timeout(inode)
+               || NFS_STALE(inode);
+}
+
 /**
  * nfs_revalidate_mapping - Revalidate the pagecache
  * @inode - pointer to host inode
@@ -880,9 +883,7 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
        struct nfs_inode *nfsi = NFS_I(inode);
        int ret = 0;
 
-       if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
-                       || nfs_attribute_cache_expired(inode)
-                       || NFS_STALE(inode)) {
+       if (nfs_mapping_need_revalidate_inode(inode)) {
                ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
                if (ret < 0)
                        goto out;
@@ -948,6 +949,8 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
        unsigned long invalid = 0;
 
 
+       if (nfs_have_delegated_attributes(inode))
+               return 0;
        /* Has the inode gone and changed behind our back? */
        if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
                return -EIO;
@@ -960,7 +963,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
 
        /* Verify a few of the more important attributes */
        if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
-               invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+               invalid |= NFS_INO_INVALID_ATTR;
 
        if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
                cur_size = i_size_read(inode);
@@ -1279,14 +1282,26 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                        nfs_display_fhandle_hash(NFS_FH(inode)),
                        atomic_read(&inode->i_count), fattr->valid);
 
-       if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
-               goto out_fileid;
+       if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) {
+               printk(KERN_ERR "NFS: server %s error: fileid changed\n"
+                       "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
+                       NFS_SERVER(inode)->nfs_client->cl_hostname,
+                       inode->i_sb->s_id, (long long)nfsi->fileid,
+                       (long long)fattr->fileid);
+               goto out_err;
+       }
 
        /*
         * Make sure the inode's type hasn't changed.
         */
-       if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
-               goto out_changed;
+       if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) {
+               /*
+               * Big trouble! The inode has become a different object.
+               */
+               printk(KERN_DEBUG "NFS: %s: inode %ld mode changed, %07o to %07o\n",
+                               __func__, inode->i_ino, inode->i_mode, fattr->mode);
+               goto out_err;
+       }
 
        server = NFS_SERVER(inode);
        /* Update the fsid? */
@@ -1314,7 +1329,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                if (inode->i_version != fattr->change_attr) {
                        dprintk("NFS: change_attr change on server for file %s/%ld\n",
                                        inode->i_sb->s_id, inode->i_ino);
-                       invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+                       invalid |= NFS_INO_INVALID_ATTR
+                               | NFS_INO_INVALID_DATA
+                               | NFS_INO_INVALID_ACCESS
+                               | NFS_INO_INVALID_ACL
+                               | NFS_INO_REVAL_PAGECACHE;
                        if (S_ISDIR(inode->i_mode))
                                nfs_force_lookup_revalidate(inode);
                        inode->i_version = fattr->change_attr;
@@ -1323,38 +1342,15 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                invalid |= save_cache_validity;
 
        if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
-               /* NFSv2/v3: Check if the mtime agrees */
-               if (!timespec_equal(&inode->i_mtime, &fattr->mtime)) {
-                       dprintk("NFS: mtime change on server for file %s/%ld\n",
-                                       inode->i_sb->s_id, inode->i_ino);
-                       invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
-                       if (S_ISDIR(inode->i_mode))
-                               nfs_force_lookup_revalidate(inode);
-                       memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
-               }
+               memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
        } else if (server->caps & NFS_CAP_MTIME)
                invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
-                               | NFS_INO_INVALID_DATA
-                               | NFS_INO_REVAL_PAGECACHE
                                | NFS_INO_REVAL_FORCED);
 
        if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
-               /* If ctime has changed we should definitely clear access+acl caches */
-               if (!timespec_equal(&inode->i_ctime, &fattr->ctime)) {
-                       invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
-                       /* and probably clear data for a directory too as utimes can cause
-                        * havoc with our cache.
-                        */
-                       if (S_ISDIR(inode->i_mode)) {
-                               invalid |= NFS_INO_INVALID_DATA;
-                               nfs_force_lookup_revalidate(inode);
-                       }
-                       memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
-               }
+               memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
        } else if (server->caps & NFS_CAP_CTIME)
                invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
-                               | NFS_INO_INVALID_ACCESS
-                               | NFS_INO_INVALID_ACL
                                | NFS_INO_REVAL_FORCED);
 
        /* Check if our cached file size is stale */
@@ -1466,12 +1462,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                nfsi->cache_validity |= invalid;
 
        return 0;
- out_changed:
-       /*
-        * Big trouble! The inode has become a different object.
-        */
-       printk(KERN_DEBUG "NFS: %s: inode %ld mode changed, %07o to %07o\n",
-                       __func__, inode->i_ino, inode->i_mode, fattr->mode);
  out_err:
        /*
         * No need to worry about unhashing the dentry, as the
@@ -1480,13 +1470,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
         */
        nfs_invalidate_inode(inode);
        return -ESTALE;
-
- out_fileid:
-       printk(KERN_ERR "NFS: server %s error: fileid changed\n"
-               "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
-               NFS_SERVER(inode)->nfs_client->cl_hostname, inode->i_sb->s_id,
-               (long long)nfsi->fileid, (long long)fattr->fileid);
-       goto out_err;
 }
 
 
@@ -1500,7 +1483,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 void nfs4_evict_inode(struct inode *inode)
 {
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
        pnfs_return_layout(inode);
        pnfs_destroy_layout(NFS_I(inode));
        /* If we are holding a delegation, return it! */
@@ -1547,7 +1530,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
        nfsi->delegation_state = 0;
        init_rwsem(&nfsi->rwsem);
        nfsi->layout = NULL;
-       atomic_set(&nfsi->commits_outstanding, 0);
+       atomic_set(&nfsi->commit_info.rpcs_out, 0);
 #endif
 }
 
@@ -1559,9 +1542,9 @@ static void init_once(void *foo)
        INIT_LIST_HEAD(&nfsi->open_files);
        INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
        INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
-       INIT_LIST_HEAD(&nfsi->commit_list);
+       INIT_LIST_HEAD(&nfsi->commit_info.list);
        nfsi->npages = 0;
-       nfsi->ncommit = 0;
+       nfsi->commit_info.ncommit = 0;
        atomic_set(&nfsi->silly_count, 1);
        INIT_HLIST_HEAD(&nfsi->silly_list);
        init_waitqueue_head(&nfsi->waitqueue);
index b777bdaba4c52e72ee86a1d6c1e67ec381a37788..18f99ef7134387128507e8ffd93fff6943b241d6 100644 (file)
@@ -103,6 +103,7 @@ struct nfs_parsed_mount_data {
        unsigned int            version;
        unsigned int            minorversion;
        char                    *fscache_uniq;
+       bool                    need_mount;
 
        struct {
                struct sockaddr_storage address;
@@ -167,11 +168,13 @@ extern struct nfs_server *nfs_clone_server(struct nfs_server *,
                                           struct nfs_fh *,
                                           struct nfs_fattr *,
                                           rpc_authflavor_t);
+extern int nfs_wait_client_init_complete(const struct nfs_client *clp);
 extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
-extern int nfs4_check_client_ready(struct nfs_client *clp);
 extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
                                             const struct sockaddr *ds_addr,
-                                            int ds_addrlen, int ds_proto);
+                                            int ds_addrlen, int ds_proto,
+                                            unsigned int ds_timeo,
+                                            unsigned int ds_retrans);
 #ifdef CONFIG_PROC_FS
 extern int __init nfs_fs_proc_init(void);
 extern void nfs_fs_proc_exit(void);
@@ -185,21 +188,11 @@ static inline void nfs_fs_proc_exit(void)
 }
 #endif
 
-/* nfs4namespace.c */
-#ifdef CONFIG_NFS_V4
-extern struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry);
-#else
-static inline
-struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
-{
-       return ERR_PTR(-ENOENT);
-}
-#endif
-
 /* callback_xdr.c */
 extern struct svc_version nfs4_callback_version1;
 extern struct svc_version nfs4_callback_version4;
 
+struct nfs_pageio_descriptor;
 /* pagelist.c */
 extern int __init nfs_init_nfspagecache(void);
 extern void nfs_destroy_nfspagecache(void);
@@ -210,9 +203,13 @@ extern void nfs_destroy_writepagecache(void);
 
 extern int __init nfs_init_directcache(void);
 extern void nfs_destroy_directcache(void);
+extern bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount);
+extern void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
+                             struct nfs_pgio_header *hdr,
+                             void (*release)(struct nfs_pgio_header *hdr));
+void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos);
 
 /* nfs2xdr.c */
-extern int nfs_stat_to_errno(enum nfs_stat);
 extern struct rpc_procinfo nfs_procedures[];
 extern int nfs2_decode_dirent(struct xdr_stream *,
                                struct nfs_entry *, int);
@@ -237,14 +234,13 @@ extern const u32 nfs41_maxwrite_overhead;
 extern struct rpc_procinfo nfs4_procedures[];
 #endif
 
-extern int nfs4_init_ds_session(struct nfs_client *clp);
+extern int nfs4_init_ds_session(struct nfs_client *, unsigned long);
 
 /* proc.c */
 void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
-extern int nfs_init_client(struct nfs_client *clp,
+extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
                           const struct rpc_timeout *timeparms,
-                          const char *ip_addr, rpc_authflavor_t authflavour,
-                          int noresvport);
+                          const char *ip_addr, rpc_authflavor_t authflavour);
 
 /* dir.c */
 extern int nfs_access_cache_shrinker(struct shrinker *shrink,
@@ -280,9 +276,10 @@ extern void nfs_sb_deactive(struct super_block *sb);
 extern char *nfs_path(char **p, struct dentry *dentry,
                      char *buffer, ssize_t buflen);
 extern struct vfsmount *nfs_d_automount(struct path *path);
-#ifdef CONFIG_NFS_V4
-rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
-#endif
+struct vfsmount *nfs_submount(struct nfs_server *, struct dentry *,
+                             struct nfs_fh *, struct nfs_fattr *);
+struct vfsmount *nfs_do_submount(struct dentry *, struct nfs_fh *,
+                                struct nfs_fattr *, rpc_authflavor_t);
 
 /* getroot.c */
 extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *,
@@ -294,46 +291,73 @@ extern struct dentry *nfs4_get_root(struct super_block *, struct nfs_fh *,
 extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh);
 #endif
 
-struct nfs_pageio_descriptor;
+struct nfs_pgio_completion_ops;
 /* read.c */
-extern int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
-                            const struct rpc_call_ops *call_ops);
+extern struct nfs_read_header *nfs_readhdr_alloc(void);
+extern void nfs_readhdr_free(struct nfs_pgio_header *hdr);
+extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
+                       struct inode *inode,
+                       const struct nfs_pgio_completion_ops *compl_ops);
+extern int nfs_initiate_read(struct rpc_clnt *clnt,
+                            struct nfs_read_data *data,
+                            const struct rpc_call_ops *call_ops, int flags);
 extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
 extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
-               struct list_head *head);
-
+                             struct nfs_pgio_header *hdr);
 extern void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
-               struct inode *inode);
+                       struct inode *inode,
+                       const struct nfs_pgio_completion_ops *compl_ops);
 extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
 extern void nfs_readdata_release(struct nfs_read_data *rdata);
 
 /* write.c */
+extern void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
+                       struct inode *inode, int ioflags,
+                       const struct nfs_pgio_completion_ops *compl_ops);
+extern struct nfs_write_header *nfs_writehdr_alloc(void);
+extern void nfs_writehdr_free(struct nfs_pgio_header *hdr);
 extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
-               struct list_head *head);
+                            struct nfs_pgio_header *hdr);
 extern void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
-                                 struct inode *inode, int ioflags);
+                       struct inode *inode, int ioflags,
+                       const struct nfs_pgio_completion_ops *compl_ops);
 extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
 extern void nfs_writedata_release(struct nfs_write_data *wdata);
-extern void nfs_commit_free(struct nfs_write_data *p);
-extern int nfs_initiate_write(struct nfs_write_data *data,
-                             struct rpc_clnt *clnt,
+extern void nfs_commit_free(struct nfs_commit_data *p);
+extern int nfs_initiate_write(struct rpc_clnt *clnt,
+                             struct nfs_write_data *data,
                              const struct rpc_call_ops *call_ops,
-                             int how);
+                             int how, int flags);
 extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
-extern int nfs_initiate_commit(struct nfs_write_data *data,
-                              struct rpc_clnt *clnt,
+extern void nfs_commit_prepare(struct rpc_task *task, void *calldata);
+extern int nfs_initiate_commit(struct rpc_clnt *clnt,
+                              struct nfs_commit_data *data,
                               const struct rpc_call_ops *call_ops,
-                              int how);
-extern void nfs_init_commit(struct nfs_write_data *data,
+                              int how, int flags);
+extern void nfs_init_commit(struct nfs_commit_data *data,
                            struct list_head *head,
-                           struct pnfs_layout_segment *lseg);
+                           struct pnfs_layout_segment *lseg,
+                           struct nfs_commit_info *cinfo);
+int nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
+                        struct nfs_commit_info *cinfo, int max);
+int nfs_scan_commit(struct inode *inode, struct list_head *dst,
+                   struct nfs_commit_info *cinfo);
+void nfs_mark_request_commit(struct nfs_page *req,
+                            struct pnfs_layout_segment *lseg,
+                            struct nfs_commit_info *cinfo);
+int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
+                           int how, struct nfs_commit_info *cinfo);
 void nfs_retry_commit(struct list_head *page_list,
-                     struct pnfs_layout_segment *lseg);
-void nfs_commit_clear_lock(struct nfs_inode *nfsi);
-void nfs_commitdata_release(void *data);
-void nfs_commit_release_pages(struct nfs_write_data *data);
-void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head);
-void nfs_request_remove_commit_list(struct nfs_page *req);
+                     struct pnfs_layout_segment *lseg,
+                     struct nfs_commit_info *cinfo);
+void nfs_commitdata_release(struct nfs_commit_data *data);
+void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
+                                struct nfs_commit_info *cinfo);
+void nfs_request_remove_commit_list(struct nfs_page *req,
+                                   struct nfs_commit_info *cinfo);
+void nfs_init_cinfo(struct nfs_commit_info *cinfo,
+                   struct inode *inode,
+                   struct nfs_direct_req *dreq);
 
 #ifdef CONFIG_MIGRATION
 extern int nfs_migrate_page(struct address_space *,
@@ -342,15 +366,20 @@ extern int nfs_migrate_page(struct address_space *,
 #define nfs_migrate_page NULL
 #endif
 
+/* direct.c */
+void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
+                             struct nfs_direct_req *dreq);
+static inline void nfs_inode_dio_wait(struct inode *inode)
+{
+       inode_dio_wait(inode);
+}
+
 /* nfs4proc.c */
 extern void __nfs4_read_done_cb(struct nfs_read_data *);
-extern void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data);
-extern int nfs4_init_client(struct nfs_client *clp,
+extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
                            const struct rpc_timeout *timeparms,
                            const char *ip_addr,
-                           rpc_authflavor_t authflavour,
-                           int noresvport);
-extern void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data);
+                           rpc_authflavor_t authflavour);
 extern int _nfs4_call_sync(struct rpc_clnt *clnt,
                           struct nfs_server *server,
                           struct rpc_message *msg,
@@ -466,3 +495,15 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
                PAGE_SIZE - 1) >> PAGE_SHIFT;
 }
 
+/*
+ * Convert a struct timespec into a 64-bit change attribute
+ *
+ * This does approximately the same thing as timespec_to_ns(),
+ * but for calculation efficiency, we multiply the seconds by
+ * 1024*1024*1024.
+ */
+static inline
+u64 nfs_timespec_to_change_attr(const struct timespec *ts)
+{
+       return ((u64)ts->tv_sec << 30) + ts->tv_nsec;
+}
index d51868e5683c0b34530c9db38a0cd4c26277c0b8..08b9c93675da512e0ef8174a25e1e048f282b527 100644 (file)
@@ -26,11 +26,6 @@ static LIST_HEAD(nfs_automount_list);
 static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
 int nfs_mountpoint_expiry_timeout = 500 * HZ;
 
-static struct vfsmount *nfs_do_submount(struct dentry *dentry,
-                                       struct nfs_fh *fh,
-                                       struct nfs_fattr *fattr,
-                                       rpc_authflavor_t authflavor);
-
 /*
  * nfs_path - reconstruct the path given an arbitrary dentry
  * @base - used to return pointer to the end of devname part of path
@@ -118,64 +113,6 @@ Elong:
        return ERR_PTR(-ENAMETOOLONG);
 }
 
-#ifdef CONFIG_NFS_V4
-rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
-{
-       struct gss_api_mech *mech;
-       struct xdr_netobj oid;
-       int i;
-       rpc_authflavor_t pseudoflavor = RPC_AUTH_UNIX;
-
-       for (i = 0; i < flavors->num_flavors; i++) {
-               struct nfs4_secinfo_flavor *flavor;
-               flavor = &flavors->flavors[i];
-
-               if (flavor->flavor == RPC_AUTH_NULL || flavor->flavor == RPC_AUTH_UNIX) {
-                       pseudoflavor = flavor->flavor;
-                       break;
-               } else if (flavor->flavor == RPC_AUTH_GSS) {
-                       oid.len  = flavor->gss.sec_oid4.len;
-                       oid.data = flavor->gss.sec_oid4.data;
-                       mech = gss_mech_get_by_OID(&oid);
-                       if (!mech)
-                               continue;
-                       pseudoflavor = gss_svc_to_pseudoflavor(mech, flavor->gss.service);
-                       gss_mech_put(mech);
-                       break;
-               }
-       }
-
-       return pseudoflavor;
-}
-
-static struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
-                                             struct qstr *name,
-                                             struct nfs_fh *fh,
-                                             struct nfs_fattr *fattr)
-{
-       int err;
-
-       if (NFS_PROTO(dir)->version == 4)
-               return nfs4_proc_lookup_mountpoint(dir, name, fh, fattr);
-
-       err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
-       if (err)
-               return ERR_PTR(err);
-       return rpc_clone_client(NFS_SERVER(dir)->client);
-}
-#else /* CONFIG_NFS_V4 */
-static inline struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
-                                                    struct qstr *name,
-                                                    struct nfs_fh *fh,
-                                                    struct nfs_fattr *fattr)
-{
-       int err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
-       if (err)
-               return ERR_PTR(err);
-       return rpc_clone_client(NFS_SERVER(dir)->client);
-}
-#endif /* CONFIG_NFS_V4 */
-
 /*
  * nfs_d_automount - Handle crossing a mountpoint on the server
  * @path - The mountpoint
@@ -191,10 +128,9 @@ static inline struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
 struct vfsmount *nfs_d_automount(struct path *path)
 {
        struct vfsmount *mnt;
-       struct dentry *parent;
+       struct nfs_server *server = NFS_SERVER(path->dentry->d_inode);
        struct nfs_fh *fh = NULL;
        struct nfs_fattr *fattr = NULL;
-       struct rpc_clnt *client;
 
        dprintk("--> nfs_d_automount()\n");
 
@@ -210,21 +146,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
 
        dprintk("%s: enter\n", __func__);
 
-       /* Look it up again to get its attributes */
-       parent = dget_parent(path->dentry);
-       client = nfs_lookup_mountpoint(parent->d_inode, &path->dentry->d_name, fh, fattr);
-       dput(parent);
-       if (IS_ERR(client)) {
-               mnt = ERR_CAST(client);
-               goto out;
-       }
-
-       if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
-               mnt = nfs_do_refmount(client, path->dentry);
-       else
-               mnt = nfs_do_submount(path->dentry, fh, fattr, client->cl_auth->au_flavor);
-       rpc_shutdown_client(client);
-
+       mnt = server->nfs_client->rpc_ops->submount(server, path->dentry, fh, fattr);
        if (IS_ERR(mnt))
                goto out;
 
@@ -297,10 +219,8 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
  * @authflavor - security flavor to use when performing the mount
  *
  */
-static struct vfsmount *nfs_do_submount(struct dentry *dentry,
-                                       struct nfs_fh *fh,
-                                       struct nfs_fattr *fattr,
-                                       rpc_authflavor_t authflavor)
+struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
+                                struct nfs_fattr *fattr, rpc_authflavor_t authflavor)
 {
        struct nfs_clone_mount mountdata = {
                .sb = dentry->d_sb,
@@ -333,3 +253,18 @@ out:
        dprintk("<-- nfs_do_submount() = %p\n", mnt);
        return mnt;
 }
+
+struct vfsmount *nfs_submount(struct nfs_server *server, struct dentry *dentry,
+                             struct nfs_fh *fh, struct nfs_fattr *fattr)
+{
+       int err;
+       struct dentry *parent = dget_parent(dentry);
+
+       /* Look it up again to get its attributes */
+       err = server->nfs_client->rpc_ops->lookup(parent->d_inode, &dentry->d_name, fh, fattr);
+       dput(parent);
+       if (err != 0)
+               return ERR_PTR(err);
+
+       return nfs_do_submount(dentry, fh, fattr, server->client->cl_auth->au_flavor);
+}
index aa14ec303e9408a7111bf2cf5ceffeda7703d17e..8a6394edb8b015375eb26016594a09e7b275261f 100644 (file)
@@ -1,3 +1,7 @@
+/*
+ * NFS-private data for each "struct net".  Accessed with net_generic().
+ */
+
 #ifndef __NFS_NETNS_H__
 #define __NFS_NETNS_H__
 
@@ -20,6 +24,7 @@ struct nfs_net {
        struct idr cb_ident_idr; /* Protected by nfs_client_lock */
 #endif
        spinlock_t nfs_client_lock;
+       struct timespec boot_time;
 };
 
 extern int nfs_net_id;
index 1f56000fabbdc1b1283961e340661e20e986e3bf..baf759bccd054d562d24d9adf43af18915da8267 100644 (file)
@@ -61,6 +61,7 @@
 #define NFS_readdirres_sz      (1)
 #define NFS_statfsres_sz       (1+NFS_info_sz)
 
+static int nfs_stat_to_errno(enum nfs_stat);
 
 /*
  * While encoding arguments, set up the reply buffer in advance to
@@ -313,6 +314,8 @@ static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
        p = xdr_decode_time(p, &fattr->atime);
        p = xdr_decode_time(p, &fattr->mtime);
        xdr_decode_time(p, &fattr->ctime);
+       fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);
+
        return 0;
 out_overflow:
        print_overflow_msg(__func__, xdr);
@@ -1109,7 +1112,7 @@ static const struct {
  * Returns a local errno value, or -EIO if the NFS status code is
  * not recognized.  This function is used jointly by NFSv2 and NFSv3.
  */
-int nfs_stat_to_errno(enum nfs_stat status)
+static int nfs_stat_to_errno(enum nfs_stat status)
 {
        int i;
 
index 75c68299358e226e805e861e4ac8ffc81d96ab4f..2292a0fd2bffd3b042b43e9dc4b607acbd0eb6ad 100644 (file)
@@ -142,7 +142,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
 }
 
 static int
-nfs3_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+nfs3_proc_lookup(struct inode *dir, struct qstr *name,
                 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
 {
        struct nfs3_diropargs   arg = {
@@ -810,11 +810,13 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
 
 static int nfs3_read_done(struct rpc_task *task, struct nfs_read_data *data)
 {
-       if (nfs3_async_handle_jukebox(task, data->inode))
+       struct inode *inode = data->header->inode;
+
+       if (nfs3_async_handle_jukebox(task, inode))
                return -EAGAIN;
 
-       nfs_invalidate_atime(data->inode);
-       nfs_refresh_inode(data->inode, &data->fattr);
+       nfs_invalidate_atime(inode);
+       nfs_refresh_inode(inode, &data->fattr);
        return 0;
 }
 
@@ -830,10 +832,12 @@ static void nfs3_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_da
 
 static int nfs3_write_done(struct rpc_task *task, struct nfs_write_data *data)
 {
-       if (nfs3_async_handle_jukebox(task, data->inode))
+       struct inode *inode = data->header->inode;
+
+       if (nfs3_async_handle_jukebox(task, inode))
                return -EAGAIN;
        if (task->tk_status >= 0)
-               nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr);
+               nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
        return 0;
 }
 
@@ -847,7 +851,12 @@ static void nfs3_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_
        rpc_call_start(task);
 }
 
-static int nfs3_commit_done(struct rpc_task *task, struct nfs_write_data *data)
+static void nfs3_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+{
+       rpc_call_start(task);
+}
+
+static int nfs3_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
 {
        if (nfs3_async_handle_jukebox(task, data->inode))
                return -EAGAIN;
@@ -855,7 +864,7 @@ static int nfs3_commit_done(struct rpc_task *task, struct nfs_write_data *data)
        return 0;
 }
 
-static void nfs3_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+static void nfs3_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
 {
        msg->rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT];
 }
@@ -875,6 +884,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
        .file_inode_ops = &nfs3_file_inode_operations,
        .file_ops       = &nfs_file_operations,
        .getroot        = nfs3_proc_get_root,
+       .submount       = nfs_submount,
        .getattr        = nfs3_proc_getattr,
        .setattr        = nfs3_proc_setattr,
        .lookup         = nfs3_proc_lookup,
@@ -906,6 +916,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
        .write_rpc_prepare = nfs3_proc_write_rpc_prepare,
        .write_done     = nfs3_write_done,
        .commit_setup   = nfs3_proc_commit_setup,
+       .commit_rpc_prepare = nfs3_proc_commit_rpc_prepare,
        .commit_done    = nfs3_commit_done,
        .lock           = nfs3_proc_lock,
        .clear_acl_cache = nfs3_forget_cached_acls,
index a77cc9a3ce5561f1d8b23e78bb16ac49fcbf14b4..902de489ec9bac793dd2e3fa65b663262879b271 100644 (file)
@@ -86,6 +86,8 @@
                                XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE))
 #define ACL3_setaclres_sz      (1+NFS3_post_op_attr_sz)
 
+static int nfs3_stat_to_errno(enum nfs_stat);
+
 /*
  * Map file type to S_IFMT bits
  */
@@ -675,6 +677,7 @@ static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
        p = xdr_decode_nfstime3(p, &fattr->atime);
        p = xdr_decode_nfstime3(p, &fattr->mtime);
        xdr_decode_nfstime3(p, &fattr->ctime);
+       fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);
 
        fattr->valid |= NFS_ATTR_FATTR_V3;
        return 0;
@@ -725,12 +728,14 @@ static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
                goto out_overflow;
 
        fattr->valid |= NFS_ATTR_FATTR_PRESIZE
+               | NFS_ATTR_FATTR_PRECHANGE
                | NFS_ATTR_FATTR_PREMTIME
                | NFS_ATTR_FATTR_PRECTIME;
 
        p = xdr_decode_size3(p, &fattr->pre_size);
        p = xdr_decode_nfstime3(p, &fattr->pre_mtime);
        xdr_decode_nfstime3(p, &fattr->pre_ctime);
+       fattr->pre_change_attr = nfs_timespec_to_change_attr(&fattr->pre_ctime);
 
        return 0;
 out_overflow:
@@ -1287,7 +1292,7 @@ static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
  *     };
  */
 static void encode_commit3args(struct xdr_stream *xdr,
-                              const struct nfs_writeargs *args)
+                              const struct nfs_commitargs *args)
 {
        __be32 *p;
 
@@ -1300,7 +1305,7 @@ static void encode_commit3args(struct xdr_stream *xdr,
 
 static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
                                     struct xdr_stream *xdr,
-                                    const struct nfs_writeargs *args)
+                                    const struct nfs_commitargs *args)
 {
        encode_commit3args(xdr, args);
 }
@@ -1385,7 +1390,7 @@ static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
 out:
        return error;
 out_default:
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1424,7 +1429,7 @@ static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
 out:
        return error;
 out_status:
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1472,7 +1477,7 @@ out_default:
        error = decode_post_op_attr(xdr, result->dir_attr);
        if (unlikely(error))
                goto out;
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1513,7 +1518,7 @@ static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
 out:
        return error;
 out_default:
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1554,7 +1559,7 @@ static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
 out:
        return error;
 out_default:
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1636,7 +1641,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
 out:
        return error;
 out_status:
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1706,7 +1711,7 @@ static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
 out:
        return error;
 out_status:
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1770,7 +1775,7 @@ out_default:
        error = decode_wcc_data(xdr, result->dir_attr);
        if (unlikely(error))
                goto out;
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1809,7 +1814,7 @@ static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
 out:
        return error;
 out_status:
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1853,7 +1858,7 @@ static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
 out:
        return error;
 out_status:
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -1896,7 +1901,7 @@ static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
 out:
        return error;
 out_status:
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 /**
@@ -2088,7 +2093,7 @@ out_default:
        error = decode_post_op_attr(xdr, result->dir_attr);
        if (unlikely(error))
                goto out;
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -2156,7 +2161,7 @@ static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
 out:
        return error;
 out_status:
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -2232,7 +2237,7 @@ static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
 out:
        return error;
 out_status:
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -2295,7 +2300,7 @@ static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
 out:
        return error;
 out_status:
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 /*
@@ -2319,7 +2324,7 @@ out_status:
  */
 static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
                                   struct xdr_stream *xdr,
-                                  struct nfs_writeres *result)
+                                  struct nfs_commitres *result)
 {
        enum nfs_stat status;
        int error;
@@ -2336,7 +2341,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
 out:
        return error;
 out_status:
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 #ifdef CONFIG_NFS_V3_ACL
@@ -2401,7 +2406,7 @@ static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
 out:
        return error;
 out_default:
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
@@ -2420,11 +2425,76 @@ static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
 out:
        return error;
 out_default:
-       return nfs_stat_to_errno(status);
+       return nfs3_stat_to_errno(status);
 }
 
 #endif  /* CONFIG_NFS_V3_ACL */
 
+
+/*
+ * We need to translate between nfs status return values and
+ * the local errno values which may not be the same.
+ */
+static const struct {
+       int stat;
+       int errno;
+} nfs_errtbl[] = {
+       { NFS_OK,               0               },
+       { NFSERR_PERM,          -EPERM          },
+       { NFSERR_NOENT,         -ENOENT         },
+       { NFSERR_IO,            -errno_NFSERR_IO},
+       { NFSERR_NXIO,          -ENXIO          },
+/*     { NFSERR_EAGAIN,        -EAGAIN         }, */
+       { NFSERR_ACCES,         -EACCES         },
+       { NFSERR_EXIST,         -EEXIST         },
+       { NFSERR_XDEV,          -EXDEV          },
+       { NFSERR_NODEV,         -ENODEV         },
+       { NFSERR_NOTDIR,        -ENOTDIR        },
+       { NFSERR_ISDIR,         -EISDIR         },
+       { NFSERR_INVAL,         -EINVAL         },
+       { NFSERR_FBIG,          -EFBIG          },
+       { NFSERR_NOSPC,         -ENOSPC         },
+       { NFSERR_ROFS,          -EROFS          },
+       { NFSERR_MLINK,         -EMLINK         },
+       { NFSERR_NAMETOOLONG,   -ENAMETOOLONG   },
+       { NFSERR_NOTEMPTY,      -ENOTEMPTY      },
+       { NFSERR_DQUOT,         -EDQUOT         },
+       { NFSERR_STALE,         -ESTALE         },
+       { NFSERR_REMOTE,        -EREMOTE        },
+#ifdef EWFLUSH
+       { NFSERR_WFLUSH,        -EWFLUSH        },
+#endif
+       { NFSERR_BADHANDLE,     -EBADHANDLE     },
+       { NFSERR_NOT_SYNC,      -ENOTSYNC       },
+       { NFSERR_BAD_COOKIE,    -EBADCOOKIE     },
+       { NFSERR_NOTSUPP,       -ENOTSUPP       },
+       { NFSERR_TOOSMALL,      -ETOOSMALL      },
+       { NFSERR_SERVERFAULT,   -EREMOTEIO      },
+       { NFSERR_BADTYPE,       -EBADTYPE       },
+       { NFSERR_JUKEBOX,       -EJUKEBOX       },
+       { -1,                   -EIO            }
+};
+
+/**
+ * nfs3_stat_to_errno - convert an NFS status code to a local errno
+ * @status: NFS status code to convert
+ *
+ * Returns a local errno value, or -EIO if the NFS status code is
+ * not recognized.  This is a private NFSv3 copy; nfs2xdr.c has its own.
+ */
+static int nfs3_stat_to_errno(enum nfs_stat status)
+{
+       int i;
+
+       for (i = 0; nfs_errtbl[i].stat != -1; i++) {
+               if (nfs_errtbl[i].stat == (int)status)
+                       return nfs_errtbl[i].errno;
+       }
+       dprintk("NFS: Unrecognized nfs status value: %u\n", status);
+       return nfs_errtbl[i].errno;
+}
+
+
 #define PROC(proc, argtype, restype, timer)                            \
 [NFS3PROC_##proc] = {                                                  \
        .p_proc      = NFS3PROC_##proc,                                 \
index 8d75021020b31f44f0fcb9ec6f1ff05a8b39b313..c6827f93ab57caeab4e613e97919c04a51aa5d64 100644 (file)
@@ -24,6 +24,8 @@ enum nfs4_client_state {
        NFS4CLNT_RECALL_SLOT,
        NFS4CLNT_LEASE_CONFIRM,
        NFS4CLNT_SERVER_SCOPE_MISMATCH,
+       NFS4CLNT_PURGE_STATE,
+       NFS4CLNT_BIND_CONN_TO_SESSION,
 };
 
 enum nfs4_session_state {
@@ -52,11 +54,6 @@ struct nfs4_minor_version_ops {
        const struct nfs4_state_maintenance_ops *state_renewal_ops;
 };
 
-struct nfs_unique_id {
-       struct rb_node rb_node;
-       __u64 id;
-};
-
 #define NFS_SEQID_CONFIRMED 1
 struct nfs_seqid_counter {
        ktime_t create_time;
@@ -206,12 +203,18 @@ extern const struct dentry_operations nfs4_dentry_operations;
 extern const struct inode_operations nfs4_dir_inode_operations;
 
 /* nfs4namespace.c */
+rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
 struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
+struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *,
+                              struct nfs_fh *, struct nfs_fattr *);
 
 /* nfs4proc.c */
 extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
 extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
+extern int nfs4_proc_get_rootfh(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
+extern int nfs4_proc_bind_conn_to_session(struct nfs_client *, struct rpc_cred *cred);
 extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
+extern int nfs4_destroy_clientid(struct nfs_client *clp);
 extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
 extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
 extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
@@ -239,8 +242,8 @@ extern int nfs41_setup_sequence(struct nfs4_session *session,
                struct rpc_task *task);
 extern void nfs4_destroy_session(struct nfs4_session *session);
 extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
-extern int nfs4_proc_create_session(struct nfs_client *);
-extern int nfs4_proc_destroy_session(struct nfs4_session *);
+extern int nfs4_proc_create_session(struct nfs_client *, struct rpc_cred *);
+extern int nfs4_proc_destroy_session(struct nfs4_session *, struct rpc_cred *);
 extern int nfs4_init_session(struct nfs_server *server);
 extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
                struct nfs_fsinfo *fsinfo);
@@ -310,9 +313,9 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
 #if defined(CONFIG_NFS_V4_1)
 struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
 struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
-extern void nfs4_schedule_session_recovery(struct nfs4_session *);
+extern void nfs4_schedule_session_recovery(struct nfs4_session *, int);
 #else
-static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
+static inline void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
 {
 }
 #endif /* CONFIG_NFS_V4_1 */
@@ -334,7 +337,7 @@ extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs
 extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
 extern void nfs41_handle_recall_slot(struct nfs_client *clp);
 extern void nfs41_handle_server_scope(struct nfs_client *,
-                                     struct server_scope **);
+                                     struct nfs41_server_scope **);
 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
 extern void nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *,
index 5acfd9ea8a31390eb0f6b3ab6872f8efc334bc2b..e1340293872c7a70e747d051888e5ab603db905e 100644 (file)
@@ -82,29 +82,76 @@ filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset)
        BUG();
 }
 
+static void filelayout_reset_write(struct nfs_write_data *data)
+{
+       struct nfs_pgio_header *hdr = data->header;
+       struct rpc_task *task = &data->task;
+
+       if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+               dprintk("%s Reset task %5u for i/o through MDS "
+                       "(req %s/%lld, %u bytes @ offset %llu)\n", __func__,
+                       data->task.tk_pid,
+                       hdr->inode->i_sb->s_id,
+                       (long long)NFS_FILEID(hdr->inode),
+                       data->args.count,
+                       (unsigned long long)data->args.offset);
+
+               task->tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
+                                                       &hdr->pages,
+                                                       hdr->completion_ops);
+       }
+}
+
+static void filelayout_reset_read(struct nfs_read_data *data)
+{
+       struct nfs_pgio_header *hdr = data->header;
+       struct rpc_task *task = &data->task;
+
+       if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+               dprintk("%s Reset task %5u for i/o through MDS "
+                       "(req %s/%lld, %u bytes @ offset %llu)\n", __func__,
+                       data->task.tk_pid,
+                       hdr->inode->i_sb->s_id,
+                       (long long)NFS_FILEID(hdr->inode),
+                       data->args.count,
+                       (unsigned long long)data->args.offset);
+
+               task->tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
+                                                       &hdr->pages,
+                                                       hdr->completion_ops);
+       }
+}
+
 static int filelayout_async_handle_error(struct rpc_task *task,
                                         struct nfs4_state *state,
                                         struct nfs_client *clp,
-                                        int *reset)
+                                        struct pnfs_layout_segment *lseg)
 {
-       struct nfs_server *mds_server = NFS_SERVER(state->inode);
+       struct inode *inode = lseg->pls_layout->plh_inode;
+       struct nfs_server *mds_server = NFS_SERVER(inode);
+       struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
        struct nfs_client *mds_client = mds_server->nfs_client;
+       struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
 
        if (task->tk_status >= 0)
                return 0;
-       *reset = 0;
 
        switch (task->tk_status) {
        /* MDS state errors */
        case -NFS4ERR_DELEG_REVOKED:
        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_BAD_STATEID:
+               if (state == NULL)
+                       break;
                nfs_remove_bad_delegation(state->inode);
        case -NFS4ERR_OPENMODE:
+               if (state == NULL)
+                       break;
                nfs4_schedule_stateid_recovery(mds_server, state);
                goto wait_on_recovery;
        case -NFS4ERR_EXPIRED:
-               nfs4_schedule_stateid_recovery(mds_server, state);
+               if (state != NULL)
+                       nfs4_schedule_stateid_recovery(mds_server, state);
                nfs4_schedule_lease_recovery(mds_client);
                goto wait_on_recovery;
        /* DS session errors */
@@ -118,7 +165,7 @@ static int filelayout_async_handle_error(struct rpc_task *task,
                dprintk("%s ERROR %d, Reset session. Exchangeid "
                        "flags 0x%x\n", __func__, task->tk_status,
                        clp->cl_exchange_flags);
-               nfs4_schedule_session_recovery(clp->cl_session);
+               nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
                break;
        case -NFS4ERR_DELAY:
        case -NFS4ERR_GRACE:
@@ -127,11 +174,48 @@ static int filelayout_async_handle_error(struct rpc_task *task,
                break;
        case -NFS4ERR_RETRY_UNCACHED_REP:
                break;
+       /* Invalidate Layout errors */
+       case -NFS4ERR_PNFS_NO_LAYOUT:
+       case -ESTALE:           /* mapped NFS4ERR_STALE */
+       case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
+       case -EISDIR:           /* mapped NFS4ERR_ISDIR */
+       case -NFS4ERR_FHEXPIRED:
+       case -NFS4ERR_WRONG_TYPE:
+               dprintk("%s Invalid layout error %d\n", __func__,
+                       task->tk_status);
+               /*
+                * Destroy layout so new i/o will get a new layout.
+                * Layout will not be destroyed until all current lseg
+                * references are put. Mark layout as invalid to resend failed
+                * i/o and all i/o waiting on the slot table to the MDS until
+                * layout is destroyed and a new valid layout is obtained.
+                */
+               set_bit(NFS_LAYOUT_INVALID,
+                               &NFS_I(inode)->layout->plh_flags);
+               pnfs_destroy_layout(NFS_I(inode));
+               rpc_wake_up(&tbl->slot_tbl_waitq);
+               goto reset;
+       /* RPC connection errors */
+       case -ECONNREFUSED:
+       case -EHOSTDOWN:
+       case -EHOSTUNREACH:
+       case -ENETUNREACH:
+       case -EIO:
+       case -ETIMEDOUT:
+       case -EPIPE:
+               dprintk("%s DS connection error %d\n", __func__,
+                       task->tk_status);
+               if (!filelayout_test_devid_invalid(devid))
+                       _pnfs_return_layout(inode);
+               filelayout_mark_devid_invalid(devid);
+               rpc_wake_up(&tbl->slot_tbl_waitq);
+               nfs4_ds_disconnect(clp);
+               /* fall through */
        default:
-               dprintk("%s DS error. Retry through MDS %d\n", __func__,
+reset:
+               dprintk("%s Retry through MDS. Error %d\n", __func__,
                        task->tk_status);
-               *reset = 1;
-               break;
+               return -NFS4ERR_RESET_TO_MDS;
        }
 out:
        task->tk_status = 0;
@@ -148,18 +232,17 @@ wait_on_recovery:
 static int filelayout_read_done_cb(struct rpc_task *task,
                                struct nfs_read_data *data)
 {
-       int reset = 0;
+       struct nfs_pgio_header *hdr = data->header;
+       int err;
 
-       dprintk("%s DS read\n", __func__);
+       err = filelayout_async_handle_error(task, data->args.context->state,
+                                           data->ds_clp, hdr->lseg);
 
-       if (filelayout_async_handle_error(task, data->args.context->state,
-                                         data->ds_clp, &reset) == -EAGAIN) {
-               dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
-                       __func__, data->ds_clp, data->ds_clp->cl_session);
-               if (reset) {
-                       pnfs_set_lo_fail(data->lseg);
-                       nfs4_reset_read(task, data);
-               }
+       switch (err) {
+       case -NFS4ERR_RESET_TO_MDS:
+               filelayout_reset_read(data);
+               return task->tk_status;
+       case -EAGAIN:
                rpc_restart_call_prepare(task);
                return -EAGAIN;
        }
@@ -175,13 +258,15 @@ static int filelayout_read_done_cb(struct rpc_task *task,
 static void
 filelayout_set_layoutcommit(struct nfs_write_data *wdata)
 {
-       if (FILELAYOUT_LSEG(wdata->lseg)->commit_through_mds ||
+       struct nfs_pgio_header *hdr = wdata->header;
+
+       if (FILELAYOUT_LSEG(hdr->lseg)->commit_through_mds ||
            wdata->res.verf->committed == NFS_FILE_SYNC)
                return;
 
        pnfs_set_layoutcommit(wdata);
-       dprintk("%s ionde %lu pls_end_pos %lu\n", __func__, wdata->inode->i_ino,
-               (unsigned long) NFS_I(wdata->inode)->layout->plh_lwb);
+       dprintk("%s ionde %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
+               (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
 }
 
 /*
@@ -191,8 +276,14 @@ filelayout_set_layoutcommit(struct nfs_write_data *wdata)
  */
 static void filelayout_read_prepare(struct rpc_task *task, void *data)
 {
-       struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+       struct nfs_read_data *rdata = data;
 
+       if (filelayout_reset_to_mds(rdata->header->lseg)) {
+               dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
+               filelayout_reset_read(rdata);
+               rpc_exit(task, 0);
+               return;
+       }
        rdata->read_done_cb = filelayout_read_done_cb;
 
        if (nfs41_setup_sequence(rdata->ds_clp->cl_session,
@@ -205,42 +296,47 @@ static void filelayout_read_prepare(struct rpc_task *task, void *data)
 
 static void filelayout_read_call_done(struct rpc_task *task, void *data)
 {
-       struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+       struct nfs_read_data *rdata = data;
 
        dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
 
+       if (test_bit(NFS_IOHDR_REDO, &rdata->header->flags) &&
+           task->tk_status == 0)
+               return;
+
        /* Note this may cause RPC to be resent */
-       rdata->mds_ops->rpc_call_done(task, data);
+       rdata->header->mds_ops->rpc_call_done(task, data);
 }
 
 static void filelayout_read_count_stats(struct rpc_task *task, void *data)
 {
-       struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+       struct nfs_read_data *rdata = data;
 
-       rpc_count_iostats(task, NFS_SERVER(rdata->inode)->client->cl_metrics);
+       rpc_count_iostats(task, NFS_SERVER(rdata->header->inode)->client->cl_metrics);
 }
 
 static void filelayout_read_release(void *data)
 {
-       struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+       struct nfs_read_data *rdata = data;
 
-       put_lseg(rdata->lseg);
-       rdata->mds_ops->rpc_release(data);
+       nfs_put_client(rdata->ds_clp);
+       rdata->header->mds_ops->rpc_release(data);
 }
 
 static int filelayout_write_done_cb(struct rpc_task *task,
                                struct nfs_write_data *data)
 {
-       int reset = 0;
-
-       if (filelayout_async_handle_error(task, data->args.context->state,
-                                         data->ds_clp, &reset) == -EAGAIN) {
-               dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
-                       __func__, data->ds_clp, data->ds_clp->cl_session);
-               if (reset) {
-                       pnfs_set_lo_fail(data->lseg);
-                       nfs4_reset_write(task, data);
-               }
+       struct nfs_pgio_header *hdr = data->header;
+       int err;
+
+       err = filelayout_async_handle_error(task, data->args.context->state,
+                                           data->ds_clp, hdr->lseg);
+
+       switch (err) {
+       case -NFS4ERR_RESET_TO_MDS:
+               filelayout_reset_write(data);
+               return task->tk_status;
+       case -EAGAIN:
                rpc_restart_call_prepare(task);
                return -EAGAIN;
        }
@@ -250,7 +346,7 @@ static int filelayout_write_done_cb(struct rpc_task *task,
 }
 
 /* Fake up some data that will cause nfs_commit_release to retry the writes. */
-static void prepare_to_resend_writes(struct nfs_write_data *data)
+static void prepare_to_resend_writes(struct nfs_commit_data *data)
 {
        struct nfs_page *first = nfs_list_entry(data->pages.next);
 
@@ -261,19 +357,19 @@ static void prepare_to_resend_writes(struct nfs_write_data *data)
 }
 
 static int filelayout_commit_done_cb(struct rpc_task *task,
-                                    struct nfs_write_data *data)
+                                    struct nfs_commit_data *data)
 {
-       int reset = 0;
-
-       if (filelayout_async_handle_error(task, data->args.context->state,
-                                         data->ds_clp, &reset) == -EAGAIN) {
-               dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
-                       __func__, data->ds_clp, data->ds_clp->cl_session);
-               if (reset) {
-                       prepare_to_resend_writes(data);
-                       pnfs_set_lo_fail(data->lseg);
-               } else
-                       rpc_restart_call_prepare(task);
+       int err;
+
+       err = filelayout_async_handle_error(task, NULL, data->ds_clp,
+                                           data->lseg);
+
+       switch (err) {
+       case -NFS4ERR_RESET_TO_MDS:
+               prepare_to_resend_writes(data);
+               return -EAGAIN;
+       case -EAGAIN:
+               rpc_restart_call_prepare(task);
                return -EAGAIN;
        }
 
@@ -282,8 +378,14 @@ static int filelayout_commit_done_cb(struct rpc_task *task,
 
 static void filelayout_write_prepare(struct rpc_task *task, void *data)
 {
-       struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+       struct nfs_write_data *wdata = data;
 
+       if (filelayout_reset_to_mds(wdata->header->lseg)) {
+               dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
+               filelayout_reset_write(wdata);
+               rpc_exit(task, 0);
+               return;
+       }
        if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
                                &wdata->args.seq_args, &wdata->res.seq_res,
                                task))
@@ -294,36 +396,66 @@ static void filelayout_write_prepare(struct rpc_task *task, void *data)
 
 static void filelayout_write_call_done(struct rpc_task *task, void *data)
 {
-       struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+       struct nfs_write_data *wdata = data;
+
+       if (test_bit(NFS_IOHDR_REDO, &wdata->header->flags) &&
+           task->tk_status == 0)
+               return;
 
        /* Note this may cause RPC to be resent */
-       wdata->mds_ops->rpc_call_done(task, data);
+       wdata->header->mds_ops->rpc_call_done(task, data);
 }
 
 static void filelayout_write_count_stats(struct rpc_task *task, void *data)
 {
-       struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+       struct nfs_write_data *wdata = data;
 
-       rpc_count_iostats(task, NFS_SERVER(wdata->inode)->client->cl_metrics);
+       rpc_count_iostats(task, NFS_SERVER(wdata->header->inode)->client->cl_metrics);
 }
 
 static void filelayout_write_release(void *data)
 {
-       struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+       struct nfs_write_data *wdata = data;
+
+       nfs_put_client(wdata->ds_clp);
+       wdata->header->mds_ops->rpc_release(data);
+}
+
+static void filelayout_commit_prepare(struct rpc_task *task, void *data)
+{
+       struct nfs_commit_data *wdata = data;
 
-       put_lseg(wdata->lseg);
-       wdata->mds_ops->rpc_release(data);
+       if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
+                               &wdata->args.seq_args, &wdata->res.seq_res,
+                               task))
+               return;
+
+       rpc_call_start(task);
+}
+
+static void filelayout_write_commit_done(struct rpc_task *task, void *data)
+{
+       struct nfs_commit_data *wdata = data;
+
+       /* Note this may cause RPC to be resent */
+       wdata->mds_ops->rpc_call_done(task, data);
+}
+
+static void filelayout_commit_count_stats(struct rpc_task *task, void *data)
+{
+       struct nfs_commit_data *cdata = data;
+
+       rpc_count_iostats(task, NFS_SERVER(cdata->inode)->client->cl_metrics);
 }
 
-static void filelayout_commit_release(void *data)
+static void filelayout_commit_release(void *calldata)
 {
-       struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+       struct nfs_commit_data *data = calldata;
 
-       nfs_commit_release_pages(wdata);
-       if (atomic_dec_and_test(&NFS_I(wdata->inode)->commits_outstanding))
-               nfs_commit_clear_lock(NFS_I(wdata->inode));
-       put_lseg(wdata->lseg);
-       nfs_commitdata_release(wdata);
+       data->completion_ops->completion(data);
+       put_lseg(data->lseg);
+       nfs_put_client(data->ds_clp);
+       nfs_commitdata_release(data);
 }
 
 static const struct rpc_call_ops filelayout_read_call_ops = {
@@ -341,16 +473,17 @@ static const struct rpc_call_ops filelayout_write_call_ops = {
 };
 
 static const struct rpc_call_ops filelayout_commit_call_ops = {
-       .rpc_call_prepare = filelayout_write_prepare,
-       .rpc_call_done = filelayout_write_call_done,
-       .rpc_count_stats = filelayout_write_count_stats,
+       .rpc_call_prepare = filelayout_commit_prepare,
+       .rpc_call_done = filelayout_write_commit_done,
+       .rpc_count_stats = filelayout_commit_count_stats,
        .rpc_release = filelayout_commit_release,
 };
 
 static enum pnfs_try_status
 filelayout_read_pagelist(struct nfs_read_data *data)
 {
-       struct pnfs_layout_segment *lseg = data->lseg;
+       struct nfs_pgio_header *hdr = data->header;
+       struct pnfs_layout_segment *lseg = hdr->lseg;
        struct nfs4_pnfs_ds *ds;
        loff_t offset = data->args.offset;
        u32 j, idx;
@@ -358,25 +491,20 @@ filelayout_read_pagelist(struct nfs_read_data *data)
        int status;
 
        dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
-               __func__, data->inode->i_ino,
+               __func__, hdr->inode->i_ino,
                data->args.pgbase, (size_t)data->args.count, offset);
 
-       if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
-               return PNFS_NOT_ATTEMPTED;
-
        /* Retrieve the correct rpc_client for the byte range */
        j = nfs4_fl_calc_j_index(lseg, offset);
        idx = nfs4_fl_calc_ds_index(lseg, j);
        ds = nfs4_fl_prepare_ds(lseg, idx);
-       if (!ds) {
-               /* Either layout fh index faulty, or ds connect failed */
-               set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
-               set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+       if (!ds)
                return PNFS_NOT_ATTEMPTED;
-       }
-       dprintk("%s USE DS: %s\n", __func__, ds->ds_remotestr);
+       dprintk("%s USE DS: %s cl_count %d\n", __func__,
+               ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));
 
        /* No multipath support. Use first DS */
+       atomic_inc(&ds->ds_clp->cl_count);
        data->ds_clp = ds->ds_clp;
        fh = nfs4_fl_select_ds_fh(lseg, j);
        if (fh)
@@ -386,8 +514,8 @@ filelayout_read_pagelist(struct nfs_read_data *data)
        data->mds_offset = offset;
 
        /* Perform an asynchronous read to ds */
-       status = nfs_initiate_read(data, ds->ds_clp->cl_rpcclient,
-                                  &filelayout_read_call_ops);
+       status = nfs_initiate_read(ds->ds_clp->cl_rpcclient, data,
+                                 &filelayout_read_call_ops, RPC_TASK_SOFTCONN);
        BUG_ON(status != 0);
        return PNFS_ATTEMPTED;
 }
@@ -396,32 +524,26 @@ filelayout_read_pagelist(struct nfs_read_data *data)
 static enum pnfs_try_status
 filelayout_write_pagelist(struct nfs_write_data *data, int sync)
 {
-       struct pnfs_layout_segment *lseg = data->lseg;
+       struct nfs_pgio_header *hdr = data->header;
+       struct pnfs_layout_segment *lseg = hdr->lseg;
        struct nfs4_pnfs_ds *ds;
        loff_t offset = data->args.offset;
        u32 j, idx;
        struct nfs_fh *fh;
        int status;
 
-       if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
-               return PNFS_NOT_ATTEMPTED;
-
        /* Retrieve the correct rpc_client for the byte range */
        j = nfs4_fl_calc_j_index(lseg, offset);
        idx = nfs4_fl_calc_ds_index(lseg, j);
        ds = nfs4_fl_prepare_ds(lseg, idx);
-       if (!ds) {
-               printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n",
-                       __func__);
-               set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
-               set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+       if (!ds)
                return PNFS_NOT_ATTEMPTED;
-       }
-       dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s\n", __func__,
-               data->inode->i_ino, sync, (size_t) data->args.count, offset,
-               ds->ds_remotestr);
+       dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d\n",
+               __func__, hdr->inode->i_ino, sync, (size_t) data->args.count,
+               offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));
 
        data->write_done_cb = filelayout_write_done_cb;
+       atomic_inc(&ds->ds_clp->cl_count);
        data->ds_clp = ds->ds_clp;
        fh = nfs4_fl_select_ds_fh(lseg, j);
        if (fh)
@@ -433,8 +555,9 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync)
        data->args.offset = filelayout_get_dserver_offset(lseg, offset);
 
        /* Perform an asynchronous write */
-       status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient,
-                                   &filelayout_write_call_ops, sync);
+       status = nfs_initiate_write(ds->ds_clp->cl_rpcclient, data,
+                                   &filelayout_write_call_ops, sync,
+                                   RPC_TASK_SOFTCONN);
        BUG_ON(status != 0);
        return PNFS_ATTEMPTED;
 }
@@ -650,10 +773,65 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg)
 
        dprintk("--> %s\n", __func__);
        nfs4_fl_put_deviceid(fl->dsaddr);
-       kfree(fl->commit_buckets);
+       /* This assumes a single RW lseg */
+       if (lseg->pls_range.iomode == IOMODE_RW) {
+               struct nfs4_filelayout *flo;
+
+               flo = FILELAYOUT_FROM_HDR(lseg->pls_layout);
+               flo->commit_info.nbuckets = 0;
+               kfree(flo->commit_info.buckets);
+               flo->commit_info.buckets = NULL;
+       }
        _filelayout_free_lseg(fl);
 }
 
+static int
+filelayout_alloc_commit_info(struct pnfs_layout_segment *lseg,
+                            struct nfs_commit_info *cinfo,
+                            gfp_t gfp_flags)
+{
+       struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
+       struct pnfs_commit_bucket *buckets;
+       int size;
+
+       if (fl->commit_through_mds)
+               return 0;
+       if (cinfo->ds->nbuckets != 0) {
+               /* This assumes there is only one IOMODE_RW lseg.  What
+                * we really want to do is have a layout_hdr level
+                * dictionary of <multipath_list4, fh> keys, each
+                * associated with a struct list_head, populated by calls
+                * to filelayout_write_pagelist().
+                * */
+               return 0;
+       }
+
+       size = (fl->stripe_type == STRIPE_SPARSE) ?
+               fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
+
+       buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
+                         gfp_flags);
+       if (!buckets)
+               return -ENOMEM;
+       else {
+               int i;
+
+               spin_lock(cinfo->lock);
+               if (cinfo->ds->nbuckets != 0)
+                       kfree(buckets);
+               else {
+                       cinfo->ds->buckets = buckets;
+                       cinfo->ds->nbuckets = size;
+                       for (i = 0; i < size; i++) {
+                               INIT_LIST_HEAD(&buckets[i].written);
+                               INIT_LIST_HEAD(&buckets[i].committing);
+                       }
+               }
+               spin_unlock(cinfo->lock);
+               return 0;
+       }
+}
+
 static struct pnfs_layout_segment *
 filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
                      struct nfs4_layoutget_res *lgr,
@@ -673,29 +851,6 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
                _filelayout_free_lseg(fl);
                return NULL;
        }
-
-       /* This assumes there is only one IOMODE_RW lseg.  What
-        * we really want to do is have a layout_hdr level
-        * dictionary of <multipath_list4, fh> keys, each
-        * associated with a struct list_head, populated by calls
-        * to filelayout_write_pagelist().
-        * */
-       if ((!fl->commit_through_mds) && (lgr->range.iomode == IOMODE_RW)) {
-               int i;
-               int size = (fl->stripe_type == STRIPE_SPARSE) ?
-                       fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
-
-               fl->commit_buckets = kcalloc(size, sizeof(struct nfs4_fl_commit_bucket), gfp_flags);
-               if (!fl->commit_buckets) {
-                       filelayout_free_lseg(&fl->generic_hdr);
-                       return NULL;
-               }
-               fl->number_of_buckets = size;
-               for (i = 0; i < size; i++) {
-                       INIT_LIST_HEAD(&fl->commit_buckets[i].written);
-                       INIT_LIST_HEAD(&fl->commit_buckets[i].committing);
-               }
-       }
        return &fl->generic_hdr;
 }
 
@@ -716,8 +871,8 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
            !nfs_generic_pg_test(pgio, prev, req))
                return false;
 
-       p_stripe = (u64)prev->wb_index << PAGE_CACHE_SHIFT;
-       r_stripe = (u64)req->wb_index << PAGE_CACHE_SHIFT;
+       p_stripe = (u64)req_offset(prev);
+       r_stripe = (u64)req_offset(req);
        stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;
 
        do_div(p_stripe, stripe_unit);
@@ -732,6 +887,16 @@ filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
 {
        BUG_ON(pgio->pg_lseg != NULL);
 
+       if (req->wb_offset != req->wb_pgbase) {
+               /*
+                * Handling unaligned pages is difficult, because have to
+                * somehow split a req in two in certain cases in the
+                * pg.test code.  Avoid this by just not using pnfs
+                * in this case.
+                */
+               nfs_pageio_reset_read_mds(pgio);
+               return;
+       }
        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                           req->wb_context,
                                           0,
@@ -747,8 +912,13 @@ static void
 filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
                         struct nfs_page *req)
 {
+       struct nfs_commit_info cinfo;
+       int status;
+
        BUG_ON(pgio->pg_lseg != NULL);
 
+       if (req->wb_offset != req->wb_pgbase)
+               goto out_mds;
        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                           req->wb_context,
                                           0,
@@ -757,7 +927,17 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
                                           GFP_NOFS);
        /* If no lseg, fall back to write through mds */
        if (pgio->pg_lseg == NULL)
-               nfs_pageio_reset_write_mds(pgio);
+               goto out_mds;
+       nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
+       status = filelayout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
+       if (status < 0) {
+               put_lseg(pgio->pg_lseg);
+               pgio->pg_lseg = NULL;
+               goto out_mds;
+       }
+       return;
+out_mds:
+       nfs_pageio_reset_write_mds(pgio);
 }
 
 static const struct nfs_pageio_ops filelayout_pg_read_ops = {
@@ -784,43 +964,42 @@ static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j)
  * If this will make the bucket empty, it will need to put the lseg reference.
  */
 static void
-filelayout_clear_request_commit(struct nfs_page *req)
+filelayout_clear_request_commit(struct nfs_page *req,
+                               struct nfs_commit_info *cinfo)
 {
        struct pnfs_layout_segment *freeme = NULL;
-       struct inode *inode = req->wb_context->dentry->d_inode;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(cinfo->lock);
        if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
                goto out;
+       cinfo->ds->nwritten--;
        if (list_is_singular(&req->wb_list)) {
-               struct pnfs_layout_segment *lseg;
+               struct pnfs_commit_bucket *bucket;
 
-               /* From here we can find the bucket, but for the moment,
-                * since there is only one relevant lseg...
-                */
-               list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
-                       if (lseg->pls_range.iomode == IOMODE_RW) {
-                               freeme = lseg;
-                               break;
-                       }
-               }
+               bucket = list_first_entry(&req->wb_list,
+                                         struct pnfs_commit_bucket,
+                                         written);
+               freeme = bucket->wlseg;
+               bucket->wlseg = NULL;
        }
 out:
-       nfs_request_remove_commit_list(req);
-       spin_unlock(&inode->i_lock);
+       nfs_request_remove_commit_list(req, cinfo);
+       spin_unlock(cinfo->lock);
        put_lseg(freeme);
 }
 
 static struct list_head *
 filelayout_choose_commit_list(struct nfs_page *req,
-                             struct pnfs_layout_segment *lseg)
+                             struct pnfs_layout_segment *lseg,
+                             struct nfs_commit_info *cinfo)
 {
        struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
        u32 i, j;
        struct list_head *list;
+       struct pnfs_commit_bucket *buckets;
 
        if (fl->commit_through_mds)
-               return &NFS_I(req->wb_context->dentry->d_inode)->commit_list;
+               return &cinfo->mds->list;
 
        /* Note that we are calling nfs4_fl_calc_j_index on each page
         * that ends up being committed to a data server.  An attractive
@@ -828,31 +1007,33 @@ filelayout_choose_commit_list(struct nfs_page *req,
         * to store the value calculated in filelayout_write_pagelist
         * and just use that here.
         */
-       j = nfs4_fl_calc_j_index(lseg,
-                                (loff_t)req->wb_index << PAGE_CACHE_SHIFT);
+       j = nfs4_fl_calc_j_index(lseg, req_offset(req));
        i = select_bucket_index(fl, j);
-       list = &fl->commit_buckets[i].written;
+       buckets = cinfo->ds->buckets;
+       list = &buckets[i].written;
        if (list_empty(list)) {
                /* Non-empty buckets hold a reference on the lseg.  That ref
                 * is normally transferred to the COMMIT call and released
                 * there.  It could also be released if the last req is pulled
                 * off due to a rewrite, in which case it will be done in
-                * filelayout_remove_commit_req
+                * filelayout_clear_request_commit
                 */
-               get_lseg(lseg);
+               buckets[i].wlseg = get_lseg(lseg);
        }
        set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+       cinfo->ds->nwritten++;
        return list;
 }
 
 static void
 filelayout_mark_request_commit(struct nfs_page *req,
-               struct pnfs_layout_segment *lseg)
+                              struct pnfs_layout_segment *lseg,
+                              struct nfs_commit_info *cinfo)
 {
        struct list_head *list;
 
-       list = filelayout_choose_commit_list(req, lseg);
-       nfs_request_add_commit_list(req, list);
+       list = filelayout_choose_commit_list(req, lseg, cinfo);
+       nfs_request_add_commit_list(req, list, cinfo);
 }
 
 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
@@ -880,7 +1061,7 @@ select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
        return flseg->fh_array[i];
 }
 
-static int filelayout_initiate_commit(struct nfs_write_data *data, int how)
+static int filelayout_initiate_commit(struct nfs_commit_data *data, int how)
 {
        struct pnfs_layout_segment *lseg = data->lseg;
        struct nfs4_pnfs_ds *ds;
@@ -890,135 +1071,138 @@ static int filelayout_initiate_commit(struct nfs_write_data *data, int how)
        idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
        ds = nfs4_fl_prepare_ds(lseg, idx);
        if (!ds) {
-               printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n",
-                       __func__);
-               set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
-               set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
                prepare_to_resend_writes(data);
                filelayout_commit_release(data);
                return -EAGAIN;
        }
-       dprintk("%s ino %lu, how %d\n", __func__, data->inode->i_ino, how);
-       data->write_done_cb = filelayout_commit_done_cb;
+       dprintk("%s ino %lu, how %d cl_count %d\n", __func__,
+               data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count));
+       data->commit_done_cb = filelayout_commit_done_cb;
+       atomic_inc(&ds->ds_clp->cl_count);
        data->ds_clp = ds->ds_clp;
        fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
        if (fh)
                data->args.fh = fh;
-       return nfs_initiate_commit(data, ds->ds_clp->cl_rpcclient,
-                                  &filelayout_commit_call_ops, how);
-}
-
-/*
- * This is only useful while we are using whole file layouts.
- */
-static struct pnfs_layout_segment *
-find_only_write_lseg_locked(struct inode *inode)
-{
-       struct pnfs_layout_segment *lseg;
-
-       list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
-               if (lseg->pls_range.iomode == IOMODE_RW)
-                       return lseg;
-       return NULL;
-}
-
-static struct pnfs_layout_segment *find_only_write_lseg(struct inode *inode)
-{
-       struct pnfs_layout_segment *rv;
-
-       spin_lock(&inode->i_lock);
-       rv = find_only_write_lseg_locked(inode);
-       if (rv)
-               get_lseg(rv);
-       spin_unlock(&inode->i_lock);
-       return rv;
+       return nfs_initiate_commit(ds->ds_clp->cl_rpcclient, data,
+                                  &filelayout_commit_call_ops, how,
+                                  RPC_TASK_SOFTCONN);
 }
 
 static int
-filelayout_scan_ds_commit_list(struct nfs4_fl_commit_bucket *bucket, int max,
-               spinlock_t *lock)
+transfer_commit_list(struct list_head *src, struct list_head *dst,
+                    struct nfs_commit_info *cinfo, int max)
 {
-       struct list_head *src = &bucket->written;
-       struct list_head *dst = &bucket->committing;
        struct nfs_page *req, *tmp;
        int ret = 0;
 
        list_for_each_entry_safe(req, tmp, src, wb_list) {
                if (!nfs_lock_request(req))
                        continue;
-               if (cond_resched_lock(lock))
+               kref_get(&req->wb_kref);
+               if (cond_resched_lock(cinfo->lock))
                        list_safe_reset_next(req, tmp, wb_list);
-               nfs_request_remove_commit_list(req);
+               nfs_request_remove_commit_list(req, cinfo);
                clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
                nfs_list_add_request(req, dst);
                ret++;
-               if (ret == max)
+               if ((ret == max) && !cinfo->dreq)
                        break;
        }
        return ret;
 }
 
+static int
+filelayout_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
+                              struct nfs_commit_info *cinfo,
+                              int max)
+{
+       struct list_head *src = &bucket->written;
+       struct list_head *dst = &bucket->committing;
+       int ret;
+
+       ret = transfer_commit_list(src, dst, cinfo, max);
+       if (ret) {
+               cinfo->ds->nwritten -= ret;
+               cinfo->ds->ncommitting += ret;
+               bucket->clseg = bucket->wlseg;
+               if (list_empty(src))
+                       bucket->wlseg = NULL;
+               else
+                       get_lseg(bucket->clseg);
+       }
+       return ret;
+}
+
 /* Move reqs from written to committing lists, returning count of number moved.
- * Note called with i_lock held.
+ * Note called with cinfo->lock held.
  */
-static int filelayout_scan_commit_lists(struct inode *inode, int max,
-               spinlock_t *lock)
+static int filelayout_scan_commit_lists(struct nfs_commit_info *cinfo,
+                                       int max)
 {
-       struct pnfs_layout_segment *lseg;
-       struct nfs4_filelayout_segment *fl;
        int i, rv = 0, cnt;
 
-       lseg = find_only_write_lseg_locked(inode);
-       if (!lseg)
-               goto out_done;
-       fl = FILELAYOUT_LSEG(lseg);
-       if (fl->commit_through_mds)
-               goto out_done;
-       for (i = 0; i < fl->number_of_buckets && max != 0; i++) {
-               cnt = filelayout_scan_ds_commit_list(&fl->commit_buckets[i],
-                               max, lock);
+       for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
+               cnt = filelayout_scan_ds_commit_list(&cinfo->ds->buckets[i],
+                                                    cinfo, max);
                max -= cnt;
                rv += cnt;
        }
-out_done:
        return rv;
 }
 
+/* Pull everything off the committing lists and dump into @dst */
+static void filelayout_recover_commit_reqs(struct list_head *dst,
+                                          struct nfs_commit_info *cinfo)
+{
+       struct pnfs_commit_bucket *b;
+       int i;
+
+       /* NOTE cinfo->lock is NOT held, relying on fact that this is
+        * only called on single thread per dreq.
+        * Can't take the lock because need to do put_lseg
+        */
+       for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
+               if (transfer_commit_list(&b->written, dst, cinfo, 0)) {
+                       BUG_ON(!list_empty(&b->written));
+                       put_lseg(b->wlseg);
+                       b->wlseg = NULL;
+               }
+       }
+       cinfo->ds->nwritten = 0;
+}
+
 static unsigned int
-alloc_ds_commits(struct inode *inode, struct list_head *list)
+alloc_ds_commits(struct nfs_commit_info *cinfo, struct list_head *list)
 {
-       struct pnfs_layout_segment *lseg;
-       struct nfs4_filelayout_segment *fl;
-       struct nfs_write_data *data;
+       struct pnfs_ds_commit_info *fl_cinfo;
+       struct pnfs_commit_bucket *bucket;
+       struct nfs_commit_data *data;
        int i, j;
        unsigned int nreq = 0;
 
-       /* Won't need this when non-whole file layout segments are supported
-        * instead we will use a pnfs_layout_hdr structure */
-       lseg = find_only_write_lseg(inode);
-       if (!lseg)
-               return 0;
-       fl = FILELAYOUT_LSEG(lseg);
-       for (i = 0; i < fl->number_of_buckets; i++) {
-               if (list_empty(&fl->commit_buckets[i].committing))
+       fl_cinfo = cinfo->ds;
+       bucket = fl_cinfo->buckets;
+       for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
+               if (list_empty(&bucket->committing))
                        continue;
                data = nfs_commitdata_alloc();
                if (!data)
                        break;
                data->ds_commit_index = i;
-               data->lseg = lseg;
+               data->lseg = bucket->clseg;
+               bucket->clseg = NULL;
                list_add(&data->pages, list);
                nreq++;
        }
 
        /* Clean up on error */
-       for (j = i; j < fl->number_of_buckets; j++) {
-               if (list_empty(&fl->commit_buckets[i].committing))
+       for (j = i; j < fl_cinfo->nbuckets; j++, bucket++) {
+               if (list_empty(&bucket->committing))
                        continue;
-               nfs_retry_commit(&fl->commit_buckets[i].committing, lseg);
-               put_lseg(lseg);  /* associated with emptying bucket */
+               nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo);
+               put_lseg(bucket->clseg);
+               bucket->clseg = NULL;
        }
-       put_lseg(lseg);
        /* Caller will clean up entries put on list */
        return nreq;
 }
@@ -1026,9 +1210,9 @@ alloc_ds_commits(struct inode *inode, struct list_head *list)
 /* This follows nfs_commit_list pretty closely */
 static int
 filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
-                          int how)
+                          int how, struct nfs_commit_info *cinfo)
 {
-       struct nfs_write_data   *data, *tmp;
+       struct nfs_commit_data *data, *tmp;
        LIST_HEAD(list);
        unsigned int nreq = 0;
 
@@ -1039,30 +1223,34 @@ filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
                        list_add(&data->pages, &list);
                        nreq++;
                } else
-                       nfs_retry_commit(mds_pages, NULL);
+                       nfs_retry_commit(mds_pages, NULL, cinfo);
        }
 
-       nreq += alloc_ds_commits(inode, &list);
+       nreq += alloc_ds_commits(cinfo, &list);
 
        if (nreq == 0) {
-               nfs_commit_clear_lock(NFS_I(inode));
+               cinfo->completion_ops->error_cleanup(NFS_I(inode));
                goto out;
        }
 
-       atomic_add(nreq, &NFS_I(inode)->commits_outstanding);
+       atomic_add(nreq, &cinfo->mds->rpcs_out);
 
        list_for_each_entry_safe(data, tmp, &list, pages) {
                list_del_init(&data->pages);
                if (!data->lseg) {
-                       nfs_init_commit(data, mds_pages, NULL);
-                       nfs_initiate_commit(data, NFS_CLIENT(inode),
-                                           data->mds_ops, how);
+                       nfs_init_commit(data, mds_pages, NULL, cinfo);
+                       nfs_initiate_commit(NFS_CLIENT(inode), data,
+                                           data->mds_ops, how, 0);
                } else {
-                       nfs_init_commit(data, &FILELAYOUT_LSEG(data->lseg)->commit_buckets[data->ds_commit_index].committing, data->lseg);
+                       struct pnfs_commit_bucket *buckets;
+
+                       buckets = cinfo->ds->buckets;
+                       nfs_init_commit(data, &buckets[data->ds_commit_index].committing, data->lseg, cinfo);
                        filelayout_initiate_commit(data, how);
                }
        }
 out:
+       cinfo->ds->ncommitting = 0;
        return PNFS_ATTEMPTED;
 }
 
@@ -1072,17 +1260,47 @@ filelayout_free_deveiceid_node(struct nfs4_deviceid_node *d)
        nfs4_fl_free_deviceid(container_of(d, struct nfs4_file_layout_dsaddr, id_node));
 }
 
+static struct pnfs_layout_hdr *
+filelayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
+{
+       struct nfs4_filelayout *flo;
+
+       flo = kzalloc(sizeof(*flo), gfp_flags);
+       return &flo->generic_hdr;
+}
+
+static void
+filelayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+       kfree(FILELAYOUT_FROM_HDR(lo));
+}
+
+static struct pnfs_ds_commit_info *
+filelayout_get_ds_info(struct inode *inode)
+{
+       struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
+
+       if (layout == NULL)
+               return NULL;
+       else
+               return &FILELAYOUT_FROM_HDR(layout)->commit_info;
+}
+
 static struct pnfs_layoutdriver_type filelayout_type = {
        .id                     = LAYOUT_NFSV4_1_FILES,
        .name                   = "LAYOUT_NFSV4_1_FILES",
        .owner                  = THIS_MODULE,
+       .alloc_layout_hdr       = filelayout_alloc_layout_hdr,
+       .free_layout_hdr        = filelayout_free_layout_hdr,
        .alloc_lseg             = filelayout_alloc_lseg,
        .free_lseg              = filelayout_free_lseg,
        .pg_read_ops            = &filelayout_pg_read_ops,
        .pg_write_ops           = &filelayout_pg_write_ops,
+       .get_ds_info            = &filelayout_get_ds_info,
        .mark_request_commit    = filelayout_mark_request_commit,
        .clear_request_commit   = filelayout_clear_request_commit,
        .scan_commit_lists      = filelayout_scan_commit_lists,
+       .recover_commit_reqs    = filelayout_recover_commit_reqs,
        .commit_pagelist        = filelayout_commit_pagelist,
        .read_pagelist          = filelayout_read_pagelist,
        .write_pagelist         = filelayout_write_pagelist,
index 21190bb1f5e348c5549e5985afb8cdf896aa72dd..43fe802dd67855fbfe521825d0c24ef3acca7c38 100644 (file)
 
 #include "pnfs.h"
 
+/*
+ * Default data server connection timeout and retrans values.
+ * Set by module parameters dataserver_timeo and dataserver_retrans.
+ */
+#define NFS4_DEF_DS_TIMEO   60
+#define NFS4_DEF_DS_RETRANS 5
+
 /*
  * Field testing shows we need to support up to 4096 stripe indices.
  * We store each index as a u8 (u32 on the wire) to keep the memory footprint
@@ -41,6 +48,9 @@
 #define NFS4_PNFS_MAX_STRIPE_CNT 4096
 #define NFS4_PNFS_MAX_MULTI_CNT  256 /* 256 fit into a u8 stripe_index */
 
+/* error codes for internal use */
+#define NFS4ERR_RESET_TO_MDS   12001
+
 enum stripetype4 {
        STRIPE_SPARSE = 1,
        STRIPE_DENSE = 2
@@ -62,23 +72,14 @@ struct nfs4_pnfs_ds {
        atomic_t                ds_count;
 };
 
-/* nfs4_file_layout_dsaddr flags */
-#define NFS4_DEVICE_ID_NEG_ENTRY       0x00000001
-
 struct nfs4_file_layout_dsaddr {
        struct nfs4_deviceid_node       id_node;
-       unsigned long                   flags;
        u32                             stripe_count;
        u8                              *stripe_indices;
        u32                             ds_num;
        struct nfs4_pnfs_ds             *ds_list[1];
 };
 
-struct nfs4_fl_commit_bucket {
-       struct list_head written;
-       struct list_head committing;
-};
-
 struct nfs4_filelayout_segment {
        struct pnfs_layout_segment generic_hdr;
        u32 stripe_type;
@@ -89,10 +90,19 @@ struct nfs4_filelayout_segment {
        struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
        unsigned int num_fh;
        struct nfs_fh **fh_array;
-       struct nfs4_fl_commit_bucket *commit_buckets; /* Sort commits to ds */
-       int number_of_buckets;
 };
 
+struct nfs4_filelayout {
+       struct pnfs_layout_hdr generic_hdr;
+       struct pnfs_ds_commit_info commit_info;
+};
+
+static inline struct nfs4_filelayout *
+FILELAYOUT_FROM_HDR(struct pnfs_layout_hdr *lo)
+{
+       return container_of(lo, struct nfs4_filelayout, generic_hdr);
+}
+
 static inline struct nfs4_filelayout_segment *
 FILELAYOUT_LSEG(struct pnfs_layout_segment *lseg)
 {
@@ -107,6 +117,36 @@ FILELAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg)
        return &FILELAYOUT_LSEG(lseg)->dsaddr->id_node;
 }
 
+static inline void
+filelayout_mark_devid_invalid(struct nfs4_deviceid_node *node)
+{
+       u32 *p = (u32 *)&node->deviceid;
+
+       printk(KERN_WARNING "NFS: Deviceid [%x%x%x%x] marked out of use.\n",
+               p[0], p[1], p[2], p[3]);
+
+       set_bit(NFS_DEVICEID_INVALID, &node->flags);
+}
+
+static inline bool
+filelayout_test_layout_invalid(struct pnfs_layout_hdr *lo)
+{
+       return test_bit(NFS_LAYOUT_INVALID, &lo->plh_flags);
+}
+
+static inline bool
+filelayout_test_devid_invalid(struct nfs4_deviceid_node *node)
+{
+       return test_bit(NFS_DEVICEID_INVALID, &node->flags);
+}
+
+static inline bool
+filelayout_reset_to_mds(struct pnfs_layout_segment *lseg)
+{
+       return filelayout_test_devid_invalid(FILELAYOUT_DEVID_NODE(lseg)) ||
+               filelayout_test_layout_invalid(lseg->pls_layout);
+}
+
 extern struct nfs_fh *
 nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j);
 
@@ -119,5 +159,6 @@ extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
 extern void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
 struct nfs4_file_layout_dsaddr *
 get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
+void nfs4_ds_disconnect(struct nfs_client *clp);
 
 #endif /* FS_NFS_NFS4FILELAYOUT_H */
index c9cff9adb2d3f7c832f3e3bc7e6d76ec45a6e692..a1fab8da7f03c8819951af81d95e6268a71dc80f 100644 (file)
 
 #include <linux/nfs_fs.h>
 #include <linux/vmalloc.h>
+#include <linux/module.h>
 
 #include "internal.h"
 #include "nfs4filelayout.h"
 
 #define NFSDBG_FACILITY                NFSDBG_PNFS_LD
 
+static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO;
+static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS;
+
 /*
  * Data server cache
  *
@@ -144,6 +148,28 @@ _data_server_lookup_locked(const struct list_head *dsaddrs)
        return NULL;
 }
 
+/*
+ * Lookup DS by nfs_client pointer. Zero data server client pointer
+ */
+void nfs4_ds_disconnect(struct nfs_client *clp)
+{
+       struct nfs4_pnfs_ds *ds;
+       struct nfs_client *found = NULL;
+
+       dprintk("%s clp %p\n", __func__, clp);
+       spin_lock(&nfs4_ds_cache_lock);
+       list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
+               if (ds->ds_clp && ds->ds_clp == clp) {
+                       found = ds->ds_clp;
+                       ds->ds_clp = NULL;
+               }
+       spin_unlock(&nfs4_ds_cache_lock);
+       if (found) {
+               set_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state);
+               nfs_put_client(clp);
+       }
+}
+
 /*
  * Create an rpc connection to the nfs4_pnfs_ds data server
  * Currently only supports IPv4 and IPv6 addresses
@@ -165,8 +191,9 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
                        __func__, ds->ds_remotestr, da->da_remotestr);
 
                clp = nfs4_set_ds_client(mds_srv->nfs_client,
-                                (struct sockaddr *)&da->da_addr,
-                                da->da_addrlen, IPPROTO_TCP);
+                                       (struct sockaddr *)&da->da_addr,
+                                       da->da_addrlen, IPPROTO_TCP,
+                                       dataserver_timeo, dataserver_retrans);
                if (!IS_ERR(clp))
                        break;
        }
@@ -176,28 +203,7 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
                goto out;
        }
 
-       if ((clp->cl_exchange_flags & EXCHGID4_FLAG_MASK_PNFS) != 0) {
-               if (!is_ds_client(clp)) {
-                       status = -ENODEV;
-                       goto out_put;
-               }
-               ds->ds_clp = clp;
-               dprintk("%s [existing] server=%s\n", __func__,
-                       ds->ds_remotestr);
-               goto out;
-       }
-
-       /*
-        * Do not set NFS_CS_CHECK_LEASE_TIME instead set the DS lease to
-        * be equal to the MDS lease. Renewal is scheduled in create_session.
-        */
-       spin_lock(&mds_srv->nfs_client->cl_lock);
-       clp->cl_lease_time = mds_srv->nfs_client->cl_lease_time;
-       spin_unlock(&mds_srv->nfs_client->cl_lock);
-       clp->cl_last_renewal = jiffies;
-
-       /* New nfs_client */
-       status = nfs4_init_ds_session(clp);
+       status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time);
        if (status)
                goto out_put;
 
@@ -602,7 +608,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
 
                mp_count = be32_to_cpup(p); /* multipath count */
                for (j = 0; j < mp_count; j++) {
-                       da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->net,
+                       da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->cl_net,
                                            &stream, gfp_flags);
                        if (da)
                                list_add_tail(&da->da_node, &dsaddrs);
@@ -791,48 +797,42 @@ nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j)
        return flseg->fh_array[i];
 }
 
-static void
-filelayout_mark_devid_negative(struct nfs4_file_layout_dsaddr *dsaddr,
-                              int err, const char *ds_remotestr)
-{
-       u32 *p = (u32 *)&dsaddr->id_node.deviceid;
-
-       printk(KERN_ERR "NFS: data server %s connection error %d."
-               " Deviceid [%x%x%x%x] marked out of use.\n",
-               ds_remotestr, err, p[0], p[1], p[2], p[3]);
-
-       spin_lock(&nfs4_ds_cache_lock);
-       dsaddr->flags |= NFS4_DEVICE_ID_NEG_ENTRY;
-       spin_unlock(&nfs4_ds_cache_lock);
-}
-
 struct nfs4_pnfs_ds *
 nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
 {
        struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
        struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
+       struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
+
+       if (filelayout_test_devid_invalid(devid))
+               return NULL;
 
        if (ds == NULL) {
                printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
                        __func__, ds_idx);
-               return NULL;
+               goto mark_dev_invalid;
        }
 
        if (!ds->ds_clp) {
                struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
                int err;
 
-               if (dsaddr->flags & NFS4_DEVICE_ID_NEG_ENTRY) {
-                       /* Already tried to connect, don't try again */
-                       dprintk("%s Deviceid marked out of use\n", __func__);
-                       return NULL;
-               }
                err = nfs4_ds_connect(s, ds);
-               if (err) {
-                       filelayout_mark_devid_negative(dsaddr, err,
-                                                      ds->ds_remotestr);
-                       return NULL;
-               }
+               if (err)
+                       goto mark_dev_invalid;
        }
        return ds;
+
+mark_dev_invalid:
+       filelayout_mark_devid_invalid(devid);
+       return NULL;
 }
+
+module_param(dataserver_retrans, uint, 0644);
+MODULE_PARM_DESC(dataserver_retrans, "The  number of times the NFSv4.1 client "
+                       "retries a request before it attempts further "
+                       " recovery  action.");
+module_param(dataserver_timeo, uint, 0644);
+MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
+                       "NFSv4.1  client  waits for a response from a "
+                       " data server before it retries an NFS request.");
index a7f3dedc4ec7bade9df84ed6f0fc7524507b9c21..017b4b01a69c7a747b4afab8c7591b5f2ac59a20 100644 (file)
@@ -132,6 +132,35 @@ static size_t nfs_parse_server_name(char *string, size_t len,
        return ret;
 }
 
+rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
+{
+       struct gss_api_mech *mech;
+       struct xdr_netobj oid;
+       int i;
+       rpc_authflavor_t pseudoflavor = RPC_AUTH_UNIX;
+
+       for (i = 0; i < flavors->num_flavors; i++) {
+               struct nfs4_secinfo_flavor *flavor;
+               flavor = &flavors->flavors[i];
+
+               if (flavor->flavor == RPC_AUTH_NULL || flavor->flavor == RPC_AUTH_UNIX) {
+                       pseudoflavor = flavor->flavor;
+                       break;
+               } else if (flavor->flavor == RPC_AUTH_GSS) {
+                       oid.len  = flavor->gss.sec_oid4.len;
+                       oid.data = flavor->gss.sec_oid4.data;
+                       mech = gss_mech_get_by_OID(&oid);
+                       if (!mech)
+                               continue;
+                       pseudoflavor = gss_svc_to_pseudoflavor(mech, flavor->gss.service);
+                       gss_mech_put(mech);
+                       break;
+               }
+       }
+
+       return pseudoflavor;
+}
+
 static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr *name)
 {
        struct page *page;
@@ -168,7 +197,7 @@ struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *clnt, struct inode *ino
        rpc_authflavor_t flavor;
 
        flavor = nfs4_negotiate_security(inode, name);
-       if (flavor < 0)
+       if ((int)flavor < 0)
                return ERR_PTR(flavor);
 
        clone = rpc_clone_client(clnt);
@@ -300,7 +329,7 @@ out:
  * @dentry - dentry of referral
  *
  */
-struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
+static struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
 {
        struct vfsmount *mnt = ERR_PTR(-ENOMEM);
        struct dentry *parent;
@@ -341,3 +370,25 @@ out:
        dprintk("%s: done\n", __func__);
        return mnt;
 }
+
+struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry,
+                              struct nfs_fh *fh, struct nfs_fattr *fattr)
+{
+       struct dentry *parent = dget_parent(dentry);
+       struct rpc_clnt *client;
+       struct vfsmount *mnt;
+
+       /* Look it up again to get its attributes and sec flavor */
+       client = nfs4_proc_lookup_mountpoint(parent->d_inode, &dentry->d_name, fh, fattr);
+       dput(parent);
+       if (IS_ERR(client))
+               return ERR_CAST(client);
+
+       if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
+               mnt = nfs_do_refmount(client, dentry);
+       else
+               mnt = nfs_do_submount(dentry, fh, fattr, client->cl_auth->au_flavor);
+
+       rpc_shutdown_client(client);
+       return mnt;
+}
index ab985f6f0da8d93da67f625ba8027ff851678c55..d48dbefa0e71ebf6d9ac90edbb893364afd2d0a3 100644 (file)
@@ -64,6 +64,7 @@
 #include "iostat.h"
 #include "callback.h"
 #include "pnfs.h"
+#include "netns.h"
 
 #define NFSDBG_FACILITY                NFSDBG_PROC
 
@@ -80,6 +81,7 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
 static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
+static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
                            struct nfs_fattr *fattr, struct iattr *sattr,
@@ -101,6 +103,8 @@ static int nfs4_map_errors(int err)
        case -NFS4ERR_BADOWNER:
        case -NFS4ERR_BADNAME:
                return -EINVAL;
+       case -NFS4ERR_SHARE_DENIED:
+               return -EACCES;
        default:
                dprintk("%s could not handle NFSv4 error %d\n",
                                __func__, -err);
@@ -304,7 +308,7 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
                case -NFS4ERR_SEQ_MISORDERED:
                        dprintk("%s ERROR: %d Reset session\n", __func__,
                                errorcode);
-                       nfs4_schedule_session_recovery(clp->cl_session);
+                       nfs4_schedule_session_recovery(clp->cl_session, errorcode);
                        exception->retry = 1;
                        break;
 #endif /* defined(CONFIG_NFS_V4_1) */
@@ -772,7 +776,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
        struct nfs_inode *nfsi = NFS_I(dir);
 
        spin_lock(&dir->i_lock);
-       nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
+       nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
        if (!cinfo->atomic || cinfo->before != dir->i_version)
                nfs_force_lookup_revalidate(dir);
        dir->i_version = cinfo->after;
@@ -788,7 +792,6 @@ struct nfs4_opendata {
        struct nfs4_string owner_name;
        struct nfs4_string group_name;
        struct nfs_fattr f_attr;
-       struct nfs_fattr dir_attr;
        struct dentry *dir;
        struct dentry *dentry;
        struct nfs4_state_owner *owner;
@@ -804,12 +807,10 @@ struct nfs4_opendata {
 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
 {
        p->o_res.f_attr = &p->f_attr;
-       p->o_res.dir_attr = &p->dir_attr;
        p->o_res.seqid = p->o_arg.seqid;
        p->c_res.seqid = p->c_arg.seqid;
        p->o_res.server = p->o_arg.server;
        nfs_fattr_init(&p->f_attr);
-       nfs_fattr_init(&p->dir_attr);
        nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
 }
 
@@ -843,7 +844,6 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
        p->o_arg.name = &dentry->d_name;
        p->o_arg.server = server;
        p->o_arg.bitmask = server->attr_bitmask;
-       p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
        p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
        if (attrs != NULL && attrs->ia_valid != 0) {
                __be32 verf[2];
@@ -1332,7 +1332,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
                        case -NFS4ERR_BAD_HIGH_SLOT:
                        case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                        case -NFS4ERR_DEADSESSION:
-                               nfs4_schedule_session_recovery(server->nfs_client->cl_session);
+                               nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
                                goto out;
                        case -NFS4ERR_STALE_CLIENTID:
                        case -NFS4ERR_STALE_STATEID:
@@ -1611,8 +1611,6 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
 
        nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
 
-       nfs_refresh_inode(dir, o_res->dir_attr);
-
        if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
                status = _nfs4_proc_open_confirm(data);
                if (status != 0)
@@ -1645,11 +1643,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
 
        nfs_fattr_map_and_free_names(server, &data->f_attr);
 
-       if (o_arg->open_flags & O_CREAT) {
+       if (o_arg->open_flags & O_CREAT)
                update_changeattr(dir, &o_res->cinfo);
-               nfs_post_op_update_inode(dir, o_res->dir_attr);
-       } else
-               nfs_refresh_inode(dir, o_res->dir_attr);
        if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
                server->caps &= ~NFS_CAP_POSIX_LOCK;
        if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
@@ -1789,7 +1784,14 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct
 /*
  * Returns a referenced nfs4_state
  */
-static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
+static int _nfs4_do_open(struct inode *dir,
+                       struct dentry *dentry,
+                       fmode_t fmode,
+                       int flags,
+                       struct iattr *sattr,
+                       struct rpc_cred *cred,
+                       struct nfs4_state **res,
+                       struct nfs4_threshold **ctx_th)
 {
        struct nfs4_state_owner  *sp;
        struct nfs4_state     *state = NULL;
@@ -1814,6 +1816,11 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode
        if (opendata == NULL)
                goto err_put_state_owner;
 
+       if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
+               opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
+               if (!opendata->f_attr.mdsthreshold)
+                       goto err_opendata_put;
+       }
        if (dentry->d_inode != NULL)
                opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
 
@@ -1839,11 +1846,19 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode
                        nfs_setattr_update_inode(state->inode, sattr);
                nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
        }
+
+       if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
+               *ctx_th = opendata->f_attr.mdsthreshold;
+       else
+               kfree(opendata->f_attr.mdsthreshold);
+       opendata->f_attr.mdsthreshold = NULL;
+
        nfs4_opendata_put(opendata);
        nfs4_put_state_owner(sp);
        *res = state;
        return 0;
 err_opendata_put:
+       kfree(opendata->f_attr.mdsthreshold);
        nfs4_opendata_put(opendata);
 err_put_state_owner:
        nfs4_put_state_owner(sp);
@@ -1853,14 +1868,21 @@ out_err:
 }
 
 
-static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
+static struct nfs4_state *nfs4_do_open(struct inode *dir,
+                                       struct dentry *dentry,
+                                       fmode_t fmode,
+                                       int flags,
+                                       struct iattr *sattr,
+                                       struct rpc_cred *cred,
+                                       struct nfs4_threshold **ctx_th)
 {
        struct nfs4_exception exception = { };
        struct nfs4_state *res;
        int status;
 
        do {
-               status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res);
+               status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
+                                      &res, ctx_th);
                if (status == 0)
                        break;
                /* NOTE: BAD_SEQID means the server and client disagree about the
@@ -2184,7 +2206,8 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags
        struct nfs4_state *state;
 
        /* Protect against concurrent sillydeletes */
-       state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred);
+       state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr,
+                            ctx->cred, &ctx->mdsthreshold);
        if (IS_ERR(state))
                return ERR_CAST(state);
        ctx->state = state;
@@ -2354,8 +2377,8 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
 /*
  * get the file handle for the "/" directory on the server
  */
-static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
-                             struct nfs_fsinfo *info)
+int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
+                        struct nfs_fsinfo *info)
 {
        int minor_version = server->nfs_client->cl_minorversion;
        int status = nfs4_lookup_root(server, fhandle, info);
@@ -2372,6 +2395,31 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
        return nfs4_map_errors(status);
 }
 
+static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
+                             struct nfs_fsinfo *info)
+{
+       int error;
+       struct nfs_fattr *fattr = info->fattr;
+
+       error = nfs4_server_capabilities(server, mntfh);
+       if (error < 0) {
+               dprintk("nfs4_get_root: getcaps error = %d\n", -error);
+               return error;
+       }
+
+       error = nfs4_proc_getattr(server, mntfh, fattr);
+       if (error < 0) {
+               dprintk("nfs4_get_root: getattr error = %d\n", -error);
+               return error;
+       }
+
+       if (fattr->valid & NFS_ATTR_FATTR_FSID &&
+           !nfs_fsid_equal(&server->fsid, &fattr->fsid))
+               memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
+
+       return error;
+}
+
 /*
  * Get locations and (maybe) other attributes of a referral.
  * Note that we'll actually follow the referral later when
@@ -2578,7 +2626,7 @@ out:
        return err;
 }
 
-static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
                            struct nfs_fh *fhandle, struct nfs_fattr *fattr)
 {
        int status;
@@ -2761,7 +2809,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
                fmode = ctx->mode;
        }
        sattr->ia_mode &= ~current_umask();
-       state = nfs4_do_open(dir, de, fmode, flags, sattr, cred);
+       state = nfs4_do_open(dir, de, fmode, flags, sattr, cred, NULL);
        d_drop(dentry);
        if (IS_ERR(state)) {
                status = PTR_ERR(state);
@@ -2783,7 +2831,6 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
        struct nfs_removeargs args = {
                .fh = NFS_FH(dir),
                .name = *name,
-               .bitmask = server->attr_bitmask,
        };
        struct nfs_removeres res = {
                .server = server,
@@ -2793,19 +2840,11 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
-       int status = -ENOMEM;
-
-       res.dir_attr = nfs_alloc_fattr();
-       if (res.dir_attr == NULL)
-               goto out;
+       int status;
 
        status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
-       if (status == 0) {
+       if (status == 0)
                update_changeattr(dir, &res.cinfo);
-               nfs_post_op_update_inode(dir, res.dir_attr);
-       }
-       nfs_free_fattr(res.dir_attr);
-out:
        return status;
 }
 
@@ -2827,7 +2866,6 @@ static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
        struct nfs_removeargs *args = msg->rpc_argp;
        struct nfs_removeres *res = msg->rpc_resp;
 
-       args->bitmask = server->cache_consistency_bitmask;
        res->server = server;
        msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
        nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
@@ -2852,7 +2890,6 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
        if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
                return 0;
        update_changeattr(dir, &res->cinfo);
-       nfs_post_op_update_inode(dir, res->dir_attr);
        return 1;
 }
 
@@ -2863,7 +2900,6 @@ static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
        struct nfs_renameres *res = msg->rpc_resp;
 
        msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
-       arg->bitmask = server->attr_bitmask;
        res->server = server;
        nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
 }
@@ -2889,9 +2925,7 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
                return 0;
 
        update_changeattr(old_dir, &res->old_cinfo);
-       nfs_post_op_update_inode(old_dir, res->old_fattr);
        update_changeattr(new_dir, &res->new_cinfo);
-       nfs_post_op_update_inode(new_dir, res->new_fattr);
        return 1;
 }
 
@@ -2904,7 +2938,6 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
                .new_dir = NFS_FH(new_dir),
                .old_name = old_name,
                .new_name = new_name,
-               .bitmask = server->attr_bitmask,
        };
        struct nfs_renameres res = {
                .server = server,
@@ -2916,21 +2949,11 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
        };
        int status = -ENOMEM;
        
-       res.old_fattr = nfs_alloc_fattr();
-       res.new_fattr = nfs_alloc_fattr();
-       if (res.old_fattr == NULL || res.new_fattr == NULL)
-               goto out;
-
        status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
        if (!status) {
                update_changeattr(old_dir, &res.old_cinfo);
-               nfs_post_op_update_inode(old_dir, res.old_fattr);
                update_changeattr(new_dir, &res.new_cinfo);
-               nfs_post_op_update_inode(new_dir, res.new_fattr);
        }
-out:
-       nfs_free_fattr(res.new_fattr);
-       nfs_free_fattr(res.old_fattr);
        return status;
 }
 
@@ -2968,18 +2991,15 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *
        int status = -ENOMEM;
 
        res.fattr = nfs_alloc_fattr();
-       res.dir_attr = nfs_alloc_fattr();
-       if (res.fattr == NULL || res.dir_attr == NULL)
+       if (res.fattr == NULL)
                goto out;
 
        status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
        if (!status) {
                update_changeattr(dir, &res.cinfo);
-               nfs_post_op_update_inode(dir, res.dir_attr);
                nfs_post_op_update_inode(inode, res.fattr);
        }
 out:
-       nfs_free_fattr(res.dir_attr);
        nfs_free_fattr(res.fattr);
        return status;
 }
@@ -3002,7 +3022,6 @@ struct nfs4_createdata {
        struct nfs4_create_res res;
        struct nfs_fh fh;
        struct nfs_fattr fattr;
-       struct nfs_fattr dir_fattr;
 };
 
 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
@@ -3026,9 +3045,7 @@ static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
                data->res.server = server;
                data->res.fh = &data->fh;
                data->res.fattr = &data->fattr;
-               data->res.dir_fattr = &data->dir_fattr;
                nfs_fattr_init(data->res.fattr);
-               nfs_fattr_init(data->res.dir_fattr);
        }
        return data;
 }
@@ -3039,7 +3056,6 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
                                    &data->arg.seq_args, &data->res.seq_res, 1);
        if (status == 0) {
                update_changeattr(dir, &data->res.dir_cinfo);
-               nfs_post_op_update_inode(dir, data->res.dir_fattr);
                status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
        }
        return status;
@@ -3335,12 +3351,12 @@ static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
 
 void __nfs4_read_done_cb(struct nfs_read_data *data)
 {
-       nfs_invalidate_atime(data->inode);
+       nfs_invalidate_atime(data->header->inode);
 }
 
 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
 {
-       struct nfs_server *server = NFS_SERVER(data->inode);
+       struct nfs_server *server = NFS_SERVER(data->header->inode);
 
        if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
                rpc_restart_call_prepare(task);
@@ -3375,7 +3391,7 @@ static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message
 
 static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
 {
-       if (nfs4_setup_sequence(NFS_SERVER(data->inode),
+       if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
                                &data->args.seq_args,
                                &data->res.seq_res,
                                task))
@@ -3383,25 +3399,9 @@ static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_da
        rpc_call_start(task);
 }
 
-/* Reset the the nfs_read_data to send the read to the MDS. */
-void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
-{
-       dprintk("%s Reset task for i/o through\n", __func__);
-       put_lseg(data->lseg);
-       data->lseg = NULL;
-       /* offsets will differ in the dense stripe case */
-       data->args.offset = data->mds_offset;
-       data->ds_clp = NULL;
-       data->args.fh     = NFS_FH(data->inode);
-       data->read_done_cb = nfs4_read_done_cb;
-       task->tk_ops = data->mds_ops;
-       rpc_task_reset_client(task, NFS_CLIENT(data->inode));
-}
-EXPORT_SYMBOL_GPL(nfs4_reset_read);
-
 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
 {
-       struct inode *inode = data->inode;
+       struct inode *inode = data->header->inode;
        
        if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
                rpc_restart_call_prepare(task);
@@ -3409,7 +3409,7 @@ static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data
        }
        if (task->tk_status >= 0) {
                renew_lease(NFS_SERVER(inode), data->timestamp);
-               nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
+               nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
        }
        return 0;
 }
@@ -3422,32 +3422,30 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
                nfs4_write_done_cb(task, data);
 }
 
-/* Reset the the nfs_write_data to send the write to the MDS. */
-void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
+static
+bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
 {
-       dprintk("%s Reset task for i/o through\n", __func__);
-       put_lseg(data->lseg);
-       data->lseg          = NULL;
-       data->ds_clp        = NULL;
-       data->write_done_cb = nfs4_write_done_cb;
-       data->args.fh       = NFS_FH(data->inode);
-       data->args.bitmask  = data->res.server->cache_consistency_bitmask;
-       data->args.offset   = data->mds_offset;
-       data->res.fattr     = &data->fattr;
-       task->tk_ops        = data->mds_ops;
-       rpc_task_reset_client(task, NFS_CLIENT(data->inode));
+       const struct nfs_pgio_header *hdr = data->header;
+
+       /* Don't request attributes for pNFS or O_DIRECT writes */
+       if (data->ds_clp != NULL || hdr->dreq != NULL)
+               return false;
+       /* Otherwise, request attributes if and only if we don't hold
+        * a delegation
+        */
+       return nfs_have_delegation(hdr->inode, FMODE_READ) == 0;
 }
-EXPORT_SYMBOL_GPL(nfs4_reset_write);
 
 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
 {
-       struct nfs_server *server = NFS_SERVER(data->inode);
+       struct nfs_server *server = NFS_SERVER(data->header->inode);
 
-       if (data->lseg) {
+       if (!nfs4_write_need_cache_consistency_data(data)) {
                data->args.bitmask = NULL;
                data->res.fattr = NULL;
        } else
                data->args.bitmask = server->cache_consistency_bitmask;
+
        if (!data->write_done_cb)
                data->write_done_cb = nfs4_write_done_cb;
        data->res.server = server;
@@ -3458,6 +3456,16 @@ static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_messag
 }
 
 static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
+{
+       if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
+                               &data->args.seq_args,
+                               &data->res.seq_res,
+                               task))
+               return;
+       rpc_call_start(task);
+}
+
+static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
 {
        if (nfs4_setup_sequence(NFS_SERVER(data->inode),
                                &data->args.seq_args,
@@ -3467,7 +3475,7 @@ static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_
        rpc_call_start(task);
 }
 
-static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
+static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
 {
        struct inode *inode = data->inode;
 
@@ -3475,28 +3483,22 @@ static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *dat
                rpc_restart_call_prepare(task);
                return -EAGAIN;
        }
-       nfs_refresh_inode(inode, data->res.fattr);
        return 0;
 }
 
-static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
+static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
 {
        if (!nfs4_sequence_done(task, &data->res.seq_res))
                return -EAGAIN;
-       return data->write_done_cb(task, data);
+       return data->commit_done_cb(task, data);
 }
 
-static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
 {
        struct nfs_server *server = NFS_SERVER(data->inode);
 
-       if (data->lseg) {
-               data->args.bitmask = NULL;
-               data->res.fattr = NULL;
-       } else
-               data->args.bitmask = server->cache_consistency_bitmask;
-       if (!data->write_done_cb)
-               data->write_done_cb = nfs4_commit_done_cb;
+       if (data->commit_done_cb == NULL)
+               data->commit_done_cb = nfs4_commit_done_cb;
        data->res.server = server;
        msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
        nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
@@ -3905,7 +3907,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
                case -NFS4ERR_SEQ_MISORDERED:
                        dprintk("%s ERROR %d, Reset session\n", __func__,
                                task->tk_status);
-                       nfs4_schedule_session_recovery(clp->cl_session);
+                       nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
                        task->tk_status = 0;
                        return -EAGAIN;
 #endif /* CONFIG_NFS_V4_1 */
@@ -3931,13 +3933,21 @@ wait_on_recovery:
        return -EAGAIN;
 }
 
-static void nfs4_construct_boot_verifier(struct nfs_client *clp,
-                                        nfs4_verifier *bootverf)
+static void nfs4_init_boot_verifier(const struct nfs_client *clp,
+                                   nfs4_verifier *bootverf)
 {
        __be32 verf[2];
 
-       verf[0] = htonl((u32)clp->cl_boot_time.tv_sec);
-       verf[1] = htonl((u32)clp->cl_boot_time.tv_nsec);
+       if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
+               /* An impossible timestamp guarantees this value
+                * will never match a generated boot time. */
+               verf[0] = 0;
+               verf[1] = (__be32)(NSEC_PER_SEC + 1);
+       } else {
+               struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
+               verf[0] = (__be32)nn->boot_time.tv_sec;
+               verf[1] = (__be32)nn->boot_time.tv_nsec;
+       }
        memcpy(bootverf->data, verf, sizeof(bootverf->data));
 }
 
@@ -3960,7 +3970,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
        int loop = 0;
        int status;
 
-       nfs4_construct_boot_verifier(clp, &sc_verifier);
+       nfs4_init_boot_verifier(clp, &sc_verifier);
 
        for(;;) {
                rcu_read_lock();
@@ -4104,7 +4114,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
        nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
        data->args.fhandle = &data->fh;
        data->args.stateid = &data->stateid;
-       data->args.bitmask = server->attr_bitmask;
+       data->args.bitmask = server->cache_consistency_bitmask;
        nfs_copy_fh(&data->fh, NFS_FH(inode));
        nfs4_stateid_copy(&data->stateid, stateid);
        data->res.fattr = &data->fattr;
@@ -4125,9 +4135,10 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
        if (status != 0)
                goto out;
        status = data->rpc_status;
-       if (status != 0)
-               goto out;
-       nfs_refresh_inode(inode, &data->fattr);
+       if (status == 0)
+               nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
+       else
+               nfs_refresh_inode(inode, &data->fattr);
 out:
        rpc_put_task(task);
        return status;
@@ -4837,7 +4848,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
                        case -NFS4ERR_BAD_HIGH_SLOT:
                        case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                        case -NFS4ERR_DEADSESSION:
-                               nfs4_schedule_session_recovery(server->nfs_client->cl_session);
+                               nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
                                goto out;
                        case -ERESTARTSYS:
                                /*
@@ -5079,7 +5090,8 @@ out_inval:
 }
 
 static bool
-nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
+nfs41_same_server_scope(struct nfs41_server_scope *a,
+                       struct nfs41_server_scope *b)
 {
        if (a->server_scope_sz == b->server_scope_sz &&
            memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
@@ -5088,6 +5100,61 @@ nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
        return false;
 }
 
+/*
+ * nfs4_proc_bind_conn_to_session()
+ *
+ * The 4.1 client currently uses the same TCP connection for the
+ * fore and backchannel.
+ */
+int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
+{
+       int status;
+       struct nfs41_bind_conn_to_session_res res;
+       struct rpc_message msg = {
+               .rpc_proc =
+                       &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
+               .rpc_argp = clp,
+               .rpc_resp = &res,
+               .rpc_cred = cred,
+       };
+
+       dprintk("--> %s\n", __func__);
+       BUG_ON(clp == NULL);
+
+       res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
+       if (unlikely(res.session == NULL)) {
+               status = -ENOMEM;
+               goto out;
+       }
+
+       status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+       if (status == 0) {
+               if (memcmp(res.session->sess_id.data,
+                   clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
+                       dprintk("NFS: %s: Session ID mismatch\n", __func__);
+                       status = -EIO;
+                       goto out_session;
+               }
+               if (res.dir != NFS4_CDFS4_BOTH) {
+                       dprintk("NFS: %s: Unexpected direction from server\n",
+                               __func__);
+                       status = -EIO;
+                       goto out_session;
+               }
+               if (res.use_conn_in_rdma_mode) {
+                       dprintk("NFS: %s: Server returned RDMA mode = true\n",
+                               __func__);
+                       status = -EIO;
+                       goto out_session;
+               }
+       }
+out_session:
+       kfree(res.session);
+out:
+       dprintk("<-- %s status= %d\n", __func__, status);
+       return status;
+}
+
 /*
  * nfs4_proc_exchange_id()
  *
@@ -5105,7 +5172,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
                .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
        };
        struct nfs41_exchange_id_res res = {
-               .client = clp,
+               0
        };
        int status;
        struct rpc_message msg = {
@@ -5118,7 +5185,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
        dprintk("--> %s\n", __func__);
        BUG_ON(clp == NULL);
 
-       nfs4_construct_boot_verifier(clp, &verifier);
+       nfs4_init_boot_verifier(clp, &verifier);
 
        args.id_len = scnprintf(args.id, sizeof(args.id),
                                "%s/%s/%u",
@@ -5126,59 +5193,135 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
                                clp->cl_rpcclient->cl_nodename,
                                clp->cl_rpcclient->cl_auth->au_flavor);
 
-       res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
-       if (unlikely(!res.server_scope)) {
+       res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
+                                       GFP_NOFS);
+       if (unlikely(res.server_owner == NULL)) {
                status = -ENOMEM;
                goto out;
        }
 
-       res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL);
-       if (unlikely(!res.impl_id)) {
+       res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
+                                       GFP_NOFS);
+       if (unlikely(res.server_scope == NULL)) {
+               status = -ENOMEM;
+               goto out_server_owner;
+       }
+
+       res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
+       if (unlikely(res.impl_id == NULL)) {
                status = -ENOMEM;
                goto out_server_scope;
        }
 
        status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
-       if (!status)
-               status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
+       if (status == 0)
+               status = nfs4_check_cl_exchange_flags(res.flags);
+
+       if (status == 0) {
+               clp->cl_clientid = res.clientid;
+               clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R);
+               if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R))
+                       clp->cl_seqid = res.seqid;
+
+               kfree(clp->cl_serverowner);
+               clp->cl_serverowner = res.server_owner;
+               res.server_owner = NULL;
 
-       if (!status) {
                /* use the most recent implementation id */
-               kfree(clp->impl_id);
-               clp->impl_id = res.impl_id;
-       } else
-               kfree(res.impl_id);
+               kfree(clp->cl_implid);
+               clp->cl_implid = res.impl_id;
 
-       if (!status) {
-               if (clp->server_scope &&
-                   !nfs41_same_server_scope(clp->server_scope,
+               if (clp->cl_serverscope != NULL &&
+                   !nfs41_same_server_scope(clp->cl_serverscope,
                                             res.server_scope)) {
                        dprintk("%s: server_scope mismatch detected\n",
                                __func__);
                        set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
-                       kfree(clp->server_scope);
-                       clp->server_scope = NULL;
+                       kfree(clp->cl_serverscope);
+                       clp->cl_serverscope = NULL;
                }
 
-               if (!clp->server_scope) {
-                       clp->server_scope = res.server_scope;
+               if (clp->cl_serverscope == NULL) {
+                       clp->cl_serverscope = res.server_scope;
                        goto out;
                }
-       }
+       } else
+               kfree(res.impl_id);
 
+out_server_owner:
+       kfree(res.server_owner);
 out_server_scope:
        kfree(res.server_scope);
 out:
-       if (clp->impl_id)
+       if (clp->cl_implid != NULL)
                dprintk("%s: Server Implementation ID: "
                        "domain: %s, name: %s, date: %llu,%u\n",
-                       __func__, clp->impl_id->domain, clp->impl_id->name,
-                       clp->impl_id->date.seconds,
-                       clp->impl_id->date.nseconds);
+                       __func__, clp->cl_implid->domain, clp->cl_implid->name,
+                       clp->cl_implid->date.seconds,
+                       clp->cl_implid->date.nseconds);
        dprintk("<-- %s status= %d\n", __func__, status);
        return status;
 }
 
+static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
+               struct rpc_cred *cred)
+{
+       struct rpc_message msg = {
+               .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
+               .rpc_argp = clp,
+               .rpc_cred = cred,
+       };
+       int status;
+
+       status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+       if (status)
+               pr_warn("NFS: Got error %d from the server %s on "
+                       "DESTROY_CLIENTID.", status, clp->cl_hostname);
+       return status;
+}
+
+static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
+               struct rpc_cred *cred)
+{
+       unsigned int loop;
+       int ret;
+
+       for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
+               ret = _nfs4_proc_destroy_clientid(clp, cred);
+               switch (ret) {
+               case -NFS4ERR_DELAY:
+               case -NFS4ERR_CLIENTID_BUSY:
+                       ssleep(1);
+                       break;
+               default:
+                       return ret;
+               }
+       }
+       return 0;
+}
+
+int nfs4_destroy_clientid(struct nfs_client *clp)
+{
+       struct rpc_cred *cred;
+       int ret = 0;
+
+       if (clp->cl_mvops->minor_version < 1)
+               goto out;
+       if (clp->cl_exchange_flags == 0)
+               goto out;
+       cred = nfs4_get_exchange_id_cred(clp);
+       ret = nfs4_proc_destroy_clientid(clp, cred);
+       if (cred)
+               put_rpccred(cred);
+       switch (ret) {
+       case 0:
+       case -NFS4ERR_STALE_CLIENTID:
+               clp->cl_exchange_flags = 0;
+       }
+out:
+       return ret;
+}
+
 struct nfs4_get_lease_time_data {
        struct nfs4_get_lease_time_args *args;
        struct nfs4_get_lease_time_res *res;
@@ -5399,8 +5542,12 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
 void nfs4_destroy_session(struct nfs4_session *session)
 {
        struct rpc_xprt *xprt;
+       struct rpc_cred *cred;
 
-       nfs4_proc_destroy_session(session);
+       cred = nfs4_get_exchange_id_cred(session->clp);
+       nfs4_proc_destroy_session(session, cred);
+       if (cred)
+               put_rpccred(cred);
 
        rcu_read_lock();
        xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
@@ -5510,7 +5657,8 @@ static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
        return nfs4_verify_back_channel_attrs(args, session);
 }
 
-static int _nfs4_proc_create_session(struct nfs_client *clp)
+static int _nfs4_proc_create_session(struct nfs_client *clp,
+               struct rpc_cred *cred)
 {
        struct nfs4_session *session = clp->cl_session;
        struct nfs41_create_session_args args = {
@@ -5524,6 +5672,7 @@ static int _nfs4_proc_create_session(struct nfs_client *clp)
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
                .rpc_argp = &args,
                .rpc_resp = &res,
+               .rpc_cred = cred,
        };
        int status;
 
@@ -5548,7 +5697,7 @@ static int _nfs4_proc_create_session(struct nfs_client *clp)
  * It is the responsibility of the caller to verify the session is
  * expired before calling this routine.
  */
-int nfs4_proc_create_session(struct nfs_client *clp)
+int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
 {
        int status;
        unsigned *ptr;
@@ -5556,7 +5705,7 @@ int nfs4_proc_create_session(struct nfs_client *clp)
 
        dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
 
-       status = _nfs4_proc_create_session(clp);
+       status = _nfs4_proc_create_session(clp, cred);
        if (status)
                goto out;
 
@@ -5578,10 +5727,15 @@ out:
  * Issue the over-the-wire RPC DESTROY_SESSION.
  * The caller must serialize access to this routine.
  */
-int nfs4_proc_destroy_session(struct nfs4_session *session)
+int nfs4_proc_destroy_session(struct nfs4_session *session,
+               struct rpc_cred *cred)
 {
+       struct rpc_message msg = {
+               .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
+               .rpc_argp = session,
+               .rpc_cred = cred,
+       };
        int status = 0;
-       struct rpc_message msg;
 
        dprintk("--> nfs4_proc_destroy_session\n");
 
@@ -5589,10 +5743,6 @@ int nfs4_proc_destroy_session(struct nfs4_session *session)
        if (session->clp->cl_cons_state != NFS_CS_READY)
                return status;
 
-       msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
-       msg.rpc_argp = session;
-       msg.rpc_resp = NULL;
-       msg.rpc_cred = NULL;
        status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
 
        if (status)
@@ -5604,53 +5754,79 @@ int nfs4_proc_destroy_session(struct nfs4_session *session)
        return status;
 }
 
+/*
+ * With sessions, the client is not marked ready until after a
+ * successful EXCHANGE_ID and CREATE_SESSION.
+ *
+ * Map errors cl_cons_state errors to EPROTONOSUPPORT to indicate
+ * other versions of NFS can be tried.
+ */
+static int nfs41_check_session_ready(struct nfs_client *clp)
+{
+       int ret;
+
+       if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
+               ret = nfs4_client_recover_expired_lease(clp);
+               if (ret)
+                       return ret;
+       }
+       if (clp->cl_cons_state < NFS_CS_READY)
+               return -EPROTONOSUPPORT;
+       smp_rmb();
+       return 0;
+}
+
 int nfs4_init_session(struct nfs_server *server)
 {
        struct nfs_client *clp = server->nfs_client;
        struct nfs4_session *session;
        unsigned int rsize, wsize;
-       int ret;
 
        if (!nfs4_has_session(clp))
                return 0;
 
        session = clp->cl_session;
-       if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
-               return 0;
+       spin_lock(&clp->cl_lock);
+       if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
 
-       rsize = server->rsize;
-       if (rsize == 0)
-               rsize = NFS_MAX_FILE_IO_SIZE;
-       wsize = server->wsize;
-       if (wsize == 0)
-               wsize = NFS_MAX_FILE_IO_SIZE;
+               rsize = server->rsize;
+               if (rsize == 0)
+                       rsize = NFS_MAX_FILE_IO_SIZE;
+               wsize = server->wsize;
+               if (wsize == 0)
+                       wsize = NFS_MAX_FILE_IO_SIZE;
 
-       session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
-       session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
+               session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
+               session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
+       }
+       spin_unlock(&clp->cl_lock);
 
-       ret = nfs4_recover_expired_lease(server);
-       if (!ret)
-               ret = nfs4_check_client_ready(clp);
-       return ret;
+       return nfs41_check_session_ready(clp);
 }
 
-int nfs4_init_ds_session(struct nfs_client *clp)
+int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
 {
        struct nfs4_session *session = clp->cl_session;
        int ret;
 
-       if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
-               return 0;
-
-       ret = nfs4_client_recover_expired_lease(clp);
-       if (!ret)
-               /* Test for the DS role */
-               if (!is_ds_client(clp))
-                       ret = -ENODEV;
-       if (!ret)
-               ret = nfs4_check_client_ready(clp);
-       return ret;
+       spin_lock(&clp->cl_lock);
+       if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
+               /*
+                * Do not set NFS_CS_CHECK_LEASE_TIME instead set the
+                * DS lease to be equal to the MDS lease.
+                */
+               clp->cl_lease_time = lease_time;
+               clp->cl_last_renewal = jiffies;
+       }
+       spin_unlock(&clp->cl_lock);
 
+       ret = nfs41_check_session_ready(clp);
+       if (ret)
+               return ret;
+       /* Test for the DS role */
+       if (!is_ds_client(clp))
+               return -ENODEV;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
 
@@ -6557,6 +6733,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
        .file_inode_ops = &nfs4_file_inode_operations,
        .file_ops       = &nfs4_file_operations,
        .getroot        = nfs4_proc_get_root,
+       .submount       = nfs4_submount,
        .getattr        = nfs4_proc_getattr,
        .setattr        = nfs4_proc_setattr,
        .lookup         = nfs4_proc_lookup,
@@ -6589,13 +6766,13 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
        .write_rpc_prepare = nfs4_proc_write_rpc_prepare,
        .write_done     = nfs4_write_done,
        .commit_setup   = nfs4_proc_commit_setup,
+       .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
        .commit_done    = nfs4_commit_done,
        .lock           = nfs4_proc_lock,
        .clear_acl_cache = nfs4_zap_acl_attr,
        .close_context  = nfs4_close_context,
        .open_context   = nfs4_atomic_open,
        .init_client    = nfs4_init_client,
-       .secinfo        = nfs4_proc_secinfo,
 };
 
 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
index dc484c0eae7f9706716e4a73cabe03857dd3ae15..6930bec91bca22a8f8f7cf0548dcfe9cc6964762 100644 (file)
@@ -49,7 +49,7 @@
 #include "nfs4_fs.h"
 #include "delegation.h"
 
-#define NFSDBG_FACILITY        NFSDBG_PROC
+#define NFSDBG_FACILITY                NFSDBG_STATE
 
 void
 nfs4_renew_state(struct work_struct *work)
index 7f0fcfc1fe9db51e9bc3748f511163dfed7cdce7..c679b9ecef634c80d4738e3cc2a9624f51c327c2 100644 (file)
@@ -57,6 +57,8 @@
 #include "internal.h"
 #include "pnfs.h"
 
+#define NFSDBG_FACILITY                NFSDBG_STATE
+
 #define OPENOWNER_POOL_SIZE    8
 
 const nfs4_stateid zero_stateid;
@@ -254,7 +256,7 @@ int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
                goto out;
        set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
 do_confirm:
-       status = nfs4_proc_create_session(clp);
+       status = nfs4_proc_create_session(clp, cred);
        if (status != 0)
                goto out;
        clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
@@ -1106,6 +1108,8 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp)
                return;
        if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
+       dprintk("%s: scheduling lease recovery for server %s\n", __func__,
+                       clp->cl_hostname);
        nfs4_schedule_state_manager(clp);
 }
 EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery);
@@ -1122,6 +1126,8 @@ static void nfs40_handle_cb_pathdown(struct nfs_client *clp)
 {
        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
        nfs_expire_all_delegations(clp);
+       dprintk("%s: handling CB_PATHDOWN recovery for server %s\n", __func__,
+                       clp->cl_hostname);
 }
 
 void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
@@ -1158,6 +1164,8 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4
        struct nfs_client *clp = server->nfs_client;
 
        nfs4_state_mark_reclaim_nograce(clp, state);
+       dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
+                       clp->cl_hostname);
        nfs4_schedule_state_manager(clp);
 }
 EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
@@ -1491,19 +1499,25 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
                case -NFS4ERR_BADSLOT:
                case -NFS4ERR_BAD_HIGH_SLOT:
                case -NFS4ERR_DEADSESSION:
-               case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                case -NFS4ERR_SEQ_FALSE_RETRY:
                case -NFS4ERR_SEQ_MISORDERED:
                        set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
                        /* Zero session reset errors */
                        break;
+               case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+                       set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+                       break;
                case -EKEYEXPIRED:
                        /* Nothing we can do */
                        nfs4_warn_keyexpired(clp->cl_hostname);
                        break;
                default:
+                       dprintk("%s: failed to handle error %d for server %s\n",
+                                       __func__, error, clp->cl_hostname);
                        return error;
        }
+       dprintk("%s: handled error %d for server %s\n", __func__, error,
+                       clp->cl_hostname);
        return 0;
 }
 
@@ -1572,34 +1586,82 @@ out:
        return nfs4_recovery_handle_error(clp, status);
 }
 
+/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
+ * on EXCHANGE_ID for v4.1
+ */
+static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
+{
+       switch (status) {
+       case -NFS4ERR_SEQ_MISORDERED:
+               if (test_and_set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state))
+                       return -ESERVERFAULT;
+               /* Lease confirmation error: retry after purging the lease */
+               ssleep(1);
+       case -NFS4ERR_CLID_INUSE:
+       case -NFS4ERR_STALE_CLIENTID:
+               clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+               break;
+       case -EACCES:
+               if (clp->cl_machine_cred == NULL)
+                       return -EACCES;
+               /* Handle case where the user hasn't set up machine creds */
+               nfs4_clear_machine_cred(clp);
+       case -NFS4ERR_DELAY:
+       case -ETIMEDOUT:
+       case -EAGAIN:
+               ssleep(1);
+               break;
+
+       case -NFS4ERR_MINOR_VERS_MISMATCH:
+               if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
+                       nfs_mark_client_ready(clp, -EPROTONOSUPPORT);
+               dprintk("%s: exit with error %d for server %s\n",
+                               __func__, -EPROTONOSUPPORT, clp->cl_hostname);
+               return -EPROTONOSUPPORT;
+       case -EKEYEXPIRED:
+               nfs4_warn_keyexpired(clp->cl_hostname);
+       case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
+                                * in nfs4_exchange_id */
+       default:
+               dprintk("%s: exit with error %d for server %s\n", __func__,
+                               status, clp->cl_hostname);
+               return status;
+       }
+       set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+       dprintk("%s: handled error %d for server %s\n", __func__, status,
+                       clp->cl_hostname);
+       return 0;
+}
+
 static int nfs4_reclaim_lease(struct nfs_client *clp)
 {
        struct rpc_cred *cred;
        const struct nfs4_state_recovery_ops *ops =
                clp->cl_mvops->reboot_recovery_ops;
-       int status = -ENOENT;
+       int status;
 
        cred = ops->get_clid_cred(clp);
-       if (cred != NULL) {
-               status = ops->establish_clid(clp, cred);
-               put_rpccred(cred);
-               /* Handle case where the user hasn't set up machine creds */
-               if (status == -EACCES && cred == clp->cl_machine_cred) {
-                       nfs4_clear_machine_cred(clp);
-                       status = -EAGAIN;
-               }
-               if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
-                       status = -EPROTONOSUPPORT;
-       }
-       return status;
+       if (cred == NULL)
+               return -ENOENT;
+       status = ops->establish_clid(clp, cred);
+       put_rpccred(cred);
+       if (status != 0)
+               return nfs4_handle_reclaim_lease_error(clp, status);
+       return 0;
 }
 
 #ifdef CONFIG_NFS_V4_1
-void nfs4_schedule_session_recovery(struct nfs4_session *session)
+void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
 {
        struct nfs_client *clp = session->clp;
 
-       set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+       switch (err) {
+       default:
+               set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+               break;
+       case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+               set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+       }
        nfs4_schedule_lease_recovery(clp);
 }
 EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
@@ -1607,14 +1669,19 @@ EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
 void nfs41_handle_recall_slot(struct nfs_client *clp)
 {
        set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
+       dprintk("%s: scheduling slot recall for server %s\n", __func__,
+                       clp->cl_hostname);
        nfs4_schedule_state_manager(clp);
 }
 
 static void nfs4_reset_all_state(struct nfs_client *clp)
 {
        if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
-               clp->cl_boot_time = CURRENT_TIME;
+               set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
+               clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
                nfs4_state_start_reclaim_nograce(clp);
+               dprintk("%s: scheduling reset of all state for server %s!\n",
+                               __func__, clp->cl_hostname);
                nfs4_schedule_state_manager(clp);
        }
 }
@@ -1623,33 +1690,50 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
 {
        if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
                nfs4_state_start_reclaim_reboot(clp);
+               dprintk("%s: server %s rebooted!\n", __func__,
+                               clp->cl_hostname);
                nfs4_schedule_state_manager(clp);
        }
 }
 
 static void nfs41_handle_state_revoked(struct nfs_client *clp)
 {
-       /* Temporary */
        nfs4_reset_all_state(clp);
+       dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
 }
 
 static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
 {
        /* This will need to handle layouts too */
        nfs_expire_all_delegations(clp);
+       dprintk("%s: Recallable state revoked on server %s!\n", __func__,
+                       clp->cl_hostname);
 }
 
-static void nfs41_handle_cb_path_down(struct nfs_client *clp)
+static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
 {
        nfs_expire_all_delegations(clp);
        if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
                nfs4_schedule_state_manager(clp);
+       dprintk("%s: server %s declared a backchannel fault\n", __func__,
+                       clp->cl_hostname);
+}
+
+static void nfs41_handle_cb_path_down(struct nfs_client *clp)
+{
+       if (test_and_set_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
+               &clp->cl_state) == 0)
+               nfs4_schedule_state_manager(clp);
 }
 
 void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
 {
        if (!flags)
                return;
+
+       dprintk("%s: \"%s\" (client ID %llx) flags=0x%08x\n",
+               __func__, clp->cl_hostname, clp->cl_clientid, flags);
+
        if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
                nfs41_handle_server_reboot(clp);
        if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
@@ -1659,18 +1743,21 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
                nfs41_handle_state_revoked(clp);
        if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
                nfs41_handle_recallable_state_revoked(clp);
-       if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
-                           SEQ4_STATUS_BACKCHANNEL_FAULT |
-                           SEQ4_STATUS_CB_PATH_DOWN_SESSION))
+       if (flags & SEQ4_STATUS_BACKCHANNEL_FAULT)
+               nfs41_handle_backchannel_fault(clp);
+       else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+                               SEQ4_STATUS_CB_PATH_DOWN_SESSION))
                nfs41_handle_cb_path_down(clp);
 }
 
 static int nfs4_reset_session(struct nfs_client *clp)
 {
+       struct rpc_cred *cred;
        int status;
 
        nfs4_begin_drain_session(clp);
-       status = nfs4_proc_destroy_session(clp->cl_session);
+       cred = nfs4_get_exchange_id_cred(clp);
+       status = nfs4_proc_destroy_session(clp->cl_session, cred);
        if (status && status != -NFS4ERR_BADSESSION &&
            status != -NFS4ERR_DEADSESSION) {
                status = nfs4_recovery_handle_error(clp, status);
@@ -1678,19 +1765,26 @@ static int nfs4_reset_session(struct nfs_client *clp)
        }
 
        memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
-       status = nfs4_proc_create_session(clp);
+       status = nfs4_proc_create_session(clp, cred);
        if (status) {
-               status = nfs4_recovery_handle_error(clp, status);
+               dprintk("%s: session reset failed with status %d for server %s!\n",
+                       __func__, status, clp->cl_hostname);
+               status = nfs4_handle_reclaim_lease_error(clp, status);
                goto out;
        }
        clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
        /* create_session negotiated new slot table */
        clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
+       clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+       dprintk("%s: session reset was successful for server %s!\n",
+                       __func__, clp->cl_hostname);
 
         /* Let the state manager reestablish state */
        if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                nfs41_setup_state_renewal(clp);
 out:
+       if (cred)
+               put_rpccred(cred);
        return status;
 }
 
@@ -1722,37 +1816,41 @@ static int nfs4_recall_slot(struct nfs_client *clp)
        return 0;
 }
 
-#else /* CONFIG_NFS_V4_1 */
-static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
-static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
-static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
-#endif /* CONFIG_NFS_V4_1 */
-
-/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
- * on EXCHANGE_ID for v4.1
- */
-static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
+static int nfs4_bind_conn_to_session(struct nfs_client *clp)
 {
-       switch (status) {
-       case -NFS4ERR_CLID_INUSE:
-       case -NFS4ERR_STALE_CLIENTID:
-               clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+       struct rpc_cred *cred;
+       int ret;
+
+       nfs4_begin_drain_session(clp);
+       cred = nfs4_get_exchange_id_cred(clp);
+       ret = nfs4_proc_bind_conn_to_session(clp, cred);
+       if (cred)
+               put_rpccred(cred);
+       clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+       switch (ret) {
+       case 0:
+               dprintk("%s: bind_conn_to_session was successful for server %s!\n",
+                       __func__, clp->cl_hostname);
                break;
        case -NFS4ERR_DELAY:
-       case -ETIMEDOUT:
-       case -EAGAIN:
                ssleep(1);
+               set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
                break;
-
-       case -EKEYEXPIRED:
-               nfs4_warn_keyexpired(clp->cl_hostname);
-       case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
-                                * in nfs4_exchange_id */
        default:
-               return;
+               return nfs4_recovery_handle_error(clp, ret);
        }
-       set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+       return 0;
 }
+#else /* CONFIG_NFS_V4_1 */
+static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
+static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
+static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
+
+static int nfs4_bind_conn_to_session(struct nfs_client *clp)
+{
+       return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
 
 static void nfs4_state_manager(struct nfs_client *clp)
 {
@@ -1760,19 +1858,21 @@ static void nfs4_state_manager(struct nfs_client *clp)
 
        /* Ensure exclusive access to NFSv4 state */
        do {
+               if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
+                       status = nfs4_reclaim_lease(clp);
+                       if (status < 0)
+                               goto out_error;
+                       clear_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
+                       set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+               }
+
                if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
                        /* We're going to have to re-establish a clientid */
                        status = nfs4_reclaim_lease(clp);
-                       if (status) {
-                               nfs4_set_lease_expired(clp, status);
-                               if (test_bit(NFS4CLNT_LEASE_EXPIRED,
-                                                       &clp->cl_state))
-                                       continue;
-                               if (clp->cl_cons_state ==
-                                                       NFS_CS_SESSION_INITING)
-                                       nfs_mark_client_ready(clp, status);
+                       if (status < 0)
                                goto out_error;
-                       }
+                       if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
+                               continue;
                        clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
 
                        if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH,
@@ -1803,6 +1903,15 @@ static void nfs4_state_manager(struct nfs_client *clp)
                                goto out_error;
                }
 
+               /* Send BIND_CONN_TO_SESSION */
+               if (test_and_clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
+                               &clp->cl_state) && nfs4_has_session(clp)) {
+                       status = nfs4_bind_conn_to_session(clp);
+                       if (status < 0)
+                               goto out_error;
+                       continue;
+               }
+
                /* First recover reboot state... */
                if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
                        status = nfs4_do_reclaim(clp,
index c54aae364beebd38833151f97328c3edfe7c2337..ee4a74db95d0b1b7ea49e8dd1263f0504f2fffa8 100644 (file)
 #include <linux/nfs4.h>
 #include <linux/nfs_fs.h>
 #include <linux/nfs_idmap.h>
+
 #include "nfs4_fs.h"
 #include "internal.h"
 #include "pnfs.h"
+#include "netns.h"
 
 #define NFSDBG_FACILITY                NFSDBG_XDR
 
@@ -99,9 +101,12 @@ static int nfs4_stat_to_errno(int);
 #define nfs4_path_maxsz                (1 + ((3 + NFS4_MAXPATHLEN) >> 2))
 #define nfs4_owner_maxsz       (1 + XDR_QUADLEN(IDMAP_NAMESZ))
 #define nfs4_group_maxsz       (1 + XDR_QUADLEN(IDMAP_NAMESZ))
+/* We support only one layout type per file system */
+#define decode_mdsthreshold_maxsz (1 + 1 + nfs4_fattr_bitmap_maxsz + 1 + 8)
 /* This is based on getfattr, which uses the most attributes: */
 #define nfs4_fattr_value_maxsz (1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \
-                               3 + 3 + 3 + nfs4_owner_maxsz + nfs4_group_maxsz))
+                               3 + 3 + 3 + nfs4_owner_maxsz + \
+                               nfs4_group_maxsz + decode_mdsthreshold_maxsz))
 #define nfs4_fattr_maxsz       (nfs4_fattr_bitmap_maxsz + \
                                nfs4_fattr_value_maxsz)
 #define decode_getattr_maxsz    (op_decode_hdr_maxsz + nfs4_fattr_maxsz)
@@ -321,8 +326,20 @@ static int nfs4_stat_to_errno(int);
                                     1 /* csr_flags */ + \
                                     decode_channel_attrs_maxsz + \
                                     decode_channel_attrs_maxsz)
+#define encode_bind_conn_to_session_maxsz  (op_encode_hdr_maxsz + \
+                                    /* bctsa_sessid */ \
+                                    XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
+                                    1 /* bctsa_dir */ + \
+                                    1 /* bctsa_use_conn_in_rdma_mode */)
+#define decode_bind_conn_to_session_maxsz  (op_decode_hdr_maxsz +      \
+                                    /* bctsr_sessid */ \
+                                    XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
+                                    1 /* bctsr_dir */ + \
+                                    1 /* bctsr_use_conn_in_rdma_mode */)
 #define encode_destroy_session_maxsz    (op_encode_hdr_maxsz + 4)
 #define decode_destroy_session_maxsz    (op_decode_hdr_maxsz)
+#define encode_destroy_clientid_maxsz   (op_encode_hdr_maxsz + 2)
+#define decode_destroy_clientid_maxsz   (op_decode_hdr_maxsz)
 #define encode_sequence_maxsz  (op_encode_hdr_maxsz + \
                                XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4)
 #define decode_sequence_maxsz  (op_decode_hdr_maxsz + \
@@ -421,30 +438,22 @@ static int nfs4_stat_to_errno(int);
 #define NFS4_enc_commit_sz     (compound_encode_hdr_maxsz + \
                                encode_sequence_maxsz + \
                                encode_putfh_maxsz + \
-                               encode_commit_maxsz + \
-                               encode_getattr_maxsz)
+                               encode_commit_maxsz)
 #define NFS4_dec_commit_sz     (compound_decode_hdr_maxsz + \
                                decode_sequence_maxsz + \
                                decode_putfh_maxsz + \
-                               decode_commit_maxsz + \
-                               decode_getattr_maxsz)
+                               decode_commit_maxsz)
 #define NFS4_enc_open_sz        (compound_encode_hdr_maxsz + \
                                encode_sequence_maxsz + \
                                encode_putfh_maxsz + \
-                               encode_savefh_maxsz + \
                                encode_open_maxsz + \
                                encode_getfh_maxsz + \
-                               encode_getattr_maxsz + \
-                               encode_restorefh_maxsz + \
                                encode_getattr_maxsz)
 #define NFS4_dec_open_sz        (compound_decode_hdr_maxsz + \
                                decode_sequence_maxsz + \
                                decode_putfh_maxsz + \
-                               decode_savefh_maxsz + \
                                decode_open_maxsz + \
                                decode_getfh_maxsz + \
-                               decode_getattr_maxsz + \
-                               decode_restorefh_maxsz + \
                                decode_getattr_maxsz)
 #define NFS4_enc_open_confirm_sz \
                                (compound_encode_hdr_maxsz + \
@@ -595,47 +604,37 @@ static int nfs4_stat_to_errno(int);
 #define NFS4_enc_remove_sz     (compound_encode_hdr_maxsz + \
                                encode_sequence_maxsz + \
                                encode_putfh_maxsz + \
-                               encode_remove_maxsz + \
-                               encode_getattr_maxsz)
+                               encode_remove_maxsz)
 #define NFS4_dec_remove_sz     (compound_decode_hdr_maxsz + \
                                decode_sequence_maxsz + \
                                decode_putfh_maxsz + \
-                               decode_remove_maxsz + \
-                               decode_getattr_maxsz)
+                               decode_remove_maxsz)
 #define NFS4_enc_rename_sz     (compound_encode_hdr_maxsz + \
                                encode_sequence_maxsz + \
                                encode_putfh_maxsz + \
                                encode_savefh_maxsz + \
                                encode_putfh_maxsz + \
-                               encode_rename_maxsz + \
-                               encode_getattr_maxsz + \
-                               encode_restorefh_maxsz + \
-                               encode_getattr_maxsz)
+                               encode_rename_maxsz)
 #define NFS4_dec_rename_sz     (compound_decode_hdr_maxsz + \
                                decode_sequence_maxsz + \
                                decode_putfh_maxsz + \
                                decode_savefh_maxsz + \
                                decode_putfh_maxsz + \
-                               decode_rename_maxsz + \
-                               decode_getattr_maxsz + \
-                               decode_restorefh_maxsz + \
-                               decode_getattr_maxsz)
+                               decode_rename_maxsz)
 #define NFS4_enc_link_sz       (compound_encode_hdr_maxsz + \
                                encode_sequence_maxsz + \
                                encode_putfh_maxsz + \
                                encode_savefh_maxsz + \
                                encode_putfh_maxsz + \
                                encode_link_maxsz + \
-                               decode_getattr_maxsz + \
                                encode_restorefh_maxsz + \
-                               decode_getattr_maxsz)
+                               encode_getattr_maxsz)
 #define NFS4_dec_link_sz       (compound_decode_hdr_maxsz + \
                                decode_sequence_maxsz + \
                                decode_putfh_maxsz + \
                                decode_savefh_maxsz + \
                                decode_putfh_maxsz + \
                                decode_link_maxsz + \
-                               decode_getattr_maxsz + \
                                decode_restorefh_maxsz + \
                                decode_getattr_maxsz)
 #define NFS4_enc_symlink_sz    (compound_encode_hdr_maxsz + \
@@ -653,20 +652,14 @@ static int nfs4_stat_to_errno(int);
 #define NFS4_enc_create_sz     (compound_encode_hdr_maxsz + \
                                encode_sequence_maxsz + \
                                encode_putfh_maxsz + \
-                               encode_savefh_maxsz + \
                                encode_create_maxsz + \
                                encode_getfh_maxsz + \
-                               encode_getattr_maxsz + \
-                               encode_restorefh_maxsz + \
                                encode_getattr_maxsz)
 #define NFS4_dec_create_sz     (compound_decode_hdr_maxsz + \
                                decode_sequence_maxsz + \
                                decode_putfh_maxsz + \
-                               decode_savefh_maxsz + \
                                decode_create_maxsz + \
                                decode_getfh_maxsz + \
-                               decode_getattr_maxsz + \
-                               decode_restorefh_maxsz + \
                                decode_getattr_maxsz)
 #define NFS4_enc_pathconf_sz   (compound_encode_hdr_maxsz + \
                                encode_sequence_maxsz + \
@@ -738,6 +731,12 @@ static int nfs4_stat_to_errno(int);
                                decode_putfh_maxsz + \
                                decode_secinfo_maxsz)
 #if defined(CONFIG_NFS_V4_1)
+#define NFS4_enc_bind_conn_to_session_sz \
+                               (compound_encode_hdr_maxsz + \
+                                encode_bind_conn_to_session_maxsz)
+#define NFS4_dec_bind_conn_to_session_sz \
+                               (compound_decode_hdr_maxsz + \
+                                decode_bind_conn_to_session_maxsz)
 #define NFS4_enc_exchange_id_sz \
                                (compound_encode_hdr_maxsz + \
                                 encode_exchange_id_maxsz)
@@ -754,6 +753,10 @@ static int nfs4_stat_to_errno(int);
                                         encode_destroy_session_maxsz)
 #define NFS4_dec_destroy_session_sz    (compound_decode_hdr_maxsz + \
                                         decode_destroy_session_maxsz)
+#define NFS4_enc_destroy_clientid_sz   (compound_encode_hdr_maxsz + \
+                                        encode_destroy_clientid_maxsz)
+#define NFS4_dec_destroy_clientid_sz   (compound_decode_hdr_maxsz + \
+                                        decode_destroy_clientid_maxsz)
 #define NFS4_enc_sequence_sz \
                                (compound_decode_hdr_maxsz + \
                                 encode_sequence_maxsz)
@@ -1103,7 +1106,7 @@ static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg
        encode_nfs4_stateid(xdr, arg->stateid);
 }
 
-static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr)
+static void encode_commit(struct xdr_stream *xdr, const struct nfs_commitargs *args, struct compound_hdr *hdr)
 {
        __be32 *p;
 
@@ -1194,6 +1197,16 @@ static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct c
                           bitmask[1] & nfs4_fattr_bitmap[1], hdr);
 }
 
+static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask,
+                                struct compound_hdr *hdr)
+{
+       encode_getattr_three(xdr,
+                            bitmask[0] & nfs4_fattr_bitmap[0],
+                            bitmask[1] & nfs4_fattr_bitmap[1],
+                            bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD,
+                            hdr);
+}
+
 static void encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
 {
        encode_getattr_three(xdr,
@@ -1678,6 +1691,20 @@ static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, stru
 
 #if defined(CONFIG_NFS_V4_1)
 /* NFSv4.1 operations */
+static void encode_bind_conn_to_session(struct xdr_stream *xdr,
+                                  struct nfs4_session *session,
+                                  struct compound_hdr *hdr)
+{
+       __be32 *p;
+
+       encode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION,
+               decode_bind_conn_to_session_maxsz, hdr);
+       encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+       p = xdr_reserve_space(xdr, 8);
+       *p++ = cpu_to_be32(NFS4_CDFC4_BACK_OR_BOTH);
+       *p = 0; /* use_conn_in_rdma_mode = False */
+}
+
 static void encode_exchange_id(struct xdr_stream *xdr,
                               struct nfs41_exchange_id_args *args,
                               struct compound_hdr *hdr)
@@ -1726,6 +1753,7 @@ static void encode_create_session(struct xdr_stream *xdr,
        char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
        uint32_t len;
        struct nfs_client *clp = args->client;
+       struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
        u32 max_resp_sz_cached;
 
        /*
@@ -1767,7 +1795,7 @@ static void encode_create_session(struct xdr_stream *xdr,
        *p++ = cpu_to_be32(RPC_AUTH_UNIX);                      /* auth_sys */
 
        /* authsys_parms rfc1831 */
-       *p++ = cpu_to_be32((u32)clp->cl_boot_time.tv_nsec);     /* stamp */
+       *p++ = (__be32)nn->boot_time.tv_nsec;           /* stamp */
        p = xdr_encode_opaque(p, machine_name, len);
        *p++ = cpu_to_be32(0);                          /* UID */
        *p++ = cpu_to_be32(0);                          /* GID */
@@ -1782,6 +1810,14 @@ static void encode_destroy_session(struct xdr_stream *xdr,
        encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
 }
 
+static void encode_destroy_clientid(struct xdr_stream *xdr,
+                                  uint64_t clientid,
+                                  struct compound_hdr *hdr)
+{
+       encode_op_hdr(xdr, OP_DESTROY_CLIENTID, decode_destroy_clientid_maxsz, hdr);
+       encode_uint64(xdr, clientid);
+}
+
 static void encode_reclaim_complete(struct xdr_stream *xdr,
                                    struct nfs41_reclaim_complete_args *args,
                                    struct compound_hdr *hdr)
@@ -2064,7 +2100,6 @@ static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
        encode_sequence(xdr, &args->seq_args, &hdr);
        encode_putfh(xdr, args->fh, &hdr);
        encode_remove(xdr, &args->name, &hdr);
-       encode_getfattr(xdr, args->bitmask, &hdr);
        encode_nops(&hdr);
 }
 
@@ -2084,9 +2119,6 @@ static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
        encode_savefh(xdr, &hdr);
        encode_putfh(xdr, args->new_dir, &hdr);
        encode_rename(xdr, args->old_name, args->new_name, &hdr);
-       encode_getfattr(xdr, args->bitmask, &hdr);
-       encode_restorefh(xdr, &hdr);
-       encode_getfattr(xdr, args->bitmask, &hdr);
        encode_nops(&hdr);
 }
 
@@ -2106,7 +2138,6 @@ static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
        encode_savefh(xdr, &hdr);
        encode_putfh(xdr, args->dir_fh, &hdr);
        encode_link(xdr, args->name, &hdr);
-       encode_getfattr(xdr, args->bitmask, &hdr);
        encode_restorefh(xdr, &hdr);
        encode_getfattr(xdr, args->bitmask, &hdr);
        encode_nops(&hdr);
@@ -2125,12 +2156,9 @@ static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
        encode_compound_hdr(xdr, req, &hdr);
        encode_sequence(xdr, &args->seq_args, &hdr);
        encode_putfh(xdr, args->dir_fh, &hdr);
-       encode_savefh(xdr, &hdr);
        encode_create(xdr, args, &hdr);
        encode_getfh(xdr, &hdr);
        encode_getfattr(xdr, args->bitmask, &hdr);
-       encode_restorefh(xdr, &hdr);
-       encode_getfattr(xdr, args->bitmask, &hdr);
        encode_nops(&hdr);
 }
 
@@ -2191,12 +2219,9 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
        encode_compound_hdr(xdr, req, &hdr);
        encode_sequence(xdr, &args->seq_args, &hdr);
        encode_putfh(xdr, args->fh, &hdr);
-       encode_savefh(xdr, &hdr);
        encode_open(xdr, args, &hdr);
        encode_getfh(xdr, &hdr);
-       encode_getfattr(xdr, args->bitmask, &hdr);
-       encode_restorefh(xdr, &hdr);
-       encode_getfattr(xdr, args->dir_bitmask, &hdr);
+       encode_getfattr_open(xdr, args->bitmask, &hdr);
        encode_nops(&hdr);
 }
 
@@ -2448,7 +2473,7 @@ static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
  *  a COMMIT request
  */
 static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
-                               struct nfs_writeargs *args)
+                               struct nfs_commitargs *args)
 {
        struct compound_hdr hdr = {
                .minorversion = nfs4_xdr_minorversion(&args->seq_args),
@@ -2458,8 +2483,6 @@ static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
        encode_sequence(xdr, &args->seq_args, &hdr);
        encode_putfh(xdr, args->fh, &hdr);
        encode_commit(xdr, args, &hdr);
-       if (args->bitmask)
-               encode_getfattr(xdr, args->bitmask, &hdr);
        encode_nops(&hdr);
 }
 
@@ -2602,8 +2625,8 @@ static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
        encode_compound_hdr(xdr, req, &hdr);
        encode_sequence(xdr, &args->seq_args, &hdr);
        encode_putfh(xdr, args->fhandle, &hdr);
-       encode_delegreturn(xdr, args->stateid, &hdr);
        encode_getfattr(xdr, args->bitmask, &hdr);
+       encode_delegreturn(xdr, args->stateid, &hdr);
        encode_nops(&hdr);
 }
 
@@ -2650,6 +2673,22 @@ static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req,
 }
 
 #if defined(CONFIG_NFS_V4_1)
+/*
+ * BIND_CONN_TO_SESSION request
+ */
+static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req,
+                               struct xdr_stream *xdr,
+                               struct nfs_client *clp)
+{
+       struct compound_hdr hdr = {
+               .minorversion = clp->cl_mvops->minor_version,
+       };
+
+       encode_compound_hdr(xdr, req, &hdr);
+       encode_bind_conn_to_session(xdr, clp->cl_session, &hdr);
+       encode_nops(&hdr);
+}
+
 /*
  * EXCHANGE_ID request
  */
@@ -2698,6 +2737,22 @@ static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
        encode_nops(&hdr);
 }
 
+/*
+ * a DESTROY_CLIENTID request
+ */
+static void nfs4_xdr_enc_destroy_clientid(struct rpc_rqst *req,
+                                        struct xdr_stream *xdr,
+                                        struct nfs_client *clp)
+{
+       struct compound_hdr hdr = {
+               .minorversion = clp->cl_mvops->minor_version,
+       };
+
+       encode_compound_hdr(xdr, req, &hdr);
+       encode_destroy_clientid(xdr, clp->cl_clientid, &hdr);
+       encode_nops(&hdr);
+}
+
 /*
  * a SEQUENCE request
  */
@@ -4102,7 +4157,7 @@ static int decode_verifier(struct xdr_stream *xdr, void *verifier)
        return decode_opaque_fixed(xdr, verifier, NFS4_VERIFIER_SIZE);
 }
 
-static int decode_commit(struct xdr_stream *xdr, struct nfs_writeres *res)
+static int decode_commit(struct xdr_stream *xdr, struct nfs_commitres *res)
 {
        int status;
 
@@ -4220,6 +4275,110 @@ xdr_error:
        return status;
 }
 
+static int decode_threshold_hint(struct xdr_stream *xdr,
+                                 uint32_t *bitmap,
+                                 uint64_t *res,
+                                 uint32_t hint_bit)
+{
+       __be32 *p;
+
+       *res = 0;
+       if (likely(bitmap[0] & hint_bit)) {
+               p = xdr_inline_decode(xdr, 8);
+               if (unlikely(!p))
+                       goto out_overflow;
+               xdr_decode_hyper(p, res);
+       }
+       return 0;
+out_overflow:
+       print_overflow_msg(__func__, xdr);
+       return -EIO;
+}
+
+static int decode_first_threshold_item4(struct xdr_stream *xdr,
+                                       struct nfs4_threshold *res)
+{
+       __be32 *p, *savep;
+       uint32_t bitmap[3] = {0,}, attrlen;
+       int status;
+
+       /* layout type */
+       p = xdr_inline_decode(xdr, 4);
+       if (unlikely(!p)) {
+               print_overflow_msg(__func__, xdr);
+               return -EIO;
+       }
+       res->l_type = be32_to_cpup(p);
+
+       /* thi_hintset bitmap */
+       status = decode_attr_bitmap(xdr, bitmap);
+       if (status < 0)
+               goto xdr_error;
+
+       /* thi_hintlist length */
+       status = decode_attr_length(xdr, &attrlen, &savep);
+       if (status < 0)
+               goto xdr_error;
+       /* thi_hintlist */
+       status = decode_threshold_hint(xdr, bitmap, &res->rd_sz, THRESHOLD_RD);
+       if (status < 0)
+               goto xdr_error;
+       status = decode_threshold_hint(xdr, bitmap, &res->wr_sz, THRESHOLD_WR);
+       if (status < 0)
+               goto xdr_error;
+       status = decode_threshold_hint(xdr, bitmap, &res->rd_io_sz,
+                                      THRESHOLD_RD_IO);
+       if (status < 0)
+               goto xdr_error;
+       status = decode_threshold_hint(xdr, bitmap, &res->wr_io_sz,
+                                      THRESHOLD_WR_IO);
+       if (status < 0)
+               goto xdr_error;
+
+       status = verify_attr_len(xdr, savep, attrlen);
+       res->bm = bitmap[0];
+
+       dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
+                __func__, res->bm, res->rd_sz, res->wr_sz, res->rd_io_sz,
+               res->wr_io_sz);
+xdr_error:
+       dprintk("%s ret=%d!\n", __func__, status);
+       return status;
+}
+
+/*
+ * Thresholds on pNFS direct I/O vrs MDS I/O
+ */
+static int decode_attr_mdsthreshold(struct xdr_stream *xdr,
+                                   uint32_t *bitmap,
+                                   struct nfs4_threshold *res)
+{
+       __be32 *p;
+       int status = 0;
+       uint32_t num;
+
+       if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U)))
+               return -EIO;
+       if (likely(bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD)) {
+               p = xdr_inline_decode(xdr, 4);
+               if (unlikely(!p))
+                       goto out_overflow;
+               num = be32_to_cpup(p);
+               if (num == 0)
+                       return 0;
+               if (num > 1)
+                       printk(KERN_INFO "%s: Warning: Multiple pNFS layout "
+                               "drivers per filesystem not supported\n",
+                               __func__);
+
+               status = decode_first_threshold_item4(xdr, res);
+       }
+       return status;
+out_overflow:
+       print_overflow_msg(__func__, xdr);
+       return -EIO;
+}
+
 static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
                struct nfs_fattr *fattr, struct nfs_fh *fh,
                struct nfs4_fs_locations *fs_loc,
@@ -4326,6 +4485,10 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
                goto xdr_error;
        fattr->valid |= status;
 
+       status = decode_attr_mdsthreshold(xdr, bitmap, fattr->mdsthreshold);
+       if (status < 0)
+               goto xdr_error;
+
 xdr_error:
        dprintk("%s: xdr returned %d\n", __func__, -status);
        return status;
@@ -5156,7 +5319,6 @@ static int decode_exchange_id(struct xdr_stream *xdr,
        uint32_t dummy;
        char *dummy_str;
        int status;
-       struct nfs_client *clp = res->client;
        uint32_t impl_id_count;
 
        status = decode_op_hdr(xdr, OP_EXCHANGE_ID);
@@ -5166,36 +5328,39 @@ static int decode_exchange_id(struct xdr_stream *xdr,
        p = xdr_inline_decode(xdr, 8);
        if (unlikely(!p))
                goto out_overflow;
-       xdr_decode_hyper(p, &clp->cl_clientid);
+       xdr_decode_hyper(p, &res->clientid);
        p = xdr_inline_decode(xdr, 12);
        if (unlikely(!p))
                goto out_overflow;
-       clp->cl_seqid = be32_to_cpup(p++);
-       clp->cl_exchange_flags = be32_to_cpup(p++);
+       res->seqid = be32_to_cpup(p++);
+       res->flags = be32_to_cpup(p++);
 
        /* We ask for SP4_NONE */
        dummy = be32_to_cpup(p);
        if (dummy != SP4_NONE)
                return -EIO;
 
-       /* Throw away minor_id */
+       /* server_owner4.so_minor_id */
        p = xdr_inline_decode(xdr, 8);
        if (unlikely(!p))
                goto out_overflow;
+       p = xdr_decode_hyper(p, &res->server_owner->minor_id);
 
-       /* Throw away Major id */
+       /* server_owner4.so_major_id */
        status = decode_opaque_inline(xdr, &dummy, &dummy_str);
        if (unlikely(status))
                return status;
+       if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
+               return -EIO;
+       memcpy(res->server_owner->major_id, dummy_str, dummy);
+       res->server_owner->major_id_sz = dummy;
 
-       /* Save server_scope */
+       /* server_scope4 */
        status = decode_opaque_inline(xdr, &dummy, &dummy_str);
        if (unlikely(status))
                return status;
-
        if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
                return -EIO;
-
        memcpy(res->server_scope->server_scope, dummy_str, dummy);
        res->server_scope->server_scope_sz = dummy;
 
@@ -5276,6 +5441,37 @@ static int decode_sessionid(struct xdr_stream *xdr, struct nfs4_sessionid *sid)
        return decode_opaque_fixed(xdr, sid->data, NFS4_MAX_SESSIONID_LEN);
 }
 
+static int decode_bind_conn_to_session(struct xdr_stream *xdr,
+                               struct nfs41_bind_conn_to_session_res *res)
+{
+       __be32 *p;
+       int status;
+
+       status = decode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION);
+       if (!status)
+               status = decode_sessionid(xdr, &res->session->sess_id);
+       if (unlikely(status))
+               return status;
+
+       /* dir flags, rdma mode bool */
+       p = xdr_inline_decode(xdr, 8);
+       if (unlikely(!p))
+               goto out_overflow;
+
+       res->dir = be32_to_cpup(p++);
+       if (res->dir == 0 || res->dir > NFS4_CDFS4_BOTH)
+               return -EIO;
+       if (be32_to_cpup(p) == 0)
+               res->use_conn_in_rdma_mode = false;
+       else
+               res->use_conn_in_rdma_mode = true;
+
+       return 0;
+out_overflow:
+       print_overflow_msg(__func__, xdr);
+       return -EIO;
+}
+
 static int decode_create_session(struct xdr_stream *xdr,
                                 struct nfs41_create_session_res *res)
 {
@@ -5312,6 +5508,11 @@ static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
        return decode_op_hdr(xdr, OP_DESTROY_SESSION);
 }
 
+static int decode_destroy_clientid(struct xdr_stream *xdr, void *dummy)
+{
+       return decode_op_hdr(xdr, OP_DESTROY_CLIENTID);
+}
+
 static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy)
 {
        return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE);
@@ -5800,9 +6001,6 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        if (status)
                goto out;
        status = decode_remove(xdr, &res->cinfo);
-       if (status)
-               goto out;
-       decode_getfattr(xdr, res->dir_attr, res->server);
 out:
        return status;
 }
@@ -5832,15 +6030,6 @@ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        if (status)
                goto out;
        status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo);
-       if (status)
-               goto out;
-       /* Current FH is target directory */
-       if (decode_getfattr(xdr, res->new_fattr, res->server))
-               goto out;
-       status = decode_restorefh(xdr);
-       if (status)
-               goto out;
-       decode_getfattr(xdr, res->old_fattr, res->server);
 out:
        return status;
 }
@@ -5876,8 +6065,6 @@ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
         * Note order: OP_LINK leaves the directory as the current
         *             filehandle.
         */
-       if (decode_getfattr(xdr, res->dir_attr, res->server))
-               goto out;
        status = decode_restorefh(xdr);
        if (status)
                goto out;
@@ -5902,9 +6089,6 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        if (status)
                goto out;
        status = decode_putfh(xdr);
-       if (status)
-               goto out;
-       status = decode_savefh(xdr);
        if (status)
                goto out;
        status = decode_create(xdr, &res->dir_cinfo);
@@ -5913,12 +6097,7 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        status = decode_getfh(xdr, res->fh);
        if (status)
                goto out;
-       if (decode_getfattr(xdr, res->fattr, res->server))
-               goto out;
-       status = decode_restorefh(xdr);
-       if (status)
-               goto out;
-       decode_getfattr(xdr, res->dir_fattr, res->server);
+       decode_getfattr(xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -6073,9 +6252,6 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        if (status)
                goto out;
        status = decode_putfh(xdr);
-       if (status)
-               goto out;
-       status = decode_savefh(xdr);
        if (status)
                goto out;
        status = decode_open(xdr, res);
@@ -6083,11 +6259,7 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
                goto out;
        if (decode_getfh(xdr, &res->fh) != 0)
                goto out;
-       if (decode_getfattr(xdr, res->f_attr, res->server) != 0)
-               goto out;
-       if (decode_restorefh(xdr) != 0)
-               goto out;
-       decode_getfattr(xdr, res->dir_attr, res->server);
+       decode_getfattr(xdr, res->f_attr, res->server);
 out:
        return status;
 }
@@ -6353,7 +6525,7 @@ out:
  * Decode COMMIT response
  */
 static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
-                              struct nfs_writeres *res)
+                              struct nfs_commitres *res)
 {
        struct compound_hdr hdr;
        int status;
@@ -6368,10 +6540,6 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        if (status)
                goto out;
        status = decode_commit(xdr, res);
-       if (status)
-               goto out;
-       if (res->fattr)
-               decode_getfattr(xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -6527,10 +6695,10 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
        status = decode_putfh(xdr);
        if (status != 0)
                goto out;
-       status = decode_delegreturn(xdr);
+       status = decode_getfattr(xdr, res->fattr, res->server);
        if (status != 0)
                goto out;
-       decode_getfattr(xdr, res->fattr, res->server);
+       status = decode_delegreturn(xdr);
 out:
        return status;
 }
@@ -6590,6 +6758,22 @@ out:
 }
 
 #if defined(CONFIG_NFS_V4_1)
+/*
+ * Decode BIND_CONN_TO_SESSION response
+ */
+static int nfs4_xdr_dec_bind_conn_to_session(struct rpc_rqst *rqstp,
+                                       struct xdr_stream *xdr,
+                                       void *res)
+{
+       struct compound_hdr hdr;
+       int status;
+
+       status = decode_compound_hdr(xdr, &hdr);
+       if (!status)
+               status = decode_bind_conn_to_session(xdr, res);
+       return status;
+}
+
 /*
  * Decode EXCHANGE_ID response
  */
@@ -6638,6 +6822,22 @@ static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp,
        return status;
 }
 
+/*
+ * Decode DESTROY_CLIENTID response
+ */
+static int nfs4_xdr_dec_destroy_clientid(struct rpc_rqst *rqstp,
+                                       struct xdr_stream *xdr,
+                                       void *res)
+{
+       struct compound_hdr hdr;
+       int status;
+
+       status = decode_compound_hdr(xdr, &hdr);
+       if (!status)
+               status = decode_destroy_clientid(xdr, res);
+       return status;
+}
+
 /*
  * Decode SEQUENCE response
  */
@@ -7085,6 +7285,9 @@ struct rpc_procinfo       nfs4_procedures[] = {
        PROC(TEST_STATEID,      enc_test_stateid,       dec_test_stateid),
        PROC(FREE_STATEID,      enc_free_stateid,       dec_free_stateid),
        PROC(GETDEVICELIST,     enc_getdevicelist,      dec_getdevicelist),
+       PROC(BIND_CONN_TO_SESSION,
+                       enc_bind_conn_to_session, dec_bind_conn_to_session),
+       PROC(DESTROY_CLIENTID,  enc_destroy_clientid,   dec_destroy_clientid),
 #endif /* CONFIG_NFS_V4_1 */
 };
 
index 4bff4a3dab4602ffa8fe1f48df5d3adc3e8709c3..b47277baebab92930bee6c1fbac445fd8978a6b9 100644 (file)
@@ -211,7 +211,7 @@ static void copy_single_comp(struct ore_components *oc, unsigned c,
        memcpy(ocomp->cred, src_comp->oc_cap.cred, sizeof(ocomp->cred));
 }
 
-int __alloc_objio_seg(unsigned numdevs, gfp_t gfp_flags,
+static int __alloc_objio_seg(unsigned numdevs, gfp_t gfp_flags,
                       struct objio_segment **pseg)
 {
 /*     This is the in memory structure of the objio_segment
@@ -440,11 +440,12 @@ static void _read_done(struct ore_io_state *ios, void *private)
 
 int objio_read_pagelist(struct nfs_read_data *rdata)
 {
+       struct nfs_pgio_header *hdr = rdata->header;
        struct objio_state *objios;
        int ret;
 
-       ret = objio_alloc_io_state(NFS_I(rdata->inode)->layout, true,
-                       rdata->lseg, rdata->args.pages, rdata->args.pgbase,
+       ret = objio_alloc_io_state(NFS_I(hdr->inode)->layout, true,
+                       hdr->lseg, rdata->args.pages, rdata->args.pgbase,
                        rdata->args.offset, rdata->args.count, rdata,
                        GFP_KERNEL, &objios);
        if (unlikely(ret))
@@ -483,12 +484,12 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
 {
        struct objio_state *objios = priv;
        struct nfs_write_data *wdata = objios->oir.rpcdata;
+       struct address_space *mapping = wdata->header->inode->i_mapping;
        pgoff_t index = offset / PAGE_SIZE;
-       struct page *page = find_get_page(wdata->inode->i_mapping, index);
+       struct page *page = find_get_page(mapping, index);
 
        if (!page) {
-               page = find_or_create_page(wdata->inode->i_mapping,
-                                               index, GFP_NOFS);
+               page = find_or_create_page(mapping, index, GFP_NOFS);
                if (unlikely(!page)) {
                        dprintk("%s: grab_cache_page Failed index=0x%lx\n",
                                __func__, index);
@@ -518,11 +519,12 @@ static const struct _ore_r4w_op _r4w_op = {
 
 int objio_write_pagelist(struct nfs_write_data *wdata, int how)
 {
+       struct nfs_pgio_header *hdr = wdata->header;
        struct objio_state *objios;
        int ret;
 
-       ret = objio_alloc_io_state(NFS_I(wdata->inode)->layout, false,
-                       wdata->lseg, wdata->args.pages, wdata->args.pgbase,
+       ret = objio_alloc_io_state(NFS_I(hdr->inode)->layout, false,
+                       hdr->lseg, wdata->args.pages, wdata->args.pgbase,
                        wdata->args.offset, wdata->args.count, wdata, GFP_NOFS,
                        &objios);
        if (unlikely(ret))
index 595c5fc21a19d15efaab48bff059336d7762c1b3..8746135453011dc70d30ebbd5b70e2b813f7b15d 100644 (file)
@@ -258,7 +258,7 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
        if (status >= 0)
                rdata->res.count = status;
        else
-               rdata->pnfs_error = status;
+               rdata->header->pnfs_error = status;
        objlayout_iodone(oir);
        /* must not use oir after this point */
 
@@ -279,12 +279,14 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
 enum pnfs_try_status
 objlayout_read_pagelist(struct nfs_read_data *rdata)
 {
+       struct nfs_pgio_header *hdr = rdata->header;
+       struct inode *inode = hdr->inode;
        loff_t offset = rdata->args.offset;
        size_t count = rdata->args.count;
        int err;
        loff_t eof;
 
-       eof = i_size_read(rdata->inode);
+       eof = i_size_read(inode);
        if (unlikely(offset + count > eof)) {
                if (offset >= eof) {
                        err = 0;
@@ -297,17 +299,17 @@ objlayout_read_pagelist(struct nfs_read_data *rdata)
        }
 
        rdata->res.eof = (offset + count) >= eof;
-       _fix_verify_io_params(rdata->lseg, &rdata->args.pages,
+       _fix_verify_io_params(hdr->lseg, &rdata->args.pages,
                              &rdata->args.pgbase,
                              rdata->args.offset, rdata->args.count);
 
        dprintk("%s: inode(%lx) offset 0x%llx count 0x%Zx eof=%d\n",
-               __func__, rdata->inode->i_ino, offset, count, rdata->res.eof);
+               __func__, inode->i_ino, offset, count, rdata->res.eof);
 
        err = objio_read_pagelist(rdata);
  out:
        if (unlikely(err)) {
-               rdata->pnfs_error = err;
+               hdr->pnfs_error = err;
                dprintk("%s: Returned Error %d\n", __func__, err);
                return PNFS_NOT_ATTEMPTED;
        }
@@ -340,7 +342,7 @@ objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
                wdata->res.count = status;
                wdata->verf.committed = oir->committed;
        } else {
-               wdata->pnfs_error = status;
+               wdata->header->pnfs_error = status;
        }
        objlayout_iodone(oir);
        /* must not use oir after this point */
@@ -363,15 +365,16 @@ enum pnfs_try_status
 objlayout_write_pagelist(struct nfs_write_data *wdata,
                         int how)
 {
+       struct nfs_pgio_header *hdr = wdata->header;
        int err;
 
-       _fix_verify_io_params(wdata->lseg, &wdata->args.pages,
+       _fix_verify_io_params(hdr->lseg, &wdata->args.pages,
                              &wdata->args.pgbase,
                              wdata->args.offset, wdata->args.count);
 
        err = objio_write_pagelist(wdata, how);
        if (unlikely(err)) {
-               wdata->pnfs_error = err;
+               hdr->pnfs_error = err;
                dprintk("%s: Returned Error %d\n", __func__, err);
                return PNFS_NOT_ATTEMPTED;
        }
index d21fceaa9f6263fecff450506653c21ba055872f..aed913c833f422bbf6a88e2726be5eec6d9bbc40 100644 (file)
 
 static struct kmem_cache *nfs_page_cachep;
 
+bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
+{
+       p->npages = pagecount;
+       if (pagecount <= ARRAY_SIZE(p->page_array))
+               p->pagevec = p->page_array;
+       else {
+               p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+               if (!p->pagevec)
+                       p->npages = 0;
+       }
+       return p->pagevec != NULL;
+}
+
+void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
+                      struct nfs_pgio_header *hdr,
+                      void (*release)(struct nfs_pgio_header *hdr))
+{
+       hdr->req = nfs_list_entry(desc->pg_list.next);
+       hdr->inode = desc->pg_inode;
+       hdr->cred = hdr->req->wb_context->cred;
+       hdr->io_start = req_offset(hdr->req);
+       hdr->good_bytes = desc->pg_count;
+       hdr->dreq = desc->pg_dreq;
+       hdr->release = release;
+       hdr->completion_ops = desc->pg_completion_ops;
+       if (hdr->completion_ops->init_hdr)
+               hdr->completion_ops->init_hdr(hdr);
+}
+
+void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
+{
+       spin_lock(&hdr->lock);
+       if (pos < hdr->io_start + hdr->good_bytes) {
+               set_bit(NFS_IOHDR_ERROR, &hdr->flags);
+               clear_bit(NFS_IOHDR_EOF, &hdr->flags);
+               hdr->good_bytes = pos - hdr->io_start;
+               hdr->error = error;
+       }
+       spin_unlock(&hdr->lock);
+}
+
 static inline struct nfs_page *
 nfs_page_alloc(void)
 {
@@ -76,12 +117,8 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
         * long write-back delay. This will be adjusted in
         * update_nfs_request below if the region is not locked. */
        req->wb_page    = page;
-       atomic_set(&req->wb_complete, 0);
        req->wb_index   = page->index;
        page_cache_get(page);
-       BUG_ON(PagePrivate(page));
-       BUG_ON(!PageLocked(page));
-       BUG_ON(page->mapping->host != inode);
        req->wb_offset  = offset;
        req->wb_pgbase  = offset;
        req->wb_bytes   = count;
@@ -104,6 +141,15 @@ void nfs_unlock_request(struct nfs_page *req)
        clear_bit(PG_BUSY, &req->wb_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&req->wb_flags, PG_BUSY);
+}
+
+/**
+ * nfs_unlock_and_release_request - Unlock request and release the nfs_page
+ * @req:
+ */
+void nfs_unlock_and_release_request(struct nfs_page *req)
+{
+       nfs_unlock_request(req);
        nfs_release_request(req);
 }
 
@@ -203,6 +249,7 @@ EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
 void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
                     struct inode *inode,
                     const struct nfs_pageio_ops *pg_ops,
+                    const struct nfs_pgio_completion_ops *compl_ops,
                     size_t bsize,
                     int io_flags)
 {
@@ -215,9 +262,11 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
        desc->pg_recoalesce = 0;
        desc->pg_inode = inode;
        desc->pg_ops = pg_ops;
+       desc->pg_completion_ops = compl_ops;
        desc->pg_ioflags = io_flags;
        desc->pg_error = 0;
        desc->pg_lseg = NULL;
+       desc->pg_dreq = NULL;
 }
 
 /**
@@ -241,12 +290,12 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
                return false;
        if (req->wb_context->state != prev->wb_context->state)
                return false;
-       if (req->wb_index != (prev->wb_index + 1))
-               return false;
        if (req->wb_pgbase != 0)
                return false;
        if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
                return false;
+       if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
+               return false;
        return pgio->pg_ops->pg_test(pgio, prev, req);
 }
 
index 38512bcd2e98b4c82e3b03e2592061c06897abe5..b8323aa7b54384af8f51b84b3077d98b8f22d951 100644 (file)
@@ -395,6 +395,9 @@ mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
        dprintk("%s:Begin lo %p\n", __func__, lo);
 
        if (list_empty(&lo->plh_segs)) {
+               /* Reset MDS Threshold I/O counters */
+               NFS_I(lo->plh_inode)->write_io = 0;
+               NFS_I(lo->plh_inode)->read_io = 0;
                if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
                        put_layout_hdr_locked(lo);
                return 0;
@@ -455,6 +458,7 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
        spin_unlock(&nfsi->vfs_inode.i_lock);
        pnfs_free_lseg_list(&tmp_list);
 }
+EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
 
 /*
  * Called by the state manger to remove all layouts established under an
@@ -692,6 +696,7 @@ out:
        dprintk("<-- %s status: %d\n", __func__, status);
        return status;
 }
+EXPORT_SYMBOL_GPL(_pnfs_return_layout);
 
 bool pnfs_roc(struct inode *ino)
 {
@@ -930,6 +935,81 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
        return ret;
 }
 
+/*
+ * Use mdsthreshold hints set at each OPEN to determine if I/O should go
+ * to the MDS or over pNFS
+ *
+ * The nfs_inode read_io and write_io fields are cumulative counters reset
+ * when there are no layout segments. Note that in pnfs_update_layout iomode
+ * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
+ * WRITE request.
+ *
+ * A return of true means use MDS I/O.
+ *
+ * From rfc 5661:
+ * If a file's size is smaller than the file size threshold, data accesses
+ * SHOULD be sent to the metadata server.  If an I/O request has a length that
+ * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
+ * server.  If both file size and I/O size are provided, the client SHOULD
+ * reach or exceed both thresholds before sending its read or write
+ * requests to the data server.
+ */
+static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
+                                    struct inode *ino, int iomode)
+{
+       struct nfs4_threshold *t = ctx->mdsthreshold;
+       struct nfs_inode *nfsi = NFS_I(ino);
+       loff_t fsize = i_size_read(ino);
+       bool size = false, size_set = false, io = false, io_set = false, ret = false;
+
+       if (t == NULL)
+               return ret;
+
+       dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
+               __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
+
+       switch (iomode) {
+       case IOMODE_READ:
+               if (t->bm & THRESHOLD_RD) {
+                       dprintk("%s fsize %llu\n", __func__, fsize);
+                       size_set = true;
+                       if (fsize < t->rd_sz)
+                               size = true;
+               }
+               if (t->bm & THRESHOLD_RD_IO) {
+                       dprintk("%s nfsi->read_io %llu\n", __func__,
+                               nfsi->read_io);
+                       io_set = true;
+                       if (nfsi->read_io < t->rd_io_sz)
+                               io = true;
+               }
+               break;
+       case IOMODE_RW:
+               if (t->bm & THRESHOLD_WR) {
+                       dprintk("%s fsize %llu\n", __func__, fsize);
+                       size_set = true;
+                       if (fsize < t->wr_sz)
+                               size = true;
+               }
+               if (t->bm & THRESHOLD_WR_IO) {
+                       dprintk("%s nfsi->write_io %llu\n", __func__,
+                               nfsi->write_io);
+                       io_set = true;
+                       if (nfsi->write_io < t->wr_io_sz)
+                               io = true;
+               }
+               break;
+       }
+       if (size_set && io_set) {
+               if (size && io)
+                       ret = true;
+       } else if (size || io)
+               ret = true;
+
+       dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
+       return ret;
+}
+
 /*
  * Layout segment is retreived from the server if not cached.
  * The appropriate layout segment is referenced and returned to the caller.
@@ -957,6 +1037,10 @@ pnfs_update_layout(struct inode *ino,
 
        if (!pnfs_enabled_sb(NFS_SERVER(ino)))
                return NULL;
+
+       if (pnfs_within_mdsthreshold(ctx, ino, iomode))
+               return NULL;
+
        spin_lock(&ino->i_lock);
        lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
        if (lo == NULL) {
@@ -1082,6 +1166,10 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r
 {
        BUG_ON(pgio->pg_lseg != NULL);
 
+       if (req->wb_offset != req->wb_pgbase) {
+               nfs_pageio_reset_read_mds(pgio);
+               return;
+       }
        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                           req->wb_context,
                                           req_offset(req),
@@ -1100,6 +1188,10 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *
 {
        BUG_ON(pgio->pg_lseg != NULL);
 
+       if (req->wb_offset != req->wb_pgbase) {
+               nfs_pageio_reset_write_mds(pgio);
+               return;
+       }
        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                           req->wb_context,
                                           req_offset(req),
@@ -1113,26 +1205,31 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
 
 bool
-pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
+pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
+                     const struct nfs_pgio_completion_ops *compl_ops)
 {
        struct nfs_server *server = NFS_SERVER(inode);
        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
 
        if (ld == NULL)
                return false;
-       nfs_pageio_init(pgio, inode, ld->pg_read_ops, server->rsize, 0);
+       nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops,
+                       server->rsize, 0);
        return true;
 }
 
 bool
-pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
+pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
+                      int ioflags,
+                      const struct nfs_pgio_completion_ops *compl_ops)
 {
        struct nfs_server *server = NFS_SERVER(inode);
        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
 
        if (ld == NULL)
                return false;
-       nfs_pageio_init(pgio, inode, ld->pg_write_ops, server->wsize, ioflags);
+       nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops,
+                       server->wsize, ioflags);
        return true;
 }
 
@@ -1162,13 +1259,15 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
 
-static int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head)
+int pnfs_write_done_resend_to_mds(struct inode *inode,
+                               struct list_head *head,
+                               const struct nfs_pgio_completion_ops *compl_ops)
 {
        struct nfs_pageio_descriptor pgio;
        LIST_HEAD(failed);
 
        /* Resend all requests through the MDS */
-       nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE);
+       nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE, compl_ops);
        while (!list_empty(head)) {
                struct nfs_page *req = nfs_list_entry(head->next);
 
@@ -1188,30 +1287,37 @@ static int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *
        }
        return 0;
 }
+EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
+
+static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
+{
+       struct nfs_pgio_header *hdr = data->header;
+
+       dprintk("pnfs write error = %d\n", hdr->pnfs_error);
+       if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
+           PNFS_LAYOUTRET_ON_ERROR) {
+               clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
+               pnfs_return_layout(hdr->inode);
+       }
+       if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
+               data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
+                                                       &hdr->pages,
+                                                       hdr->completion_ops);
+}
 
 /*
  * Called by non rpc-based layout drivers
  */
 void pnfs_ld_write_done(struct nfs_write_data *data)
 {
-       if (likely(!data->pnfs_error)) {
+       struct nfs_pgio_header *hdr = data->header;
+
+       if (!hdr->pnfs_error) {
                pnfs_set_layoutcommit(data);
-               data->mds_ops->rpc_call_done(&data->task, data);
-       } else {
-               dprintk("pnfs write error = %d\n", data->pnfs_error);
-               if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
-                                               PNFS_LAYOUTRET_ON_ERROR) {
-                       /* Don't lo_commit on error, Server will needs to
-                        * preform a file recovery.
-                        */
-                       clear_bit(NFS_INO_LAYOUTCOMMIT,
-                                 &NFS_I(data->inode)->flags);
-                       pnfs_return_layout(data->inode);
-               }
-               data->task.tk_status = pnfs_write_done_resend_to_mds(data->inode, &data->pages);
-       }
-       put_lseg(data->lseg);
-       data->mds_ops->rpc_release(data);
+               hdr->mds_ops->rpc_call_done(&data->task, data);
+       } else
+               pnfs_ld_handle_write_error(data);
+       hdr->mds_ops->rpc_release(data);
 }
 EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
 
@@ -1219,12 +1325,13 @@ static void
 pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
                struct nfs_write_data *data)
 {
-       list_splice_tail_init(&data->pages, &desc->pg_list);
-       if (data->req && list_empty(&data->req->wb_list))
-               nfs_list_add_request(data->req, &desc->pg_list);
-       nfs_pageio_reset_write_mds(desc);
-       desc->pg_recoalesce = 1;
-       put_lseg(data->lseg);
+       struct nfs_pgio_header *hdr = data->header;
+
+       if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+               list_splice_tail_init(&hdr->pages, &desc->pg_list);
+               nfs_pageio_reset_write_mds(desc);
+               desc->pg_recoalesce = 1;
+       }
        nfs_writedata_release(data);
 }
 
@@ -1234,23 +1341,18 @@ pnfs_try_to_write_data(struct nfs_write_data *wdata,
                        struct pnfs_layout_segment *lseg,
                        int how)
 {
-       struct inode *inode = wdata->inode;
+       struct nfs_pgio_header *hdr = wdata->header;
+       struct inode *inode = hdr->inode;
        enum pnfs_try_status trypnfs;
        struct nfs_server *nfss = NFS_SERVER(inode);
 
-       wdata->mds_ops = call_ops;
-       wdata->lseg = get_lseg(lseg);
+       hdr->mds_ops = call_ops;
 
        dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
                inode->i_ino, wdata->args.count, wdata->args.offset, how);
-
        trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
-       if (trypnfs == PNFS_NOT_ATTEMPTED) {
-               put_lseg(wdata->lseg);
-               wdata->lseg = NULL;
-       } else
+       if (trypnfs != PNFS_NOT_ATTEMPTED)
                nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
-
        dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
        return trypnfs;
 }
@@ -1266,7 +1368,7 @@ pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *he
        while (!list_empty(head)) {
                enum pnfs_try_status trypnfs;
 
-               data = list_entry(head->next, struct nfs_write_data, list);
+               data = list_first_entry(head, struct nfs_write_data, list);
                list_del_init(&data->list);
 
                trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
@@ -1276,43 +1378,82 @@ pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *he
        put_lseg(lseg);
 }
 
+static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
+{
+       put_lseg(hdr->lseg);
+       nfs_writehdr_free(hdr);
+}
+
 int
 pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
 {
-       LIST_HEAD(head);
+       struct nfs_write_header *whdr;
+       struct nfs_pgio_header *hdr;
        int ret;
 
-       ret = nfs_generic_flush(desc, &head);
-       if (ret != 0) {
+       whdr = nfs_writehdr_alloc();
+       if (!whdr) {
+               desc->pg_completion_ops->error_cleanup(&desc->pg_list);
                put_lseg(desc->pg_lseg);
                desc->pg_lseg = NULL;
-               return ret;
+               return -ENOMEM;
        }
-       pnfs_do_multiple_writes(desc, &head, desc->pg_ioflags);
-       return 0;
+       hdr = &whdr->header;
+       nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
+       hdr->lseg = get_lseg(desc->pg_lseg);
+       atomic_inc(&hdr->refcnt);
+       ret = nfs_generic_flush(desc, hdr);
+       if (ret != 0) {
+               put_lseg(desc->pg_lseg);
+               desc->pg_lseg = NULL;
+       } else
+               pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
+       if (atomic_dec_and_test(&hdr->refcnt))
+               hdr->completion_ops->completion(hdr);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
 
-static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
+int pnfs_read_done_resend_to_mds(struct inode *inode,
+                               struct list_head *head,
+                               const struct nfs_pgio_completion_ops *compl_ops)
 {
        struct nfs_pageio_descriptor pgio;
+       LIST_HEAD(failed);
 
-       put_lseg(data->lseg);
-       data->lseg = NULL;
-       dprintk("pnfs write error = %d\n", data->pnfs_error);
-       if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
-                                               PNFS_LAYOUTRET_ON_ERROR)
-               pnfs_return_layout(data->inode);
-
-       nfs_pageio_init_read_mds(&pgio, data->inode);
-
-       while (!list_empty(&data->pages)) {
-               struct nfs_page *req = nfs_list_entry(data->pages.next);
+       /* Resend all requests through the MDS */
+       nfs_pageio_init_read_mds(&pgio, inode, compl_ops);
+       while (!list_empty(head)) {
+               struct nfs_page *req = nfs_list_entry(head->next);
 
                nfs_list_remove_request(req);
-               nfs_pageio_add_request(&pgio, req);
+               if (!nfs_pageio_add_request(&pgio, req))
+                       nfs_list_add_request(req, &failed);
        }
        nfs_pageio_complete(&pgio);
+
+       if (!list_empty(&failed)) {
+               list_move(&failed, head);
+               return -EIO;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
+
+static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
+{
+       struct nfs_pgio_header *hdr = data->header;
+
+       dprintk("pnfs read error = %d\n", hdr->pnfs_error);
+       if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
+           PNFS_LAYOUTRET_ON_ERROR) {
+               clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
+               pnfs_return_layout(hdr->inode);
+       }
+       if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
+               data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
+                                                       &hdr->pages,
+                                                       hdr->completion_ops);
 }
 
 /*
@@ -1320,13 +1461,14 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
  */
 void pnfs_ld_read_done(struct nfs_read_data *data)
 {
-       if (likely(!data->pnfs_error)) {
+       struct nfs_pgio_header *hdr = data->header;
+
+       if (likely(!hdr->pnfs_error)) {
                __nfs4_read_done_cb(data);
-               data->mds_ops->rpc_call_done(&data->task, data);
+               hdr->mds_ops->rpc_call_done(&data->task, data);
        } else
                pnfs_ld_handle_read_error(data);
-       put_lseg(data->lseg);
-       data->mds_ops->rpc_release(data);
+       hdr->mds_ops->rpc_release(data);
 }
 EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
 
@@ -1334,11 +1476,13 @@ static void
 pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
                struct nfs_read_data *data)
 {
-       list_splice_tail_init(&data->pages, &desc->pg_list);
-       if (data->req && list_empty(&data->req->wb_list))
-               nfs_list_add_request(data->req, &desc->pg_list);
-       nfs_pageio_reset_read_mds(desc);
-       desc->pg_recoalesce = 1;
+       struct nfs_pgio_header *hdr = data->header;
+
+       if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+               list_splice_tail_init(&hdr->pages, &desc->pg_list);
+               nfs_pageio_reset_read_mds(desc);
+               desc->pg_recoalesce = 1;
+       }
        nfs_readdata_release(data);
 }
 
@@ -1350,23 +1494,19 @@ pnfs_try_to_read_data(struct nfs_read_data *rdata,
                       const struct rpc_call_ops *call_ops,
                       struct pnfs_layout_segment *lseg)
 {
-       struct inode *inode = rdata->inode;
+       struct nfs_pgio_header *hdr = rdata->header;
+       struct inode *inode = hdr->inode;
        struct nfs_server *nfss = NFS_SERVER(inode);
        enum pnfs_try_status trypnfs;
 
-       rdata->mds_ops = call_ops;
-       rdata->lseg = get_lseg(lseg);
+       hdr->mds_ops = call_ops;
 
        dprintk("%s: Reading ino:%lu %u@%llu\n",
                __func__, inode->i_ino, rdata->args.count, rdata->args.offset);
 
        trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
-       if (trypnfs == PNFS_NOT_ATTEMPTED) {
-               put_lseg(rdata->lseg);
-               rdata->lseg = NULL;
-       } else {
+       if (trypnfs != PNFS_NOT_ATTEMPTED)
                nfs_inc_stats(inode, NFSIOS_PNFS_READ);
-       }
        dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
        return trypnfs;
 }
@@ -1382,7 +1522,7 @@ pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *hea
        while (!list_empty(head)) {
                enum pnfs_try_status trypnfs;
 
-               data = list_entry(head->next, struct nfs_read_data, list);
+               data = list_first_entry(head, struct nfs_read_data, list);
                list_del_init(&data->list);
 
                trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
@@ -1392,20 +1532,40 @@ pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *hea
        put_lseg(lseg);
 }
 
+static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
+{
+       put_lseg(hdr->lseg);
+       nfs_readhdr_free(hdr);
+}
+
 int
 pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
 {
-       LIST_HEAD(head);
+       struct nfs_read_header *rhdr;
+       struct nfs_pgio_header *hdr;
        int ret;
 
-       ret = nfs_generic_pagein(desc, &head);
-       if (ret != 0) {
+       rhdr = nfs_readhdr_alloc();
+       if (!rhdr) {
+               desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+               ret = -ENOMEM;
                put_lseg(desc->pg_lseg);
                desc->pg_lseg = NULL;
                return ret;
        }
-       pnfs_do_multiple_reads(desc, &head);
-       return 0;
+       hdr = &rhdr->header;
+       nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
+       hdr->lseg = get_lseg(desc->pg_lseg);
+       atomic_inc(&hdr->refcnt);
+       ret = nfs_generic_pagein(desc, hdr);
+       if (ret != 0) {
+               put_lseg(desc->pg_lseg);
+               desc->pg_lseg = NULL;
+       } else
+               pnfs_do_multiple_reads(desc, &hdr->rpc_list);
+       if (atomic_dec_and_test(&hdr->refcnt))
+               hdr->completion_ops->completion(hdr);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
 
@@ -1438,30 +1598,32 @@ EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
 void
 pnfs_set_layoutcommit(struct nfs_write_data *wdata)
 {
-       struct nfs_inode *nfsi = NFS_I(wdata->inode);
+       struct nfs_pgio_header *hdr = wdata->header;
+       struct inode *inode = hdr->inode;
+       struct nfs_inode *nfsi = NFS_I(inode);
        loff_t end_pos = wdata->mds_offset + wdata->res.count;
        bool mark_as_dirty = false;
 
-       spin_lock(&nfsi->vfs_inode.i_lock);
+       spin_lock(&inode->i_lock);
        if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
                mark_as_dirty = true;
                dprintk("%s: Set layoutcommit for inode %lu ",
-                       __func__, wdata->inode->i_ino);
+                       __func__, inode->i_ino);
        }
-       if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &wdata->lseg->pls_flags)) {
+       if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
                /* references matched in nfs4_layoutcommit_release */
-               get_lseg(wdata->lseg);
+               get_lseg(hdr->lseg);
        }
        if (end_pos > nfsi->layout->plh_lwb)
                nfsi->layout->plh_lwb = end_pos;
-       spin_unlock(&nfsi->vfs_inode.i_lock);
+       spin_unlock(&inode->i_lock);
        dprintk("%s: lseg %p end_pos %llu\n",
-               __func__, wdata->lseg, nfsi->layout->plh_lwb);
+               __func__, hdr->lseg, nfsi->layout->plh_lwb);
 
        /* if pnfs_layoutcommit_inode() runs between inode locks, the next one
         * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
        if (mark_as_dirty)
-               mark_inode_dirty_sync(wdata->inode);
+               mark_inode_dirty_sync(inode);
 }
 EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
 
@@ -1550,3 +1712,15 @@ out_free:
        kfree(data);
        goto out;
 }
+
+struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
+{
+       struct nfs4_threshold *thp;
+
+       thp = kzalloc(sizeof(*thp), GFP_NOFS);
+       if (!thp) {
+               dprintk("%s mdsthreshold allocation failed\n", __func__);
+               return NULL;
+       }
+       return thp;
+}
index 442ebf68eeecf51dfaa6b8835318b53010eefe19..29fd23c0efdcb07c699c5e2e94c1e23dad8de103 100644 (file)
@@ -63,6 +63,7 @@ enum {
        NFS_LAYOUT_BULK_RECALL,         /* bulk recall affecting layout */
        NFS_LAYOUT_ROC,                 /* some lseg had roc bit set */
        NFS_LAYOUT_DESTROYED,           /* no new use of layout allowed */
+       NFS_LAYOUT_INVALID,             /* layout is being destroyed */
 };
 
 enum layoutdriver_policy_flags {
@@ -94,11 +95,20 @@ struct pnfs_layoutdriver_type {
        const struct nfs_pageio_ops *pg_read_ops;
        const struct nfs_pageio_ops *pg_write_ops;
 
+       struct pnfs_ds_commit_info *(*get_ds_info) (struct inode *inode);
        void (*mark_request_commit) (struct nfs_page *req,
-                                       struct pnfs_layout_segment *lseg);
-       void (*clear_request_commit) (struct nfs_page *req);
-       int (*scan_commit_lists) (struct inode *inode, int max, spinlock_t *lock);
-       int (*commit_pagelist)(struct inode *inode, struct list_head *mds_pages, int how);
+                                    struct pnfs_layout_segment *lseg,
+                                    struct nfs_commit_info *cinfo);
+       void (*clear_request_commit) (struct nfs_page *req,
+                                     struct nfs_commit_info *cinfo);
+       int (*scan_commit_lists) (struct nfs_commit_info *cinfo,
+                                 int max);
+       void (*recover_commit_reqs) (struct list_head *list,
+                                    struct nfs_commit_info *cinfo);
+       int (*commit_pagelist)(struct inode *inode,
+                              struct list_head *mds_pages,
+                              int how,
+                              struct nfs_commit_info *cinfo);
 
        /*
         * Return PNFS_ATTEMPTED to indicate the layout code has attempted
@@ -168,8 +178,10 @@ extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
 void get_layout_hdr(struct pnfs_layout_hdr *lo);
 void put_lseg(struct pnfs_layout_segment *lseg);
 
-bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *);
-bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *, int);
+bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *,
+                          const struct nfs_pgio_completion_ops *);
+bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *,
+                           int, const struct nfs_pgio_completion_ops *);
 
 void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32);
 void unset_pnfs_layoutdriver(struct nfs_server *);
@@ -211,6 +223,11 @@ struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
                                               gfp_t gfp_flags);
 
 void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp);
+int pnfs_read_done_resend_to_mds(struct inode *inode, struct list_head *head,
+                       const struct nfs_pgio_completion_ops *compl_ops);
+int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head,
+                       const struct nfs_pgio_completion_ops *compl_ops);
+struct nfs4_threshold *pnfs_mdsthreshold_alloc(void);
 
 /* nfs4_deviceid_flags */
 enum {
@@ -261,49 +278,66 @@ static inline int pnfs_enabled_sb(struct nfs_server *nfss)
 }
 
 static inline int
-pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how,
+                struct nfs_commit_info *cinfo)
 {
-       if (!test_and_clear_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags))
+       if (cinfo->ds == NULL || cinfo->ds->ncommitting == 0)
                return PNFS_NOT_ATTEMPTED;
-       return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how);
+       return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how, cinfo);
+}
+
+static inline struct pnfs_ds_commit_info *
+pnfs_get_ds_info(struct inode *inode)
+{
+       struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
+
+       if (ld == NULL || ld->get_ds_info == NULL)
+               return NULL;
+       return ld->get_ds_info(inode);
 }
 
 static inline bool
-pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+                        struct nfs_commit_info *cinfo)
 {
        struct inode *inode = req->wb_context->dentry->d_inode;
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
 
        if (lseg == NULL || ld->mark_request_commit == NULL)
                return false;
-       ld->mark_request_commit(req, lseg);
+       ld->mark_request_commit(req, lseg, cinfo);
        return true;
 }
 
 static inline bool
-pnfs_clear_request_commit(struct nfs_page *req)
+pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo)
 {
        struct inode *inode = req->wb_context->dentry->d_inode;
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
 
        if (ld == NULL || ld->clear_request_commit == NULL)
                return false;
-       ld->clear_request_commit(req);
+       ld->clear_request_commit(req, cinfo);
        return true;
 }
 
 static inline int
-pnfs_scan_commit_lists(struct inode *inode, int max, spinlock_t *lock)
+pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
+                      int max)
 {
-       struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
-       int ret;
-
-       if (ld == NULL || ld->scan_commit_lists == NULL)
+       if (cinfo->ds == NULL || cinfo->ds->nwritten == 0)
                return 0;
-       ret = ld->scan_commit_lists(inode, max, lock);
-       if (ret != 0)
-               set_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags);
-       return ret;
+       else
+               return NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists(cinfo, max);
+}
+
+static inline void
+pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list,
+                        struct nfs_commit_info *cinfo)
+{
+       if (cinfo->ds == NULL || cinfo->ds->nwritten == 0)
+               return;
+       NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
 }
 
 /* Should the pNFS client commit and return the layout upon a setattr */
@@ -327,6 +361,14 @@ static inline int pnfs_return_layout(struct inode *ino)
        return 0;
 }
 
+static inline bool
+pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src,
+                  struct nfs_server *nfss)
+{
+       return (dst && src && src->bm != 0 &&
+                                       nfss->pnfs_curr_ld->id == src->l_type);
+}
+
 #ifdef NFS_DEBUG
 void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id);
 #else
@@ -396,45 +438,74 @@ static inline void unset_pnfs_layoutdriver(struct nfs_server *s)
 {
 }
 
-static inline bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
+static inline bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
+                                        const struct nfs_pgio_completion_ops *compl_ops)
 {
        return false;
 }
 
-static inline bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
+static inline bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags,
+                                         const struct nfs_pgio_completion_ops *compl_ops)
 {
        return false;
 }
 
 static inline int
-pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how,
+                struct nfs_commit_info *cinfo)
 {
        return PNFS_NOT_ATTEMPTED;
 }
 
+static inline struct pnfs_ds_commit_info *
+pnfs_get_ds_info(struct inode *inode)
+{
+       return NULL;
+}
+
 static inline bool
-pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+                        struct nfs_commit_info *cinfo)
 {
        return false;
 }
 
 static inline bool
-pnfs_clear_request_commit(struct nfs_page *req)
+pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo)
 {
        return false;
 }
 
 static inline int
-pnfs_scan_commit_lists(struct inode *inode, int max, spinlock_t *lock)
+pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
+                      int max)
 {
        return 0;
 }
 
+static inline void
+pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list,
+                        struct nfs_commit_info *cinfo)
+{
+}
+
 static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync)
 {
        return 0;
 }
 
+static inline bool
+pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src,
+                  struct nfs_server *nfss)
+{
+       return false;
+}
+
+static inline struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
+{
+       return NULL;
+}
+
 #endif /* CONFIG_NFS_V4_1 */
 
 #endif /* FS_NFS_PNFS_H */
index d6408b6437de4f9f0c55f9f211f61df129976b7c..a706b6bcc286a5a401318e868b0d1fbab2a206a4 100644 (file)
@@ -178,7 +178,7 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
 }
 
 static int
-nfs_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+nfs_proc_lookup(struct inode *dir, struct qstr *name,
                struct nfs_fh *fhandle, struct nfs_fattr *fattr)
 {
        struct nfs_diropargs    arg = {
@@ -640,12 +640,14 @@ nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
 
 static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
 {
+       struct inode *inode = data->header->inode;
+
        if (nfs_async_handle_expired_key(task))
                return -EAGAIN;
 
-       nfs_invalidate_atime(data->inode);
+       nfs_invalidate_atime(inode);
        if (task->tk_status >= 0) {
-               nfs_refresh_inode(data->inode, data->res.fattr);
+               nfs_refresh_inode(inode, data->res.fattr);
                /* Emulate the eof flag, which isn't normally needed in NFSv2
                 * as it is guaranteed to always return the file attributes
                 */
@@ -667,11 +669,13 @@ static void nfs_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_dat
 
 static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
 {
+       struct inode *inode = data->header->inode;
+
        if (nfs_async_handle_expired_key(task))
                return -EAGAIN;
 
        if (task->tk_status >= 0)
-               nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr);
+               nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
        return 0;
 }
 
@@ -687,8 +691,13 @@ static void nfs_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_d
        rpc_call_start(task);
 }
 
+static void nfs_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+{
+       BUG();
+}
+
 static void
-nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+nfs_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
 {
        BUG();
 }
@@ -732,6 +741,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
        .file_inode_ops = &nfs_file_inode_operations,
        .file_ops       = &nfs_file_operations,
        .getroot        = nfs_proc_get_root,
+       .submount       = nfs_submount,
        .getattr        = nfs_proc_getattr,
        .setattr        = nfs_proc_setattr,
        .lookup         = nfs_proc_lookup,
@@ -763,6 +773,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
        .write_rpc_prepare = nfs_proc_write_rpc_prepare,
        .write_done     = nfs_write_done,
        .commit_setup   = nfs_proc_commit_setup,
+       .commit_rpc_prepare = nfs_proc_commit_rpc_prepare,
        .lock           = nfs_proc_lock,
        .lock_check_bounds = nfs_lock_check_bounds,
        .close_context  = nfs_close_context,
index 0a4be28c2ea3c76f57321bf765708924c4a2fdcf..86ced78362142119328ad827138c0c716668aeac 100644 (file)
 #define NFSDBG_FACILITY                NFSDBG_PAGECACHE
 
 static const struct nfs_pageio_ops nfs_pageio_read_ops;
-static const struct rpc_call_ops nfs_read_partial_ops;
-static const struct rpc_call_ops nfs_read_full_ops;
+static const struct rpc_call_ops nfs_read_common_ops;
+static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
 
 static struct kmem_cache *nfs_rdata_cachep;
 
-struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
+struct nfs_read_header *nfs_readhdr_alloc(void)
 {
-       struct nfs_read_data *p;
-
-       p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
-       if (p) {
-               INIT_LIST_HEAD(&p->pages);
-               p->npages = pagecount;
-               if (pagecount <= ARRAY_SIZE(p->page_array))
-                       p->pagevec = p->page_array;
-               else {
-                       p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
-                       if (!p->pagevec) {
-                               kmem_cache_free(nfs_rdata_cachep, p);
-                               p = NULL;
-                       }
-               }
+       struct nfs_read_header *rhdr;
+
+       rhdr = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
+       if (rhdr) {
+               struct nfs_pgio_header *hdr = &rhdr->header;
+
+               INIT_LIST_HEAD(&hdr->pages);
+               INIT_LIST_HEAD(&hdr->rpc_list);
+               spin_lock_init(&hdr->lock);
+               atomic_set(&hdr->refcnt, 0);
+       }
+       return rhdr;
+}
+
+static struct nfs_read_data *nfs_readdata_alloc(struct nfs_pgio_header *hdr,
+                                               unsigned int pagecount)
+{
+       struct nfs_read_data *data, *prealloc;
+
+       prealloc = &container_of(hdr, struct nfs_read_header, header)->rpc_data;
+       if (prealloc->header == NULL)
+               data = prealloc;
+       else
+               data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               goto out;
+
+       if (nfs_pgarray_set(&data->pages, pagecount)) {
+               data->header = hdr;
+               atomic_inc(&hdr->refcnt);
+       } else {
+               if (data != prealloc)
+                       kfree(data);
+               data = NULL;
        }
-       return p;
+out:
+       return data;
 }
 
-void nfs_readdata_free(struct nfs_read_data *p)
+void nfs_readhdr_free(struct nfs_pgio_header *hdr)
 {
-       if (p && (p->pagevec != &p->page_array[0]))
-               kfree(p->pagevec);
-       kmem_cache_free(nfs_rdata_cachep, p);
+       struct nfs_read_header *rhdr = container_of(hdr, struct nfs_read_header, header);
+
+       kmem_cache_free(nfs_rdata_cachep, rhdr);
 }
 
 void nfs_readdata_release(struct nfs_read_data *rdata)
 {
+       struct nfs_pgio_header *hdr = rdata->header;
+       struct nfs_read_header *read_header = container_of(hdr, struct nfs_read_header, header);
+
        put_nfs_open_context(rdata->args.context);
-       nfs_readdata_free(rdata);
+       if (rdata->pages.pagevec != rdata->pages.page_array)
+               kfree(rdata->pages.pagevec);
+       if (rdata != &read_header->rpc_data)
+               kfree(rdata);
+       else
+               rdata->header = NULL;
+       if (atomic_dec_and_test(&hdr->refcnt))
+               hdr->completion_ops->completion(hdr);
 }
 
 static
@@ -78,39 +108,11 @@ int nfs_return_empty_page(struct page *page)
        return 0;
 }
 
-static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
-{
-       unsigned int remainder = data->args.count - data->res.count;
-       unsigned int base = data->args.pgbase + data->res.count;
-       unsigned int pglen;
-       struct page **pages;
-
-       if (data->res.eof == 0 || remainder == 0)
-               return;
-       /*
-        * Note: "remainder" can never be negative, since we check for
-        *      this in the XDR code.
-        */
-       pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
-       base &= ~PAGE_CACHE_MASK;
-       pglen = PAGE_CACHE_SIZE - base;
-       for (;;) {
-               if (remainder <= pglen) {
-                       zero_user(*pages, base, remainder);
-                       break;
-               }
-               zero_user(*pages, base, pglen);
-               pages++;
-               remainder -= pglen;
-               pglen = PAGE_CACHE_SIZE;
-               base = 0;
-       }
-}
-
 void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
-               struct inode *inode)
+                             struct inode *inode,
+                             const struct nfs_pgio_completion_ops *compl_ops)
 {
-       nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
+       nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, compl_ops,
                        NFS_SERVER(inode)->rsize, 0);
 }
 
@@ -121,11 +123,12 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
 }
 EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
 
-static void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
-               struct inode *inode)
+void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
+                         struct inode *inode,
+                         const struct nfs_pgio_completion_ops *compl_ops)
 {
-       if (!pnfs_pageio_init_read(pgio, inode))
-               nfs_pageio_init_read_mds(pgio, inode);
+       if (!pnfs_pageio_init_read(pgio, inode, compl_ops))
+               nfs_pageio_init_read_mds(pgio, inode, compl_ops);
 }
 
 int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
@@ -146,9 +149,10 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
        if (len < PAGE_CACHE_SIZE)
                zero_user_segment(page, len, PAGE_CACHE_SIZE);
 
-       nfs_pageio_init_read(&pgio, inode);
+       nfs_pageio_init_read(&pgio, inode, &nfs_async_read_completion_ops);
        nfs_pageio_add_request(&pgio, new);
        nfs_pageio_complete(&pgio);
+       NFS_I(inode)->read_io += pgio.pg_bytes_written;
        return 0;
 }
 
@@ -169,16 +173,49 @@ static void nfs_readpage_release(struct nfs_page *req)
        nfs_release_request(req);
 }
 
-int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
-                     const struct rpc_call_ops *call_ops)
+/* Note io was page aligned */
+static void nfs_read_completion(struct nfs_pgio_header *hdr)
+{
+       unsigned long bytes = 0;
+
+       if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+               goto out;
+       while (!list_empty(&hdr->pages)) {
+               struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+               struct page *page = req->wb_page;
+
+               if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
+                       if (bytes > hdr->good_bytes)
+                               zero_user(page, 0, PAGE_SIZE);
+                       else if (hdr->good_bytes - bytes < PAGE_SIZE)
+                               zero_user_segment(page,
+                                       hdr->good_bytes & ~PAGE_MASK,
+                                       PAGE_SIZE);
+               }
+               bytes += req->wb_bytes;
+               if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+                       if (bytes <= hdr->good_bytes)
+                               SetPageUptodate(page);
+               } else
+                       SetPageUptodate(page);
+               nfs_list_remove_request(req);
+               nfs_readpage_release(req);
+       }
+out:
+       hdr->release(hdr);
+}
+
+int nfs_initiate_read(struct rpc_clnt *clnt,
+                     struct nfs_read_data *data,
+                     const struct rpc_call_ops *call_ops, int flags)
 {
-       struct inode *inode = data->inode;
+       struct inode *inode = data->header->inode;
        int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
-               .rpc_cred = data->cred,
+               .rpc_cred = data->header->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .task = &data->task,
@@ -187,7 +224,7 @@ int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
                .callback_ops = call_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
-               .flags = RPC_TASK_ASYNC | swap_flags,
+               .flags = RPC_TASK_ASYNC | swap_flags | flags,
        };
 
        /* Set up the initial task struct. */
@@ -212,19 +249,15 @@ EXPORT_SYMBOL_GPL(nfs_initiate_read);
 /*
  * Set up the NFS read request struct
  */
-static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
+static void nfs_read_rpcsetup(struct nfs_read_data *data,
                unsigned int count, unsigned int offset)
 {
-       struct inode *inode = req->wb_context->dentry->d_inode;
-
-       data->req         = req;
-       data->inode       = inode;
-       data->cred        = req->wb_context->cred;
+       struct nfs_page *req = data->header->req;
 
-       data->args.fh     = NFS_FH(inode);
+       data->args.fh     = NFS_FH(data->header->inode);
        data->args.offset = req_offset(req) + offset;
        data->args.pgbase = req->wb_pgbase + offset;
-       data->args.pages  = data->pagevec;
+       data->args.pages  = data->pages.pagevec;
        data->args.count  = count;
        data->args.context = get_nfs_open_context(req->wb_context);
        data->args.lock_context = req->wb_lock_context;
@@ -238,9 +271,9 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
 static int nfs_do_read(struct nfs_read_data *data,
                const struct rpc_call_ops *call_ops)
 {
-       struct inode *inode = data->args.context->dentry->d_inode;
+       struct inode *inode = data->header->inode;
 
-       return nfs_initiate_read(data, NFS_CLIENT(inode), call_ops);
+       return nfs_initiate_read(NFS_CLIENT(inode), data, call_ops, 0);
 }
 
 static int
@@ -253,7 +286,7 @@ nfs_do_multiple_reads(struct list_head *head,
        while (!list_empty(head)) {
                int ret2;
 
-               data = list_entry(head->next, struct nfs_read_data, list);
+               data = list_first_entry(head, struct nfs_read_data, list);
                list_del_init(&data->list);
 
                ret2 = nfs_do_read(data, call_ops);
@@ -275,6 +308,24 @@ nfs_async_read_error(struct list_head *head)
        }
 }
 
+static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
+       .error_cleanup = nfs_async_read_error,
+       .completion = nfs_read_completion,
+};
+
+static void nfs_pagein_error(struct nfs_pageio_descriptor *desc,
+               struct nfs_pgio_header *hdr)
+{
+       set_bit(NFS_IOHDR_REDO, &hdr->flags);
+       while (!list_empty(&hdr->rpc_list)) {
+               struct nfs_read_data *data = list_first_entry(&hdr->rpc_list,
+                               struct nfs_read_data, list);
+               list_del(&data->list);
+               nfs_readdata_release(data);
+       }
+       desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+}
+
 /*
  * Generate multiple requests to fill a single page.
  *
@@ -288,93 +339,95 @@ nfs_async_read_error(struct list_head *head)
  * won't see the new data until our attribute cache is updated.  This is more
  * or less conventional NFS client behavior.
  */
-static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc,
+                           struct nfs_pgio_header *hdr)
 {
-       struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
+       struct nfs_page *req = hdr->req;
        struct page *page = req->wb_page;
        struct nfs_read_data *data;
        size_t rsize = desc->pg_bsize, nbytes;
        unsigned int offset;
-       int requests = 0;
-       int ret = 0;
-
-       nfs_list_remove_request(req);
 
        offset = 0;
        nbytes = desc->pg_count;
        do {
                size_t len = min(nbytes,rsize);
 
-               data = nfs_readdata_alloc(1);
-               if (!data)
-                       goto out_bad;
-               data->pagevec[0] = page;
-               nfs_read_rpcsetup(req, data, len, offset);
-               list_add(&data->list, res);
-               requests++;
+               data = nfs_readdata_alloc(hdr, 1);
+               if (!data) {
+                       nfs_pagein_error(desc, hdr);
+                       return -ENOMEM;
+               }
+               data->pages.pagevec[0] = page;
+               nfs_read_rpcsetup(data, len, offset);
+               list_add(&data->list, &hdr->rpc_list);
                nbytes -= len;
                offset += len;
-       } while(nbytes != 0);
-       atomic_set(&req->wb_complete, requests);
-       desc->pg_rpc_callops = &nfs_read_partial_ops;
-       return ret;
-out_bad:
-       while (!list_empty(res)) {
-               data = list_entry(res->next, struct nfs_read_data, list);
-               list_del(&data->list);
-               nfs_readdata_release(data);
-       }
-       nfs_readpage_release(req);
-       return -ENOMEM;
+       } while (nbytes != 0);
+
+       nfs_list_remove_request(req);
+       nfs_list_add_request(req, &hdr->pages);
+       desc->pg_rpc_callops = &nfs_read_common_ops;
+       return 0;
 }
 
-static int nfs_pagein_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_pagein_one(struct nfs_pageio_descriptor *desc,
+                         struct nfs_pgio_header *hdr)
 {
        struct nfs_page         *req;
        struct page             **pages;
-       struct nfs_read_data    *data;
+       struct nfs_read_data    *data;
        struct list_head *head = &desc->pg_list;
-       int ret = 0;
 
-       data = nfs_readdata_alloc(nfs_page_array_len(desc->pg_base,
-                                                    desc->pg_count));
+       data = nfs_readdata_alloc(hdr, nfs_page_array_len(desc->pg_base,
+                                                         desc->pg_count));
        if (!data) {
-               nfs_async_read_error(head);
-               ret = -ENOMEM;
-               goto out;
+               nfs_pagein_error(desc, hdr);
+               return -ENOMEM;
        }
 
-       pages = data->pagevec;
+       pages = data->pages.pagevec;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
-               nfs_list_add_request(req, &data->pages);
+               nfs_list_add_request(req, &hdr->pages);
                *pages++ = req->wb_page;
        }
-       req = nfs_list_entry(data->pages.next);
 
-       nfs_read_rpcsetup(req, data, desc->pg_count, 0);
-       list_add(&data->list, res);
-       desc->pg_rpc_callops = &nfs_read_full_ops;
-out:
-       return ret;
+       nfs_read_rpcsetup(data, desc->pg_count, 0);
+       list_add(&data->list, &hdr->rpc_list);
+       desc->pg_rpc_callops = &nfs_read_common_ops;
+       return 0;
 }
 
-int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, struct list_head *head)
+int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
+                      struct nfs_pgio_header *hdr)
 {
        if (desc->pg_bsize < PAGE_CACHE_SIZE)
-               return nfs_pagein_multi(desc, head);
-       return nfs_pagein_one(desc, head);
+               return nfs_pagein_multi(desc, hdr);
+       return nfs_pagein_one(desc, hdr);
 }
 
 static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
 {
-       LIST_HEAD(head);
+       struct nfs_read_header *rhdr;
+       struct nfs_pgio_header *hdr;
        int ret;
 
-       ret = nfs_generic_pagein(desc, &head);
+       rhdr = nfs_readhdr_alloc();
+       if (!rhdr) {
+               desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+               return -ENOMEM;
+       }
+       hdr = &rhdr->header;
+       nfs_pgheader_init(desc, hdr, nfs_readhdr_free);
+       atomic_inc(&hdr->refcnt);
+       ret = nfs_generic_pagein(desc, hdr);
        if (ret == 0)
-               ret = nfs_do_multiple_reads(&head, desc->pg_rpc_callops);
+               ret = nfs_do_multiple_reads(&hdr->rpc_list,
+                                           desc->pg_rpc_callops);
+       if (atomic_dec_and_test(&hdr->refcnt))
+               hdr->completion_ops->completion(hdr);
        return ret;
 }
 
@@ -389,20 +442,21 @@ static const struct nfs_pageio_ops nfs_pageio_read_ops = {
  */
 int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
 {
+       struct inode *inode = data->header->inode;
        int status;
 
        dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
                        task->tk_status);
 
-       status = NFS_PROTO(data->inode)->read_done(task, data);
+       status = NFS_PROTO(inode)->read_done(task, data);
        if (status != 0)
                return status;
 
-       nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);
+       nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, data->res.count);
 
        if (task->tk_status == -ESTALE) {
-               set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
-               nfs_mark_for_revalidate(data->inode);
+               set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
+               nfs_mark_for_revalidate(inode);
        }
        return 0;
 }
@@ -412,15 +466,13 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
        struct nfs_readargs *argp = &data->args;
        struct nfs_readres *resp = &data->res;
 
-       if (resp->eof || resp->count == argp->count)
-               return;
-
        /* This is a short read! */
-       nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
+       nfs_inc_stats(data->header->inode, NFSIOS_SHORTREAD);
        /* Has the server at least made some progress? */
-       if (resp->count == 0)
+       if (resp->count == 0) {
+               nfs_set_pgio_error(data->header, -EIO, argp->offset);
                return;
-
+       }
        /* Yes, so retry the read at the end of the data */
        data->mds_offset += resp->count;
        argp->offset += resp->count;
@@ -429,114 +481,46 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
        rpc_restart_call_prepare(task);
 }
 
-/*
- * Handle a read reply that fills part of a page.
- */
-static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
+static void nfs_readpage_result_common(struct rpc_task *task, void *calldata)
 {
        struct nfs_read_data *data = calldata;
+       struct nfs_pgio_header *hdr = data->header;
+
+       /* Note the only returns of nfs_readpage_result are 0 and -EAGAIN */
        if (nfs_readpage_result(task, data) != 0)
                return;
        if (task->tk_status < 0)
-               return;
-
-       nfs_readpage_truncate_uninitialised_page(data);
-       nfs_readpage_retry(task, data);
+               nfs_set_pgio_error(hdr, task->tk_status, data->args.offset);
+       else if (data->res.eof) {
+               loff_t bound;
+
+               bound = data->args.offset + data->res.count;
+               spin_lock(&hdr->lock);
+               if (bound < hdr->io_start + hdr->good_bytes) {
+                       set_bit(NFS_IOHDR_EOF, &hdr->flags);
+                       clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
+                       hdr->good_bytes = bound - hdr->io_start;
+               }
+               spin_unlock(&hdr->lock);
+       } else if (data->res.count != data->args.count)
+               nfs_readpage_retry(task, data);
 }
 
-static void nfs_readpage_release_partial(void *calldata)
+static void nfs_readpage_release_common(void *calldata)
 {
-       struct nfs_read_data *data = calldata;
-       struct nfs_page *req = data->req;
-       struct page *page = req->wb_page;
-       int status = data->task.tk_status;
-
-       if (status < 0)
-               set_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags);
-
-       if (atomic_dec_and_test(&req->wb_complete)) {
-               if (!test_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags))
-                       SetPageUptodate(page);
-               nfs_readpage_release(req);
-       }
        nfs_readdata_release(calldata);
 }
 
 void nfs_read_prepare(struct rpc_task *task, void *calldata)
 {
        struct nfs_read_data *data = calldata;
-       NFS_PROTO(data->inode)->read_rpc_prepare(task, data);
-}
-
-static const struct rpc_call_ops nfs_read_partial_ops = {
-       .rpc_call_prepare = nfs_read_prepare,
-       .rpc_call_done = nfs_readpage_result_partial,
-       .rpc_release = nfs_readpage_release_partial,
-};
-
-static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
-{
-       unsigned int count = data->res.count;
-       unsigned int base = data->args.pgbase;
-       struct page **pages;
-
-       if (data->res.eof)
-               count = data->args.count;
-       if (unlikely(count == 0))
-               return;
-       pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
-       base &= ~PAGE_CACHE_MASK;
-       count += base;
-       for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
-               SetPageUptodate(*pages);
-       if (count == 0)
-               return;
-       /* Was this a short read? */
-       if (data->res.eof || data->res.count == data->args.count)
-               SetPageUptodate(*pages);
-}
-
-/*
- * This is the callback from RPC telling us whether a reply was
- * received or some error occurred (timeout or socket shutdown).
- */
-static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
-{
-       struct nfs_read_data *data = calldata;
-
-       if (nfs_readpage_result(task, data) != 0)
-               return;
-       if (task->tk_status < 0)
-               return;
-       /*
-        * Note: nfs_readpage_retry may change the values of
-        * data->args. In the multi-page case, we therefore need
-        * to ensure that we call nfs_readpage_set_pages_uptodate()
-        * first.
-        */
-       nfs_readpage_truncate_uninitialised_page(data);
-       nfs_readpage_set_pages_uptodate(data);
-       nfs_readpage_retry(task, data);
-}
-
-static void nfs_readpage_release_full(void *calldata)
-{
-       struct nfs_read_data *data = calldata;
-
-       while (!list_empty(&data->pages)) {
-               struct nfs_page *req = nfs_list_entry(data->pages.next);
-
-               nfs_list_remove_request(req);
-               nfs_readpage_release(req);
-       }
-       nfs_readdata_release(calldata);
+       NFS_PROTO(data->header->inode)->read_rpc_prepare(task, data);
 }
 
-static const struct rpc_call_ops nfs_read_full_ops = {
+static const struct rpc_call_ops nfs_read_common_ops = {
        .rpc_call_prepare = nfs_read_prepare,
-       .rpc_call_done = nfs_readpage_result_full,
-       .rpc_release = nfs_readpage_release_full,
+       .rpc_call_done = nfs_readpage_result_common,
+       .rpc_release = nfs_readpage_release_common,
 };
 
 /*
@@ -668,11 +652,12 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
        if (ret == 0)
                goto read_complete; /* all pages were read */
 
-       nfs_pageio_init_read(&pgio, inode);
+       nfs_pageio_init_read(&pgio, inode, &nfs_async_read_completion_ops);
 
        ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
 
        nfs_pageio_complete(&pgio);
+       NFS_I(inode)->read_io += pgio.pg_bytes_written;
        npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        nfs_add_stats(inode, NFSIOS_READPAGES, npages);
 read_complete:
@@ -684,7 +669,7 @@ out:
 int __init nfs_init_readpagecache(void)
 {
        nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
-                                            sizeof(struct nfs_read_data),
+                                            sizeof(struct nfs_read_header),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_rdata_cachep == NULL)
index 4ac7fca7e4bf32fc01ac980c26dfcb255325f3b1..ff656c022684e9e2b0d94587cf9d807d670bd715 100644 (file)
@@ -66,6 +66,7 @@
 #include "pnfs.h"
 
 #define NFSDBG_FACILITY                NFSDBG_VFS
+#define NFS_TEXT_DATA          1
 
 #ifdef CONFIG_NFS_V3
 #define NFS_DEFAULT_VERSION 3
@@ -277,12 +278,22 @@ static match_table_t nfs_vers_tokens = {
        { Opt_vers_err, NULL }
 };
 
+struct nfs_mount_info {
+       void (*fill_super)(struct super_block *, struct nfs_mount_info *);
+       int (*set_security)(struct super_block *, struct dentry *, struct nfs_mount_info *);
+       struct nfs_parsed_mount_data *parsed;
+       struct nfs_clone_mount *cloned;
+       struct nfs_fh *mntfh;
+};
+
 static void nfs_umount_begin(struct super_block *);
 static int  nfs_statfs(struct dentry *, struct kstatfs *);
 static int  nfs_show_options(struct seq_file *, struct dentry *);
 static int  nfs_show_devname(struct seq_file *, struct dentry *);
 static int  nfs_show_path(struct seq_file *, struct dentry *);
 static int  nfs_show_stats(struct seq_file *, struct dentry *);
+static struct dentry *nfs_fs_mount_common(struct file_system_type *,
+               struct nfs_server *, int, const char *, struct nfs_mount_info *);
 static struct dentry *nfs_fs_mount(struct file_system_type *,
                int, const char *, void *);
 static struct dentry *nfs_xdev_mount(struct file_system_type *fs_type,
@@ -323,12 +334,11 @@ static const struct super_operations nfs_sops = {
 };
 
 #ifdef CONFIG_NFS_V4
-static int nfs4_validate_text_mount_data(void *options,
+static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *);
+static int nfs4_validate_mount_data(void *options,
        struct nfs_parsed_mount_data *args, const char *dev_name);
 static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
-       struct nfs_parsed_mount_data *data);
-static struct dentry *nfs4_mount(struct file_system_type *fs_type,
-       int flags, const char *dev_name, void *raw_data);
+       struct nfs_mount_info *mount_info);
 static struct dentry *nfs4_remote_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *raw_data);
 static struct dentry *nfs4_xdev_mount(struct file_system_type *fs_type,
@@ -342,7 +352,7 @@ static void nfs4_kill_super(struct super_block *sb);
 static struct file_system_type nfs4_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "nfs4",
-       .mount          = nfs4_mount,
+       .mount          = nfs_fs_mount,
        .kill_sb        = nfs4_kill_super,
        .fs_flags       = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
 };
@@ -786,8 +796,8 @@ static void show_pnfs(struct seq_file *m, struct nfs_server *server)
 
 static void show_implementation_id(struct seq_file *m, struct nfs_server *nfss)
 {
-       if (nfss->nfs_client && nfss->nfs_client->impl_id) {
-               struct nfs41_impl_id *impl_id = nfss->nfs_client->impl_id;
+       if (nfss->nfs_client && nfss->nfs_client->cl_implid) {
+               struct nfs41_impl_id *impl_id = nfss->nfs_client->cl_implid;
                seq_printf(m, "\n\timpl_id:\tname='%s',domain='%s',"
                           "date='%llu,%u'",
                           impl_id->name, impl_id->domain,
@@ -938,7 +948,7 @@ static void nfs_umount_begin(struct super_block *sb)
                rpc_killall_tasks(rpc);
 }
 
-static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int version)
+static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void)
 {
        struct nfs_parsed_mount_data *data;
 
@@ -953,8 +963,8 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
                data->nfs_server.protocol = XPRT_TRANSPORT_TCP;
                data->auth_flavors[0]   = RPC_AUTH_UNIX;
                data->auth_flavor_len   = 1;
-               data->version           = version;
                data->minorversion      = 0;
+               data->need_mount        = true;
                data->net               = current->nsproxy->net_ns;
                security_init_mnt_opts(&data->lsm_opts);
        }
@@ -1674,8 +1684,8 @@ static int nfs_walk_authlist(struct nfs_parsed_mount_data *args,
  * Use the remote server's MOUNT service to request the NFS file handle
  * corresponding to the provided path.
  */
-static int nfs_try_mount(struct nfs_parsed_mount_data *args,
-                        struct nfs_fh *root_fh)
+static int nfs_request_mount(struct nfs_parsed_mount_data *args,
+                            struct nfs_fh *root_fh)
 {
        rpc_authflavor_t server_authlist[NFS_MAX_SECFLAVORS];
        unsigned int server_authlist_len = ARRAY_SIZE(server_authlist);
@@ -1738,6 +1748,26 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
        return nfs_walk_authlist(args, &request);
 }
 
+static struct dentry *nfs_try_mount(int flags, const char *dev_name,
+                                   struct nfs_mount_info *mount_info)
+{
+       int status;
+       struct nfs_server *server;
+
+       if (mount_info->parsed->need_mount) {
+               status = nfs_request_mount(mount_info->parsed, mount_info->mntfh);
+               if (status)
+                       return ERR_PTR(status);
+       }
+
+       /* Get a volume representation */
+       server = nfs_create_server(mount_info->parsed, mount_info->mntfh);
+       if (IS_ERR(server))
+               return ERR_CAST(server);
+
+       return nfs_fs_mount_common(&nfs_fs_type, server, flags, dev_name, mount_info);
+}
+
 /*
  * Split "dev_name" into "hostname:export_path".
  *
@@ -1826,10 +1856,10 @@ out_path:
  * + breaking back: trying proto=udp after proto=tcp, v2 after v3,
  *   mountproto=tcp after mountproto=udp, and so on
  */
-static int nfs_validate_mount_data(void *options,
-                                  struct nfs_parsed_mount_data *args,
-                                  struct nfs_fh *mntfh,
-                                  const char *dev_name)
+static int nfs23_validate_mount_data(void *options,
+                                    struct nfs_parsed_mount_data *args,
+                                    struct nfs_fh *mntfh,
+                                    const char *dev_name)
 {
        struct nfs_mount_data *data = (struct nfs_mount_data *)options;
        struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
@@ -1883,6 +1913,7 @@ static int nfs_validate_mount_data(void *options,
                args->acregmax          = data->acregmax;
                args->acdirmin          = data->acdirmin;
                args->acdirmax          = data->acdirmax;
+               args->need_mount        = false;
 
                memcpy(sap, &data->addr, sizeof(data->addr));
                args->nfs_server.addrlen = sizeof(data->addr);
@@ -1934,43 +1965,8 @@ static int nfs_validate_mount_data(void *options,
                }
 
                break;
-       default: {
-               int status;
-
-               if (nfs_parse_mount_options((char *)options, args) == 0)
-                       return -EINVAL;
-
-               if (!nfs_verify_server_address(sap))
-                       goto out_no_address;
-
-               if (args->version == 4)
-#ifdef CONFIG_NFS_V4
-                       return nfs4_validate_text_mount_data(options,
-                                                            args, dev_name);
-#else
-                       goto out_v4_not_compiled;
-#endif
-
-               nfs_set_port(sap, &args->nfs_server.port, 0);
-
-               nfs_set_mount_transport_protocol(args);
-
-               status = nfs_parse_devname(dev_name,
-                                          &args->nfs_server.hostname,
-                                          PAGE_SIZE,
-                                          &args->nfs_server.export_path,
-                                          NFS_MAXPATHLEN);
-               if (!status)
-                       status = nfs_try_mount(args, mntfh);
-
-               kfree(args->nfs_server.export_path);
-               args->nfs_server.export_path = NULL;
-
-               if (status)
-                       return status;
-
-               break;
-               }
+       default:
+               return NFS_TEXT_DATA;
        }
 
 #ifndef CONFIG_NFS_V3
@@ -1999,12 +1995,6 @@ out_v3_not_compiled:
        return -EPROTONOSUPPORT;
 #endif /* !CONFIG_NFS_V3 */
 
-#ifndef CONFIG_NFS_V4
-out_v4_not_compiled:
-       dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
-       return -EPROTONOSUPPORT;
-#endif /* !CONFIG_NFS_V4 */
-
 out_nomem:
        dfprintk(MOUNT, "NFS: not enough memory to handle mount options\n");
        return -ENOMEM;
@@ -2018,6 +2008,82 @@ out_invalid_fh:
        return -EINVAL;
 }
 
+#ifdef CONFIG_NFS_V4
+static int nfs_validate_mount_data(struct file_system_type *fs_type,
+                                  void *options,
+                                  struct nfs_parsed_mount_data *args,
+                                  struct nfs_fh *mntfh,
+                                  const char *dev_name)
+{
+       if (fs_type == &nfs_fs_type)
+               return nfs23_validate_mount_data(options, args, mntfh, dev_name);
+       return nfs4_validate_mount_data(options, args, dev_name);
+}
+#else
+static int nfs_validate_mount_data(struct file_system_type *fs_type,
+                                  void *options,
+                                  struct nfs_parsed_mount_data *args,
+                                  struct nfs_fh *mntfh,
+                                  const char *dev_name)
+{
+       return nfs23_validate_mount_data(options, args, mntfh, dev_name);
+}
+#endif
+
+static int nfs_validate_text_mount_data(void *options,
+                                       struct nfs_parsed_mount_data *args,
+                                       const char *dev_name)
+{
+       int port = 0;
+       int max_namelen = PAGE_SIZE;
+       int max_pathlen = NFS_MAXPATHLEN;
+       struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
+
+       if (nfs_parse_mount_options((char *)options, args) == 0)
+               return -EINVAL;
+
+       if (!nfs_verify_server_address(sap))
+               goto out_no_address;
+
+       if (args->version == 4) {
+#ifdef CONFIG_NFS_V4
+               port = NFS_PORT;
+               max_namelen = NFS4_MAXNAMLEN;
+               max_pathlen = NFS4_MAXPATHLEN;
+               nfs_validate_transport_protocol(args);
+               nfs4_validate_mount_flags(args);
+#else
+               goto out_v4_not_compiled;
+#endif /* CONFIG_NFS_V4 */
+       } else
+               nfs_set_mount_transport_protocol(args);
+
+       nfs_set_port(sap, &args->nfs_server.port, port);
+
+       if (args->auth_flavor_len > 1)
+               goto out_bad_auth;
+
+       return nfs_parse_devname(dev_name,
+                                  &args->nfs_server.hostname,
+                                  max_namelen,
+                                  &args->nfs_server.export_path,
+                                  max_pathlen);
+
+#ifndef CONFIG_NFS_V4
+out_v4_not_compiled:
+       dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
+       return -EPROTONOSUPPORT;
+#endif /* !CONFIG_NFS_V4 */
+
+out_no_address:
+       dfprintk(MOUNT, "NFS: mount program didn't pass remote address\n");
+       return -EINVAL;
+
+out_bad_auth:
+       dfprintk(MOUNT, "NFS: Too many RPC auth flavours specified\n");
+       return -EINVAL;
+}
+
 static int
 nfs_compare_remount_data(struct nfs_server *nfss,
                         struct nfs_parsed_mount_data *data)
@@ -2129,8 +2195,9 @@ static inline void nfs_initialise_sb(struct super_block *sb)
  * Finish setting up an NFS2/3 superblock
  */
 static void nfs_fill_super(struct super_block *sb,
-                          struct nfs_parsed_mount_data *data)
+                          struct nfs_mount_info *mount_info)
 {
+       struct nfs_parsed_mount_data *data = mount_info->parsed;
        struct nfs_server *server = NFS_SB(sb);
 
        sb->s_blocksize_bits = 0;
@@ -2154,8 +2221,9 @@ static void nfs_fill_super(struct super_block *sb,
  * Finish setting up a cloned NFS2/3 superblock
  */
 static void nfs_clone_super(struct super_block *sb,
-                           const struct super_block *old_sb)
+                           struct nfs_mount_info *mount_info)
 {
+       const struct super_block *old_sb = mount_info->cloned->sb;
        struct nfs_server *server = NFS_SB(sb);
 
        sb->s_blocksize_bits = old_sb->s_blocksize_bits;
@@ -2278,52 +2346,70 @@ static int nfs_compare_super(struct super_block *sb, void *data)
        return nfs_compare_mount_options(sb, server, mntflags);
 }
 
+#ifdef CONFIG_NFS_FSCACHE
+static void nfs_get_cache_cookie(struct super_block *sb,
+                                struct nfs_parsed_mount_data *parsed,
+                                struct nfs_clone_mount *cloned)
+{
+       char *uniq = NULL;
+       int ulen = 0;
+
+       if (parsed && parsed->fscache_uniq) {
+               uniq = parsed->fscache_uniq;
+               ulen = strlen(parsed->fscache_uniq);
+       } else if (cloned) {
+               struct nfs_server *mnt_s = NFS_SB(cloned->sb);
+               if (mnt_s->fscache_key) {
+                       uniq = mnt_s->fscache_key->key.uniquifier;
+                       ulen = mnt_s->fscache_key->key.uniq_len;
+               };
+       }
+
+       nfs_fscache_get_super_cookie(sb, uniq, ulen);
+}
+#else
+static void nfs_get_cache_cookie(struct super_block *sb,
+                                struct nfs_parsed_mount_data *parsed,
+                                struct nfs_clone_mount *cloned)
+{
+}
+#endif
+
 static int nfs_bdi_register(struct nfs_server *server)
 {
        return bdi_register_dev(&server->backing_dev_info, server->s_dev);
 }
 
-static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
-       int flags, const char *dev_name, void *raw_data)
+static int nfs_set_sb_security(struct super_block *s, struct dentry *mntroot,
+                              struct nfs_mount_info *mount_info)
+{
+       return security_sb_set_mnt_opts(s, &mount_info->parsed->lsm_opts);
+}
+
+static int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot,
+                                struct nfs_mount_info *mount_info)
+{
+       /* clone any lsm security options from the parent to the new sb */
+       security_sb_clone_mnt_opts(mount_info->cloned->sb, s);
+       if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops)
+               return -ESTALE;
+       return 0;
+}
+
+static struct dentry *nfs_fs_mount_common(struct file_system_type *fs_type,
+                                         struct nfs_server *server,
+                                         int flags, const char *dev_name,
+                                         struct nfs_mount_info *mount_info)
 {
-       struct nfs_server *server = NULL;
        struct super_block *s;
-       struct nfs_parsed_mount_data *data;
-       struct nfs_fh *mntfh;
        struct dentry *mntroot = ERR_PTR(-ENOMEM);
        int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
        struct nfs_sb_mountdata sb_mntdata = {
                .mntflags = flags,
+               .server = server,
        };
        int error;
 
-       data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
-       mntfh = nfs_alloc_fhandle();
-       if (data == NULL || mntfh == NULL)
-               goto out;
-
-       /* Validate the mount data */
-       error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
-       if (error < 0) {
-               mntroot = ERR_PTR(error);
-               goto out;
-       }
-
-#ifdef CONFIG_NFS_V4
-       if (data->version == 4) {
-               mntroot = nfs4_try_mount(flags, dev_name, data);
-               goto out;
-       }
-#endif /* CONFIG_NFS_V4 */
-
-       /* Get a volume representation */
-       server = nfs_create_server(data, mntfh);
-       if (IS_ERR(server)) {
-               mntroot = ERR_CAST(server);
-               goto out;
-       }
-       sb_mntdata.server = server;
-
        if (server->flags & NFS_MOUNT_UNSHARED)
                compare_super = NULL;
 
@@ -2351,23 +2437,21 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
 
        if (!s->s_root) {
                /* initial superblock/root creation */
-               nfs_fill_super(s, data);
-               nfs_fscache_get_super_cookie(s, data->fscache_uniq, NULL);
+               mount_info->fill_super(s, mount_info);
+               nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
        }
 
-       mntroot = nfs_get_root(s, mntfh, dev_name);
+       mntroot = nfs_get_root(s, mount_info->mntfh, dev_name);
        if (IS_ERR(mntroot))
                goto error_splat_super;
 
-       error = security_sb_set_mnt_opts(s, &data->lsm_opts);
+       error = mount_info->set_security(s, mntroot, mount_info);
        if (error)
                goto error_splat_root;
 
        s->s_flags |= MS_ACTIVE;
 
 out:
-       nfs_free_parsed_mount_data(data);
-       nfs_free_fhandle(mntfh);
        return mntroot;
 
 out_err_nosb:
@@ -2385,6 +2469,43 @@ error_splat_bdi:
        goto out;
 }
 
+static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
+       int flags, const char *dev_name, void *raw_data)
+{
+       struct nfs_mount_info mount_info = {
+               .fill_super = nfs_fill_super,
+               .set_security = nfs_set_sb_security,
+       };
+       struct dentry *mntroot = ERR_PTR(-ENOMEM);
+       int error;
+
+       mount_info.parsed = nfs_alloc_parsed_mount_data();
+       mount_info.mntfh = nfs_alloc_fhandle();
+       if (mount_info.parsed == NULL || mount_info.mntfh == NULL)
+               goto out;
+
+       /* Validate the mount data */
+       error = nfs_validate_mount_data(fs_type, raw_data, mount_info.parsed, mount_info.mntfh, dev_name);
+       if (error == NFS_TEXT_DATA)
+               error = nfs_validate_text_mount_data(raw_data, mount_info.parsed, dev_name);
+       if (error < 0) {
+               mntroot = ERR_PTR(error);
+               goto out;
+       }
+
+#ifdef CONFIG_NFS_V4
+       if (mount_info.parsed->version == 4)
+               mntroot = nfs4_try_mount(flags, dev_name, &mount_info);
+       else
+#endif /* CONFIG_NFS_V4 */
+               mntroot = nfs_try_mount(flags, dev_name, &mount_info);
+
+out:
+       nfs_free_parsed_mount_data(mount_info.parsed);
+       nfs_free_fhandle(mount_info.mntfh);
+       return mntroot;
+}
+
 /*
  * Ensure that we unregister the bdi before kill_anon_super
  * releases the device name
@@ -2409,93 +2530,51 @@ static void nfs_kill_super(struct super_block *s)
 }
 
 /*
- * Clone an NFS2/3 server record on xdev traversal (FSID-change)
+ * Clone an NFS2/3/4 server record on xdev traversal (FSID-change)
  */
 static struct dentry *
-nfs_xdev_mount(struct file_system_type *fs_type, int flags,
-               const char *dev_name, void *raw_data)
+nfs_xdev_mount_common(struct file_system_type *fs_type, int flags,
+               const char *dev_name, struct nfs_mount_info *mount_info)
 {
-       struct nfs_clone_mount *data = raw_data;
-       struct super_block *s;
+       struct nfs_clone_mount *data = mount_info->cloned;
        struct nfs_server *server;
-       struct dentry *mntroot;
-       int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
-       struct nfs_sb_mountdata sb_mntdata = {
-               .mntflags = flags,
-       };
+       struct dentry *mntroot = ERR_PTR(-ENOMEM);
        int error;
 
-       dprintk("--> nfs_xdev_mount()\n");
+       dprintk("--> nfs_xdev_mount_common()\n");
+
+       mount_info->mntfh = data->fh;
 
        /* create a new volume representation */
        server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
        if (IS_ERR(server)) {
                error = PTR_ERR(server);
-               goto out_err_noserver;
-       }
-       sb_mntdata.server = server;
-
-       if (server->flags & NFS_MOUNT_UNSHARED)
-               compare_super = NULL;
-
-       /* -o noac implies -o sync */
-       if (server->flags & NFS_MOUNT_NOAC)
-               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
-       /* Get a superblock - note that we may end up sharing one that already exists */
-       s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
-       if (IS_ERR(s)) {
-               error = PTR_ERR(s);
-               goto out_err_nosb;
-       }
-
-       if (s->s_fs_info != server) {
-               nfs_free_server(server);
-               server = NULL;
-       } else {
-               error = nfs_bdi_register(server);
-               if (error)
-                       goto error_splat_bdi;
-       }
-
-       if (!s->s_root) {
-               /* initial superblock/root creation */
-               nfs_clone_super(s, data->sb);
-               nfs_fscache_get_super_cookie(s, NULL, data);
-       }
-
-       mntroot = nfs_get_root(s, data->fh, dev_name);
-       if (IS_ERR(mntroot)) {
-               error = PTR_ERR(mntroot);
-               goto error_splat_super;
-       }
-       if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
-               dput(mntroot);
-               error = -ESTALE;
-               goto error_splat_super;
+               goto out_err;
        }
 
-       s->s_flags |= MS_ACTIVE;
-
-       /* clone any lsm security options from the parent to the new sb */
-       security_sb_clone_mnt_opts(data->sb, s);
-
-       dprintk("<-- nfs_xdev_mount() = 0\n");
+       mntroot = nfs_fs_mount_common(fs_type, server, flags, dev_name, mount_info);
+       dprintk("<-- nfs_xdev_mount_common() = 0\n");
+out:
        return mntroot;
 
-out_err_nosb:
-       nfs_free_server(server);
-out_err_noserver:
-       dprintk("<-- nfs_xdev_mount() = %d [error]\n", error);
-       return ERR_PTR(error);
+out_err:
+       dprintk("<-- nfs_xdev_mount_common() = %d [error]\n", error);
+       goto out;
+}
 
-error_splat_super:
-       if (server && !s->s_root)
-               bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
-       deactivate_locked_super(s);
-       dprintk("<-- nfs_xdev_mount() = %d [splat]\n", error);
-       return ERR_PTR(error);
+/*
+ * Clone an NFS2/3 server record on xdev traversal (FSID-change)
+ */
+static struct dentry *
+nfs_xdev_mount(struct file_system_type *fs_type, int flags,
+               const char *dev_name, void *raw_data)
+{
+       struct nfs_mount_info mount_info = {
+               .fill_super = nfs_clone_super,
+               .set_security = nfs_clone_sb_security,
+               .cloned   = raw_data,
+       };
+       return nfs_xdev_mount_common(&nfs_fs_type, flags, dev_name, &mount_info);
 }
 
 #ifdef CONFIG_NFS_V4
@@ -2504,8 +2583,9 @@ error_splat_bdi:
  * Finish setting up a cloned NFS4 superblock
  */
 static void nfs4_clone_super(struct super_block *sb,
-                           const struct super_block *old_sb)
+                            struct nfs_mount_info *mount_info)
 {
+       const struct super_block *old_sb = mount_info->cloned->sb;
        sb->s_blocksize_bits = old_sb->s_blocksize_bits;
        sb->s_blocksize = old_sb->s_blocksize;
        sb->s_maxbytes = old_sb->s_maxbytes;
@@ -2523,7 +2603,8 @@ static void nfs4_clone_super(struct super_block *sb,
 /*
  * Set up an NFS4 superblock
  */
-static void nfs4_fill_super(struct super_block *sb)
+static void nfs4_fill_super(struct super_block *sb,
+                           struct nfs_mount_info *mount_info)
 {
        sb->s_time_gran = 1;
        sb->s_op = &nfs4_sops;
@@ -2542,37 +2623,6 @@ static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *args)
                         NFS_MOUNT_LOCAL_FLOCK|NFS_MOUNT_LOCAL_FCNTL);
 }
 
-static int nfs4_validate_text_mount_data(void *options,
-                                        struct nfs_parsed_mount_data *args,
-                                        const char *dev_name)
-{
-       struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
-
-       nfs_set_port(sap, &args->nfs_server.port, NFS_PORT);
-
-       nfs_validate_transport_protocol(args);
-
-       nfs4_validate_mount_flags(args);
-
-       if (args->version != 4) {
-               dfprintk(MOUNT,
-                        "NFS4: Illegal mount version\n");
-               return -EINVAL;
-       }
-
-       if (args->auth_flavor_len > 1) {
-               dfprintk(MOUNT,
-                        "NFS4: Too many RPC auth flavours specified\n");
-               return -EINVAL;
-       }
-
-       return nfs_parse_devname(dev_name,
-                                  &args->nfs_server.hostname,
-                                  NFS4_MAXNAMLEN,
-                                  &args->nfs_server.export_path,
-                                  NFS4_MAXPATHLEN);
-}
-
 /*
  * Validate NFSv4 mount options
  */
@@ -2643,13 +2693,7 @@ static int nfs4_validate_mount_data(void *options,
 
                break;
        default:
-               if (nfs_parse_mount_options((char *)options, args) == 0)
-                       return -EINVAL;
-
-               if (!nfs_verify_server_address(sap))
-                       return -EINVAL;
-
-               return nfs4_validate_text_mount_data(options, args, dev_name);
+               return NFS_TEXT_DATA;
        }
 
        return 0;
@@ -2673,91 +2717,26 @@ out_no_address:
  */
 static struct dentry *
 nfs4_remote_mount(struct file_system_type *fs_type, int flags,
-                 const char *dev_name, void *raw_data)
+                 const char *dev_name, void *info)
 {
-       struct nfs_parsed_mount_data *data = raw_data;
-       struct super_block *s;
+       struct nfs_mount_info *mount_info = info;
        struct nfs_server *server;
-       struct nfs_fh *mntfh;
-       struct dentry *mntroot;
-       int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
-       struct nfs_sb_mountdata sb_mntdata = {
-               .mntflags = flags,
-       };
-       int error = -ENOMEM;
+       struct dentry *mntroot = ERR_PTR(-ENOMEM);
 
-       mntfh = nfs_alloc_fhandle();
-       if (data == NULL || mntfh == NULL)
-               goto out;
+       mount_info->fill_super = nfs4_fill_super;
+       mount_info->set_security = nfs_set_sb_security;
 
        /* Get a volume representation */
-       server = nfs4_create_server(data, mntfh);
+       server = nfs4_create_server(mount_info->parsed, mount_info->mntfh);
        if (IS_ERR(server)) {
-               error = PTR_ERR(server);
+               mntroot = ERR_CAST(server);
                goto out;
        }
-       sb_mntdata.server = server;
 
-       if (server->flags & NFS4_MOUNT_UNSHARED)
-               compare_super = NULL;
-
-       /* -o noac implies -o sync */
-       if (server->flags & NFS_MOUNT_NOAC)
-               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
-       /* Get a superblock - note that we may end up sharing one that already exists */
-       s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
-       if (IS_ERR(s)) {
-               error = PTR_ERR(s);
-               goto out_free;
-       }
-
-       if (s->s_fs_info != server) {
-               nfs_free_server(server);
-               server = NULL;
-       } else {
-               error = nfs_bdi_register(server);
-               if (error)
-                       goto error_splat_bdi;
-       }
-
-       if (!s->s_root) {
-               /* initial superblock/root creation */
-               nfs4_fill_super(s);
-               nfs_fscache_get_super_cookie(s, data->fscache_uniq, NULL);
-       }
-
-       mntroot = nfs4_get_root(s, mntfh, dev_name);
-       if (IS_ERR(mntroot)) {
-               error = PTR_ERR(mntroot);
-               goto error_splat_super;
-       }
-
-       error = security_sb_set_mnt_opts(s, &data->lsm_opts);
-       if (error)
-               goto error_splat_root;
-
-       s->s_flags |= MS_ACTIVE;
-
-       nfs_free_fhandle(mntfh);
-       return mntroot;
+       mntroot = nfs_fs_mount_common(fs_type, server, flags, dev_name, mount_info);
 
 out:
-       nfs_free_fhandle(mntfh);
-       return ERR_PTR(error);
-
-out_free:
-       nfs_free_server(server);
-       goto out;
-
-error_splat_root:
-       dput(mntroot);
-error_splat_super:
-       if (server && !s->s_root)
-               bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
-       deactivate_locked_super(s);
-       goto out;
+       return mntroot;
 }
 
 static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
@@ -2869,17 +2848,18 @@ static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
 }
 
 static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
-                        struct nfs_parsed_mount_data *data)
+                        struct nfs_mount_info *mount_info)
 {
        char *export_path;
        struct vfsmount *root_mnt;
        struct dentry *res;
+       struct nfs_parsed_mount_data *data = mount_info->parsed;
 
        dfprintk(MOUNT, "--> nfs4_try_mount()\n");
 
        export_path = data->nfs_server.export_path;
        data->nfs_server.export_path = "/";
-       root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, data,
+       root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
                        data->nfs_server.hostname);
        data->nfs_server.export_path = export_path;
 
@@ -2891,38 +2871,6 @@ static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
        return res;
 }
 
-/*
- * Get the superblock for an NFS4 mountpoint
- */
-static struct dentry *nfs4_mount(struct file_system_type *fs_type,
-       int flags, const char *dev_name, void *raw_data)
-{
-       struct nfs_parsed_mount_data *data;
-       int error = -ENOMEM;
-       struct dentry *res = ERR_PTR(-ENOMEM);
-
-       data = nfs_alloc_parsed_mount_data(4);
-       if (data == NULL)
-               goto out;
-
-       /* Validate the mount data */
-       error = nfs4_validate_mount_data(raw_data, data, dev_name);
-       if (error < 0) {
-               res = ERR_PTR(error);
-               goto out;
-       }
-
-       res = nfs4_try_mount(flags, dev_name, data);
-       if (IS_ERR(res))
-               error = PTR_ERR(res);
-
-out:
-       nfs_free_parsed_mount_data(data);
-       dprintk("<-- nfs4_mount() = %d%s\n", error,
-                       error != 0 ? " [error]" : "");
-       return res;
-}
-
 static void nfs4_kill_super(struct super_block *sb)
 {
        struct nfs_server *server = NFS_SB(sb);
@@ -2942,181 +2890,43 @@ static struct dentry *
 nfs4_xdev_mount(struct file_system_type *fs_type, int flags,
                 const char *dev_name, void *raw_data)
 {
-       struct nfs_clone_mount *data = raw_data;
-       struct super_block *s;
-       struct nfs_server *server;
-       struct dentry *mntroot;
-       int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
-       struct nfs_sb_mountdata sb_mntdata = {
-               .mntflags = flags,
+       struct nfs_mount_info mount_info = {
+               .fill_super = nfs4_clone_super,
+               .set_security = nfs_clone_sb_security,
+               .cloned = raw_data,
        };
-       int error;
-
-       dprintk("--> nfs4_xdev_mount()\n");
-
-       /* create a new volume representation */
-       server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
-       if (IS_ERR(server)) {
-               error = PTR_ERR(server);
-               goto out_err_noserver;
-       }
-       sb_mntdata.server = server;
-
-       if (server->flags & NFS4_MOUNT_UNSHARED)
-               compare_super = NULL;
-
-       /* -o noac implies -o sync */
-       if (server->flags & NFS_MOUNT_NOAC)
-               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
-       /* Get a superblock - note that we may end up sharing one that already exists */
-       s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
-       if (IS_ERR(s)) {
-               error = PTR_ERR(s);
-               goto out_err_nosb;
-       }
-
-       if (s->s_fs_info != server) {
-               nfs_free_server(server);
-               server = NULL;
-       } else {
-               error = nfs_bdi_register(server);
-               if (error)
-                       goto error_splat_bdi;
-       }
-
-       if (!s->s_root) {
-               /* initial superblock/root creation */
-               nfs4_clone_super(s, data->sb);
-               nfs_fscache_get_super_cookie(s, NULL, data);
-       }
-
-       mntroot = nfs4_get_root(s, data->fh, dev_name);
-       if (IS_ERR(mntroot)) {
-               error = PTR_ERR(mntroot);
-               goto error_splat_super;
-       }
-       if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
-               dput(mntroot);
-               error = -ESTALE;
-               goto error_splat_super;
-       }
-
-       s->s_flags |= MS_ACTIVE;
-
-       security_sb_clone_mnt_opts(data->sb, s);
-
-       dprintk("<-- nfs4_xdev_mount() = 0\n");
-       return mntroot;
-
-out_err_nosb:
-       nfs_free_server(server);
-out_err_noserver:
-       dprintk("<-- nfs4_xdev_mount() = %d [error]\n", error);
-       return ERR_PTR(error);
-
-error_splat_super:
-       if (server && !s->s_root)
-               bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
-       deactivate_locked_super(s);
-       dprintk("<-- nfs4_xdev_mount() = %d [splat]\n", error);
-       return ERR_PTR(error);
+       return nfs_xdev_mount_common(&nfs4_fs_type, flags, dev_name, &mount_info);
 }
 
 static struct dentry *
 nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags,
                           const char *dev_name, void *raw_data)
 {
-       struct nfs_clone_mount *data = raw_data;
-       struct super_block *s;
-       struct nfs_server *server;
-       struct dentry *mntroot;
-       struct nfs_fh *mntfh;
-       int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
-       struct nfs_sb_mountdata sb_mntdata = {
-               .mntflags = flags,
+       struct nfs_mount_info mount_info = {
+               .fill_super = nfs4_fill_super,
+               .set_security = nfs_clone_sb_security,
+               .cloned = raw_data,
        };
-       int error = -ENOMEM;
+       struct nfs_server *server;
+       struct dentry *mntroot = ERR_PTR(-ENOMEM);
 
        dprintk("--> nfs4_referral_get_sb()\n");
 
-       mntfh = nfs_alloc_fhandle();
-       if (mntfh == NULL)
-               goto out_err_nofh;
+       mount_info.mntfh = nfs_alloc_fhandle();
+       if (mount_info.cloned == NULL || mount_info.mntfh == NULL)
+               goto out;
 
        /* create a new volume representation */
-       server = nfs4_create_referral_server(data, mntfh);
+       server = nfs4_create_referral_server(mount_info.cloned, mount_info.mntfh);
        if (IS_ERR(server)) {
-               error = PTR_ERR(server);
-               goto out_err_noserver;
-       }
-       sb_mntdata.server = server;
-
-       if (server->flags & NFS4_MOUNT_UNSHARED)
-               compare_super = NULL;
-
-       /* -o noac implies -o sync */
-       if (server->flags & NFS_MOUNT_NOAC)
-               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
-       /* Get a superblock - note that we may end up sharing one that already exists */
-       s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
-       if (IS_ERR(s)) {
-               error = PTR_ERR(s);
-               goto out_err_nosb;
-       }
-
-       if (s->s_fs_info != server) {
-               nfs_free_server(server);
-               server = NULL;
-       } else {
-               error = nfs_bdi_register(server);
-               if (error)
-                       goto error_splat_bdi;
-       }
-
-       if (!s->s_root) {
-               /* initial superblock/root creation */
-               nfs4_fill_super(s);
-               nfs_fscache_get_super_cookie(s, NULL, data);
-       }
-
-       mntroot = nfs4_get_root(s, mntfh, dev_name);
-       if (IS_ERR(mntroot)) {
-               error = PTR_ERR(mntroot);
-               goto error_splat_super;
-       }
-       if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
-               dput(mntroot);
-               error = -ESTALE;
-               goto error_splat_super;
+               mntroot = ERR_CAST(server);
+               goto out;
        }
 
-       s->s_flags |= MS_ACTIVE;
-
-       security_sb_clone_mnt_opts(data->sb, s);
-
-       nfs_free_fhandle(mntfh);
-       dprintk("<-- nfs4_referral_get_sb() = 0\n");
+       mntroot = nfs_fs_mount_common(&nfs4_fs_type, server, flags, dev_name, &mount_info);
+out:
+       nfs_free_fhandle(mount_info.mntfh);
        return mntroot;
-
-out_err_nosb:
-       nfs_free_server(server);
-out_err_noserver:
-       nfs_free_fhandle(mntfh);
-out_err_nofh:
-       dprintk("<-- nfs4_referral_get_sb() = %d [error]\n", error);
-       return ERR_PTR(error);
-
-error_splat_super:
-       if (server && !s->s_root)
-               bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
-       deactivate_locked_super(s);
-       nfs_free_fhandle(mntfh);
-       dprintk("<-- nfs4_referral_get_sb() = %d [splat]\n", error);
-       return ERR_PTR(error);
 }
 
 /*
index c07462320f6b5c41c09ba2ff054e75048951691f..e6fe3d69d14cbe0a5b75fc2cc5905c875f4c0181 100644 (file)
 /*
  * Local function declarations
  */
-static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
-                                 struct inode *inode, int ioflags);
 static void nfs_redirty_request(struct nfs_page *req);
-static const struct rpc_call_ops nfs_write_partial_ops;
-static const struct rpc_call_ops nfs_write_full_ops;
+static const struct rpc_call_ops nfs_write_common_ops;
 static const struct rpc_call_ops nfs_commit_ops;
+static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
+static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
 
 static struct kmem_cache *nfs_wdata_cachep;
 static mempool_t *nfs_wdata_mempool;
+static struct kmem_cache *nfs_cdata_cachep;
 static mempool_t *nfs_commit_mempool;
 
-struct nfs_write_data *nfs_commitdata_alloc(void)
+struct nfs_commit_data *nfs_commitdata_alloc(void)
 {
-       struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
+       struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
 
        if (p) {
                memset(p, 0, sizeof(*p));
@@ -62,46 +62,73 @@ struct nfs_write_data *nfs_commitdata_alloc(void)
 }
 EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
 
-void nfs_commit_free(struct nfs_write_data *p)
+void nfs_commit_free(struct nfs_commit_data *p)
 {
-       if (p && (p->pagevec != &p->page_array[0]))
-               kfree(p->pagevec);
        mempool_free(p, nfs_commit_mempool);
 }
 EXPORT_SYMBOL_GPL(nfs_commit_free);
 
-struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
+struct nfs_write_header *nfs_writehdr_alloc(void)
 {
-       struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
+       struct nfs_write_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
 
        if (p) {
+               struct nfs_pgio_header *hdr = &p->header;
+
                memset(p, 0, sizeof(*p));
-               INIT_LIST_HEAD(&p->pages);
-               p->npages = pagecount;
-               if (pagecount <= ARRAY_SIZE(p->page_array))
-                       p->pagevec = p->page_array;
-               else {
-                       p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
-                       if (!p->pagevec) {
-                               mempool_free(p, nfs_wdata_mempool);
-                               p = NULL;
-                       }
-               }
+               INIT_LIST_HEAD(&hdr->pages);
+               INIT_LIST_HEAD(&hdr->rpc_list);
+               spin_lock_init(&hdr->lock);
+               atomic_set(&hdr->refcnt, 0);
        }
        return p;
 }
 
-void nfs_writedata_free(struct nfs_write_data *p)
+static struct nfs_write_data *nfs_writedata_alloc(struct nfs_pgio_header *hdr,
+                                                 unsigned int pagecount)
+{
+       struct nfs_write_data *data, *prealloc;
+
+       prealloc = &container_of(hdr, struct nfs_write_header, header)->rpc_data;
+       if (prealloc->header == NULL)
+               data = prealloc;
+       else
+               data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               goto out;
+
+       if (nfs_pgarray_set(&data->pages, pagecount)) {
+               data->header = hdr;
+               atomic_inc(&hdr->refcnt);
+       } else {
+               if (data != prealloc)
+                       kfree(data);
+               data = NULL;
+       }
+out:
+       return data;
+}
+
+void nfs_writehdr_free(struct nfs_pgio_header *hdr)
 {
-       if (p && (p->pagevec != &p->page_array[0]))
-               kfree(p->pagevec);
-       mempool_free(p, nfs_wdata_mempool);
+       struct nfs_write_header *whdr = container_of(hdr, struct nfs_write_header, header);
+       mempool_free(whdr, nfs_wdata_mempool);
 }
 
 void nfs_writedata_release(struct nfs_write_data *wdata)
 {
+       struct nfs_pgio_header *hdr = wdata->header;
+       struct nfs_write_header *write_header = container_of(hdr, struct nfs_write_header, header);
+
        put_nfs_open_context(wdata->args.context);
-       nfs_writedata_free(wdata);
+       if (wdata->pages.pagevec != wdata->pages.page_array)
+               kfree(wdata->pages.pagevec);
+       if (wdata != &write_header->rpc_data)
+               kfree(wdata);
+       else
+               wdata->header = NULL;
+       if (atomic_dec_and_test(&hdr->refcnt))
+               hdr->completion_ops->completion(hdr);
 }
 
 static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
@@ -203,7 +230,6 @@ static int nfs_set_page_writeback(struct page *page)
                struct inode *inode = page->mapping->host;
                struct nfs_server *nfss = NFS_SERVER(inode);
 
-               page_cache_get(page);
                if (atomic_long_inc_return(&nfss->writeback) >
                                NFS_CONGESTION_ON_THRESH) {
                        set_bdi_congested(&nfss->backing_dev_info,
@@ -219,7 +245,6 @@ static void nfs_end_page_writeback(struct page *page)
        struct nfs_server *nfss = NFS_SERVER(inode);
 
        end_page_writeback(page);
-       page_cache_release(page);
        if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
                clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
 }
@@ -235,10 +260,10 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblo
                req = nfs_page_find_request_locked(page);
                if (req == NULL)
                        break;
-               if (nfs_lock_request_dontget(req))
+               if (nfs_lock_request(req))
                        break;
                /* Note: If we hold the page lock, as is the case in nfs_writepage,
-                *       then the call to nfs_lock_request_dontget() will always
+                *       then the call to nfs_lock_request() will always
                 *       succeed provided that someone hasn't already marked the
                 *       request as dirty (in which case we don't care).
                 */
@@ -310,7 +335,8 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc
        struct nfs_pageio_descriptor pgio;
        int err;
 
-       nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
+       nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
+                             &nfs_async_write_completion_ops);
        err = nfs_do_writepage(page, wbc, &pgio);
        nfs_pageio_complete(&pgio);
        if (err < 0)
@@ -353,7 +379,8 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 
        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
 
-       nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
+       nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
+                             &nfs_async_write_completion_ops);
        err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
        nfs_pageio_complete(&pgio);
 
@@ -379,7 +406,7 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
        struct nfs_inode *nfsi = NFS_I(inode);
 
        /* Lock the request! */
-       nfs_lock_request_dontget(req);
+       nfs_lock_request(req);
 
        spin_lock(&inode->i_lock);
        if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
@@ -421,65 +448,88 @@ nfs_mark_request_dirty(struct nfs_page *req)
 /**
  * nfs_request_add_commit_list - add request to a commit list
  * @req: pointer to a struct nfs_page
- * @head: commit list head
+ * @dst: commit list head
+ * @cinfo: holds list lock and accounting info
  *
- * This sets the PG_CLEAN bit, updates the inode global count of
+ * This sets the PG_CLEAN bit, updates the cinfo count of
  * number of outstanding requests requiring a commit as well as
  * the MM page stats.
  *
- * The caller must _not_ hold the inode->i_lock, but must be
+ * The caller must _not_ hold the cinfo->lock, but must be
  * holding the nfs_page lock.
  */
 void
-nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head)
+nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
+                           struct nfs_commit_info *cinfo)
 {
-       struct inode *inode = req->wb_context->dentry->d_inode;
-
        set_bit(PG_CLEAN, &(req)->wb_flags);
-       spin_lock(&inode->i_lock);
-       nfs_list_add_request(req, head);
-       NFS_I(inode)->ncommit++;
-       spin_unlock(&inode->i_lock);
-       inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-       inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
-       __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+       spin_lock(cinfo->lock);
+       nfs_list_add_request(req, dst);
+       cinfo->mds->ncommit++;
+       spin_unlock(cinfo->lock);
+       if (!cinfo->dreq) {
+               inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+               inc_bdi_stat(req->wb_page->mapping->backing_dev_info,
+                            BDI_RECLAIMABLE);
+               __mark_inode_dirty(req->wb_context->dentry->d_inode,
+                                  I_DIRTY_DATASYNC);
+       }
 }
 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
 
 /**
  * nfs_request_remove_commit_list - Remove request from a commit list
  * @req: pointer to a nfs_page
+ * @cinfo: holds list lock and accounting info
  *
- * This clears the PG_CLEAN bit, and updates the inode global count of
+ * This clears the PG_CLEAN bit, and updates the cinfo's count of
  * number of outstanding requests requiring a commit
  * It does not update the MM page stats.
  *
- * The caller _must_ hold the inode->i_lock and the nfs_page lock.
+ * The caller _must_ hold the cinfo->lock and the nfs_page lock.
  */
 void
-nfs_request_remove_commit_list(struct nfs_page *req)
+nfs_request_remove_commit_list(struct nfs_page *req,
+                              struct nfs_commit_info *cinfo)
 {
-       struct inode *inode = req->wb_context->dentry->d_inode;
-
        if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
                return;
        nfs_list_remove_request(req);
-       NFS_I(inode)->ncommit--;
+       cinfo->mds->ncommit--;
 }
 EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
 
+static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
+                                     struct inode *inode)
+{
+       cinfo->lock = &inode->i_lock;
+       cinfo->mds = &NFS_I(inode)->commit_info;
+       cinfo->ds = pnfs_get_ds_info(inode);
+       cinfo->dreq = NULL;
+       cinfo->completion_ops = &nfs_commit_completion_ops;
+}
+
+void nfs_init_cinfo(struct nfs_commit_info *cinfo,
+                   struct inode *inode,
+                   struct nfs_direct_req *dreq)
+{
+       if (dreq)
+               nfs_init_cinfo_from_dreq(cinfo, dreq);
+       else
+               nfs_init_cinfo_from_inode(cinfo, inode);
+}
+EXPORT_SYMBOL_GPL(nfs_init_cinfo);
 
 /*
  * Add a request to the inode's commit list.
  */
-static void
-nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+void
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+                       struct nfs_commit_info *cinfo)
 {
-       struct inode *inode = req->wb_context->dentry->d_inode;
-
-       if (pnfs_mark_request_commit(req, lseg))
+       if (pnfs_mark_request_commit(req, lseg, cinfo))
                return;
-       nfs_request_add_commit_list(req, &NFS_I(inode)->commit_list);
+       nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
 }
 
 static void
@@ -494,11 +544,13 @@ nfs_clear_request_commit(struct nfs_page *req)
 {
        if (test_bit(PG_CLEAN, &req->wb_flags)) {
                struct inode *inode = req->wb_context->dentry->d_inode;
+               struct nfs_commit_info cinfo;
 
-               if (!pnfs_clear_request_commit(req)) {
-                       spin_lock(&inode->i_lock);
-                       nfs_request_remove_commit_list(req);
-                       spin_unlock(&inode->i_lock);
+               nfs_init_cinfo_from_inode(&cinfo, inode);
+               if (!pnfs_clear_request_commit(req, &cinfo)) {
+                       spin_lock(cinfo.lock);
+                       nfs_request_remove_commit_list(req, &cinfo);
+                       spin_unlock(cinfo.lock);
                }
                nfs_clear_page_commit(req->wb_page);
        }
@@ -508,28 +560,25 @@ static inline
 int nfs_write_need_commit(struct nfs_write_data *data)
 {
        if (data->verf.committed == NFS_DATA_SYNC)
-               return data->lseg == NULL;
-       else
-               return data->verf.committed != NFS_FILE_SYNC;
+               return data->header->lseg == NULL;
+       return data->verf.committed != NFS_FILE_SYNC;
 }
 
-static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req,
-                                 struct nfs_write_data *data)
+#else
+static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
+                                     struct inode *inode)
 {
-       if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
-               nfs_mark_request_commit(req, data->lseg);
-               return 1;
-       }
-       if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
-               nfs_mark_request_dirty(req);
-               return 1;
-       }
-       return 0;
 }
-#else
-static void
-nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+
+void nfs_init_cinfo(struct nfs_commit_info *cinfo,
+                   struct inode *inode,
+                   struct nfs_direct_req *dreq)
+{
+}
+
+void
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+                       struct nfs_commit_info *cinfo)
 {
 }
 
@@ -544,25 +593,57 @@ int nfs_write_need_commit(struct nfs_write_data *data)
        return 0;
 }
 
-static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req,
-                                 struct nfs_write_data *data)
+#endif
+
+static void nfs_write_completion(struct nfs_pgio_header *hdr)
 {
-       return 0;
+       struct nfs_commit_info cinfo;
+       unsigned long bytes = 0;
+
+       if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+               goto out;
+       nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
+       while (!list_empty(&hdr->pages)) {
+               struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+
+               bytes += req->wb_bytes;
+               nfs_list_remove_request(req);
+               if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
+                   (hdr->good_bytes < bytes)) {
+                       nfs_set_pageerror(req->wb_page);
+                       nfs_context_set_write_error(req->wb_context, hdr->error);
+                       goto remove_req;
+               }
+               if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
+                       nfs_mark_request_dirty(req);
+                       goto next;
+               }
+               if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
+                       nfs_mark_request_commit(req, hdr->lseg, &cinfo);
+                       goto next;
+               }
+remove_req:
+               nfs_inode_remove_request(req);
+next:
+               nfs_unlock_request(req);
+               nfs_end_page_writeback(req->wb_page);
+               nfs_release_request(req);
+       }
+out:
+       hdr->release(hdr);
 }
-#endif
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-static int
-nfs_need_commit(struct nfs_inode *nfsi)
+static unsigned long
+nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
 {
-       return nfsi->ncommit > 0;
+       return cinfo->mds->ncommit;
 }
 
-/* i_lock held by caller */
-static int
-nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
-               spinlock_t *lock)
+/* cinfo->lock held by caller */
+int
+nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
+                    struct nfs_commit_info *cinfo, int max)
 {
        struct nfs_page *req, *tmp;
        int ret = 0;
@@ -570,12 +651,13 @@ nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
        list_for_each_entry_safe(req, tmp, src, wb_list) {
                if (!nfs_lock_request(req))
                        continue;
-               if (cond_resched_lock(lock))
+               kref_get(&req->wb_kref);
+               if (cond_resched_lock(cinfo->lock))
                        list_safe_reset_next(req, tmp, wb_list);
-               nfs_request_remove_commit_list(req);
+               nfs_request_remove_commit_list(req, cinfo);
                nfs_list_add_request(req, dst);
                ret++;
-               if (ret == max)
+               if ((ret == max) && !cinfo->dreq)
                        break;
        }
        return ret;
@@ -584,37 +666,38 @@ nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
 /*
  * nfs_scan_commit - Scan an inode for commit requests
  * @inode: NFS inode to scan
- * @dst: destination list
+ * @dst: mds destination list
+ * @cinfo: mds and ds lists of reqs ready to commit
  *
  * Moves requests from the inode's 'commit' request list.
  * The requests are *not* checked to ensure that they form a contiguous set.
  */
-static int
-nfs_scan_commit(struct inode *inode, struct list_head *dst)
+int
+nfs_scan_commit(struct inode *inode, struct list_head *dst,
+               struct nfs_commit_info *cinfo)
 {
-       struct nfs_inode *nfsi = NFS_I(inode);
        int ret = 0;
 
-       spin_lock(&inode->i_lock);
-       if (nfsi->ncommit > 0) {
+       spin_lock(cinfo->lock);
+       if (cinfo->mds->ncommit > 0) {
                const int max = INT_MAX;
 
-               ret = nfs_scan_commit_list(&nfsi->commit_list, dst, max,
-                               &inode->i_lock);
-               ret += pnfs_scan_commit_lists(inode, max - ret,
-                               &inode->i_lock);
+               ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
+                                          cinfo, max);
+               ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(cinfo->lock);
        return ret;
 }
 
 #else
-static inline int nfs_need_commit(struct nfs_inode *nfsi)
+static unsigned long nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
 {
        return 0;
 }
 
-static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst)
+int nfs_scan_commit(struct inode *inode, struct list_head *dst,
+                   struct nfs_commit_info *cinfo)
 {
        return 0;
 }
@@ -659,7 +742,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
                    || end < req->wb_offset)
                        goto out_flushme;
 
-               if (nfs_lock_request_dontget(req))
+               if (nfs_lock_request(req))
                        break;
 
                /* The request is locked, so wait and then retry */
@@ -729,7 +812,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
        nfs_grow_file(page, offset, count);
        nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
        nfs_mark_request_dirty(req);
-       nfs_unlock_request(req);
+       nfs_unlock_and_release_request(req);
        return 0;
 }
 
@@ -766,10 +849,14 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
  * the PageUptodate() flag. In this case, we will need to turn off
  * write optimisations that depend on the page contents being correct.
  */
-static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
+static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
 {
-       return PageUptodate(page) &&
-               !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
+       if (nfs_have_delegated_attributes(inode))
+               goto out;
+       if (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE)
+               return false;
+out:
+       return PageUptodate(page) != 0;
 }
 
 /*
@@ -815,17 +902,6 @@ int nfs_updatepage(struct file *file, struct page *page,
        return status;
 }
 
-static void nfs_writepage_release(struct nfs_page *req,
-                                 struct nfs_write_data *data)
-{
-       struct page *page = req->wb_page;
-
-       if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
-               nfs_inode_remove_request(req);
-       nfs_unlock_request(req);
-       nfs_end_page_writeback(page);
-}
-
 static int flush_task_priority(int how)
 {
        switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
@@ -837,18 +913,18 @@ static int flush_task_priority(int how)
        return RPC_PRIORITY_NORMAL;
 }
 
-int nfs_initiate_write(struct nfs_write_data *data,
-                      struct rpc_clnt *clnt,
+int nfs_initiate_write(struct rpc_clnt *clnt,
+                      struct nfs_write_data *data,
                       const struct rpc_call_ops *call_ops,
-                      int how)
+                      int how, int flags)
 {
-       struct inode *inode = data->inode;
+       struct inode *inode = data->header->inode;
        int priority = flush_task_priority(how);
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
-               .rpc_cred = data->cred,
+               .rpc_cred = data->header->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .rpc_client = clnt,
@@ -857,7 +933,7 @@ int nfs_initiate_write(struct nfs_write_data *data,
                .callback_ops = call_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
-               .flags = RPC_TASK_ASYNC,
+               .flags = RPC_TASK_ASYNC | flags,
                .priority = priority,
        };
        int ret = 0;
@@ -892,26 +968,21 @@ EXPORT_SYMBOL_GPL(nfs_initiate_write);
 /*
  * Set up the argument/result storage required for the RPC call.
  */
-static void nfs_write_rpcsetup(struct nfs_page *req,
-               struct nfs_write_data *data,
+static void nfs_write_rpcsetup(struct nfs_write_data *data,
                unsigned int count, unsigned int offset,
-               int how)
+               int how, struct nfs_commit_info *cinfo)
 {
-       struct inode *inode = req->wb_context->dentry->d_inode;
+       struct nfs_page *req = data->header->req;
 
        /* Set up the RPC argument and reply structs
         * NB: take care not to mess about with data->commit et al. */
 
-       data->req = req;
-       data->inode = inode = req->wb_context->dentry->d_inode;
-       data->cred = req->wb_context->cred;
-
-       data->args.fh     = NFS_FH(inode);
+       data->args.fh     = NFS_FH(data->header->inode);
        data->args.offset = req_offset(req) + offset;
        /* pnfs_set_layoutcommit needs this */
        data->mds_offset = data->args.offset;
        data->args.pgbase = req->wb_pgbase + offset;
-       data->args.pages  = data->pagevec;
+       data->args.pages  = data->pages.pagevec;
        data->args.count  = count;
        data->args.context = get_nfs_open_context(req->wb_context);
        data->args.lock_context = req->wb_lock_context;
@@ -920,7 +991,7 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
        case 0:
                break;
        case FLUSH_COND_STABLE:
-               if (nfs_need_commit(NFS_I(inode)))
+               if (nfs_reqs_to_commit(cinfo))
                        break;
        default:
                data->args.stable = NFS_FILE_SYNC;
@@ -936,9 +1007,9 @@ static int nfs_do_write(struct nfs_write_data *data,
                const struct rpc_call_ops *call_ops,
                int how)
 {
-       struct inode *inode = data->args.context->dentry->d_inode;
+       struct inode *inode = data->header->inode;
 
-       return nfs_initiate_write(data, NFS_CLIENT(inode), call_ops, how);
+       return nfs_initiate_write(NFS_CLIENT(inode), data, call_ops, how, 0);
 }
 
 static int nfs_do_multiple_writes(struct list_head *head,
@@ -951,7 +1022,7 @@ static int nfs_do_multiple_writes(struct list_head *head,
        while (!list_empty(head)) {
                int ret2;
 
-               data = list_entry(head->next, struct nfs_write_data, list);
+               data = list_first_entry(head, struct nfs_write_data, list);
                list_del_init(&data->list);
                
                ret2 = nfs_do_write(data, call_ops, how);
@@ -967,31 +1038,60 @@ static int nfs_do_multiple_writes(struct list_head *head,
  */
 static void nfs_redirty_request(struct nfs_page *req)
 {
-       struct page *page = req->wb_page;
-
        nfs_mark_request_dirty(req);
        nfs_unlock_request(req);
-       nfs_end_page_writeback(page);
+       nfs_end_page_writeback(req->wb_page);
+       nfs_release_request(req);
+}
+
+static void nfs_async_write_error(struct list_head *head)
+{
+       struct nfs_page *req;
+
+       while (!list_empty(head)) {
+               req = nfs_list_entry(head->next);
+               nfs_list_remove_request(req);
+               nfs_redirty_request(req);
+       }
+}
+
+static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
+       .error_cleanup = nfs_async_write_error,
+       .completion = nfs_write_completion,
+};
+
+static void nfs_flush_error(struct nfs_pageio_descriptor *desc,
+               struct nfs_pgio_header *hdr)
+{
+       set_bit(NFS_IOHDR_REDO, &hdr->flags);
+       while (!list_empty(&hdr->rpc_list)) {
+               struct nfs_write_data *data = list_first_entry(&hdr->rpc_list,
+                               struct nfs_write_data, list);
+               list_del(&data->list);
+               nfs_writedata_release(data);
+       }
+       desc->pg_completion_ops->error_cleanup(&desc->pg_list);
 }
 
 /*
  * Generate multiple small requests to write out a single
  * contiguous dirty area on one page.
  */
-static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_flush_multi(struct nfs_pageio_descriptor *desc,
+                          struct nfs_pgio_header *hdr)
 {
-       struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
+       struct nfs_page *req = hdr->req;
        struct page *page = req->wb_page;
        struct nfs_write_data *data;
        size_t wsize = desc->pg_bsize, nbytes;
        unsigned int offset;
        int requests = 0;
-       int ret = 0;
+       struct nfs_commit_info cinfo;
 
-       nfs_list_remove_request(req);
+       nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
 
        if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
-           (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit ||
+           (desc->pg_moreio || nfs_reqs_to_commit(&cinfo) ||
             desc->pg_count > wsize))
                desc->pg_ioflags &= ~FLUSH_COND_STABLE;
 
@@ -1001,28 +1101,22 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head
        do {
                size_t len = min(nbytes, wsize);
 
-               data = nfs_writedata_alloc(1);
-               if (!data)
-                       goto out_bad;
-               data->pagevec[0] = page;
-               nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);
-               list_add(&data->list, res);
+               data = nfs_writedata_alloc(hdr, 1);
+               if (!data) {
+                       nfs_flush_error(desc, hdr);
+                       return -ENOMEM;
+               }
+               data->pages.pagevec[0] = page;
+               nfs_write_rpcsetup(data, len, offset, desc->pg_ioflags, &cinfo);
+               list_add(&data->list, &hdr->rpc_list);
                requests++;
                nbytes -= len;
                offset += len;
        } while (nbytes != 0);
-       atomic_set(&req->wb_complete, requests);
-       desc->pg_rpc_callops = &nfs_write_partial_ops;
-       return ret;
-
-out_bad:
-       while (!list_empty(res)) {
-               data = list_entry(res->next, struct nfs_write_data, list);
-               list_del(&data->list);
-               nfs_writedata_release(data);
-       }
-       nfs_redirty_request(req);
-       return -ENOMEM;
+       nfs_list_remove_request(req);
+       nfs_list_add_request(req, &hdr->pages);
+       desc->pg_rpc_callops = &nfs_write_common_ops;
+       return 0;
 }
 
 /*
@@ -1033,62 +1127,71 @@ out_bad:
  * This is the case if nfs_updatepage detects a conflicting request
  * that has been written but not committed.
  */
-static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_flush_one(struct nfs_pageio_descriptor *desc,
+                        struct nfs_pgio_header *hdr)
 {
        struct nfs_page         *req;
        struct page             **pages;
        struct nfs_write_data   *data;
        struct list_head *head = &desc->pg_list;
-       int ret = 0;
+       struct nfs_commit_info cinfo;
 
-       data = nfs_writedata_alloc(nfs_page_array_len(desc->pg_base,
-                                                     desc->pg_count));
+       data = nfs_writedata_alloc(hdr, nfs_page_array_len(desc->pg_base,
+                                                          desc->pg_count));
        if (!data) {
-               while (!list_empty(head)) {
-                       req = nfs_list_entry(head->next);
-                       nfs_list_remove_request(req);
-                       nfs_redirty_request(req);
-               }
-               ret = -ENOMEM;
-               goto out;
+               nfs_flush_error(desc, hdr);
+               return -ENOMEM;
        }
-       pages = data->pagevec;
+
+       nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
+       pages = data->pages.pagevec;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
-               nfs_list_add_request(req, &data->pages);
+               nfs_list_add_request(req, &hdr->pages);
                *pages++ = req->wb_page;
        }
-       req = nfs_list_entry(data->pages.next);
 
        if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
-           (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
+           (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
                desc->pg_ioflags &= ~FLUSH_COND_STABLE;
 
        /* Set up the argument struct */
-       nfs_write_rpcsetup(req, data, desc->pg_count, 0, desc->pg_ioflags);
-       list_add(&data->list, res);
-       desc->pg_rpc_callops = &nfs_write_full_ops;
-out:
-       return ret;
+       nfs_write_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
+       list_add(&data->list, &hdr->rpc_list);
+       desc->pg_rpc_callops = &nfs_write_common_ops;
+       return 0;
 }
 
-int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct list_head *head)
+int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
+                     struct nfs_pgio_header *hdr)
 {
        if (desc->pg_bsize < PAGE_CACHE_SIZE)
-               return nfs_flush_multi(desc, head);
-       return nfs_flush_one(desc, head);
+               return nfs_flush_multi(desc, hdr);
+       return nfs_flush_one(desc, hdr);
 }
 
 static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
 {
-       LIST_HEAD(head);
+       struct nfs_write_header *whdr;
+       struct nfs_pgio_header *hdr;
        int ret;
 
-       ret = nfs_generic_flush(desc, &head);
+       whdr = nfs_writehdr_alloc();
+       if (!whdr) {
+               desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+               return -ENOMEM;
+       }
+       hdr = &whdr->header;
+       nfs_pgheader_init(desc, hdr, nfs_writehdr_free);
+       atomic_inc(&hdr->refcnt);
+       ret = nfs_generic_flush(desc, hdr);
        if (ret == 0)
-               ret = nfs_do_multiple_writes(&head, desc->pg_rpc_callops,
-                               desc->pg_ioflags);
+               ret = nfs_do_multiple_writes(&hdr->rpc_list,
+                                            desc->pg_rpc_callops,
+                                            desc->pg_ioflags);
+       if (atomic_dec_and_test(&hdr->refcnt))
+               hdr->completion_ops->completion(hdr);
        return ret;
 }
 
@@ -1098,9 +1201,10 @@ static const struct nfs_pageio_ops nfs_pageio_write_ops = {
 };
 
 void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
-                                 struct inode *inode, int ioflags)
+                              struct inode *inode, int ioflags,
+                              const struct nfs_pgio_completion_ops *compl_ops)
 {
-       nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
+       nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops, compl_ops,
                                NFS_SERVER(inode)->wsize, ioflags);
 }
 
@@ -1111,80 +1215,27 @@ void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
 }
 EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
 
-static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
-                                 struct inode *inode, int ioflags)
+void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
+                          struct inode *inode, int ioflags,
+                          const struct nfs_pgio_completion_ops *compl_ops)
 {
-       if (!pnfs_pageio_init_write(pgio, inode, ioflags))
-               nfs_pageio_init_write_mds(pgio, inode, ioflags);
+       if (!pnfs_pageio_init_write(pgio, inode, ioflags, compl_ops))
+               nfs_pageio_init_write_mds(pgio, inode, ioflags, compl_ops);
 }
 
-/*
- * Handle a write reply that flushed part of a page.
- */
-static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
+void nfs_write_prepare(struct rpc_task *task, void *calldata)
 {
-       struct nfs_write_data   *data = calldata;
-
-       dprintk("NFS: %5u write(%s/%lld %d@%lld)",
-               task->tk_pid,
-               data->req->wb_context->dentry->d_inode->i_sb->s_id,
-               (long long)
-                 NFS_FILEID(data->req->wb_context->dentry->d_inode),
-               data->req->wb_bytes, (long long)req_offset(data->req));
-
-       nfs_writeback_done(task, data);
+       struct nfs_write_data *data = calldata;
+       NFS_PROTO(data->header->inode)->write_rpc_prepare(task, data);
 }
 
-static void nfs_writeback_release_partial(void *calldata)
+void nfs_commit_prepare(struct rpc_task *task, void *calldata)
 {
-       struct nfs_write_data   *data = calldata;
-       struct nfs_page         *req = data->req;
-       struct page             *page = req->wb_page;
-       int status = data->task.tk_status;
+       struct nfs_commit_data *data = calldata;
 
-       if (status < 0) {
-               nfs_set_pageerror(page);
-               nfs_context_set_write_error(req->wb_context, status);
-               dprintk(", error = %d\n", status);
-               goto out;
-       }
-
-       if (nfs_write_need_commit(data)) {
-               struct inode *inode = page->mapping->host;
-
-               spin_lock(&inode->i_lock);
-               if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
-                       /* Do nothing we need to resend the writes */
-               } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
-                       memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
-                       dprintk(" defer commit\n");
-               } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
-                       set_bit(PG_NEED_RESCHED, &req->wb_flags);
-                       clear_bit(PG_NEED_COMMIT, &req->wb_flags);
-                       dprintk(" server reboot detected\n");
-               }
-               spin_unlock(&inode->i_lock);
-       } else
-               dprintk(" OK\n");
-
-out:
-       if (atomic_dec_and_test(&req->wb_complete))
-               nfs_writepage_release(req, data);
-       nfs_writedata_release(calldata);
+       NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
 }
 
-void nfs_write_prepare(struct rpc_task *task, void *calldata)
-{
-       struct nfs_write_data *data = calldata;
-       NFS_PROTO(data->inode)->write_rpc_prepare(task, data);
-}
-
-static const struct rpc_call_ops nfs_write_partial_ops = {
-       .rpc_call_prepare = nfs_write_prepare,
-       .rpc_call_done = nfs_writeback_done_partial,
-       .rpc_release = nfs_writeback_release_partial,
-};
-
 /*
  * Handle a write reply that flushes a whole page.
  *
@@ -1192,59 +1243,37 @@ static const struct rpc_call_ops nfs_write_partial_ops = {
  *       writebacks since the page->count is kept > 1 for as long
  *       as the page has a write request pending.
  */
-static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
+static void nfs_writeback_done_common(struct rpc_task *task, void *calldata)
 {
        struct nfs_write_data   *data = calldata;
 
        nfs_writeback_done(task, data);
 }
 
-static void nfs_writeback_release_full(void *calldata)
+static void nfs_writeback_release_common(void *calldata)
 {
        struct nfs_write_data   *data = calldata;
+       struct nfs_pgio_header *hdr = data->header;
        int status = data->task.tk_status;
+       struct nfs_page *req = hdr->req;
 
-       /* Update attributes as result of writeback. */
-       while (!list_empty(&data->pages)) {
-               struct nfs_page *req = nfs_list_entry(data->pages.next);
-               struct page *page = req->wb_page;
-
-               nfs_list_remove_request(req);
-
-               dprintk("NFS: %5u write (%s/%lld %d@%lld)",
-                       data->task.tk_pid,
-                       req->wb_context->dentry->d_inode->i_sb->s_id,
-                       (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
-                       req->wb_bytes,
-                       (long long)req_offset(req));
-
-               if (status < 0) {
-                       nfs_set_pageerror(page);
-                       nfs_context_set_write_error(req->wb_context, status);
-                       dprintk(", error = %d\n", status);
-                       goto remove_request;
-               }
-
-               if (nfs_write_need_commit(data)) {
+       if ((status >= 0) && nfs_write_need_commit(data)) {
+               spin_lock(&hdr->lock);
+               if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags))
+                       ; /* Do nothing */
+               else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags))
                        memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
-                       nfs_mark_request_commit(req, data->lseg);
-                       dprintk(" marked for commit\n");
-                       goto next;
-               }
-               dprintk(" OK\n");
-remove_request:
-               nfs_inode_remove_request(req);
-       next:
-               nfs_unlock_request(req);
-               nfs_end_page_writeback(page);
+               else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf)))
+                       set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags);
+               spin_unlock(&hdr->lock);
        }
-       nfs_writedata_release(calldata);
+       nfs_writedata_release(data);
 }
 
-static const struct rpc_call_ops nfs_write_full_ops = {
+static const struct rpc_call_ops nfs_write_common_ops = {
        .rpc_call_prepare = nfs_write_prepare,
-       .rpc_call_done = nfs_writeback_done_full,
-       .rpc_release = nfs_writeback_release_full,
+       .rpc_call_done = nfs_writeback_done_common,
+       .rpc_release = nfs_writeback_release_common,
 };
 
 
@@ -1255,6 +1284,7 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
 {
        struct nfs_writeargs    *argp = &data->args;
        struct nfs_writeres     *resp = &data->res;
+       struct inode            *inode = data->header->inode;
        int status;
 
        dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
@@ -1267,10 +1297,10 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
         * another writer had changed the file, but some applications
         * depend on tighter cache coherency when writing.
         */
-       status = NFS_PROTO(data->inode)->write_done(task, data);
+       status = NFS_PROTO(inode)->write_done(task, data);
        if (status != 0)
                return;
-       nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
+       nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
        if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
@@ -1288,46 +1318,47 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
                if (time_before(complain, jiffies)) {
                        dprintk("NFS:       faulty NFS server %s:"
                                " (committed = %d) != (stable = %d)\n",
-                               NFS_SERVER(data->inode)->nfs_client->cl_hostname,
+                               NFS_SERVER(inode)->nfs_client->cl_hostname,
                                resp->verf->committed, argp->stable);
                        complain = jiffies + 300 * HZ;
                }
        }
 #endif
-       /* Is this a short write? */
-       if (task->tk_status >= 0 && resp->count < argp->count) {
+       if (task->tk_status < 0)
+               nfs_set_pgio_error(data->header, task->tk_status, argp->offset);
+       else if (resp->count < argp->count) {
                static unsigned long    complain;
 
-               nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
+               /* This a short write! */
+               nfs_inc_stats(inode, NFSIOS_SHORTWRITE);
 
                /* Has the server at least made some progress? */
-               if (resp->count != 0) {
-                       /* Was this an NFSv2 write or an NFSv3 stable write? */
-                       if (resp->verf->committed != NFS_UNSTABLE) {
-                               /* Resend from where the server left off */
-                               data->mds_offset += resp->count;
-                               argp->offset += resp->count;
-                               argp->pgbase += resp->count;
-                               argp->count -= resp->count;
-                       } else {
-                               /* Resend as a stable write in order to avoid
-                                * headaches in the case of a server crash.
-                                */
-                               argp->stable = NFS_FILE_SYNC;
+               if (resp->count == 0) {
+                       if (time_before(complain, jiffies)) {
+                               printk(KERN_WARNING
+                                      "NFS: Server wrote zero bytes, expected %u.\n",
+                                      argp->count);
+                               complain = jiffies + 300 * HZ;
                        }
-                       rpc_restart_call_prepare(task);
+                       nfs_set_pgio_error(data->header, -EIO, argp->offset);
+                       task->tk_status = -EIO;
                        return;
                }
-               if (time_before(complain, jiffies)) {
-                       printk(KERN_WARNING
-                              "NFS: Server wrote zero bytes, expected %u.\n",
-                                       argp->count);
-                       complain = jiffies + 300 * HZ;
+               /* Was this an NFSv2 write or an NFSv3 stable write? */
+               if (resp->verf->committed != NFS_UNSTABLE) {
+                       /* Resend from where the server left off */
+                       data->mds_offset += resp->count;
+                       argp->offset += resp->count;
+                       argp->pgbase += resp->count;
+                       argp->count -= resp->count;
+               } else {
+                       /* Resend as a stable write in order to avoid
+                        * headaches in the case of a server crash.
+                        */
+                       argp->stable = NFS_FILE_SYNC;
                }
-               /* Can't do anything about it except throw an error. */
-               task->tk_status = -EIO;
+               rpc_restart_call_prepare(task);
        }
-       return;
 }
 
 
@@ -1347,26 +1378,23 @@ static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
        return (ret < 0) ? ret : 1;
 }
 
-void nfs_commit_clear_lock(struct nfs_inode *nfsi)
+static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
 {
        clear_bit(NFS_INO_COMMIT, &nfsi->flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
 }
-EXPORT_SYMBOL_GPL(nfs_commit_clear_lock);
 
-void nfs_commitdata_release(void *data)
+void nfs_commitdata_release(struct nfs_commit_data *data)
 {
-       struct nfs_write_data *wdata = data;
-
-       put_nfs_open_context(wdata->args.context);
-       nfs_commit_free(wdata);
+       put_nfs_open_context(data->context);
+       nfs_commit_free(data);
 }
 EXPORT_SYMBOL_GPL(nfs_commitdata_release);
 
-int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
+int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
                        const struct rpc_call_ops *call_ops,
-                       int how)
+                       int how, int flags)
 {
        struct rpc_task *task;
        int priority = flush_task_priority(how);
@@ -1382,7 +1410,7 @@ int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
                .callback_ops = call_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
-               .flags = RPC_TASK_ASYNC,
+               .flags = RPC_TASK_ASYNC | flags,
                .priority = priority,
        };
        /* Set up the initial task struct.  */
@@ -1403,9 +1431,10 @@ EXPORT_SYMBOL_GPL(nfs_initiate_commit);
 /*
  * Set up the argument/result storage required for the RPC call.
  */
-void nfs_init_commit(struct nfs_write_data *data,
-                           struct list_head *head,
-                           struct pnfs_layout_segment *lseg)
+void nfs_init_commit(struct nfs_commit_data *data,
+                    struct list_head *head,
+                    struct pnfs_layout_segment *lseg,
+                    struct nfs_commit_info *cinfo)
 {
        struct nfs_page *first = nfs_list_entry(head->next);
        struct inode *inode = first->wb_context->dentry->d_inode;
@@ -1419,13 +1448,14 @@ void nfs_init_commit(struct nfs_write_data *data,
        data->cred        = first->wb_context->cred;
        data->lseg        = lseg; /* reference transferred */
        data->mds_ops     = &nfs_commit_ops;
+       data->completion_ops = cinfo->completion_ops;
+       data->dreq        = cinfo->dreq;
 
        data->args.fh     = NFS_FH(data->inode);
        /* Note: we always request a commit of the entire inode */
        data->args.offset = 0;
        data->args.count  = 0;
-       data->args.context = get_nfs_open_context(first->wb_context);
-       data->res.count   = 0;
+       data->context     = get_nfs_open_context(first->wb_context);
        data->res.fattr   = &data->fattr;
        data->res.verf    = &data->verf;
        nfs_fattr_init(&data->fattr);
@@ -1433,18 +1463,21 @@ void nfs_init_commit(struct nfs_write_data *data,
 EXPORT_SYMBOL_GPL(nfs_init_commit);
 
 void nfs_retry_commit(struct list_head *page_list,
-                     struct pnfs_layout_segment *lseg)
+                     struct pnfs_layout_segment *lseg,
+                     struct nfs_commit_info *cinfo)
 {
        struct nfs_page *req;
 
        while (!list_empty(page_list)) {
                req = nfs_list_entry(page_list->next);
                nfs_list_remove_request(req);
-               nfs_mark_request_commit(req, lseg);
-               dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-               dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
-                            BDI_RECLAIMABLE);
-               nfs_unlock_request(req);
+               nfs_mark_request_commit(req, lseg, cinfo);
+               if (!cinfo->dreq) {
+                       dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+                       dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
+                                    BDI_RECLAIMABLE);
+               }
+               nfs_unlock_and_release_request(req);
        }
 }
 EXPORT_SYMBOL_GPL(nfs_retry_commit);
@@ -1453,9 +1486,10 @@ EXPORT_SYMBOL_GPL(nfs_retry_commit);
  * Commit dirty pages
  */
 static int
-nfs_commit_list(struct inode *inode, struct list_head *head, int how)
+nfs_commit_list(struct inode *inode, struct list_head *head, int how,
+               struct nfs_commit_info *cinfo)
 {
-       struct nfs_write_data   *data;
+       struct nfs_commit_data  *data;
 
        data = nfs_commitdata_alloc();
 
@@ -1463,11 +1497,13 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
                goto out_bad;
 
        /* Set up the argument struct */
-       nfs_init_commit(data, head, NULL);
-       return nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how);
+       nfs_init_commit(data, head, NULL, cinfo);
+       atomic_inc(&cinfo->mds->rpcs_out);
+       return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops,
+                                  how, 0);
  out_bad:
-       nfs_retry_commit(head, NULL);
-       nfs_commit_clear_lock(NFS_I(inode));
+       nfs_retry_commit(head, NULL, cinfo);
+       cinfo->completion_ops->error_cleanup(NFS_I(inode));
        return -ENOMEM;
 }
 
@@ -1476,7 +1512,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
  */
 static void nfs_commit_done(struct rpc_task *task, void *calldata)
 {
-       struct nfs_write_data   *data = calldata;
+       struct nfs_commit_data  *data = calldata;
 
         dprintk("NFS: %5u nfs_commit_done (status %d)\n",
                                 task->tk_pid, task->tk_status);
@@ -1485,10 +1521,11 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
        NFS_PROTO(data->inode)->commit_done(task, data);
 }
 
-void nfs_commit_release_pages(struct nfs_write_data *data)
+static void nfs_commit_release_pages(struct nfs_commit_data *data)
 {
        struct nfs_page *req;
        int status = data->task.tk_status;
+       struct nfs_commit_info cinfo;
 
        while (!list_empty(&data->pages)) {
                req = nfs_list_entry(data->pages.next);
@@ -1519,42 +1556,59 @@ void nfs_commit_release_pages(struct nfs_write_data *data)
                dprintk(" mismatch\n");
                nfs_mark_request_dirty(req);
        next:
-               nfs_unlock_request(req);
+               nfs_unlock_and_release_request(req);
        }
+       nfs_init_cinfo(&cinfo, data->inode, data->dreq);
+       if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
+               nfs_commit_clear_lock(NFS_I(data->inode));
 }
-EXPORT_SYMBOL_GPL(nfs_commit_release_pages);
 
 static void nfs_commit_release(void *calldata)
 {
-       struct nfs_write_data *data = calldata;
+       struct nfs_commit_data *data = calldata;
 
-       nfs_commit_release_pages(data);
-       nfs_commit_clear_lock(NFS_I(data->inode));
+       data->completion_ops->completion(data);
        nfs_commitdata_release(calldata);
 }
 
 static const struct rpc_call_ops nfs_commit_ops = {
-       .rpc_call_prepare = nfs_write_prepare,
+       .rpc_call_prepare = nfs_commit_prepare,
        .rpc_call_done = nfs_commit_done,
        .rpc_release = nfs_commit_release,
 };
 
+static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
+       .completion = nfs_commit_release_pages,
+       .error_cleanup = nfs_commit_clear_lock,
+};
+
+int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
+                           int how, struct nfs_commit_info *cinfo)
+{
+       int status;
+
+       status = pnfs_commit_list(inode, head, how, cinfo);
+       if (status == PNFS_NOT_ATTEMPTED)
+               status = nfs_commit_list(inode, head, how, cinfo);
+       return status;
+}
+
 int nfs_commit_inode(struct inode *inode, int how)
 {
        LIST_HEAD(head);
+       struct nfs_commit_info cinfo;
        int may_wait = how & FLUSH_SYNC;
        int res;
 
        res = nfs_commit_set_lock(NFS_I(inode), may_wait);
        if (res <= 0)
                goto out_mark_dirty;
-       res = nfs_scan_commit(inode, &head);
+       nfs_init_cinfo_from_inode(&cinfo, inode);
+       res = nfs_scan_commit(inode, &head, &cinfo);
        if (res) {
                int error;
 
-               error = pnfs_commit_list(inode, &head, how);
-               if (error == PNFS_NOT_ATTEMPTED)
-                       error = nfs_commit_list(inode, &head, how);
+               error = nfs_generic_commit_list(inode, &head, how, &cinfo);
                if (error < 0)
                        return error;
                if (!may_wait)
@@ -1585,14 +1639,14 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr
        int ret = 0;
 
        /* no commits means nothing needs to be done */
-       if (!nfsi->ncommit)
+       if (!nfsi->commit_info.ncommit)
                return ret;
 
        if (wbc->sync_mode == WB_SYNC_NONE) {
                /* Don't commit yet if this is a non-blocking flush and there
                 * are a lot of outstanding writes for this mapping.
                 */
-               if (nfsi->ncommit <= (nfsi->npages >> 1))
+               if (nfsi->commit_info.ncommit <= (nfsi->npages >> 1))
                        goto out_mark_dirty;
 
                /* don't wait for the COMMIT response */
@@ -1665,7 +1719,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
                req = nfs_page_find_request(page);
                if (req == NULL)
                        break;
-               if (nfs_lock_request_dontget(req)) {
+               if (nfs_lock_request(req)) {
                        nfs_clear_request_commit(req);
                        nfs_inode_remove_request(req);
                        /*
@@ -1673,7 +1727,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
                         * page as being dirty
                         */
                        cancel_dirty_page(page, PAGE_CACHE_SIZE);
-                       nfs_unlock_request(req);
+                       nfs_unlock_and_release_request(req);
                        break;
                }
                ret = nfs_wait_on_request(req);
@@ -1742,7 +1796,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
 int __init nfs_init_writepagecache(void)
 {
        nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
-                                            sizeof(struct nfs_write_data),
+                                            sizeof(struct nfs_write_header),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_wdata_cachep == NULL)
@@ -1753,6 +1807,13 @@ int __init nfs_init_writepagecache(void)
        if (nfs_wdata_mempool == NULL)
                return -ENOMEM;
 
+       nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
+                                            sizeof(struct nfs_commit_data),
+                                            0, SLAB_HWCACHE_ALIGN,
+                                            NULL);
+       if (nfs_cdata_cachep == NULL)
+               return -ENOMEM;
+
        nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
                                                      nfs_wdata_cachep);
        if (nfs_commit_mempool == NULL)
index 204438cc914ea522b83907aaf618bb0dbcfa4068..34a10d78b839f4c73b3d851e19820bc712129f36 100644 (file)
@@ -11,7 +11,7 @@ int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
        struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
 
        for (f = exp->ex_flavors; f < end; f++) {
-               if (f->pseudoflavor == rqstp->rq_flavor)
+               if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
                        return f->flags;
        }
        return exp->ex_flags;
index 8e9689abbc0c7594fb6aa6cb7fb7735018165162..ba233499b9a5fc1b374bc7d79ad8f636f01135b0 100644 (file)
 #include <linux/namei.h>
 #include <linux/module.h>
 #include <linux/exportfs.h>
+#include <linux/sunrpc/svc_xprt.h>
 
 #include <net/ipv6.h>
 
 #include "nfsd.h"
 #include "nfsfh.h"
+#include "netns.h"
 
 #define NFSDDBG_FACILITY       NFSDDBG_EXPORT
 
@@ -38,7 +40,6 @@ typedef struct svc_export     svc_export;
 #define        EXPKEY_HASHBITS         8
 #define        EXPKEY_HASHMAX          (1 << EXPKEY_HASHBITS)
 #define        EXPKEY_HASHMASK         (EXPKEY_HASHMAX -1)
-static struct cache_head *expkey_table[EXPKEY_HASHMAX];
 
 static void expkey_put(struct kref *ref)
 {
@@ -71,9 +72,9 @@ static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
        return sunrpc_cache_pipe_upcall(cd, h, expkey_request);
 }
 
-static struct svc_expkey *svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old);
-static struct svc_expkey *svc_expkey_lookup(struct svc_expkey *);
-static struct cache_detail svc_expkey_cache;
+static struct svc_expkey *svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new,
+                                           struct svc_expkey *old);
+static struct svc_expkey *svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *);
 
 static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
 {
@@ -131,7 +132,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
        key.ek_fsidtype = fsidtype;
        memcpy(key.ek_fsid, buf, len);
 
-       ek = svc_expkey_lookup(&key);
+       ek = svc_expkey_lookup(cd, &key);
        err = -ENOMEM;
        if (!ek)
                goto out;
@@ -145,7 +146,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
        err = 0;
        if (len == 0) {
                set_bit(CACHE_NEGATIVE, &key.h.flags);
-               ek = svc_expkey_update(&key, ek);
+               ek = svc_expkey_update(cd, &key, ek);
                if (!ek)
                        err = -ENOMEM;
        } else {
@@ -155,7 +156,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
 
                dprintk("Found the path %s\n", buf);
 
-               ek = svc_expkey_update(&key, ek);
+               ek = svc_expkey_update(cd, &key, ek);
                if (!ek)
                        err = -ENOMEM;
                path_put(&key.ek_path);
@@ -163,7 +164,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
        cache_flush();
  out:
        if (ek)
-               cache_put(&ek->h, &svc_expkey_cache);
+               cache_put(&ek->h, cd);
        if (dom)
                auth_domain_put(dom);
        kfree(buf);
@@ -239,10 +240,9 @@ static struct cache_head *expkey_alloc(void)
                return NULL;
 }
 
-static struct cache_detail svc_expkey_cache = {
+static struct cache_detail svc_expkey_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = EXPKEY_HASHMAX,
-       .hash_table     = expkey_table,
        .name           = "nfsd.fh",
        .cache_put      = expkey_put,
        .cache_upcall   = expkey_upcall,
@@ -268,13 +268,12 @@ svc_expkey_hash(struct svc_expkey *item)
 }
 
 static struct svc_expkey *
-svc_expkey_lookup(struct svc_expkey *item)
+svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *item)
 {
        struct cache_head *ch;
        int hash = svc_expkey_hash(item);
 
-       ch = sunrpc_cache_lookup(&svc_expkey_cache, &item->h,
-                                hash);
+       ch = sunrpc_cache_lookup(cd, &item->h, hash);
        if (ch)
                return container_of(ch, struct svc_expkey, h);
        else
@@ -282,13 +281,13 @@ svc_expkey_lookup(struct svc_expkey *item)
 }
 
 static struct svc_expkey *
-svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old)
+svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new,
+                 struct svc_expkey *old)
 {
        struct cache_head *ch;
        int hash = svc_expkey_hash(new);
 
-       ch = sunrpc_cache_update(&svc_expkey_cache, &new->h,
-                                &old->h, hash);
+       ch = sunrpc_cache_update(cd, &new->h, &old->h, hash);
        if (ch)
                return container_of(ch, struct svc_expkey, h);
        else
@@ -299,8 +298,6 @@ svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old)
 #define        EXPORT_HASHBITS         8
 #define        EXPORT_HASHMAX          (1<< EXPORT_HASHBITS)
 
-static struct cache_head *export_table[EXPORT_HASHMAX];
-
 static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
 {
        int i;
@@ -525,6 +522,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
                goto out1;
 
        exp.ex_client = dom;
+       exp.cd = cd;
 
        /* expiry */
        err = -EINVAL;
@@ -672,6 +670,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
        new->ex_fslocs.locations = NULL;
        new->ex_fslocs.locations_count = 0;
        new->ex_fslocs.migrated = 0;
+       new->cd = item->cd;
 }
 
 static void export_update(struct cache_head *cnew, struct cache_head *citem)
@@ -707,10 +706,9 @@ static struct cache_head *svc_export_alloc(void)
                return NULL;
 }
 
-struct cache_detail svc_export_cache = {
+static struct cache_detail svc_export_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = EXPORT_HASHMAX,
-       .hash_table     = export_table,
        .name           = "nfsd.export",
        .cache_put      = svc_export_put,
        .cache_upcall   = svc_export_upcall,
@@ -739,8 +737,7 @@ svc_export_lookup(struct svc_export *exp)
        struct cache_head *ch;
        int hash = svc_export_hash(exp);
 
-       ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h,
-                                hash);
+       ch = sunrpc_cache_lookup(exp->cd, &exp->h, hash);
        if (ch)
                return container_of(ch, struct svc_export, h);
        else
@@ -753,9 +750,7 @@ svc_export_update(struct svc_export *new, struct svc_export *old)
        struct cache_head *ch;
        int hash = svc_export_hash(old);
 
-       ch = sunrpc_cache_update(&svc_export_cache, &new->h,
-                                &old->h,
-                                hash);
+       ch = sunrpc_cache_update(old->cd, &new->h, &old->h, hash);
        if (ch)
                return container_of(ch, struct svc_export, h);
        else
@@ -764,7 +759,8 @@ svc_export_update(struct svc_export *new, struct svc_export *old)
 
 
 static struct svc_expkey *
-exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
+exp_find_key(struct cache_detail *cd, svc_client *clp, int fsid_type,
+            u32 *fsidv, struct cache_req *reqp)
 {
        struct svc_expkey key, *ek;
        int err;
@@ -776,18 +772,18 @@ exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
        key.ek_fsidtype = fsid_type;
        memcpy(key.ek_fsid, fsidv, key_len(fsid_type));
 
-       ek = svc_expkey_lookup(&key);
+       ek = svc_expkey_lookup(cd, &key);
        if (ek == NULL)
                return ERR_PTR(-ENOMEM);
-       err = cache_check(&svc_expkey_cache, &ek->h, reqp);
+       err = cache_check(cd, &ek->h, reqp);
        if (err)
                return ERR_PTR(err);
        return ek;
 }
 
 
-static svc_export *exp_get_by_name(svc_client *clp, const struct path *path,
-                                    struct cache_req *reqp)
+static svc_export *exp_get_by_name(struct cache_detail *cd, svc_client *clp,
+                                  const struct path *path, struct cache_req *reqp)
 {
        struct svc_export *exp, key;
        int err;
@@ -797,11 +793,12 @@ static svc_export *exp_get_by_name(svc_client *clp, const struct path *path,
 
        key.ex_client = clp;
        key.ex_path = *path;
+       key.cd = cd;
 
        exp = svc_export_lookup(&key);
        if (exp == NULL)
                return ERR_PTR(-ENOMEM);
-       err = cache_check(&svc_export_cache, &exp->h, reqp);
+       err = cache_check(cd, &exp->h, reqp);
        if (err)
                return ERR_PTR(err);
        return exp;
@@ -810,16 +807,17 @@ static svc_export *exp_get_by_name(svc_client *clp, const struct path *path,
 /*
  * Find the export entry for a given dentry.
  */
-static struct svc_export *exp_parent(svc_client *clp, struct path *path)
+static struct svc_export *exp_parent(struct cache_detail *cd, svc_client *clp,
+                                    struct path *path)
 {
        struct dentry *saved = dget(path->dentry);
-       svc_export *exp = exp_get_by_name(clp, path, NULL);
+       svc_export *exp = exp_get_by_name(cd, clp, path, NULL);
 
        while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(path->dentry)) {
                struct dentry *parent = dget_parent(path->dentry);
                dput(path->dentry);
                path->dentry = parent;
-               exp = exp_get_by_name(clp, path, NULL);
+               exp = exp_get_by_name(cd, clp, path, NULL);
        }
        dput(path->dentry);
        path->dentry = saved;
@@ -834,13 +832,16 @@ static struct svc_export *exp_parent(svc_client *clp, struct path *path)
  * since its harder to fool a kernel module than a user space program.
  */
 int
-exp_rootfh(svc_client *clp, char *name, struct knfsd_fh *f, int maxsize)
+exp_rootfh(struct net *net, svc_client *clp, char *name,
+          struct knfsd_fh *f, int maxsize)
 {
        struct svc_export       *exp;
        struct path             path;
        struct inode            *inode;
        struct svc_fh           fh;
        int                     err;
+       struct nfsd_net         *nn = net_generic(net, nfsd_net_id);
+       struct cache_detail     *cd = nn->svc_export_cache;
 
        err = -EPERM;
        /* NB: we probably ought to check that it's NUL-terminated */
@@ -853,7 +854,7 @@ exp_rootfh(svc_client *clp, char *name, struct knfsd_fh *f, int maxsize)
        dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n",
                 name, path.dentry, clp->name,
                 inode->i_sb->s_id, inode->i_ino);
-       exp = exp_parent(clp, &path);
+       exp = exp_parent(cd, clp, &path);
        if (IS_ERR(exp)) {
                err = PTR_ERR(exp);
                goto out;
@@ -875,16 +876,18 @@ out:
        return err;
 }
 
-static struct svc_export *exp_find(struct auth_domain *clp, int fsid_type,
+static struct svc_export *exp_find(struct cache_detail *cd,
+                                  struct auth_domain *clp, int fsid_type,
                                   u32 *fsidv, struct cache_req *reqp)
 {
        struct svc_export *exp;
-       struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);
+       struct nfsd_net *nn = net_generic(cd->net, nfsd_net_id);
+       struct svc_expkey *ek = exp_find_key(nn->svc_expkey_cache, clp, fsid_type, fsidv, reqp);
        if (IS_ERR(ek))
                return ERR_CAST(ek);
 
-       exp = exp_get_by_name(clp, &ek->ek_path, reqp);
-       cache_put(&ek->h, &svc_expkey_cache);
+       exp = exp_get_by_name(cd, clp, &ek->ek_path, reqp);
+       cache_put(&ek->h, nn->svc_expkey_cache);
 
        if (IS_ERR(exp))
                return ERR_CAST(exp);
@@ -901,13 +904,13 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
                return 0;
        /* ip-address based client; check sec= export option: */
        for (f = exp->ex_flavors; f < end; f++) {
-               if (f->pseudoflavor == rqstp->rq_flavor)
+               if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
                        return 0;
        }
        /* defaults in absence of sec= options: */
        if (exp->ex_nflavors == 0) {
-               if (rqstp->rq_flavor == RPC_AUTH_NULL ||
-                   rqstp->rq_flavor == RPC_AUTH_UNIX)
+               if (rqstp->rq_cred.cr_flavor == RPC_AUTH_NULL ||
+                   rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX)
                        return 0;
        }
        return nfserr_wrongsec;
@@ -926,12 +929,14 @@ struct svc_export *
 rqst_exp_get_by_name(struct svc_rqst *rqstp, struct path *path)
 {
        struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
+       struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
+       struct cache_detail *cd = nn->svc_export_cache;
 
        if (rqstp->rq_client == NULL)
                goto gss;
 
        /* First try the auth_unix client: */
-       exp = exp_get_by_name(rqstp->rq_client, path, &rqstp->rq_chandle);
+       exp = exp_get_by_name(cd, rqstp->rq_client, path, &rqstp->rq_chandle);
        if (PTR_ERR(exp) == -ENOENT)
                goto gss;
        if (IS_ERR(exp))
@@ -943,7 +948,7 @@ gss:
        /* Otherwise, try falling back on gss client */
        if (rqstp->rq_gssclient == NULL)
                return exp;
-       gssexp = exp_get_by_name(rqstp->rq_gssclient, path, &rqstp->rq_chandle);
+       gssexp = exp_get_by_name(cd, rqstp->rq_gssclient, path, &rqstp->rq_chandle);
        if (PTR_ERR(gssexp) == -ENOENT)
                return exp;
        if (!IS_ERR(exp))
@@ -955,12 +960,15 @@ struct svc_export *
 rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
 {
        struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
+       struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
+       struct cache_detail *cd = nn->svc_export_cache;
 
        if (rqstp->rq_client == NULL)
                goto gss;
 
        /* First try the auth_unix client: */
-       exp = exp_find(rqstp->rq_client, fsid_type, fsidv, &rqstp->rq_chandle);
+       exp = exp_find(cd, rqstp->rq_client, fsid_type,
+                      fsidv, &rqstp->rq_chandle);
        if (PTR_ERR(exp) == -ENOENT)
                goto gss;
        if (IS_ERR(exp))
@@ -972,7 +980,7 @@ gss:
        /* Otherwise, try falling back on gss client */
        if (rqstp->rq_gssclient == NULL)
                return exp;
-       gssexp = exp_find(rqstp->rq_gssclient, fsid_type, fsidv,
+       gssexp = exp_find(cd, rqstp->rq_gssclient, fsid_type, fsidv,
                                                &rqstp->rq_chandle);
        if (PTR_ERR(gssexp) == -ENOENT)
                return exp;
@@ -1029,13 +1037,15 @@ exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
 /* Iterator */
 
 static void *e_start(struct seq_file *m, loff_t *pos)
-       __acquires(svc_export_cache.hash_lock)
+       __acquires(((struct cache_detail *)m->private)->hash_lock)
 {
        loff_t n = *pos;
        unsigned hash, export;
        struct cache_head *ch;
-       
-       read_lock(&svc_export_cache.hash_lock);
+       struct cache_detail *cd = m->private;
+       struct cache_head **export_table = cd->hash_table;
+
+       read_lock(&cd->hash_lock);
        if (!n--)
                return SEQ_START_TOKEN;
        hash = n >> 32;
@@ -1060,6 +1070,8 @@ static void *e_next(struct seq_file *m, void *p, loff_t *pos)
 {
        struct cache_head *ch = p;
        int hash = (*pos >> 32);
+       struct cache_detail *cd = m->private;
+       struct cache_head **export_table = cd->hash_table;
 
        if (p == SEQ_START_TOKEN)
                hash = 0;
@@ -1082,9 +1094,11 @@ static void *e_next(struct seq_file *m, void *p, loff_t *pos)
 }
 
 static void e_stop(struct seq_file *m, void *p)
-       __releases(svc_export_cache.hash_lock)
+       __releases(((struct cache_detail *)m->private)->hash_lock)
 {
-       read_unlock(&svc_export_cache.hash_lock);
+       struct cache_detail *cd = m->private;
+
+       read_unlock(&cd->hash_lock);
 }
 
 static struct flags {
@@ -1195,6 +1209,7 @@ static int e_show(struct seq_file *m, void *p)
 {
        struct cache_head *cp = p;
        struct svc_export *exp = container_of(cp, struct svc_export, h);
+       struct cache_detail *cd = m->private;
 
        if (p == SEQ_START_TOKEN) {
                seq_puts(m, "# Version 1.1\n");
@@ -1203,10 +1218,10 @@ static int e_show(struct seq_file *m, void *p)
        }
 
        cache_get(&exp->h);
-       if (cache_check(&svc_export_cache, &exp->h, NULL))
+       if (cache_check(cd, &exp->h, NULL))
                return 0;
-       cache_put(&exp->h, &svc_export_cache);
-       return svc_export_show(m, &svc_export_cache, cp);
+       exp_put(exp);
+       return svc_export_show(m, cd, cp);
 }
 
 const struct seq_operations nfs_exports_op = {
@@ -1216,48 +1231,70 @@ const struct seq_operations nfs_exports_op = {
        .show   = e_show,
 };
 
-
 /*
  * Initialize the exports module.
  */
 int
-nfsd_export_init(void)
+nfsd_export_init(struct net *net)
 {
        int rv;
-       dprintk("nfsd: initializing export module.\n");
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+       dprintk("nfsd: initializing export module (net: %p).\n", net);
 
-       rv = cache_register_net(&svc_export_cache, &init_net);
+       nn->svc_export_cache = cache_create_net(&svc_export_cache_template, net);
+       if (IS_ERR(nn->svc_export_cache))
+               return PTR_ERR(nn->svc_export_cache);
+       rv = cache_register_net(nn->svc_export_cache, net);
        if (rv)
-               return rv;
-       rv = cache_register_net(&svc_expkey_cache, &init_net);
+               goto destroy_export_cache;
+
+       nn->svc_expkey_cache = cache_create_net(&svc_expkey_cache_template, net);
+       if (IS_ERR(nn->svc_expkey_cache)) {
+               rv = PTR_ERR(nn->svc_expkey_cache);
+               goto unregister_export_cache;
+       }
+       rv = cache_register_net(nn->svc_expkey_cache, net);
        if (rv)
-               cache_unregister_net(&svc_export_cache, &init_net);
-       return rv;
+               goto destroy_expkey_cache;
+       return 0;
 
+destroy_expkey_cache:
+       cache_destroy_net(nn->svc_expkey_cache, net);
+unregister_export_cache:
+       cache_unregister_net(nn->svc_export_cache, net);
+destroy_export_cache:
+       cache_destroy_net(nn->svc_export_cache, net);
+       return rv;
 }
 
 /*
  * Flush exports table - called when last nfsd thread is killed
  */
 void
-nfsd_export_flush(void)
+nfsd_export_flush(struct net *net)
 {
-       cache_purge(&svc_expkey_cache);
-       cache_purge(&svc_export_cache);
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+       cache_purge(nn->svc_expkey_cache);
+       cache_purge(nn->svc_export_cache);
 }
 
 /*
  * Shutdown the exports module.
  */
 void
-nfsd_export_shutdown(void)
+nfsd_export_shutdown(struct net *net)
 {
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       dprintk("nfsd: shutting down export module.\n");
+       dprintk("nfsd: shutting down export module (net: %p).\n", net);
 
-       cache_unregister_net(&svc_expkey_cache, &init_net);
-       cache_unregister_net(&svc_export_cache, &init_net);
-       svcauth_unix_purge();
+       cache_unregister_net(nn->svc_expkey_cache, net);
+       cache_unregister_net(nn->svc_export_cache, net);
+       cache_destroy_net(nn->svc_expkey_cache, net);
+       cache_destroy_net(nn->svc_export_cache, net);
+       svcauth_unix_purge(net);
 
-       dprintk("nfsd: export shutdown complete.\n");
+       dprintk("nfsd: export shutdown complete (net: %p).\n", net);
 }
index 9559ce468732e7c00ae40cd4fc3a379a525ecef7..e6c38159622fe6bc337f3d24ada7db683838ceac 100644 (file)
@@ -58,6 +58,7 @@ static int nfsd_inject_set(void *op_ptr, u64 val)
 
 static int nfsd_inject_get(void *data, u64 *val)
 {
+       *val = 0;
        return 0;
 }
 
index 2f3be1321534375b65cac5705806edda29c6088d..9d513efc01baad65a0807284082315c3d5a68206 100644 (file)
 #define IDMAP_NAMESZ 128
 
 #ifdef CONFIG_NFSD_V4
-int nfsd_idmap_init(void);
-void nfsd_idmap_shutdown(void);
+int nfsd_idmap_init(struct net *);
+void nfsd_idmap_shutdown(struct net *);
 #else
-static inline int nfsd_idmap_init(void)
+static inline int nfsd_idmap_init(struct net *net)
 {
        return 0;
 }
-static inline void nfsd_idmap_shutdown(void)
+static inline void nfsd_idmap_shutdown(struct net *net)
 {
 }
 #endif
index 12e0cff435b43c06689cab12763a5890942cd37d..39365636b244fbfc7aaac3a794f83f87d7ea69a6 100644 (file)
@@ -28,6 +28,12 @@ struct cld_net;
 
 struct nfsd_net {
        struct cld_net *cld_net;
+
+       struct cache_detail *svc_expkey_cache;
+       struct cache_detail *svc_export_cache;
+
+       struct cache_detail *idtoname_cache;
+       struct cache_detail *nametoid_cache;
 };
 
 extern int nfsd_net_id;
index c8e9f637153ab3e44ba293f7097e7b32e77d4e54..a5fd6b982f277ce648bbd528947964ea2ef63c73 100644 (file)
@@ -650,9 +650,10 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
        struct rpc_clnt *client;
 
        if (clp->cl_minorversion == 0) {
-               if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
+               if (!clp->cl_cred.cr_principal &&
+                               (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
                        return -EINVAL;
-               args.client_name = clp->cl_principal;
+               args.client_name = clp->cl_cred.cr_principal;
                args.prognumber = conn->cb_prog,
                args.protocol = XPRT_TRANSPORT_TCP;
                args.authflavor = clp->cl_flavor;
index 322d11ce06a452858ed0e547cf1e70e7883c08ee..dae36f1dee95e68defce943bedf01efc46d61a54 100644 (file)
 #include <linux/seq_file.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/sunrpc/svc_xprt.h>
 #include <net/net_namespace.h>
 #include "idmap.h"
 #include "nfsd.h"
+#include "netns.h"
 
 /*
  * Turn off idmapping when using AUTH_SYS.
@@ -107,8 +109,6 @@ ent_alloc(void)
  * ID -> Name cache
  */
 
-static struct cache_head *idtoname_table[ENT_HASHMAX];
-
 static uint32_t
 idtoname_hash(struct ent *ent)
 {
@@ -183,13 +183,13 @@ warn_no_idmapd(struct cache_detail *detail, int has_died)
 
 
 static int         idtoname_parse(struct cache_detail *, char *, int);
-static struct ent *idtoname_lookup(struct ent *);
-static struct ent *idtoname_update(struct ent *, struct ent *);
+static struct ent *idtoname_lookup(struct cache_detail *, struct ent *);
+static struct ent *idtoname_update(struct cache_detail *, struct ent *,
+                                  struct ent *);
 
-static struct cache_detail idtoname_cache = {
+static struct cache_detail idtoname_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = ENT_HASHMAX,
-       .hash_table     = idtoname_table,
        .name           = "nfs4.idtoname",
        .cache_put      = ent_put,
        .cache_upcall   = idtoname_upcall,
@@ -244,7 +244,7 @@ idtoname_parse(struct cache_detail *cd, char *buf, int buflen)
                goto out;
 
        error = -ENOMEM;
-       res = idtoname_lookup(&ent);
+       res = idtoname_lookup(cd, &ent);
        if (!res)
                goto out;
 
@@ -260,11 +260,11 @@ idtoname_parse(struct cache_detail *cd, char *buf, int buflen)
        else
                memcpy(ent.name, buf1, sizeof(ent.name));
        error = -ENOMEM;
-       res = idtoname_update(&ent, res);
+       res = idtoname_update(cd, &ent, res);
        if (res == NULL)
                goto out;
 
-       cache_put(&res->h, &idtoname_cache);
+       cache_put(&res->h, cd);
 
        error = 0;
 out:
@@ -275,10 +275,9 @@ out:
 
 
 static struct ent *
-idtoname_lookup(struct ent *item)
+idtoname_lookup(struct cache_detail *cd, struct ent *item)
 {
-       struct cache_head *ch = sunrpc_cache_lookup(&idtoname_cache,
-                                                   &item->h,
+       struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
                                                    idtoname_hash(item));
        if (ch)
                return container_of(ch, struct ent, h);
@@ -287,10 +286,9 @@ idtoname_lookup(struct ent *item)
 }
 
 static struct ent *
-idtoname_update(struct ent *new, struct ent *old)
+idtoname_update(struct cache_detail *cd, struct ent *new, struct ent *old)
 {
-       struct cache_head *ch = sunrpc_cache_update(&idtoname_cache,
-                                                   &new->h, &old->h,
+       struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h,
                                                    idtoname_hash(new));
        if (ch)
                return container_of(ch, struct ent, h);
@@ -303,8 +301,6 @@ idtoname_update(struct ent *new, struct ent *old)
  * Name -> ID cache
  */
 
-static struct cache_head *nametoid_table[ENT_HASHMAX];
-
 static inline int
 nametoid_hash(struct ent *ent)
 {
@@ -359,14 +355,14 @@ nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
        return 0;
 }
 
-static struct ent *nametoid_lookup(struct ent *);
-static struct ent *nametoid_update(struct ent *, struct ent *);
+static struct ent *nametoid_lookup(struct cache_detail *, struct ent *);
+static struct ent *nametoid_update(struct cache_detail *, struct ent *,
+                                  struct ent *);
 static int         nametoid_parse(struct cache_detail *, char *, int);
 
-static struct cache_detail nametoid_cache = {
+static struct cache_detail nametoid_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = ENT_HASHMAX,
-       .hash_table     = nametoid_table,
        .name           = "nfs4.nametoid",
        .cache_put      = ent_put,
        .cache_upcall   = nametoid_upcall,
@@ -426,14 +422,14 @@ nametoid_parse(struct cache_detail *cd, char *buf, int buflen)
                set_bit(CACHE_NEGATIVE, &ent.h.flags);
 
        error = -ENOMEM;
-       res = nametoid_lookup(&ent);
+       res = nametoid_lookup(cd, &ent);
        if (res == NULL)
                goto out;
-       res = nametoid_update(&ent, res);
+       res = nametoid_update(cd, &ent, res);
        if (res == NULL)
                goto out;
 
-       cache_put(&res->h, &nametoid_cache);
+       cache_put(&res->h, cd);
        error = 0;
 out:
        kfree(buf1);
@@ -443,10 +439,9 @@ out:
 
 
 static struct ent *
-nametoid_lookup(struct ent *item)
+nametoid_lookup(struct cache_detail *cd, struct ent *item)
 {
-       struct cache_head *ch = sunrpc_cache_lookup(&nametoid_cache,
-                                                   &item->h,
+       struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
                                                    nametoid_hash(item));
        if (ch)
                return container_of(ch, struct ent, h);
@@ -455,10 +450,9 @@ nametoid_lookup(struct ent *item)
 }
 
 static struct ent *
-nametoid_update(struct ent *new, struct ent *old)
+nametoid_update(struct cache_detail *cd, struct ent *new, struct ent *old)
 {
-       struct cache_head *ch = sunrpc_cache_update(&nametoid_cache,
-                                                   &new->h, &old->h,
+       struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h,
                                                    nametoid_hash(new));
        if (ch)
                return container_of(ch, struct ent, h);
@@ -471,34 +465,55 @@ nametoid_update(struct ent *new, struct ent *old)
  */
 
 int
-nfsd_idmap_init(void)
+nfsd_idmap_init(struct net *net)
 {
        int rv;
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       rv = cache_register_net(&idtoname_cache, &init_net);
+       nn->idtoname_cache = cache_create_net(&idtoname_cache_template, net);
+       if (IS_ERR(nn->idtoname_cache))
+               return PTR_ERR(nn->idtoname_cache);
+       rv = cache_register_net(nn->idtoname_cache, net);
        if (rv)
-               return rv;
-       rv = cache_register_net(&nametoid_cache, &init_net);
+               goto destroy_idtoname_cache;
+       nn->nametoid_cache = cache_create_net(&nametoid_cache_template, net);
+       if (IS_ERR(nn->nametoid_cache)) {
+               rv = PTR_ERR(nn->nametoid_cache);
+               goto unregister_idtoname_cache;
+       }
+       rv = cache_register_net(nn->nametoid_cache, net);
        if (rv)
-               cache_unregister_net(&idtoname_cache, &init_net);
+               goto destroy_nametoid_cache;
+       return 0;
+
+destroy_nametoid_cache:
+       cache_destroy_net(nn->nametoid_cache, net);
+unregister_idtoname_cache:
+       cache_unregister_net(nn->idtoname_cache, net);
+destroy_idtoname_cache:
+       cache_destroy_net(nn->idtoname_cache, net);
        return rv;
 }
 
 void
-nfsd_idmap_shutdown(void)
+nfsd_idmap_shutdown(struct net *net)
 {
-       cache_unregister_net(&idtoname_cache, &init_net);
-       cache_unregister_net(&nametoid_cache, &init_net);
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+       cache_unregister_net(nn->idtoname_cache, net);
+       cache_unregister_net(nn->nametoid_cache, net);
+       cache_destroy_net(nn->idtoname_cache, net);
+       cache_destroy_net(nn->nametoid_cache, net);
 }
 
 static int
 idmap_lookup(struct svc_rqst *rqstp,
-               struct ent *(*lookup_fn)(struct ent *), struct ent *key,
-               struct cache_detail *detail, struct ent **item)
+               struct ent *(*lookup_fn)(struct cache_detail *, struct ent *),
+               struct ent *key, struct cache_detail *detail, struct ent **item)
 {
        int ret;
 
-       *item = lookup_fn(key);
+       *item = lookup_fn(detail, key);
        if (!*item)
                return -ENOMEM;
  retry:
@@ -506,7 +521,7 @@ idmap_lookup(struct svc_rqst *rqstp,
 
        if (ret == -ETIMEDOUT) {
                struct ent *prev_item = *item;
-               *item = lookup_fn(key);
+               *item = lookup_fn(detail, key);
                if (*item != prev_item)
                        goto retry;
                cache_put(&(*item)->h, detail);
@@ -531,19 +546,20 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen
                .type = type,
        };
        int ret;
+       struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
 
        if (namelen + 1 > sizeof(key.name))
                return nfserr_badowner;
        memcpy(key.name, name, namelen);
        key.name[namelen] = '\0';
        strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
-       ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item);
+       ret = idmap_lookup(rqstp, nametoid_lookup, &key, nn->nametoid_cache, &item);
        if (ret == -ENOENT)
                return nfserr_badowner;
        if (ret)
                return nfserrno(ret);
        *id = item->id;
-       cache_put(&item->h, &nametoid_cache);
+       cache_put(&item->h, nn->nametoid_cache);
        return 0;
 }
 
@@ -555,9 +571,10 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
                .type = type,
        };
        int ret;
+       struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
 
        strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
-       ret = idmap_lookup(rqstp, idtoname_lookup, &key, &idtoname_cache, &item);
+       ret = idmap_lookup(rqstp, idtoname_lookup, &key, nn->idtoname_cache, &item);
        if (ret == -ENOENT)
                return sprintf(name, "%u", id);
        if (ret)
@@ -565,7 +582,7 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
        ret = strlen(item->name);
        BUG_ON(ret > IDMAP_NAMESZ);
        memcpy(name, item->name, ret);
-       cache_put(&item->h, &idtoname_cache);
+       cache_put(&item->h, nn->idtoname_cache);
        return ret;
 }
 
@@ -588,7 +605,7 @@ numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namel
 static __be32
 do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id)
 {
-       if (nfs4_disable_idmapping && rqstp->rq_flavor < RPC_AUTH_GSS)
+       if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS)
                if (numeric_name_to_id(rqstp, type, name, namelen, id))
                        return 0;
                /*
@@ -601,7 +618,7 @@ do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u
 static int
 do_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
 {
-       if (nfs4_disable_idmapping && rqstp->rq_flavor < RPC_AUTH_GSS)
+       if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS)
                return sprintf(name, "%u", id);
        return idmap_id_to_name(rqstp, type, id, name);
 }
index ed3f9206a0ee87c914f133492f1f6011775bdef8..5ff0b7b9fc08f22f39cc1f2d83062baceb773bdc 100644 (file)
@@ -570,7 +570,7 @@ static ssize_t
 cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 {
        struct cld_upcall *tmp, *cup;
-       struct cld_msg *cmsg = (struct cld_msg *)src;
+       struct cld_msg __user *cmsg = (struct cld_msg __user *)src;
        uint32_t xid;
        struct nfsd_net *nn = net_generic(filp->f_dentry->d_sb->s_fs_info,
                                                nfsd_net_id);
@@ -1029,7 +1029,7 @@ rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr)
        return ret;
 }
 
-struct notifier_block nfsd4_cld_block = {
+static struct notifier_block nfsd4_cld_block = {
        .notifier_call = rpc_pipefs_event,
 };
 
index 7f71c69cdcdfdcbd7245a71820b9f13e1a0bb135..8fdc9ec5c5d359f8defb2766e710eb35fc08c3b0 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/sunrpc/clnt.h>
 #include "xdr4.h"
 #include "vfs.h"
+#include "current_stateid.h"
 
 #define NFSDDBG_FACILITY                NFSDDBG_PROC
 
@@ -447,37 +448,69 @@ static struct list_head close_lru;
  *
  * which we should reject.
  */
-static void
-set_access(unsigned int *access, unsigned long bmap) {
+static unsigned int
+bmap_to_share_mode(unsigned long bmap) {
        int i;
+       unsigned int access = 0;
 
-       *access = 0;
        for (i = 1; i < 4; i++) {
                if (test_bit(i, &bmap))
-                       *access |= i;
-       }
-}
-
-static void
-set_deny(unsigned int *deny, unsigned long bmap) {
-       int i;
-
-       *deny = 0;
-       for (i = 0; i < 4; i++) {
-               if (test_bit(i, &bmap))
-                       *deny |= i ;
+                       access |= i;
        }
+       return access;
 }
 
-static int
+static bool
 test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
        unsigned int access, deny;
 
-       set_access(&access, stp->st_access_bmap);
-       set_deny(&deny, stp->st_deny_bmap);
+       access = bmap_to_share_mode(stp->st_access_bmap);
+       deny = bmap_to_share_mode(stp->st_deny_bmap);
        if ((access & open->op_share_deny) || (deny & open->op_share_access))
-               return 0;
-       return 1;
+               return false;
+       return true;
+}
+
+/* set share access for a given stateid */
+static inline void
+set_access(u32 access, struct nfs4_ol_stateid *stp)
+{
+       __set_bit(access, &stp->st_access_bmap);
+}
+
+/* clear share access for a given stateid */
+static inline void
+clear_access(u32 access, struct nfs4_ol_stateid *stp)
+{
+       __clear_bit(access, &stp->st_access_bmap);
+}
+
+/* test whether a given stateid has access */
+static inline bool
+test_access(u32 access, struct nfs4_ol_stateid *stp)
+{
+       return test_bit(access, &stp->st_access_bmap);
+}
+
+/* set share deny for a given stateid */
+static inline void
+set_deny(u32 access, struct nfs4_ol_stateid *stp)
+{
+       __set_bit(access, &stp->st_deny_bmap);
+}
+
+/* clear share deny for a given stateid */
+static inline void
+clear_deny(u32 access, struct nfs4_ol_stateid *stp)
+{
+       __clear_bit(access, &stp->st_deny_bmap);
+}
+
+/* test whether a given stateid is denying specific access */
+static inline bool
+test_deny(u32 access, struct nfs4_ol_stateid *stp)
+{
+       return test_bit(access, &stp->st_deny_bmap);
 }
 
 static int nfs4_access_to_omode(u32 access)
@@ -493,6 +526,20 @@ static int nfs4_access_to_omode(u32 access)
        BUG();
 }
 
+/* release all access and file references for a given stateid */
+static void
+release_all_access(struct nfs4_ol_stateid *stp)
+{
+       int i;
+
+       for (i = 1; i < 4; i++) {
+               if (test_access(i, stp))
+                       nfs4_file_put_access(stp->st_file,
+                                            nfs4_access_to_omode(i));
+               clear_access(i, stp);
+       }
+}
+
 static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
 {
        list_del(&stp->st_perfile);
@@ -501,16 +548,7 @@ static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
 
 static void close_generic_stateid(struct nfs4_ol_stateid *stp)
 {
-       int i;
-
-       if (stp->st_access_bmap) {
-               for (i = 1; i < 4; i++) {
-                       if (test_bit(i, &stp->st_access_bmap))
-                               nfs4_file_put_access(stp->st_file,
-                                               nfs4_access_to_omode(i));
-                       __clear_bit(i, &stp->st_access_bmap);
-               }
-       }
+       release_all_access(stp);
        put_nfs4_file(stp->st_file);
        stp->st_file = NULL;
 }
@@ -885,7 +923,7 @@ static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct n
        struct nfsd4_session *new;
        struct nfsd4_channel_attrs *fchan = &cses->fore_channel;
        int numslots, slotsize;
-       int status;
+       __be32 status;
        int idx;
 
        /*
@@ -984,7 +1022,8 @@ static inline void
 renew_client_locked(struct nfs4_client *clp)
 {
        if (is_client_expired(clp)) {
-               dprintk("%s: client (clientid %08x/%08x) already expired\n",
+               WARN_ON(1);
+               printk("%s: client (clientid %08x/%08x) already expired\n",
                        __func__,
                        clp->cl_clientid.cl_boot,
                        clp->cl_clientid.cl_id);
@@ -1049,9 +1088,7 @@ free_client(struct nfs4_client *clp)
                list_del(&ses->se_perclnt);
                nfsd4_put_session_locked(ses);
        }
-       if (clp->cl_cred.cr_group_info)
-               put_group_info(clp->cl_cred.cr_group_info);
-       kfree(clp->cl_principal);
+       free_svc_cred(&clp->cl_cred);
        kfree(clp->cl_name.data);
        kfree(clp);
 }
@@ -1132,12 +1169,21 @@ static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
        target->cl_clientid.cl_id = source->cl_clientid.cl_id; 
 }
 
-static void copy_cred(struct svc_cred *target, struct svc_cred *source)
+static int copy_cred(struct svc_cred *target, struct svc_cred *source)
 {
+       if (source->cr_principal) {
+               target->cr_principal =
+                               kstrdup(source->cr_principal, GFP_KERNEL);
+               if (target->cr_principal == NULL)
+                       return -ENOMEM;
+       } else
+               target->cr_principal = NULL;
+       target->cr_flavor = source->cr_flavor;
        target->cr_uid = source->cr_uid;
        target->cr_gid = source->cr_gid;
        target->cr_group_info = source->cr_group_info;
        get_group_info(target->cr_group_info);
+       return 0;
 }
 
 static int same_name(const char *n1, const char *n2)
@@ -1157,11 +1203,31 @@ same_clid(clientid_t *cl1, clientid_t *cl2)
        return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
 }
 
-/* XXX what about NGROUP */
+static bool groups_equal(struct group_info *g1, struct group_info *g2)
+{
+       int i;
+
+       if (g1->ngroups != g2->ngroups)
+               return false;
+       for (i=0; i<g1->ngroups; i++)
+               if (GROUP_AT(g1, i) != GROUP_AT(g2, i))
+                       return false;
+       return true;
+}
+
 static int
 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
 {
-       return cr1->cr_uid == cr2->cr_uid;
+       if ((cr1->cr_flavor != cr2->cr_flavor)
+               || (cr1->cr_uid != cr2->cr_uid)
+               || (cr1->cr_gid != cr2->cr_gid)
+               || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
+               return false;
+       if (cr1->cr_principal == cr2->cr_principal)
+               return true;
+       if (!cr1->cr_principal || !cr2->cr_principal)
+               return false;
+       return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
 }
 
 static void gen_clid(struct nfs4_client *clp)
@@ -1204,25 +1270,20 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
 {
        struct nfs4_client *clp;
        struct sockaddr *sa = svc_addr(rqstp);
-       char *princ;
+       int ret;
 
        clp = alloc_client(name);
        if (clp == NULL)
                return NULL;
 
        INIT_LIST_HEAD(&clp->cl_sessions);
-
-       princ = svc_gss_principal(rqstp);
-       if (princ) {
-               clp->cl_principal = kstrdup(princ, GFP_KERNEL);
-               if (clp->cl_principal == NULL) {
-                       spin_lock(&client_lock);
-                       free_client(clp);
-                       spin_unlock(&client_lock);
-                       return NULL;
-               }
+       ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
+       if (ret) {
+               spin_lock(&client_lock);
+               free_client(clp);
+               spin_unlock(&client_lock);
+               return NULL;
        }
-
        idr_init(&clp->cl_stateids);
        memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
        atomic_set(&clp->cl_refcount, 0);
@@ -1240,8 +1301,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
        rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
        copy_verf(clp, verf);
        rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
-       clp->cl_flavor = rqstp->rq_flavor;
-       copy_cred(&clp->cl_cred, &rqstp->rq_cred);
        gen_confirm(clp);
        clp->cl_cb_session = NULL;
        return clp;
@@ -1470,18 +1529,32 @@ nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
        clid->flags = new->cl_exchange_flags;
 }
 
+static bool client_has_state(struct nfs4_client *clp)
+{
+       /*
+        * Note clp->cl_openowners check isn't quite right: there's no
+        * need to count owners without stateid's.
+        *
+        * Also note we should probably be using this in 4.0 case too.
+        */
+       return !list_empty(&clp->cl_openowners)
+               || !list_empty(&clp->cl_delegations)
+               || !list_empty(&clp->cl_sessions);
+}
+
 __be32
 nfsd4_exchange_id(struct svc_rqst *rqstp,
                  struct nfsd4_compound_state *cstate,
                  struct nfsd4_exchange_id *exid)
 {
        struct nfs4_client *unconf, *conf, *new;
-       int status;
+       __be32 status;
        unsigned int            strhashval;
        char                    dname[HEXDIR_LEN];
        char                    addr_str[INET6_ADDRSTRLEN];
        nfs4_verifier           verf = exid->verifier;
        struct sockaddr         *sa = svc_addr(rqstp);
+       bool    update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
 
        rpc_ntop(sa, addr_str, sizeof(addr_str));
        dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
@@ -1507,71 +1580,63 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
        status = nfs4_make_rec_clidname(dname, &exid->clname);
 
        if (status)
-               goto error;
+               return status;
 
        strhashval = clientstr_hashval(dname);
 
+       /* Cases below refer to rfc 5661 section 18.35.4: */
        nfs4_lock_state();
-       status = nfs_ok;
-
        conf = find_confirmed_client_by_str(dname, strhashval);
        if (conf) {
-               if (!clp_used_exchangeid(conf)) {
-                       status = nfserr_clid_inuse; /* XXX: ? */
-                       goto out;
-               }
-               if (!same_verf(&verf, &conf->cl_verifier)) {
-                       /* 18.35.4 case 8 */
-                       if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
+               bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
+               bool verfs_match = same_verf(&verf, &conf->cl_verifier);
+
+               if (update) {
+                       if (!clp_used_exchangeid(conf)) { /* buggy client */
+                               status = nfserr_inval;
+                               goto out;
+                       }
+                       if (!creds_match) { /* case 9 */
+                               status = nfserr_perm;
+                               goto out;
+                       }
+                       if (!verfs_match) { /* case 8 */
                                status = nfserr_not_same;
                                goto out;
                        }
-                       /* Client reboot: destroy old state */
-                       expire_client(conf);
-                       goto out_new;
+                       /* case 6 */
+                       exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
+                       new = conf;
+                       goto out_copy;
                }
-               if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
-                       /* 18.35.4 case 9 */
-                       if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
-                               status = nfserr_perm;
+               if (!creds_match) { /* case 3 */
+                       if (client_has_state(conf)) {
+                               status = nfserr_clid_inuse;
                                goto out;
                        }
                        expire_client(conf);
                        goto out_new;
                }
-               /*
-                * Set bit when the owner id and verifier map to an already
-                * confirmed client id (18.35.3).
-                */
-               exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
-
-               /*
-                * Falling into 18.35.4 case 2, possible router replay.
-                * Leave confirmed record intact and return same result.
-                */
-               copy_verf(conf, &verf);
-               new = conf;
-               goto out_copy;
+               if (verfs_match) { /* case 2 */
+                       conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
+                       new = conf;
+                       goto out_copy;
+               }
+               /* case 5, client reboot */
+               goto out_new;
        }
 
-       /* 18.35.4 case 7 */
-       if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
+       if (update) { /* case 7 */
                status = nfserr_noent;
                goto out;
        }
 
        unconf  = find_unconfirmed_client_by_str(dname, strhashval);
-       if (unconf) {
-               /*
-                * Possible retry or client restart.  Per 18.35.4 case 4,
-                * a new unconfirmed record should be generated regardless
-                * of whether any properties have changed.
-                */
+       if (unconf) /* case 4, possible retry or client restart */
                expire_client(unconf);
-       }
 
+       /* case 1 (normal case) */
 out_new:
-       /* Normal case */
        new = create_client(exid->clname, dname, rqstp, &verf);
        if (new == NULL) {
                status = nfserr_jukebox;
@@ -1584,7 +1649,7 @@ out_copy:
        exid->clientid.cl_boot = new->cl_clientid.cl_boot;
        exid->clientid.cl_id = new->cl_clientid.cl_id;
 
-       exid->seqid = 1;
+       exid->seqid = new->cl_cs_slot.sl_seqid + 1;
        nfsd4_set_ex_flags(new, exid);
 
        dprintk("nfsd4_exchange_id seqid %d flags %x\n",
@@ -1593,12 +1658,10 @@ out_copy:
 
 out:
        nfs4_unlock_state();
-error:
-       dprintk("nfsd4_exchange_id returns %d\n", ntohl(status));
        return status;
 }
 
-static int
+static __be32
 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
 {
        dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
@@ -1626,7 +1689,7 @@ check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
  */
 static void
 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
-                          struct nfsd4_clid_slot *slot, int nfserr)
+                          struct nfsd4_clid_slot *slot, __be32 nfserr)
 {
        slot->sl_status = nfserr;
        memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
@@ -1657,7 +1720,7 @@ nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
                                /* seqid, slotID, slotID, slotID, status */ \
                        5 ) * sizeof(__be32))
 
-static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
+static bool check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
 {
        return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ
                || fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ;
@@ -1673,7 +1736,7 @@ nfsd4_create_session(struct svc_rqst *rqstp,
        struct nfsd4_session *new;
        struct nfsd4_clid_slot *cs_slot = NULL;
        bool confirm_me = false;
-       int status = 0;
+       __be32 status = 0;
 
        if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
                return nfserr_inval;
@@ -1686,16 +1749,10 @@ nfsd4_create_session(struct svc_rqst *rqstp,
                cs_slot = &conf->cl_cs_slot;
                status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
                if (status == nfserr_replay_cache) {
-                       dprintk("Got a create_session replay! seqid= %d\n",
-                               cs_slot->sl_seqid);
-                       /* Return the cached reply status */
                        status = nfsd4_replay_create_session(cr_ses, cs_slot);
                        goto out;
                } else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
                        status = nfserr_seq_misordered;
-                       dprintk("Sequence misordered!\n");
-                       dprintk("Expected seqid= %d but got seqid= %d\n",
-                               cs_slot->sl_seqid, cr_ses->seqid);
                        goto out;
                }
        } else if (unconf) {
@@ -1704,7 +1761,6 @@ nfsd4_create_session(struct svc_rqst *rqstp,
                        status = nfserr_clid_inuse;
                        goto out;
                }
-
                cs_slot = &unconf->cl_cs_slot;
                status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
                if (status) {
@@ -1712,7 +1768,6 @@ nfsd4_create_session(struct svc_rqst *rqstp,
                        status = nfserr_seq_misordered;
                        goto out;
                }
-
                confirm_me = true;
                conf = unconf;
        } else {
@@ -1749,8 +1804,14 @@ nfsd4_create_session(struct svc_rqst *rqstp,
 
        /* cache solo and embedded create sessions under the state lock */
        nfsd4_cache_create_session(cr_ses, cs_slot, status);
-       if (confirm_me)
+       if (confirm_me) {
+               unsigned int hash = clientstr_hashval(unconf->cl_recdir);
+               struct nfs4_client *old =
+                       find_confirmed_client_by_str(conf->cl_recdir, hash);
+               if (old)
+                       expire_client(old);
                move_to_confirmed(conf);
+       }
 out:
        nfs4_unlock_state();
        dprintk("%s returns %d\n", __func__, ntohl(status));
@@ -1818,7 +1879,7 @@ nfsd4_destroy_session(struct svc_rqst *r,
                      struct nfsd4_destroy_session *sessionid)
 {
        struct nfsd4_session *ses;
-       u32 status = nfserr_badsession;
+       __be32 status = nfserr_badsession;
 
        /* Notes:
         * - The confirmed nfs4_client->cl_sessionid holds destroyed sessinid
@@ -1914,7 +1975,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
        struct nfsd4_session *session;
        struct nfsd4_slot *slot;
        struct nfsd4_conn *conn;
-       int status;
+       __be32 status;
 
        if (resp->opcnt != 1)
                return nfserr_sequence_pos;
@@ -2008,18 +2069,11 @@ out:
        return status;
 }
 
-static inline bool has_resources(struct nfs4_client *clp)
-{
-       return !list_empty(&clp->cl_openowners)
-               || !list_empty(&clp->cl_delegations)
-               || !list_empty(&clp->cl_sessions);
-}
-
 __be32
 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
 {
        struct nfs4_client *conf, *unconf, *clp;
-       int status = 0;
+       __be32 status = 0;
 
        nfs4_lock_state();
        unconf = find_unconfirmed_client(&dc->clientid);
@@ -2028,7 +2082,7 @@ nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
        if (conf) {
                clp = conf;
 
-               if (!is_client_expired(conf) && has_resources(conf)) {
+               if (!is_client_expired(conf) && client_has_state(conf)) {
                        status = nfserr_clientid_busy;
                        goto out;
                }
@@ -2055,7 +2109,7 @@ out:
 __be32
 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
 {
-       int status = 0;
+       __be32 status = 0;
 
        if (rc->rca_one_fs) {
                if (!cstate->current_fh.fh_dentry)
@@ -2106,17 +2160,13 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        if (status)
                return status;
 
-       /* 
-        * XXX The Duplicate Request Cache (DRC) has been checked (??)
-        * We get here on a DRC miss.
-        */
-
        strhashval = clientstr_hashval(dname);
 
+       /* Cases below refer to rfc 3530 section 14.2.33: */
        nfs4_lock_state();
        conf = find_confirmed_client_by_str(dname, strhashval);
        if (conf) {
-               /* RFC 3530 14.2.33 CASE 0: */
+               /* case 0: */
                status = nfserr_clid_inuse;
                if (clp_used_exchangeid(conf))
                        goto out;
@@ -2129,63 +2179,18 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                        goto out;
                }
        }
-       /*
-        * section 14.2.33 of RFC 3530 (under the heading "IMPLEMENTATION")
-        * has a description of SETCLIENTID request processing consisting
-        * of 5 bullet points, labeled as CASE0 - CASE4 below.
-        */
        unconf = find_unconfirmed_client_by_str(dname, strhashval);
+       if (unconf)
+               expire_client(unconf);
        status = nfserr_jukebox;
-       if (!conf) {
-               /*
-                * RFC 3530 14.2.33 CASE 4:
-                * placed first, because it is the normal case
-                */
-               if (unconf)
-                       expire_client(unconf);
-               new = create_client(clname, dname, rqstp, &clverifier);
-               if (new == NULL)
-                       goto out;
-               gen_clid(new);
-       } else if (same_verf(&conf->cl_verifier, &clverifier)) {
-               /*
-                * RFC 3530 14.2.33 CASE 1:
-                * probable callback update
-                */
-               if (unconf) {
-                       /* Note this is removing unconfirmed {*x***},
-                        * which is stronger than RFC recommended {vxc**}.
-                        * This has the advantage that there is at most
-                        * one {*x***} in either list at any time.
-                        */
-                       expire_client(unconf);
-               }
-               new = create_client(clname, dname, rqstp, &clverifier);
-               if (new == NULL)
-                       goto out;
+       new = create_client(clname, dname, rqstp, &clverifier);
+       if (new == NULL)
+               goto out;
+       if (conf && same_verf(&conf->cl_verifier, &clverifier))
+               /* case 1: probable callback update */
                copy_clid(new, conf);
-       } else if (!unconf) {
-               /*
-                * RFC 3530 14.2.33 CASE 2:
-                * probable client reboot; state will be removed if
-                * confirmed.
-                */
-               new = create_client(clname, dname, rqstp, &clverifier);
-               if (new == NULL)
-                       goto out;
-               gen_clid(new);
-       } else {
-               /*
-                * RFC 3530 14.2.33 CASE 3:
-                * probable client reboot; state will be removed if
-                * confirmed.
-                */
-               expire_client(unconf);
-               new = create_client(clname, dname, rqstp, &clverifier);
-               if (new == NULL)
-                       goto out;
+       else /* case 4 (new client) or cases 2, 3 (client reboot): */
                gen_clid(new);
-       }
        /*
         * XXX: we should probably set this at creation time, and check
         * for consistent minorversion use throughout:
@@ -2203,17 +2208,11 @@ out:
 }
 
 
-/*
- * Section 14.2.34 of RFC 3530 (under the heading "IMPLEMENTATION") has
- * a description of SETCLIENTID_CONFIRM request processing consisting of 4
- * bullets, labeled as CASE1 - CASE4 below.
- */
 __be32
 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
                         struct nfsd4_compound_state *cstate,
                         struct nfsd4_setclientid_confirm *setclientid_confirm)
 {
-       struct sockaddr *sa = svc_addr(rqstp);
        struct nfs4_client *conf, *unconf;
        nfs4_verifier confirm = setclientid_confirm->sc_confirm; 
        clientid_t * clid = &setclientid_confirm->sc_clientid;
@@ -2221,84 +2220,44 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
 
        if (STALE_CLIENTID(clid))
                return nfserr_stale_clientid;
-       /* 
-        * XXX The Duplicate Request Cache (DRC) has been checked (??)
-        * We get here on a DRC miss.
-        */
-
        nfs4_lock_state();
 
        conf = find_confirmed_client(clid);
        unconf = find_unconfirmed_client(clid);
-
-       status = nfserr_clid_inuse;
-       if (conf && !rpc_cmp_addr((struct sockaddr *) &conf->cl_addr, sa))
-               goto out;
-       if (unconf && !rpc_cmp_addr((struct sockaddr *) &unconf->cl_addr, sa))
-               goto out;
-
        /*
-        * section 14.2.34 of RFC 3530 has a description of
-        * SETCLIENTID_CONFIRM request processing consisting
-        * of 4 bullet points, labeled as CASE1 - CASE4 below.
+        * We try hard to give out unique clientid's, so if we get an
+        * attempt to confirm the same clientid with a different cred,
+        * there's a bug somewhere.  Let's charitably assume it's our
+        * bug.
         */
-       if (conf && unconf && same_verf(&confirm, &unconf->cl_confirm)) {
-               /*
-                * RFC 3530 14.2.34 CASE 1:
-                * callback update
-                */
-               if (!same_creds(&conf->cl_cred, &unconf->cl_cred))
-                       status = nfserr_clid_inuse;
-               else {
-                       nfsd4_change_callback(conf, &unconf->cl_cb_conn);
-                       nfsd4_probe_callback(conf);
-                       expire_client(unconf);
+       status = nfserr_serverfault;
+       if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
+               goto out;
+       if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
+               goto out;
+       /* cases below refer to rfc 3530 section 14.2.34: */
+       if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
+               if (conf && !unconf) /* case 2: probable retransmit */
                        status = nfs_ok;
+               else /* case 4: client hasn't noticed we rebooted yet? */
+                       status = nfserr_stale_clientid;
+               goto out;
+       }
+       status = nfs_ok;
+       if (conf) { /* case 1: callback update */
+               nfsd4_change_callback(conf, &unconf->cl_cb_conn);
+               nfsd4_probe_callback(conf);
+               expire_client(unconf);
+       } else { /* case 3: normal case; new or rebooted client */
+               unsigned int hash = clientstr_hashval(unconf->cl_recdir);
 
+               conf = find_confirmed_client_by_str(unconf->cl_recdir, hash);
+               if (conf) {
+                       nfsd4_client_record_remove(conf);
+                       expire_client(conf);
                }
-       } else if (conf && !unconf) {
-               /*
-                * RFC 3530 14.2.34 CASE 2:
-                * probable retransmitted request; play it safe and
-                * do nothing.
-                */
-               if (!same_creds(&conf->cl_cred, &rqstp->rq_cred))
-                       status = nfserr_clid_inuse;
-               else
-                       status = nfs_ok;
-       } else if (!conf && unconf
-                       && same_verf(&unconf->cl_confirm, &confirm)) {
-               /*
-                * RFC 3530 14.2.34 CASE 3:
-                * Normal case; new or rebooted client:
-                */
-               if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
-                       status = nfserr_clid_inuse;
-               } else {
-                       unsigned int hash =
-                               clientstr_hashval(unconf->cl_recdir);
-                       conf = find_confirmed_client_by_str(unconf->cl_recdir,
-                                                           hash);
-                       if (conf) {
-                               nfsd4_client_record_remove(conf);
-                               expire_client(conf);
-                       }
-                       move_to_confirmed(unconf);
-                       conf = unconf;
-                       nfsd4_probe_callback(conf);
-                       status = nfs_ok;
-               }
-       } else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm)))
-           && (!unconf || (unconf && !same_verf(&unconf->cl_confirm,
-                                                               &confirm)))) {
-               /*
-                * RFC 3530 14.2.34 CASE 4:
-                * Client probably hasn't noticed that we rebooted yet.
-                */
-               status = nfserr_stale_clientid;
-       } else {
-               /* check that we have hit one of the cases...*/
-               status = nfserr_clid_inuse;
+               move_to_confirmed(unconf);
+               nfsd4_probe_callback(unconf);
        }
 out:
        nfs4_unlock_state();
@@ -2454,8 +2413,8 @@ static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
        stp->st_file = fp;
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = 0;
-       __set_bit(open->op_share_access, &stp->st_access_bmap);
-       __set_bit(open->op_share_deny, &stp->st_deny_bmap);
+       set_access(open->op_share_access, stp);
+       set_deny(open->op_share_deny, stp);
        stp->st_openstp = NULL;
 }
 
@@ -2534,8 +2493,8 @@ nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
        ret = nfserr_locked;
        /* Search for conflicting share reservations */
        list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
-               if (test_bit(deny_type, &stp->st_deny_bmap) ||
-                   test_bit(NFS4_SHARE_DENY_BOTH, &stp->st_deny_bmap))
+               if (test_deny(deny_type, stp) ||
+                   test_deny(NFS4_SHARE_DENY_BOTH, stp))
                        goto out;
        }
        ret = nfs_ok;
@@ -2791,7 +2750,7 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *c
        bool new_access;
        __be32 status;
 
-       new_access = !test_bit(op_share_access, &stp->st_access_bmap);
+       new_access = !test_access(op_share_access, stp);
        if (new_access) {
                status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
                if (status)
@@ -2806,8 +2765,8 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *c
                return status;
        }
        /* remember the open */
-       __set_bit(op_share_access, &stp->st_access_bmap);
-       __set_bit(open->op_share_deny, &stp->st_deny_bmap);
+       set_access(op_share_access, stp);
+       set_deny(open->op_share_deny, stp);
 
        return nfs_ok;
 }
@@ -3155,10 +3114,17 @@ out:
 static struct lock_manager nfsd4_manager = {
 };
 
+static bool grace_ended;
+
 static void
 nfsd4_end_grace(void)
 {
+       /* do nothing if grace period already ended */
+       if (grace_ended)
+               return;
+
        dprintk("NFSD: end of grace period\n");
+       grace_ended = true;
        nfsd4_record_grace_done(&init_net, boot_time);
        locks_end_grace(&nfsd4_manager);
        /*
@@ -3183,8 +3149,7 @@ nfs4_laundromat(void)
        nfs4_lock_state();
 
        dprintk("NFSD: laundromat service - starting\n");
-       if (locks_in_grace())
-               nfsd4_end_grace();
+       nfsd4_end_grace();
        INIT_LIST_HEAD(&reaplist);
        spin_lock(&client_lock);
        list_for_each_safe(pos, next, &client_lru) {
@@ -3276,18 +3241,18 @@ STALE_STATEID(stateid_t *stateid)
 }
 
 static inline int
-access_permit_read(unsigned long access_bmap)
+access_permit_read(struct nfs4_ol_stateid *stp)
 {
-       return test_bit(NFS4_SHARE_ACCESS_READ, &access_bmap) ||
-               test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap) ||
-               test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap);
+       return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
+               test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
+               test_access(NFS4_SHARE_ACCESS_WRITE, stp);
 }
 
 static inline int
-access_permit_write(unsigned long access_bmap)
+access_permit_write(struct nfs4_ol_stateid *stp)
 {
-       return test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap) ||
-               test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap);
+       return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
+               test_access(NFS4_SHARE_ACCESS_BOTH, stp);
 }
 
 static
@@ -3298,9 +3263,9 @@ __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
        /* For lock stateid's, we test the parent open, not the lock: */
        if (stp->st_openstp)
                stp = stp->st_openstp;
-       if ((flags & WR_STATE) && (!access_permit_write(stp->st_access_bmap)))
+       if ((flags & WR_STATE) && !access_permit_write(stp))
                 goto out;
-       if ((flags & RD_STATE) && (!access_permit_read(stp->st_access_bmap)))
+       if ((flags & RD_STATE) && !access_permit_read(stp))
                 goto out;
        status = nfs_ok;
 out:
@@ -3340,7 +3305,7 @@ static bool stateid_generation_after(stateid_t *a, stateid_t *b)
        return (s32)a->si_generation - (s32)b->si_generation > 0;
 }
 
-static int check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
+static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
 {
        /*
         * When sessions are used the stateid generation number is ignored
@@ -3649,10 +3614,10 @@ out:
 
 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
 {
-       if (!test_bit(access, &stp->st_access_bmap))
+       if (!test_access(access, stp))
                return;
        nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access));
-       __clear_bit(access, &stp->st_access_bmap);
+       clear_access(access, stp);
 }
 
 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
@@ -3674,12 +3639,12 @@ static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_ac
 }
 
 static void
-reset_union_bmap_deny(unsigned long deny, unsigned long *bmap)
+reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp)
 {
        int i;
        for (i = 0; i < 4; i++) {
                if ((i & deny) != i)
-                       __clear_bit(i, bmap);
+                       clear_deny(i, stp);
        }
 }
 
@@ -3706,19 +3671,19 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
        if (status)
                goto out; 
        status = nfserr_inval;
-       if (!test_bit(od->od_share_access, &stp->st_access_bmap)) {
-               dprintk("NFSD:access not a subset current bitmap: 0x%lx, input access=%08x\n",
+       if (!test_access(od->od_share_access, stp)) {
+               dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n",
                        stp->st_access_bmap, od->od_share_access);
                goto out;
        }
-       if (!test_bit(od->od_share_deny, &stp->st_deny_bmap)) {
+       if (!test_deny(od->od_share_deny, stp)) {
                dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
                        stp->st_deny_bmap, od->od_share_deny);
                goto out;
        }
        nfs4_stateid_downgrade(stp, od->od_share_access);
 
-       reset_union_bmap_deny(od->od_share_deny, &stp->st_deny_bmap);
+       reset_union_bmap_deny(od->od_share_deny, stp);
 
        update_stateid(&stp->st_stid.sc_stateid);
        memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
@@ -4008,13 +3973,13 @@ static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
        struct nfs4_file *fp = lock_stp->st_file;
        int oflag = nfs4_access_to_omode(access);
 
-       if (test_bit(access, &lock_stp->st_access_bmap))
+       if (test_access(access, lock_stp))
                return;
        nfs4_file_get_access(fp, oflag);
-       __set_bit(access, &lock_stp->st_access_bmap);
+       set_access(access, lock_stp);
 }
 
-__be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
+static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
 {
        struct nfs4_file *fi = ost->st_file;
        struct nfs4_openowner *oo = openowner(ost->st_stateowner);
@@ -4055,7 +4020,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        struct nfs4_openowner *open_sop = NULL;
        struct nfs4_lockowner *lock_sop = NULL;
        struct nfs4_ol_stateid *lock_stp;
-       struct nfs4_file *fp;
        struct file *filp = NULL;
        struct file_lock file_lock;
        struct file_lock conflock;
@@ -4123,7 +4087,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                        goto out;
        }
        lock_sop = lockowner(lock_stp->st_stateowner);
-       fp = lock_stp->st_file;
 
        lkflg = setlkflg(lock->lk_type);
        status = nfs4_check_openmode(lock_stp, lkflg);
@@ -4715,6 +4678,7 @@ nfs4_state_start(void)
        nfsd4_client_tracking_init(&init_net);
        boot_time = get_seconds();
        locks_start_grace(&nfsd4_manager);
+       grace_ended = false;
        printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
               nfsd4_grace);
        ret = set_callback_cred();
index 74c00bc92b9af6b01e95e55c119b90d61fbf9d34..4949667c84ea0c3d687a46faf0a455c410c39b6f 100644 (file)
@@ -1674,12 +1674,12 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
 
 static void write32(__be32 **p, u32 n)
 {
-       *(*p)++ = n;
+       *(*p)++ = htonl(n);
 }
 
 static void write64(__be32 **p, u64 n)
 {
-       write32(p, (u32)(n >> 32));
+       write32(p, (n >> 32));
        write32(p, (u32)n);
 }
 
@@ -1744,15 +1744,16 @@ static void encode_seqid_op_tail(struct nfsd4_compoundres *resp, __be32 *save, _
 }
 
 /* Encode as an array of strings the string given with components
- * separated @sep.
+ * separated @sep, escaped with esc_enter and esc_exit.
  */
-static __be32 nfsd4_encode_components(char sep, char *components,
-                                  __be32 **pp, int *buflen)
+static __be32 nfsd4_encode_components_esc(char sep, char *components,
+                                  __be32 **pp, int *buflen,
+                                  char esc_enter, char esc_exit)
 {
        __be32 *p = *pp;
        __be32 *countp = p;
        int strlen, count=0;
-       char *str, *end;
+       char *str, *end, *next;
 
        dprintk("nfsd4_encode_components(%s)\n", components);
        if ((*buflen -= 4) < 0)
@@ -1760,8 +1761,23 @@ static __be32 nfsd4_encode_components(char sep, char *components,
        WRITE32(0); /* We will fill this in with @count later */
        end = str = components;
        while (*end) {
-               for (; *end && (*end != sep); end++)
-                       ; /* Point to end of component */
+               bool found_esc = false;
+
+               /* try to parse as esc_start, ..., esc_end, sep */
+               if (*str == esc_enter) {
+                       for (; *end && (*end != esc_exit); end++)
+                               /* find esc_exit or end of string */;
+                       next = end + 1;
+                       if (*end && (!*next || *next == sep)) {
+                               str++;
+                               found_esc = true;
+                       }
+               }
+
+               if (!found_esc)
+                       for (; *end && (*end != sep); end++)
+                               /* find sep or end of string */;
+
                strlen = end - str;
                if (strlen) {
                        if ((*buflen -= ((XDR_QUADLEN(strlen) << 2) + 4)) < 0)
@@ -1780,6 +1796,15 @@ static __be32 nfsd4_encode_components(char sep, char *components,
        return 0;
 }
 
+/* Encode as an array of strings the string given with components
+ * separated @sep.
+ */
+static __be32 nfsd4_encode_components(char sep, char *components,
+                                  __be32 **pp, int *buflen)
+{
+       return nfsd4_encode_components_esc(sep, components, pp, buflen, 0, 0);
+}
+
 /*
  * encode a location element of a fs_locations structure
  */
@@ -1789,7 +1814,8 @@ static __be32 nfsd4_encode_fs_location4(struct nfsd4_fs_location *location,
        __be32 status;
        __be32 *p = *pp;
 
-       status = nfsd4_encode_components(':', location->hosts, &p, buflen);
+       status = nfsd4_encode_components_esc(':', location->hosts, &p, buflen,
+                                               '[', ']');
        if (status)
                return status;
        status = nfsd4_encode_components('/', location->path, &p, buflen);
@@ -3251,7 +3277,7 @@ nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_w
 }
 
 static __be32
-nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
                         struct nfsd4_exchange_id *exid)
 {
        __be32 *p;
@@ -3306,7 +3332,7 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, int nfserr,
 }
 
 static __be32
-nfsd4_encode_create_session(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
                            struct nfsd4_create_session *sess)
 {
        __be32 *p;
@@ -3355,14 +3381,14 @@ nfsd4_encode_create_session(struct nfsd4_compoundres *resp, int nfserr,
 }
 
 static __be32
-nfsd4_encode_destroy_session(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_destroy_session(struct nfsd4_compoundres *resp, __be32 nfserr,
                             struct nfsd4_destroy_session *destroy_session)
 {
        return nfserr;
 }
 
 static __be32
-nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
                          struct nfsd4_free_stateid *free_stateid)
 {
        __be32 *p;
@@ -3371,13 +3397,13 @@ nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, int nfserr,
                return nfserr;
 
        RESERVE_SPACE(4);
-       WRITE32(nfserr);
+       *p++ = nfserr;
        ADJUST_ARGS();
        return nfserr;
 }
 
 static __be32
-nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
                      struct nfsd4_sequence *seq)
 {
        __be32 *p;
@@ -3399,8 +3425,8 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr,
        return 0;
 }
 
-__be32
-nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr,
+static __be32
+nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
                          struct nfsd4_test_stateid *test_stateid)
 {
        struct nfsd4_test_stateid_id *stateid, *next;
@@ -3503,7 +3529,7 @@ static nfsd4_enc nfsd4_enc_ops[] = {
  * Our se_fmaxresp_cached will always be a multiple of PAGE_SIZE, and so
  * will be at least a page and will therefore hold the xdr_buf head.
  */
-int nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad)
+__be32 nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad)
 {
        struct xdr_buf *xb = &resp->rqstp->rq_res;
        struct nfsd4_session *session = NULL;
index 2c53be6d357957332478ed1d55d62c85cf9f26cc..c55298ed5772577e5afe3bd613c3e5a0df3b69dd 100644 (file)
@@ -127,7 +127,17 @@ static const struct file_operations transaction_ops = {
 
 static int exports_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &nfs_exports_op);
+       int err;
+       struct seq_file *seq;
+       struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+
+       err = seq_open(file, &nfs_exports_op);
+       if (err)
+               return err;
+
+       seq = file->private_data;
+       seq->private = nn->svc_export_cache;
+       return 0;
 }
 
 static const struct file_operations exports_operations = {
@@ -345,7 +355,7 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
        if (!dom)
                return -ENOMEM;
 
-       len = exp_rootfh(dom, path, &fh,  maxsize);
+       len = exp_rootfh(&init_net, dom, path, &fh,  maxsize);
        auth_domain_put(dom);
        if (len)
                return len;
@@ -651,6 +661,7 @@ static ssize_t __write_ports_addfd(char *buf)
 {
        char *mesg = buf;
        int fd, err;
+       struct net *net = &init_net;
 
        err = get_int(&mesg, &fd);
        if (err != 0 || fd < 0)
@@ -662,6 +673,8 @@ static ssize_t __write_ports_addfd(char *buf)
 
        err = svc_addsock(nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT);
        if (err < 0) {
+               if (nfsd_serv->sv_nrthreads == 1)
+                       svc_shutdown_net(nfsd_serv, net);
                svc_destroy(nfsd_serv);
                return err;
        }
@@ -699,6 +712,7 @@ static ssize_t __write_ports_addxprt(char *buf)
        char transport[16];
        struct svc_xprt *xprt;
        int port, err;
+       struct net *net = &init_net;
 
        if (sscanf(buf, "%15s %4u", transport, &port) != 2)
                return -EINVAL;
@@ -710,12 +724,12 @@ static ssize_t __write_ports_addxprt(char *buf)
        if (err != 0)
                return err;
 
-       err = svc_create_xprt(nfsd_serv, transport, &init_net,
+       err = svc_create_xprt(nfsd_serv, transport, net,
                                PF_INET, port, SVC_SOCK_ANONYMOUS);
        if (err < 0)
                goto out_err;
 
-       err = svc_create_xprt(nfsd_serv, transport, &init_net,
+       err = svc_create_xprt(nfsd_serv, transport, net,
                                PF_INET6, port, SVC_SOCK_ANONYMOUS);
        if (err < 0 && err != -EAFNOSUPPORT)
                goto out_close;
@@ -724,12 +738,14 @@ static ssize_t __write_ports_addxprt(char *buf)
        nfsd_serv->sv_nrthreads--;
        return 0;
 out_close:
-       xprt = svc_find_xprt(nfsd_serv, transport, &init_net, PF_INET, port);
+       xprt = svc_find_xprt(nfsd_serv, transport, net, PF_INET, port);
        if (xprt != NULL) {
                svc_close_xprt(xprt);
                svc_xprt_put(xprt);
        }
 out_err:
+       if (nfsd_serv->sv_nrthreads == 1)
+               svc_shutdown_net(nfsd_serv, net);
        svc_destroy(nfsd_serv);
        return err;
 }
@@ -1127,7 +1143,34 @@ static int create_proc_exports_entry(void)
 #endif
 
 int nfsd_net_id;
+
+static __net_init int nfsd_init_net(struct net *net)
+{
+       int retval;
+
+       retval = nfsd_export_init(net);
+       if (retval)
+               goto out_export_error;
+       retval = nfsd_idmap_init(net);
+       if (retval)
+               goto out_idmap_error;
+       return 0;
+
+out_idmap_error:
+       nfsd_export_shutdown(net);
+out_export_error:
+       return retval;
+}
+
+static __net_exit void nfsd_exit_net(struct net *net)
+{
+       nfsd_idmap_shutdown(net);
+       nfsd_export_shutdown(net);
+}
+
 static struct pernet_operations nfsd_net_ops = {
+       .init = nfsd_init_net,
+       .exit = nfsd_exit_net,
        .id   = &nfsd_net_id,
        .size = sizeof(struct nfsd_net),
 };
@@ -1154,16 +1197,10 @@ static int __init init_nfsd(void)
        retval = nfsd_reply_cache_init();
        if (retval)
                goto out_free_stat;
-       retval = nfsd_export_init();
-       if (retval)
-               goto out_free_cache;
        nfsd_lockd_init();      /* lockd->nfsd callbacks */
-       retval = nfsd_idmap_init();
-       if (retval)
-               goto out_free_lockd;
        retval = create_proc_exports_entry();
        if (retval)
-               goto out_free_idmap;
+               goto out_free_lockd;
        retval = register_filesystem(&nfsd_fs_type);
        if (retval)
                goto out_free_all;
@@ -1171,12 +1208,8 @@ static int __init init_nfsd(void)
 out_free_all:
        remove_proc_entry("fs/nfs/exports", NULL);
        remove_proc_entry("fs/nfs", NULL);
-out_free_idmap:
-       nfsd_idmap_shutdown();
 out_free_lockd:
        nfsd_lockd_shutdown();
-       nfsd_export_shutdown();
-out_free_cache:
        nfsd_reply_cache_shutdown();
 out_free_stat:
        nfsd_stat_shutdown();
@@ -1192,13 +1225,11 @@ out_unregister_notifier:
 
 static void __exit exit_nfsd(void)
 {
-       nfsd_export_shutdown();
        nfsd_reply_cache_shutdown();
        remove_proc_entry("fs/nfs/exports", NULL);
        remove_proc_entry("fs/nfs", NULL);
        nfsd_stat_shutdown();
        nfsd_lockd_shutdown();
-       nfsd_idmap_shutdown();
        nfsd4_free_slabs();
        nfsd_fault_inject_cleanup();
        unregister_filesystem(&nfsd_fs_type);
index 68454e75fce967b95bbb01158def83733de60976..cc793005a87cb4b5a79b7074c4861ca651085d1c 100644 (file)
@@ -636,7 +636,7 @@ fh_put(struct svc_fh *fhp)
 #endif
        }
        if (exp) {
-               cache_put(&exp->h, &svc_export_cache);
+               exp_put(exp);
                fhp->fh_export = NULL;
        }
        return;
index 28dfad39f0c50a626384c4363955e2b9d7e3212f..ee709fc8f58bc0b62a3f7ca64104630fe803b6d0 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/fs_struct.h>
 #include <linux/swap.h>
+#include <linux/nsproxy.h>
 
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/svcsock.h>
@@ -220,7 +221,7 @@ static int nfsd_startup(unsigned short port, int nrservs)
        ret = nfsd_init_socks(port);
        if (ret)
                goto out_racache;
-       ret = lockd_up();
+       ret = lockd_up(&init_net);
        if (ret)
                goto out_racache;
        ret = nfs4_state_start();
@@ -229,7 +230,7 @@ static int nfsd_startup(unsigned short port, int nrservs)
        nfsd_up = true;
        return 0;
 out_lockd:
-       lockd_down();
+       lockd_down(&init_net);
 out_racache:
        nfsd_racache_shutdown();
        return ret;
@@ -246,7 +247,7 @@ static void nfsd_shutdown(void)
        if (!nfsd_up)
                return;
        nfs4_state_shutdown();
-       lockd_down();
+       lockd_down(&init_net);
        nfsd_racache_shutdown();
        nfsd_up = false;
 }
@@ -261,7 +262,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
 
        printk(KERN_WARNING "nfsd: last server has exited, flushing export "
                            "cache\n");
-       nfsd_export_flush();
+       nfsd_export_flush(net);
 }
 
 void nfsd_reset_versions(void)
@@ -330,6 +331,8 @@ static int nfsd_get_default_max_blksize(void)
 
 int nfsd_create_serv(void)
 {
+       int error;
+
        WARN_ON(!mutex_is_locked(&nfsd_mutex));
        if (nfsd_serv) {
                svc_get(nfsd_serv);
@@ -343,6 +346,12 @@ int nfsd_create_serv(void)
        if (nfsd_serv == NULL)
                return -ENOMEM;
 
+       error = svc_bind(nfsd_serv, current->nsproxy->net_ns);
+       if (error < 0) {
+               svc_destroy(nfsd_serv);
+               return error;
+       }
+
        set_max_drc();
        do_gettimeofday(&nfssvc_boot);          /* record boot time */
        return 0;
@@ -373,6 +382,7 @@ int nfsd_set_nrthreads(int n, int *nthreads)
        int i = 0;
        int tot = 0;
        int err = 0;
+       struct net *net = &init_net;
 
        WARN_ON(!mutex_is_locked(&nfsd_mutex));
 
@@ -417,6 +427,9 @@ int nfsd_set_nrthreads(int n, int *nthreads)
                if (err)
                        break;
        }
+
+       if (nfsd_serv->sv_nrthreads == 1)
+               svc_shutdown_net(nfsd_serv, net);
        svc_destroy(nfsd_serv);
 
        return err;
@@ -432,6 +445,7 @@ nfsd_svc(unsigned short port, int nrservs)
 {
        int     error;
        bool    nfsd_up_before;
+       struct net *net = &init_net;
 
        mutex_lock(&nfsd_mutex);
        dprintk("nfsd: creating service\n");
@@ -464,6 +478,8 @@ out_shutdown:
        if (error < 0 && !nfsd_up_before)
                nfsd_shutdown();
 out_destroy:
+       if (nfsd_serv->sv_nrthreads == 1)
+               svc_shutdown_net(nfsd_serv, net);
        svc_destroy(nfsd_serv);         /* Release server */
 out:
        mutex_unlock(&nfsd_mutex);
@@ -547,6 +563,9 @@ nfsd(void *vrqstp)
        nfsdstats.th_cnt --;
 
 out:
+       if (rqstp->rq_server->sv_nrthreads == 1)
+               svc_shutdown_net(rqstp->rq_server, &init_net);
+
        /* Release the thread */
        svc_exit_thread(rqstp);
 
@@ -659,8 +678,12 @@ int nfsd_pool_stats_open(struct inode *inode, struct file *file)
 int nfsd_pool_stats_release(struct inode *inode, struct file *file)
 {
        int ret = seq_release(inode, file);
+       struct net *net = &init_net;
+
        mutex_lock(&nfsd_mutex);
        /* this function really, really should have been called svc_put() */
+       if (nfsd_serv->sv_nrthreads == 1)
+               svc_shutdown_net(nfsd_serv, net);
        svc_destroy(nfsd_serv);
        mutex_unlock(&nfsd_mutex);
        return ret;
index 89ab137d379a3f6756b5b5616083862e8f22d88f..849091e16ea6afd43e4ddd2dbd17962fdd87ad85 100644 (file)
@@ -232,7 +232,6 @@ struct nfs4_client {
        time_t                  cl_time;        /* time of last lease renewal */
        struct sockaddr_storage cl_addr;        /* client ipaddress */
        u32                     cl_flavor;      /* setclientid pseudoflavor */
-       char                    *cl_principal;  /* setclientid principal name */
        struct svc_cred         cl_cred;        /* setclientid principal */
        clientid_t              cl_clientid;    /* generated by server */
        nfs4_verifier           cl_confirm;     /* generated by server */
index 568666156ea4f59525d67207551ee8c45a3b730e..c8bd9c3be7f747410622fd1172b2c7243886f838 100644 (file)
@@ -2039,7 +2039,7 @@ nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
        if (err)
                goto out;
 
-       offset = vfs_llseek(file, offset, 0);
+       offset = vfs_llseek(file, offset, SEEK_SET);
        if (offset < 0) {
                err = nfserrno((int)offset);
                goto out_close;
index 1b3501598ab5dbb4609ba19e4f7c3322b29f70ba..acd127d4ee821660e71fe1e38ef1c804962f6508 100644 (file)
@@ -60,7 +60,7 @@ struct nfsd4_compound_state {
        __be32                  *datap;
        size_t                  iovlen;
        u32                     minorversion;
-       u32                     status;
+       __be32                  status;
        stateid_t       current_stateid;
        stateid_t       save_stateid;
        /* to indicate current and saved state id presents */
@@ -364,7 +364,7 @@ struct nfsd4_test_stateid_id {
 };
 
 struct nfsd4_test_stateid {
-       __be32          ts_num_ids;
+       u32             ts_num_ids;
        struct list_head ts_stateid_list;
 };
 
@@ -549,7 +549,7 @@ int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *,
                struct nfsd4_compoundargs *);
 int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *,
                struct nfsd4_compoundres *);
-int nfsd4_check_resp_size(struct nfsd4_compoundres *, u32);
+__be32 nfsd4_check_resp_size(struct nfsd4_compoundres *, u32);
 void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *);
 void nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op);
 __be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
index 26601529dc17c7ff9f5bc7d4c249a59371cd14c4..62cebc8e1a1fd49ceec5684de547bd8115eedac2 100644 (file)
@@ -37,6 +37,7 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
         * This function should be implemented when the writeback function
         * will be implemented.
         */
+       struct the_nilfs *nilfs;
        struct inode *inode = file->f_mapping->host;
        int err;
 
@@ -45,18 +46,21 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                return err;
        mutex_lock(&inode->i_mutex);
 
-       if (!nilfs_inode_dirty(inode)) {
-               mutex_unlock(&inode->i_mutex);
-               return 0;
+       if (nilfs_inode_dirty(inode)) {
+               if (datasync)
+                       err = nilfs_construct_dsync_segment(inode->i_sb, inode,
+                                                           0, LLONG_MAX);
+               else
+                       err = nilfs_construct_segment(inode->i_sb);
        }
-
-       if (datasync)
-               err = nilfs_construct_dsync_segment(inode->i_sb, inode, 0,
-                                                   LLONG_MAX);
-       else
-               err = nilfs_construct_segment(inode->i_sb);
-
        mutex_unlock(&inode->i_mutex);
+
+       nilfs = inode->i_sb->s_fs_info;
+       if (!err && nilfs_test_opt(nilfs, BARRIER)) {
+               err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+               if (err != -EIO)
+                       err = 0;
+       }
        return err;
 }
 
index 8f7b95ac1f7e438ffe8de2bab81b3b8c40d1eeb9..7cc64465ec2699960f045f37cd29bab8931c76ff 100644 (file)
@@ -734,7 +734,7 @@ void nilfs_evict_inode(struct inode *inode)
        if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
                if (inode->i_data.nrpages)
                        truncate_inode_pages(&inode->i_data, 0);
-               end_writeback(inode);
+               clear_inode(inode);
                nilfs_clear_inode(inode);
                return;
        }
@@ -746,7 +746,7 @@ void nilfs_evict_inode(struct inode *inode)
        /* TODO: some of the following operations may fail.  */
        nilfs_truncate_bmap(ii, 0);
        nilfs_mark_inode_dirty(inode);
-       end_writeback(inode);
+       clear_inode(inode);
 
        ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
        if (!ret)
index 2a70fce70c65be1151783e3aba3c221e39642ba7..06658caa18bd229ab42e01b876538efaf2c48882 100644 (file)
@@ -692,8 +692,14 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
        if (ret < 0)
                return ret;
 
+       nilfs = inode->i_sb->s_fs_info;
+       if (nilfs_test_opt(nilfs, BARRIER)) {
+               ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+               if (ret == -EIO)
+                       return ret;
+       }
+
        if (argp != NULL) {
-               nilfs = inode->i_sb->s_fs_info;
                down_read(&nilfs->ns_segctor_sem);
                cno = nilfs->ns_cno - 1;
                up_read(&nilfs->ns_segctor_sem);
index 0bb2c2010b9512ba5fd971fdbdc34eba2abab886..b72847988b78d96d99b7571d17fea769e463c6b0 100644 (file)
@@ -508,31 +508,29 @@ static struct dentry *nilfs_fh_to_parent(struct super_block *sb, struct fid *fh,
        return nilfs_get_dentry(sb, fid->cno, fid->parent_ino, fid->parent_gen);
 }
 
-static int nilfs_encode_fh(struct dentry *dentry, __u32 *fh, int *lenp,
-                          int connectable)
+static int nilfs_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
+                          struct inode *parent)
 {
        struct nilfs_fid *fid = (struct nilfs_fid *)fh;
-       struct inode *inode = dentry->d_inode;
        struct nilfs_root *root = NILFS_I(inode)->i_root;
        int type;
 
-       if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE ||
-           (connectable && *lenp < NILFS_FID_SIZE_CONNECTABLE))
+       if (parent && *lenp < NILFS_FID_SIZE_CONNECTABLE) {
+               *lenp = NILFS_FID_SIZE_CONNECTABLE;
+               return 255;
+       }
+       if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE) {
+               *lenp = NILFS_FID_SIZE_NON_CONNECTABLE;
                return 255;
+       }
 
        fid->cno = root->cno;
        fid->ino = inode->i_ino;
        fid->gen = inode->i_generation;
 
-       if (connectable && !S_ISDIR(inode->i_mode)) {
-               struct inode *parent;
-
-               spin_lock(&dentry->d_lock);
-               parent = dentry->d_parent->d_inode;
+       if (parent) {
                fid->parent_ino = parent->i_ino;
                fid->parent_gen = parent->i_generation;
-               spin_unlock(&dentry->d_lock);
-
                type = FILEID_NILFS_WITH_PARENT;
                *lenp = NILFS_FID_SIZE_CONNECTABLE;
        } else {
index a39edc41becc29e76c67b366de2032ad3314268a..e2ce79ef48c467d02f18c0c9fd4ad409c8ca0b5e 100644 (file)
@@ -30,7 +30,7 @@ config NLS_DEFAULT
          cp949, cp950, cp1251, cp1255, euc-jp, euc-kr, gb2312, iso8859-1,
          iso8859-2, iso8859-3, iso8859-4, iso8859-5, iso8859-6, iso8859-7,
          iso8859-8, iso8859-9, iso8859-13, iso8859-14, iso8859-15,
-         koi8-r, koi8-ru, koi8-u, sjis, tis-620, utf8.
+         koi8-r, koi8-ru, koi8-u, sjis, tis-620, macroman, utf8.
          If you specify a wrong value, it will use the built-in NLS;
          compatible with iso8859-1.
 
@@ -452,6 +452,161 @@ config NLS_KOI8_U
          input/output character sets. Say Y here for the preferred Ukrainian
          (koi8-u) and Belarusian (koi8-ru) character sets.
 
+config NLS_MAC_ROMAN
+       tristate "Codepage macroman"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         much of Europe -- United Kingdom, Germany, Spain, Italy, and [add
+         more countries here].
+
+         If unsure, say Y.
+
+config NLS_MAC_CELTIC
+       tristate "Codepage macceltic"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Celtic.
+
+         If unsure, say Y.
+
+config NLS_MAC_CENTEURO
+       tristate "Codepage maccenteuro"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Central Europe.
+
+         If unsure, say Y.
+
+config NLS_MAC_CROATIAN
+       tristate "Codepage maccroatian"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Croatian.
+
+         If unsure, say Y.
+
+config NLS_MAC_CYRILLIC
+       tristate "Codepage maccyrillic"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Cyrillic.
+
+         If unsure, say Y.
+
+config NLS_MAC_GAELIC
+       tristate "Codepage macgaelic"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Gaelic.
+
+         If unsure, say Y.
+
+config NLS_MAC_GREEK
+       tristate "Codepage macgreek"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Greek.
+
+         If unsure, say Y.
+
+config NLS_MAC_ICELAND
+       tristate "Codepage maciceland"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Iceland.
+
+         If unsure, say Y.
+
+config NLS_MAC_INUIT
+       tristate "Codepage macinuit"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Inuit.
+
+         If unsure, say Y.
+
+config NLS_MAC_ROMANIAN
+       tristate "Codepage macromanian"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Romanian.
+
+         If unsure, say Y.
+
+config NLS_MAC_TURKISH
+       tristate "Codepage macturkish"
+       ---help---
+         The Apple HFS file system family can deal with filenames in
+         native language character sets. These character sets are stored in
+         so-called MAC codepages. You need to include the appropriate
+         codepage if you want to be able to read/write these filenames on
+         Mac partitions correctly. This does apply to the filenames
+         only, not to the file contents. You can include several codepages;
+         say Y here if you want to include the Mac codepage that is used for
+         Turkish.
+
+         If unsure, say Y.
+
 config NLS_UTF8
        tristate "NLS UTF-8"
        help
index f499dd7c3905bfcee723ab86bf231b13dc27300a..8ae37c1b524995d49a8b0a0ba38362ebfd749f47 100644 (file)
@@ -42,3 +42,14 @@ obj-$(CONFIG_NLS_ISO8859_15) += nls_iso8859-15.o
 obj-$(CONFIG_NLS_KOI8_R)       += nls_koi8-r.o
 obj-$(CONFIG_NLS_KOI8_U)       += nls_koi8-u.o nls_koi8-ru.o
 obj-$(CONFIG_NLS_UTF8)         += nls_utf8.o
+obj-$(CONFIG_NLS_MAC_CELTIC)    += mac-celtic.o
+obj-$(CONFIG_NLS_MAC_CENTEURO)  += mac-centeuro.o
+obj-$(CONFIG_NLS_MAC_CROATIAN)  += mac-croatian.o
+obj-$(CONFIG_NLS_MAC_CYRILLIC)  += mac-cyrillic.o
+obj-$(CONFIG_NLS_MAC_GAELIC)    += mac-gaelic.o
+obj-$(CONFIG_NLS_MAC_GREEK)     += mac-greek.o
+obj-$(CONFIG_NLS_MAC_ICELAND)   += mac-iceland.o
+obj-$(CONFIG_NLS_MAC_INUIT)     += mac-inuit.o
+obj-$(CONFIG_NLS_MAC_ROMANIAN)  += mac-romanian.o
+obj-$(CONFIG_NLS_MAC_ROMAN)     += mac-roman.o
+obj-$(CONFIG_NLS_MAC_TURKISH)   += mac-turkish.o
diff --git a/fs/nls/mac-celtic.c b/fs/nls/mac-celtic.c
new file mode 100644 (file)
index 0000000..634a8b7
--- /dev/null
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/mac-celtic.c
+ *
+ * Charset macceltic translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00c5, 0x00c7, 0x00c9,
+       0x00d1, 0x00d6, 0x00dc, 0x00e1,
+       0x00e0, 0x00e2, 0x00e4, 0x00e3,
+       0x00e5, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00ed, 0x00ec,
+       0x00ee, 0x00ef, 0x00f1, 0x00f3,
+       0x00f2, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x00a2, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x00a9, 0x2122, 0x00b4,
+       0x00a8, 0x2260, 0x00c6, 0x00d8,
+       /* 0xb0 */
+       0x221e, 0x00b1, 0x2264, 0x2265,
+       0x00a5, 0x00b5, 0x2202, 0x2211,
+       0x220f, 0x03c0, 0x222b, 0x00aa,
+       0x00ba, 0x03a9, 0x00e6, 0x00f8,
+       /* 0xc0 */
+       0x00bf, 0x00a1, 0x00ac, 0x221a,
+       0x0192, 0x2248, 0x2206, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x00c0,
+       0x00c3, 0x00d5, 0x0152, 0x0153,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x25ca,
+       0x00ff, 0x0178, 0x2044, 0x20ac,
+       0x2039, 0x203a, 0x0176, 0x0177,
+       /* 0xe0 */
+       0x2021, 0x00b7, 0x1ef2, 0x1ef3,
+       0x2030, 0x00c2, 0x00ca, 0x00c1,
+       0x00cb, 0x00c8, 0x00cd, 0x00ce,
+       0x00cf, 0x00cc, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0x2663, 0x00d2, 0x00da, 0x00db,
+       0x00d9, 0x0131, 0x00dd, 0x00fd,
+       0x0174, 0x0175, 0x1e84, 0x1e85,
+       0x1e80, 0x1e81, 0x1e82, 0x1e83,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+       0x00, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+       0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+       0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+       0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0xf6, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+       0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0xf7, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0xf8, 0xf9, 0xde, 0xdf, /* 0x70-0x77 */
+       0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page1e[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0xfc, 0xfd, 0xfe, 0xff, 0xfa, 0xfb, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0xe2, 0xe3, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+       0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+       0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page26[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, NULL,   page03, NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   page1e, NULL,
+       page20, page21, page22, NULL,   NULL,   page25, page26, NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x00-0x07 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x08-0x0f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x10-0x17 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x18-0x1f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x20-0x27 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x28-0x2f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x30-0x37 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x38-0x3f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x40-0x47 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x48-0x4f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x50-0x57 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x58-0x5f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x60-0x67 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x68-0x6f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x70-0x77 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x78-0x7f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x80-0x87 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x88-0x8f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x90-0x97 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x98-0x9f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa0-0xa7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa8-0xaf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb0-0xb7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb8-0xbf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc0-0xc7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc8-0xcf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd0-0xd7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd8-0xdf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe0-0xe7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe8-0xef */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0-0xf7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "macceltic",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_macceltic(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_macceltic(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_macceltic)
+module_exit(exit_nls_macceltic)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-centeuro.c b/fs/nls/mac-centeuro.c
new file mode 100644 (file)
index 0000000..979e626
--- /dev/null
@@ -0,0 +1,532 @@
+/*
+ * linux/fs/nls/mac-centeuro.c
+ *
+ * Charset maccenteuro translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x0100, 0x0101, 0x00c9,
+       0x0104, 0x00d6, 0x00dc, 0x00e1,
+       0x0105, 0x010c, 0x00e4, 0x010d,
+       0x0106, 0x0107, 0x00e9, 0x0179,
+       /* 0x90 */
+       0x017a, 0x010e, 0x00ed, 0x010f,
+       0x0112, 0x0113, 0x0116, 0x00f3,
+       0x0117, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x011a, 0x011b, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x0118, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x00a9, 0x2122, 0x0119,
+       0x00a8, 0x2260, 0x0123, 0x012e,
+       /* 0xb0 */
+       0x012f, 0x012a, 0x2264, 0x2265,
+       0x012b, 0x0136, 0x2202, 0x2211,
+       0x0142, 0x013b, 0x013c, 0x013d,
+       0x013e, 0x0139, 0x013a, 0x0145,
+       /* 0xc0 */
+       0x0146, 0x0143, 0x00ac, 0x221a,
+       0x0144, 0x0147, 0x2206, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x0148,
+       0x0150, 0x00d5, 0x0151, 0x014c,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x25ca,
+       0x014d, 0x0154, 0x0155, 0x0158,
+       0x2039, 0x203a, 0x0159, 0x0156,
+       /* 0xe0 */
+       0x0157, 0x0160, 0x201a, 0x201e,
+       0x0161, 0x015a, 0x015b, 0x00c1,
+       0x0164, 0x0165, 0x00cd, 0x017d,
+       0x017e, 0x016a, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0x016b, 0x016e, 0x00da, 0x016f,
+       0x0170, 0x0171, 0x0172, 0x0173,
+       0x00dd, 0x00fd, 0x0137, 0x017b,
+       0x0141, 0x017c, 0x0122, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xa9, 0x00, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+       0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0xe7, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x83, 0x00, 0x00, 0x00, 0xea, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0xf2, 0x00, 0x86, 0xf8, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x00, 0x87, 0x00, 0x00, 0x8a, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x8e, 0x00, 0x00, 0x00, 0x92, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x9c, 0x00, 0x9f, 0xf9, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x81, 0x82, 0x00, 0x00, 0x84, 0x88, 0x8c, 0x8d, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x89, 0x8b, 0x91, 0x93, /* 0x08-0x0f */
+       0x00, 0x00, 0x94, 0x95, 0x00, 0x00, 0x96, 0x98, /* 0x10-0x17 */
+       0xa2, 0xab, 0x9d, 0x9e, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xfe, 0xae, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0xb1, 0xb4, 0x00, 0x00, 0xaf, 0xb0, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0xfa, /* 0x30-0x37 */
+       0x00, 0xbd, 0xbe, 0xb9, 0xba, 0xbb, 0xbc, 0x00, /* 0x38-0x3f */
+       0x00, 0xfc, 0xb8, 0xc1, 0xc4, 0xbf, 0xc0, 0xc5, /* 0x40-0x47 */
+       0xcb, 0x00, 0x00, 0x00, 0xcf, 0xd8, 0x00, 0x00, /* 0x48-0x4f */
+       0xcc, 0xce, 0x00, 0x00, 0xd9, 0xda, 0xdf, 0xe0, /* 0x50-0x57 */
+       0xdb, 0xde, 0xe5, 0xe6, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xe1, 0xe4, 0x00, 0x00, 0xe8, 0xe9, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0xed, 0xf0, 0x00, 0x00, 0xf1, 0xf3, /* 0x68-0x6f */
+       0xf4, 0xf5, 0xf6, 0xf7, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x8f, 0x90, 0xfb, 0xfd, 0xeb, 0xec, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+       0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, page02, NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;        /* low byte: index within the page */
+       unsigned char ch = (uni & 0xff00) >> 8; /* high byte: selects the page table */
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;           /* no room for even one output byte */
+
+       uni2charset = page_uni2charset[ch];     /* NULL for pages with no mappings */
+       if (uni2charset && uni2charset[cl])     /* 0 in the page marks "no mapping" */
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;                 /* codepoint has no maccenteuro equivalent */
+       return 1;                               /* wrote exactly one byte */
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];         /* single-byte charset: direct lookup */
+       if (*uni == 0x0000)                     /* charset2uni[0] is 0, so NUL is rejected too */
+               return -EINVAL;
+       return 1;                               /* consumed exactly one byte */
+}
+
+static struct nls_table table = {
+       .charset        = "maccenteuro",        /* name this table is registered under */
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,          /* pin module while the table is in use */
+};
+
+static int __init init_nls_maccenteuro(void)
+{
+       return register_nls(&table);            /* register this charset with the NLS core */
+}
+
+static void __exit exit_nls_maccenteuro(void)
+{
+       unregister_nls(&table);                 /* undo register_nls() on module unload */
+}
+
+module_init(init_nls_maccenteuro)
+module_exit(exit_nls_maccenteuro)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-croatian.c b/fs/nls/mac-croatian.c
new file mode 100644 (file)
index 0000000..dd3f675
--- /dev/null
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/mac-croatian.c
+ *
+ * Charset maccroatian translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+/*
+ * Generated forward table: MacCroatian byte -> Unicode code point.
+ * 0x00-0x7f is plain ASCII; 0x80-0xff carries the Mac-specific
+ * accented letters, symbols and punctuation.
+ */
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00c5, 0x00c7, 0x00c9,
+       0x00d1, 0x00d6, 0x00dc, 0x00e1,
+       0x00e0, 0x00e2, 0x00e4, 0x00e3,
+       0x00e5, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00ed, 0x00ec,
+       0x00ee, 0x00ef, 0x00f1, 0x00f3,
+       0x00f2, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x00a2, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x0160, 0x2122, 0x00b4,
+       0x00a8, 0x2260, 0x017d, 0x00d8,
+       /* 0xb0 */
+       0x221e, 0x00b1, 0x2264, 0x2265,
+       0x2206, 0x00b5, 0x2202, 0x2211,
+       0x220f, 0x0161, 0x222b, 0x00aa,
+       0x00ba, 0x03a9, 0x017e, 0x00f8,
+       /* 0xc0 */
+       0x00bf, 0x00a1, 0x00ac, 0x221a,
+       0x0192, 0x2248, 0x0106, 0x00ab,
+       0x010c, 0x2026, 0x00a0, 0x00c0,
+       0x00c3, 0x00d5, 0x0152, 0x0153,
+       /* 0xd0 */
+       0x0110, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x25ca,
+       0xf8ff, 0x00a9, 0x2044, 0x20ac,
+       0x2039, 0x203a, 0x00c6, 0x00bb,
+       /* 0xe0 */
+       0x2013, 0x00b7, 0x201a, 0x201e,
+       0x2030, 0x00c2, 0x0107, 0x00c1,
+       0x010d, 0x00c8, 0x00cd, 0x00ce,
+       0x00cf, 0x00cc, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0x0111, 0x00d2, 0x00da, 0x00db,
+       0x00d9, 0x0131, 0x02c6, 0x02dc,
+       0x00af, 0x03c0, 0x00cb, 0x02da,
+       0x00b8, 0x00ca, 0x00e6, 0x02c7,
+};
+
+/*
+ * Generated reverse page for U+00xx (ASCII + Latin-1): Unicode low
+ * byte -> MacCroatian byte; 0x00 means "no mapping" (except U+0000).
+ */
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0xc1, 0xa2, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xd9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+       0xfc, 0x00, 0xbc, 0xdf, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+       0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xde, 0x82, /* 0xc0-0xc7 */
+       0xe9, 0x83, 0xfd, 0xfa, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+       0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xfe, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+       0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/*
+ * Generated reverse page for U+01xx (Latin Extended-A and f-with-hook);
+ * 0x00 means "no mapping".
+ */
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xe6, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0x00, 0x00, /* 0x08-0x0f */
+       0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xa9, 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0xae, 0xbe, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/*
+ * Generated reverse page for U+02xx (spacing modifier letters);
+ * 0x00 means "no mapping".
+ */
+static const unsigned char page02[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0xfb, 0x00, 0xf7, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/*
+ * Generated reverse page for U+03xx (Greek); 0x00 means "no mapping".
+ */
+static const unsigned char page03[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0xf9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/*
+ * Generated reverse page for U+20xx (general punctuation and currency,
+ * including the euro sign); 0x00 means "no mapping".
+ */
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xe0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+       0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/*
+ * Generated reverse page for U+21xx (letterlike symbols; only U+2122
+ * maps); 0x00 means "no mapping".
+ */
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/*
+ * Generated reverse page for U+22xx (mathematical operators);
+ * 0x00 means "no mapping".
+ */
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xb4, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+       0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/*
+ * Generated reverse page for U+25xx (geometric shapes; only U+25CA
+ * maps); 0x00 means "no mapping".
+ */
+static const unsigned char page25[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/*
+ * Generated reverse page for U+F8xx (private use area; only U+F8FF,
+ * the Apple logo, maps); 0x00 means "no mapping".
+ */
+static const unsigned char pagef8[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+/*
+ * Index by Unicode high byte to find the reverse-mapping page;
+ * a NULL entry means no MacCroatian character lives in that page.
+ */
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, page02, page03, NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       pagef8, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+/*
+ * Case-conversion table (generated).  NOTE(review): every entry is
+ * 0xff rather than a per-byte lowercase mapping - confirm this is the
+ * intended "no case conversion" form for the nls core (nls_tolower).
+ */
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+/*
+ * Case-conversion table (generated).  NOTE(review): every entry is
+ * 0xff rather than a per-byte uppercase mapping - confirm this is the
+ * intended "no case conversion" form for the nls core (nls_toupper).
+ */
+static const unsigned char charset2upper[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+/*
+ * uni2char() - encode one Unicode character as a single MacCroatian byte.
+ * Writes at most one byte to @out.  Returns 1 (bytes written),
+ * -ENAMETOOLONG when @boundlen leaves no room, or -EINVAL when the
+ * code point has no mapping (missing page or zero table entry).
+ */
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       /* Select the reverse-mapping page for the Unicode high byte. */
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+/*
+ * char2uni() - decode one MacCroatian byte to its Unicode code point.
+ * The charset is single-byte, so @boundlen is not consulted.
+ * Returns 1 (bytes consumed), or -EINVAL for a byte whose table entry
+ * is U+0000 (no mapping).
+ */
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+/* NLS operations registered with the VFS under the name "maccroatian". */
+static struct nls_table table = {
+       .charset        = "maccroatian",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+/* Register the "maccroatian" table on module load. */
+static int __init init_nls_maccroatian(void)
+{
+       return register_nls(&table);
+}
+
+/* Unregister the "maccroatian" table on module unload. */
+static void __exit exit_nls_maccroatian(void)
+{
+       unregister_nls(&table);
+}
+
+/* Module load/unload hooks for the "maccroatian" NLS table. */
+module_init(init_nls_maccroatian)
+module_exit(exit_nls_maccroatian)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-cyrillic.c b/fs/nls/mac-cyrillic.c
new file mode 100644 (file)
index 0000000..1112c84
--- /dev/null
@@ -0,0 +1,497 @@
+/*
+ * linux/fs/nls/mac-cyrillic.c
+ *
+ * Charset maccyrillic translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = { /* generated: maccyrillic byte value -> Unicode code point; 0x00-0x7f is plain ASCII */
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x0410, 0x0411, 0x0412, 0x0413,
+       0x0414, 0x0415, 0x0416, 0x0417,
+       0x0418, 0x0419, 0x041a, 0x041b,
+       0x041c, 0x041d, 0x041e, 0x041f,
+       /* 0x90 */
+       0x0420, 0x0421, 0x0422, 0x0423,
+       0x0424, 0x0425, 0x0426, 0x0427,
+       0x0428, 0x0429, 0x042a, 0x042b,
+       0x042c, 0x042d, 0x042e, 0x042f,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x0490, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x0406,
+       0x00ae, 0x00a9, 0x2122, 0x0402,
+       0x0452, 0x2260, 0x0403, 0x0453,
+       /* 0xb0 */
+       0x221e, 0x00b1, 0x2264, 0x2265,
+       0x0456, 0x00b5, 0x0491, 0x0408,
+       0x0404, 0x0454, 0x0407, 0x0457,
+       0x0409, 0x0459, 0x040a, 0x045a,
+       /* 0xc0 */
+       0x0458, 0x0405, 0x00ac, 0x221a,
+       0x0192, 0x2248, 0x2206, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x040b,
+       0x045b, 0x040c, 0x045c, 0x0455,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x201e,
+       0x040e, 0x045e, 0x040f, 0x045f,
+       0x2116, 0x0401, 0x0451, 0x044f,
+       /* 0xe0 */
+       0x0430, 0x0431, 0x0432, 0x0433,
+       0x0434, 0x0435, 0x0436, 0x0437,
+       0x0438, 0x0439, 0x043a, 0x043b,
+       0x043c, 0x043d, 0x043e, 0x043f,
+       /* 0xf0 */
+       0x0440, 0x0441, 0x0442, 0x0443,
+       0x0444, 0x0445, 0x0446, 0x0447,
+       0x0448, 0x0449, 0x044a, 0x044b,
+       0x044c, 0x044d, 0x044e, 0x20ac,
+};
+
+static const unsigned char page00[256] = { /* generated: Unicode U+00xx -> maccyrillic byte; 0x00 = no mapping (ASCII maps to itself) */
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0x00, 0xa9, 0x00, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0x00, 0xb5, 0xa6, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd6, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = { /* generated: Unicode U+01xx -> maccyrillic; only U+0192 (florin) maps, to 0xc4 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page04[256] = { /* generated: Unicode U+04xx (Cyrillic letters) -> maccyrillic byte; 0x00 = no mapping */
+       0x00, 0xdd, 0xab, 0xae, 0xb8, 0xc1, 0xa7, 0xba, /* 0x00-0x07 */
+       0xb7, 0xbc, 0xbe, 0xcb, 0xcd, 0x00, 0xd8, 0xda, /* 0x08-0x0f */
+       0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x10-0x17 */
+       0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x18-0x1f */
+       0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x20-0x27 */
+       0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x28-0x2f */
+       0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x30-0x37 */
+       0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x38-0x3f */
+       0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x40-0x47 */
+       0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0x48-0x4f */
+       0x00, 0xde, 0xac, 0xaf, 0xb9, 0xcf, 0xb4, 0xbb, /* 0x50-0x57 */
+       0xc0, 0xbd, 0xbf, 0xcc, 0xce, 0x00, 0xd9, 0xdb, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0xa2, 0xb6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = { /* generated: Unicode U+20xx (punctuation; U+20AC euro -> 0xff) -> maccyrillic byte */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0xd7, 0x00, /* 0x18-0x1f */
+       0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = { /* generated: Unicode U+21xx -> maccyrillic; only U+2116 (No. sign) -> 0xdc and U+2122 (TM) -> 0xaa */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = { /* generated: Unicode U+22xx (math symbols) -> maccyrillic byte; 0x00 = no mapping */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = { /* dispatch by Unicode high byte: per-row page table, NULL if the row has no mappings */
+       page00, page01, NULL,   NULL,   page04, NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = { /* NOTE(review): every entry is 0xff — presumably "no case mapping provided"; confirm against the NLS core's tolower handling */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = { /* NOTE(review): all 0xff, same placeholder pattern as charset2lower — no upper-case mapping supplied */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen) /* Unicode -> maccyrillic byte; returns 1 (bytes written) or -errno */
+{
+       const unsigned char *uni2charset; /* 256-entry sub-table for this Unicode row, or NULL if the row has no mappings */
+       unsigned char cl = uni & 0x00ff; /* low byte: index within the row's page table */
+       unsigned char ch = (uni & 0xff00) >> 8; /* high byte: selects the page table */
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG; /* caller's output buffer has no room */
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl]) /* a 0x00 entry means "no mapping" */
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL; /* character has no maccyrillic equivalent */
+       return 1; /* single-byte charset: exactly one byte emitted */
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) /* maccyrillic byte -> Unicode; returns 1 (bytes consumed) or -EINVAL */
+{
+       *uni = charset2uni[*rawstring]; /* single-byte charset: direct table lookup */
+       if (*uni == 0x0000) /* 0x0000 marks an unmapped byte; NUL itself is never a valid character here */
+               return -EINVAL;
+       return 1; /* consumed one byte; boundlen is unused for a single-byte charset */
+}
+
+static struct nls_table table = { /* NLS operations registered for the "maccyrillic" charset */
+       .charset        = "maccyrillic",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_maccyrillic(void) /* module load: register the table with the NLS core */
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_maccyrillic(void) /* module unload: remove the table from the NLS core */
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_maccyrillic) /* hook load/unload entry points into the module machinery */
+module_exit(exit_nls_maccyrillic)
+
+MODULE_LICENSE("Dual BSD/GPL"); /* dual-licensed: BSD or GPL at the user's choice */
diff --git a/fs/nls/mac-gaelic.c b/fs/nls/mac-gaelic.c
new file mode 100644 (file)
index 0000000..2de9158
--- /dev/null
@@ -0,0 +1,567 @@
+/*
+ * linux/fs/nls/mac-gaelic.c
+ *
+ * Charset macgaelic translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00c5, 0x00c7, 0x00c9,
+       0x00d1, 0x00d6, 0x00dc, 0x00e1,
+       0x00e0, 0x00e2, 0x00e4, 0x00e3,
+       0x00e5, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00ed, 0x00ec,
+       0x00ee, 0x00ef, 0x00f1, 0x00f3,
+       0x00f2, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x00a2, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x00a9, 0x2122, 0x00b4,
+       0x00a8, 0x2260, 0x00c6, 0x00d8,
+       /* 0xb0 */
+       0x1e02, 0x00b1, 0x2264, 0x2265,
+       0x1e03, 0x010a, 0x010b, 0x1e0a,
+       0x1e0b, 0x1e1e, 0x1e1f, 0x0120,
+       0x0121, 0x1e40, 0x00e6, 0x00f8,
+       /* 0xc0 */
+       0x1e41, 0x1e56, 0x1e57, 0x027c,
+       0x0192, 0x017f, 0x1e60, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x00c0,
+       0x00c3, 0x00d5, 0x0152, 0x0153,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x1e61, 0x1e9b,
+       0x00ff, 0x0178, 0x1e6a, 0x20ac,
+       0x2039, 0x203a, 0x0176, 0x0177,
+       /* 0xe0 */
+       0x1e6b, 0x00b7, 0x1ef2, 0x1ef3,
+       0x204a, 0x00c2, 0x00ca, 0x00c1,
+       0x00cb, 0x00c8, 0x00cd, 0x00ce,
+       0x00cf, 0x00cc, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0x2663, 0x00d2, 0x00da, 0x00db,
+       0x00d9, 0x0131, 0x00dd, 0x00fd,
+       0x0174, 0x0175, 0x1e84, 0x1e85,
+       0x1e80, 0x1e81, 0x1e82, 0x1e83,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0x00, 0xa2, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xa9, 0x00, 0xc7, 0x00, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0xab, 0x00, 0xa6, 0xe1, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+       0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+       0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0xf6, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+       0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0x00, /* 0xf0-0xf7 */
+       0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0xf7, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0xb5, 0xb6, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0xbb, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0xf8, 0xf9, 0xde, 0xdf, /* 0x70-0x77 */
+       0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page1e[256] = {
+       0x00, 0x00, 0xb0, 0xb4, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0xb7, 0xb8, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0xba, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0xbd, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0xc2, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xc6, 0xd6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0xda, 0xe0, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0xfc, 0xfd, 0xfe, 0xff, 0xfa, 0xfb, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0xe2, 0xe3, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+       0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page26[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, page02, NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   page1e, NULL,
+       page20, page21, page22, NULL,   NULL,   NULL,   page26, NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x00-0x07 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x08-0x0f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x10-0x17 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x18-0x1f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x20-0x27 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x28-0x2f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x30-0x37 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x38-0x3f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x40-0x47 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x48-0x4f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x50-0x57 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x58-0x5f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x60-0x67 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x68-0x6f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x70-0x77 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x78-0x7f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x80-0x87 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x88-0x8f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x90-0x97 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x98-0x9f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa0-0xa7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa8-0xaf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb0-0xb7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb8-0xbf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc0-0xc7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc8-0xcf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd0-0xd7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd8-0xdf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe0-0xe7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe8-0xef */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0-0xf7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "macgaelic",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_macgaelic(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_macgaelic(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_macgaelic)
+module_exit(exit_nls_macgaelic)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-greek.c b/fs/nls/mac-greek.c
new file mode 100644 (file)
index 0000000..a863100
--- /dev/null
@@ -0,0 +1,497 @@
+/*
+ * linux/fs/nls/mac-greek.c
+ *
+ * Charset macgreek translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+/*
+ * Byte -> Unicode: charset2uni[b] is the code point for macgreek byte
+ * b.  Bytes 0x00-0x7f map to themselves (ASCII); the high half carries
+ * the Mac Greek repertoire (Greek letters, punctuation, symbols).
+ */
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00b9, 0x00b2, 0x00c9,
+       0x00b3, 0x00d6, 0x00dc, 0x0385,
+       0x00e0, 0x00e2, 0x00e4, 0x0384,
+       0x00a8, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00a3, 0x2122,
+       0x00ee, 0x00ef, 0x2022, 0x00bd,
+       0x2030, 0x00f4, 0x00f6, 0x00a6,
+       0x20ac, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x0393, 0x0394, 0x0398,
+       0x039b, 0x039e, 0x03a0, 0x00df,
+       0x00ae, 0x00a9, 0x03a3, 0x03aa,
+       0x00a7, 0x2260, 0x00b0, 0x00b7,
+       /* 0xb0 */
+       0x0391, 0x00b1, 0x2264, 0x2265,
+       0x00a5, 0x0392, 0x0395, 0x0396,
+       0x0397, 0x0399, 0x039a, 0x039c,
+       0x03a6, 0x03ab, 0x03a8, 0x03a9,
+       /* 0xc0 */
+       0x03ac, 0x039d, 0x00ac, 0x039f,
+       0x03a1, 0x2248, 0x03a4, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x03a5,
+       0x03a7, 0x0386, 0x0388, 0x0153,
+       /* 0xd0 */
+       0x2013, 0x2015, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x0389,
+       0x038a, 0x038c, 0x038e, 0x03ad,
+       0x03ae, 0x03af, 0x03cc, 0x038f,
+       /* 0xe0 */
+       0x03cd, 0x03b1, 0x03b2, 0x03c8,
+       0x03b4, 0x03b5, 0x03c6, 0x03b3,
+       0x03b7, 0x03b9, 0x03be, 0x03ba,
+       0x03bb, 0x03bc, 0x03bd, 0x03bf,
+       /* 0xf0 */
+       0x03c0, 0x03ce, 0x03c1, 0x03c3,
+       0x03c4, 0x03b8, 0x03c9, 0x03c2,
+       0x03c7, 0x03c5, 0x03b6, 0x03ca,
+       0x03cb, 0x0390, 0x03b0, 0x00ad,
+};
+
+/*
+ * Unicode U+00xx -> macgreek byte.  ASCII (0x00-0x7f) is identity;
+ * a 0x00 entry means "no exact mapping" (uni2char rejects it).
+ */
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0x00, 0x00, 0x92, 0x00, 0xb4, 0x9b, 0xac, /* 0xa0-0xa7 */
+       0x8c, 0xa9, 0x00, 0xc7, 0xc2, 0xff, 0xa8, 0x00, /* 0xa8-0xaf */
+       0xae, 0xb1, 0x82, 0x84, 0x00, 0x00, 0x00, 0xaf, /* 0xb0-0xb7 */
+       0x00, 0x81, 0x00, 0xc8, 0x00, 0x97, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x83, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x00, 0x89, 0x00, 0x8a, 0x00, 0x00, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x00, 0x00, 0x94, 0x95, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x99, 0x00, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0x00, 0x9d, 0x00, 0x9e, 0x9f, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/*
+ * Unicode U+01xx -> macgreek byte.  Only U+0153 (Latin small ligature
+ * oe) is representable, as byte 0xcf; every other entry is unmapped.
+ */
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/*
+ * Unicode U+03xx (Greek and Coptic block) -> macgreek byte.
+ * A 0x00 entry means "no exact mapping".
+ */
+static const unsigned char page03[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x8b, 0x87, 0xcd, 0x00, /* 0x80-0x87 */
+       0xce, 0xd7, 0xd8, 0x00, 0xd9, 0x00, 0xda, 0xdf, /* 0x88-0x8f */
+       0xfd, 0xb0, 0xb5, 0xa1, 0xa2, 0xb6, 0xb7, 0xb8, /* 0x90-0x97 */
+       0xa3, 0xb9, 0xba, 0xa4, 0xbb, 0xc1, 0xa5, 0xc3, /* 0x98-0x9f */
+       0xa6, 0xc4, 0x00, 0xaa, 0xc6, 0xcb, 0xbc, 0xcc, /* 0xa0-0xa7 */
+       0xbe, 0xbf, 0xab, 0xbd, 0xc0, 0xdb, 0xdc, 0xdd, /* 0xa8-0xaf */
+       0xfe, 0xe1, 0xe2, 0xe7, 0xe4, 0xe5, 0xfa, 0xe8, /* 0xb0-0xb7 */
+       0xf5, 0xe9, 0xeb, 0xec, 0xed, 0xee, 0xea, 0xef, /* 0xb8-0xbf */
+       0xf0, 0xf2, 0xf7, 0xf3, 0xf4, 0xf9, 0xe6, 0xf8, /* 0xc0-0xc7 */
+       0xe3, 0xf6, 0xfb, 0xfc, 0xde, 0xe0, 0xf1, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/*
+ * Unicode U+20xx (general punctuation, currency) -> macgreek byte:
+ * dashes, quotes, dagger, bullet, ellipsis, per mille, euro (U+20ac).
+ * A 0x00 entry means "no exact mapping".
+ */
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0x00, 0xd1, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+       0xa0, 0x00, 0x96, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x98, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/*
+ * Unicode U+21xx -> macgreek byte.  Only U+2122 (trade mark sign) is
+ * representable, as byte 0x93; every other entry is unmapped.
+ */
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/*
+ * Unicode U+22xx (mathematical operators) -> macgreek byte: almost
+ * equal (U+2248), not equal (U+2260), less/greater-or-equal
+ * (U+2264/U+2265).  A 0x00 entry means "no exact mapping".
+ */
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/*
+ * High-byte dispatch for uni2char: page_uni2charset[hi] selects the
+ * 256-entry page for code points U+hh00..U+hhff.  A NULL slot means
+ * the whole page has no mappings.
+ */
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, NULL,   page03, NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+/*
+ * Per-byte lowercase table handed to the NLS core via struct
+ * nls_table.  NOTE(review): every entry here is 0xff rather than an
+ * identity mapping -- confirm how the NLS core's tolower helper
+ * interprets 0xff before relying on case conversion for this charset.
+ */
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+/*
+ * Per-byte uppercase table handed to the NLS core via struct
+ * nls_table.  NOTE(review): all entries are 0xff, mirroring
+ * charset2lower in this file -- verify the NLS core's handling of
+ * 0xff entries before depending on case conversion here.
+ */
+static const unsigned char charset2upper[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+/*
+ * uni2char - encode one Unicode code point as a single macgreek byte.
+ *
+ * On success writes the mapped byte to out[0] and returns 1 (bytes
+ * produced).  Returns -ENAMETOOLONG when the output buffer has no
+ * room, or -EINVAL when no exact mapping exists (NULL page or zero
+ * entry; note U+0000 itself is rejected because page00[0] is 0x00).
+ */
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;        /* index within the page */
+       unsigned char ch = (uni & 0xff00) >> 8; /* page number (high byte) */
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       /* a NULL page or a 0x00 entry marks an unmapped code point */
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+/*
+ * char2uni - decode one macgreek byte to a Unicode code point.
+ *
+ * Consumes exactly one byte and returns 1, or -EINVAL when the byte's
+ * table entry is U+0000 (charset2uni[0x00] is U+0000, so a NUL byte
+ * is rejected too).  boundlen is unused: single-byte charset;
+ * presumably the NLS core guarantees at least one input byte -- TODO
+ * confirm the caller contract.
+ */
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+/* Operations table registered with the NLS core for "macgreek". */
+static struct nls_table table = {
+       .charset        = "macgreek",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+/* Module load: register the macgreek table with the NLS core. */
+static int __init init_nls_macgreek(void)
+{
+       return register_nls(&table);
+}
+
+/* Module unload: remove the macgreek table from the NLS core. */
+static void __exit exit_nls_macgreek(void)
+{
+       unregister_nls(&table);
+}
+
+/* Module plumbing: wire up the load/unload hooks and declare license. */
+module_init(init_nls_macgreek)
+module_exit(exit_nls_macgreek)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-iceland.c b/fs/nls/mac-iceland.c
new file mode 100644 (file)
index 0000000..babe299
--- /dev/null
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/mac-iceland.c
+ *
+ * Charset maciceland translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00c5, 0x00c7, 0x00c9,
+       0x00d1, 0x00d6, 0x00dc, 0x00e1,
+       0x00e0, 0x00e2, 0x00e4, 0x00e3,
+       0x00e5, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00ed, 0x00ec,
+       0x00ee, 0x00ef, 0x00f1, 0x00f3,
+       0x00f2, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x00dd, 0x00b0, 0x00a2, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x00a9, 0x2122, 0x00b4,
+       0x00a8, 0x2260, 0x00c6, 0x00d8,
+       /* 0xb0 */
+       0x221e, 0x00b1, 0x2264, 0x2265,
+       0x00a5, 0x00b5, 0x2202, 0x2211,
+       0x220f, 0x03c0, 0x222b, 0x00aa,
+       0x00ba, 0x03a9, 0x00e6, 0x00f8,
+       /* 0xc0 */
+       0x00bf, 0x00a1, 0x00ac, 0x221a,
+       0x0192, 0x2248, 0x2206, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x00c0,
+       0x00c3, 0x00d5, 0x0152, 0x0153,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x25ca,
+       0x00ff, 0x0178, 0x2044, 0x20ac,
+       0x00d0, 0x00f0, 0x00de, 0x00fe,
+       /* 0xe0 */
+       0x00fd, 0x00b7, 0x201a, 0x201e,
+       0x2030, 0x00c2, 0x00ca, 0x00c1,
+       0x00cb, 0x00c8, 0x00cd, 0x00ce,
+       0x00cf, 0x00cc, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0xf8ff, 0x00d2, 0x00da, 0x00db,
+       0x00d9, 0x0131, 0x02c6, 0x02dc,
+       0x00af, 0x02d8, 0x02d9, 0x02da,
+       0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+       0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+       0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+       0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+       0xdc, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0xa0, 0xde, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+       0xdd, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0xe0, 0xdf, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+       0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, page02, page03, NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       pagef8, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "maciceland",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_maciceland(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_maciceland(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_maciceland)
+module_exit(exit_nls_maciceland)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-inuit.c b/fs/nls/mac-inuit.c
new file mode 100644 (file)
index 0000000..312364f
--- /dev/null
@@ -0,0 +1,532 @@
+/*
+ * linux/fs/nls/mac-inuit.c
+ *
+ * Charset macinuit translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+/*
+ * macinuit byte value (the index) -> Unicode code point.
+ * 0x00-0x7f are plain ASCII; the high half maps mostly into the
+ * U+14xx-U+16xx syllabics range plus a few symbols and punctuation.
+ */
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x1403, 0x1404, 0x1405, 0x1406,
+       0x140a, 0x140b, 0x1431, 0x1432,
+       0x1433, 0x1434, 0x1438, 0x1439,
+       0x1449, 0x144e, 0x144f, 0x1450,
+       /* 0x90 */
+       0x1451, 0x1455, 0x1456, 0x1466,
+       0x146d, 0x146e, 0x146f, 0x1470,
+       0x1472, 0x1473, 0x1483, 0x148b,
+       0x148c, 0x148d, 0x148e, 0x1490,
+       /* 0xa0 */
+       0x1491, 0x00b0, 0x14a1, 0x14a5,
+       0x14a6, 0x2022, 0x00b6, 0x14a7,
+       0x00ae, 0x00a9, 0x2122, 0x14a8,
+       0x14aa, 0x14ab, 0x14bb, 0x14c2,
+       /* 0xb0 */
+       0x14c3, 0x14c4, 0x14c5, 0x14c7,
+       0x14c8, 0x14d0, 0x14ef, 0x14f0,
+       0x14f1, 0x14f2, 0x14f4, 0x14f5,
+       0x1505, 0x14d5, 0x14d6, 0x14d7,
+       /* 0xc0 */
+       0x14d8, 0x14da, 0x14db, 0x14ea,
+       0x1528, 0x1529, 0x152a, 0x152b,
+       0x152d, 0x2026, 0x00a0, 0x152e,
+       0x153e, 0x1555, 0x1556, 0x1557,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x1558, 0x1559,
+       0x155a, 0x155d, 0x1546, 0x1547,
+       0x1548, 0x1549, 0x154b, 0x154c,
+       /* 0xe0 */
+       0x1550, 0x157f, 0x1580, 0x1581,
+       0x1582, 0x1583, 0x1584, 0x1585,
+       0x158f, 0x1590, 0x1591, 0x1592,
+       0x1593, 0x1594, 0x1595, 0x1671,
+       /* 0xf0 */
+       0x1672, 0x1673, 0x1674, 0x1675,
+       0x1676, 0x1596, 0x15a0, 0x15a1,
+       0x15a2, 0x15a3, 0x15a4, 0x15a5,
+       0x15a6, 0x157c, 0x0141, 0x0142,
+};
+
+/* Reverse map for U+00xx: Unicode low byte -> macinuit byte; 0x00 = unmapped. */
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0xa9, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+       0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/* Reverse map for U+01xx: only U+0141/U+0142 (per charset2uni[0xfe]/[0xff]). */
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0xfe, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/* Reverse map for U+14xx -> macinuit byte; 0x00 = unmapped. */
+static const unsigned char page14[256] = {
+       0x00, 0x00, 0x00, 0x80, 0x81, 0x82, 0x83, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x84, 0x85, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x86, 0x87, 0x88, 0x89, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x8a, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x8d, 0x8e, /* 0x48-0x4f */
+       0x8f, 0x90, 0x00, 0x00, 0x00, 0x91, 0x92, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x95, 0x96, /* 0x68-0x6f */
+       0x97, 0x00, 0x98, 0x99, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x9b, 0x9c, 0x9d, 0x9e, 0x00, /* 0x88-0x8f */
+       0x9f, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0xa2, 0x00, 0x00, 0x00, 0xa3, 0xa4, 0xa7, /* 0xa0-0xa7 */
+       0xab, 0x00, 0xac, 0xad, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0xae, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0xaf, 0xb0, 0xb1, 0xb2, 0x00, 0xb3, /* 0xc0-0xc7 */
+       0xb4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0xb5, 0x00, 0x00, 0x00, 0x00, 0xbd, 0xbe, 0xbf, /* 0xd0-0xd7 */
+       0xc0, 0x00, 0xc1, 0xc2, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0xb6, /* 0xe8-0xef */
+       0xb7, 0xb8, 0xb9, 0x00, 0xba, 0xbb, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/* Reverse map for U+15xx -> macinuit byte; 0x00 = unmapped. */
+static const unsigned char page15[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0xc4, 0xc5, 0xc6, 0xc7, 0x00, 0xc8, 0xcb, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0xdb, /* 0x40-0x47 */
+       0xdc, 0xdd, 0x00, 0xde, 0xdf, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0xe0, 0x00, 0x00, 0x00, 0x00, 0xcd, 0xce, 0xcf, /* 0x50-0x57 */
+       0xd6, 0xd7, 0xd8, 0x00, 0x00, 0xd9, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x00, 0xe1, /* 0x78-0x7f */
+       0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, /* 0x88-0x8f */
+       0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xf5, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/* Reverse map for U+16xx -> macinuit byte; only U+1671-U+1676 map. */
+static const unsigned char page16[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/* Reverse map for U+20xx (dashes, quotes, bullet, ellipsis) -> macinuit byte. */
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/* Reverse map for U+21xx: only U+2122 maps (per charset2uni[0xaa]). */
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/*
+ * High-byte dispatch table for uni2char(): page_uni2charset[ch] selects
+ * the U+XXxx reverse-mapping page; NULL means that Unicode page has no
+ * mappings into this charset.
+ */
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   page14, page15, page16, NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+/*
+ * Lowercase-mapping table consulted via struct nls_table.
+ * NOTE(review): every entry is 0xff. NLS helpers conventionally treat a
+ * zero entry as "no case mapping, keep the byte", so an all-0xff table
+ * would fold every byte to 0xff — verify this matches the intended
+ * generator output for a caseless charset (all-zero, or identity).
+ */
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+/*
+ * Uppercase-mapping table consulted via struct nls_table.
+ * NOTE(review): every entry is 0xfe — same concern as charset2lower
+ * above; confirm the all-0xfe fill against the table generator.
+ */
+static const unsigned char charset2upper[256] = {
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x00-0x07 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x08-0x0f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x10-0x17 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x18-0x1f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x20-0x27 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x28-0x2f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x30-0x37 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x38-0x3f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x40-0x47 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x48-0x4f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x50-0x57 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x58-0x5f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x60-0x67 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x68-0x6f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x70-0x77 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x78-0x7f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x80-0x87 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x88-0x8f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x90-0x97 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x98-0x9f */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa0-0xa7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa8-0xaf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb0-0xb7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb8-0xbf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc0-0xc7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc8-0xcf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd0-0xd7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd8-0xdf */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe0-0xe7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe8-0xef */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0-0xf7 */
+       0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf8-0xff */
+};
+
+/*
+ * uni2char - convert one Unicode character to its macinuit byte.
+ * @uni:      Unicode code point to convert
+ * @out:      destination buffer for the converted byte
+ * @boundlen: space remaining in @out, in bytes
+ *
+ * Returns the number of output bytes written (always 1 on success),
+ * -ENAMETOOLONG if @out has no room, or -EINVAL if @uni has no
+ * mapping in this charset (a zero table entry means "unmapped").
+ */
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       /* Dispatch on the Unicode high byte; a NULL page has no mappings. */
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+/*
+ * char2uni - convert one macinuit byte to a Unicode character.
+ * @rawstring: input bytes; only the first byte is consumed
+ * @boundlen:  length of @rawstring; not examined here since this is a
+ *             single-byte charset (one byte in, one character out)
+ * @uni:       receives the decoded Unicode code point
+ *
+ * Returns the number of input bytes consumed (always 1), or -EINVAL
+ * when the byte decodes to U+0000, which the table uses for "unmapped".
+ */
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+/* NLS operations and tables registered under the name "macinuit". */
+static struct nls_table table = {
+       .charset        = "macinuit",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+/* Register the "macinuit" translation table on module load. */
+static int __init init_nls_macinuit(void)
+{
+       return register_nls(&table);
+}
+
+/* Unregister the translation table on module unload. */
+static void __exit exit_nls_macinuit(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_macinuit)
+module_exit(exit_nls_macinuit)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-roman.c b/fs/nls/mac-roman.c
new file mode 100644 (file)
index 0000000..53ce080
--- /dev/null
@@ -0,0 +1,637 @@
+/*
+ * linux/fs/nls/mac-roman.c
+ *
+ * Charset macroman translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {       /* Mac Roman byte -> Unicode code point */
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00c5, 0x00c7, 0x00c9,
+       0x00d1, 0x00d6, 0x00dc, 0x00e1,
+       0x00e0, 0x00e2, 0x00e4, 0x00e3,
+       0x00e5, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00ed, 0x00ec,
+       0x00ee, 0x00ef, 0x00f1, 0x00f3,
+       0x00f2, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x00a2, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x00a9, 0x2122, 0x00b4,
+       0x00a8, 0x2260, 0x00c6, 0x00d8,
+       /* 0xb0 */
+       0x221e, 0x00b1, 0x2264, 0x2265,
+       0x00a5, 0x00b5, 0x2202, 0x2211,
+       0x220f, 0x03c0, 0x222b, 0x00aa,
+       0x00ba, 0x03a9, 0x00e6, 0x00f8,
+       /* 0xc0 */
+       0x00bf, 0x00a1, 0x00ac, 0x221a,
+       0x0192, 0x2248, 0x2206, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x00c0,
+       0x00c3, 0x00d5, 0x0152, 0x0153,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x25ca,
+       0x00ff, 0x0178, 0x2044, 0x20ac,
+       0x2039, 0x203a, 0xfb01, 0xfb02,
+       /* 0xe0 */
+       0x2021, 0x00b7, 0x201a, 0x201e,
+       0x2030, 0x00c2, 0x00ca, 0x00c1,
+       0x00cb, 0x00c8, 0x00cd, 0x00ce,
+       0x00cf, 0x00cc, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0xf8ff, 0x00d2, 0x00da, 0x00db,
+       0x00d9, 0x0131, 0x02c6, 0x02dc,
+       0x00af, 0x02d8, 0x02d9, 0x02da,
+       0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+static const unsigned char page00[256] = {      /* U+00xx -> Mac Roman byte (0x00 = no mapping) */
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+       0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+       0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+       0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+       0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+       0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {      /* U+01xx -> Mac Roman byte (0x00 = no mapping) */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {      /* U+02xx -> Mac Roman byte (0x00 = no mapping) */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {      /* U+03xx (Greek) -> Mac Roman byte (0x00 = no mapping) */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {      /* U+20xx (punctuation/currency) -> Mac Roman byte */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+       0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {      /* U+21xx -> Mac Roman byte (only U+2122 maps) */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {      /* U+22xx (math operators) -> Mac Roman byte */
+       0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+       0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {      /* U+25xx -> Mac Roman byte (only U+25CA maps) */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {      /* U+F8xx -> Mac Roman byte (only U+F8FF maps) */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+static const unsigned char pagefb[256] = {      /* U+FBxx -> Mac Roman byte (fi/fl ligatures) */
+       0x00, 0xde, 0xdf, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = { /* indexed by high byte of the code point; NULL = whole page unmapped */
+       page00, page01, page02, page03, NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       pagef8, NULL,   NULL,   pagefb, NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = { /* NOTE(review): every entry is 0xff — case folding appears unsupported here; confirm against nls core's handling */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "macroman",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_macroman(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_macroman(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_macroman)
+module_exit(exit_nls_macroman)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-romanian.c b/fs/nls/mac-romanian.c
new file mode 100644 (file)
index 0000000..add6f7a
--- /dev/null
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/mac-romanian.c
+ *
+ * Charset macromanian translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00c5, 0x00c7, 0x00c9,
+       0x00d1, 0x00d6, 0x00dc, 0x00e1,
+       0x00e0, 0x00e2, 0x00e4, 0x00e3,
+       0x00e5, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00ed, 0x00ec,
+       0x00ee, 0x00ef, 0x00f1, 0x00f3,
+       0x00f2, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x00a2, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x00a9, 0x2122, 0x00b4,
+       0x00a8, 0x2260, 0x0102, 0x0218,
+       /* 0xb0 */
+       0x221e, 0x00b1, 0x2264, 0x2265,
+       0x00a5, 0x00b5, 0x2202, 0x2211,
+       0x220f, 0x03c0, 0x222b, 0x00aa,
+       0x00ba, 0x03a9, 0x0103, 0x0219,
+       /* 0xc0 */
+       0x00bf, 0x00a1, 0x00ac, 0x221a,
+       0x0192, 0x2248, 0x2206, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x00c0,
+       0x00c3, 0x00d5, 0x0152, 0x0153,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x25ca,
+       0x00ff, 0x0178, 0x2044, 0x20ac,
+       0x2039, 0x203a, 0x021a, 0x021b,
+       /* 0xe0 */
+       0x2021, 0x00b7, 0x201a, 0x201e,
+       0x2030, 0x00c2, 0x00ca, 0x00c1,
+       0x00cb, 0x00c8, 0x00cd, 0x00ce,
+       0x00cf, 0x00cc, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0xf8ff, 0x00d2, 0x00da, 0x00db,
+       0x00d9, 0x0131, 0x02c6, 0x02dc,
+       0x00af, 0x02d8, 0x02d9, 0x02da,
+       0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+       0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+       0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0x00, 0x82, /* 0xc0-0xc7 */
+       0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+       0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0x00, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0x00, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+       0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0x00, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0xae, 0xbe, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xaf, 0xbf, 0xde, 0xdf, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+       0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+       0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+/* Unicode high byte -> 256-entry mapping page; NULL means the whole page is unmapped. */
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, page02, page03, NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       pagef8, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+/* Generated case-folding table: every entry is 0xff for this charset
+ * (NOTE(review): presumably "no case mapping" — confirm against the NLS core). */
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+/* Generated upper-case table: every entry is 0xff, same convention as
+ * charset2lower above (no case mapping data emitted for this charset). */
+static const unsigned char charset2upper[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+/*
+ * uni2char() - translate one Unicode character into its charset byte.
+ * Writes a single byte to out[0] and returns 1 (number of bytes written)
+ * on success.  Returns -ENAMETOOLONG when the output buffer has no room,
+ * or -EINVAL when the character has no mapping (page missing or entry 0).
+ */
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       /* High byte selects a 256-entry page; low byte indexes within it. */
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+/*
+ * char2uni() - translate one charset byte to its Unicode code point.
+ * Consumes exactly one byte from rawstring and returns 1 on success,
+ * or -EINVAL when the table entry is 0x0000 (unmapped byte).
+ * boundlen is not checked here: this is a single-byte charset, so one
+ * input byte is always enough.
+ */
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+/* Translation operations handed to the kernel NLS core via register_nls(). */
+static struct nls_table table = {
+       .charset        = "macromanian",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+/* Module init: register the "macromanian" translation table with the NLS core. */
+static int __init init_nls_macromanian(void)
+{
+       return register_nls(&table);
+}
+
+/* Module exit: drop the table registered in init_nls_macromanian(). */
+static void __exit exit_nls_macromanian(void)
+{
+       unregister_nls(&table);
+}
+
+/* Module entry points and license. */
+module_init(init_nls_macromanian)
+module_exit(exit_nls_macromanian)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-turkish.c b/fs/nls/mac-turkish.c
new file mode 100644 (file)
index 0000000..dffa96d
--- /dev/null
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/mac-turkish.c
+ *
+ * Charset macturkish translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+/* MacTurkish byte value -> Unicode code point, indexed by the raw byte. */
+static const wchar_t charset2uni[256] = {
+       /* 0x00 */
+       0x0000, 0x0001, 0x0002, 0x0003,
+       0x0004, 0x0005, 0x0006, 0x0007,
+       0x0008, 0x0009, 0x000a, 0x000b,
+       0x000c, 0x000d, 0x000e, 0x000f,
+       /* 0x10 */
+       0x0010, 0x0011, 0x0012, 0x0013,
+       0x0014, 0x0015, 0x0016, 0x0017,
+       0x0018, 0x0019, 0x001a, 0x001b,
+       0x001c, 0x001d, 0x001e, 0x001f,
+       /* 0x20 */
+       0x0020, 0x0021, 0x0022, 0x0023,
+       0x0024, 0x0025, 0x0026, 0x0027,
+       0x0028, 0x0029, 0x002a, 0x002b,
+       0x002c, 0x002d, 0x002e, 0x002f,
+       /* 0x30 */
+       0x0030, 0x0031, 0x0032, 0x0033,
+       0x0034, 0x0035, 0x0036, 0x0037,
+       0x0038, 0x0039, 0x003a, 0x003b,
+       0x003c, 0x003d, 0x003e, 0x003f,
+       /* 0x40 */
+       0x0040, 0x0041, 0x0042, 0x0043,
+       0x0044, 0x0045, 0x0046, 0x0047,
+       0x0048, 0x0049, 0x004a, 0x004b,
+       0x004c, 0x004d, 0x004e, 0x004f,
+       /* 0x50 */
+       0x0050, 0x0051, 0x0052, 0x0053,
+       0x0054, 0x0055, 0x0056, 0x0057,
+       0x0058, 0x0059, 0x005a, 0x005b,
+       0x005c, 0x005d, 0x005e, 0x005f,
+       /* 0x60 */
+       0x0060, 0x0061, 0x0062, 0x0063,
+       0x0064, 0x0065, 0x0066, 0x0067,
+       0x0068, 0x0069, 0x006a, 0x006b,
+       0x006c, 0x006d, 0x006e, 0x006f,
+       /* 0x70 */
+       0x0070, 0x0071, 0x0072, 0x0073,
+       0x0074, 0x0075, 0x0076, 0x0077,
+       0x0078, 0x0079, 0x007a, 0x007b,
+       0x007c, 0x007d, 0x007e, 0x007f,
+       /* 0x80 */
+       0x00c4, 0x00c5, 0x00c7, 0x00c9,
+       0x00d1, 0x00d6, 0x00dc, 0x00e1,
+       0x00e0, 0x00e2, 0x00e4, 0x00e3,
+       0x00e5, 0x00e7, 0x00e9, 0x00e8,
+       /* 0x90 */
+       0x00ea, 0x00eb, 0x00ed, 0x00ec,
+       0x00ee, 0x00ef, 0x00f1, 0x00f3,
+       0x00f2, 0x00f4, 0x00f6, 0x00f5,
+       0x00fa, 0x00f9, 0x00fb, 0x00fc,
+       /* 0xa0 */
+       0x2020, 0x00b0, 0x00a2, 0x00a3,
+       0x00a7, 0x2022, 0x00b6, 0x00df,
+       0x00ae, 0x00a9, 0x2122, 0x00b4,
+       0x00a8, 0x2260, 0x00c6, 0x00d8,
+       /* 0xb0 */
+       0x221e, 0x00b1, 0x2264, 0x2265,
+       0x00a5, 0x00b5, 0x2202, 0x2211,
+       0x220f, 0x03c0, 0x222b, 0x00aa,
+       0x00ba, 0x03a9, 0x00e6, 0x00f8,
+       /* 0xc0 */
+       0x00bf, 0x00a1, 0x00ac, 0x221a,
+       0x0192, 0x2248, 0x2206, 0x00ab,
+       0x00bb, 0x2026, 0x00a0, 0x00c0,
+       0x00c3, 0x00d5, 0x0152, 0x0153,
+       /* 0xd0 */
+       0x2013, 0x2014, 0x201c, 0x201d,
+       0x2018, 0x2019, 0x00f7, 0x25ca,
+       0x00ff, 0x0178, 0x011e, 0x011f,
+       0x0130, 0x0131, 0x015e, 0x015f,
+       /* 0xe0 */
+       0x2021, 0x00b7, 0x201a, 0x201e,
+       0x2030, 0x00c2, 0x00ca, 0x00c1,
+       0x00cb, 0x00c8, 0x00cd, 0x00ce,
+       0x00cf, 0x00cc, 0x00d3, 0x00d4,
+       /* 0xf0 */
+       0xf8ff, 0x00d2, 0x00da, 0x00db,
+       0x00d9, 0xf8a0, 0x02c6, 0x02dc,
+       0x00af, 0x02d8, 0x02d9, 0x02da,
+       0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+/* U+00xx -> MacTurkish byte; 0x00 marks an unmapped slot (index 0 excepted). */
+static const unsigned char page00[256] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+       0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+       0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+       0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+       0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+       0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+       0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+       0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+       0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+       0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+       0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+       0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+       0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+       0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+       0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+       0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+       0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+       0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+       0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+       0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+       0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+/* U+01xx -> MacTurkish byte (Latin Extended-A: G-breve, dotted/dotless I, S-cedilla, etc.). */
+static const unsigned char page01[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0xdb, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xdf, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/* U+02xx -> MacTurkish byte (spacing modifier letters: circumflex, caron, breve...). */
+static const unsigned char page02[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/* U+03xx -> MacTurkish byte (Greek: only Omega at U+03A9 and pi at U+03C0 map). */
+static const unsigned char page03[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/* U+20xx -> MacTurkish byte (general punctuation: dashes, quotes, dagger, per-mille). */
+static const unsigned char page20[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+       0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/* U+21xx -> MacTurkish byte (only the trade mark sign U+2122 maps, to 0xaa). */
+static const unsigned char page21[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/* U+22xx -> MacTurkish byte (mathematical operators: sum, product, integral...). */
+static const unsigned char page22[256] = {
+       0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+       0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+/* U+25xx -> MacTurkish byte (only the lozenge U+25CA maps, to 0xd7). */
+static const unsigned char page25[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+       0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+       page00, page01, page02, page03, NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       page20, page21, page22, NULL,   NULL,   page25, NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+       pagef8, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+       const unsigned char *uni2charset;
+       unsigned char cl = uni & 0x00ff;
+       unsigned char ch = (uni & 0xff00) >> 8;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       uni2charset = page_uni2charset[ch];
+       if (uni2charset && uni2charset[cl])
+               out[0] = uni2charset[cl];
+       else
+               return -EINVAL;
+       return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+       *uni = charset2uni[*rawstring];
+       if (*uni == 0x0000)
+               return -EINVAL;
+       return 1;
+}
+
+static struct nls_table table = {
+       .charset        = "macturkish",
+       .uni2char       = uni2char,
+       .char2uni       = char2uni,
+       .charset2lower  = charset2lower,
+       .charset2upper  = charset2upper,
+       .owner          = THIS_MODULE,
+};
+
+static int __init init_nls_macturkish(void)
+{
+       return register_nls(&table);
+}
+
+static void __exit exit_nls_macturkish(void)
+{
+       unregister_nls(&table);
+}
+
+module_init(init_nls_macturkish)
+module_exit(exit_nls_macturkish)
+
+MODULE_LICENSE("Dual BSD/GPL");
index ccb14d3fc0de99790d282ae17ce984338ca309ab..b39c5c161adb64bff0d33faa64f41d8f4a9942cd 100644 (file)
@@ -123,7 +123,7 @@ int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
 }
 EXPORT_SYMBOL_GPL(__fsnotify_parent);
 
-static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
+static int send_to_group(struct inode *to_tell,
                         struct fsnotify_mark *inode_mark,
                         struct fsnotify_mark *vfsmount_mark,
                         __u32 mask, void *data,
@@ -168,10 +168,10 @@ static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
                        vfsmount_test_mask &= ~inode_mark->ignored_mask;
        }
 
-       pr_debug("%s: group=%p to_tell=%p mnt=%p mask=%x inode_mark=%p"
+       pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p"
                 " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x"
                 " data=%p data_is=%d cookie=%d event=%p\n",
-                __func__, group, to_tell, mnt, mask, inode_mark,
+                __func__, group, to_tell, mask, inode_mark,
                 inode_test_mask, vfsmount_mark, vfsmount_test_mask, data,
                 data_is, cookie, *event);
 
@@ -258,16 +258,16 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
 
                if (inode_group > vfsmount_group) {
                        /* handle inode */
-                       ret = send_to_group(to_tell, NULL, inode_mark, NULL, mask, data,
+                       ret = send_to_group(to_tell, inode_mark, NULL, mask, data,
                                            data_is, cookie, file_name, &event);
                        /* we didn't use the vfsmount_mark */
                        vfsmount_group = NULL;
                } else if (vfsmount_group > inode_group) {
-                       ret = send_to_group(to_tell, &mnt->mnt, NULL, vfsmount_mark, mask, data,
+                       ret = send_to_group(to_tell, NULL, vfsmount_mark, mask, data,
                                            data_is, cookie, file_name, &event);
                        inode_group = NULL;
                } else {
-                       ret = send_to_group(to_tell, &mnt->mnt, inode_mark, vfsmount_mark,
+                       ret = send_to_group(to_tell, inode_mark, vfsmount_mark,
                                            mask, data, data_is, cookie, file_name,
                                            &event);
                }
index 8639169221c7aed21c0bd600ab4ef1a0d8102cb1..7389d2d5e51d257c72f9fb0c1468c38a28b309e4 100644 (file)
@@ -2096,7 +2096,9 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
        err = file_remove_suid(file);
        if (err)
                goto out;
-       file_update_time(file);
+       err = file_update_time(file);
+       if (err)
+               goto out;
        written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
                        count);
 out:
index 2eaa66652944f52be4641435569db8efccdc5304..c6dbd3db6ca8817fba7495a5456ce479e2351b03 100644 (file)
@@ -2258,7 +2258,7 @@ void ntfs_evict_big_inode(struct inode *vi)
        ntfs_inode *ni = NTFS_I(vi);
 
        truncate_inode_pages(&vi->i_data, 0);
-       end_writeback(vi);
+       clear_inode(vi);
 
 #ifdef NTFS_RW
        if (NInoDirty(ni)) {
index c7ee03c22226253d970cce94beb11f6353b3e1d0..0725e605465040b6b1e7c5e7744c5243968158c9 100644 (file)
@@ -422,45 +422,46 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
                               struct ocfs2_blockcheck_stats *stats)
 {
        int rc = 0;
-       struct ocfs2_block_check check;
+       u32 bc_crc32e;
+       u16 bc_ecc;
        u32 crc, ecc;
 
        ocfs2_blockcheck_inc_check(stats);
 
-       check.bc_crc32e = le32_to_cpu(bc->bc_crc32e);
-       check.bc_ecc = le16_to_cpu(bc->bc_ecc);
+       bc_crc32e = le32_to_cpu(bc->bc_crc32e);
+       bc_ecc = le16_to_cpu(bc->bc_ecc);
 
        memset(bc, 0, sizeof(struct ocfs2_block_check));
 
        /* Fast path - if the crc32 validates, we're good to go */
        crc = crc32_le(~0, data, blocksize);
-       if (crc == check.bc_crc32e)
+       if (crc == bc_crc32e)
                goto out;
 
        ocfs2_blockcheck_inc_failure(stats);
        mlog(ML_ERROR,
             "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n",
-            (unsigned int)check.bc_crc32e, (unsigned int)crc);
+            (unsigned int)bc_crc32e, (unsigned int)crc);
 
        /* Ok, try ECC fixups */
        ecc = ocfs2_hamming_encode_block(data, blocksize);
-       ocfs2_hamming_fix_block(data, blocksize, ecc ^ check.bc_ecc);
+       ocfs2_hamming_fix_block(data, blocksize, ecc ^ bc_ecc);
 
        /* And check the crc32 again */
        crc = crc32_le(~0, data, blocksize);
-       if (crc == check.bc_crc32e) {
+       if (crc == bc_crc32e) {
                ocfs2_blockcheck_inc_recover(stats);
                goto out;
        }
 
        mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n",
-            (unsigned int)check.bc_crc32e, (unsigned int)crc);
+            (unsigned int)bc_crc32e, (unsigned int)crc);
 
        rc = -EIO;
 
 out:
-       bc->bc_crc32e = cpu_to_le32(check.bc_crc32e);
-       bc->bc_ecc = cpu_to_le16(check.bc_ecc);
+       bc->bc_crc32e = cpu_to_le32(bc_crc32e);
+       bc->bc_ecc = cpu_to_le16(bc_ecc);
 
        return rc;
 }
@@ -528,7 +529,8 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
                                   struct ocfs2_blockcheck_stats *stats)
 {
        int i, rc = 0;
-       struct ocfs2_block_check check;
+       u32 bc_crc32e;
+       u16 bc_ecc;
        u32 crc, ecc, fix;
 
        BUG_ON(nr < 0);
@@ -538,21 +540,21 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
 
        ocfs2_blockcheck_inc_check(stats);
 
-       check.bc_crc32e = le32_to_cpu(bc->bc_crc32e);
-       check.bc_ecc = le16_to_cpu(bc->bc_ecc);
+       bc_crc32e = le32_to_cpu(bc->bc_crc32e);
+       bc_ecc = le16_to_cpu(bc->bc_ecc);
 
        memset(bc, 0, sizeof(struct ocfs2_block_check));
 
        /* Fast path - if the crc32 validates, we're good to go */
        for (i = 0, crc = ~0; i < nr; i++)
                crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size);
-       if (crc == check.bc_crc32e)
+       if (crc == bc_crc32e)
                goto out;
 
        ocfs2_blockcheck_inc_failure(stats);
        mlog(ML_ERROR,
             "CRC32 failed: stored: %u, computed %u.  Applying ECC.\n",
-            (unsigned int)check.bc_crc32e, (unsigned int)crc);
+            (unsigned int)bc_crc32e, (unsigned int)crc);
 
        /* Ok, try ECC fixups */
        for (i = 0, ecc = 0; i < nr; i++) {
@@ -565,7 +567,7 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
                                                bhs[i]->b_size * 8,
                                                bhs[i]->b_size * 8 * i);
        }
-       fix = ecc ^ check.bc_ecc;
+       fix = ecc ^ bc_ecc;
        for (i = 0; i < nr; i++) {
                /*
                 * Try the fix against each buffer.  It will only affect
@@ -578,19 +580,19 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
        /* And check the crc32 again */
        for (i = 0, crc = ~0; i < nr; i++)
                crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size);
-       if (crc == check.bc_crc32e) {
+       if (crc == bc_crc32e) {
                ocfs2_blockcheck_inc_recover(stats);
                goto out;
        }
 
        mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n",
-            (unsigned int)check.bc_crc32e, (unsigned int)crc);
+            (unsigned int)bc_crc32e, (unsigned int)crc);
 
        rc = -EIO;
 
 out:
-       bc->bc_crc32e = cpu_to_le32(check.bc_crc32e);
-       bc->bc_ecc = cpu_to_le16(check.bc_ecc);
+       bc->bc_crc32e = cpu_to_le32(bc_crc32e);
+       bc->bc_ecc = cpu_to_le16(bc_ecc);
 
        return rc;
 }
index 3a3ed4bb794b0d6c75e7e321b042b1b4128fbd27..fbec0be6232622ddda0c3ed4ed49c50cc0129386 100644 (file)
@@ -293,7 +293,7 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
        struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
        char *name;
        struct list_head *iter, *head=NULL;
-       u64 cookie;
+       __be64 cookie;
        u32 flags;
        u8 node;
 
index a5952ceecba5a83147389ad4a1cd24972ee0bfbe..de854cca12a2d23dea5652d3dad38461c7dbde13 100644 (file)
@@ -679,7 +679,7 @@ struct dlm_query_join_packet {
 };
 
 union dlm_query_join_response {
-       u32 intval;
+       __be32 intval;
        struct dlm_query_join_packet packet;
 };
 
@@ -755,8 +755,8 @@ struct dlm_query_region {
 struct dlm_node_info {
        u8 ni_nodenum;
        u8 pad1;
-       u16 ni_ipv4_port;
-       u32 ni_ipv4_address;
+       __be16 ni_ipv4_port;
+       __be32 ni_ipv4_address;
 };
 
 struct dlm_query_nodeinfo {
index 92f2ead0fab6de22fa138cc4410dee6e1544216c..9e89d70df337fc98836e87f90e38887a716843e6 100644 (file)
@@ -818,7 +818,7 @@ static void dlm_query_join_packet_to_wire(struct dlm_query_join_packet *packet,
        union dlm_query_join_response response;
 
        response.packet = *packet;
-       *wire = cpu_to_be32(response.intval);
+       *wire = be32_to_cpu(response.intval);
 }
 
 static void dlm_query_join_wire_to_packet(u32 wire,
index 3b5825ef3193e31aed188eff90015dccd363c0d4..e31d6ae013abba789b8d72591e8a460f6a8c7afc 100644 (file)
@@ -367,7 +367,7 @@ static void dlmfs_evict_inode(struct inode *inode)
        int status;
        struct dlmfs_inode_private *ip;
 
-       end_writeback(inode);
+       clear_inode(inode);
 
        mlog(0, "inode %lu\n", inode->i_ino);
 
index 745db42528d5fd2f875a099177158f347fd361e7..322216a5f0dd1e0f2e178540781b3c6fd263c985 100644 (file)
@@ -177,21 +177,23 @@ bail:
        return parent;
 }
 
-static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
-                          int connectable)
+static int ocfs2_encode_fh(struct inode *inode, u32 *fh_in, int *max_len,
+                          struct inode *parent)
 {
-       struct inode *inode = dentry->d_inode;
        int len = *max_len;
        int type = 1;
        u64 blkno;
        u32 generation;
        __le32 *fh = (__force __le32 *) fh_in;
 
+#ifdef TRACE_HOOKS_ARE_NOT_BRAINDEAD_IN_YOUR_OPINION
+#error "You go ahead and fix that mess, then.  Somehow"
        trace_ocfs2_encode_fh_begin(dentry, dentry->d_name.len,
                                    dentry->d_name.name,
                                    fh, len, connectable);
+#endif
 
-       if (connectable && (len < 6)) {
+       if (parent && (len < 6)) {
                *max_len = 6;
                type = 255;
                goto bail;
@@ -211,12 +213,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
        fh[1] = cpu_to_le32((u32)(blkno & 0xffffffff));
        fh[2] = cpu_to_le32(generation);
 
-       if (connectable && !S_ISDIR(inode->i_mode)) {
-               struct inode *parent;
-
-               spin_lock(&dentry->d_lock);
-
-               parent = dentry->d_parent->d_inode;
+       if (parent) {
                blkno = OCFS2_I(parent)->ip_blkno;
                generation = parent->i_generation;
 
@@ -224,8 +221,6 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
                fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff));
                fh[5] = cpu_to_le32(generation);
 
-               spin_unlock(&dentry->d_lock);
-
                len = 6;
                type = 2;
 
index 17454a904d7bf488093de9f3db61dc529e0f8e3c..d89e08a81eda8875fcd59d76c5e1d75827e7d7ac 100644 (file)
@@ -273,11 +273,13 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
        inode->i_gid = le32_to_cpu(fe->i_gid);
 
        /* Fast symlinks will have i_size but no allocated clusters. */
-       if (S_ISLNK(inode->i_mode) && !fe->i_clusters)
+       if (S_ISLNK(inode->i_mode) && !fe->i_clusters) {
                inode->i_blocks = 0;
-       else
+               inode->i_mapping->a_ops = &ocfs2_fast_symlink_aops;
+       } else {
                inode->i_blocks = ocfs2_inode_sector_count(inode);
-       inode->i_mapping->a_ops = &ocfs2_aops;
+               inode->i_mapping->a_ops = &ocfs2_aops;
+       }
        inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
        inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
        inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
@@ -331,10 +333,7 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
                    OCFS2_I(inode)->ip_dir_lock_gen = 1;
                    break;
            case S_IFLNK:
-                   if (ocfs2_inode_is_fast_symlink(inode))
-                       inode->i_op = &ocfs2_fast_symlink_inode_operations;
-                   else
-                       inode->i_op = &ocfs2_symlink_inode_operations;
+                   inode->i_op = &ocfs2_symlink_inode_operations;
                    i_size_write(inode, le64_to_cpu(fe->i_size));
                    break;
            default:
@@ -1069,7 +1068,7 @@ static void ocfs2_clear_inode(struct inode *inode)
        int status;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
 
-       end_writeback(inode);
+       clear_inode(inode);
        trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno,
                                inode->i_nlink);
 
index a1a1bfd652c90d49521ad3ea12a908f9a168c1e9..d96f7f81d8dd3257f49bb02885db296af22881cb 100644 (file)
@@ -864,7 +864,7 @@ int ocfs2_info_handle(struct inode *inode, struct ocfs2_info *info,
                if (status)
                        break;
 
-               reqp = (struct ocfs2_info_request *)(unsigned long)req_addr;
+               reqp = (struct ocfs2_info_request __user *)(unsigned long)req_addr;
                if (!reqp) {
                        status = -EINVAL;
                        goto bail;
@@ -888,9 +888,11 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        struct ocfs2_space_resv sr;
        struct ocfs2_new_group_input input;
        struct reflink_arguments args;
-       const char *old_path, *new_path;
+       const char __user *old_path;
+       const char __user *new_path;
        bool preserve;
        struct ocfs2_info info;
+       void __user *argp = (void __user *)arg;
 
        switch (cmd) {
        case OCFS2_IOC_GETFLAGS:
@@ -937,17 +939,15 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
                return ocfs2_group_add(inode, &input);
        case OCFS2_IOC_REFLINK:
-               if (copy_from_user(&args, (struct reflink_arguments *)arg,
-                                  sizeof(args)))
+               if (copy_from_user(&args, argp, sizeof(args)))
                        return -EFAULT;
-               old_path = (const char *)(unsigned long)args.old_path;
-               new_path = (const char *)(unsigned long)args.new_path;
+               old_path = (const char __user *)(unsigned long)args.old_path;
+               new_path = (const char __user *)(unsigned long)args.new_path;
                preserve = (args.preserve != 0);
 
                return ocfs2_reflink_ioctl(inode, old_path, new_path, preserve);
        case OCFS2_IOC_INFO:
-               if (copy_from_user(&info, (struct ocfs2_info __user *)arg,
-                                  sizeof(struct ocfs2_info)))
+               if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
                        return -EFAULT;
 
                return ocfs2_info_handle(inode, &info, 0);
@@ -960,22 +960,20 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
 
-               if (copy_from_user(&range, (struct fstrim_range *)arg,
-                   sizeof(range)))
+               if (copy_from_user(&range, argp, sizeof(range)))
                        return -EFAULT;
 
                ret = ocfs2_trim_fs(sb, &range);
                if (ret < 0)
                        return ret;
 
-               if (copy_to_user((struct fstrim_range *)arg, &range,
-                   sizeof(range)))
+               if (copy_to_user(argp, &range, sizeof(range)))
                        return -EFAULT;
 
                return 0;
        }
        case OCFS2_IOC_MOVE_EXT:
-               return ocfs2_ioctl_move_extents(filp, (void __user *)arg);
+               return ocfs2_ioctl_move_extents(filp, argp);
        default:
                return -ENOTTY;
        }
@@ -988,6 +986,7 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        struct reflink_arguments args;
        struct inode *inode = file->f_path.dentry->d_inode;
        struct ocfs2_info info;
+       void __user *argp = (void __user *)arg;
 
        switch (cmd) {
        case OCFS2_IOC32_GETFLAGS:
@@ -1006,16 +1005,14 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        case FITRIM:
                break;
        case OCFS2_IOC_REFLINK:
-               if (copy_from_user(&args, (struct reflink_arguments *)arg,
-                                  sizeof(args)))
+               if (copy_from_user(&args, argp, sizeof(args)))
                        return -EFAULT;
                preserve = (args.preserve != 0);
 
                return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
                                           compat_ptr(args.new_path), preserve);
        case OCFS2_IOC_INFO:
-               if (copy_from_user(&info, (struct ocfs2_info __user *)arg,
-                                  sizeof(struct ocfs2_info)))
+               if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
                        return -EFAULT;
 
                return ocfs2_info_handle(inode, &info, 1);
index b1e3fce72ea4767bf795c692e98faacebc42797c..6083432f667e3077eb466842ef0f00136d0b4b6f 100644 (file)
@@ -1082,8 +1082,7 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
        context->file = filp;
 
        if (argp) {
-               if (copy_from_user(&range, (struct ocfs2_move_extents *)argp,
-                                  sizeof(range))) {
+               if (copy_from_user(&range, argp, sizeof(range))) {
                        status = -EFAULT;
                        goto out;
                }
@@ -1138,8 +1137,7 @@ out:
         * length and new_offset even if failure happens somewhere.
         */
        if (argp) {
-               if (copy_to_user((struct ocfs2_move_extents *)argp, &range,
-                               sizeof(range)))
+               if (copy_to_user(argp, &range, sizeof(range)))
                        status = -EFAULT;
        }
 
index a9856e3eaaf09753b4921d56ccdfb172db5cad7b..9f39c640cddf2076b951295dde5ef68217b26452 100644 (file)
@@ -1724,15 +1724,16 @@ static int ocfs2_symlink(struct inode *dir,
        fe = (struct ocfs2_dinode *) new_fe_bh->b_data;
        inode->i_rdev = 0;
        newsize = l - 1;
+       inode->i_op = &ocfs2_symlink_inode_operations;
        if (l > ocfs2_fast_symlink_chars(sb)) {
                u32 offset = 0;
 
-               inode->i_op = &ocfs2_symlink_inode_operations;
                status = dquot_alloc_space_nodirty(inode,
                    ocfs2_clusters_to_bytes(osb->sb, 1));
                if (status)
                        goto bail;
                did_quota = 1;
+               inode->i_mapping->a_ops = &ocfs2_aops;
                status = ocfs2_add_inode_data(osb, inode, &offset, 1, 0,
                                              new_fe_bh,
                                              handle, data_ac, NULL,
@@ -1750,7 +1751,7 @@ static int ocfs2_symlink(struct inode *dir,
                i_size_write(inode, newsize);
                inode->i_blocks = ocfs2_inode_sector_count(inode);
        } else {
-               inode->i_op = &ocfs2_fast_symlink_inode_operations;
+               inode->i_mapping->a_ops = &ocfs2_fast_symlink_aops;
                memcpy((char *) fe->id2.i_symlink, symname, l);
                i_size_write(inode, newsize);
                inode->i_blocks = 0;
index 5d22872e2bb36012b711ac7720a90bee24717b15..f1fbb4b552ad3649238becdd9c21d4b138d5c8d7 100644 (file)
 #include "buffer_head_io.h"
 
 
-static char *ocfs2_fast_symlink_getlink(struct inode *inode,
-                                       struct buffer_head **bh)
+static int ocfs2_fast_symlink_readpage(struct file *unused, struct page *page)
 {
-       int status;
-       char *link = NULL;
+       struct inode *inode = page->mapping->host;
+       struct buffer_head *bh;
+       int status = ocfs2_read_inode_block(inode, &bh);
        struct ocfs2_dinode *fe;
+       const char *link;
+       void *kaddr;
+       size_t len;
 
-       status = ocfs2_read_inode_block(inode, bh);
        if (status < 0) {
                mlog_errno(status);
-               link = ERR_PTR(status);
-               goto bail;
+               return status;
        }
 
-       fe = (struct ocfs2_dinode *) (*bh)->b_data;
+       fe = (struct ocfs2_dinode *) bh->b_data;
        link = (char *) fe->id2.i_symlink;
-bail:
-
-       return link;
-}
-
-static int ocfs2_readlink(struct dentry *dentry,
-                         char __user *buffer,
-                         int buflen)
-{
-       int ret;
-       char *link;
-       struct buffer_head *bh = NULL;
-       struct inode *inode = dentry->d_inode;
-
-       link = ocfs2_fast_symlink_getlink(inode, &bh);
-       if (IS_ERR(link)) {
-               ret = PTR_ERR(link);
-               goto out;
-       }
-
-       /*
-        * Without vfsmount we can't update atime now,
-        * but we will update atime here ultimately.
-        */
-       ret = vfs_readlink(dentry, buffer, buflen, link);
-
+       /* will be less than a page size */
+       len = strnlen(link, ocfs2_fast_symlink_chars(inode->i_sb));
+       kaddr = kmap_atomic(page);
+       memcpy(kaddr, link, len + 1);
+       kunmap_atomic(kaddr);
+       SetPageUptodate(page);
+       unlock_page(page);
        brelse(bh);
-out:
-       if (ret < 0)
-               mlog_errno(ret);
-       return ret;
+       return 0;
 }
 
-static void *ocfs2_fast_follow_link(struct dentry *dentry,
-                                   struct nameidata *nd)
-{
-       int status = 0;
-       int len;
-       char *target, *link = ERR_PTR(-ENOMEM);
-       struct inode *inode = dentry->d_inode;
-       struct buffer_head *bh = NULL;
-
-       BUG_ON(!ocfs2_inode_is_fast_symlink(inode));
-       target = ocfs2_fast_symlink_getlink(inode, &bh);
-       if (IS_ERR(target)) {
-               status = PTR_ERR(target);
-               mlog_errno(status);
-               goto bail;
-       }
-
-       /* Fast symlinks can't be large */
-       len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb));
-       link = kzalloc(len + 1, GFP_NOFS);
-       if (!link) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto bail;
-       }
-
-       memcpy(link, target, len);
-
-bail:
-       nd_set_link(nd, status ? ERR_PTR(status) : link);
-       brelse(bh);
-
-       if (status)
-               mlog_errno(status);
-       return NULL;
-}
-
-static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
-{
-       char *link = nd_get_link(nd);
-       if (!IS_ERR(link))
-               kfree(link);
-}
+const struct address_space_operations ocfs2_fast_symlink_aops = {
+       .readpage               = ocfs2_fast_symlink_readpage,
+};
 
 const struct inode_operations ocfs2_symlink_inode_operations = {
-       .readlink       = page_readlink,
+       .readlink       = generic_readlink,
        .follow_link    = page_follow_link_light,
        .put_link       = page_put_link,
        .getattr        = ocfs2_getattr,
@@ -159,15 +98,3 @@ const struct inode_operations ocfs2_symlink_inode_operations = {
        .removexattr    = generic_removexattr,
        .fiemap         = ocfs2_fiemap,
 };
-const struct inode_operations ocfs2_fast_symlink_inode_operations = {
-       .readlink       = ocfs2_readlink,
-       .follow_link    = ocfs2_fast_follow_link,
-       .put_link       = ocfs2_fast_put_link,
-       .getattr        = ocfs2_getattr,
-       .setattr        = ocfs2_setattr,
-       .setxattr       = generic_setxattr,
-       .getxattr       = generic_getxattr,
-       .listxattr      = ocfs2_listxattr,
-       .removexattr    = generic_removexattr,
-       .fiemap         = ocfs2_fiemap,
-};
index 65a6c9c6ad51d1018147cff4685743dd22bae935..71ee4245e9192274552ef9492412b36b068e72d6 100644 (file)
@@ -27,7 +27,7 @@
 #define OCFS2_SYMLINK_H
 
 extern const struct inode_operations ocfs2_symlink_inode_operations;
-extern const struct inode_operations ocfs2_fast_symlink_inode_operations;
+extern const struct address_space_operations ocfs2_fast_symlink_aops;
 
 /*
  * Test whether an inode is a fast symlink.
index dbc842222589fdaf87a45a76ec257761f49b2037..e6213b3725d1b60cdc0b66a40c406df1f5d401c2 100644 (file)
@@ -184,7 +184,7 @@ int omfs_sync_inode(struct inode *inode)
 static void omfs_evict_inode(struct inode *inode)
 {
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
 
        if (inode->i_nlink)
                return;
index d54301219d04f1c8fed18d6de15ed590a593e2ab..d6c79a0dffc7b0827b09562e11fa0f610af5657d 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -654,10 +654,23 @@ static inline int __get_file_write_access(struct inode *inode,
        return error;
 }
 
-static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
-                                       struct file *f,
-                                       int (*open)(struct inode *, struct file *),
-                                       const struct cred *cred)
+int open_check_o_direct(struct file *f)
+{
+       /* NB: we're sure to have correct a_ops only after f_op->open */
+       if (f->f_flags & O_DIRECT) {
+               if (!f->f_mapping->a_ops ||
+                   ((!f->f_mapping->a_ops->direct_IO) &&
+                   (!f->f_mapping->a_ops->get_xip_mem))) {
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
+static struct file *do_dentry_open(struct dentry *dentry, struct vfsmount *mnt,
+                                  struct file *f,
+                                  int (*open)(struct inode *, struct file *),
+                                  const struct cred *cred)
 {
        static const struct file_operations empty_fops = {};
        struct inode *inode;
@@ -713,16 +726,6 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
 
        file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
 
-       /* NB: we're sure to have correct a_ops only after f_op->open */
-       if (f->f_flags & O_DIRECT) {
-               if (!f->f_mapping->a_ops ||
-                   ((!f->f_mapping->a_ops->direct_IO) &&
-                   (!f->f_mapping->a_ops->get_xip_mem))) {
-                       fput(f);
-                       f = ERR_PTR(-EINVAL);
-               }
-       }
-
        return f;
 
 cleanup_all:
@@ -744,12 +747,29 @@ cleanup_all:
        f->f_path.dentry = NULL;
        f->f_path.mnt = NULL;
 cleanup_file:
-       put_filp(f);
        dput(dentry);
        mntput(mnt);
        return ERR_PTR(error);
 }
 
+static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
+                               struct file *f,
+                               int (*open)(struct inode *, struct file *),
+                               const struct cred *cred)
+{
+       struct file *res = do_dentry_open(dentry, mnt, f, open, cred);
+       if (!IS_ERR(res)) {
+               int error = open_check_o_direct(f);
+               if (error) {
+                       fput(res);
+                       res = ERR_PTR(error);
+               }
+       } else {
+               put_filp(f);
+       }
+       return res;
+}
+
 /**
  * lookup_instantiate_filp - instantiates the open intent filp
  * @nd: pointer to nameidata
@@ -804,13 +824,31 @@ struct file *nameidata_to_filp(struct nameidata *nd)
 
        /* Pick up the filp from the open intent */
        filp = nd->intent.open.file;
-       nd->intent.open.file = NULL;
 
        /* Has the filesystem initialised the file for us? */
-       if (filp->f_path.dentry == NULL) {
+       if (filp->f_path.dentry != NULL) {
+               nd->intent.open.file = NULL;
+       } else {
+               struct file *res;
+
                path_get(&nd->path);
-               filp = __dentry_open(nd->path.dentry, nd->path.mnt, filp,
-                                    NULL, cred);
+               res = do_dentry_open(nd->path.dentry, nd->path.mnt,
+                                    filp, NULL, cred);
+               if (!IS_ERR(res)) {
+                       int error;
+
+                       nd->intent.open.file = NULL;
+                       BUG_ON(res != filp);
+
+                       error = open_check_o_direct(filp);
+                       if (error) {
+                               fput(filp);
+                               filp = ERR_PTR(error);
+                       }
+               } else {
+                       /* Allow nd->intent.open.file to be recycled */
+                       filp = res;
+               }
        }
        return filp;
 }
index fec5e4ad071a36bb8783bdcc8c40c07c614340a5..49c1065256fd10d9d5fdca3cf449b1e56bd58a0a 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -654,8 +654,11 @@ out:
                wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }
-       if (ret > 0)
-               file_update_time(filp);
+       if (ret > 0) {
+               int err = file_update_time(filp);
+               if (err)
+                       ret = err;
+       }
        return ret;
 }
 
@@ -693,7 +696,7 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
                        return put_user(count, (int __user *)arg);
                default:
-                       return -EINVAL;
+                       return -ENOIOCTLCMD;
        }
 }
 
index ab5fa9e1a79ac8277ac1cb51db6a92e55ba2c935..bed378db075813350362c39f423d1b4335240bfa 100644 (file)
@@ -257,12 +257,12 @@ int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
                prev_src_mnt  = child;
        }
 out:
-       br_write_lock(vfsmount_lock);
+       br_write_lock(&vfsmount_lock);
        while (!list_empty(&tmp_list)) {
                child = list_first_entry(&tmp_list, struct mount, mnt_hash);
                umount_tree(child, 0, &umount_list);
        }
-       br_write_unlock(vfsmount_lock);
+       br_write_unlock(&vfsmount_lock);
        release_mounts(&umount_list);
        return ret;
 }
index dc4c5a7b9eceb767c0d7305ce5e1b609cdcdaef3..c1c207c36caefeb1fcbc8d782504cfcbf1b616d8 100644 (file)
@@ -370,7 +370,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task, int whole)
 {
        unsigned long vsize, eip, esp, wchan = ~0UL;
-       long priority, nice;
+       int priority, nice;
        int tty_pgrp = -1, tty_nr = 0;
        sigset_t sigign, sigcatch;
        char state;
@@ -492,7 +492,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        seq_put_decimal_ull(m, ' ', 0);
        seq_put_decimal_ull(m, ' ', start_time);
        seq_put_decimal_ull(m, ' ', vsize);
-       seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0);
+       seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
        seq_put_decimal_ull(m, ' ', rsslim);
        seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
        seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
@@ -517,9 +517,23 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
        seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
        seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
-       seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0);
-       seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0);
-       seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0);
+
+       if (mm && permitted) {
+               seq_put_decimal_ull(m, ' ', mm->start_data);
+               seq_put_decimal_ull(m, ' ', mm->end_data);
+               seq_put_decimal_ull(m, ' ', mm->start_brk);
+               seq_put_decimal_ull(m, ' ', mm->arg_start);
+               seq_put_decimal_ull(m, ' ', mm->arg_end);
+               seq_put_decimal_ull(m, ' ', mm->env_start);
+               seq_put_decimal_ull(m, ' ', mm->env_end);
+       } else
+               seq_printf(m, " 0 0 0 0 0 0 0");
+
+       if (permitted)
+               seq_put_decimal_ll(m, ' ', task->exit_code);
+       else
+               seq_put_decimal_ll(m, ' ', 0);
+
        seq_putc(m, '\n');
        if (mm)
                mmput(mm);
@@ -565,3 +579,126 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
 
        return 0;
 }
+
+#ifdef CONFIG_CHECKPOINT_RESTORE
+static struct pid *
+get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
+{
+       struct task_struct *start, *task;
+       struct pid *pid = NULL;
+
+       read_lock(&tasklist_lock);
+
+       start = pid_task(proc_pid(inode), PIDTYPE_PID);
+       if (!start)
+               goto out;
+
+       /*
+        * Lets try to continue searching first, this gives
+        * us significant speedup on children-rich processes.
+        */
+       if (pid_prev) {
+               task = pid_task(pid_prev, PIDTYPE_PID);
+               if (task && task->real_parent == start &&
+                   !(list_empty(&task->sibling))) {
+                       if (list_is_last(&task->sibling, &start->children))
+                               goto out;
+                       task = list_first_entry(&task->sibling,
+                                               struct task_struct, sibling);
+                       pid = get_pid(task_pid(task));
+                       goto out;
+               }
+       }
+
+       /*
+        * Slow search case.
+        *
+        * We might miss some children here if children
+        * are exited while we were not holding the lock,
+        * but it was never promised to be accurate that
+        * much.
+        *
+        * "Just suppose that the parent sleeps, but N children
+        *  exit after we printed their tids. Now the slow paths
+        *  skips N extra children, we miss N tasks." (c)
+        *
+        * So one need to stop or freeze the leader and all
+        * its children to get a precise result.
+        */
+       list_for_each_entry(task, &start->children, sibling) {
+               if (pos-- == 0) {
+                       pid = get_pid(task_pid(task));
+                       break;
+               }
+       }
+
+out:
+       read_unlock(&tasklist_lock);
+       return pid;
+}
+
+static int children_seq_show(struct seq_file *seq, void *v)
+{
+       struct inode *inode = seq->private;
+       pid_t pid;
+
+       pid = pid_nr_ns(v, inode->i_sb->s_fs_info);
+       return seq_printf(seq, "%d ", pid);
+}
+
+static void *children_seq_start(struct seq_file *seq, loff_t *pos)
+{
+       return get_children_pid(seq->private, NULL, *pos);
+}
+
+static void *children_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       struct pid *pid;
+
+       pid = get_children_pid(seq->private, v, *pos + 1);
+       put_pid(v);
+
+       ++*pos;
+       return pid;
+}
+
+static void children_seq_stop(struct seq_file *seq, void *v)
+{
+       put_pid(v);
+}
+
+static const struct seq_operations children_seq_ops = {
+       .start  = children_seq_start,
+       .next   = children_seq_next,
+       .stop   = children_seq_stop,
+       .show   = children_seq_show,
+};
+
+static int children_seq_open(struct inode *inode, struct file *file)
+{
+       struct seq_file *m;
+       int ret;
+
+       ret = seq_open(file, &children_seq_ops);
+       if (ret)
+               return ret;
+
+       m = file->private_data;
+       m->private = inode;
+
+       return ret;
+}
+
+int children_seq_release(struct inode *inode, struct file *file)
+{
+       seq_release(inode, file);
+       return 0;
+}
+
+const struct file_operations proc_tid_children_operations = {
+       .open    = children_seq_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = children_seq_release,
+};
+#endif /* CONFIG_CHECKPOINT_RESTORE */
index d2d3108a611c8cf96b6d1aa275270a3929556ccf..437195f204e14e908bdda87ace0e8b0ce81efb21 100644 (file)
@@ -199,11 +199,6 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
        return result;
 }
 
-struct mm_struct *mm_for_maps(struct task_struct *task)
-{
-       return mm_access(task, PTRACE_MODE_READ);
-}
-
 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
 {
        int res = 0;
@@ -243,7 +238,7 @@ out:
 
 static int proc_pid_auxv(struct task_struct *task, char *buffer)
 {
-       struct mm_struct *mm = mm_for_maps(task);
+       struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
        int res = PTR_ERR(mm);
        if (mm && !IS_ERR(mm)) {
                unsigned int nwords = 0;
@@ -411,12 +406,13 @@ static const struct file_operations proc_lstats_operations = {
 
 static int proc_oom_score(struct task_struct *task, char *buffer)
 {
+       unsigned long totalpages = totalram_pages + total_swap_pages;
        unsigned long points = 0;
 
        read_lock(&tasklist_lock);
        if (pid_alive(task))
-               points = oom_badness(task, NULL, NULL,
-                                       totalram_pages + total_swap_pages);
+               points = oom_badness(task, NULL, NULL, totalpages) *
+                                               1000 / totalpages;
        read_unlock(&tasklist_lock);
        return sprintf(buffer, "%lu\n", points);
 }
@@ -678,7 +674,7 @@ static const struct file_operations proc_single_file_operations = {
        .release        = single_release,
 };
 
-static int mem_open(struct inode* inode, struct file* file)
+static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
 {
        struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
        struct mm_struct *mm;
@@ -686,7 +682,7 @@ static int mem_open(struct inode* inode, struct file* file)
        if (!task)
                return -ESRCH;
 
-       mm = mm_access(task, PTRACE_MODE_ATTACH);
+       mm = mm_access(task, mode);
        put_task_struct(task);
 
        if (IS_ERR(mm))
@@ -706,6 +702,11 @@ static int mem_open(struct inode* inode, struct file* file)
        return 0;
 }
 
+static int mem_open(struct inode *inode, struct file *file)
+{
+       return __mem_open(inode, file, PTRACE_MODE_ATTACH);
+}
+
 static ssize_t mem_rw(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos, int write)
 {
@@ -802,30 +803,29 @@ static const struct file_operations proc_mem_operations = {
        .release        = mem_release,
 };
 
+static int environ_open(struct inode *inode, struct file *file)
+{
+       return __mem_open(inode, file, PTRACE_MODE_READ);
+}
+
 static ssize_t environ_read(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
 {
-       struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
        char *page;
        unsigned long src = *ppos;
-       int ret = -ESRCH;
-       struct mm_struct *mm;
+       int ret = 0;
+       struct mm_struct *mm = file->private_data;
 
-       if (!task)
-               goto out_no_task;
+       if (!mm)
+               return 0;
 
-       ret = -ENOMEM;
        page = (char *)__get_free_page(GFP_TEMPORARY);
        if (!page)
-               goto out;
-
-
-       mm = mm_for_maps(task);
-       ret = PTR_ERR(mm);
-       if (!mm || IS_ERR(mm))
-               goto out_free;
+               return -ENOMEM;
 
        ret = 0;
+       if (!atomic_inc_not_zero(&mm->mm_users))
+               goto free;
        while (count > 0) {
                int this_len, retval, max_len;
 
@@ -837,7 +837,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
                max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
                this_len = (this_len > max_len) ? max_len : this_len;
 
-               retval = access_process_vm(task, (mm->env_start + src),
+               retval = access_remote_vm(mm, (mm->env_start + src),
                        page, this_len, 0);
 
                if (retval <= 0) {
@@ -856,19 +856,18 @@ static ssize_t environ_read(struct file *file, char __user *buf,
                count -= retval;
        }
        *ppos = src;
-
        mmput(mm);
-out_free:
+
+free:
        free_page((unsigned long) page);
-out:
-       put_task_struct(task);
-out_no_task:
        return ret;
 }
 
 static const struct file_operations proc_environ_operations = {
+       .open           = environ_open,
        .read           = environ_read,
        .llseek         = generic_file_llseek,
+       .release        = mem_release,
 };
 
 static ssize_t oom_adjust_read(struct file *file, char __user *buf,
@@ -1804,7 +1803,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
                        rcu_read_lock();
                        file = fcheck_files(files, fd);
                        if (file) {
-                               unsigned i_mode, f_mode = file->f_mode;
+                               unsigned f_mode = file->f_mode;
 
                                rcu_read_unlock();
                                put_files_struct(files);
@@ -1820,12 +1819,14 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
                                        inode->i_gid = GLOBAL_ROOT_GID;
                                }
 
-                               i_mode = S_IFLNK;
-                               if (f_mode & FMODE_READ)
-                                       i_mode |= S_IRUSR | S_IXUSR;
-                               if (f_mode & FMODE_WRITE)
-                                       i_mode |= S_IWUSR | S_IXUSR;
-                               inode->i_mode = i_mode;
+                               if (S_ISLNK(inode->i_mode)) {
+                                       unsigned i_mode = S_IFLNK;
+                                       if (f_mode & FMODE_READ)
+                                               i_mode |= S_IRUSR | S_IXUSR;
+                                       if (f_mode & FMODE_WRITE)
+                                               i_mode |= S_IWUSR | S_IXUSR;
+                                       inode->i_mode = i_mode;
+                               }
 
                                security_task_to_inode(task, inode);
                                put_task_struct(task);
@@ -1849,7 +1850,7 @@ static const struct dentry_operations tid_fd_dentry_operations =
 static struct dentry *proc_fd_instantiate(struct inode *dir,
        struct dentry *dentry, struct task_struct *task, const void *ptr)
 {
-       unsigned fd = *(const unsigned *)ptr;
+       unsigned fd = (unsigned long)ptr;
        struct inode *inode;
        struct proc_inode *ei;
        struct dentry *error = ERR_PTR(-ENOENT);
@@ -1860,6 +1861,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
        ei = PROC_I(inode);
        ei->fd = fd;
 
+       inode->i_mode = S_IFLNK;
        inode->i_op = &proc_pid_link_inode_operations;
        inode->i_size = 64;
        ei->op.proc_get_link = proc_fd_link;
@@ -1886,7 +1888,7 @@ static struct dentry *proc_lookupfd_common(struct inode *dir,
        if (fd == ~0U)
                goto out;
 
-       result = instantiate(dir, dentry, task, &fd);
+       result = instantiate(dir, dentry, task, (void *)(unsigned long)fd);
 out:
        put_task_struct(task);
 out_no_task:
@@ -1929,21 +1931,22 @@ static int proc_readfd_common(struct file * filp, void * dirent,
                             fd++, filp->f_pos++) {
                                char name[PROC_NUMBUF];
                                int len;
+                               int rv;
 
                                if (!fcheck_files(files, fd))
                                        continue;
                                rcu_read_unlock();
 
                                len = snprintf(name, sizeof(name), "%d", fd);
-                               if (proc_fill_cache(filp, dirent, filldir,
-                                                   name, len, instantiate,
-                                                   p, &fd) < 0) {
-                                       rcu_read_lock();
-                                       break;
-                               }
+                               rv = proc_fill_cache(filp, dirent, filldir,
+                                                    name, len, instantiate, p,
+                                                    (void *)(unsigned long)fd);
+                               if (rv < 0)
+                                       goto out_fd_loop;
                                rcu_read_lock();
                        }
                        rcu_read_unlock();
+out_fd_loop:
                        put_files_struct(files);
        }
 out:
@@ -2023,11 +2026,8 @@ static int map_files_d_revalidate(struct dentry *dentry, struct nameidata *nd)
        if (!task)
                goto out_notask;
 
-       if (!ptrace_may_access(task, PTRACE_MODE_READ))
-               goto out;
-
-       mm = get_task_mm(task);
-       if (!mm)
+       mm = mm_access(task, PTRACE_MODE_READ);
+       if (IS_ERR_OR_NULL(mm))
                goto out;
 
        if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
@@ -2356,7 +2356,7 @@ static const struct inode_operations proc_fd_inode_operations = {
 static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
        struct dentry *dentry, struct task_struct *task, const void *ptr)
 {
-       unsigned fd = *(unsigned *)ptr;
+       unsigned fd = (unsigned long)ptr;
        struct inode *inode;
        struct proc_inode *ei;
        struct dentry *error = ERR_PTR(-ENOENT);
@@ -3403,6 +3403,9 @@ static const struct pid_entry tid_base_stuff[] = {
        ONE("stat",      S_IRUGO, proc_tid_stat),
        ONE("statm",     S_IRUGO, proc_pid_statm),
        REG("maps",      S_IRUGO, proc_tid_maps_operations),
+#ifdef CONFIG_CHECKPOINT_RESTORE
+       REG("children",  S_IRUGO, proc_tid_children_operations),
+#endif
 #ifdef CONFIG_NUMA
        REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations),
 #endif
index 554ecc54799fff058067c19d2261d56bbc6391d7..7ac817b64a7193b71cf867f72fc0d94d68294c90 100644 (file)
@@ -33,7 +33,7 @@ static void proc_evict_inode(struct inode *inode)
        const struct proc_ns_operations *ns_ops;
 
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
 
        /* Stop tracking associated processes */
        put_pid(PROC_I(inode)->pid);
index 5f79bb8b4c60211620c8af8212e9ec97c0978dc1..eca4aca5b6e227c11bb13c100deac3861b747a44 100644 (file)
@@ -31,8 +31,6 @@ struct vmalloc_info {
        unsigned long   largest_chunk;
 };
 
-extern struct mm_struct *mm_for_maps(struct task_struct *);
-
 #ifdef CONFIG_MMU
 #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
 extern void get_vmalloc_info(struct vmalloc_info *vmi);
@@ -56,6 +54,7 @@ extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
                                struct pid *pid, struct task_struct *task);
 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
 
+extern const struct file_operations proc_tid_children_operations;
 extern const struct file_operations proc_pid_maps_operations;
 extern const struct file_operations proc_tid_maps_operations;
 extern const struct file_operations proc_pid_numa_maps_operations;
index 1030a716d155b4a19a91972b2ead074733b4add9..4540b8f76f163fbaaef250b6facf161424a90869 100644 (file)
@@ -125,7 +125,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
        if (!priv->task)
                return ERR_PTR(-ESRCH);
 
-       mm = mm_for_maps(priv->task);
+       mm = mm_access(priv->task, PTRACE_MODE_READ);
        if (!mm || IS_ERR(mm))
                return mm;
        down_read(&mm->mmap_sem);
@@ -393,6 +393,7 @@ struct mem_size_stats {
        unsigned long anonymous;
        unsigned long anonymous_thp;
        unsigned long swap;
+       unsigned long nonlinear;
        u64 pss;
 };
 
@@ -402,24 +403,33 @@ static void smaps_pte_entry(pte_t ptent, unsigned long addr,
 {
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = mss->vma;
-       struct page *page;
+       pgoff_t pgoff = linear_page_index(vma, addr);
+       struct page *page = NULL;
        int mapcount;
 
-       if (is_swap_pte(ptent)) {
-               mss->swap += ptent_size;
-               return;
+       if (pte_present(ptent)) {
+               page = vm_normal_page(vma, addr, ptent);
+       } else if (is_swap_pte(ptent)) {
+               swp_entry_t swpent = pte_to_swp_entry(ptent);
+
+               if (!non_swap_entry(swpent))
+                       mss->swap += ptent_size;
+               else if (is_migration_entry(swpent))
+                       page = migration_entry_to_page(swpent);
+       } else if (pte_file(ptent)) {
+               if (pte_to_pgoff(ptent) != pgoff)
+                       mss->nonlinear += ptent_size;
        }
 
-       if (!pte_present(ptent))
-               return;
-
-       page = vm_normal_page(vma, addr, ptent);
        if (!page)
                return;
 
        if (PageAnon(page))
                mss->anonymous += ptent_size;
 
+       if (page->index != pgoff)
+               mss->nonlinear += ptent_size;
+
        mss->resident += ptent_size;
        /* Accumulate the size in pages that have been accessed. */
        if (pte_young(ptent) || PageReferenced(page))
@@ -521,6 +531,10 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
                   (vma->vm_flags & VM_LOCKED) ?
                        (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 
+       if (vma->vm_flags & VM_NONLINEAR)
+               seq_printf(m, "Nonlinear:      %8lu kB\n",
+                               mss.nonlinear >> 10);
+
        if (m->count < m->size)  /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task->mm))
                        ? vma->vm_start : 0;
@@ -700,6 +714,7 @@ struct pagemapread {
 
 #define PM_PRESENT          PM_STATUS(4LL)
 #define PM_SWAP             PM_STATUS(2LL)
+#define PM_FILE             PM_STATUS(1LL)
 #define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
 #define PM_END_OF_BUFFER    1
 
@@ -733,22 +748,33 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
        return err;
 }
 
-static u64 swap_pte_to_pagemap_entry(pte_t pte)
-{
-       swp_entry_t e = pte_to_swp_entry(pte);
-       return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
-}
-
-static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte)
+static void pte_to_pagemap_entry(pagemap_entry_t *pme,
+               struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
-       if (is_swap_pte(pte))
-               *pme = make_pme(PM_PFRAME(swap_pte_to_pagemap_entry(pte))
-                               | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP);
-       else if (pte_present(pte))
-               *pme = make_pme(PM_PFRAME(pte_pfn(pte))
-                               | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
-       else
+       u64 frame, flags;
+       struct page *page = NULL;
+
+       if (pte_present(pte)) {
+               frame = pte_pfn(pte);
+               flags = PM_PRESENT;
+               page = vm_normal_page(vma, addr, pte);
+       } else if (is_swap_pte(pte)) {
+               swp_entry_t entry = pte_to_swp_entry(pte);
+
+               frame = swp_type(entry) |
+                       (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
+               flags = PM_SWAP;
+               if (is_migration_entry(entry))
+                       page = migration_entry_to_page(entry);
+       } else {
                *pme = make_pme(PM_NOT_PRESENT);
+               return;
+       }
+
+       if (page && !PageAnon(page))
+               flags |= PM_FILE;
+
+       *pme = make_pme(PM_PFRAME(frame) | PM_PSHIFT(PAGE_SHIFT) | flags);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -784,7 +810,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 
        /* find the first VMA at or above 'addr' */
        vma = find_vma(walk->mm, addr);
-       if (pmd_trans_huge_lock(pmd, vma) == 1) {
+       if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
                for (; addr != end; addr += PAGE_SIZE) {
                        unsigned long offset;
 
@@ -815,7 +841,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                if (vma && (vma->vm_start <= addr) &&
                    !is_vm_hugetlb_page(vma)) {
                        pte = pte_offset_map(pmd, addr);
-                       pte_to_pagemap_entry(&pme, *pte);
+                       pte_to_pagemap_entry(&pme, vma, addr, *pte);
                        /* unmap before userspace copy */
                        pte_unmap(pte);
                }
@@ -869,11 +895,11 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
  * For each page in the address space, this file contains one 64-bit entry
  * consisting of the following:
  *
- * Bits 0-55  page frame number (PFN) if present
+ * Bits 0-54  page frame number (PFN) if present
  * Bits 0-4   swap type if swapped
- * Bits 5-55  swap offset if swapped
+ * Bits 5-54  swap offset if swapped
  * Bits 55-60 page shift (page size = 1<<page shift)
- * Bit  61    reserved for future use
+ * Bit  61    page is file-page or shared-anon
  * Bit  62    page swapped
  * Bit  63    page present
  *
@@ -919,7 +945,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
        if (!pm.buffer)
                goto out_task;
 
-       mm = mm_for_maps(task);
+       mm = mm_access(task, PTRACE_MODE_READ);
        ret = PTR_ERR(mm);
        if (!mm || IS_ERR(mm))
                goto out_free;
index 74fe164d1b233924f6a4d3e8ddd24624660dd63a..1ccfa537f5f5dfaac3c1351dabe391f0ca3e6d22 100644 (file)
@@ -223,7 +223,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
        if (!priv->task)
                return ERR_PTR(-ESRCH);
 
-       mm = mm_for_maps(priv->task);
+       mm = mm_access(priv->task, PTRACE_MODE_READ);
        if (!mm || IS_ERR(mm)) {
                put_task_struct(priv->task);
                priv->task = NULL;
index 12412852d88a94d574bacebb5e64200f202db852..5e289a7cbad17d8547458f1d8b2526f2e85e5cb9 100644 (file)
@@ -23,12 +23,12 @@ static unsigned mounts_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &p->ns->poll, wait);
 
-       br_read_lock(vfsmount_lock);
+       br_read_lock(&vfsmount_lock);
        if (p->m.poll_event != ns->event) {
                p->m.poll_event = ns->event;
                res |= POLLERR | POLLPRI;
        }
-       br_read_unlock(vfsmount_lock);
+       br_read_unlock(&vfsmount_lock);
 
        return res;
 }
index 19507889bb7ff123b3922fa439fc0c8ef1ded913..aeb19e68e0860a436998223cf854ff5502730e53 100644 (file)
@@ -85,7 +85,7 @@ static void pstore_evict_inode(struct inode *inode)
        struct pstore_private   *p = inode->i_private;
        unsigned long           flags;
 
-       end_writeback(inode);
+       clear_inode(inode);
        if (p) {
                spin_lock_irqsave(&allpstore_lock, flags);
                list_del(&p->list);
index d69a1d1d7e154553f3b7ae6c85602f3f9e2a9104..10cbe841cb7ecca3a823da92621ec3e614a2be09 100644 (file)
  * spinlock to internal buffers before writing.
  *
  * Lock ordering (including related VFS locks) is the following:
- *   i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
+ *   dqonoff_mutex > i_mutex > journal_lock > dqptr_sem > dquot->dq_lock >
  *   dqio_mutex
+ * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc.
  * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
  * dqptr_sem. But filesystem has to count with the fact that functions such as
  * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
  * from inside a transaction to keep filesystem consistency after a crash. Also
  * filesystems usually want to do some IO on dquot from ->mark_dirty which is
  * called with dqptr_sem held.
- * i_mutex on quota files is special (it's below dqio_mutex)
  */
 
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
@@ -638,7 +638,7 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
        dqstats_inc(DQST_SYNCS);
        mutex_unlock(&dqopt->dqonoff_mutex);
 
-       if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE))
+       if (!wait || (dqopt->flags & DQUOT_QUOTA_SYS_FILE))
                return 0;
 
        /* This is not very clever (and fast) but currently I don't know about
@@ -652,18 +652,17 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
         * Now when everything is written we can discard the pagecache so
         * that userspace sees the changes.
         */
-       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+       mutex_lock(&dqopt->dqonoff_mutex);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (type != -1 && cnt != type)
                        continue;
                if (!sb_has_quota_active(sb, cnt))
                        continue;
-               mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
-                                 I_MUTEX_QUOTA);
-               truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
-               mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
+               mutex_lock(&dqopt->files[cnt]->i_mutex);
+               truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
+               mutex_unlock(&dqopt->files[cnt]->i_mutex);
        }
-       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+       mutex_unlock(&dqopt->dqonoff_mutex);
 
        return 0;
 }
@@ -907,14 +906,14 @@ static void add_dquot_ref(struct super_block *sb, int type)
                        spin_unlock(&inode->i_lock);
                        continue;
                }
-#ifdef CONFIG_QUOTA_DEBUG
-               if (unlikely(inode_get_rsv_space(inode) > 0))
-                       reserved = 1;
-#endif
                __iget(inode);
                spin_unlock(&inode->i_lock);
                spin_unlock(&inode_sb_list_lock);
 
+#ifdef CONFIG_QUOTA_DEBUG
+               if (unlikely(inode_get_rsv_space(inode) > 0))
+                       reserved = 1;
+#endif
                iput(old_inode);
                __dquot_initialize(inode, type);
 
@@ -2037,8 +2036,7 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
                        /* If quota was reenabled in the meantime, we have
                         * nothing to do */
                        if (!sb_has_quota_loaded(sb, cnt)) {
-                               mutex_lock_nested(&toputinode[cnt]->i_mutex,
-                                                 I_MUTEX_QUOTA);
+                               mutex_lock(&toputinode[cnt]->i_mutex);
                                toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
                                  S_NOATIME | S_NOQUOTA);
                                truncate_inode_pages(&toputinode[cnt]->i_data,
@@ -2133,7 +2131,7 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
                /* We don't want quota and atime on quota files (deadlocks
                 * possible) Also nobody should write to the file - we use
                 * special IO operations which ignore the immutable bit. */
-               mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
+               mutex_lock(&inode->i_mutex);
                oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
                                             S_NOQUOTA);
                inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
@@ -2180,7 +2178,7 @@ out_file_init:
        iput(inode);
 out_lock:
        if (oldflags != -1) {
-               mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
+               mutex_lock(&inode->i_mutex);
                /* Set the flags back (in the case of accidental quotaon()
                 * on a wrong file we don't want to mess up the flags) */
                inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
index ffc99d22e0a3656711f14ac7e094cc954d1d90bd..c20614f86c01ed88ed36a65e9dfafdabfd3ba4d3 100644 (file)
@@ -633,8 +633,7 @@ ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
                              unsigned long nr_segs, unsigned long fast_segs,
                              struct iovec *fast_pointer,
-                             struct iovec **ret_pointer,
-                             int check_access)
+                             struct iovec **ret_pointer)
 {
        unsigned long seg;
        ssize_t ret;
@@ -690,7 +689,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
                        ret = -EINVAL;
                        goto out;
                }
-               if (check_access
+               if (type >= 0
                    && unlikely(!access_ok(vrfy_dir(type), buf, len))) {
                        ret = -EFAULT;
                        goto out;
@@ -723,7 +722,7 @@ static ssize_t do_readv_writev(int type, struct file *file,
        }
 
        ret = rw_copy_check_uvector(type, uvector, nr_segs,
-                                   ARRAY_SIZE(iovstack), iovstack, &iov, 1);
+                                   ARRAY_SIZE(iovstack), iovstack, &iov);
        if (ret <= 0)
                goto out;
 
index cc0a8227cddf688f70e289c427666057ce98e613..39e3370d79cf1e6399843137e2d64165baf49a03 100644 (file)
@@ -108,11 +108,11 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
        int error;
        struct file * file;
        struct readdir_callback buf;
+       int fput_needed;
 
-       error = -EBADF;
-       file = fget(fd);
+       file = fget_light(fd, &fput_needed);
        if (!file)
-               goto out;
+               return -EBADF;
 
        buf.result = 0;
        buf.dirent = dirent;
@@ -121,8 +121,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
        if (buf.result)
                error = buf.result;
 
-       fput(file);
-out:
+       fput_light(file, fput_needed);
        return error;
 }
 
@@ -195,16 +194,15 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
        struct file * file;
        struct linux_dirent __user * lastdirent;
        struct getdents_callback buf;
+       int fput_needed;
        int error;
 
-       error = -EFAULT;
        if (!access_ok(VERIFY_WRITE, dirent, count))
-               goto out;
+               return -EFAULT;
 
-       error = -EBADF;
-       file = fget(fd);
+       file = fget_light(fd, &fput_needed);
        if (!file)
-               goto out;
+               return -EBADF;
 
        buf.current_dir = dirent;
        buf.previous = NULL;
@@ -221,8 +219,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
                else
                        error = count - buf.count;
        }
-       fput(file);
-out:
+       fput_light(file, fput_needed);
        return error;
 }
 
@@ -278,16 +275,15 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
        struct file * file;
        struct linux_dirent64 __user * lastdirent;
        struct getdents_callback64 buf;
+       int fput_needed;
        int error;
 
-       error = -EFAULT;
        if (!access_ok(VERIFY_WRITE, dirent, count))
-               goto out;
+               return -EFAULT;
 
-       error = -EBADF;
-       file = fget(fd);
+       file = fget_light(fd, &fput_needed);
        if (!file)
-               goto out;
+               return -EBADF;
 
        buf.current_dir = dirent;
        buf.previous = NULL;
@@ -305,7 +301,6 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
                else
                        error = count - buf.count;
        }
-       fput(file);
-out:
+       fput_light(file, fput_needed);
        return error;
 }
index 494c315c74175cd2e917eae8a419f6d570afb0de..a6d4268fb6c11798db5f8339bd14ab297cd9b21f 100644 (file)
@@ -76,14 +76,14 @@ void reiserfs_evict_inode(struct inode *inode)
                ;
        }
       out:
-       end_writeback(inode);   /* note this must go after the journal_end to prevent deadlock */
+       clear_inode(inode);     /* note this must go after the journal_end to prevent deadlock */
        dquot_drop(inode);
        inode->i_blocks = 0;
        reiserfs_write_unlock_once(inode->i_sb, depth);
        return;
 
 no_delete:
-       end_writeback(inode);
+       clear_inode(inode);
        dquot_drop(inode);
 }
 
@@ -1592,13 +1592,12 @@ struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
                (fh_type == 6) ? fid->raw[5] : 0);
 }
 
-int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
-                      int need_parent)
+int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
+                      struct inode *parent)
 {
-       struct inode *inode = dentry->d_inode;
        int maxlen = *lenp;
 
-       if (need_parent && (maxlen < 5)) {
+       if (parent && (maxlen < 5)) {
                *lenp = 5;
                return 255;
        } else if (maxlen < 3) {
@@ -1610,20 +1609,15 @@ int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
        data[1] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
        data[2] = inode->i_generation;
        *lenp = 3;
-       /* no room for directory info? return what we've stored so far */
-       if (maxlen < 5 || !need_parent)
-               return 3;
-
-       spin_lock(&dentry->d_lock);
-       inode = dentry->d_parent->d_inode;
-       data[3] = inode->i_ino;
-       data[4] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
-       *lenp = 5;
-       if (maxlen >= 6) {
-               data[5] = inode->i_generation;
-               *lenp = 6;
-       }
-       spin_unlock(&dentry->d_lock);
+       if (parent) {
+               data[3] = parent->i_ino;
+               data[4] = le32_to_cpu(INODE_PKEY(parent)->k_dir_id);
+               *lenp = 5;
+               if (maxlen >= 6) {
+                       data[5] = parent->i_generation;
+                       *lenp = 6;
+               }
+       }
        return *lenp;
 }
 
index b1a08573fe14277961aa3039ce0fb587d4f889cc..afcadcc03e8ac87c7f25f3e2393b3c108daaf91d 100644 (file)
@@ -1923,6 +1923,8 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
         * the workqueue job (flush_async_commit) needs this lock
         */
        reiserfs_write_unlock(sb);
+
+       cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
        flush_workqueue(commit_wq);
 
        if (!reiserfs_mounted_fs_count) {
@@ -3231,8 +3233,6 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th,
                               th->t_trans_id, journal->j_trans_id);
        }
 
-       sb->s_dirt = 1;
-
        prepared = test_clear_buffer_journal_prepared(bh);
        clear_buffer_journal_restore_dirty(bh);
        /* already in this transaction, we are done */
@@ -3316,6 +3316,7 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th,
                journal->j_first = cn;
                journal->j_last = cn;
        }
+       reiserfs_schedule_old_flush(sb);
        return 0;
 }
 
@@ -3492,7 +3493,7 @@ static void flush_async_commits(struct work_struct *work)
 ** flushes any old transactions to disk
 ** ends the current transaction if it is too old
 */
-int reiserfs_flush_old_commits(struct super_block *sb)
+void reiserfs_flush_old_commits(struct super_block *sb)
 {
        time_t now;
        struct reiserfs_transaction_handle th;
@@ -3502,9 +3503,8 @@ int reiserfs_flush_old_commits(struct super_block *sb)
        /* safety check so we don't flush while we are replaying the log during
         * mount
         */
-       if (list_empty(&journal->j_journal_list)) {
-               return 0;
-       }
+       if (list_empty(&journal->j_journal_list))
+               return;
 
        /* check the current transaction.  If there are no writers, and it is
         * too old, finish it, and force the commit blocks to disk
@@ -3526,7 +3526,6 @@ int reiserfs_flush_old_commits(struct super_block *sb)
                        do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT);
                }
        }
-       return sb->s_dirt;
 }
 
 /*
@@ -3955,7 +3954,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
         ** it tells us if we should continue with the journal_end, or just return
         */
        if (!check_journal_end(th, sb, nblocks, flags)) {
-               sb->s_dirt = 1;
+               reiserfs_schedule_old_flush(sb);
                wake_queued_writers(sb);
                reiserfs_async_progress_wait(sb);
                goto out;
index a59d27126338e43939f8fc04942acf13c956f1e4..33215f57ea06ce3026ef2d488832a337a361bd71 100644 (file)
@@ -480,6 +480,11 @@ struct reiserfs_sb_info {
        struct dentry *priv_root;       /* root of /.reiserfs_priv */
        struct dentry *xattr_root;      /* root of /.reiserfs_priv/xattrs */
        int j_errno;
+
+       int work_queued;              /* non-zero delayed work is queued */
+       struct delayed_work old_work; /* old transactions flush delayed work */
+       spinlock_t old_work_lock;     /* protects old_work and work_queued */
+
 #ifdef CONFIG_QUOTA
        char *s_qf_names[MAXQUOTAS];
        int s_jquota_fmt;
@@ -2452,7 +2457,7 @@ struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
 int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *);
 int reiserfs_commit_page(struct inode *inode, struct page *page,
                         unsigned from, unsigned to);
-int reiserfs_flush_old_commits(struct super_block *);
+void reiserfs_flush_old_commits(struct super_block *);
 int reiserfs_commit_for_inode(struct inode *);
 int reiserfs_inode_needs_commit(struct inode *);
 void reiserfs_update_inode_transaction(struct inode *);
@@ -2487,6 +2492,7 @@ void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...);
 int reiserfs_allocate_list_bitmaps(struct super_block *s,
                                   struct reiserfs_list_bitmap *, unsigned int);
 
+void reiserfs_schedule_old_flush(struct super_block *s);
 void add_save_link(struct reiserfs_transaction_handle *th,
                   struct inode *inode, int truncate);
 int remove_save_link(struct inode *inode, int truncate);
@@ -2611,8 +2617,8 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
                                     int fh_len, int fh_type);
 struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
                                     int fh_len, int fh_type);
-int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
-                      int connectable);
+int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
+                      struct inode *parent);
 
 int reiserfs_truncate_file(struct inode *, int update_timestamps);
 void make_cpu_key(struct cpu_key *cpu_key, struct inode *inode, loff_t offset,
index 9a17f63c3fd7f3618a44bdf946476e4957dcd50d..3ce02cff5e90bd1c26374e12e15a6f56ea8c8803 100644 (file)
@@ -200,7 +200,6 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
                                          (bmap_nr_new - bmap_nr)));
        PUT_SB_BLOCK_COUNT(s, block_count_new);
        PUT_SB_BMAP_NR(s, bmap_would_wrap(bmap_nr_new) ? : bmap_nr_new);
-       s->s_dirt = 1;
 
        journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
 
index 8b7616ef06d89a47904866decd2a8123461cc4d7..651ce767b55d8241e283b3001d7fc9e6d803b317 100644 (file)
@@ -72,20 +72,58 @@ static int reiserfs_sync_fs(struct super_block *s, int wait)
        if (!journal_begin(&th, s, 1))
                if (!journal_end_sync(&th, s, 1))
                        reiserfs_flush_old_commits(s);
-       s->s_dirt = 0;  /* Even if it's not true.
-                        * We'll loop forever in sync_supers otherwise */
        reiserfs_write_unlock(s);
        return 0;
 }
 
-static void reiserfs_write_super(struct super_block *s)
+static void flush_old_commits(struct work_struct *work)
 {
+       struct reiserfs_sb_info *sbi;
+       struct super_block *s;
+
+       sbi = container_of(work, struct reiserfs_sb_info, old_work.work);
+       s = sbi->s_journal->j_work_sb;
+
+       spin_lock(&sbi->old_work_lock);
+       sbi->work_queued = 0;
+       spin_unlock(&sbi->old_work_lock);
+
        reiserfs_sync_fs(s, 1);
 }
 
+void reiserfs_schedule_old_flush(struct super_block *s)
+{
+       struct reiserfs_sb_info *sbi = REISERFS_SB(s);
+       unsigned long delay;
+
+       if (s->s_flags & MS_RDONLY)
+               return;
+
+       spin_lock(&sbi->old_work_lock);
+       if (!sbi->work_queued) {
+               delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+               queue_delayed_work(system_long_wq, &sbi->old_work, delay);
+               sbi->work_queued = 1;
+       }
+       spin_unlock(&sbi->old_work_lock);
+}
+
+static void cancel_old_flush(struct super_block *s)
+{
+       struct reiserfs_sb_info *sbi = REISERFS_SB(s);
+
+       cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
+       spin_lock(&sbi->old_work_lock);
+       sbi->work_queued = 0;
+       spin_unlock(&sbi->old_work_lock);
+}
+
 static int reiserfs_freeze(struct super_block *s)
 {
        struct reiserfs_transaction_handle th;
+
+       cancel_old_flush(s);
+
        reiserfs_write_lock(s);
        if (!(s->s_flags & MS_RDONLY)) {
                int err = journal_begin(&th, s, 1);
@@ -99,7 +137,6 @@ static int reiserfs_freeze(struct super_block *s)
                        journal_end_sync(&th, s, 1);
                }
        }
-       s->s_dirt = 0;
        reiserfs_write_unlock(s);
        return 0;
 }
@@ -483,9 +520,6 @@ static void reiserfs_put_super(struct super_block *s)
 
        reiserfs_write_lock(s);
 
-       if (s->s_dirt)
-               reiserfs_write_super(s);
-
        /* change file system state to current state if it was mounted with read-write permissions */
        if (!(s->s_flags & MS_RDONLY)) {
                if (!journal_begin(&th, s, 10)) {
@@ -692,7 +726,6 @@ static const struct super_operations reiserfs_sops = {
        .dirty_inode = reiserfs_dirty_inode,
        .evict_inode = reiserfs_evict_inode,
        .put_super = reiserfs_put_super,
-       .write_super = reiserfs_write_super,
        .sync_fs = reiserfs_sync_fs,
        .freeze_fs = reiserfs_freeze,
        .unfreeze_fs = reiserfs_unfreeze,
@@ -1400,7 +1433,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        err = journal_end(&th, s, 10);
        if (err)
                goto out_err;
-       s->s_dirt = 0;
 
        if (!(*mount_flags & MS_RDONLY)) {
                dquot_resume(s, -1);
@@ -1730,19 +1762,21 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
                return -ENOMEM;
        s->s_fs_info = sbi;
        /* Set default values for options: non-aggressive tails, RO on errors */
-       REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
-       REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO);
-       REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
+       sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
+       sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
+       sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
        /* no preallocation minimum, be smart in
           reiserfs_file_write instead */
-       REISERFS_SB(s)->s_alloc_options.preallocmin = 0;
+       sbi->s_alloc_options.preallocmin = 0;
        /* Preallocate by 16 blocks (17-1) at once */
-       REISERFS_SB(s)->s_alloc_options.preallocsize = 17;
+       sbi->s_alloc_options.preallocsize = 17;
        /* setup default block allocator options */
        reiserfs_init_alloc_options(s);
 
-       mutex_init(&REISERFS_SB(s)->lock);
-       REISERFS_SB(s)->lock_depth = -1;
+       spin_lock_init(&sbi->old_work_lock);
+       INIT_DELAYED_WORK(&sbi->old_work, flush_old_commits);
+       mutex_init(&sbi->lock);
+       sbi->lock_depth = -1;
 
        jdev_name = NULL;
        if (reiserfs_parse_options
@@ -1751,8 +1785,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
                goto error_unlocked;
        }
        if (jdev_name && jdev_name[0]) {
-               REISERFS_SB(s)->s_jdev = kstrdup(jdev_name, GFP_KERNEL);
-               if (!REISERFS_SB(s)->s_jdev) {
+               sbi->s_jdev = kstrdup(jdev_name, GFP_KERNEL);
+               if (!sbi->s_jdev) {
                        SWARN(silent, s, "", "Cannot allocate memory for "
                                "journal device name");
                        goto error;
@@ -1810,7 +1844,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
        /* make data=ordered the default */
        if (!reiserfs_data_log(s) && !reiserfs_data_ordered(s) &&
            !reiserfs_data_writeback(s)) {
-               REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_DATA_ORDERED);
+               sbi->s_mount_opt |= (1 << REISERFS_DATA_ORDERED);
        }
 
        if (reiserfs_data_log(s)) {
@@ -2003,6 +2037,8 @@ error_unlocked:
                reiserfs_write_unlock(s);
        }
 
+       cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
+
        reiserfs_free_bitmap_cache(s);
        if (SB_BUFFER_WITH_SB(s))
                brelse(SB_BUFFER_WITH_SB(s));
@@ -2270,7 +2306,6 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
                        (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }
-       mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
        while (towrite > 0) {
                tocopy = sb->s_blocksize - offset < towrite ?
                    sb->s_blocksize - offset : towrite;
@@ -2302,16 +2337,13 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
                blk++;
        }
 out:
-       if (len == towrite) {
-               mutex_unlock(&inode->i_mutex);
+       if (len == towrite)
                return err;
-       }
        if (inode->i_size < off + len - towrite)
                i_size_write(inode, off + len - towrite);
        inode->i_version++;
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        mark_inode_dirty(inode);
-       mutex_unlock(&inode->i_mutex);
        return len - towrite;
 }
 
index 17d33d09fc16f4843c72f9c7444dfdb3fb738952..bae321569dfa7283b290a2e2d65c99bb10b622f4 100644 (file)
@@ -614,7 +614,6 @@ SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
        return ret;
 }
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
 static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
                       fd_set __user *exp, struct timespec __user *tsp,
                       const sigset_t __user *sigmask, size_t sigsetsize)
@@ -686,7 +685,6 @@ SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
 
        return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
 }
-#endif /* HAVE_SET_RESTORE_SIGMASK */
 
 #ifdef __ARCH_WANT_SYS_OLD_SELECT
 struct sel_arg_struct {
@@ -941,7 +939,6 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
        return ret;
 }
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
 SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
                struct timespec __user *, tsp, const sigset_t __user *, sigmask,
                size_t, sigsetsize)
@@ -992,4 +989,3 @@ SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
 
        return ret;
 }
-#endif /* HAVE_SET_RESTORE_SIGMASK */
index 7ae2a574cb25a64902128f53832b317202dbee8f..9f35a37173de0de1f7fbd8d80ca8ad39b50e3782 100644 (file)
@@ -269,12 +269,13 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
                if (ufd < 0)
                        kfree(ctx);
        } else {
-               struct file *file = fget(ufd);
+               int fput_needed;
+               struct file *file = fget_light(ufd, &fput_needed);
                if (!file)
                        return -EBADF;
                ctx = file->private_data;
                if (file->f_op != &signalfd_fops) {
-                       fput(file);
+                       fput_light(file, fput_needed);
                        return -EINVAL;
                }
                spin_lock_irq(&current->sighand->siglock);
@@ -282,7 +283,7 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
                spin_unlock_irq(&current->sighand->siglock);
 
                wake_up(&current->sighand->signalfd_wqh);
-               fput(file);
+               fput_light(file, fput_needed);
        }
 
        return ufd;
index f8476841eb04e08edc2ad069e97cddc3da242e7f..c9f1318a3b820b363526576036c4894205552921 100644 (file)
@@ -1003,8 +1003,10 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
                mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
                ret = file_remove_suid(out);
                if (!ret) {
-                       file_update_time(out);
-                       ret = splice_from_pipe_feed(pipe, &sd, pipe_to_file);
+                       ret = file_update_time(out);
+                       if (!ret)
+                               ret = splice_from_pipe_feed(pipe, &sd,
+                                                           pipe_to_file);
                }
                mutex_unlock(&inode->i_mutex);
        } while (ret > 0);
@@ -1388,7 +1390,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
  */
 static int get_iovec_page_array(const struct iovec __user *iov,
                                unsigned int nr_vecs, struct page **pages,
-                               struct partial_page *partial, int aligned,
+                               struct partial_page *partial, bool aligned,
                                unsigned int pipe_buffers)
 {
        int buffers = 0, error = 0;
@@ -1626,7 +1628,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
                return -ENOMEM;
 
        spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
-                                           spd.partial, flags & SPLICE_F_GIFT,
+                                           spd.partial, false,
                                            pipe->buffers);
        if (spd.nr_pages <= 0)
                ret = spd.nr_pages;
index 43e6b6fe4e855684a197c48ed6bb8dee70f95467..95ad5c0e586c9f64fe492e141387b5092956d553 100644 (file)
@@ -87,11 +87,12 @@ int user_statfs(const char __user *pathname, struct kstatfs *st)
 
 int fd_statfs(int fd, struct kstatfs *st)
 {
-       struct file *file = fget(fd);
+       int fput_needed;
+       struct file *file = fget_light(fd, &fput_needed);
        int error = -EBADF;
        if (file) {
                error = vfs_statfs(&file->f_path, st);
-               fput(file);
+               fput_light(file, fput_needed);
        }
        return error;
 }
index 0e8db939d96f8fdaa072df7e6fcadb15a5781a84..11e3d1c449018dcf9a95c352746f46d6522c4cb2 100644 (file)
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -188,11 +188,12 @@ static int do_fsync(unsigned int fd, int datasync)
 {
        struct file *file;
        int ret = -EBADF;
+       int fput_needed;
 
-       file = fget(fd);
+       file = fget_light(fd, &fput_needed);
        if (file) {
                ret = vfs_fsync(file, datasync);
-               fput(file);
+               fput_light(file, fput_needed);
        }
        return ret;
 }
index 907c2b3af7589614114a7aa2d91d5969e81d1f6f..0ce3ccf7f401ba6748cb84735e54130fca403506 100644 (file)
@@ -310,7 +310,7 @@ void sysfs_evict_inode(struct inode *inode)
        struct sysfs_dirent *sd  = inode->i_private;
 
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
        sysfs_put(sd);
 }
 
index 3da5ce25faf0ca3e75d49c0c0de3c2c9f3f5d302..08d0b2568cd35e060a9e6b8ffda87d7d6569cbdb 100644 (file)
@@ -316,7 +316,7 @@ static void sysv_evict_inode(struct inode *inode)
                sysv_truncate(inode);
        }
        invalidate_inode_buffers(inode);
-       end_writeback(inode);
+       clear_inode(inode);
        if (!inode->i_nlink)
                sysv_free_inode(inode);
 }
index 62a2727f4ecf71809f518dd206922fc7f9234f0d..a6d42efc76d227d62289f852982160442d7e5cea 100644 (file)
@@ -1127,16 +1127,7 @@ int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
        struct ubifs_inode *ui = ubifs_inode(inode);
 
        mutex_lock(&ui->ui_mutex);
-       stat->dev = inode->i_sb->s_dev;
-       stat->ino = inode->i_ino;
-       stat->mode = inode->i_mode;
-       stat->nlink = inode->i_nlink;
-       stat->uid = inode->i_uid;
-       stat->gid = inode->i_gid;
-       stat->rdev = inode->i_rdev;
-       stat->atime = inode->i_atime;
-       stat->mtime = inode->i_mtime;
-       stat->ctime = inode->i_ctime;
+       generic_fillattr(inode, stat);
        stat->blksize = UBIFS_BLOCK_SIZE;
        stat->size = ui->ui_size;
 
index 001acccac0d6dfea4335e04cef5460ee7d397818..5862dd9d278402fe140ad9096fd8eacb7c1f1909 100644 (file)
@@ -378,7 +378,7 @@ out:
                smp_wmb();
        }
 done:
-       end_writeback(inode);
+       clear_inode(inode);
 }
 
 static void ubifs_dirty_inode(struct inode *inode, int flags)
index 7d7528008359eca147778d09a303ca7c5424f5c4..873e1bab9c4c70cf45ed6bac6e46495decfbd55d 100644 (file)
@@ -80,7 +80,7 @@ void udf_evict_inode(struct inode *inode)
        } else
                truncate_inode_pages(&inode->i_data, 0);
        invalidate_inode_buffers(inode);
-       end_writeback(inode);
+       clear_inode(inode);
        if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
            inode->i_size != iinfo->i_lenExtents) {
                udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
index a165c66e3eef2249379890c60a4c7d4111e8df4d..18024178ac4c040a3f23181ff2dc1a5cc48f2dc8 100644 (file)
@@ -1260,16 +1260,15 @@ static struct dentry *udf_fh_to_parent(struct super_block *sb,
                                 fid->udf.parent_partref,
                                 fid->udf.parent_generation);
 }
-static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp,
-                        int connectable)
+static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
+                        struct inode *parent)
 {
        int len = *lenp;
-       struct inode *inode =  de->d_inode;
        struct kernel_lb_addr location = UDF_I(inode)->i_location;
        struct fid *fid = (struct fid *)fh;
        int type = FILEID_UDF_WITHOUT_PARENT;
 
-       if (connectable && (len < 5)) {
+       if (parent && (len < 5)) {
                *lenp = 5;
                return 255;
        } else if (len < 3) {
@@ -1282,14 +1281,11 @@ static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp,
        fid->udf.partref = location.partitionReferenceNum;
        fid->udf.generation = inode->i_generation;
 
-       if (connectable && !S_ISDIR(inode->i_mode)) {
-               spin_lock(&de->d_lock);
-               inode = de->d_parent->d_inode;
-               location = UDF_I(inode)->i_location;
+       if (parent) {
+               location = UDF_I(parent)->i_location;
                fid->udf.parent_block = location.logicalBlockNum;
                fid->udf.parent_partref = location.partitionReferenceNum;
                fid->udf.parent_generation = inode->i_generation;
-               spin_unlock(&de->d_lock);
                *lenp = 5;
                type = FILEID_UDF_WITH_PARENT;
        }
index 7cdd3953d67e80733b252b6587aa47e72be46938..dd7c89d8a1c1b728deb32c7dc541c4a6e136711c 100644 (file)
@@ -895,7 +895,7 @@ void ufs_evict_inode(struct inode * inode)
        }
 
        invalidate_inode_buffers(inode);
-       end_writeback(inode);
+       clear_inode(inode);
 
        if (want_delete) {
                lock_ufs(inode->i_sb);
index ba653f3dc1bc9c66010290e53e0bb2a5b8fd94b1..fa4dbe451e278eab0f52bbacc110b157a300cdad 100644 (file)
@@ -140,18 +140,19 @@ long do_utimes(int dfd, const char __user *filename, struct timespec *times,
                goto out;
 
        if (filename == NULL && dfd != AT_FDCWD) {
+               int fput_needed;
                struct file *file;
 
                if (flags & AT_SYMLINK_NOFOLLOW)
                        goto out;
 
-               file = fget(dfd);
+               file = fget_light(dfd, &fput_needed);
                error = -EBADF;
                if (!file)
                        goto out;
 
                error = utimes_common(&file->f_path, times);
-               fput(file);
+               fput_light(file, fput_needed);
        } else {
                struct path path;
                int lookup_flags = 0;
index 3c8c1cc333c7c79dfa105049a62d8b6e1c28661c..1d7ac379045879b827b196f0d7a7420fe33c783d 100644 (file)
@@ -399,11 +399,12 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
 SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
                const void __user *,value, size_t, size, int, flags)
 {
+       int fput_needed;
        struct file *f;
        struct dentry *dentry;
        int error = -EBADF;
 
-       f = fget(fd);
+       f = fget_light(fd, &fput_needed);
        if (!f)
                return error;
        dentry = f->f_path.dentry;
@@ -413,7 +414,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
                error = setxattr(dentry, name, value, size, flags);
                mnt_drop_write_file(f);
        }
-       fput(f);
+       fput_light(f, fput_needed);
        return error;
 }
 
@@ -486,15 +487,16 @@ SYSCALL_DEFINE4(lgetxattr, const char __user *, pathname,
 SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name,
                void __user *, value, size_t, size)
 {
+       int fput_needed;
        struct file *f;
        ssize_t error = -EBADF;
 
-       f = fget(fd);
+       f = fget_light(fd, &fput_needed);
        if (!f)
                return error;
        audit_inode(NULL, f->f_path.dentry);
        error = getxattr(f->f_path.dentry, name, value, size);
-       fput(f);
+       fput_light(f, fput_needed);
        return error;
 }
 
@@ -566,15 +568,16 @@ SYSCALL_DEFINE3(llistxattr, const char __user *, pathname, char __user *, list,
 
 SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
 {
+       int fput_needed;
        struct file *f;
        ssize_t error = -EBADF;
 
-       f = fget(fd);
+       f = fget_light(fd, &fput_needed);
        if (!f)
                return error;
        audit_inode(NULL, f->f_path.dentry);
        error = listxattr(f->f_path.dentry, list, size);
-       fput(f);
+       fput_light(f, fput_needed);
        return error;
 }
 
@@ -634,11 +637,12 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
 
 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
 {
+       int fput_needed;
        struct file *f;
        struct dentry *dentry;
        int error = -EBADF;
 
-       f = fget(fd);
+       f = fget_light(fd, &fput_needed);
        if (!f)
                return error;
        dentry = f->f_path.dentry;
@@ -648,7 +652,7 @@ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
                error = removexattr(dentry, name);
                mnt_drop_write_file(f);
        }
-       fput(f);
+       fput_light(f, fput_needed);
        return error;
 }
 
index a907de565db3bf287f7d1a7894fff23f85ca18d5..4a7286c1dc80d270af40a3733870bb9dd769ee82 100644 (file)
@@ -46,7 +46,7 @@ kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
 }
 
 void *
-kmem_alloc(size_t size, unsigned int __nocast flags)
+kmem_alloc(size_t size, xfs_km_flags_t flags)
 {
        int     retries = 0;
        gfp_t   lflags = kmem_flags_convert(flags);
@@ -65,7 +65,7 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
 }
 
 void *
-kmem_zalloc(size_t size, unsigned int __nocast flags)
+kmem_zalloc(size_t size, xfs_km_flags_t flags)
 {
        void    *ptr;
 
@@ -87,7 +87,7 @@ kmem_free(const void *ptr)
 
 void *
 kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
-            unsigned int __nocast flags)
+            xfs_km_flags_t flags)
 {
        void    *new;
 
@@ -102,7 +102,7 @@ kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
 }
 
 void *
-kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
+kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
 {
        int     retries = 0;
        gfp_t   lflags = kmem_flags_convert(flags);
@@ -121,7 +121,7 @@ kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
 }
 
 void *
-kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags)
+kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
 {
        void    *ptr;
 
index ab7c53fe346e2273311a1b73bee866ab8c4f8d7f..b2f2620f9a87b9f1bf6836c8faf3d5039e7af94f 100644 (file)
  * General memory allocation interfaces
  */
 
-#define KM_SLEEP       0x0001u
-#define KM_NOSLEEP     0x0002u
-#define KM_NOFS                0x0004u
-#define KM_MAYFAIL     0x0008u
+typedef unsigned __bitwise xfs_km_flags_t;
+#define KM_SLEEP       ((__force xfs_km_flags_t)0x0001u)
+#define KM_NOSLEEP     ((__force xfs_km_flags_t)0x0002u)
+#define KM_NOFS                ((__force xfs_km_flags_t)0x0004u)
+#define KM_MAYFAIL     ((__force xfs_km_flags_t)0x0008u)
 
 /*
  * We use a special process flag to avoid recursive callbacks into
@@ -38,7 +39,7 @@
  * warnings, so we explicitly skip any generic ones (silly of us).
  */
 static inline gfp_t
-kmem_flags_convert(unsigned int __nocast flags)
+kmem_flags_convert(xfs_km_flags_t flags)
 {
        gfp_t   lflags;
 
@@ -54,9 +55,9 @@ kmem_flags_convert(unsigned int __nocast flags)
        return lflags;
 }
 
-extern void *kmem_alloc(size_t, unsigned int __nocast);
-extern void *kmem_zalloc(size_t, unsigned int __nocast);
-extern void *kmem_realloc(const void *, size_t, size_t, unsigned int __nocast);
+extern void *kmem_alloc(size_t, xfs_km_flags_t);
+extern void *kmem_zalloc(size_t, xfs_km_flags_t);
+extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
 extern void  kmem_free(const void *);
 
 static inline void *kmem_zalloc_large(size_t size)
@@ -107,7 +108,7 @@ kmem_zone_destroy(kmem_zone_t *zone)
                kmem_cache_destroy(zone);
 }
 
-extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
-extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
+extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);
+extern void *kmem_zone_zalloc(kmem_zone_t *, xfs_km_flags_t);
 
 #endif /* __XFS_SUPPORT_KMEM_H__ */
index 2d25d19c4ea17b991fa4a43dcbb340e5c957c461..42679223a0fde641e3013980fbd1e733dc6ec60e 100644 (file)
@@ -52,19 +52,18 @@ static int xfs_fileid_length(int fileid_type)
 
 STATIC int
 xfs_fs_encode_fh(
-       struct dentry           *dentry,
-       __u32                   *fh,
-       int                     *max_len,
-       int                     connectable)
+       struct inode    *inode,
+       __u32           *fh,
+       int             *max_len,
+       struct inode    *parent)
 {
        struct fid              *fid = (struct fid *)fh;
        struct xfs_fid64        *fid64 = (struct xfs_fid64 *)fh;
-       struct inode            *inode = dentry->d_inode;
        int                     fileid_type;
        int                     len;
 
        /* Directories don't need their parent encoded, they have ".." */
-       if (S_ISDIR(inode->i_mode) || !connectable)
+       if (!parent)
                fileid_type = FILEID_INO32_GEN;
        else
                fileid_type = FILEID_INO32_GEN_PARENT;
@@ -96,20 +95,16 @@ xfs_fs_encode_fh(
 
        switch (fileid_type) {
        case FILEID_INO32_GEN_PARENT:
-               spin_lock(&dentry->d_lock);
-               fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
-               fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
-               spin_unlock(&dentry->d_lock);
+               fid->i32.parent_ino = XFS_I(parent)->i_ino;
+               fid->i32.parent_gen = parent->i_generation;
                /*FALLTHRU*/
        case FILEID_INO32_GEN:
                fid->i32.ino = XFS_I(inode)->i_ino;
                fid->i32.gen = inode->i_generation;
                break;
        case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
-               spin_lock(&dentry->d_lock);
-               fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
-               fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
-               spin_unlock(&dentry->d_lock);
+               fid64->parent_ino = XFS_I(parent)->i_ino;
+               fid64->parent_gen = parent->i_generation;
                /*FALLTHRU*/
        case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
                fid64->ino = XFS_I(inode)->i_ino;
index 8d214b87f6bb06ed1f7ed204cdfda8da9345172f..9f7ec15a65222e2fe318e0ab81ac9cca0a664b4a 100644 (file)
@@ -586,8 +586,11 @@ restart:
         * lock above.  Eventually we should look into a way to avoid
         * the pointless lock roundtrip.
         */
-       if (likely(!(file->f_mode & FMODE_NOCMTIME)))
-               file_update_time(file);
+       if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
+               error = file_update_time(file);
+               if (error)
+                       return error;
+       }
 
        /*
         * If we're writing the file then make sure to clear the setuid and
index 6b965bf450e44d5972fc689d486deec3dd5c8094..f30d9807dc48a0535084da1afd5a7620389adcd5 100644 (file)
@@ -3152,7 +3152,7 @@ xlog_ticket_alloc(
        int             cnt,
        char            client,
        bool            permanent,
-       int             alloc_flags)
+       xfs_km_flags_t  alloc_flags)
 {
        struct xlog_ticket *tic;
        uint            num_headers;
index 735ff1ee53da447eee9c5b88d54e9007ee988138..5bc33261f5be6311fb5732f42db25e17064abded 100644 (file)
@@ -555,7 +555,7 @@ extern void  xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
 extern kmem_zone_t *xfs_log_ticket_zone;
 struct xlog_ticket *xlog_ticket_alloc(struct log *log, int unit_bytes,
                                int count, char client, bool permanent,
-                               int alloc_flags);
+                               xfs_km_flags_t alloc_flags);
 
 
 static inline void
index 2fcfd5b0b046830f4453298e5fb771de366b974f..0d9de41a7151568621cfad89a1a95a97b2ba8b4a 100644 (file)
@@ -932,7 +932,7 @@ xfs_fs_evict_inode(
        trace_xfs_evict_inode(ip);
 
        truncate_inode_pages(&inode->i_data, 0);
-       end_writeback(inode);
+       clear_inode(inode);
        XFS_STATS_INC(vn_rele);
        XFS_STATS_INC(vn_remove);
        XFS_STATS_DEC(vn_active);
index cdf896fcbfa43810c83bfbc7a99f84f118a4d75d..fdf324508c5ee467c6055f0866e1be88c387942d 100644 (file)
@@ -584,7 +584,7 @@ xfs_trans_t *
 _xfs_trans_alloc(
        xfs_mount_t     *mp,
        uint            type,
-       uint            memflags)
+       xfs_km_flags_t  memflags)
 {
        xfs_trans_t     *tp;
 
index 7ab99e1898c8de10e875aff23599d140c9867c4b..7c37b533aa8e5c169f0ef98643f96958df3788df 100644 (file)
@@ -443,7 +443,7 @@ typedef struct xfs_trans {
  * XFS transaction mechanism exported interfaces.
  */
 xfs_trans_t    *xfs_trans_alloc(struct xfs_mount *, uint);
-xfs_trans_t    *_xfs_trans_alloc(struct xfs_mount *, uint, uint);
+xfs_trans_t    *_xfs_trans_alloc(struct xfs_mount *, uint, xfs_km_flags_t);
 xfs_trans_t    *xfs_trans_dup(xfs_trans_t *);
 int            xfs_trans_reserve(xfs_trans_t *, uint, uint, uint,
                                  uint, uint);
index 53f91b1ae53a425f0c5c2b487dcb51e658affa77..2c85a0f647b7a44e7d2e0cbb20071a9e27b322f6 100644 (file)
@@ -8,6 +8,7 @@ header-y += int-ll64.h
 header-y += ioctl.h
 header-y += ioctls.h
 header-y += ipcbuf.h
+header-y += kvm_para.h
 header-y += mman-common.h
 header-y += mman.h
 header-y += msgbuf.h
index 4ae54e07de83d2b970d88807f662bcfe300f2ddc..a7b0914348fd0fce08ed7cb42bbe2c8fbacbf8f9 100644 (file)
@@ -28,5 +28,9 @@
 #error Inconsistent word size. Check asm/bitsperlong.h
 #endif
 
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_GENERIC_BITS_PER_LONG */
index 85a3ffaa024208e0e00299641b320e67b0d4999e..abfb2682de7f33b0dce686447849e9c49b85c0a5 100644 (file)
@@ -3,13 +3,15 @@
 
 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
 /*
- * These two functions are only for dma allocator.
+ * These three functions are only for dma allocator.
  * Don't use them in device drivers.
  */
 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret);
 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
 
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+                           void *cpu_addr, size_t size, int *ret);
 /*
  * Standard interface
  */
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
new file mode 100644 (file)
index 0000000..c544356
--- /dev/null
@@ -0,0 +1,28 @@
+#ifndef ASM_DMA_CONTIGUOUS_H
+#define ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/device.h>
+#include <linux/dma-contiguous.h>
+
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+       if (dev && dev->cma_area)
+               return dev->cma_area;
+       return dma_contiguous_default_area;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
+{
+       if (dev)
+               dev->cma_area = cma;
+       if (!dev || !dma_contiguous_default_area)
+               dma_contiguous_default_area = cma;
+}
+
+#endif
+#endif
+
+#endif
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
new file mode 100644 (file)
index 0000000..5cba37f
--- /dev/null
@@ -0,0 +1,22 @@
+#ifndef _ASM_GENERIC_KVM_PARA_H
+#define _ASM_GENERIC_KVM_PARA_H
+
+#ifdef __KERNEL__
+
+/*
+ * This function is used by architectures that support kvm to avoid issuing
+ * false soft lockup messages.
+ */
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+       return false;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+       return 0;
+}
+
+#endif /* _KERNEL__ */
+
+#endif
index 125c54e985170440a7ccd99f41fc35b922be150c..6f2b45a9b6bc425b7df6231f474516e1bd4c1344 100644 (file)
@@ -158,9 +158,8 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 #endif
 
 #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma,
-                                 unsigned long address,
-                                 pmd_t *pmdp);
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+                                unsigned long address, pmd_t *pmdp);
 #endif
 
 #ifndef __HAVE_ARCH_PTE_SAME
@@ -446,6 +445,18 @@ static inline int pmd_write(pmd_t pmd)
 #endif /* __HAVE_ARCH_PMD_WRITE */
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+#ifndef pmd_read_atomic
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+       /*
+        * Depend on compiler for an atomic pmd read. NOTE: this is
+        * only going to work, if the pmdval_t isn't larger than
+        * an unsigned long.
+        */
+       return *pmdp;
+}
+#endif
+
 /*
  * This function is meant to be used by sites walking pagetables with
  * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
@@ -459,11 +470,17 @@ static inline int pmd_write(pmd_t pmd)
  * undefined so behaving like if the pmd was none is safe (because it
  * can return none anyway). The compiler level barrier() is critically
  * important to compute the two checks atomically on the same pmdval.
+ *
+ * For 32bit kernels with a 64bit large pmd_t this automatically takes
+ * care of reading the pmd atomically to avoid SMP race conditions
+ * against pmd_populate() when the mmap_sem is hold for reading by the
+ * caller (a special atomic read not done by "gcc" as in the generic
+ * version above, is also needed when THP is disabled because the page
+ * fault can populate the pmd from under us).
  */
 static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
 {
-       /* depend on compiler for an atomic pmd read */
-       pmd_t pmdval = *pmd;
+       pmd_t pmdval = pmd_read_atomic(pmd);
        /*
         * The barrier will stabilize the pmdval in a register or on
         * the stack so that it will stop changing under the code.
index 91d44bd4dde32574bb6365a5526ac33a19992050..fe74fccf18db75742d240151ec358049861b7406 100644 (file)
@@ -23,10 +23,6 @@ typedef __kernel_ulong_t __kernel_ino_t;
 typedef unsigned int   __kernel_mode_t;
 #endif
 
-#ifndef __kernel_nlink_t
-typedef __kernel_ulong_t __kernel_nlink_t;
-#endif
-
 #ifndef __kernel_pid_t
 typedef int            __kernel_pid_t;
 #endif
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
new file mode 100644 (file)
index 0000000..3f21f1b
--- /dev/null
@@ -0,0 +1,52 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * This says "generic", but it's actually big-endian only.
+ * Little-endian can use more efficient versions of these
+ * interfaces, see for example
+ *      arch/x86/include/asm/word-at-a-time.h
+ * for those.
+ */
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+       const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+       unsigned long mask = (val & c->low_bits) + c->low_bits;
+       return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+       long byte = 0;
+#ifdef CONFIG_64BIT
+       if (mask >> 32)
+               mask >>= 32;
+       else
+               byte = 4;
+#endif
+       if (mask >> 16)
+               mask >>= 16;
+       else
+               byte += 2;
+       return (mask >> 8) ? byte : byte + 1;
+}
+
+static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+       unsigned long rhs = val | c->low_bits;
+       *data = rhs;
+       return (val + c->high_bits) & ~rhs;
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
index 6bd325fedc873ae4785a619e8a679f339d562485..19a240446fca657e9928e93defa3ead34905ee56 100644 (file)
@@ -31,7 +31,7 @@
 
 static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
 {
-       if (size != 0 && nmemb > ULONG_MAX / size)
+       if (size != 0 && nmemb > SIZE_MAX / size)
                return NULL;
 
        if (size * nmemb <= PAGE_SIZE)
@@ -44,7 +44,7 @@ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
 /* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
 static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
 {
-       if (size != 0 && nmemb > ULONG_MAX / size)
+       if (size != 0 && nmemb > SIZE_MAX / size)
                return NULL;
 
        if (size * nmemb <= PAGE_SIZE)
index 4cd59b95858f9eab1e47a3dafde27c9176175c9c..8760be30b3750a8f4d8e8e01692ccf92bb3b1931 100644 (file)
@@ -225,6 +225,8 @@ header-y += kd.h
 header-y += kdev_t.h
 header-y += kernel.h
 header-y += kernelcapi.h
+header-y += kernel-page-flags.h
+header-y += kexec.h
 header-y += keyboard.h
 header-y += keyctl.h
 header-y += l2tp.h
index e64ce2cfee9959f5b231b879d3afef2b5abd57cc..02549017212a2113c42a36354d5da37be1e23eec 100644 (file)
@@ -92,6 +92,8 @@ struct pl08x_bus_data {
  * right now
  * @serving: the virtual channel currently being served by this physical
  * channel
+ * @locked: channel unavailable for the system, e.g. dedicated to secure
+ * world
  */
 struct pl08x_phy_chan {
        unsigned int id;
@@ -99,6 +101,7 @@ struct pl08x_phy_chan {
        spinlock_t lock;
        int signal;
        struct pl08x_dma_chan *serving;
+       bool locked;
 };
 
 /**
index 47bedc0eee6939b8c1f397177c8c0ef6a5148f9c..0a95e730fcea706a185f52af4b3c563ee40e7d7f 100644 (file)
@@ -5,7 +5,7 @@
 #ifndef _LINUX_APPLE_BL_H
 #define _LINUX_APPLE_BL_H
 
-#ifdef CONFIG_BACKLIGHT_APPLE
+#if defined(CONFIG_BACKLIGHT_APPLE) || defined(CONFIG_BACKLIGHT_APPLE_MODULE)
 
 extern int apple_bl_register(void);
 extern void apple_bl_unregister(void);
index 4d94eb8bcbccc224bd0206cb435b1f052ddaaebf..26435890dc87a6c3b9d37f14571833fb1afe0164 100644 (file)
@@ -269,6 +269,14 @@ extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set
 extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
+#ifdef CONFIG_BLK_CGROUP
+int bio_associate_current(struct bio *bio);
+void bio_disassociate_task(struct bio *bio);
+#else  /* CONFIG_BLK_CGROUP */
+static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
+static inline void bio_disassociate_task(struct bio *bio) { }
+#endif /* CONFIG_BLK_CGROUP */
+
 /*
  * bio_set is used to allow other portions of the IO system to
  * allocate their own private memory pools for bio and iovec structures.
index 4053cbd4490edb6530eee7bb332ab95138352d17..0edb65dd8eddd35de6b0c383a27d889ec00f2193 100644 (file)
@@ -14,6 +14,8 @@ struct bio;
 struct bio_integrity_payload;
 struct page;
 struct block_device;
+struct io_context;
+struct cgroup_subsys_state;
 typedef void (bio_end_io_t) (struct bio *, int);
 typedef void (bio_destructor_t) (struct bio *);
 
@@ -66,6 +68,14 @@ struct bio {
        bio_end_io_t            *bi_end_io;
 
        void                    *bi_private;
+#ifdef CONFIG_BLK_CGROUP
+       /*
+        * Optional ioc and css associated with this bio.  Put on bio
+        * release.  Read comment on top of bio_associate_current().
+        */
+       struct io_context       *bi_ioc;
+       struct cgroup_subsys_state *bi_css;
+#endif
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
        struct bio_integrity_payload *bi_integrity;  /* data integrity */
 #endif
index 4d4ac24a263ea956457d4ea4a63f1431408a6d90..ba43f408baa38907a8f63a64314e07bb622a6e7f 100644 (file)
@@ -32,10 +32,17 @@ struct blk_trace;
 struct request;
 struct sg_io_hdr;
 struct bsg_job;
+struct blkcg_gq;
 
 #define BLKDEV_MIN_RQ  4
 #define BLKDEV_MAX_RQ  128     /* Default maximum */
 
+/*
+ * Maximum number of blkcg policies allowed to be registered concurrently.
+ * Defined here to simplify include dependency.
+ */
+#define BLKCG_MAX_POLS         2
+
 struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
@@ -363,6 +370,11 @@ struct request_queue {
        struct list_head        timeout_list;
 
        struct list_head        icq_list;
+#ifdef CONFIG_BLK_CGROUP
+       DECLARE_BITMAP          (blkcg_pols, BLKCG_MAX_POLS);
+       struct blkcg_gq         *root_blkg;
+       struct list_head        blkg_list;
+#endif
 
        struct queue_limits     limits;
 
@@ -390,12 +402,17 @@ struct request_queue {
 
        struct mutex            sysfs_lock;
 
+       int                     bypass_depth;
+
 #if defined(CONFIG_BLK_DEV_BSG)
        bsg_job_fn              *bsg_job_fn;
        int                     bsg_job_size;
        struct bsg_class_device bsg_dev;
 #endif
 
+#ifdef CONFIG_BLK_CGROUP
+       struct list_head        all_q_node;
+#endif
 #ifdef CONFIG_BLK_DEV_THROTTLING
        /* Throttle data */
        struct throtl_data *td;
@@ -407,7 +424,7 @@ struct request_queue {
 #define        QUEUE_FLAG_SYNCFULL     3       /* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL   4       /* write queue has been filled */
 #define QUEUE_FLAG_DEAD                5       /* queue being torn down */
-#define QUEUE_FLAG_ELVSWITCH   6       /* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BYPASS      6       /* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI                7       /* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES     8      /* disable merge attempts */
 #define QUEUE_FLAG_SAME_COMP   9       /* complete on same CPU-group */
@@ -491,6 +508,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_tagged(q)    test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_dead(q)      test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+#define blk_queue_bypass(q)    test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)  test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q) \
        test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
index 1a0cd270bb7a0850f643cc3902aa5f943279ec8c..324fe08ea3b140b7b8b92f7129ad334df2e260d5 100644 (file)
@@ -135,9 +135,6 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
                                   int flags);
 
-extern void *alloc_bootmem_section(unsigned long size,
-                                  unsigned long section_nr);
-
 #ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
 extern void *alloc_remap(int nid, unsigned long size);
 #else
index 72961c39576a4af5eb68a45f6bebc18e88935d2e..aaac4bba6f5c7faa1d2b95b13983323658c8188c 100644 (file)
@@ -30,6 +30,13 @@ struct pt_regs;
 #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
 #define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
 
+/*
+ * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the
+ * expression but avoids the generation of any code, even if that expression
+ * has side-effects.
+ */
+#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))
+
 /**
  * BUILD_BUG_ON - break compile if a condition is true.
  * @condition: the condition which the compiler should know is false.
index aa13392a7efbf2234add8465540869b75930a25b..d4080f309b5699d5fbaa5dbe96d38a1977be7693 100644 (file)
 struct ceph_auth_client;
 struct ceph_authorizer;
 
+struct ceph_auth_handshake {
+       struct ceph_authorizer *authorizer;
+       void *authorizer_buf;
+       size_t authorizer_buf_len;
+       void *authorizer_reply_buf;
+       size_t authorizer_reply_buf_len;
+};
+
 struct ceph_auth_client_ops {
        const char *name;
 
@@ -43,9 +51,7 @@ struct ceph_auth_client_ops {
         * the response to authenticate the service.
         */
        int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type,
-                                struct ceph_authorizer **a,
-                                void **buf, size_t *len,
-                                void **reply_buf, size_t *reply_len);
+                                struct ceph_auth_handshake *auth);
        int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
                                       struct ceph_authorizer *a, size_t len);
        void (*destroy_authorizer)(struct ceph_auth_client *ac,
index b8c60694b2b0977d4ecdb982d8bcaea5f0ef2673..e81ab30d4896329e29d47bea33d92e5634da6a89 100644 (file)
@@ -65,7 +65,7 @@ struct ceph_file_layout {
        __le32 fl_object_stripe_unit;  /* UNUSED.  for per-object parity, if any */
 
        /* object -> pg layout */
-       __le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */
+       __le32 fl_unused;       /* unused; used to be preferred primary (-1) */
        __le32 fl_pg_pool;      /* namespace, crush ruleset, rep level */
 } __attribute__ ((packed));
 
@@ -384,7 +384,7 @@ union ceph_mds_request_args {
                __le32 stripe_count;         /* ... */
                __le32 object_size;
                __le32 file_replication;
-               __le32 preferred;
+               __le32 unused;               /* used to be preferred osd */
        } __attribute__ ((packed)) open;
        struct {
                __le32 flags;
index 220ae21e819b1fb2623d19d8cf4f619862f11c42..d8615dee5808d3f55c93a38c6fdb66113f09a691 100644 (file)
@@ -46,9 +46,14 @@ static inline void ceph_decode_copy(void **p, void *pv, size_t n)
 /*
  * bounds check input.
  */
+static inline int ceph_has_room(void **p, void *end, size_t n)
+{
+       return end >= *p && n <= end - *p;
+}
+
 #define ceph_decode_need(p, end, n, bad)               \
        do {                                            \
-               if (unlikely(*(p) + (n) > (end)))       \
+               if (!likely(ceph_has_room(p, end, n)))  \
                        goto bad;                       \
        } while (0)
 
@@ -167,7 +172,7 @@ static inline void ceph_encode_string(void **p, void *end,
 
 #define ceph_encode_need(p, end, n, bad)               \
        do {                                            \
-               if (unlikely(*(p) + (n) > (end)))       \
+               if (!likely(ceph_has_room(p, end, n)))  \
                        goto bad;                       \
        } while (0)
 
index 3bff047f6b0f19d1037e4e7157fc785dd213f4cc..2521a95fa6d98597d1fafe4d4cff23ce9dc0f069 100644 (file)
@@ -25,9 +25,9 @@ struct ceph_connection_operations {
        void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m);
 
        /* authorize an outgoing connection */
-       int (*get_authorizer) (struct ceph_connection *con,
-                              void **buf, int *len, int *proto,
-                              void **reply_buf, int *reply_len, int force_new);
+       struct ceph_auth_handshake *(*get_authorizer) (
+                               struct ceph_connection *con,
+                              int *proto, int force_new);
        int (*verify_authorizer_reply) (struct ceph_connection *con, int len);
        int (*invalidate_authorizer)(struct ceph_connection *con);
 
index 7c05ac202d90650069d4713ac2e13b3be9b86024..cedfb1a8434a11a0ba0b32348a0687e0ff9e7836 100644 (file)
@@ -6,9 +6,10 @@
 #include <linux/mempool.h>
 #include <linux/rbtree.h>
 
-#include "types.h"
-#include "osdmap.h"
-#include "messenger.h"
+#include <linux/ceph/types.h>
+#include <linux/ceph/osdmap.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/auth.h>
 
 /* 
  * Maximum object name size 
@@ -40,9 +41,7 @@ struct ceph_osd {
        struct list_head o_requests;
        struct list_head o_linger_requests;
        struct list_head o_osd_lru;
-       struct ceph_authorizer *o_authorizer;
-       void *o_authorizer_buf, *o_authorizer_reply_buf;
-       size_t o_authorizer_buf_len, o_authorizer_reply_buf_len;
+       struct ceph_auth_handshake o_auth;
        unsigned long lru_ttl;
        int o_marked_for_keepalive;
        struct list_head o_keepalive_item;
index ba4c205cbb016a141495e2e872ee6a3cfd57253a..311ef8d6aa9efc41b89ea1e6af9b07f538a84440 100644 (file)
@@ -65,8 +65,6 @@ struct ceph_osdmap {
 #define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash))
 #define ceph_file_layout_object_su(l) \
        ((__s32)le32_to_cpu((l).fl_object_stripe_unit))
-#define ceph_file_layout_pg_preferred(l) \
-       ((__s32)le32_to_cpu((l).fl_pg_preferred))
 #define ceph_file_layout_pg_pool(l) \
        ((__s32)le32_to_cpu((l).fl_pg_pool))
 
index 5e4312b6f5ccb1072273ce94e235874c74dd852d..eb3f84bc53254590661418d40c442ba90022a244 100644 (file)
@@ -30,7 +30,7 @@ struct clk {
        const struct clk_ops    *ops;
        struct clk_hw           *hw;
        struct clk              *parent;
-       char                    **parent_names;
+       const char              **parent_names;
        struct clk              **parents;
        u8                      num_parents;
        unsigned long           rate;
@@ -55,12 +55,22 @@ struct clk {
  * alternative macro for static initialization
  */
 
-extern struct clk_ops clk_fixed_rate_ops;
+#define DEFINE_CLK(_name, _ops, _flags, _parent_names,         \
+               _parents)                                       \
+       static struct clk _name = {                             \
+               .name = #_name,                                 \
+               .ops = &_ops,                                   \
+               .hw = &_name##_hw.hw,                           \
+               .parent_names = _parent_names,                  \
+               .num_parents = ARRAY_SIZE(_parent_names),       \
+               .parents = _parents,                            \
+               .flags = _flags,                                \
+       }
 
 #define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate,            \
                                _fixed_rate_flags)              \
        static struct clk _name;                                \
-       static char *_name##_parent_names[] = {};               \
+       static const char *_name##_parent_names[] = {};         \
        static struct clk_fixed_rate _name##_hw = {             \
                .hw = {                                         \
                        .clk = &_name,                          \
@@ -68,23 +78,14 @@ extern struct clk_ops clk_fixed_rate_ops;
                .fixed_rate = _rate,                            \
                .flags = _fixed_rate_flags,                     \
        };                                                      \
-       static struct clk _name = {                             \
-               .name = #_name,                                 \
-               .ops = &clk_fixed_rate_ops,                     \
-               .hw = &_name##_hw.hw,                           \
-               .parent_names = _name##_parent_names,           \
-               .num_parents =                                  \
-                       ARRAY_SIZE(_name##_parent_names),       \
-               .flags = _flags,                                \
-       };
-
-extern struct clk_ops clk_gate_ops;
+       DEFINE_CLK(_name, clk_fixed_rate_ops, _flags,           \
+                       _name##_parent_names, NULL);
 
 #define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr,      \
                                _flags, _reg, _bit_idx,         \
                                _gate_flags, _lock)             \
        static struct clk _name;                                \
-       static char *_name##_parent_names[] = {                 \
+       static const char *_name##_parent_names[] = {           \
                _parent_name,                                   \
        };                                                      \
        static struct clk *_name##_parents[] = {                \
@@ -99,24 +100,14 @@ extern struct clk_ops clk_gate_ops;
                .flags = _gate_flags,                           \
                .lock = _lock,                                  \
        };                                                      \
-       static struct clk _name = {                             \
-               .name = #_name,                                 \
-               .ops = &clk_gate_ops,                           \
-               .hw = &_name##_hw.hw,                           \
-               .parent_names = _name##_parent_names,           \
-               .num_parents =                                  \
-                       ARRAY_SIZE(_name##_parent_names),       \
-               .parents = _name##_parents,                     \
-               .flags = _flags,                                \
-       };
-
-extern struct clk_ops clk_divider_ops;
+       DEFINE_CLK(_name, clk_gate_ops, _flags,                 \
+                       _name##_parent_names, _name##_parents);
 
 #define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,   \
                                _flags, _reg, _shift, _width,   \
                                _divider_flags, _lock)          \
        static struct clk _name;                                \
-       static char *_name##_parent_names[] = {                 \
+       static const char *_name##_parent_names[] = {           \
                _parent_name,                                   \
        };                                                      \
        static struct clk *_name##_parents[] = {                \
@@ -132,18 +123,8 @@ extern struct clk_ops clk_divider_ops;
                .flags = _divider_flags,                        \
                .lock = _lock,                                  \
        };                                                      \
-       static struct clk _name = {                             \
-               .name = #_name,                                 \
-               .ops = &clk_divider_ops,                        \
-               .hw = &_name##_hw.hw,                           \
-               .parent_names = _name##_parent_names,           \
-               .num_parents =                                  \
-                       ARRAY_SIZE(_name##_parent_names),       \
-               .parents = _name##_parents,                     \
-               .flags = _flags,                                \
-       };
-
-extern struct clk_ops clk_mux_ops;
+       DEFINE_CLK(_name, clk_divider_ops, _flags,              \
+                       _name##_parent_names, _name##_parents);
 
 #define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags, \
                                _reg, _shift, _width,           \
@@ -159,16 +140,28 @@ extern struct clk_ops clk_mux_ops;
                .flags = _mux_flags,                            \
                .lock = _lock,                                  \
        };                                                      \
-       static struct clk _name = {                             \
-               .name = #_name,                                 \
-               .ops = &clk_mux_ops,                            \
-               .hw = &_name##_hw.hw,                           \
-               .parent_names = _parent_names,                  \
-               .num_parents =                                  \
-                       ARRAY_SIZE(_parent_names),              \
-               .parents = _parents,                            \
-               .flags = _flags,                                \
-       };
+       DEFINE_CLK(_name, clk_mux_ops, _flags, _parent_names,   \
+                       _parents);
+
+#define DEFINE_CLK_FIXED_FACTOR(_name, _parent_name,           \
+                               _parent_ptr, _flags,            \
+                               _mult, _div)                    \
+       static struct clk _name;                                \
+       static const char *_name##_parent_names[] = {           \
+               _parent_name,                                   \
+       };                                                      \
+       static struct clk *_name##_parents[] = {                \
+               _parent_ptr,                                    \
+       };                                                      \
+       static struct clk_fixed_factor _name##_hw = {           \
+               .hw = {                                         \
+                       .clk = &_name,                          \
+               },                                              \
+               .mult = _mult,                                  \
+               .div = _div,                                    \
+       };                                                      \
+       DEFINE_CLK(_name, clk_fixed_factor_ops, _flags,         \
+                       _name##_parent_names, _name##_parents);
 
 /**
  * __clk_init - initialize the data structures in a struct clk
@@ -189,8 +182,12 @@ extern struct clk_ops clk_mux_ops;
  *
  * It is not necessary to call clk_register if __clk_init is used directly with
  * statically initialized clock data.
+ *
+ * Returns 0 on success, otherwise an error code.
  */
-void __clk_init(struct device *dev, struct clk *clk);
+int __clk_init(struct device *dev, struct clk *clk);
+
+struct clk *__clk_register(struct device *dev, struct clk_hw *hw);
 
 #endif /* CONFIG_COMMON_CLK */
 #endif /* CLK_PRIVATE_H */
index 5508897ad376054102e263958693573260ebe23b..4a0b483986c3b7ef812c2878ba9309a7bf248d1b 100644 (file)
 
 #ifdef CONFIG_COMMON_CLK
 
-/**
- * struct clk_hw - handle for traversing from a struct clk to its corresponding
- * hardware-specific structure.  struct clk_hw should be declared within struct
- * clk_foo and then referenced by the struct clk instance that uses struct
- * clk_foo's clk_ops
- *
- * clk: pointer to the struct clk instance that points back to this struct
- * clk_hw instance
- */
-struct clk_hw {
-       struct clk *clk;
-};
-
 /*
  * flags used across common struct clk.  these flags should only affect the
  * top-level framework.  custom flags for dealing with hardware specifics
@@ -39,6 +26,8 @@ struct clk_hw {
 #define CLK_IGNORE_UNUSED      BIT(3) /* do not gate even if unused */
 #define CLK_IS_ROOT            BIT(4) /* root clk, has no parent */
 
+struct clk_hw;
+
 /**
  * struct clk_ops -  Callback operations for hardware clocks; these are to
  * be provided by the clock implementation, and will be called by drivers
@@ -88,19 +77,11 @@ struct clk_hw {
  *             array index into the value programmed into the hardware.
  *             Returns 0 on success, -EERROR otherwise.
  *
- * @set_rate:  Change the rate of this clock. If this callback returns
- *             CLK_SET_RATE_PARENT, the rate change will be propagated to the
- *             parent clock (which may propagate again if the parent clock
- *             also sets this flag). The requested rate of the parent is
- *             passed back from the callback in the second 'unsigned long *'
- *             argument.  Note that it is up to the hardware clock's set_rate
- *             implementation to insure that clocks do not run out of spec
- *             when propgating the call to set_rate up to the parent.  One way
- *             to do this is to gate the clock (via clk_disable and/or
- *             clk_unprepare) before calling clk_set_rate, then ungating it
- *             afterward.  If your clock also has the CLK_GATE_SET_RATE flag
- *             set then this will insure safety.  Returns 0 on success,
- *             -EERROR otherwise.
+ * @set_rate:  Change the rate of this clock. The requested rate is specified
+ *             by the second argument, which should typically be the return
+ *             of .round_rate call.  The third argument gives the parent rate
+ *             which is likely helpful for most .set_rate implementation.
+ *             Returns 0 on success, -EERROR otherwise.
  *
  * The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
  * implementations to split any work between atomic (enable) and sleepable
@@ -125,10 +106,46 @@ struct clk_ops {
                                        unsigned long *);
        int             (*set_parent)(struct clk_hw *hw, u8 index);
        u8              (*get_parent)(struct clk_hw *hw);
-       int             (*set_rate)(struct clk_hw *hw, unsigned long);
+       int             (*set_rate)(struct clk_hw *hw, unsigned long,
+                                   unsigned long);
        void            (*init)(struct clk_hw *hw);
 };
 
+/**
+ * struct clk_init_data - holds init data that's common to all clocks and is
+ * shared between the clock provider and the common clock framework.
+ *
+ * @name: clock name
+ * @ops: operations this clock supports
+ * @parent_names: array of string names for all possible parents
+ * @num_parents: number of possible parents
+ * @flags: framework-level hints and quirks
+ */
+struct clk_init_data {
+       const char              *name;
+       const struct clk_ops    *ops;
+       const char              **parent_names;
+       u8                      num_parents;
+       unsigned long           flags;
+};
+
+/**
+ * struct clk_hw - handle for traversing from a struct clk to its corresponding
+ * hardware-specific structure.  struct clk_hw should be declared within struct
+ * clk_foo and then referenced by the struct clk instance that uses struct
+ * clk_foo's clk_ops
+ *
+ * @clk: pointer to the struct clk instance that points back to this struct
+ * clk_hw instance
+ *
+ * @init: pointer to struct clk_init_data that contains the init data shared
+ * with the common clock framework.
+ */
+struct clk_hw {
+       struct clk *clk;
+       struct clk_init_data *init;
+};
+
 /*
  * DOC: Basic clock implementations common to many platforms
  *
@@ -149,6 +166,7 @@ struct clk_fixed_rate {
        u8              flags;
 };
 
+extern const struct clk_ops clk_fixed_rate_ops;
 struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                unsigned long fixed_rate);
@@ -165,7 +183,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
  * Clock which can gate its output.  Implements .enable & .disable
  *
  * Flags:
- * CLK_GATE_SET_DISABLE - by default this clock sets the bit at bit_idx to
+ * CLK_GATE_SET_TO_DISABLE - by default this clock sets the bit at bit_idx to
  *     enable the clock.  Setting this flag does the opposite: setting the bit
  *     disable the clock and clearing it enables the clock
  */
@@ -175,11 +193,11 @@ struct clk_gate {
        u8              bit_idx;
        u8              flags;
        spinlock_t      *lock;
-       char            *parent[1];
 };
 
 #define CLK_GATE_SET_TO_DISABLE                BIT(0)
 
+extern const struct clk_ops clk_gate_ops;
 struct clk *clk_register_gate(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 bit_idx,
@@ -212,12 +230,12 @@ struct clk_divider {
        u8              width;
        u8              flags;
        spinlock_t      *lock;
-       char            *parent[1];
 };
 
 #define CLK_DIVIDER_ONE_BASED          BIT(0)
 #define CLK_DIVIDER_POWER_OF_TWO       BIT(1)
 
+extern const struct clk_ops clk_divider_ops;
 struct clk *clk_register_divider(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
@@ -238,7 +256,7 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
  *
  * Flags:
  * CLK_MUX_INDEX_ONE - register index starts at 1, not 0
- * CLK_MUX_INDEX_BITWISE - register index is a single bit (power of two)
+ * CLK_MUX_INDEX_BIT - register index is a single bit (power of two)
  */
 struct clk_mux {
        struct clk_hw   hw;
@@ -252,29 +270,49 @@ struct clk_mux {
 #define CLK_MUX_INDEX_ONE              BIT(0)
 #define CLK_MUX_INDEX_BIT              BIT(1)
 
+extern const struct clk_ops clk_mux_ops;
 struct clk *clk_register_mux(struct device *dev, const char *name,
-               char **parent_names, u8 num_parents, unsigned long flags,
+               const char **parent_names, u8 num_parents, unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
                u8 clk_mux_flags, spinlock_t *lock);
 
+/**
+ * struct clk_fixed_factor - fixed multiplier and divider clock
+ *
+ * @hw:                handle between common and hardware-specific interfaces
+ * @mult:      multiplier
+ * @div:       divider
+ *
+ * Clock with a fixed multiplier and divider. The output frequency is the
+ * parent clock rate divided by div and multiplied by mult.
+ * Implements .recalc_rate, .set_rate and .round_rate
+ */
+
+struct clk_fixed_factor {
+       struct clk_hw   hw;
+       unsigned int    mult;
+       unsigned int    div;
+};
+
+extern struct clk_ops clk_fixed_factor_ops;
+struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
+               const char *parent_name, unsigned long flags,
+               unsigned int mult, unsigned int div);
+
 /**
  * clk_register - allocate a new clock, register it and return an opaque cookie
  * @dev: device that is registering this clock
- * @name: clock name
- * @ops: operations this clock supports
  * @hw: link to hardware-specific clock data
- * @parent_names: array of string names for all possible parents
- * @num_parents: number of possible parents
- * @flags: framework-level hints and quirks
  *
  * clk_register is the primary interface for populating the clock tree with new
  * clock nodes.  It returns a pointer to the newly allocated struct clk which
  * cannot be dereferenced by driver code but may be used in conjuction with the
- * rest of the clock API.
+ * rest of the clock API.  In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
  */
-struct clk *clk_register(struct device *dev, const char *name,
-               const struct clk_ops *ops, struct clk_hw *hw,
-               char **parent_names, u8 num_parents, unsigned long flags);
+struct clk *clk_register(struct device *dev, struct clk_hw *hw);
+
+void clk_unregister(struct clk *clk);
 
 /* helper functions */
 const char *__clk_get_name(struct clk *clk);
index 70cf722ac3af298c879b6ff1741a11d2f199197b..ad5c43e8ae8ae7eeca38e0308aa7f77c41bfdc6d 100644 (file)
@@ -81,7 +81,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
 
 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
 
-#endif /* !CONFIG_COMMON_CLK */
+#endif
 
 /**
  * clk_get - lookup and obtain a reference to a clock producer.
@@ -252,7 +252,7 @@ void devm_clk_put(struct device *dev, struct clk *clk);
  * Returns rounded clock rate in Hz, or negative errno.
  */
 long clk_round_rate(struct clk *clk, unsigned long rate);
+
 /**
  * clk_set_rate - set the clock rate for a clock source
  * @clk: clock source
@@ -261,7 +261,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
  * Returns success (0) or negative errno.
  */
 int clk_set_rate(struct clk *clk, unsigned long rate);
+
 /**
  * clk_set_parent - set the parent clock source for this clock
  * @clk: clock source
index 5d46217f84adfaab0dbe679a7612da7062bb72c6..4e890394ef996e709c490439be23f0c6fe24292f 100644 (file)
@@ -577,8 +577,7 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
                const struct compat_iovec __user *uvector,
                unsigned long nr_segs,
                unsigned long fast_segs, struct iovec *fast_pointer,
-               struct iovec **ret_pointer,
-               int check_access);
+               struct iovec **ret_pointer);
 
 extern void __user *compat_alloc_user_space(unsigned long len);
 
index 7230bb59a06fec1f09ec379027729ee472193c07..2e9b9ebbeb78927681026ddbceb0255bc9197aad 100644 (file)
@@ -177,6 +177,7 @@ extern void put_online_cpus(void);
 #define hotcpu_notifier(fn, pri)       cpu_notifier(fn, pri)
 #define register_hotcpu_notifier(nb)   register_cpu_notifier(nb)
 #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
+void clear_tasks_mm_cpumask(int cpu);
 int cpu_down(unsigned int cpu);
 
 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
index 917dc5aeb1d4de6eff77341bc55c67df68c77fb6..ebbed2ce66379bd986fbf83f11e7ae8c32bf6070 100644 (file)
@@ -277,17 +277,13 @@ static inline void put_cred(const struct cred *_cred)
  * @task: The task to query
  *
  * Access the objective credentials of a task.  The caller must hold the RCU
- * readlock or the task must be dead and unable to change its own credentials.
+ * readlock.
  *
  * The result of this function should not be passed directly to get_cred();
  * rather get_task_cred() should be used instead.
  */
-#define __task_cred(task)                                              \
-       ({                                                              \
-               const struct task_struct *__t = (task);                 \
-               rcu_dereference_check(__t->real_cred,                   \
-                                     task_is_dead(__t));               \
-       })
+#define __task_cred(task)      \
+       rcu_dereference((task)->real_cred)
 
 /**
  * get_current_cred - Get the current task's subjective credentials
index 97e435b191f411380bbc546530c725093095231c..7c4750811b966e7d865484c2cf7020199c628164 100644 (file)
@@ -151,16 +151,6 @@ struct crush_map {
        struct crush_bucket **buckets;
        struct crush_rule **rules;
 
-       /*
-        * Parent pointers to identify the parent bucket a device or
-        * bucket in the hierarchy.  If an item appears more than
-        * once, this is the _last_ time it appeared (where buckets
-        * are processed in bucket id order, from -1 on down to
-        * -max_buckets.
-        */
-       __u32 *bucket_parents;
-       __u32 *device_parents;
-
        __s32 max_buckets;
        __u32 max_rules;
        __s32 max_devices;
@@ -168,8 +158,7 @@ struct crush_map {
 
 
 /* crush.c */
-extern int crush_get_bucket_item_weight(struct crush_bucket *b, int pos);
-extern void crush_calc_parents(struct crush_map *map);
+extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos);
 extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
 extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
 extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
@@ -177,4 +166,9 @@ extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
 extern void crush_destroy_bucket(struct crush_bucket *b);
 extern void crush_destroy(struct crush_map *map);
 
+static inline int crush_calc_tree_node(int i)
+{
+       return ((i+1) << 1)-1;
+}
+
 #endif
index c46b99c18bb0ca772f87c567f4ab46cec54932db..71d79f44a7d0753faeb61a3072efdbacc1aa2371 100644 (file)
 
 #include "crush.h"
 
-extern int crush_find_rule(struct crush_map *map, int pool, int type, int size);
-extern int crush_do_rule(struct crush_map *map,
+extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size);
+extern int crush_do_rule(const struct crush_map *map,
                         int ruleno,
                         int x, int *result, int result_max,
-                        int forcefeed,    /* -1 for none */
-                        __u32 *weights);
+                        const __u32 *weights);
 
 #endif
index ae36b72c22f3c49402fa4b84f8d529d00e777704..66c434f5dd1e05243d616f408679fa4a843336cb 100644 (file)
@@ -93,6 +93,10 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
 int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
                         int nregs, void __iomem *base, char *prefix);
 
+struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
+                                       struct dentry *parent,
+                                       u32 *array, u32 elements);
+
 bool debugfs_initialized(void);
 
 #else
@@ -219,6 +223,13 @@ static inline bool debugfs_initialized(void)
        return false;
 }
 
+static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
+                                       struct dentry *parent,
+                                       u32 *array, u32 elements)
+{
+       return ERR_PTR(-ENODEV);
+}
+
 #endif
 
 #endif
index e04f5776f6d0ef3ee0b4554638adbb9e67be026c..161d96241b1b4da3b9a0909749709a33bcca33c5 100644 (file)
@@ -667,6 +667,10 @@ struct device {
 
        struct dma_coherent_mem *dma_mem; /* internal for coherent mem
                                             override */
+#ifdef CONFIG_CMA
+       struct cma *cma_area;           /* contiguous memory area for dma
+                                          allocations */
+#endif
        /* arch specific additions */
        struct dev_archdata     archdata;
 
index 3efbfc2145c3ad97412392700a4992b534a8dc5b..eb48f3816df95d358b763ebb5e68afa62fccd830 100644 (file)
@@ -61,6 +61,13 @@ struct dma_buf_attachment;
  *                This Callback must not sleep.
  * @kmap: maps a page from the buffer into kernel address space.
  * @kunmap: [optional] unmaps a page from the buffer.
+ * @mmap: used to expose the backing storage to userspace. Note that the
+ *       mapping needs to be coherent - if the exporter doesn't directly
+ *       support this, it needs to fake coherency by shooting down any ptes
+ *       when transitioning away from the cpu domain.
+ * @vmap: [optional] creates a virtual mapping for the buffer into kernel
+ *       address space. Same restrictions as for vmap and friends apply.
+ * @vunmap: [optional] unmaps a vmap from the buffer
  */
 struct dma_buf_ops {
        int (*attach)(struct dma_buf *, struct device *,
@@ -92,6 +99,11 @@ struct dma_buf_ops {
        void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
        void *(*kmap)(struct dma_buf *, unsigned long);
        void (*kunmap)(struct dma_buf *, unsigned long, void *);
+
+       int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
+
+       void *(*vmap)(struct dma_buf *);
+       void (*vunmap)(struct dma_buf *, void *vaddr);
 };
 
 /**
@@ -167,6 +179,11 @@ void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
 void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
 void *dma_buf_kmap(struct dma_buf *, unsigned long);
 void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
+
+int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
+                unsigned long);
+void *dma_buf_vmap(struct dma_buf *);
+void dma_buf_vunmap(struct dma_buf *, void *vaddr);
 #else
 
 static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
@@ -248,6 +265,22 @@ static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
                                  unsigned long pnum, void *vaddr)
 {
 }
+
+static inline int dma_buf_mmap(struct dma_buf *dmabuf,
+                              struct vm_area_struct *vma,
+                              unsigned long pgoff)
+{
+       return -ENODEV;
+}
+
+static inline void *dma_buf_vmap(struct dma_buf *dmabuf)
+{
+       return NULL;
+}
+
+static inline void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+}
 #endif /* CONFIG_DMA_SHARED_BUFFER */
 
 #endif /* __DMA_BUF_H__ */
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
new file mode 100644 (file)
index 0000000..2f303e4
--- /dev/null
@@ -0,0 +1,110 @@
+#ifndef __LINUX_CMA_H
+#define __LINUX_CMA_H
+
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ *     Marek Szyprowski <m.szyprowski@samsung.com>
+ *     Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version of the License.
+ */
+
+/*
+ * Contiguous Memory Allocator
+ *
+ *   The Contiguous Memory Allocator (CMA) makes it possible to
+ *   allocate big contiguous chunks of memory after the system has
+ *   booted.
+ *
+ * Why is it needed?
+ *
+ *   Various devices on embedded systems have no scatter-gather and/or
+ *   IO map support and require contiguous blocks of memory to
+ *   operate.  They include devices such as cameras, hardware video
+ *   coders, etc.
+ *
+ *   Such devices often require big memory buffers (a full HD frame
+ *   is, for instance, more than 2 megapixels large, i.e. more than 6
+ *   MB of memory), which makes mechanisms such as kmalloc() or
+ *   alloc_page() ineffective.
+ *
+ *   At the same time, a solution where a big memory region is
+ *   reserved for a device is suboptimal since often more memory is
+ *   reserved than strictly required and, moreover, the memory is
+ *   inaccessible to page system even if device drivers don't use it.
+ *
+ *   CMA tries to solve this issue by operating on memory regions
+ *   where only movable pages can be allocated from.  This way, kernel
+ *   can use the memory for pagecache and when device driver requests
+ *   it, allocated pages can be migrated.
+ *
+ * Driver usage
+ *
+ *   CMA should not be used by the device drivers directly. It is
+ *   only a helper framework for dma-mapping subsystem.
+ *
+ *   For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ */
+
+#ifdef __KERNEL__
+
+struct cma;
+struct page;
+struct device;
+
+#ifdef CONFIG_CMA
+
+/*
+ * There is always at least global CMA area and a few optional device
+ * private areas configured in kernel .config.
+ */
+#define MAX_CMA_AREAS  (1 + CONFIG_CMA_AREAS)
+
+extern struct cma *dma_contiguous_default_area;
+
+void dma_contiguous_reserve(phys_addr_t addr_limit);
+int dma_declare_contiguous(struct device *dev, unsigned long size,
+                          phys_addr_t base, phys_addr_t limit);
+
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+                                      unsigned int order);
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+                                int count);
+
+#else
+
+#define MAX_CMA_AREAS  (0)
+
+static inline void dma_contiguous_reserve(phys_addr_t limit) { }
+
+static inline
+int dma_declare_contiguous(struct device *dev, unsigned long size,
+                          phys_addr_t base, phys_addr_t limit)
+{
+       return -ENOSYS;
+}
+
+static inline
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+                                      unsigned int order)
+{
+       return NULL;
+}
+
+static inline
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+                                int count)
+{
+       return false;
+}
+
+#endif
+
+#endif
+
+#endif
index f9a2e5e67a5423e204389b8d01e0f83d41df1a30..56377df391242d4639db14d9b23a8fa915db9335 100644 (file)
@@ -615,11 +615,13 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
 }
 
 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
-       struct dma_chan *chan, void *buf, size_t len,
+       struct dma_chan *chan, dma_addr_t buf, size_t len,
        enum dma_transfer_direction dir, unsigned long flags)
 {
        struct scatterlist sg;
-       sg_init_one(&sg, buf, len);
+       sg_init_table(&sg, 1);
+       sg_dma_address(&sg) = buf;
+       sg_dma_len(&sg) = len;
 
        return chan->device->device_prep_slave_sg(chan, &sg, 1,
                                                  dir, flags, NULL);
@@ -633,6 +635,18 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
                                                  dir, flags, NULL);
 }
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+struct rio_dma_ext;
+static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
+       struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+       enum dma_transfer_direction dir, unsigned long flags,
+       struct rio_dma_ext *rio_ext)
+{
+       return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+                                                 dir, flags, rio_ext);
+}
+#endif
+
 static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction dir)
index 9e5f5607eba36b918db8beb8a3bf0c1a807e9ecc..47e3d48505843064b02fe9e7e167a1a591f5b83f 100644 (file)
@@ -53,7 +53,7 @@
 
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.11"
+#define REL_VERSION "8.3.13"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 96
@@ -112,8 +112,8 @@ enum drbd_ret_code {
        ERR_OPEN_MD_DISK        = 105,
        ERR_DISK_NOT_BDEV       = 107,
        ERR_MD_NOT_BDEV         = 108,
-       ERR_DISK_TO_SMALL       = 111,
-       ERR_MD_DISK_TO_SMALL    = 112,
+       ERR_DISK_TOO_SMALL      = 111,
+       ERR_MD_DISK_TOO_SMALL   = 112,
        ERR_BDCLAIM_DISK        = 114,
        ERR_BDCLAIM_MD_DISK     = 115,
        ERR_MD_IDX_INVALID      = 116,
index 447c36752385a52bc661adcea0cec1ca37ffad7f..fb670bf603f7c730469d0c56c22abb7f6274d192 100644 (file)
 #define DRBD_TIMEOUT_MAX 600
 #define DRBD_TIMEOUT_DEF 60       /* 6 seconds */
 
+ /* If backing disk takes longer than disk_timeout, mark the disk as failed */
+#define DRBD_DISK_TIMEOUT_MIN 0    /* 0 = disabled */
+#define DRBD_DISK_TIMEOUT_MAX 6000 /* 10 Minutes */
+#define DRBD_DISK_TIMEOUT_DEF 0    /* disabled */
+
   /* active connection retries when C_WF_CONNECTION */
 #define DRBD_CONNECT_INT_MIN 1
 #define DRBD_CONNECT_INT_MAX 120
@@ -60,7 +65,7 @@
 
  /* timeout for the ping packets.*/
 #define DRBD_PING_TIMEO_MIN  1
-#define DRBD_PING_TIMEO_MAX  100
+#define DRBD_PING_TIMEO_MAX  300
 #define DRBD_PING_TIMEO_DEF  5
 
   /* max number of write requests between write barriers */
index ab6159e4fcf0c6122cf840aef801fef822b0ab46..a8706f08ab367cfb9b8b9ac79b7f62cb94d0a876 100644 (file)
@@ -31,9 +31,12 @@ NL_PACKET(disk_conf, 3,
        NL_INTEGER(     56,     T_MAY_IGNORE,   max_bio_bvecs)
        NL_BIT(         57,     T_MAY_IGNORE,   no_disk_barrier)
        NL_BIT(         58,     T_MAY_IGNORE,   no_disk_drain)
+       NL_INTEGER(     89,     T_MAY_IGNORE,   disk_timeout)
 )
 
-NL_PACKET(detach, 4, )
+NL_PACKET(detach, 4,
+       NL_BIT(         88,     T_MANDATORY,    detach_force)
+)
 
 NL_PACKET(net_conf, 5,
        NL_STRING(      8,      T_MANDATORY,    my_addr,        128)
index c621d762bb2c9d18fd4ccda585c78491cafb1730..91ba3bae42ee53b7dac7c891284cee2b32ecb32a 100644 (file)
@@ -70,6 +70,25 @@ enum dev_type {
 #define DEV_FLAG_X32           BIT(DEV_X32)
 #define DEV_FLAG_X64           BIT(DEV_X64)
 
+/**
+ * enum hw_event_mc_err_type - type of the detected error
+ *
+ * @HW_EVENT_ERR_CORRECTED:    Corrected Error - Indicates that an ECC
+ *                             corrected error was detected
+ * @HW_EVENT_ERR_UNCORRECTED:  Uncorrected Error - Indicates an error that
+ *                             can't be corrected by ECC, but it is not
+ *                             fatal (maybe it is on an unused memory area,
+ *                             or the memory controller could recover from
+ *                             it for example, by re-trying the operation).
+ * @HW_EVENT_ERR_FATAL:                Fatal Error - Uncorrected error that could not
+ *                             be recovered.
+ */
+enum hw_event_mc_err_type {
+       HW_EVENT_ERR_CORRECTED,
+       HW_EVENT_ERR_UNCORRECTED,
+       HW_EVENT_ERR_FATAL,
+};
+
 /**
  * enum mem_type - memory types. For a more detailed reference, please see
  *                     http://en.wikipedia.org/wiki/DRAM
@@ -312,39 +331,142 @@ enum scrub_type {
  * PS - I enjoyed writing all that about as much as you enjoyed reading it.
  */
 
+/**
+ * enum edac_mc_layer_type - memory controller hierarchy layer
+ *
+ * @EDAC_MC_LAYER_BRANCH:      memory layer is named "branch"
+ * @EDAC_MC_LAYER_CHANNEL:     memory layer is named "channel"
+ * @EDAC_MC_LAYER_SLOT:                memory layer is named "slot"
+ * @EDAC_MC_LAYER_CHIP_SELECT: memory layer is named "chip select"
+ *
+ * This enum is used by the drivers to tell edac_mc_sysfs what name should
+ * be used when describing a memory stick location.
+ */
+enum edac_mc_layer_type {
+       EDAC_MC_LAYER_BRANCH,
+       EDAC_MC_LAYER_CHANNEL,
+       EDAC_MC_LAYER_SLOT,
+       EDAC_MC_LAYER_CHIP_SELECT,
+};
+
+/**
+ * struct edac_mc_layer - describes the memory controller hierarchy
+ * @type:              layer type
+ * @size:              number of components per layer. For example,
+ *                     if the channel layer has two channels, size = 2
+ * @is_virt_csrow:     This layer is part of the "csrow" when old API
+ *                     compatibility mode is enabled. Otherwise, it is
+ *                     a channel
+ */
+struct edac_mc_layer {
+       enum edac_mc_layer_type type;
+       unsigned                size;
+       bool                    is_virt_csrow;
+};
+
+/*
+ * Maximum number of layers used by the memory controller to uniquely
+ * identify a single memory stick.
+ * NOTE: Changing this constant requires not only to change the constant
+ * below, but also to change the existing code at the core, as there are
+ * some code there that are optimized for 3 layers.
+ */
+#define EDAC_MAX_LAYERS                3
+
+/**
+ * EDAC_DIMM_PTR - Macro responsible to find a pointer inside a pointer array
+ *                for the element given by [layer0,layer1,layer2] position
+ *
+ * @layers:    a struct edac_mc_layer array, describing how many elements
+ *             were allocated for each layer
+ * @var:       name of the var where we want to get the pointer
+ *             (like mci->dimms)
+ * @n_layers:  Number of layers at the @layers array
+ * @layer0:    layer0 position
+ * @layer1:    layer1 position. Unused if n_layers < 2
+ * @layer2:    layer2 position. Unused if n_layers < 3
+ *
+ * For 1 layer, this macro returns &var[layer0]
+ * For 2 layers, this macro is similar to allocate a bi-dimensional array
+ *             and to return "&var[layer0][layer1]"
+ * For 3 layers, this macro is similar to allocate a tri-dimensional array
+ *             and to return "&var[layer0][layer1][layer2]"
+ *
+ * A loop could be used here to make it more generic, but, as we only have
+ * 3 layers, this is a little faster.
+ * By design, layers can never be 0 or more than 3. If that ever happens,
+ * a NULL is returned, causing an OOPS during the memory allocation routine,
+ * which would point out to the developer that they're doing something wrong.
+ */
+#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
+       typeof(var) __p;                                                \
+       if ((nlayers) == 1)                                             \
+               __p = &var[layer0];                                     \
+       else if ((nlayers) == 2)                                        \
+               __p = &var[(layer1) + ((layers[1]).size * (layer0))];   \
+       else if ((nlayers) == 3)                                        \
+               __p = &var[(layer2) + ((layers[2]).size * ((layer1) +   \
+                           ((layers[1]).size * (layer0))))];           \
+       else                                                            \
+               __p = NULL;                                             \
+       __p;                                                            \
+})
+
+
+/* FIXME: add the proper per-location error counts */
+struct dimm_info {
+       char label[EDAC_MC_LABEL_LEN + 1];      /* DIMM label on motherboard */
+
+       /* Memory location data */
+       unsigned location[EDAC_MAX_LAYERS];
+
+       struct mem_ctl_info *mci;       /* the parent */
+
+       u32 grain;              /* granularity of reported error in bytes */
+       enum dev_type dtype;    /* memory device type */
+       enum mem_type mtype;    /* memory dimm type */
+       enum edac_type edac_mode;       /* EDAC mode for this dimm */
+
+       u32 nr_pages;                   /* number of pages on this dimm */
+
+       unsigned csrow, cschannel;      /* Points to the old API data */
+};
+
 /**
  * struct rank_info - contains the information for one DIMM rank
  *
  * @chan_idx:  channel number where the rank is (typically, 0 or 1)
  * @ce_count:  number of correctable errors for this rank
- * @label:     DIMM label. Different ranks for the same DIMM should be
- *             filled, on userspace, with the same label.
- *             FIXME: The core currently won't enforce it.
  * @csrow:     A pointer to the chip select row structure (the parent
  *             structure). The location of the rank is given by
  *             the (csrow->csrow_idx, chan_idx) vector.
+ * @dimm:      A pointer to the DIMM structure, where the DIMM label
+ *             information is stored.
+ *
+ * FIXME: Currently, the EDAC core model will assume one DIMM per rank.
+ *       This is a bad assumption, but it makes this patch easier. Later
+ *       patches in this series will fix this issue.
  */
 struct rank_info {
        int chan_idx;
-       u32 ce_count;
-       char label[EDAC_MC_LABEL_LEN + 1];
-       struct csrow_info *csrow;       /* the parent */
+       struct csrow_info *csrow;
+       struct dimm_info *dimm;
+
+       u32 ce_count;           /* Correctable Errors for this csrow */
 };
 
 struct csrow_info {
-       unsigned long first_page;       /* first page number in dimm */
-       unsigned long last_page;        /* last page number in dimm */
+       /* Used only by edac_mc_find_csrow_by_page() */
+       unsigned long first_page;       /* first page number in csrow */
+       unsigned long last_page;        /* last page number in csrow */
        unsigned long page_mask;        /* used for interleaving -
-                                        * 0UL for non intlv
-                                        */
-       u32 nr_pages;           /* number of pages in csrow */
-       u32 grain;              /* granularity of reported error in bytes */
-       int csrow_idx;          /* the chip-select row */
-       enum dev_type dtype;    /* memory device type */
+                                        * 0UL for non intlv */
+
+       int csrow_idx;                  /* the chip-select row */
+
        u32 ue_count;           /* Uncorrectable Errors for this csrow */
        u32 ce_count;           /* Correctable Errors for this csrow */
-       enum mem_type mtype;    /* memory csrow type */
-       enum edac_type edac_mode;       /* EDAC mode for this csrow */
+
        struct mem_ctl_info *mci;       /* the parent */
 
        struct kobject kobj;    /* sysfs kobject for this csrow */
@@ -426,8 +548,20 @@ struct mem_ctl_info {
        unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
                                           unsigned long page);
        int mc_idx;
-       int nr_csrows;
        struct csrow_info *csrows;
+       unsigned nr_csrows, num_cschannel;
+
+       /* Memory Controller hierarchy */
+       unsigned n_layers;
+       struct edac_mc_layer *layers;
+       bool mem_is_per_rank;
+
+       /*
+        * DIMM info. Will eventually remove the entire csrows_info some day
+        */
+       unsigned tot_dimms;
+       struct dimm_info *dimms;
+
        /*
         * FIXME - what about controllers on other busses? - IDs must be
         * unique.  dev pointer should be sufficiently unique, but
@@ -440,12 +574,16 @@ struct mem_ctl_info {
        const char *dev_name;
        char proc_name[MC_PROC_NAME_MAX_LEN + 1];
        void *pvt_info;
-       u32 ue_noinfo_count;    /* Uncorrectable Errors w/o info */
-       u32 ce_noinfo_count;    /* Correctable Errors w/o info */
-       u32 ue_count;           /* Total Uncorrectable Errors for this MC */
-       u32 ce_count;           /* Total Correctable Errors for this MC */
        unsigned long start_time;       /* mci load start time (in jiffies) */
 
+       /*
+        * drivers shouldn't access those fields directly, as the core
+        * already handles that.
+        */
+       u32 ce_noinfo_count, ue_noinfo_count;
+       u32 ue_mc, ce_mc;
+       u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
+
        struct completion complete;
 
        /* edac sysfs device control */
@@ -458,7 +596,7 @@ struct mem_ctl_info {
         * by the low level driver.
         *
         * Set by the low level driver to provide attributes at the
-        * controller level, same level as 'ue_count' and 'ce_count' above.
+        * controller level.
         * An array of structures, NULL terminated
         *
         * If attributes are desired, then set to array of attributes
index 7d4e0356f329253f8932e712693544d31e01cacd..c03af7687bb4fdd916d5bf3c835e801376132392 100644 (file)
@@ -28,12 +28,13 @@ typedef int (elevator_may_queue_fn) (struct request_queue *, int);
 
 typedef void (elevator_init_icq_fn) (struct io_cq *);
 typedef void (elevator_exit_icq_fn) (struct io_cq *);
-typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
+typedef int (elevator_set_req_fn) (struct request_queue *, struct request *,
+                                  struct bio *, gfp_t);
 typedef void (elevator_put_req_fn) (struct request *);
 typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
 typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
 
-typedef void *(elevator_init_fn) (struct request_queue *);
+typedef int (elevator_init_fn) (struct request_queue *);
 typedef void (elevator_exit_fn) (struct elevator_queue *);
 
 struct elevator_ops
@@ -129,7 +130,8 @@ extern void elv_unregister_queue(struct request_queue *q);
 extern int elv_may_queue(struct request_queue *, int);
 extern void elv_abort_queue(struct request_queue *);
 extern void elv_completed_request(struct request_queue *, struct request *);
-extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
+extern int elv_set_request(struct request_queue *q, struct request *rq,
+                          struct bio *bio, gfp_t gfp_mask);
 extern void elv_put_request(struct request_queue *, struct request *);
 extern void elv_drain_elevator(struct request_queue *);
 
index 2d09bfa5c2628a3e1e396350b7d9d14f1a2791c8..e0de516374da37de6a95c35e79ab7cabb899d177 100644 (file)
@@ -17,6 +17,7 @@
 #define ENOIOCTLCMD    515     /* No ioctl command */
 #define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */
 #define EPROBE_DEFER   517     /* Driver requests probe retry */
+#define EOPENSTALE     518     /* open found a stale dentry */
 
 /* Defined for the NFSv3 protocol */
 #define EBADHANDLE     521     /* Illegal NFS file handle */
index 91bb4f27238cf156cb3ecc3daac22d37684aebd3..3c3ef19a625a26a38944cb06afca97b872d48234 100644 (file)
@@ -34,7 +34,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
 struct file *eventfd_fget(int fd);
 struct eventfd_ctx *eventfd_ctx_fdget(int fd);
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
-int eventfd_signal(struct eventfd_ctx *ctx, int n);
+__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
 ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
                                  __u64 *cnt);
index 3a4cef5322dcab4d6b50b96243fa7187b2da1ebd..12291a7ee2759164026ac602ab1712b5d66faef0 100644 (file)
@@ -165,8 +165,8 @@ struct fid {
  */
 
 struct export_operations {
-       int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
-                       int connectable);
+       int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len,
+                       struct inode *parent);
        struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid,
                        int fh_len, int fh_type);
        struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid,
index d31cb682e17371e0271947d59f2311818316152d..ac3f1c605843201e8e3e4561bc84d2b910b9f5ba 100644 (file)
@@ -554,6 +554,10 @@ struct fb_cursor_user {
 #define FB_EVENT_FB_UNBIND              0x0E
 /*      CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */
 #define FB_EVENT_REMAP_ALL_CONSOLE      0x0F
+/*      A hardware display blank early change occurred */
+#define FB_EARLY_EVENT_BLANK           0x10
+/*      A hardware display blank revert early change occurred */
+#define FB_R_EARLY_EVENT_BLANK         0x11
 
 struct fb_event {
        struct fb_info *info;
@@ -607,6 +611,7 @@ struct fb_deferred_io {
        struct mutex lock; /* mutex that protects the page list */
        struct list_head pagelist; /* list of touched pages */
        /* callback */
+       void (*first_io)(struct fb_info *info);
        void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
 };
 #endif
index c0e53372b082f445a6d713839452015de367e8fb..17fd887c798f3fff64aaf27bf82a81a2d237ee31 100644 (file)
@@ -173,6 +173,15 @@ struct inodes_stat_t {
 #define WRITE_FUA              (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
 #define WRITE_FLUSH_FUA                (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
 
+
+/*
+ * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
+ * that indicates that they should check the contents of the iovec are
+ * valid, but not check the memory that the iovec elements
+ * points too.
+ */
+#define CHECK_IOVEC_ONLY -1
+
 #define SEL_IN         1
 #define SEL_OUT                2
 #define SEL_EX         4
@@ -793,13 +802,14 @@ struct inode {
                unsigned int __i_nlink;
        };
        dev_t                   i_rdev;
+       loff_t                  i_size;
        struct timespec         i_atime;
        struct timespec         i_mtime;
        struct timespec         i_ctime;
        spinlock_t              i_lock; /* i_blocks, i_bytes, maybe i_size */
        unsigned short          i_bytes;
+       unsigned int            i_blkbits;
        blkcnt_t                i_blocks;
-       loff_t                  i_size;
 
 #ifdef __NEED_I_SIZE_ORDERED
        seqcount_t              i_size_seqcount;
@@ -819,9 +829,8 @@ struct inode {
                struct list_head        i_dentry;
                struct rcu_head         i_rcu;
        };
-       atomic_t                i_count;
-       unsigned int            i_blkbits;
        u64                     i_version;
+       atomic_t                i_count;
        atomic_t                i_dio_count;
        atomic_t                i_writecount;
        const struct file_operations    *i_fop; /* former ->i_op->default_file_ops */
@@ -1681,9 +1690,9 @@ struct inode_operations {
        ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
        ssize_t (*listxattr) (struct dentry *, char *, size_t);
        int (*removexattr) (struct dentry *, const char *);
-       void (*truncate_range)(struct inode *, loff_t, loff_t);
        int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
                      u64 len);
+       int (*update_time)(struct inode *, struct timespec *, int);
 } ____cacheline_aligned;
 
 struct seq_file;
@@ -1691,8 +1700,7 @@ struct seq_file;
 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
                              unsigned long nr_segs, unsigned long fast_segs,
                              struct iovec *fast_pointer,
-                             struct iovec **ret_pointer,
-                             int check_access);
+                             struct iovec **ret_pointer);
 
 extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
 extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
@@ -1764,8 +1772,8 @@ struct super_operations {
  * I_FREEING           Set when inode is about to be freed but still has dirty
  *                     pages or buffers attached or the inode itself is still
  *                     dirty.
- * I_CLEAR             Added by end_writeback().  In this state the inode is clean
- *                     and can be destroyed.  Inode keeps I_FREEING.
+ * I_CLEAR             Added by clear_inode().  In this state the inode is
+ *                     clean and can be destroyed.  Inode keeps I_FREEING.
  *
  *                     Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
  *                     prohibited for many purposes.  iget() must wait for
@@ -1773,9 +1781,10 @@ struct super_operations {
  *                     anew.  Other functions will just ignore such inodes,
  *                     if appropriate.  I_NEW is used for waiting.
  *
- * I_SYNC              Synchonized write of dirty inode data.  The bits is
- *                     set during data writeback, and cleared with a wakeup
- *                     on the bit address once it is done.
+ * I_SYNC              Writeback of inode is running. The bit is set during
+ *                     data writeback, and cleared with a wakeup on the bit
+ *                     address once it is done. The bit is also used to pin
+ *                     the inode in memory for flusher thread.
  *
  * I_REFERENCED                Marks the inode as recently references on the LRU list.
  *
@@ -1842,6 +1851,13 @@ static inline void inode_inc_iversion(struct inode *inode)
        spin_unlock(&inode->i_lock);
 }
 
+enum file_time_flags {
+       S_ATIME = 1,
+       S_MTIME = 2,
+       S_CTIME = 4,
+       S_VERSION = 8,
+};
+
 extern void touch_atime(struct path *);
 static inline void file_accessed(struct file *file)
 {
@@ -2349,7 +2365,7 @@ extern unsigned int get_next_ino(void);
 
 extern void __iget(struct inode * inode);
 extern void iget_failed(struct inode *);
-extern void end_writeback(struct inode *);
+extern void clear_inode(struct inode *);
 extern void __destroy_inode(struct inode *);
 extern struct inode *new_inode_pseudo(struct super_block *sb);
 extern struct inode *new_inode(struct super_block *sb);
@@ -2453,8 +2469,6 @@ enum {
 };
 
 void dio_end_io(struct bio *bio, int error);
-void inode_dio_wait(struct inode *inode);
-void inode_dio_done(struct inode *inode);
 
 ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        struct block_device *bdev, const struct iovec *iov, loff_t offset,
@@ -2469,12 +2483,11 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
                                    offset, nr_segs, get_block, NULL, NULL,
                                    DIO_LOCKING | DIO_SKIP_HOLES);
 }
-#else
-static inline void inode_dio_wait(struct inode *inode)
-{
-}
 #endif
 
+void inode_dio_wait(struct inode *inode);
+void inode_dio_done(struct inode *inode);
+
 extern const struct file_operations generic_ro_fops;
 
 #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
@@ -2578,7 +2591,7 @@ extern int inode_change_ok(const struct inode *, struct iattr *);
 extern int inode_newsize_ok(const struct inode *, loff_t offset);
 extern void setattr_copy(struct inode *inode, const struct iattr *attr);
 
-extern void file_update_time(struct file *file);
+extern int file_update_time(struct file *file);
 
 extern int generic_show_options(struct seq_file *m, struct dentry *root);
 extern void save_mount_options(struct super_block *sb, char *options);
index 203d7c4a3e1142e67979a74f6711040d83cbd5a6..55d8702383991464eb868262d724e1373218df75 100644 (file)
@@ -15,14 +15,6 @@ struct mxs_dma_data {
        int chan_irq;
 };
 
-static inline int mxs_dma_is_apbh(struct dma_chan *chan)
-{
-       return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbh");
-}
-
-static inline int mxs_dma_is_apbx(struct dma_chan *chan)
-{
-       return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbx");
-}
-
+extern int mxs_dma_is_apbh(struct dma_chan *chan);
+extern int mxs_dma_is_apbx(struct dma_chan *chan);
 #endif /* __MACH_MXS_DMA_H__ */
index 91d0e0a34ef3185a6051d8394cab63dfb76a04cb..63d966d5c2ea7a382c2f42cc664c7804dec86f73 100644 (file)
@@ -60,7 +60,7 @@
 #define FS_EVENTS_POSS_ON_CHILD   (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\
                                   FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\
                                   FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
-                                  FS_DELETE)
+                                  FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM)
 
 #define FS_MOVE                        (FS_MOVED_FROM | FS_MOVED_TO)
 
index 73c28dea10ae395f1a7a7f4a517f174578dde313..7a114016ac7de83cf44190ebadec99b12f924cf4 100644 (file)
@@ -110,6 +110,9 @@ extern int lockdep_genl_is_held(void);
 #define genl_dereference(p)                                    \
        rcu_dereference_protected(p, lockdep_genl_is_held())
 
+#define MODULE_ALIAS_GENL_FAMILY(family)\
+ MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
+
 #endif /* __KERNEL__ */
 
 #endif /* __LINUX_GENERIC_NETLINK_H */
index 581e74b7df95e3d063bbbcaece35aafe60fad774..1e49be49d3247cb74cc798aa2772a085102851a5 100644 (file)
@@ -391,4 +391,16 @@ static inline bool pm_suspended_storage(void)
 }
 #endif /* CONFIG_PM_SLEEP */
 
+#ifdef CONFIG_CMA
+
+/* The below functions must be run on a range from a single zone. */
+extern int alloc_contig_range(unsigned long start, unsigned long end,
+                             unsigned migratetype);
+extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+
+/* CMA stuff */
+extern void init_cma_reserved_pageblock(struct page *page);
+
+#endif
+
 #endif /* __LINUX_GFP_H */
diff --git a/include/linux/gpio-i2cmux.h b/include/linux/gpio-i2cmux.h
deleted file mode 100644 (file)
index 4a333bb..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * gpio-i2cmux interface to platform code
- *
- * Peter Korsgaard <peter.korsgaard@barco.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _LINUX_GPIO_I2CMUX_H
-#define _LINUX_GPIO_I2CMUX_H
-
-/* MUX has no specific idle mode */
-#define GPIO_I2CMUX_NO_IDLE    ((unsigned)-1)
-
-/**
- * struct gpio_i2cmux_platform_data - Platform-dependent data for gpio-i2cmux
- * @parent: Parent I2C bus adapter number
- * @base_nr: Base I2C bus number to number adapters from or zero for dynamic
- * @values: Array of bitmasks of GPIO settings (low/high) for each
- *     position
- * @n_values: Number of multiplexer positions (busses to instantiate)
- * @gpios: Array of GPIO numbers used to control MUX
- * @n_gpios: Number of GPIOs used to control MUX
- * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used
- */
-struct gpio_i2cmux_platform_data {
-       int parent;
-       int base_nr;
-       const unsigned *values;
-       int n_values;
-       const unsigned *gpios;
-       int n_gpios;
-       unsigned idle;
-};
-
-#endif /* _LINUX_GPIO_I2CMUX_H */
index c8af7a2efb5288e582e3fba9df08308d6c3f8076..4c59b11311870e74ebbeb110f77e2017be97c3bb 100644 (file)
@@ -59,6 +59,8 @@ extern pmd_t *page_check_address_pmd(struct page *page,
 #define HPAGE_PMD_MASK HPAGE_MASK
 #define HPAGE_PMD_SIZE HPAGE_SIZE
 
+extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
+
 #define transparent_hugepage_enabled(__vma)                            \
        ((transparent_hugepage_flags &                                  \
          (1<<TRANSPARENT_HUGEPAGE_FLAG) ||                             \
index 000837e126e64b3b0e64d67aff67a3c9626e4c5e..d5d6bbe2259e527b34d63abc4a93d64c469e8dd6 100644 (file)
@@ -284,6 +284,14 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
 
 #include <asm/hugetlb.h>
 
+#ifndef arch_make_huge_pte
+static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+                                      struct page *page, int writable)
+{
+       return entry;
+}
+#endif
+
 static inline struct hstate *page_hstate(struct page *page)
 {
        return size_to_hstate(PAGE_SIZE << compound_order(page));
diff --git a/include/linux/i2c-mux-gpio.h b/include/linux/i2c-mux-gpio.h
new file mode 100644 (file)
index 0000000..a36343a
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * i2c-mux-gpio interface to platform code
+ *
+ * Peter Korsgaard <peter.korsgaard@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_I2C_MUX_GPIO_H
+#define _LINUX_I2C_MUX_GPIO_H
+
+/* MUX has no specific idle mode */
+#define I2C_MUX_GPIO_NO_IDLE   ((unsigned)-1)
+
+/**
+ * struct i2c_mux_gpio_platform_data - Platform-dependent data for i2c-mux-gpio
+ * @parent: Parent I2C bus adapter number
+ * @base_nr: Base I2C bus number to number adapters from or zero for dynamic
+ * @values: Array of bitmasks of GPIO settings (low/high) for each
+ *     position
+ * @n_values: Number of multiplexer positions (busses to instantiate)
+ * @gpios: Array of GPIO numbers used to control MUX
+ * @n_gpios: Number of GPIOs used to control MUX
+ * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used
+ */
+struct i2c_mux_gpio_platform_data {
+       int parent;
+       int base_nr;
+       const unsigned *values;
+       int n_values;
+       const unsigned *gpios;
+       int n_gpios;
+       unsigned idle;
+};
+
+#endif /* _LINUX_I2C_MUX_GPIO_H */
index 747f0cde41640e08cbbcc75654e7275ae4ea2baf..c790838300148f2021bb97072f4267cd216861cb 100644 (file)
@@ -34,7 +34,8 @@
  * mux control.
  */
 struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
-                               void *mux_dev, u32 force_nr, u32 chan_id,
+                               struct device *mux_dev,
+                               void *mux_priv, u32 force_nr, u32 chan_id,
                                int (*select) (struct i2c_adapter *,
                                               void *mux_dev, u32 chan_id),
                                int (*deselect) (struct i2c_adapter *,
index 195d8b3d9cfbf9b9363367f5604aaec029c68e20..ddfa04108baf14ade92a85d3a3f909ca9ab30589 100644 (file)
@@ -232,6 +232,7 @@ struct i2c_client {
 #define to_i2c_client(d) container_of(d, struct i2c_client, dev)
 
 extern struct i2c_client *i2c_verify_client(struct device *dev);
+extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
 
 static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
 {
@@ -540,7 +541,7 @@ struct i2c_msg {
        __u16 flags;
 #define I2C_M_TEN              0x0010  /* this is a ten bit chip address */
 #define I2C_M_RD               0x0001  /* read data, from slave to master */
-#define I2C_M_NOSTART          0x4000  /* if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_NOSTART          0x4000  /* if I2C_FUNC_NOSTART */
 #define I2C_M_REV_DIR_ADDR     0x2000  /* if I2C_FUNC_PROTOCOL_MANGLING */
 #define I2C_M_IGNORE_NAK       0x1000  /* if I2C_FUNC_PROTOCOL_MANGLING */
 #define I2C_M_NO_RD_ACK                0x0800  /* if I2C_FUNC_PROTOCOL_MANGLING */
@@ -553,8 +554,9 @@ struct i2c_msg {
 
 #define I2C_FUNC_I2C                   0x00000001
 #define I2C_FUNC_10BIT_ADDR            0x00000002
-#define I2C_FUNC_PROTOCOL_MANGLING     0x00000004 /* I2C_M_NOSTART etc. */
+#define I2C_FUNC_PROTOCOL_MANGLING     0x00000004 /* I2C_M_IGNORE_NAK etc. */
 #define I2C_FUNC_SMBUS_PEC             0x00000008
+#define I2C_FUNC_NOSTART               0x00000010 /* I2C_M_NOSTART */
 #define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */
 #define I2C_FUNC_SMBUS_QUICK           0x00010000
 #define I2C_FUNC_SMBUS_READ_BYTE       0x00020000
index c91171599cb68825709aa12a9d79e80026bd9dfb..e68a8e53bb59acf87c2dd07259f77ce491d5ec1e 100644 (file)
@@ -142,8 +142,6 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
 extern int __must_check
 request_percpu_irq(unsigned int irq, irq_handler_t handler,
                   const char *devname, void __percpu *percpu_dev_id);
-
-extern void exit_irq_thread(void);
 #else
 
 extern int __must_check
@@ -177,8 +175,6 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler,
 {
        return request_irq(irq, handler, 0, devname, percpu_dev_id);
 }
-
-static inline void exit_irq_thread(void) { }
 #endif
 
 extern void free_irq(unsigned int, void *);
index 1a30180630343dabd49aa886fc7621964912afdf..df38db2ef45bbf50def2b9de1970225c8a76c6de 100644 (file)
@@ -6,11 +6,7 @@
 #include <linux/workqueue.h>
 
 enum {
-       ICQ_IOPRIO_CHANGED      = 1 << 0,
-       ICQ_CGROUP_CHANGED      = 1 << 1,
        ICQ_EXITED              = 1 << 2,
-
-       ICQ_CHANGED_MASK        = ICQ_IOPRIO_CHANGED | ICQ_CGROUP_CHANGED,
 };
 
 /*
@@ -100,6 +96,7 @@ struct io_cq {
  */
 struct io_context {
        atomic_long_t refcount;
+       atomic_t active_ref;
        atomic_t nr_tasks;
 
        /* all the fields below are protected by this lock */
@@ -120,29 +117,37 @@ struct io_context {
        struct work_struct release_work;
 };
 
-static inline struct io_context *ioc_task_link(struct io_context *ioc)
+/**
+ * get_io_context_active - get active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Only iocs with active reference can issue new IOs.  This function
+ * acquires an active reference on @ioc.  The caller must already have an
+ * active reference on @ioc.
+ */
+static inline void get_io_context_active(struct io_context *ioc)
 {
-       /*
-        * if ref count is zero, don't allow sharing (ioc is going away, it's
-        * a race).
-        */
-       if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
-               atomic_inc(&ioc->nr_tasks);
-               return ioc;
-       }
+       WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
+       WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
+       atomic_long_inc(&ioc->refcount);
+       atomic_inc(&ioc->active_ref);
+}
+
+static inline void ioc_task_link(struct io_context *ioc)
+{
+       get_io_context_active(ioc);
 
-       return NULL;
+       WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
+       atomic_inc(&ioc->nr_tasks);
 }
 
 struct task_struct;
 #ifdef CONFIG_BLOCK
 void put_io_context(struct io_context *ioc);
+void put_io_context_active(struct io_context *ioc);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_task_io_context(struct task_struct *task,
                                       gfp_t gfp_flags, int node);
-void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
-void ioc_cgroup_changed(struct io_context *ioc);
-unsigned int icq_get_changed(struct io_cq *icq);
 #else
 struct io_context;
 static inline void put_io_context(struct io_context *ioc) { }
index d937580417ba668d343b30b1741d59139f7924b9..450293f6d68b6a9bfdbd74fdc3304a63c122505a 100644 (file)
@@ -35,12 +35,13 @@ struct iommu_domain;
 #define IOMMU_FAULT_WRITE      0x1
 
 typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
-                               struct device *, unsigned long, int);
+                       struct device *, unsigned long, int, void *);
 
 struct iommu_domain {
        struct iommu_ops *ops;
        void *priv;
        iommu_fault_handler_t handler;
+       void *handler_token;
 };
 
 #define IOMMU_CAP_CACHE_COHERENCY      0x1
@@ -95,7 +96,7 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 extern int iommu_domain_has_cap(struct iommu_domain *domain,
                                unsigned long cap);
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
-                                       iommu_fault_handler_t handler);
+                       iommu_fault_handler_t handler, void *token);
 extern int iommu_device_group(struct device *dev, unsigned int *groupid);
 
 /**
@@ -132,7 +133,8 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
         * invoke it.
         */
        if (domain->handler)
-               ret = domain->handler(domain, dev, iova, flags);
+               ret = domain->handler(domain, dev, iova, flags,
+                                               domain->handler_token);
 
        return ret;
 }
@@ -191,7 +193,7 @@ static inline int domain_has_cap(struct iommu_domain *domain,
 }
 
 static inline void iommu_set_fault_handler(struct iommu_domain *domain,
-                                       iommu_fault_handler_t handler)
+                               iommu_fault_handler_t handler, void *token)
 {
 }
 
index 76dad48088474462b5f21deeff0414b40aca6b8e..beb9ce1c2c233595e47602d264782e56fdfc4682 100644 (file)
@@ -42,26 +42,14 @@ enum {
 };
 
 /*
- * if process has set io priority explicitly, use that. if not, convert
- * the cpu scheduler nice value to an io priority
+ * Fallback BE priority
  */
 #define IOPRIO_NORM    (4)
-static inline int task_ioprio(struct io_context *ioc)
-{
-       if (ioprio_valid(ioc->ioprio))
-               return IOPRIO_PRIO_DATA(ioc->ioprio);
-
-       return IOPRIO_NORM;
-}
-
-static inline int task_ioprio_class(struct io_context *ioc)
-{
-       if (ioprio_valid(ioc->ioprio))
-               return IOPRIO_PRIO_CLASS(ioc->ioprio);
-
-       return IOPRIO_CLASS_BE;
-}
 
+/*
+ * if process has set io priority explicitly, use that. if not, convert
+ * the cpu scheduler nice value to an io priority
+ */
 static inline int task_nice_ioprio(struct task_struct *task)
 {
        return (task_nice(task) + 20) / 5;
index 8a297a5e794cc8e51c22351098b80a35ce43ef09..5499c92a91539afcc0987d49fe6477acad2d16e4 100644 (file)
@@ -62,6 +62,8 @@ struct ipc_namespace {
        unsigned int    mq_queues_max;   /* initialized to DFLT_QUEUESMAX */
        unsigned int    mq_msg_max;      /* initialized to DFLT_MSGMAX */
        unsigned int    mq_msgsize_max;  /* initialized to DFLT_MSGSIZEMAX */
+       unsigned int    mq_msg_default;
+       unsigned int    mq_msgsize_default;
 
        /* user_ns which owns the ipc ns */
        struct user_namespace *user_ns;
@@ -90,11 +92,41 @@ static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {}
 
 #ifdef CONFIG_POSIX_MQUEUE
 extern int mq_init_ns(struct ipc_namespace *ns);
-/* default values */
-#define DFLT_QUEUESMAX 256     /* max number of message queues */
-#define DFLT_MSGMAX    10      /* max number of messages in each queue */
-#define HARD_MSGMAX    (32768*sizeof(void *)/4)
-#define DFLT_MSGSIZEMAX 8192   /* max message size */
+/*
+ * POSIX Message Queue default values:
+ *
+ * MIN_*: Lowest value an admin can set the maximum unprivileged limit to
+ * DFLT_*MAX: Default values for the maximum unprivileged limits
+ * DFLT_{MSG,MSGSIZE}: Default values used when the user doesn't supply
+ *   an attribute to the open call and the queue must be created
+ * HARD_*: Highest value the maximums can be set to.  These are enforced
+ *   on CAP_SYS_RESOURCE apps as well making them inviolate (so make them
+ *   suitably high)
+ *
+ * POSIX Requirements:
+ *   Per app minimum openable message queues - 8.  This does not map well
+ *     to the fact that we limit the number of queues on a per namespace
+ *     basis instead of a per app basis.  So, make the default high enough
+ *     that no given app should have a hard time opening 8 queues.
+ *   Minimum maximum for HARD_MSGMAX - 32767.  I bumped this to 65536.
+ *   Minimum maximum for HARD_MSGSIZEMAX - POSIX is silent on this.  However,
+ *     we have run into a situation where running applications in the wild
+ *     require this to be at least 5MB, and preferably 10MB, so I set the
+ *     value to 16MB in hopes that this user is the worst of the bunch and
+ *     the new maximum will handle anyone else.  I may have to revisit this
+ *     in the future.
+ */
+#define MIN_QUEUESMAX                  1
+#define DFLT_QUEUESMAX               256
+#define HARD_QUEUESMAX              1024
+#define MIN_MSGMAX                     1
+#define DFLT_MSG                      10U
+#define DFLT_MSGMAX                   10
+#define HARD_MSGMAX                65536
+#define MIN_MSGSIZEMAX               128
+#define DFLT_MSGSIZE                8192U
+#define DFLT_MSGSIZEMAX                     8192
+#define HARD_MSGSIZEMAX            (16*1024*1024)
 #else
 static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
 #endif
index d211732b9e99c8a6d95bff50b56505c651285a70..c8f32975f0e41699164c772701a183184379ba32 100644 (file)
@@ -479,12 +479,6 @@ struct transaction_s
         * How many handles used this transaction? [t_handle_lock]
         */
        int t_handle_count;
-
-       /*
-        * This transaction is being forced and some process is
-        * waiting for it to finish.
-        */
-       unsigned int t_synchronous_commit:1;
 };
 
 /**
@@ -531,6 +525,8 @@ struct transaction_s
  *  transaction
  * @j_commit_request: Sequence number of the most recent transaction wanting
  *     commit
+ * @j_commit_waited: Sequence number of the most recent transaction someone
+ *     is waiting for to commit.
  * @j_uuid: Uuid of client object.
  * @j_task: Pointer to the current commit thread for this journal
  * @j_max_transaction_buffers:  Maximum number of metadata buffers to allow in a
@@ -695,6 +691,13 @@ struct journal_s
         */
        tid_t                   j_commit_request;
 
+       /*
+        * Sequence number of the most recent transaction someone is waiting
+        * for to commit.
+        * [j_state_lock]
+        */
+       tid_t                   j_commit_waited;
+
        /*
         * Journal uuid: identifies the object (filesystem, LVM volume etc)
         * backed by this journal.  This will eventually be replaced by an array
@@ -861,7 +864,8 @@ extern int     journal_destroy    (journal_t *);
 extern int        journal_recover    (journal_t *journal);
 extern int        journal_wipe       (journal_t *, int);
 extern int        journal_skip_recovery        (journal_t *);
-extern void       journal_update_superblock    (journal_t *, int);
+extern void       journal_update_sb_log_tail   (journal_t *, tid_t, unsigned int,
+                                                int);
 extern void       journal_abort      (journal_t *, int);
 extern int        journal_errno      (journal_t *);
 extern void       journal_ack_err    (journal_t *);
index 912c30a8ddb1e47cd732fbd95f281238ca601ae0..f334c7fab96762ab4131c9886df87d4d6d4dde9d 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/mutex.h>
 #include <linux/timer.h>
 #include <linux/slab.h>
+#include <crypto/hash.h>
 #endif
 
 #define journal_oom_retry 1
@@ -147,12 +148,24 @@ typedef struct journal_header_s
 #define JBD2_CRC32_CHKSUM   1
 #define JBD2_MD5_CHKSUM     2
 #define JBD2_SHA1_CHKSUM    3
+#define JBD2_CRC32C_CHKSUM  4
 
 #define JBD2_CRC32_CHKSUM_SIZE 4
 
 #define JBD2_CHECKSUM_BYTES (32 / sizeof(u32))
 /*
  * Commit block header for storing transactional checksums:
+ *
+ * NOTE: If FEATURE_COMPAT_CHECKSUM (checksum v1) is set, the h_chksum*
+ * fields are used to store a checksum of the descriptor and data blocks.
+ *
+ * If FEATURE_INCOMPAT_CSUM_V2 (checksum v2) is set, then the h_chksum
+ * field is used to store crc32c(uuid+commit_block).  Each journal metadata
+ * block gets its own checksum, and data block checksums are stored in
+ * journal_block_tag (in the descriptor).  The other h_chksum* fields are
+ * not used.
+ *
+ * Checksum v1 and v2 are mutually exclusive features.
  */
 struct commit_header {
        __be32          h_magic;
@@ -175,13 +188,19 @@ struct commit_header {
 typedef struct journal_block_tag_s
 {
        __be32          t_blocknr;      /* The on-disk block number */
-       __be32          t_flags;        /* See below */
+       __be16          t_checksum;     /* truncated crc32c(uuid+seq+block) */
+       __be16          t_flags;        /* See below */
        __be32          t_blocknr_high; /* most-significant high 32bits. */
 } journal_block_tag_t;
 
 #define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
 #define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
 
+/* Tail of descriptor block, for checksumming */
+struct jbd2_journal_block_tail {
+       __be32          t_checksum;     /* crc32c(uuid+descr_block) */
+};
+
 /*
  * The revoke descriptor: used on disk to describe a series of blocks to
  * be revoked from the log
@@ -192,6 +211,10 @@ typedef struct jbd2_journal_revoke_header_s
        __be32           r_count;       /* Count of bytes used in the block */
 } jbd2_journal_revoke_header_t;
 
+/* Tail of revoke block, for checksumming */
+struct jbd2_journal_revoke_tail {
+       __be32          r_checksum;     /* crc32c(uuid+revoke_block) */
+};
 
 /* Definitions for the journal tag flags word: */
 #define JBD2_FLAG_ESCAPE               1       /* on-disk block is escaped */
@@ -241,7 +264,10 @@ typedef struct journal_superblock_s
        __be32  s_max_trans_data;       /* Limit of data blocks per trans. */
 
 /* 0x0050 */
-       __u32   s_padding[44];
+       __u8    s_checksum_type;        /* checksum type */
+       __u8    s_padding2[3];
+       __u32   s_padding[42];
+       __be32  s_checksum;             /* crc32c(superblock) */
 
 /* 0x0100 */
        __u8    s_users[16*48];         /* ids of all fs'es sharing the log */
@@ -263,13 +289,15 @@ typedef struct journal_superblock_s
 #define JBD2_FEATURE_INCOMPAT_REVOKE           0x00000001
 #define JBD2_FEATURE_INCOMPAT_64BIT            0x00000002
 #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT     0x00000004
+#define JBD2_FEATURE_INCOMPAT_CSUM_V2          0x00000008
 
 /* Features known to this kernel version: */
 #define JBD2_KNOWN_COMPAT_FEATURES     JBD2_FEATURE_COMPAT_CHECKSUM
 #define JBD2_KNOWN_ROCOMPAT_FEATURES   0
 #define JBD2_KNOWN_INCOMPAT_FEATURES   (JBD2_FEATURE_INCOMPAT_REVOKE | \
                                        JBD2_FEATURE_INCOMPAT_64BIT | \
-                                       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)
+                                       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
+                                       JBD2_FEATURE_INCOMPAT_CSUM_V2)
 
 #ifdef __KERNEL__
 
@@ -939,6 +967,12 @@ struct journal_s
         * superblock pointer here
         */
        void *j_private;
+
+       /* Reference to checksum algorithm driver via cryptoapi */
+       struct crypto_shash *j_chksum_driver;
+
+       /* Precomputed journal UUID checksum for seeding other checksums */
+       __u32 j_csum_seed;
 };
 
 /*
@@ -1268,6 +1302,25 @@ static inline int jbd_space_needed(journal_t *journal)
 
 extern int jbd_blocks_per_page(struct inode *inode);
 
+static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
+                             const void *address, unsigned int length)
+{
+       struct {
+               struct shash_desc shash;
+               char ctx[crypto_shash_descsize(journal->j_chksum_driver)];
+       } desc;
+       int err;
+
+       desc.shash.tfm = journal->j_chksum_driver;
+       desc.shash.flags = 0;
+       *(u32 *)desc.ctx = crc;
+
+       err = crypto_shash_update(&desc.shash, address, length);
+       BUG_ON(err);
+
+       return *(u32 *)desc.ctx;
+}
+
 #ifdef __KERNEL__
 
 #define buffer_trace_init(bh)  do {} while (0)
index 6230f8556a4eeac37bcaaa83a4cc913177e09c56..6133679bc4c01ace20a0114fd50ff7c3481c7eb9 100644 (file)
@@ -12,6 +12,7 @@ enum jbd_state_bits {
        BH_State,               /* Pins most journal_head state */
        BH_JournalHead,         /* Pins bh->b_private and jh->b_bh */
        BH_Unshadow,            /* Dummy bit, for BJ_Shadow wakeup filtering */
+       BH_Verified,            /* Metadata block has been verified ok */
        BH_JBDPrivateStart,     /* First bit available for private use by FS */
 };
 
@@ -24,6 +25,7 @@ TAS_BUFFER_FNS(Revoked, revoked)
 BUFFER_FNS(RevokeValid, revokevalid)
 TAS_BUFFER_FNS(RevokeValid, revokevalid)
 BUFFER_FNS(Freed, freed)
+BUFFER_FNS(Verified, verified)
 
 static inline struct buffer_head *jh2bh(struct journal_head *jh)
 {
index 387571959dd9a1219c90dc952c29f8382698f397..6883e197acb9e939156c4934d9cc7150b1b107f5 100644 (file)
@@ -36,6 +36,7 @@ const char *kallsyms_lookup(unsigned long addr,
 
 /* Look up a kernel symbol and return it in a text buffer. */
 extern int sprint_symbol(char *buffer, unsigned long address);
+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
 extern int sprint_backtrace(char *buffer, unsigned long address);
 
 /* Look up a kernel symbol and print it to the kernel messages. */
@@ -80,6 +81,12 @@ static inline int sprint_symbol(char *buffer, unsigned long addr)
        return 0;
 }
 
+static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr)
+{
+       *buffer = '\0';
+       return 0;
+}
+
 static inline int sprint_backtrace(char *buffer, unsigned long addr)
 {
        *buffer = '\0';
diff --git a/include/linux/kcmp.h b/include/linux/kcmp.h
new file mode 100644 (file)
index 0000000..2dcd1b3
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef _LINUX_KCMP_H
+#define _LINUX_KCMP_H
+
+/* Comparison type */
+enum kcmp_type {
+       KCMP_FILE,
+       KCMP_VM,
+       KCMP_FILES,
+       KCMP_FS,
+       KCMP_SIGHAND,
+       KCMP_IO,
+       KCMP_SYSVSEM,
+
+       KCMP_TYPES,
+};
+
+#endif /* _LINUX_KCMP_H */
index 26a65711676f421dd1e287734ac4f781c713c8b0..a1bdf6966357b6b914d101c7545ef306fa395bdb 100644 (file)
@@ -32,6 +32,8 @@
 #define KPF_KSM                        21
 #define KPF_THP                        22
 
+#ifdef __KERNEL__
+
 /* kernel hacking assistances
  * WARNING: subject to change, never rely on them!
  */
@@ -44,4 +46,6 @@
 #define KPF_ARCH               38
 #define KPF_UNCACHED           39
 
+#endif /* __KERNEL__ */
+
 #endif /* LINUX_KERNEL_PAGE_FLAGS_H */
index ec55a3c8ba77db1ceee13a8e2cff5b014895b2e2..e07f5e0c5df4400eb34ac0d89150003e512381b0 100644 (file)
@@ -35,6 +35,7 @@
 #define LLONG_MAX      ((long long)(~0ULL>>1))
 #define LLONG_MIN      (-LLONG_MAX - 1)
 #define ULLONG_MAX     (~0ULL)
+#define SIZE_MAX       (~(size_t)0)
 
 #define STACK_MAGIC    0xdeadbeef
 
index 0d7d6a1b172f29fde03d030168b253197b84f479..37c5f7261142c24fa582121d2bdfd9479285f681 100644 (file)
@@ -1,8 +1,58 @@
 #ifndef LINUX_KEXEC_H
 #define LINUX_KEXEC_H
 
-#ifdef CONFIG_KEXEC
+/* kexec system call -  It loads the new kernel to boot into.
+ * kexec does not sync, or unmount filesystems so if you need
+ * that to happen you need to do that yourself.
+ */
+
 #include <linux/types.h>
+
+/* kexec flags for different usage scenarios */
+#define KEXEC_ON_CRASH         0x00000001
+#define KEXEC_PRESERVE_CONTEXT 0x00000002
+#define KEXEC_ARCH_MASK                0xffff0000
+
+/* These values match the ELF architecture values.
+ * Unless there is a good reason that should continue to be the case.
+ */
+#define KEXEC_ARCH_DEFAULT ( 0 << 16)
+#define KEXEC_ARCH_386     ( 3 << 16)
+#define KEXEC_ARCH_X86_64  (62 << 16)
+#define KEXEC_ARCH_PPC     (20 << 16)
+#define KEXEC_ARCH_PPC64   (21 << 16)
+#define KEXEC_ARCH_IA_64   (50 << 16)
+#define KEXEC_ARCH_ARM     (40 << 16)
+#define KEXEC_ARCH_S390    (22 << 16)
+#define KEXEC_ARCH_SH      (42 << 16)
+#define KEXEC_ARCH_MIPS_LE (10 << 16)
+#define KEXEC_ARCH_MIPS    ( 8 << 16)
+
+/* The artificial cap on the number of segments passed to kexec_load. */
+#define KEXEC_SEGMENT_MAX 16
+
+#ifndef __KERNEL__
+/*
+ * This structure is used to hold the arguments that are used when
+ * loading  kernel binaries.
+ */
+struct kexec_segment {
+       const void *buf;
+       size_t bufsz;
+       const void *mem;
+       size_t memsz;
+};
+
+/* Load a new kernel image as described by the kexec_segment array
+ * consisting of passed number of segments at the entry-point address.
+ * The flags allow different useage types.
+ */
+extern int kexec_load(void *, size_t, struct kexec_segment *,
+               unsigned long int);
+#endif /* __KERNEL__ */
+
+#ifdef __KERNEL__
+#ifdef CONFIG_KEXEC
 #include <linux/list.h>
 #include <linux/linkage.h>
 #include <linux/compat.h>
@@ -67,11 +117,10 @@ typedef unsigned long kimage_entry_t;
 #define IND_DONE         0x4
 #define IND_SOURCE       0x8
 
-#define KEXEC_SEGMENT_MAX 16
 struct kexec_segment {
        void __user *buf;
        size_t bufsz;
-       unsigned long mem;      /* User space sees this as a (void *) ... */
+       unsigned long mem;
        size_t memsz;
 };
 
@@ -175,25 +224,6 @@ extern struct kimage *kexec_crash_image;
 #define kexec_flush_icache_page(page)
 #endif
 
-#define KEXEC_ON_CRASH         0x00000001
-#define KEXEC_PRESERVE_CONTEXT 0x00000002
-#define KEXEC_ARCH_MASK                0xffff0000
-
-/* These values match the ELF architecture values.
- * Unless there is a good reason that should continue to be the case.
- */
-#define KEXEC_ARCH_DEFAULT ( 0 << 16)
-#define KEXEC_ARCH_386     ( 3 << 16)
-#define KEXEC_ARCH_X86_64  (62 << 16)
-#define KEXEC_ARCH_PPC     (20 << 16)
-#define KEXEC_ARCH_PPC64   (21 << 16)
-#define KEXEC_ARCH_IA_64   (50 << 16)
-#define KEXEC_ARCH_ARM     (40 << 16)
-#define KEXEC_ARCH_S390    (22 << 16)
-#define KEXEC_ARCH_SH      (42 << 16)
-#define KEXEC_ARCH_MIPS_LE (10 << 16)
-#define KEXEC_ARCH_MIPS    ( 8 << 16)
-
 /* List of defined/legal kexec flags */
 #ifndef CONFIG_KEXEC_JUMP
 #define KEXEC_FLAGS    KEXEC_ON_CRASH
@@ -228,4 +258,5 @@ struct task_struct;
 static inline void crash_kexec(struct pt_regs *regs) { }
 static inline int kexec_should_crash(struct task_struct *p) { return 0; }
 #endif /* CONFIG_KEXEC */
+#endif /* __KERNEL__ */
 #endif /* LINUX_KEXEC_H */
index 5231800770e1ea3b8cc0a9b081668b4edbdf55b8..4cd22ed627efd79205860b0efa6dde15a07897ed 100644 (file)
@@ -308,9 +308,6 @@ static inline bool key_is_instantiated(const struct key *key)
 #ifdef CONFIG_SYSCTL
 extern ctl_table key_sysctls[];
 #endif
-
-extern void key_replace_session_keyring(void);
-
 /*
  * the userspace interface
  */
@@ -334,7 +331,6 @@ extern void key_init(void);
 #define key_fsuid_changed(t)           do { } while(0)
 #define key_fsgid_changed(t)           do { } while(0)
 #define key_init()                     do { } while(0)
-#define key_replace_session_keyring()  do { } while(0)
 
 #endif /* CONFIG_KEYS */
 #endif /* __KERNEL__ */
index dd99c329e1616ec76af1c9e8b2d11cee08434aea..5398d5807075cd2649f99363e32c49e09c5d6a75 100644 (file)
@@ -66,40 +66,10 @@ struct subprocess_info {
        void *data;
 };
 
-/* Allocate a subprocess_info structure */
-struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
-                                                 char **envp, gfp_t gfp_mask);
-
-/* Set various pieces of state into the subprocess_info structure */
-void call_usermodehelper_setfns(struct subprocess_info *info,
-                   int (*init)(struct subprocess_info *info, struct cred *new),
-                   void (*cleanup)(struct subprocess_info *info),
-                   void *data);
-
-/* Actually execute the sub-process */
-int call_usermodehelper_exec(struct subprocess_info *info, int wait);
-
-/* Free the subprocess_info. This is only needed if you're not going
-   to call call_usermodehelper_exec */
-void call_usermodehelper_freeinfo(struct subprocess_info *info);
-
-static inline int
+extern int
 call_usermodehelper_fns(char *path, char **argv, char **envp, int wait,
                        int (*init)(struct subprocess_info *info, struct cred *new),
-                       void (*cleanup)(struct subprocess_info *), void *data)
-{
-       struct subprocess_info *info;
-       gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
-
-       info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
-
-       if (info == NULL)
-               return -ENOMEM;
-
-       call_usermodehelper_setfns(info, init, cleanup, data);
-
-       return call_usermodehelper_exec(info, wait);
-}
+                       void (*cleanup)(struct subprocess_info *), void *data);
 
 static inline int
 call_usermodehelper(char *path, char **argv, char **envp, int wait)
index 6c322a90b92f8439886a954b37d58f62f29abb43..09f2b3aa2da7e8b674943ac36b9bdd618f0b5611 100644 (file)
@@ -449,6 +449,30 @@ struct kvm_ppc_pvinfo {
        __u8  pad[108];
 };
 
+/* for KVM_PPC_GET_SMMU_INFO */
+#define KVM_PPC_PAGE_SIZES_MAX_SZ      8
+
+struct kvm_ppc_one_page_size {
+       __u32 page_shift;       /* Page shift (or 0) */
+       __u32 pte_enc;          /* Encoding in the HPTE (>>12) */
+};
+
+struct kvm_ppc_one_seg_page_size {
+       __u32 page_shift;       /* Base page shift of segment (or 0) */
+       __u32 slb_enc;          /* SLB encoding for BookS */
+       struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
+};
+
+#define KVM_PPC_PAGE_SIZES_REAL                0x00000001
+#define KVM_PPC_1T_SEGMENTS            0x00000002
+
+struct kvm_ppc_smmu_info {
+       __u64 flags;
+       __u32 slb_size;
+       __u32 pad;
+       struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
+};
+
 #define KVMIO 0xAE
 
 /* machine type bits, to be used as argument to KVM_CREATE_VM */
@@ -589,6 +613,10 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_S390_UCONTROL 73
 #define KVM_CAP_SYNC_REGS 74
 #define KVM_CAP_PCI_2_3 75
+#define KVM_CAP_KVMCLOCK_CTRL 76
+#define KVM_CAP_SIGNAL_MSI 77
+#define KVM_CAP_PPC_GET_SMMU_INFO 78
+#define KVM_CAP_S390_COW 79
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -714,6 +742,14 @@ struct kvm_one_reg {
        __u64 addr;
 };
 
+struct kvm_msi {
+       __u32 address_lo;
+       __u32 address_hi;
+       __u32 data;
+       __u32 flags;
+       __u8  pad[16];
+};
+
 /*
  * ioctls for VM fds
  */
@@ -788,6 +824,10 @@ struct kvm_s390_ucas_mapping {
 /* Available with KVM_CAP_PCI_2_3 */
 #define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
                                       struct kvm_assigned_pci_dev)
+/* Available with KVM_CAP_SIGNAL_MSI */
+#define KVM_SIGNAL_MSI            _IOW(KVMIO,  0xa5, struct kvm_msi)
+/* Available with KVM_CAP_PPC_GET_SMMU_INFO */
+#define KVM_PPC_GET_SMMU_INFO    _IOR(KVMIO,  0xa6, struct kvm_ppc_smmu_info)
 
 /*
  * ioctls for vcpu fds
@@ -859,6 +899,8 @@ struct kvm_s390_ucas_mapping {
 /* Available with KVM_CAP_ONE_REG */
 #define KVM_GET_ONE_REG                  _IOW(KVMIO,  0xab, struct kvm_one_reg)
 #define KVM_SET_ONE_REG                  _IOW(KVMIO,  0xac, struct kvm_one_reg)
+/* VM is being stopped by host */
+#define KVM_KVMCLOCK_CTRL        _IO(KVMIO,   0xad)
 
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU    (1 << 0)
 #define KVM_DEV_ASSIGN_PCI_2_3         (1 << 1)
index 72cbf08d45fbc878d5198eda807b12dbdffa6268..c4464356b35b0af21eaafe6cbd1d2d7b4f549814 100644 (file)
 #define KVM_MMIO_SIZE 8
 #endif
 
+/*
+ * If we support unaligned MMIO, at most one fragment will be split into two:
+ */
+#ifdef KVM_UNALIGNED_MMIO
+#  define KVM_EXTRA_MMIO_FRAGMENTS 1
+#else
+#  define KVM_EXTRA_MMIO_FRAGMENTS 0
+#endif
+
+#define KVM_USER_MMIO_SIZE 8
+
+#define KVM_MAX_MMIO_FRAGMENTS \
+       (KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
+
 /*
  * vcpu->requests bit members
  */
@@ -68,10 +82,11 @@ struct kvm_io_range {
        struct kvm_io_device *dev;
 };
 
+#define NR_IOBUS_DEVS 1000
+
 struct kvm_io_bus {
        int                   dev_count;
-#define NR_IOBUS_DEVS 300
-       struct kvm_io_range range[NR_IOBUS_DEVS];
+       struct kvm_io_range range[];
 };
 
 enum kvm_bus {
@@ -113,7 +128,18 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
-       EXITING_GUEST_MODE
+       EXITING_GUEST_MODE,
+       READING_SHADOW_PAGE_TABLES,
+};
+
+/*
+ * Sometimes a large or cross-page mmio needs to be broken up into separate
+ * exits for userspace servicing.
+ */
+struct kvm_mmio_fragment {
+       gpa_t gpa;
+       void *data;
+       unsigned len;
 };
 
 struct kvm_vcpu {
@@ -143,10 +169,9 @@ struct kvm_vcpu {
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
-       int mmio_size;
-       int mmio_index;
-       unsigned char mmio_data[KVM_MMIO_SIZE];
-       gpa_t mmio_phys_addr;
+       int mmio_cur_fragment;
+       int mmio_nr_fragments;
+       struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
 #endif
 
 #ifdef CONFIG_KVM_ASYNC_PF
@@ -178,8 +203,6 @@ struct kvm_memory_slot {
        unsigned long flags;
        unsigned long *rmap;
        unsigned long *dirty_bitmap;
-       unsigned long *dirty_bitmap_head;
-       unsigned long nr_dirty_pages;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        int user_alloc;
@@ -438,6 +461,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
                             gfn_t gfn);
 
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
@@ -506,6 +531,7 @@ int kvm_arch_hardware_setup(void);
 void kvm_arch_hardware_unsetup(void);
 void kvm_arch_check_processor_compat(void *rtn);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
 
 void kvm_free_physmem(struct kvm *kvm);
 
@@ -521,6 +547,15 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
 }
 #endif
 
+static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+{
+#ifdef __KVM_HAVE_ARCH_WQP
+       return vcpu->arch.wqp;
+#else
+       return &vcpu->wq;
+#endif
+}
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
 void kvm_arch_destroy_vm(struct kvm *kvm);
 void kvm_free_all_assigned_devices(struct kvm *kvm);
@@ -769,6 +804,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
                        unsigned flags);
 void kvm_free_irq_routing(struct kvm *kvm);
 
+int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
+
 #else
 
 static inline void kvm_free_irq_routing(struct kvm *kvm) {}
index 8877123f2d6e61dd8b22da7c9b8994f13d05e7f9..e00c3b0ebc6bd7303afdb0ffa448a31c0d000911 100644 (file)
@@ -40,6 +40,16 @@ struct lcd_ops {
        /* Get the LCD panel power status (0: full on, 1..3: controller
           power on, flat panel power off, 4: full off), see FB_BLANK_XXX */
        int (*get_power)(struct lcd_device *);
+       /*
+        * Enable or disable power to the LCD(0: on; 4: off, see FB_BLANK_XXX)
+        * and this callback would be called proir to fb driver's callback.
+        *
+        * P.S. note that if early_set_power is not NULL then early fb notifier
+        *      would be registered.
+        */
+       int (*early_set_power)(struct lcd_device *, int power);
+       /* revert the effects of the early blank event. */
+       int (*r_early_set_power)(struct lcd_device *, int power);
        /* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */
        int (*set_power)(struct lcd_device *, int power);
        /* Get the current contrast setting (0-max_contrast) */
index eeae6e742471f0bbf796439ba21668e511bc4df5..4b133479d6ea30ab5eab246931a04d6cfd353abd 100644 (file)
@@ -92,7 +92,7 @@ struct lm3530_pwm_data {
  * @als2_resistor_sel: internal resistance from ALS2 input to ground
  * @als_vmin: als input voltage calibrated for max brightness in mV
  * @als_vmax: als input voltage calibrated for min brightness in mV
- * @brt_val: brightness value (0-255)
+ * @brt_val: brightness value (0-127)
  * @pwm_data: PWM control functions (only valid when the mode is PWM)
  */
 struct lm3530_platform_data {
index 5884def15a24872a2b4be5ef9abe815fad233388..39eee41d8c6f4deee39d6904087a567657a2349e 100644 (file)
@@ -73,6 +73,8 @@ struct led_classdev {
        struct led_trigger      *trigger;
        struct list_head         trig_list;
        void                    *trigger_data;
+       /* true if activated - deactivate routine uses it to do cleanup */
+       bool                    activated;
 #endif
 };
 
index 87f402ccec55567330943ab774ffb12ae21c7da8..f01e5f6d1f07a4966927bb7acd5707f8f77904c8 100644 (file)
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
 #include <linux/cpu.h>
+#include <linux/notifier.h>
 
 /* can make br locks by using local lock for read side, global lock for write */
-#define br_lock_init(name)     name##_lock_init()
-#define br_read_lock(name)     name##_local_lock()
-#define br_read_unlock(name)   name##_local_unlock()
-#define br_write_lock(name)    name##_global_lock_online()
-#define br_write_unlock(name)  name##_global_unlock_online()
+#define br_lock_init(name)     lg_lock_init(name, #name)
+#define br_read_lock(name)     lg_local_lock(name)
+#define br_read_unlock(name)   lg_local_unlock(name)
+#define br_write_lock(name)    lg_global_lock(name)
+#define br_write_unlock(name)  lg_global_unlock(name)
 
-#define DECLARE_BRLOCK(name)   DECLARE_LGLOCK(name)
 #define DEFINE_BRLOCK(name)    DEFINE_LGLOCK(name)
 
-
-#define lg_lock_init(name)     name##_lock_init()
-#define lg_local_lock(name)    name##_local_lock()
-#define lg_local_unlock(name)  name##_local_unlock()
-#define lg_local_lock_cpu(name, cpu)   name##_local_lock_cpu(cpu)
-#define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu)
-#define lg_global_lock(name)   name##_global_lock()
-#define lg_global_unlock(name) name##_global_unlock()
-#define lg_global_lock_online(name) name##_global_lock_online()
-#define lg_global_unlock_online(name) name##_global_unlock_online()
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define LOCKDEP_INIT_MAP lockdep_init_map
 
 #define DEFINE_LGLOCK_LOCKDEP(name)
 #endif
 
-
-#define DECLARE_LGLOCK(name)                                           \
- extern void name##_lock_init(void);                                   \
- extern void name##_local_lock(void);                                  \
- extern void name##_local_unlock(void);                                        \
- extern void name##_local_lock_cpu(int cpu);                           \
- extern void name##_local_unlock_cpu(int cpu);                         \
- extern void name##_global_lock(void);                                 \
- extern void name##_global_unlock(void);                               \
- extern void name##_global_lock_online(void);                          \
- extern void name##_global_unlock_online(void);                                \
+struct lglock {
+       arch_spinlock_t __percpu *lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lock_class_key lock_key;
+       struct lockdep_map    lock_dep_map;
+#endif
+};
 
 #define DEFINE_LGLOCK(name)                                            \
-                                                                       \
- DEFINE_SPINLOCK(name##_cpu_lock);                                     \
- cpumask_t name##_cpus __read_mostly;                                  \
- DEFINE_PER_CPU(arch_spinlock_t, name##_lock);                         \
- DEFINE_LGLOCK_LOCKDEP(name);                                          \
-                                                                       \
- static int                                                            \
- name##_lg_cpu_callback(struct notifier_block *nb,                     \
-                               unsigned long action, void *hcpu)       \
- {                                                                     \
-       switch (action & ~CPU_TASKS_FROZEN) {                           \
-       case CPU_UP_PREPARE:                                            \
-               spin_lock(&name##_cpu_lock);                            \
-               cpu_set((unsigned long)hcpu, name##_cpus);              \
-               spin_unlock(&name##_cpu_lock);                          \
-               break;                                                  \
-       case CPU_UP_CANCELED: case CPU_DEAD:                            \
-               spin_lock(&name##_cpu_lock);                            \
-               cpu_clear((unsigned long)hcpu, name##_cpus);            \
-               spin_unlock(&name##_cpu_lock);                          \
-       }                                                               \
-       return NOTIFY_OK;                                               \
- }                                                                     \
- static struct notifier_block name##_lg_cpu_notifier = {               \
-       .notifier_call = name##_lg_cpu_callback,                        \
- };                                                                    \
- void name##_lock_init(void) {                                         \
-       int i;                                                          \
-       LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
-       for_each_possible_cpu(i) {                                      \
-               arch_spinlock_t *lock;                                  \
-               lock = &per_cpu(name##_lock, i);                        \
-               *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;     \
-       }                                                               \
-       register_hotcpu_notifier(&name##_lg_cpu_notifier);              \
-       get_online_cpus();                                              \
-       for_each_online_cpu(i)                                          \
-               cpu_set(i, name##_cpus);                                \
-       put_online_cpus();                                              \
- }                                                                     \
- EXPORT_SYMBOL(name##_lock_init);                                      \
-                                                                       \
- void name##_local_lock(void) {                                                \
-       arch_spinlock_t *lock;                                          \
-       preempt_disable();                                              \
-       rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);     \
-       lock = &__get_cpu_var(name##_lock);                             \
-       arch_spin_lock(lock);                                           \
- }                                                                     \
- EXPORT_SYMBOL(name##_local_lock);                                     \
-                                                                       \
- void name##_local_unlock(void) {                                      \
-       arch_spinlock_t *lock;                                          \
-       rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);             \
-       lock = &__get_cpu_var(name##_lock);                             \
-       arch_spin_unlock(lock);                                         \
-       preempt_enable();                                               \
- }                                                                     \
- EXPORT_SYMBOL(name##_local_unlock);                                   \
-                                                                       \
- void name##_local_lock_cpu(int cpu) {                                 \
-       arch_spinlock_t *lock;                                          \
-       preempt_disable();                                              \
-       rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);     \
-       lock = &per_cpu(name##_lock, cpu);                              \
-       arch_spin_lock(lock);                                           \
- }                                                                     \
- EXPORT_SYMBOL(name##_local_lock_cpu);                                 \
-                                                                       \
- void name##_local_unlock_cpu(int cpu) {                               \
-       arch_spinlock_t *lock;                                          \
-       rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);             \
-       lock = &per_cpu(name##_lock, cpu);                              \
-       arch_spin_unlock(lock);                                         \
-       preempt_enable();                                               \
- }                                                                     \
- EXPORT_SYMBOL(name##_local_unlock_cpu);                               \
-                                                                       \
- void name##_global_lock_online(void) {                                        \
-       int i;                                                          \
-       spin_lock(&name##_cpu_lock);                                    \
-       rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_cpu(i, &name##_cpus) {                                 \
-               arch_spinlock_t *lock;                                  \
-               lock = &per_cpu(name##_lock, i);                        \
-               arch_spin_lock(lock);                                   \
-       }                                                               \
- }                                                                     \
- EXPORT_SYMBOL(name##_global_lock_online);                             \
-                                                                       \
- void name##_global_unlock_online(void) {                              \
-       int i;                                                          \
-       rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_cpu(i, &name##_cpus) {                                 \
-               arch_spinlock_t *lock;                                  \
-               lock = &per_cpu(name##_lock, i);                        \
-               arch_spin_unlock(lock);                                 \
-       }                                                               \
-       spin_unlock(&name##_cpu_lock);                                  \
- }                                                                     \
- EXPORT_SYMBOL(name##_global_unlock_online);                           \
-                                                                       \
- void name##_global_lock(void) {                                       \
-       int i;                                                          \
-       preempt_disable();                                              \
-       rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_possible_cpu(i) {                                      \
-               arch_spinlock_t *lock;                                  \
-               lock = &per_cpu(name##_lock, i);                        \
-               arch_spin_lock(lock);                                   \
-       }                                                               \
- }                                                                     \
- EXPORT_SYMBOL(name##_global_lock);                                    \
-                                                                       \
- void name##_global_unlock(void) {                                     \
-       int i;                                                          \
-       rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_possible_cpu(i) {                                      \
-               arch_spinlock_t *lock;                                  \
-               lock = &per_cpu(name##_lock, i);                        \
-               arch_spin_unlock(lock);                                 \
-       }                                                               \
-       preempt_enable();                                               \
- }                                                                     \
- EXPORT_SYMBOL(name##_global_unlock);
+       DEFINE_LGLOCK_LOCKDEP(name);                                    \
+       DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)                  \
+       = __ARCH_SPIN_LOCK_UNLOCKED;                                    \
+       struct lglock name = { .lock = &name ## _lock }
+
+void lg_lock_init(struct lglock *lg, char *name);
+void lg_local_lock(struct lglock *lg);
+void lg_local_unlock(struct lglock *lg);
+void lg_local_lock_cpu(struct lglock *lg, int cpu);
+void lg_local_unlock_cpu(struct lglock *lg, int cpu);
+void lg_global_lock(struct lglock *lg);
+void lg_global_unlock(struct lglock *lg);
+
 #endif
index 11a966e5f829e9d9862589e393c1576780cfed48..4d24d64578c4c6f4baca418bad5b64ba5fc30555 100644 (file)
@@ -54,7 +54,7 @@ extern void   nlmclnt_done(struct nlm_host *host);
 
 extern int     nlmclnt_proc(struct nlm_host *host, int cmd,
                                        struct file_lock *fl);
-extern int     lockd_up(void);
-extern void    lockd_down(void);
+extern int     lockd_up(struct net *net);
+extern void    lockd_down(struct net *net);
 
 #endif /* LINUX_LOCKD_BIND_H */
index f94efd2f6c275b0ff14859e2451da53a6e3115a9..83e7ba90d6e5d5eb1b652ec1b7ce21dc47a4a9a9 100644 (file)
@@ -63,12 +63,7 @@ extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask);
 
 struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
-struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
-                                      enum lru_list);
-void mem_cgroup_lru_del_list(struct page *, enum lru_list);
-void mem_cgroup_lru_del(struct page *);
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
-                                        enum lru_list, enum lru_list);
+struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
 
 /* For coalescing uncharge for reducing memcg' overhead*/
 extern void mem_cgroup_uncharge_start(void);
@@ -79,6 +74,8 @@ extern void mem_cgroup_uncharge_cache_page(struct page *page);
 
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
                                     int order);
+bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
+                                 struct mem_cgroup *memcg);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);
 
 extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
@@ -92,10 +89,13 @@ static inline
 int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
 {
        struct mem_cgroup *memcg;
+       int match;
+
        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
+       match = __mem_cgroup_same_or_subtree(cgroup, memcg);
        rcu_read_unlock();
-       return cgroup == memcg;
+       return match;
 }
 
 extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
@@ -114,17 +114,11 @@ void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
 /*
  * For memory reclaim.
  */
-int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
-                                   struct zone *zone);
-int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
-                                   struct zone *zone);
+int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
+int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
-                                       int nid, int zid, unsigned int lrumask);
-struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
-                                                     struct zone *zone);
-struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page);
+unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
+void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                        struct task_struct *p);
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
@@ -251,25 +245,8 @@ static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
        return &zone->lruvec;
 }
 
-static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
-                                                    struct page *page,
-                                                    enum lru_list lru)
-{
-       return &zone->lruvec;
-}
-
-static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
-{
-}
-
-static inline void mem_cgroup_lru_del(struct page *page)
-{
-}
-
-static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
-                                                      struct page *page,
-                                                      enum lru_list from,
-                                                      enum lru_list to)
+static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
+                                                   struct zone *zone)
 {
        return &zone->lruvec;
 }
@@ -333,35 +310,27 @@ static inline bool mem_cgroup_disabled(void)
 }
 
 static inline int
-mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
+mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
 {
        return 1;
 }
 
 static inline int
-mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
+mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
 {
        return 1;
 }
 
 static inline unsigned long
-mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
-                               unsigned int lru_mask)
+mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
        return 0;
 }
 
-
-static inline struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
-{
-       return NULL;
-}
-
-static inline struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+static inline void
+mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+                             int increment)
 {
-       return NULL;
 }
 
 static inline void
index 7c727a90d70da6229afeb9c9cf7c8601841863e2..4aa42732e47f34ca29392180fbee0fc73a086cd2 100644 (file)
@@ -225,8 +225,8 @@ static inline void check_highest_zone(enum zone_type k)
                policy_zone = k;
 }
 
-int do_migrate_pages(struct mm_struct *mm,
-       const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
+int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+                    const nodemask_t *to, int flags);
 
 
 #ifdef CONFIG_TMPFS
@@ -354,9 +354,8 @@ static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
        return false;
 }
 
-static inline int do_migrate_pages(struct mm_struct *mm,
-                       const nodemask_t *from_nodes,
-                       const nodemask_t *to_nodes, int flags)
+static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+                                  const nodemask_t *to, int flags)
 {
        return 0;
 }
index fccc3002f271de0e3d7e46cf7ce2525794176b2b..91dd3ef63e9925dc8c99ebe3141260c58882e7f9 100644 (file)
@@ -7,6 +7,7 @@
 #ifndef MFD_AB8500_H
 #define MFD_AB8500_H
 
+#include <linux/atomic.h>
 #include <linux/mutex.h>
 
 struct device;
@@ -194,6 +195,14 @@ enum ab8500_version {
 #define AB9540_INT_GPIO52F             123
 #define AB9540_INT_GPIO53F             124
 #define AB9540_INT_GPIO54F             125 /* not 8505 */
+/* ab8500_irq_regoffset[16] -> IT[Source|Latch|Mask]25 */
+#define AB8505_INT_KEYSTUCK            128
+#define AB8505_INT_IKR                 129
+#define AB8505_INT_IKP                 130
+#define AB8505_INT_KP                  131
+#define AB8505_INT_KEYDEGLITCH         132
+#define AB8505_INT_MODPWRSTATUSF       134
+#define AB8505_INT_MODPWRSTATUSR       135
 
 /*
  * AB8500_AB9540_NR_IRQS is used when configuring the IRQ numbers for the
@@ -203,8 +212,8 @@ enum ab8500_version {
  * which is larger.
  */
 #define AB8500_NR_IRQS                 112
-#define AB8505_NR_IRQS                 128
-#define AB9540_NR_IRQS                 128
+#define AB8505_NR_IRQS                 136
+#define AB9540_NR_IRQS                 136
 /* This is set to the roof of any AB8500 chip variant IRQ counts */
 #define AB8500_MAX_NR_IRQS             AB9540_NR_IRQS
 
@@ -216,6 +225,7 @@ enum ab8500_version {
  * @dev: parent device
  * @lock: read/write operations lock
  * @irq_lock: genirq bus lock
+ * @transfer_ongoing: 0 if no transfer ongoing
  * @irq: irq line
  * @version: chip version id (e.g. ab8500 or ab9540)
  * @chip_id: chip revision id
@@ -234,7 +244,7 @@ struct ab8500 {
        struct device   *dev;
        struct mutex    lock;
        struct mutex    irq_lock;
-
+       atomic_t        transfer_ongoing;
        int             irq_base;
        int             irq;
        enum ab8500_version version;
@@ -280,6 +290,8 @@ extern int __devinit ab8500_init(struct ab8500 *ab8500,
                                 enum ab8500_version version);
 extern int __devexit ab8500_exit(struct ab8500 *ab8500);
 
+extern int ab8500_suspend(struct ab8500 *ab8500);
+
 static inline int is_ab8500(struct ab8500 *ab)
 {
        return ab->version == AB8500_VERSION_AB8500;
index 22c1007d3ec56b970f51fcaf176288decf379548..7f92acf03d9e37ee420b9f211bfeb68f9bef6f65 100644 (file)
@@ -34,7 +34,7 @@ struct anatop {
        spinlock_t reglock;
 };
 
-extern u32 anatop_get_bits(struct anatop *, u32, int, int);
-extern void anatop_set_bits(struct anatop *, u32, int, int, u32);
+extern u32 anatop_read_reg(struct anatop *, u32);
+extern void anatop_write_reg(struct anatop *, u32, u32, u32);
 
 #endif /*  __LINUX_MFD_ANATOP_H */
index ef6faa5cee46d49a3df43f0567146e777dc8c07c..e1148d037e7b63a6c9ee9c0159671ad82abd38cc 100644 (file)
@@ -31,6 +31,8 @@ struct asic3_platform_data {
 
        unsigned int gpio_base;
 
+       unsigned int clock_rate;
+
        struct asic3_led *leds;
 };
 
index 8313cd9658e391487ee07d085ad585071184da63..0507c4c21a7d18859a78cfc089a1ad4b8b47a327 100644 (file)
 
 #include <linux/mfd/da9052/reg.h>
 
+/* Common - HWMON Channel Definations */
+#define DA9052_ADC_VDDOUT      0
+#define DA9052_ADC_ICH         1
+#define DA9052_ADC_TBAT        2
+#define DA9052_ADC_VBAT        3
+#define DA9052_ADC_IN4         4
+#define DA9052_ADC_IN5         5
+#define DA9052_ADC_IN6         6
+#define DA9052_ADC_TSI         7
+#define DA9052_ADC_TJUNC       8
+#define DA9052_ADC_VBBAT       9
+
 #define DA9052_IRQ_DCIN        0
 #define DA9052_IRQ_VBUS        1
 #define DA9052_IRQ_DCINREM     2
@@ -79,6 +91,9 @@ struct da9052 {
        struct device *dev;
        struct regmap *regmap;
 
+       struct mutex auxadc_lock;
+       struct completion done;
+
        int irq_base;
        struct regmap_irq_chip_data *irq_data;
        u8 chip_id;
@@ -86,6 +101,10 @@ struct da9052 {
        int chip_irq;
 };
 
+/* ADC API */
+int da9052_adc_manual_read(struct da9052 *da9052, unsigned char channel);
+int da9052_adc_read_temp(struct da9052 *da9052);
+
 /* Device I/O API */
 static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg)
 {
diff --git a/include/linux/mfd/lm3533.h b/include/linux/mfd/lm3533.h
new file mode 100644 (file)
index 0000000..594bc59
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * lm3533.h -- LM3533 interface
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_LM3533_H
+#define __LINUX_MFD_LM3533_H
+
+#define LM3533_ATTR_RO(_name) \
+       DEVICE_ATTR(_name, S_IRUGO, show_##_name, NULL)
+#define LM3533_ATTR_RW(_name) \
+       DEVICE_ATTR(_name, S_IRUGO | S_IWUSR , show_##_name, store_##_name)
+
+struct device;
+struct regmap;
+
+struct lm3533 {
+       struct device *dev;
+
+       struct regmap *regmap;
+
+       int gpio_hwen;
+       int irq;
+
+       unsigned have_als:1;
+       unsigned have_backlights:1;
+       unsigned have_leds:1;
+};
+
+struct lm3533_ctrlbank {
+       struct lm3533 *lm3533;
+       struct device *dev;
+       int id;
+};
+
+struct lm3533_als_platform_data {
+       unsigned pwm_mode:1;            /* PWM input mode (default analog) */
+       u8 r_select;                    /* 1 - 127 (ignored in PWM-mode) */
+};
+
+struct lm3533_bl_platform_data {
+       char *name;
+       u16 max_current;                /* 5000 - 29800 uA (800 uA step) */
+       u8 default_brightness;          /* 0 - 255 */
+       u8 pwm;                         /* 0 - 0x3f */
+};
+
+struct lm3533_led_platform_data {
+       char *name;
+       const char *default_trigger;
+       u16 max_current;                /* 5000 - 29800 uA (800 uA step) */
+       u8 pwm;                         /* 0 - 0x3f */
+};
+
+enum lm3533_boost_freq {
+       LM3533_BOOST_FREQ_500KHZ,
+       LM3533_BOOST_FREQ_1000KHZ,
+};
+
+enum lm3533_boost_ovp {
+       LM3533_BOOST_OVP_16V,
+       LM3533_BOOST_OVP_24V,
+       LM3533_BOOST_OVP_32V,
+       LM3533_BOOST_OVP_40V,
+};
+
+struct lm3533_platform_data {
+       int gpio_hwen;
+
+       enum lm3533_boost_ovp boost_ovp;
+       enum lm3533_boost_freq boost_freq;
+
+       struct lm3533_als_platform_data *als;
+
+       struct lm3533_bl_platform_data *backlights;
+       int num_backlights;
+
+       struct lm3533_led_platform_data *leds;
+       int num_leds;
+};
+
+extern int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb);
+extern int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb);
+
+extern int lm3533_ctrlbank_set_brightness(struct lm3533_ctrlbank *cb, u8 val);
+extern int lm3533_ctrlbank_get_brightness(struct lm3533_ctrlbank *cb, u8 *val);
+extern int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb,
+                                                               u16 imax);
+extern int lm3533_ctrlbank_set_pwm(struct lm3533_ctrlbank *cb, u8 val);
+extern int lm3533_ctrlbank_get_pwm(struct lm3533_ctrlbank *cb, u8 *val);
+
+extern int lm3533_read(struct lm3533 *lm3533, u8 reg, u8 *val);
+extern int lm3533_write(struct lm3533 *lm3533, u8 reg, u8 val);
+extern int lm3533_update(struct lm3533 *lm3533, u8 reg, u8 val, u8 mask);
+
+#endif /* __LINUX_MFD_LM3533_H */
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
new file mode 100644 (file)
index 0000000..fec5256
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ *  linux/drivers/mfd/lpc_ich.h
+ *
+ *  Copyright (c) 2012 Extreme Engineering Solution, Inc.
+ *  Author: Aaron Sierra <asierra@xes-inc.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef LPC_ICH_H
+#define LPC_ICH_H
+
+/* Watchdog resources */
+#define ICH_RES_IO_TCO 0
+#define ICH_RES_IO_SMI 1
+#define ICH_RES_MEM_OFF        2
+#define ICH_RES_MEM_GCS        0
+
+/* GPIO resources */
+#define ICH_RES_GPIO   0
+#define ICH_RES_GPE0   1
+
+/* GPIO compatibility */
+#define ICH_I3100_GPIO         0x401
+#define ICH_V5_GPIO            0x501
+#define ICH_V6_GPIO            0x601
+#define ICH_V7_GPIO            0x701
+#define ICH_V9_GPIO            0x801
+#define ICH_V10CORP_GPIO       0xa01
+#define ICH_V10CONS_GPIO       0xa11
+
+struct lpc_ich_info {
+       char name[32];
+       unsigned int iTCO_version;
+       unsigned int gpio_version;
+};
+
+#endif
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
new file mode 100644 (file)
index 0000000..68263c5
--- /dev/null
@@ -0,0 +1,227 @@
+/*
+ * max77693-private.h - Voltage regulator driver for the Maxim 77693
+ *
+ *  Copyright (C) 2012 Samsung Electronics
+ *  SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __LINUX_MFD_MAX77693_PRIV_H
+#define __LINUX_MFD_MAX77693_PRIV_H
+
+#include <linux/i2c.h>
+
+#define MAX77693_NUM_IRQ_MUIC_REGS     3
+#define MAX77693_REG_INVALID           (0xff)
+
+/* Slave addr = 0xCC: PMIC, Charger, Flash LED */
+enum max77693_pmic_reg {
+       MAX77693_LED_REG_IFLASH1                        = 0x00,
+       MAX77693_LED_REG_IFLASH2                        = 0x01,
+       MAX77693_LED_REG_ITORCH                         = 0x02,
+       MAX77693_LED_REG_ITORCHTIMER                    = 0x03,
+       MAX77693_LED_REG_FLASH_TIMER                    = 0x04,
+       MAX77693_LED_REG_FLASH_EN                       = 0x05,
+       MAX77693_LED_REG_MAX_FLASH1                     = 0x06,
+       MAX77693_LED_REG_MAX_FLASH2                     = 0x07,
+       MAX77693_LED_REG_MAX_FLASH3                     = 0x08,
+       MAX77693_LED_REG_MAX_FLASH4                     = 0x09,
+       MAX77693_LED_REG_VOUT_CNTL                      = 0x0A,
+       MAX77693_LED_REG_VOUT_FLASH1                    = 0x0B,
+       MAX77693_LED_REG_VOUT_FLASH2                    = 0x0C,
+       MAX77693_LED_REG_FLASH_INT                      = 0x0E,
+       MAX77693_LED_REG_FLASH_INT_MASK                 = 0x0F,
+       MAX77693_LED_REG_FLASH_INT_STATUS               = 0x10,
+
+       MAX77693_PMIC_REG_PMIC_ID1                      = 0x20,
+       MAX77693_PMIC_REG_PMIC_ID2                      = 0x21,
+       MAX77693_PMIC_REG_INTSRC                        = 0x22,
+       MAX77693_PMIC_REG_INTSRC_MASK                   = 0x23,
+       MAX77693_PMIC_REG_TOPSYS_INT                    = 0x24,
+       MAX77693_PMIC_REG_TOPSYS_INT_MASK               = 0x26,
+       MAX77693_PMIC_REG_TOPSYS_STAT                   = 0x28,
+       MAX77693_PMIC_REG_MAINCTRL1                     = 0x2A,
+       MAX77693_PMIC_REG_LSCNFG                        = 0x2B,
+
+       MAX77693_CHG_REG_CHG_INT                        = 0xB0,
+       MAX77693_CHG_REG_CHG_INT_MASK                   = 0xB1,
+       MAX77693_CHG_REG_CHG_INT_OK                     = 0xB2,
+       MAX77693_CHG_REG_CHG_DETAILS_00                 = 0xB3,
+       MAX77693_CHG_REG_CHG_DETAILS_01                 = 0xB4,
+       MAX77693_CHG_REG_CHG_DETAILS_02                 = 0xB5,
+       MAX77693_CHG_REG_CHG_DETAILS_03                 = 0xB6,
+       MAX77693_CHG_REG_CHG_CNFG_00                    = 0xB7,
+       MAX77693_CHG_REG_CHG_CNFG_01                    = 0xB8,
+       MAX77693_CHG_REG_CHG_CNFG_02                    = 0xB9,
+       MAX77693_CHG_REG_CHG_CNFG_03                    = 0xBA,
+       MAX77693_CHG_REG_CHG_CNFG_04                    = 0xBB,
+       MAX77693_CHG_REG_CHG_CNFG_05                    = 0xBC,
+       MAX77693_CHG_REG_CHG_CNFG_06                    = 0xBD,
+       MAX77693_CHG_REG_CHG_CNFG_07                    = 0xBE,
+       MAX77693_CHG_REG_CHG_CNFG_08                    = 0xBF,
+       MAX77693_CHG_REG_CHG_CNFG_09                    = 0xC0,
+       MAX77693_CHG_REG_CHG_CNFG_10                    = 0xC1,
+       MAX77693_CHG_REG_CHG_CNFG_11                    = 0xC2,
+       MAX77693_CHG_REG_CHG_CNFG_12                    = 0xC3,
+       MAX77693_CHG_REG_CHG_CNFG_13                    = 0xC4,
+       MAX77693_CHG_REG_CHG_CNFG_14                    = 0xC5,
+       MAX77693_CHG_REG_SAFEOUT_CTRL                   = 0xC6,
+
+       MAX77693_PMIC_REG_END,
+};
+
+/* Slave addr = 0x4A: MUIC */
+enum max77693_muic_reg {
+       MAX77693_MUIC_REG_ID            = 0x00,
+       MAX77693_MUIC_REG_INT1          = 0x01,
+       MAX77693_MUIC_REG_INT2          = 0x02,
+       MAX77693_MUIC_REG_INT3          = 0x03,
+       MAX77693_MUIC_REG_STATUS1       = 0x04,
+       MAX77693_MUIC_REG_STATUS2       = 0x05,
+       MAX77693_MUIC_REG_STATUS3       = 0x06,
+       MAX77693_MUIC_REG_INTMASK1      = 0x07,
+       MAX77693_MUIC_REG_INTMASK2      = 0x08,
+       MAX77693_MUIC_REG_INTMASK3      = 0x09,
+       MAX77693_MUIC_REG_CDETCTRL1     = 0x0A,
+       MAX77693_MUIC_REG_CDETCTRL2     = 0x0B,
+       MAX77693_MUIC_REG_CTRL1         = 0x0C,
+       MAX77693_MUIC_REG_CTRL2         = 0x0D,
+       MAX77693_MUIC_REG_CTRL3         = 0x0E,
+
+       MAX77693_MUIC_REG_END,
+};
+
+/* Slave addr = 0x90: Haptic */
+enum max77693_haptic_reg {
+       MAX77693_HAPTIC_REG_STATUS              = 0x00,
+       MAX77693_HAPTIC_REG_CONFIG1             = 0x01,
+       MAX77693_HAPTIC_REG_CONFIG2             = 0x02,
+       MAX77693_HAPTIC_REG_CONFIG_CHNL         = 0x03,
+       MAX77693_HAPTIC_REG_CONFG_CYC1          = 0x04,
+       MAX77693_HAPTIC_REG_CONFG_CYC2          = 0x05,
+       MAX77693_HAPTIC_REG_CONFIG_PER1         = 0x06,
+       MAX77693_HAPTIC_REG_CONFIG_PER2         = 0x07,
+       MAX77693_HAPTIC_REG_CONFIG_PER3         = 0x08,
+       MAX77693_HAPTIC_REG_CONFIG_PER4         = 0x09,
+       MAX77693_HAPTIC_REG_CONFIG_DUTY1        = 0x0A,
+       MAX77693_HAPTIC_REG_CONFIG_DUTY2        = 0x0B,
+       MAX77693_HAPTIC_REG_CONFIG_PWM1         = 0x0C,
+       MAX77693_HAPTIC_REG_CONFIG_PWM2         = 0x0D,
+       MAX77693_HAPTIC_REG_CONFIG_PWM3         = 0x0E,
+       MAX77693_HAPTIC_REG_CONFIG_PWM4         = 0x0F,
+       MAX77693_HAPTIC_REG_REV                 = 0x10,
+
+       MAX77693_HAPTIC_REG_END,
+};
+
+enum max77693_irq_source {
+       LED_INT = 0,
+       TOPSYS_INT,
+       CHG_INT,
+       MUIC_INT1,
+       MUIC_INT2,
+       MUIC_INT3,
+
+       MAX77693_IRQ_GROUP_NR,
+};
+
+enum max77693_irq {
+       /* PMIC - FLASH */
+       MAX77693_LED_IRQ_FLED2_OPEN,
+       MAX77693_LED_IRQ_FLED2_SHORT,
+       MAX77693_LED_IRQ_FLED1_OPEN,
+       MAX77693_LED_IRQ_FLED1_SHORT,
+       MAX77693_LED_IRQ_MAX_FLASH,
+
+       /* PMIC - TOPSYS */
+       MAX77693_TOPSYS_IRQ_T120C_INT,
+       MAX77693_TOPSYS_IRQ_T140C_INT,
+       MAX77693_TOPSYS_IRQ_LOWSYS_INT,
+
+       /* PMIC - Charger */
+       MAX77693_CHG_IRQ_BYP_I,
+       MAX77693_CHG_IRQ_THM_I,
+       MAX77693_CHG_IRQ_BAT_I,
+       MAX77693_CHG_IRQ_CHG_I,
+       MAX77693_CHG_IRQ_CHGIN_I,
+
+       /* MUIC INT1 */
+       MAX77693_MUIC_IRQ_INT1_ADC,
+       MAX77693_MUIC_IRQ_INT1_ADC_LOW,
+       MAX77693_MUIC_IRQ_INT1_ADC_ERR,
+       MAX77693_MUIC_IRQ_INT1_ADC1K,
+
+       /* MUIC INT2 */
+       MAX77693_MUIC_IRQ_INT2_CHGTYP,
+       MAX77693_MUIC_IRQ_INT2_CHGDETREUN,
+       MAX77693_MUIC_IRQ_INT2_DCDTMR,
+       MAX77693_MUIC_IRQ_INT2_DXOVP,
+       MAX77693_MUIC_IRQ_INT2_VBVOLT,
+       MAX77693_MUIC_IRQ_INT2_VIDRM,
+
+       /* MUIC INT3 */
+       MAX77693_MUIC_IRQ_INT3_EOC,
+       MAX77693_MUIC_IRQ_INT3_CGMBC,
+       MAX77693_MUIC_IRQ_INT3_OVP,
+       MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR,
+       MAX77693_MUIC_IRQ_INT3_CHG_ENABLED,
+       MAX77693_MUIC_IRQ_INT3_BAT_DET,
+
+       MAX77693_IRQ_NR,
+};
+
+struct max77693_dev {
+       struct device *dev;
+       struct i2c_client *i2c;         /* 0xCC , PMIC, Charger, Flash LED */
+       struct i2c_client *muic;        /* 0x4A , MUIC */
+       struct i2c_client *haptic;      /* 0x90 , Haptic */
+       struct mutex iolock;
+
+       int type;
+
+       struct regmap *regmap;
+       struct regmap *regmap_muic;
+       struct regmap *regmap_haptic;
+
+       struct irq_domain *irq_domain;
+
+       int irq;
+       int irq_gpio;
+       bool wakeup;
+       struct mutex irqlock;
+       int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
+       int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
+};
+
+enum max77693_types {
+       TYPE_MAX77693,
+};
+
+extern int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest);
+extern int max77693_bulk_read(struct regmap *map, u8 reg, int count,
+                               u8 *buf);
+extern int max77693_write_reg(struct regmap *map, u8 reg, u8 value);
+extern int max77693_bulk_write(struct regmap *map, u8 reg, int count,
+                               u8 *buf);
+extern int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask);
+
+extern int max77693_irq_init(struct max77693_dev *max77686);
+extern void max77693_irq_exit(struct max77693_dev *max77686);
+extern int max77693_irq_resume(struct max77693_dev *max77686);
+
+#endif /*  __LINUX_MFD_MAX77693_PRIV_H */
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
new file mode 100644 (file)
index 0000000..1d28ae9
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * max77693.h - Driver for the Maxim 77693
+ *
+ *  Copyright (C) 2012 Samsung Electronics
+ *  SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * This driver is based on max8997.h
+ *
+ * MAX77693 has PMIC, Charger, Flash LED, Haptic, MUIC devices.
+ * The devices share the same I2C bus and included in
+ * this mfd driver.
+ */
+
+#ifndef __LINUX_MFD_MAX77693_H
+#define __LINUX_MFD_MAX77693_H
+
+struct max77693_platform_data {
+       int wakeup;
+};
+#endif /* __LINUX_MFD_MAX77693_H */
diff --git a/include/linux/mfd/sta2x11-mfd.h b/include/linux/mfd/sta2x11-mfd.h
new file mode 100644 (file)
index 0000000..d179227
--- /dev/null
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2009-2011 Wind River Systems, Inc.
+ * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * The STMicroelectronics ConneXt (STA2X11) chip has several unrelated
+ * functions in one PCI endpoint function. This driver simply
+ * registers the platform devices in this iomem region and exports a few
+ * functions to access common registers
+ */
+
+#ifndef __STA2X11_MFD_H
+#define __STA2X11_MFD_H
+#include <linux/types.h>
+#include <linux/pci.h>
+
+/*
+ * The MFD PCI block includes the GPIO peripherals and other register blocks.
+ * For GPIO, we have 32*4 bits (I use "gsta" for "gpio sta2x11".)
+ */
+#define GSTA_GPIO_PER_BLOCK    32
+#define GSTA_NR_BLOCKS         4
+#define GSTA_NR_GPIO           (GSTA_GPIO_PER_BLOCK * GSTA_NR_BLOCKS)
+
+/* Pinconfig is set by the board definition: altfunc, pull-up, pull-down */
+struct sta2x11_gpio_pdata {
+       unsigned pinconfig[GSTA_NR_GPIO];
+};
+
+/* Macros below lifted from sh_pfc.h, with minor differences */
+#define PINMUX_TYPE_NONE               0
+#define PINMUX_TYPE_FUNCTION           1
+#define PINMUX_TYPE_OUTPUT_LOW         2
+#define PINMUX_TYPE_OUTPUT_HIGH                3
+#define PINMUX_TYPE_INPUT              4
+#define PINMUX_TYPE_INPUT_PULLUP       5
+#define PINMUX_TYPE_INPUT_PULLDOWN     6
+
+/* Give names to GPIO pins, like PXA does, taken from the manual */
+#define STA2X11_GPIO0                  0
+#define STA2X11_GPIO1                  1
+#define STA2X11_GPIO2                  2
+#define STA2X11_GPIO3                  3
+#define STA2X11_GPIO4                  4
+#define STA2X11_GPIO5                  5
+#define STA2X11_GPIO6                  6
+#define STA2X11_GPIO7                  7
+#define STA2X11_GPIO8_RGBOUT_RED7      8
+#define STA2X11_GPIO9_RGBOUT_RED6      9
+#define STA2X11_GPIO10_RGBOUT_RED5     10
+#define STA2X11_GPIO11_RGBOUT_RED4     11
+#define STA2X11_GPIO12_RGBOUT_RED3     12
+#define STA2X11_GPIO13_RGBOUT_RED2     13
+#define STA2X11_GPIO14_RGBOUT_RED1     14
+#define STA2X11_GPIO15_RGBOUT_RED0     15
+#define STA2X11_GPIO16_RGBOUT_GREEN7   16
+#define STA2X11_GPIO17_RGBOUT_GREEN6   17
+#define STA2X11_GPIO18_RGBOUT_GREEN5   18
+#define STA2X11_GPIO19_RGBOUT_GREEN4   19
+#define STA2X11_GPIO20_RGBOUT_GREEN3   20
+#define STA2X11_GPIO21_RGBOUT_GREEN2   21
+#define STA2X11_GPIO22_RGBOUT_GREEN1   22
+#define STA2X11_GPIO23_RGBOUT_GREEN0   23
+#define STA2X11_GPIO24_RGBOUT_BLUE7    24
+#define STA2X11_GPIO25_RGBOUT_BLUE6    25
+#define STA2X11_GPIO26_RGBOUT_BLUE5    26
+#define STA2X11_GPIO27_RGBOUT_BLUE4    27
+#define STA2X11_GPIO28_RGBOUT_BLUE3    28
+#define STA2X11_GPIO29_RGBOUT_BLUE2    29
+#define STA2X11_GPIO30_RGBOUT_BLUE1    30
+#define STA2X11_GPIO31_RGBOUT_BLUE0    31
+#define STA2X11_GPIO32_RGBOUT_VSYNCH   32
+#define STA2X11_GPIO33_RGBOUT_HSYNCH   33
+#define STA2X11_GPIO34_RGBOUT_DEN      34
+#define STA2X11_GPIO35_ETH_CRS_DV      35
+#define STA2X11_GPIO36_ETH_TXD1                36
+#define STA2X11_GPIO37_ETH_TXD0                37
+#define STA2X11_GPIO38_ETH_TX_EN       38
+#define STA2X11_GPIO39_MDIO            39
+#define STA2X11_GPIO40_ETH_REF_CLK     40
+#define STA2X11_GPIO41_ETH_RXD1                41
+#define STA2X11_GPIO42_ETH_RXD0                42
+#define STA2X11_GPIO43_MDC             43
+#define STA2X11_GPIO44_CAN_TX          44
+#define STA2X11_GPIO45_CAN_RX          45
+#define STA2X11_GPIO46_MLB_DAT         46
+#define STA2X11_GPIO47_MLB_SIG         47
+#define STA2X11_GPIO48_SPI0_CLK                48
+#define STA2X11_GPIO49_SPI0_TXD                49
+#define STA2X11_GPIO50_SPI0_RXD                50
+#define STA2X11_GPIO51_SPI0_FRM                51
+#define STA2X11_GPIO52_SPI1_CLK                52
+#define STA2X11_GPIO53_SPI1_TXD                53
+#define STA2X11_GPIO54_SPI1_RXD                54
+#define STA2X11_GPIO55_SPI1_FRM                55
+#define STA2X11_GPIO56_SPI2_CLK                56
+#define STA2X11_GPIO57_SPI2_TXD                57
+#define STA2X11_GPIO58_SPI2_RXD                58
+#define STA2X11_GPIO59_SPI2_FRM                59
+#define STA2X11_GPIO60_I2C0_SCL                60
+#define STA2X11_GPIO61_I2C0_SDA                61
+#define STA2X11_GPIO62_I2C1_SCL                62
+#define STA2X11_GPIO63_I2C1_SDA                63
+#define STA2X11_GPIO64_I2C2_SCL                64
+#define STA2X11_GPIO65_I2C2_SDA                65
+#define STA2X11_GPIO66_I2C3_SCL                66
+#define STA2X11_GPIO67_I2C3_SDA                67
+#define STA2X11_GPIO68_MSP0_RCK                68
+#define STA2X11_GPIO69_MSP0_RXD                69
+#define STA2X11_GPIO70_MSP0_RFS                70
+#define STA2X11_GPIO71_MSP0_TCK                71
+#define STA2X11_GPIO72_MSP0_TXD                72
+#define STA2X11_GPIO73_MSP0_TFS                73
+#define STA2X11_GPIO74_MSP0_SCK                74
+#define STA2X11_GPIO75_MSP1_CK         75
+#define STA2X11_GPIO76_MSP1_RXD                76
+#define STA2X11_GPIO77_MSP1_FS         77
+#define STA2X11_GPIO78_MSP1_TXD                78
+#define STA2X11_GPIO79_MSP2_CK         79
+#define STA2X11_GPIO80_MSP2_RXD                80
+#define STA2X11_GPIO81_MSP2_FS         81
+#define STA2X11_GPIO82_MSP2_TXD                82
+#define STA2X11_GPIO83_MSP3_CK         83
+#define STA2X11_GPIO84_MSP3_RXD                84
+#define STA2X11_GPIO85_MSP3_FS         85
+#define STA2X11_GPIO86_MSP3_TXD                86
+#define STA2X11_GPIO87_MSP4_CK         87
+#define STA2X11_GPIO88_MSP4_RXD                88
+#define STA2X11_GPIO89_MSP4_FS         89
+#define STA2X11_GPIO90_MSP4_TXD                90
+#define STA2X11_GPIO91_MSP5_CK         91
+#define STA2X11_GPIO92_MSP5_RXD                92
+#define STA2X11_GPIO93_MSP5_FS         93
+#define STA2X11_GPIO94_MSP5_TXD                94
+#define STA2X11_GPIO95_SDIO3_DAT3      95
+#define STA2X11_GPIO96_SDIO3_DAT2      96
+#define STA2X11_GPIO97_SDIO3_DAT1      97
+#define STA2X11_GPIO98_SDIO3_DAT0      98
+#define STA2X11_GPIO99_SDIO3_CLK       99
+#define STA2X11_GPIO100_SDIO3_CMD      100
+#define STA2X11_GPIO101                        101
+#define STA2X11_GPIO102                        102
+#define STA2X11_GPIO103                        103
+#define STA2X11_GPIO104                        104
+#define STA2X11_GPIO105_SDIO2_DAT3     105
+#define STA2X11_GPIO106_SDIO2_DAT2     106
+#define STA2X11_GPIO107_SDIO2_DAT1     107
+#define STA2X11_GPIO108_SDIO2_DAT0     108
+#define STA2X11_GPIO109_SDIO2_CLK      109
+#define STA2X11_GPIO110_SDIO2_CMD      110
+#define STA2X11_GPIO111                        111
+#define STA2X11_GPIO112                        112
+#define STA2X11_GPIO113                        113
+#define STA2X11_GPIO114                        114
+#define STA2X11_GPIO115_SDIO1_DAT3     115
+#define STA2X11_GPIO116_SDIO1_DAT2     116
+#define STA2X11_GPIO117_SDIO1_DAT1     117
+#define STA2X11_GPIO118_SDIO1_DAT0     118
+#define STA2X11_GPIO119_SDIO1_CLK      119
+#define STA2X11_GPIO120_SDIO1_CMD      120
+#define STA2X11_GPIO121                        121
+#define STA2X11_GPIO122                        122
+#define STA2X11_GPIO123                        123
+#define STA2X11_GPIO124                        124
+#define STA2X11_GPIO125_UART2_TXD      125
+#define STA2X11_GPIO126_UART2_RXD      126
+#define STA2X11_GPIO127_UART3_TXD      127
+
+/*
+ * The APB bridge has its own registers, needed by our users as well.
+ * They are accessed with the following read/mask/write function.
+ */
+u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
+
+/* CAN and MLB */
+#define APBREG_BSR     0x00    /* Bridge Status Reg */
+#define APBREG_PAER    0x08    /* Peripherals Address Error Reg */
+#define APBREG_PWAC    0x20    /* Peripheral Write Access Control reg */
+#define APBREG_PRAC    0x40    /* Peripheral Read Access Control reg */
+#define APBREG_PCG     0x60    /* Peripheral Clock Gating Reg */
+#define APBREG_PUR     0x80    /* Peripheral Under Reset Reg */
+#define APBREG_EMU_PCG 0xA0    /* Emulator Peripheral Clock Gating Reg */
+
+#define APBREG_CAN     (1 << 1)
+#define APBREG_MLB     (1 << 3)
+
+/* SARAC */
+#define APBREG_BSR_SARAC     0x100 /* Bridge Status Reg */
+#define APBREG_PAER_SARAC    0x108 /* Peripherals Address Error Reg */
+#define APBREG_PWAC_SARAC    0x120 /* Peripheral Write Access Control reg */
+#define APBREG_PRAC_SARAC    0x140 /* Peripheral Read Access Control reg */
+#define APBREG_PCG_SARAC     0x160 /* Peripheral Clock Gating Reg */
+#define APBREG_PUR_SARAC     0x180 /* Peripheral Under Reset Reg */
+#define APBREG_EMU_PCG_SARAC 0x1A0 /* Emulator Peripheral Clock Gating Reg */
+
+#define APBREG_SARAC   (1 << 2)
+
+/*
+ * The system controller has its own registers. Some of these are accessed
+ * by our users as well, using the following read/mask/write function
+ */
+u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
+
+#define SCTL_SCCTL             0x00    /* System controller control register */
+#define SCTL_ARMCFG            0x04    /* ARM configuration register */
+#define SCTL_SCPLLCTL          0x08    /* PLL control status register */
+#define SCTL_SCPLLFCTRL                0x0c    /* PLL frequency control register */
+#define SCTL_SCRESFRACT                0x10    /* PLL fractional input register */
+#define SCTL_SCRESCTRL1                0x14    /* Peripheral reset control 1 */
+#define SCTL_SCRESXTRL2                0x18    /* Peripheral reset control 2 */
+#define SCTL_SCPEREN0          0x1c    /* Peripheral clock enable register 0 */
+#define SCTL_SCPEREN1          0x20    /* Peripheral clock enable register 1 */
+#define SCTL_SCPEREN2          0x24    /* Peripheral clock enable register 2 */
+#define SCTL_SCGRST            0x28    /* Peripheral global reset */
+#define SCTL_SCPCIPMCR1                0x30    /* PCI power management control 1 */
+#define SCTL_SCPCIPMCR2                0x34    /* PCI power management control 2 */
+#define SCTL_SCPCIPMSR1                0x38    /* PCI power management status 1 */
+#define SCTL_SCPCIPMSR2                0x3c    /* PCI power management status 2 */
+#define SCTL_SCPCIPMSR3                0x40    /* PCI power management status 3 */
+#define SCTL_SCINTREN          0x44    /* Interrupt enable */
+#define SCTL_SCRISR            0x48    /* RAW interrupt status */
+#define SCTL_SCCLKSTAT0                0x4c    /* Peripheral clocks status 0 */
+#define SCTL_SCCLKSTAT1                0x50    /* Peripheral clocks status 1 */
+#define SCTL_SCCLKSTAT2                0x54    /* Peripheral clocks status 2 */
+#define SCTL_SCRSTSTA          0x58    /* Reset status register */
+
+#define SCTL_SCRESCTRL1_USB_PHY_POR    (1 << 0)
+#define SCTL_SCRESCTRL1_USB_OTG        (1 << 1)
+#define SCTL_SCRESCTRL1_USB_HRST       (1 << 2)
+#define SCTL_SCRESCTRL1_USB_PHY_HOST   (1 << 3)
+#define SCTL_SCRESCTRL1_SATAII (1 << 4)
+#define SCTL_SCRESCTRL1_VIP            (1 << 5)
+#define SCTL_SCRESCTRL1_PER_MMC0       (1 << 6)
+#define SCTL_SCRESCTRL1_PER_MMC1       (1 << 7)
+#define SCTL_SCRESCTRL1_PER_GPIO0      (1 << 8)
+#define SCTL_SCRESCTRL1_PER_GPIO1      (1 << 9)
+#define SCTL_SCRESCTRL1_PER_GPIO2      (1 << 10)
+#define SCTL_SCRESCTRL1_PER_GPIO3      (1 << 11)
+#define SCTL_SCRESCTRL1_PER_MTU0       (1 << 12)
+#define SCTL_SCRESCTRL1_KER_SPI0       (1 << 13)
+#define SCTL_SCRESCTRL1_KER_SPI1       (1 << 14)
+#define SCTL_SCRESCTRL1_KER_SPI2       (1 << 15)
+#define SCTL_SCRESCTRL1_KER_MCI0       (1 << 16)
+#define SCTL_SCRESCTRL1_KER_MCI1       (1 << 17)
+#define SCTL_SCRESCTRL1_PRE_HSI2C0     (1 << 18)
+#define SCTL_SCRESCTRL1_PER_HSI2C1     (1 << 19)
+#define SCTL_SCRESCTRL1_PER_HSI2C2     (1 << 20)
+#define SCTL_SCRESCTRL1_PER_HSI2C3     (1 << 21)
+#define SCTL_SCRESCTRL1_PER_MSP0       (1 << 22)
+#define SCTL_SCRESCTRL1_PER_MSP1       (1 << 23)
+#define SCTL_SCRESCTRL1_PER_MSP2       (1 << 24)
+#define SCTL_SCRESCTRL1_PER_MSP3       (1 << 25)
+#define SCTL_SCRESCTRL1_PER_MSP4       (1 << 26)
+#define SCTL_SCRESCTRL1_PER_MSP5       (1 << 27)
+#define SCTL_SCRESCTRL1_PER_MMC        (1 << 28)
+#define SCTL_SCRESCTRL1_KER_MSP0       (1 << 29)
+#define SCTL_SCRESCTRL1_KER_MSP1       (1 << 30)
+#define SCTL_SCRESCTRL1_KER_MSP2       (1 << 31)
+
+#define SCTL_SCPEREN0_UART0            (1 << 0)
+#define SCTL_SCPEREN0_UART1            (1 << 1)
+#define SCTL_SCPEREN0_UART2            (1 << 2)
+#define SCTL_SCPEREN0_UART3            (1 << 3)
+#define SCTL_SCPEREN0_MSP0             (1 << 4)
+#define SCTL_SCPEREN0_MSP1             (1 << 5)
+#define SCTL_SCPEREN0_MSP2             (1 << 6)
+#define SCTL_SCPEREN0_MSP3             (1 << 7)
+#define SCTL_SCPEREN0_MSP4             (1 << 8)
+#define SCTL_SCPEREN0_MSP5             (1 << 9)
+#define SCTL_SCPEREN0_SPI0             (1 << 10)
+#define SCTL_SCPEREN0_SPI1             (1 << 11)
+#define SCTL_SCPEREN0_SPI2             (1 << 12)
+#define SCTL_SCPEREN0_I2C0             (1 << 13)
+#define SCTL_SCPEREN0_I2C1             (1 << 14)
+#define SCTL_SCPEREN0_I2C2             (1 << 15)
+#define SCTL_SCPEREN0_I2C3             (1 << 16)
+#define SCTL_SCPEREN0_SVDO_LVDS                (1 << 17)
+#define SCTL_SCPEREN0_USB_HOST         (1 << 18)
+#define SCTL_SCPEREN0_USB_OTG          (1 << 19)
+#define SCTL_SCPEREN0_MCI0             (1 << 20)
+#define SCTL_SCPEREN0_MCI1             (1 << 21)
+#define SCTL_SCPEREN0_MCI2             (1 << 22)
+#define SCTL_SCPEREN0_MCI3             (1 << 23)
+#define SCTL_SCPEREN0_SATA             (1 << 24)
+#define SCTL_SCPEREN0_ETHERNET         (1 << 25)
+#define SCTL_SCPEREN0_VIC              (1 << 26)
+#define SCTL_SCPEREN0_DMA_AUDIO                (1 << 27)
+#define SCTL_SCPEREN0_DMA_SOC          (1 << 28)
+#define SCTL_SCPEREN0_RAM              (1 << 29)
+#define SCTL_SCPEREN0_VIP              (1 << 30)
+#define SCTL_SCPEREN0_ARM              (1 << 31)
+
+#define SCTL_SCPEREN1_UART0            (1 << 0)
+#define SCTL_SCPEREN1_UART1            (1 << 1)
+#define SCTL_SCPEREN1_UART2            (1 << 2)
+#define SCTL_SCPEREN1_UART3            (1 << 3)
+#define SCTL_SCPEREN1_MSP0             (1 << 4)
+#define SCTL_SCPEREN1_MSP1             (1 << 5)
+#define SCTL_SCPEREN1_MSP2             (1 << 6)
+#define SCTL_SCPEREN1_MSP3             (1 << 7)
+#define SCTL_SCPEREN1_MSP4             (1 << 8)
+#define SCTL_SCPEREN1_MSP5             (1 << 9)
+#define SCTL_SCPEREN1_SPI0             (1 << 10)
+#define SCTL_SCPEREN1_SPI1             (1 << 11)
+#define SCTL_SCPEREN1_SPI2             (1 << 12)
+#define SCTL_SCPEREN1_I2C0             (1 << 13)
+#define SCTL_SCPEREN1_I2C1             (1 << 14)
+#define SCTL_SCPEREN1_I2C2             (1 << 15)
+#define SCTL_SCPEREN1_I2C3             (1 << 16)
+#define SCTL_SCPEREN1_USB_PHY          (1 << 17)
+
+#endif /* __STA2X11_MFD_H */
index 8516fd1eaabc7882127d269bf4db58366e93bb57..f8d5b4d5843fc02ad7728c207992d0b33ac0c5a7 100644 (file)
@@ -117,7 +117,7 @@ struct matrix_keymap_data;
  * @no_autorepeat: disable key autorepeat
  */
 struct stmpe_keypad_platform_data {
-       struct matrix_keymap_data *keymap_data;
+       const struct matrix_keymap_data *keymap_data;
        unsigned int debounce_ms;
        unsigned int scan_count;
        bool no_autorepeat;
index 1c6c2860d1a60a6350ff46e7d45321ed8be95a80..dd8dc0a6c46243141ea59e325cde465fb993bce5 100644 (file)
@@ -18,6 +18,7 @@
 #define __LINUX_MFD_TPS65910_H
 
 #include <linux/gpio.h>
+#include <linux/regmap.h>
 
 /* TPS chip id list */
 #define TPS65910                       0
 #define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3           0x4
 #define TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP         0x8
 
+/*
+ * Sleep keepon data: Maintains the state in sleep mode
+ * @therm_keepon: Keep on the thermal monitoring in sleep state.
+ * @clkout32k_keepon: Keep on the 32KHz clock output in sleep state.
+ * @i2chs_keepon: Keep on high speed internal clock in sleep state.
+ */
+struct tps65910_sleep_keepon_data {
+       unsigned therm_keepon:1;
+       unsigned clkout32k_keepon:1;
+       unsigned i2chs_keepon:1;
+};
+
 /**
  * struct tps65910_board
  * Board platform data may be used to initialize regulators.
@@ -794,6 +807,8 @@ struct tps65910_board {
        int irq_base;
        int vmbch_threshold;
        int vmbch2_threshold;
+       bool en_dev_slp;
+       struct tps65910_sleep_keepon_data *slp_keepon;
        bool en_gpio_sleep[TPS6591X_MAX_NUM_GPIO];
        unsigned long regulator_ext_sleep_control[TPS65910_NUM_REGS];
        struct regulator_init_data *tps65910_pmic_init_data[TPS65910_NUM_REGS];
@@ -809,16 +824,14 @@ struct tps65910 {
        struct regmap *regmap;
        struct mutex io_mutex;
        unsigned int id;
-       int (*read)(struct tps65910 *tps65910, u8 reg, int size, void *dest);
-       int (*write)(struct tps65910 *tps65910, u8 reg, int size, void *src);
 
        /* Client devices */
        struct tps65910_pmic *pmic;
        struct tps65910_rtc *rtc;
        struct tps65910_power *power;
 
-       /* GPIO Handling */
-       struct gpio_chip gpio;
+       /* Device node parsed board data */
+       struct tps65910_board *of_plat_data;
 
        /* IRQ Handling */
        struct mutex irq_lock;
@@ -826,6 +839,7 @@ struct tps65910 {
        int irq_base;
        int irq_num;
        u32 irq_mask;
+       struct irq_domain *domain;
 };
 
 struct tps65910_platform_data {
@@ -833,9 +847,6 @@ struct tps65910_platform_data {
        int irq_base;
 };
 
-int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
-int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
-void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base);
 int tps65910_irq_init(struct tps65910 *tps65910, int irq,
                struct tps65910_platform_data *pdata);
 int tps65910_irq_exit(struct tps65910 *tps65910);
@@ -845,4 +856,28 @@ static inline int tps65910_chip_id(struct tps65910 *tps65910)
        return tps65910->id;
 }
 
+static inline int tps65910_reg_read(struct tps65910 *tps65910, u8 reg,
+               unsigned int *val)
+{
+       return regmap_read(tps65910->regmap, reg, val);
+}
+
+static inline int tps65910_reg_write(struct tps65910 *tps65910, u8 reg,
+               unsigned int val)
+{
+       return regmap_write(tps65910->regmap, reg, val);
+}
+
+static inline int tps65910_reg_set_bits(struct tps65910 *tps65910, u8 reg,
+               u8 mask)
+{
+       return regmap_update_bits(tps65910->regmap, reg, mask, mask);
+}
+
+static inline int tps65910_reg_clear_bits(struct tps65910 *tps65910, u8 reg,
+               u8 mask)
+{
+       return regmap_update_bits(tps65910->regmap, reg, mask, 0);
+}
+
 #endif /*  __LINUX_MFD_TPS65910_H */
index b15b5f03f5c44c74473f30e46411876000bf5863..6659487c31e7a010c96bcef991f05b546fa103d1 100644 (file)
@@ -27,6 +27,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/mfd/core.h>
+#include <linux/regulator/consumer.h>
 
 #define TWL6040_REG_ASICID             0x01
 #define TWL6040_REG_ASICREV            0x02
@@ -203,6 +204,7 @@ struct regmap;
 struct twl6040 {
        struct device *dev;
        struct regmap *regmap;
+       struct regulator_bulk_data supplies[2]; /* supplies for vio, v2v1 */
        struct mutex mutex;
        struct mutex io_mutex;
        struct mutex irq_mutex;
index 4b1211859f74a9dad29a555c758b868e29a392d1..4a3b83a776148eb2ace611436b38ab42e931450e 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <linux/completion.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/list.h>
 #include <linux/regmap.h>
 
 #define WM831X_FLL_CLK_SRC_WIDTH                     2  /* FLL_CLK_SRC - [1:0] */
 
 struct regulator_dev;
+struct irq_domain;
 
 #define WM831X_NUM_IRQ_REGS 5
 #define WM831X_NUM_GPIO_REGS 16
@@ -367,7 +369,7 @@ struct wm831x {
 
        int irq;  /* Our chip IRQ */
        struct mutex irq_lock;
-       int irq_base;
+       struct irq_domain *irq_domain;
        int irq_masks_cur[WM831X_NUM_IRQ_REGS];   /* Currently active value */
        int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
 
@@ -382,7 +384,8 @@ struct wm831x {
 
        /* Used by the interrupt controller code to post writes */
        int gpio_update[WM831X_NUM_GPIO_REGS];
-       bool gpio_level[WM831X_NUM_GPIO_REGS];
+       bool gpio_level_high[WM831X_NUM_GPIO_REGS];
+       bool gpio_level_low[WM831X_NUM_GPIO_REGS];
 
        struct mutex auxadc_lock;
        struct list_head auxadc_pending;
@@ -417,6 +420,11 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq);
 void wm831x_irq_exit(struct wm831x *wm831x);
 void wm831x_auxadc_init(struct wm831x *wm831x);
 
+static inline int wm831x_irq(struct wm831x *wm831x, int irq)
+{
+       return irq_create_mapping(wm831x->irq_domain, irq);
+}
+
 extern struct regmap_config wm831x_regmap_config;
 
 #endif
index 98fcc977e82be5acd0805d3474ef3e12f4b9fb3f..9192b6404a7347d5b6e665925a421cf1024d3c24 100644 (file)
@@ -602,6 +602,7 @@ extern const u16 wm8352_mode2_defaults[];
 extern const u16 wm8352_mode3_defaults[];
 
 struct wm8350;
+struct regmap;
 
 struct wm8350_hwmon {
        struct platform_device *pdev;
@@ -612,13 +613,7 @@ struct wm8350 {
        struct device *dev;
 
        /* device IO */
-       union {
-               struct i2c_client *i2c_client;
-               struct spi_device *spi_device;
-       };
-       int (*read_dev)(struct wm8350 *wm8350, char reg, int size, void *dest);
-       int (*write_dev)(struct wm8350 *wm8350, char reg, int size,
-                        void *src);
+       struct regmap *regmap;
        u16 *reg_cache;
 
        struct mutex auxadc_mutex;
index 0147b696851072fde8a6074cb762257aa1654aad..2de565b94d0c39e9c0e3b049bbcf0237cceddb8f 100644 (file)
 #include <linux/mfd/wm8400.h>
 #include <linux/mutex.h>
 #include <linux/platform_device.h>
-
-struct regmap;
+#include <linux/regmap.h>
 
 #define WM8400_REGISTER_COUNT 0x55
 
 struct wm8400 {
        struct device *dev;
-
-       struct mutex io_lock;
        struct regmap *regmap;
 
-       u16 reg_cache[WM8400_REGISTER_COUNT];
-
        struct platform_device regulators[6];
 };
 
@@ -930,6 +925,11 @@ struct wm8400 {
 
 u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg);
 int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data);
-int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, u16 mask, u16 val);
+
+static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg,
+                                 u16 mask, u16 val)
+{
+       return regmap_update_bits(wm8400->regmap, reg, mask, val);
+}
 
 #endif
index 6695c3ec4518ccfe73ebe1c307a24721d7fef943..1f173306bf0508ddeb3a18de8175fac44c857ee3 100644 (file)
@@ -57,6 +57,7 @@ struct wm8994 {
 
        enum wm8994_type type;
        int revision;
+       int cust_id;
 
        struct device *dev;
        struct regmap *regmap;
index 86e6a032a07833f3dbee4f8d399a6e63e61ef93a..053548961c15df6c81b1e6128e056f2806806cbc 100644 (file)
 /*
  * R256 (0x100) - Chip Revision
  */
+#define WM8994_CUST_ID_MASK                     0xFF00  /* CUST_ID - [15:8] */
+#define WM8994_CUST_ID_SHIFT                         8  /* CUST_ID - [15:8] */
+#define WM8994_CUST_ID_WIDTH                         8  /* CUST_ID - [15:8] */
 #define WM8994_CHIP_REV_MASK                    0x000F  /* CHIP_REV - [3:0] */
 #define WM8994_CHIP_REV_SHIFT                        0  /* CHIP_REV - [3:0] */
 #define WM8994_CHIP_REV_WIDTH                        4  /* CHIP_REV - [3:0] */
index 6e27fa99e8b978d785c196ca5acb89123058f14d..6a8f002b8ed3bb77e77b48c611611b6c708b1979 100644 (file)
@@ -64,6 +64,7 @@ enum {
        MLX4_MAX_NUM_PF         = 16,
        MLX4_MAX_NUM_VF         = 64,
        MLX4_MFUNC_MAX          = 80,
+       MLX4_MAX_EQ_NUM         = 1024,
        MLX4_MFUNC_EQ_NUM       = 4,
        MLX4_MFUNC_MAX_EQES     = 8,
        MLX4_MFUNC_EQE_MASK     = (MLX4_MFUNC_MAX_EQES - 1)
@@ -239,6 +240,10 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
        return (major << 32) | (minor << 16) | subminor;
 }
 
+struct mlx4_phys_caps {
+       u32                     num_phys_eqs;
+};
+
 struct mlx4_caps {
        u64                     fw_ver;
        u32                     function;
@@ -499,6 +504,7 @@ struct mlx4_dev {
        unsigned long           flags;
        unsigned long           num_slaves;
        struct mlx4_caps        caps;
+       struct mlx4_phys_caps   phys_caps;
        struct radix_tree_root  qp_table_tree;
        u8                      rev_id;
        char                    board_id[MLX4_BOARD_ID_LEN];
index 7d5c37f24c63af1915b2eb32056f55d63fb04aa6..b36d08ce5c578dcd18e224828217ded481de54ee 100644 (file)
@@ -321,6 +321,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
 static inline void compound_lock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       VM_BUG_ON(PageSlab(page));
        bit_spin_lock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -328,6 +329,7 @@ static inline void compound_lock(struct page *page)
 static inline void compound_unlock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       VM_BUG_ON(PageSlab(page));
        bit_spin_unlock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -871,8 +873,6 @@ extern void pagefault_out_of_memory(void);
 extern void show_free_areas(unsigned int flags);
 extern bool skip_free_areas_node(unsigned int flags, int nid);
 
-int shmem_lock(struct file *file, int lock, struct user_struct *user);
-struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
 int shmem_zero_setup(struct vm_area_struct *);
 
 extern int can_do_mlock(void);
@@ -951,11 +951,9 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 extern int vmtruncate(struct inode *inode, loff_t offset);
-extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
 int truncate_inode_page(struct address_space *mapping, struct page *page);
 int generic_error_remove_page(struct address_space *mapping, struct page *page);
-
 int invalidate_inode_page(struct page *page);
 
 #ifdef CONFIG_MMU
@@ -1394,7 +1392,7 @@ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned lo
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
        unsigned long len, unsigned long flags,
        vm_flags_t vm_flags, unsigned long pgoff);
-extern unsigned long do_mmap(struct file *, unsigned long,
+extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
         unsigned long, unsigned long,
         unsigned long, unsigned long);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
index 227fd3e9a9c9370398478005d01344f5a42cb02d..1397ccf81e91f16d937c3f0b2e7d361104dce5f1 100644 (file)
@@ -21,22 +21,22 @@ static inline int page_is_file_cache(struct page *page)
        return !PageSwapBacked(page);
 }
 
-static inline void
-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void add_page_to_lru_list(struct page *page,
+                               struct lruvec *lruvec, enum lru_list lru)
 {
-       struct lruvec *lruvec;
-
-       lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+       int nr_pages = hpage_nr_pages(page);
+       mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
        list_add(&page->lru, &lruvec->lists[lru]);
-       __mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
+       __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
 }
 
-static inline void
-del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void del_page_from_lru_list(struct page *page,
+                               struct lruvec *lruvec, enum lru_list lru)
 {
-       mem_cgroup_lru_del_list(page, lru);
+       int nr_pages = hpage_nr_pages(page);
+       mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
        list_del(&page->lru);
-       __mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
+       __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
 }
 
 /**
@@ -61,7 +61,7 @@ static inline enum lru_list page_lru_base_type(struct page *page)
  * Returns the LRU list a page was on, as an index into the array of LRU
  * lists; and clears its Unevictable or Active flags, ready for freeing.
  */
-static inline enum lru_list page_off_lru(struct page *page)
+static __always_inline enum lru_list page_off_lru(struct page *page)
 {
        enum lru_list lru;
 
@@ -85,7 +85,7 @@ static inline enum lru_list page_off_lru(struct page *page)
  * Returns the LRU list a page should be on, as an index
  * into the array of LRU lists.
  */
-static inline enum lru_list page_lru(struct page *page)
+static __always_inline enum lru_list page_lru(struct page *page)
 {
        enum lru_list lru;
 
index 26574c726121cb9faab6b6fd3af517c73840869f..dad95bdd06d798545cea969d9cd4b9091e8a3089 100644 (file)
@@ -345,17 +345,6 @@ struct mm_struct {
        /* Architecture-specific MM context */
        mm_context_t context;
 
-       /* Swap token stuff */
-       /*
-        * Last value of global fault stamp as seen by this process.
-        * In other words, this value gives an indication of how long
-        * it has been since this task got the token.
-        * Look at mm/thrash.c
-        */
-       unsigned int faultstamp;
-       unsigned int token_priority;
-       unsigned int last_interval;
-
        unsigned long flags; /* Must use atomic bitops to access the bits */
 
        struct core_state *core_state; /* coredumping support */
index 629b823f88362b44001cf09dc1559d1e86780dad..d76513b5b2631b1daf7b9aa4c4b03f1d52d83d32 100644 (file)
@@ -58,6 +58,10 @@ struct mmc_ext_csd {
        unsigned int            generic_cmd6_time;      /* Units: 10ms */
        unsigned int            power_off_longtime;     /* Units: ms */
        unsigned int            hs_max_dtr;
+#define MMC_HIGH_26_MAX_DTR    26000000
+#define MMC_HIGH_52_MAX_DTR    52000000
+#define MMC_HIGH_DDR_MAX_DTR   52000000
+#define MMC_HS200_MAX_DTR      200000000
        unsigned int            sectors;
        unsigned int            card_type;
        unsigned int            hc_erase_size;          /* In sectors */
index 8f66e28f5a0f9a2b9f79c80a7c778eee7d4472ce..7a7ebd367cfdd2591e01b3c69346c4c43464b74e 100644 (file)
@@ -125,6 +125,7 @@ struct dw_mci {
        struct mmc_request      *mrq;
        struct mmc_command      *cmd;
        struct mmc_data         *data;
+       struct workqueue_struct *card_workqueue;
 
        /* DMA interface members*/
        int                     use_dma;
index cbde4b7e675ed28eda507784c185d6ebc6b30215..0707d228d7f11d675743aa8d58b5b5c05274295f 100644 (file)
@@ -297,6 +297,7 @@ struct mmc_host {
 
        unsigned int            sdio_irqs;
        struct task_struct      *sdio_irq_thread;
+       bool                    sdio_irq_pending;
        atomic_t                sdio_irq_thread_abort;
 
        mmc_pm_flag_t           pm_flags;       /* requested pm features */
@@ -352,6 +353,7 @@ extern int mmc_cache_ctrl(struct mmc_host *, u8);
 static inline void mmc_signal_sdio_irq(struct mmc_host *host)
 {
        host->ops->enable_sdio_irq(host, 0);
+       host->sdio_irq_pending = true;
        wake_up_process(host->sdio_irq_thread);
 }
 
index b822a2cb6008d16d1dee1474c62d9a8301da754e..d425cab144d9ee752657b6de6053329f19f70436 100644 (file)
@@ -354,66 +354,6 @@ struct _mmc_csd {
 #define EXT_CSD_CARD_TYPE_SDR_1_2V     (1<<5)  /* Card can run at 200MHz */
                                                /* SDR mode @1.2V I/O */
 
-#define EXT_CSD_CARD_TYPE_SDR_200      (EXT_CSD_CARD_TYPE_SDR_1_8V | \
-                                        EXT_CSD_CARD_TYPE_SDR_1_2V)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL      (EXT_CSD_CARD_TYPE_SDR_200 | \
-                                        EXT_CSD_CARD_TYPE_52 | \
-                                        EXT_CSD_CARD_TYPE_26)
-
-#define        EXT_CSD_CARD_TYPE_SDR_1_2V_ALL  (EXT_CSD_CARD_TYPE_SDR_1_2V | \
-                                        EXT_CSD_CARD_TYPE_52 | \
-                                        EXT_CSD_CARD_TYPE_26)
-
-#define        EXT_CSD_CARD_TYPE_SDR_1_8V_ALL  (EXT_CSD_CARD_TYPE_SDR_1_8V | \
-                                        EXT_CSD_CARD_TYPE_52 | \
-                                        EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V    (EXT_CSD_CARD_TYPE_SDR_1_2V | \
-                                                EXT_CSD_CARD_TYPE_DDR_1_8V | \
-                                                EXT_CSD_CARD_TYPE_52 | \
-                                                EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V    (EXT_CSD_CARD_TYPE_SDR_1_8V | \
-                                                EXT_CSD_CARD_TYPE_DDR_1_8V | \
-                                                EXT_CSD_CARD_TYPE_52 | \
-                                                EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V    (EXT_CSD_CARD_TYPE_SDR_1_2V | \
-                                                EXT_CSD_CARD_TYPE_DDR_1_2V | \
-                                                EXT_CSD_CARD_TYPE_52 | \
-                                                EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V    (EXT_CSD_CARD_TYPE_SDR_1_8V | \
-                                                EXT_CSD_CARD_TYPE_DDR_1_2V | \
-                                                EXT_CSD_CARD_TYPE_52 | \
-                                                EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52      (EXT_CSD_CARD_TYPE_SDR_1_2V | \
-                                                EXT_CSD_CARD_TYPE_DDR_52 | \
-                                                EXT_CSD_CARD_TYPE_52 | \
-                                                EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52      (EXT_CSD_CARD_TYPE_SDR_1_8V | \
-                                                EXT_CSD_CARD_TYPE_DDR_52 | \
-                                                EXT_CSD_CARD_TYPE_52 | \
-                                                EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V     (EXT_CSD_CARD_TYPE_SDR_200 | \
-                                                EXT_CSD_CARD_TYPE_DDR_1_8V | \
-                                                EXT_CSD_CARD_TYPE_52 | \
-                                                EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V     (EXT_CSD_CARD_TYPE_SDR_200 | \
-                                                EXT_CSD_CARD_TYPE_DDR_1_2V | \
-                                                EXT_CSD_CARD_TYPE_52 | \
-                                                EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52       (EXT_CSD_CARD_TYPE_SDR_200 | \
-                                                EXT_CSD_CARD_TYPE_DDR_52 | \
-                                                EXT_CSD_CARD_TYPE_52 | \
-                                                EXT_CSD_CARD_TYPE_26)
-
 #define EXT_CSD_BUS_WIDTH_1    0       /* Card is in 1 bit mode */
 #define EXT_CSD_BUS_WIDTH_4    1       /* Card is in 4 bit mode */
 #define EXT_CSD_BUS_WIDTH_8    2       /* Card is in 8 bit mode */
diff --git a/include/linux/mmc/mxs-mmc.h b/include/linux/mmc/mxs-mmc.h
new file mode 100644 (file)
index 0000000..7c2ad3a
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MMC_MXS_MMC_H__
+#define __LINUX_MMC_MXS_MMC_H__
+
+struct mxs_mmc_platform_data {
+       int wp_gpio;    /* write protect pin */
+       unsigned int flags;
+#define SLOTF_4_BIT_CAPABLE    (1 << 0)
+#define SLOTF_8_BIT_CAPABLE    (1 << 1)
+};
+
+#endif /* __LINUX_MMC_MXS_MMC_H__ */
index c04ecfe03f7ffeca4781049faa3a0018a91ea368..580bd587d916cfa28116e814b24639e1719efe5c 100644 (file)
@@ -4,7 +4,7 @@
 #ifdef CONFIG_DEBUG_VM
 #define VM_BUG_ON(cond) BUG_ON(cond)
 #else
-#define VM_BUG_ON(cond) do { (void)(cond); } while (0)
+#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
 #endif
 
 #ifdef CONFIG_DEBUG_VIRTUAL
index 41aa49b74821a43ba84ad4a936a2df1ae8055262..2427706f78b4d7043b5476b310d5a203631dbf90 100644 (file)
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
-#define MIGRATE_UNMOVABLE     0
-#define MIGRATE_RECLAIMABLE   1
-#define MIGRATE_MOVABLE       2
-#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
-#define MIGRATE_RESERVE       3
-#define MIGRATE_ISOLATE       4 /* can't allocate from here */
-#define MIGRATE_TYPES         5
+enum {
+       MIGRATE_UNMOVABLE,
+       MIGRATE_RECLAIMABLE,
+       MIGRATE_MOVABLE,
+       MIGRATE_PCPTYPES,       /* the number of types on the pcp lists */
+       MIGRATE_RESERVE = MIGRATE_PCPTYPES,
+#ifdef CONFIG_CMA
+       /*
+        * MIGRATE_CMA migration type is designed to mimic the way
+        * ZONE_MOVABLE works.  Only movable pages can be allocated
+        * from MIGRATE_CMA pageblocks and page allocator never
+        * implicitly change migration type of MIGRATE_CMA pageblock.
+        *
+        * The way to use it is to change migratetype of a range of
+        * pageblocks to MIGRATE_CMA which can be done by
+        * __free_pageblock_cma() function.  What is important though
+        * is that a range of pageblocks must be aligned to
+        * MAX_ORDER_NR_PAGES should biggest page be bigger then
+        * a single pageblock.
+        */
+       MIGRATE_CMA,
+#endif
+       MIGRATE_ISOLATE,        /* can't allocate from here */
+       MIGRATE_TYPES
+};
+
+#ifdef CONFIG_CMA
+#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+#  define cma_wmark_pages(zone)        zone->min_cma_pages
+#else
+#  define is_migrate_cma(migratetype) false
+#  define cma_wmark_pages(zone) 0
+#endif
 
 #define for_each_migratetype_order(order, type) \
        for (order = 0; order < MAX_ORDER; order++) \
@@ -159,8 +185,25 @@ static inline int is_unevictable_lru(enum lru_list lru)
        return (lru == LRU_UNEVICTABLE);
 }
 
+struct zone_reclaim_stat {
+       /*
+        * The pageout code in vmscan.c keeps track of how many of the
+        * mem/swap backed and file backed pages are refeferenced.
+        * The higher the rotated/scanned ratio, the more valuable
+        * that cache is.
+        *
+        * The anon LRU stats live in [0], file LRU stats in [1]
+        */
+       unsigned long           recent_rotated[2];
+       unsigned long           recent_scanned[2];
+};
+
 struct lruvec {
        struct list_head lists[NR_LRU_LISTS];
+       struct zone_reclaim_stat reclaim_stat;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+       struct zone *zone;
+#endif
 };
 
 /* Mask used at gathering information at once (see memcontrol.c) */
@@ -169,16 +212,12 @@ struct lruvec {
 #define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
 #define LRU_ALL             ((1 << NR_LRU_LISTS) - 1)
 
-/* Isolate inactive pages */
-#define ISOLATE_INACTIVE       ((__force isolate_mode_t)0x1)
-/* Isolate active pages */
-#define ISOLATE_ACTIVE         ((__force isolate_mode_t)0x2)
 /* Isolate clean file */
-#define ISOLATE_CLEAN          ((__force isolate_mode_t)0x4)
+#define ISOLATE_CLEAN          ((__force isolate_mode_t)0x1)
 /* Isolate unmapped file */
-#define ISOLATE_UNMAPPED       ((__force isolate_mode_t)0x8)
+#define ISOLATE_UNMAPPED       ((__force isolate_mode_t)0x2)
 /* Isolate for asynchronous migration */
-#define ISOLATE_ASYNC_MIGRATE  ((__force isolate_mode_t)0x10)
+#define ISOLATE_ASYNC_MIGRATE  ((__force isolate_mode_t)0x4)
 
 /* LRU Isolation modes. */
 typedef unsigned __bitwise__ isolate_mode_t;
@@ -287,19 +326,6 @@ enum zone_type {
 #error ZONES_SHIFT -- too many zones configured adjust calculation
 #endif
 
-struct zone_reclaim_stat {
-       /*
-        * The pageout code in vmscan.c keeps track of how many of the
-        * mem/swap backed and file backed pages are refeferenced.
-        * The higher the rotated/scanned ratio, the more valuable
-        * that cache is.
-        *
-        * The anon LRU stats live in [0], file LRU stats in [1]
-        */
-       unsigned long           recent_rotated[2];
-       unsigned long           recent_scanned[2];
-};
-
 struct zone {
        /* Fields commonly accessed by the page allocator */
 
@@ -346,6 +372,13 @@ struct zone {
 #ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t               span_seqlock;
+#endif
+#ifdef CONFIG_CMA
+       /*
+        * CMA needs to increase watermark levels during the allocation
+        * process to make sure that the system is not starved.
+        */
+       unsigned long           min_cma_pages;
 #endif
        struct free_area        free_area[MAX_ORDER];
 
@@ -374,8 +407,6 @@ struct zone {
        spinlock_t              lru_lock;
        struct lruvec           lruvec;
 
-       struct zone_reclaim_stat reclaim_stat;
-
        unsigned long           pages_scanned;     /* since last reclaim */
        unsigned long           flags;             /* zone flags, see below */
 
@@ -701,6 +732,17 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
                                     unsigned long size,
                                     enum memmap_context context);
 
+extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
+
+static inline struct zone *lruvec_zone(struct lruvec *lruvec)
+{
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+       return lruvec->zone;
+#else
+       return container_of(lruvec, struct zone, lruvec);
+#endif
+}
+
 #ifdef CONFIG_HAVE_MEMORY_PRESENT
 void memory_present(int nid, unsigned long start, unsigned long end);
 #else
index 34066e65fdeb327b4da8e0fd58e4dc143d985fc4..11cc2ac67e756af2b633a8badabcc49f4ffe8cb7 100644 (file)
@@ -21,8 +21,9 @@
 #define CT_LE_W(v)     cpu_to_le16(v)
 #define CT_LE_L(v)     cpu_to_le32(v)
 
+#define MSDOS_ROOT_INO  1      /* The root inode number */
+#define MSDOS_FSINFO_INO 2     /* Used for managing the FSINFO block */
 
-#define MSDOS_ROOT_INO 1       /* == MINIX_ROOT_INO */
 #define MSDOS_DIR_BITS 5       /* log2(sizeof(struct msdos_dir_entry)) */
 
 /* directory limit */
index 69b6dbf46b5edd4700aec7343b2c589702ef118e..ed3c4e09f3d1f36ff2553bace163186a0be2784b 100644 (file)
 #define GPMI_NAND_RES_SIZE     6
 
 /* Resource names for the GPMI NAND driver. */
-#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "GPMI NAND GPMI Registers"
+#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
 #define GPMI_NAND_GPMI_INTERRUPT_RES_NAME  "GPMI NAND GPMI Interrupt"
-#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "GPMI NAND BCH Registers"
-#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "GPMI NAND BCH Interrupt"
+#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
+#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"
 #define GPMI_NAND_DMA_CHANNELS_RES_NAME    "GPMI NAND DMA Channels"
-#define GPMI_NAND_DMA_INTERRUPT_RES_NAME   "GPMI NAND DMA Interrupt"
+#define GPMI_NAND_DMA_INTERRUPT_RES_NAME   "gpmi-dma"
 
 /**
  * struct gpmi_nand_platform_data - GPMI NAND driver platform data.
index cf5ea8cdcf8e3a9a325da9a6f28f6fb120ea1319..63dadc0dfb629a74f6e578a16255ffc4cdb4d56a 100644 (file)
@@ -157,6 +157,15 @@ struct mtd_info {
        unsigned int erasesize_mask;
        unsigned int writesize_mask;
 
+       /*
+        * read ops return -EUCLEAN if max number of bitflips corrected on any
+        * one region comprising an ecc step equals or exceeds this value.
+        * Settable by driver, else defaults to ecc_strength.  User can override
+        * in sysfs.  N.B. The meaning of the -EUCLEAN return code has changed;
+        * see Documentation/ABI/testing/sysfs-class-mtd for more detail.
+        */
+       unsigned int bitflip_threshold;
+
        // Kernel-only stuff starts here.
        const char *name;
        int index;
@@ -164,7 +173,7 @@ struct mtd_info {
        /* ECC layout structure pointer - read only! */
        struct nand_ecclayout *ecclayout;
 
-       /* max number of correctible bit errors per writesize */
+       /* max number of correctible bit errors per ecc step */
        unsigned int ecc_strength;
 
        /* Data for variable erase regions. If numeraseregions is zero,
index 1482340d3d9f5e0a6ba5d61a020b112320618362..57977c6405292347f8026fd23f7bc4572312e345 100644 (file)
@@ -161,8 +161,6 @@ typedef enum {
  * Option constants for bizarre disfunctionality and real
  * features.
  */
-/* Chip can not auto increment pages */
-#define NAND_NO_AUTOINCR       0x00000001
 /* Buswidth is 16 bit */
 #define NAND_BUSWIDTH_16       0x00000002
 /* Device supports partial programming without padding */
@@ -207,7 +205,6 @@ typedef enum {
        (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
 
 /* Macros to identify the above */
-#define NAND_CANAUTOINCR(chip) (!(chip->options & NAND_NO_AUTOINCR))
 #define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING))
 #define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
 #define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK))
@@ -216,7 +213,7 @@ typedef enum {
                                        && (chip->page_shift > 9))
 
 /* Mask to zero out the chip options, which come from the id table */
-#define NAND_CHIPOPTIONS_MSK   (0x0000ffff & ~NAND_NO_AUTOINCR)
+#define NAND_CHIPOPTIONS_MSK   0x0000ffff
 
 /* Non chip related options */
 /* This option skips the bbt scan during initialization. */
@@ -363,21 +360,20 @@ struct nand_ecc_ctrl {
        int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc,
                        uint8_t *calc_ecc);
        int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
-                       uint8_t *buf, int page);
+                       uint8_t *buf, int oob_required, int page);
        void (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
-                       const uint8_t *buf);
+                       const uint8_t *buf, int oob_required);
        int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip,
-                       uint8_t *buf, int page);
+                       uint8_t *buf, int oob_required, int page);
        int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
                        uint32_t offs, uint32_t len, uint8_t *buf);
        void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
-                       const uint8_t *buf);
+                       const uint8_t *buf, int oob_required);
        int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
                        int page);
        int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
-                       int page, int sndcmd);
-       int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page,
-                       int sndcmd);
+                       int page);
+       int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page);
        int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip,
                        int page);
 };
@@ -459,6 +455,8 @@ struct nand_buffers {
  * @pagemask:          [INTERN] page number mask = number of (pages / chip) - 1
  * @pagebuf:           [INTERN] holds the pagenumber which is currently in
  *                     data_buf.
+ * @pagebuf_bitflips:  [INTERN] holds the bitflip count for the page which is
+ *                     currently in data_buf.
  * @subpagesize:       [INTERN] holds the subpagesize
  * @onfi_version:      [INTERN] holds the chip ONFI version (BCD encoded),
  *                     non 0 if ONFI supported.
@@ -505,7 +503,8 @@ struct nand_chip {
        int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state,
                        int status, int page);
        int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
-                       const uint8_t *buf, int page, int cached, int raw);
+                       const uint8_t *buf, int oob_required, int page,
+                       int cached, int raw);
 
        int chip_delay;
        unsigned int options;
@@ -519,6 +518,7 @@ struct nand_chip {
        uint64_t chipsize;
        int pagemask;
        int pagebuf;
+       unsigned int pagebuf_bitflips;
        int subpagesize;
        uint8_t cellinfo;
        int badblockpos;
@@ -654,6 +654,7 @@ struct platform_nand_ctrl {
        void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
        void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
        void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
+       unsigned char (*read_byte)(struct mtd_info *mtd);
        void *priv;
 };
 
index 30b0c4e78f91ce8a43ca2c13ab5845f8a30fd9f1..51bf8ada6dc0166b103c4d11067d92fbb2496d78 100644 (file)
@@ -18,7 +18,6 @@
 struct mv643xx_eth_shared_platform_data {
        struct mbus_dram_target_info    *dram;
        struct platform_device  *shared_smi;
-       unsigned int            t_clk;
        /*
         * Max packet size for Tx IP/Layer 4 checksum, when set to 0, default
         * limit of 9KiB will be used.
index 2d7510f389346a1987b60ded53ca3bf33ad8f603..e9ac2df079ba7517b8d5c8da00e3a234075f5063 100644 (file)
@@ -313,5 +313,8 @@ extern int kernel_sock_shutdown(struct socket *sock,
        MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
                     "-type-" __stringify(type))
 
+#define MODULE_ALIAS_NET_PF_PROTO_NAME(pf, proto, name) \
+       MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
+                    name)
 #endif /* __KERNEL__ */
 #endif /* _LINUX_NET_H */
index e7fd468f71268f5f2727260638980c2bf8aef315..d94cb14315196f7a35e64651bdd9465aba1c477e 100644 (file)
@@ -2795,15 +2795,15 @@ do {                                                            \
 #define netif_info(priv, type, dev, fmt, args...)              \
        netif_level(info, priv, type, dev, fmt, ##args)
 
-#if defined(DEBUG)
-#define netif_dbg(priv, type, dev, format, args...)            \
-       netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
-#elif defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
 #define netif_dbg(priv, type, netdev, format, args...)         \
 do {                                                           \
        if (netif_msg_##type(priv))                             \
                dynamic_netdev_dbg(netdev, format, ##args);     \
 } while (0)
+#elif defined(DEBUG)
+#define netif_dbg(priv, type, dev, format, args...)            \
+       netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
 #else
 #define netif_dbg(priv, type, dev, format, args...)                    \
 ({                                                                     \
index 0987146b0637a1fd1f9a4ea03fb7040b1d4314cd..af2d2fa30eee960a6b0f263f43e6b4a39ad5f245 100644 (file)
 #define NFS4_CDFC4_FORE_OR_BOTH 0x3
 #define NFS4_CDFC4_BACK_OR_BOTH 0x7
 
+#define NFS4_CDFS4_FORE 0x1
+#define NFS4_CDFS4_BACK 0x2
+#define NFS4_CDFS4_BOTH 0x3
+
 #define NFS4_SET_TO_SERVER_TIME        0
 #define NFS4_SET_TO_CLIENT_TIME        1
 
@@ -526,6 +530,13 @@ enum lock_type4 {
 #define FATTR4_WORD1_MOUNTED_ON_FILEID  (1UL << 23)
 #define FATTR4_WORD1_FS_LAYOUT_TYPES    (1UL << 30)
 #define FATTR4_WORD2_LAYOUT_BLKSIZE     (1UL << 1)
+#define FATTR4_WORD2_MDSTHRESHOLD       (1UL << 4)
+
+/* MDS threshold bitmap bits */
+#define THRESHOLD_RD                    (1UL << 0)
+#define THRESHOLD_WR                    (1UL << 1)
+#define THRESHOLD_RD_IO                 (1UL << 2)
+#define THRESHOLD_WR_IO                 (1UL << 3)
 
 #define NFSPROC4_NULL 0
 #define NFSPROC4_COMPOUND 1
@@ -596,6 +607,8 @@ enum {
        NFSPROC4_CLNT_TEST_STATEID,
        NFSPROC4_CLNT_FREE_STATEID,
        NFSPROC4_CLNT_GETDEVICELIST,
+       NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
+       NFSPROC4_CLNT_DESTROY_CLIENTID,
 };
 
 /* nfs41 types */
index 52a1bdb4ee2bad0a668262c7b67bf8003f738095..b23cfc120edb46c5b285de63edeff61fb03242ec 100644 (file)
@@ -102,6 +102,7 @@ struct nfs_open_context {
        int error;
 
        struct list_head list;
+       struct nfs4_threshold   *mdsthreshold;
 };
 
 struct nfs_open_dir_context {
@@ -179,8 +180,7 @@ struct nfs_inode {
        __be32                  cookieverf[2];
 
        unsigned long           npages;
-       unsigned long           ncommit;
-       struct list_head        commit_list;
+       struct nfs_mds_commit_info commit_info;
 
        /* Open contexts for shared mmap writes */
        struct list_head        open_files;
@@ -201,8 +201,10 @@ struct nfs_inode {
 
        /* pNFS layout information */
        struct pnfs_layout_hdr *layout;
-       atomic_t                commits_outstanding;
 #endif /* CONFIG_NFS_V4*/
+       /* how many bytes have been written/read and how many bytes queued up */
+       __u64 write_io;
+       __u64 read_io;
 #ifdef CONFIG_NFS_FSCACHE
        struct fscache_cookie   *fscache;
 #endif
@@ -230,7 +232,6 @@ struct nfs_inode {
 #define NFS_INO_FSCACHE                (5)             /* inode can be cached by FS-Cache */
 #define NFS_INO_FSCACHE_LOCK   (6)             /* FS-Cache cookie management lock */
 #define NFS_INO_COMMIT         (7)             /* inode is committing unstable writes */
-#define NFS_INO_PNFS_COMMIT    (8)             /* use pnfs code for commit */
 #define NFS_INO_LAYOUTCOMMIT   (9)             /* layoutcommit required */
 #define NFS_INO_LAYOUTCOMMITTING (10)          /* layoutcommit inflight */
 
@@ -317,11 +318,6 @@ static inline int nfs_server_capable(struct inode *inode, int cap)
        return NFS_SERVER(inode)->caps & cap;
 }
 
-static inline int NFS_USE_READDIRPLUS(struct inode *inode)
-{
-       return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
-}
-
 static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
 {
        dentry->d_time = verf;
@@ -552,8 +548,8 @@ extern int nfs_wb_page(struct inode *inode, struct page* page);
 extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 extern int  nfs_commit_inode(struct inode *, int);
-extern struct nfs_write_data *nfs_commitdata_alloc(void);
-extern void nfs_commit_free(struct nfs_write_data *wdata);
+extern struct nfs_commit_data *nfs_commitdata_alloc(void);
+extern void nfs_commit_free(struct nfs_commit_data *data);
 #else
 static inline int
 nfs_commit_inode(struct inode *inode, int how)
@@ -568,12 +564,6 @@ nfs_have_writebacks(struct inode *inode)
        return NFS_I(inode)->npages != 0;
 }
 
-/*
- * Allocate nfs_write_data structures
- */
-extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages);
-extern void nfs_writedata_free(struct nfs_write_data *);
-
 /*
  * linux/fs/nfs/read.c
  */
@@ -584,12 +574,6 @@ extern int  nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
 extern int  nfs_readpage_async(struct nfs_open_context *, struct inode *,
                               struct page *);
 
-/*
- * Allocate nfs_read_data structures
- */
-extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages);
-extern void nfs_readdata_free(struct nfs_read_data *);
-
 /*
  * linux/fs/nfs3proc.c
  */
@@ -654,6 +638,7 @@ nfs_fileid_to_ino_t(u64 fileid)
 #define NFSDBG_FSCACHE         0x0800
 #define NFSDBG_PNFS            0x1000
 #define NFSDBG_PNFS_LD         0x2000
+#define NFSDBG_STATE           0x4000
 #define NFSDBG_ALL             0xFFFF
 
 #ifdef __KERNEL__
index 7073fc74481cb6e1d69b0278e26c87c52cbc349e..fbb78fb09bd25c925d65207643bf61da614167d8 100644 (file)
@@ -17,7 +17,7 @@ struct nfs4_sequence_args;
 struct nfs4_sequence_res;
 struct nfs_server;
 struct nfs4_minor_version_ops;
-struct server_scope;
+struct nfs41_server_scope;
 struct nfs41_impl_id;
 
 /*
@@ -35,6 +35,9 @@ struct nfs_client {
 #define NFS_CS_RENEWD          3               /* - renewd started */
 #define NFS_CS_STOP_RENEW      4               /* no more state to renew */
 #define NFS_CS_CHECK_LEASE_TIME        5               /* need to check lease time */
+       unsigned long           cl_flags;       /* behavior switches */
+#define NFS_CS_NORESVPORT      0               /* - use ephemeral src port */
+#define NFS_CS_DISCRTRY                1               /* - disconnect on RPC retry */
        struct sockaddr_storage cl_addr;        /* server identifier */
        size_t                  cl_addrlen;
        char *                  cl_hostname;    /* hostname of server */
@@ -61,9 +64,6 @@ struct nfs_client {
 
        struct rpc_wait_queue   cl_rpcwaitq;
 
-       /* used for the setclientid verifier */
-       struct timespec         cl_boot_time;
-
        /* idmapper */
        struct idmap *          cl_idmap;
 
@@ -79,16 +79,17 @@ struct nfs_client {
        u32                     cl_seqid;
        /* The flags used for obtaining the clientid during EXCHANGE_ID */
        u32                     cl_exchange_flags;
-       struct nfs4_session     *cl_session;    /* sharred session */
+       struct nfs4_session     *cl_session;    /* shared session */
+       struct nfs41_server_owner *cl_serverowner;
+       struct nfs41_server_scope *cl_serverscope;
+       struct nfs41_impl_id    *cl_implid;
 #endif /* CONFIG_NFS_V4 */
 
 #ifdef CONFIG_NFS_FSCACHE
        struct fscache_cookie   *fscache;       /* client index cache cookie */
 #endif
 
-       struct server_scope     *server_scope;  /* from exchange_id */
-       struct nfs41_impl_id    *impl_id;       /* from exchange_id */
-       struct net              *net;
+       struct net              *cl_net;
 };
 
 /*
index eac30d6bec17c78db77a050e269ae0336e00e372..88d166b555e8539fa8ec5cb896c56ecea7417bdc 100644 (file)
@@ -27,7 +27,6 @@ enum {
        PG_CLEAN,
        PG_NEED_COMMIT,
        PG_NEED_RESCHED,
-       PG_PARTIAL_READ_FAILED,
        PG_COMMIT_TO_DS,
 };
 
@@ -37,7 +36,6 @@ struct nfs_page {
        struct page             *wb_page;       /* page to read in/write out */
        struct nfs_open_context *wb_context;    /* File state context info */
        struct nfs_lock_context *wb_lock_context;       /* lock context info */
-       atomic_t                wb_complete;    /* i/os we're waiting for */
        pgoff_t                 wb_index;       /* Offset >> PAGE_CACHE_SHIFT */
        unsigned int            wb_offset,      /* Offset & ~PAGE_CACHE_MASK */
                                wb_pgbase,      /* Start of page data */
@@ -68,7 +66,9 @@ struct nfs_pageio_descriptor {
        int                     pg_ioflags;
        int                     pg_error;
        const struct rpc_call_ops *pg_rpc_callops;
+       const struct nfs_pgio_completion_ops *pg_completion_ops;
        struct pnfs_layout_segment *pg_lseg;
+       struct nfs_direct_req   *pg_dreq;
 };
 
 #define NFS_WBACK_BUSY(req)    (test_bit(PG_BUSY,&(req)->wb_flags))
@@ -84,6 +84,7 @@ extern        void nfs_release_request(struct nfs_page *req);
 extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
                             struct inode *inode,
                             const struct nfs_pageio_ops *pg_ops,
+                            const struct nfs_pgio_completion_ops *compl_ops,
                             size_t bsize,
                             int how);
 extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
@@ -95,26 +96,17 @@ extern bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
                                struct nfs_page *req);
 extern  int nfs_wait_on_request(struct nfs_page *);
 extern void nfs_unlock_request(struct nfs_page *req);
+extern void nfs_unlock_and_release_request(struct nfs_page *req);
 
 /*
- * Lock the page of an asynchronous request without getting a new reference
+ * Lock the page of an asynchronous request
  */
-static inline int
-nfs_lock_request_dontget(struct nfs_page *req)
-{
-       return !test_and_set_bit(PG_BUSY, &req->wb_flags);
-}
-
 static inline int
 nfs_lock_request(struct nfs_page *req)
 {
-       if (test_and_set_bit(PG_BUSY, &req->wb_flags))
-               return 0;
-       kref_get(&req->wb_kref);
-       return 1;
+       return !test_and_set_bit(PG_BUSY, &req->wb_flags);
 }
 
-
 /**
  * nfs_list_add_request - Insert a request into a list
  * @req: request
index 7ba3551a0414a867cffe38e52cc720004e32592e..d1a7bf51c326dc7f103aae60874a667f3307b373 100644 (file)
@@ -35,6 +35,15 @@ static inline int nfs_fsid_equal(const struct nfs_fsid *a, const struct nfs_fsid
        return a->major == b->major && a->minor == b->minor;
 }
 
+struct nfs4_threshold {
+       __u32   bm;
+       __u32   l_type;
+       __u64   rd_sz;
+       __u64   wr_sz;
+       __u64   rd_io_sz;
+       __u64   wr_io_sz;
+};
+
 struct nfs_fattr {
        unsigned int            valid;          /* which fields are valid */
        umode_t                 mode;
@@ -67,6 +76,7 @@ struct nfs_fattr {
        unsigned long           gencount;
        struct nfs4_string      *owner_name;
        struct nfs4_string      *group_name;
+       struct nfs4_threshold   *mdsthreshold;  /* pNFS threshold hints */
 };
 
 #define NFS_ATTR_FATTR_TYPE            (1U << 0)
@@ -106,14 +116,14 @@ struct nfs_fattr {
                | NFS_ATTR_FATTR_FILEID \
                | NFS_ATTR_FATTR_ATIME \
                | NFS_ATTR_FATTR_MTIME \
-               | NFS_ATTR_FATTR_CTIME)
+               | NFS_ATTR_FATTR_CTIME \
+               | NFS_ATTR_FATTR_CHANGE)
 #define NFS_ATTR_FATTR_V2 (NFS_ATTR_FATTR \
                | NFS_ATTR_FATTR_BLOCKS_USED)
 #define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \
                | NFS_ATTR_FATTR_SPACE_USED)
 #define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \
-               | NFS_ATTR_FATTR_SPACE_USED \
-               | NFS_ATTR_FATTR_CHANGE)
+               | NFS_ATTR_FATTR_SPACE_USED)
 
 /*
  * Info on the file system
@@ -338,7 +348,6 @@ struct nfs_openargs {
        const struct qstr *     name;
        const struct nfs_server *server;         /* Needed for ID mapping */
        const u32 *             bitmask;
-       const u32 *             dir_bitmask;
        __u32                   claim;
        struct nfs4_sequence_args       seq_args;
 };
@@ -349,7 +358,6 @@ struct nfs_openres {
        struct nfs4_change_info cinfo;
        __u32                   rflags;
        struct nfs_fattr *      f_attr;
-       struct nfs_fattr *      dir_attr;
        struct nfs_seqid *      seqid;
        const struct nfs_server *server;
        fmode_t                 delegation_type;
@@ -518,13 +526,30 @@ struct nfs_writeres {
        struct nfs4_sequence_res        seq_res;
 };
 
+/*
+ * Arguments to the commit call.
+ */
+struct nfs_commitargs {
+       struct nfs_fh           *fh;
+       __u64                   offset;
+       __u32                   count;
+       const u32               *bitmask;
+       struct nfs4_sequence_args       seq_args;
+};
+
+struct nfs_commitres {
+       struct nfs_fattr        *fattr;
+       struct nfs_writeverf    *verf;
+       const struct nfs_server *server;
+       struct nfs4_sequence_res        seq_res;
+};
+
 /*
  * Common arguments to the unlink call
  */
 struct nfs_removeargs {
        const struct nfs_fh     *fh;
        struct qstr             name;
-       const u32 *             bitmask;
        struct nfs4_sequence_args       seq_args;
 };
 
@@ -543,7 +568,6 @@ struct nfs_renameargs {
        const struct nfs_fh             *new_dir;
        const struct qstr               *old_name;
        const struct qstr               *new_name;
-       const u32                       *bitmask;
        struct nfs4_sequence_args       seq_args;
 };
 
@@ -839,7 +863,6 @@ struct nfs4_create_res {
        struct nfs_fh *                 fh;
        struct nfs_fattr *              fattr;
        struct nfs4_change_info         dir_cinfo;
-       struct nfs_fattr *              dir_fattr;
        struct nfs4_sequence_res        seq_res;
 };
 
@@ -1061,6 +1084,21 @@ struct nfstime4 {
 };
 
 #ifdef CONFIG_NFS_V4_1
+
+struct pnfs_commit_bucket {
+       struct list_head written;
+       struct list_head committing;
+       struct pnfs_layout_segment *wlseg;
+       struct pnfs_layout_segment *clseg;
+};
+
+struct pnfs_ds_commit_info {
+       int nwritten;
+       int ncommitting;
+       int nbuckets;
+       struct pnfs_commit_bucket *buckets;
+};
+
 #define NFS4_EXCHANGE_ID_LEN   (48)
 struct nfs41_exchange_id_args {
        struct nfs_client               *client;
@@ -1070,13 +1108,13 @@ struct nfs41_exchange_id_args {
        u32                             flags;
 };
 
-struct server_owner {
+struct nfs41_server_owner {
        uint64_t                        minor_id;
        uint32_t                        major_id_sz;
        char                            major_id[NFS4_OPAQUE_LIMIT];
 };
 
-struct server_scope {
+struct nfs41_server_scope {
        uint32_t                        server_scope_sz;
        char                            server_scope[NFS4_OPAQUE_LIMIT];
 };
@@ -1087,10 +1125,18 @@ struct nfs41_impl_id {
        struct nfstime4                 date;
 };
 
+struct nfs41_bind_conn_to_session_res {
+       struct nfs4_session             *session;
+       u32                             dir;
+       bool                            use_conn_in_rdma_mode;
+};
+
 struct nfs41_exchange_id_res {
-       struct nfs_client               *client;
+       u64                             clientid;
+       u32                             seqid;
        u32                             flags;
-       struct server_scope             *server_scope;
+       struct nfs41_server_owner       *server_owner;
+       struct nfs41_server_scope       *server_scope;
        struct nfs41_impl_id            *impl_id;
 };
 
@@ -1143,35 +1189,114 @@ struct nfs41_free_stateid_res {
        struct nfs4_sequence_res        seq_res;
 };
 
+#else
+
+struct pnfs_ds_commit_info {
+};
+
 #endif /* CONFIG_NFS_V4_1 */
 
 struct nfs_page;
 
 #define NFS_PAGEVEC_SIZE       (8U)
 
+struct nfs_page_array {
+       struct page             **pagevec;
+       unsigned int            npages;         /* Max length of pagevec */
+       struct page             *page_array[NFS_PAGEVEC_SIZE];
+};
+
 struct nfs_read_data {
+       struct nfs_pgio_header  *header;
+       struct list_head        list;
        struct rpc_task         task;
-       struct inode            *inode;
-       struct rpc_cred         *cred;
        struct nfs_fattr        fattr;  /* fattr storage */
-       struct list_head        pages;  /* Coalesced read requests */
-       struct list_head        list;   /* lists of struct nfs_read_data */
-       struct nfs_page         *req;   /* multi ops per nfs_page */
-       struct page             **pagevec;
-       unsigned int            npages; /* Max length of pagevec */
        struct nfs_readargs args;
        struct nfs_readres  res;
        unsigned long           timestamp;      /* For lease renewal */
-       struct pnfs_layout_segment *lseg;
-       struct nfs_client       *ds_clp;        /* pNFS data server */
-       const struct rpc_call_ops *mds_ops;
        int (*read_done_cb) (struct rpc_task *task, struct nfs_read_data *data);
        __u64                   mds_offset;
+       struct nfs_page_array   pages;
+       struct nfs_client       *ds_clp;        /* pNFS data server */
+};
+
+/* used as flag bits in nfs_pgio_header */
+enum {
+       NFS_IOHDR_ERROR = 0,
+       NFS_IOHDR_EOF,
+       NFS_IOHDR_REDO,
+       NFS_IOHDR_NEED_COMMIT,
+       NFS_IOHDR_NEED_RESCHED,
+};
+
+struct nfs_pgio_header {
+       struct inode            *inode;
+       struct rpc_cred         *cred;
+       struct list_head        pages;
+       struct list_head        rpc_list;
+       atomic_t                refcnt;
+       struct nfs_page         *req;
+       struct pnfs_layout_segment *lseg;
+       loff_t                  io_start;
+       const struct rpc_call_ops *mds_ops;
+       void (*release) (struct nfs_pgio_header *hdr);
+       const struct nfs_pgio_completion_ops *completion_ops;
+       struct nfs_direct_req   *dreq;
+       spinlock_t              lock;
+       /* fields protected by lock */
        int                     pnfs_error;
-       struct page             *page_array[NFS_PAGEVEC_SIZE];
+       int                     error;          /* merge with pnfs_error */
+       unsigned long           good_bytes;     /* boundary of good data */
+       unsigned long           flags;
+};
+
+struct nfs_read_header {
+       struct nfs_pgio_header  header;
+       struct nfs_read_data    rpc_data;
 };
 
 struct nfs_write_data {
+       struct nfs_pgio_header  *header;
+       struct list_head        list;
+       struct rpc_task         task;
+       struct nfs_fattr        fattr;
+       struct nfs_writeverf    verf;
+       struct nfs_writeargs    args;           /* argument struct */
+       struct nfs_writeres     res;            /* result struct */
+       unsigned long           timestamp;      /* For lease renewal */
+       int (*write_done_cb) (struct rpc_task *task, struct nfs_write_data *data);
+       __u64                   mds_offset;     /* Filelayout dense stripe */
+       struct nfs_page_array   pages;
+       struct nfs_client       *ds_clp;        /* pNFS data server */
+};
+
+struct nfs_write_header {
+       struct nfs_pgio_header  header;
+       struct nfs_write_data   rpc_data;
+};
+
+struct nfs_mds_commit_info {
+       atomic_t rpcs_out;
+       unsigned long           ncommit;
+       struct list_head        list;
+};
+
+struct nfs_commit_data;
+struct nfs_inode;
+struct nfs_commit_completion_ops {
+       void (*error_cleanup) (struct nfs_inode *nfsi);
+       void (*completion) (struct nfs_commit_data *data);
+};
+
+struct nfs_commit_info {
+       spinlock_t                      *lock;
+       struct nfs_mds_commit_info      *mds;
+       struct pnfs_ds_commit_info      *ds;
+       struct nfs_direct_req           *dreq;  /* O_DIRECT request */
+       const struct nfs_commit_completion_ops *completion_ops;
+};
+
+struct nfs_commit_data {
        struct rpc_task         task;
        struct inode            *inode;
        struct rpc_cred         *cred;
@@ -1179,22 +1304,22 @@ struct nfs_write_data {
        struct nfs_writeverf    verf;
        struct list_head        pages;          /* Coalesced requests we wish to flush */
        struct list_head        list;           /* lists of struct nfs_write_data */
-       struct nfs_page         *req;           /* multi ops per nfs_page */
-       struct page             **pagevec;
-       unsigned int            npages;         /* Max length of pagevec */
-       struct nfs_writeargs    args;           /* argument struct */
-       struct nfs_writeres     res;            /* result struct */
+       struct nfs_direct_req   *dreq;          /* O_DIRECT request */
+       struct nfs_commitargs   args;           /* argument struct */
+       struct nfs_commitres    res;            /* result struct */
+       struct nfs_open_context *context;
        struct pnfs_layout_segment *lseg;
        struct nfs_client       *ds_clp;        /* pNFS data server */
        int                     ds_commit_index;
        const struct rpc_call_ops *mds_ops;
-       int (*write_done_cb) (struct rpc_task *task, struct nfs_write_data *data);
-#ifdef CONFIG_NFS_V4
-       unsigned long           timestamp;      /* For lease renewal */
-#endif
-       __u64                   mds_offset;     /* Filelayout dense stripe */
-       int                     pnfs_error;
-       struct page             *page_array[NFS_PAGEVEC_SIZE];
+       const struct nfs_commit_completion_ops *completion_ops;
+       int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data);
+};
+
+struct nfs_pgio_completion_ops {
+       void    (*error_cleanup)(struct list_head *head);
+       void    (*init_hdr)(struct nfs_pgio_header *hdr);
+       void    (*completion)(struct nfs_pgio_header *hdr);
 };
 
 struct nfs_unlinkdata {
@@ -1234,11 +1359,13 @@ struct nfs_rpc_ops {
 
        int     (*getroot) (struct nfs_server *, struct nfs_fh *,
                            struct nfs_fsinfo *);
+       struct vfsmount *(*submount) (struct nfs_server *, struct dentry *,
+                                     struct nfs_fh *, struct nfs_fattr *);
        int     (*getattr) (struct nfs_server *, struct nfs_fh *,
                            struct nfs_fattr *);
        int     (*setattr) (struct dentry *, struct nfs_fattr *,
                            struct iattr *);
-       int     (*lookup)  (struct rpc_clnt *clnt, struct inode *, struct qstr *,
+       int     (*lookup)  (struct inode *, struct qstr *,
                            struct nfs_fh *, struct nfs_fattr *);
        int     (*access)  (struct inode *, struct nfs_access_entry *);
        int     (*readlink)(struct inode *, struct page *, unsigned int,
@@ -1277,8 +1404,9 @@ struct nfs_rpc_ops {
        void    (*write_setup)  (struct nfs_write_data *, struct rpc_message *);
        void    (*write_rpc_prepare)(struct rpc_task *, struct nfs_write_data *);
        int     (*write_done)  (struct rpc_task *, struct nfs_write_data *);
-       void    (*commit_setup) (struct nfs_write_data *, struct rpc_message *);
-       int     (*commit_done) (struct rpc_task *, struct nfs_write_data *);
+       void    (*commit_setup) (struct nfs_commit_data *, struct rpc_message *);
+       void    (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *);
+       int     (*commit_done) (struct rpc_task *, struct nfs_commit_data *);
        int     (*lock)(struct file *, int, struct file_lock *);
        int     (*lock_check_bounds)(const struct file_lock *);
        void    (*clear_acl_cache)(struct inode *);
@@ -1287,9 +1415,9 @@ struct nfs_rpc_ops {
                                struct nfs_open_context *ctx,
                                int open_flags,
                                struct iattr *iattr);
-       int     (*init_client) (struct nfs_client *, const struct rpc_timeout *,
-                               const char *, rpc_authflavor_t, int);
-       int     (*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
+       struct nfs_client *
+               (*init_client) (struct nfs_client *, const struct rpc_timeout *,
+                               const char *, rpc_authflavor_t);
 };
 
 /*
index f85308e688fd712f039ac45a5f442240113ad0bf..e33f747b173c500d02639dfd5257de093a79fff0 100644 (file)
@@ -103,6 +103,7 @@ struct svc_export {
        struct nfsd4_fs_locations ex_fslocs;
        int                     ex_nflavors;
        struct exp_flavor_info  ex_flavors[MAX_SECINFO_LIST];
+       struct cache_detail     *cd;
 };
 
 /* an "export key" (expkey) maps a filehandlefragement to an
@@ -129,24 +130,22 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp);
 /*
  * Function declarations
  */
-int                    nfsd_export_init(void);
-void                   nfsd_export_shutdown(void);
-void                   nfsd_export_flush(void);
+int                    nfsd_export_init(struct net *);
+void                   nfsd_export_shutdown(struct net *);
+void                   nfsd_export_flush(struct net *);
 struct svc_export *    rqst_exp_get_by_name(struct svc_rqst *,
                                             struct path *);
 struct svc_export *    rqst_exp_parent(struct svc_rqst *,
                                        struct path *);
 struct svc_export *    rqst_find_fsidzero_export(struct svc_rqst *);
-int                    exp_rootfh(struct auth_domain *, 
+int                    exp_rootfh(struct net *, struct auth_domain *,
                                        char *path, struct knfsd_fh *, int maxsize);
 __be32                 exp_pseudoroot(struct svc_rqst *, struct svc_fh *);
 __be32                 nfserrno(int errno);
 
-extern struct cache_detail svc_export_cache;
-
 static inline void exp_put(struct svc_export *exp)
 {
-       cache_put(&exp->h, &svc_export_cache);
+       cache_put(&exp->h, exp->cd);
 }
 
 static inline void exp_get(struct svc_export *exp)
index 0efe8d465f555d4a2c0f5e7ac26d97c28867db68..1cb775f8e663d6d2dc60d79b9be79ddaae480273 100644 (file)
@@ -20,6 +20,10 @@ extern void of_i2c_register_devices(struct i2c_adapter *adap);
 /* must call put_device() when done with returned i2c_client device */
 extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
 
+/* must call put_device() when done with returned i2c_adapter device */
+extern struct i2c_adapter *of_find_i2c_adapter_by_node(
+                                               struct device_node *node);
+
 #else
 static inline void of_i2c_register_devices(struct i2c_adapter *adap)
 {
index f93e21700d3eeeb4848f3ff3419fee6de59fe2be..bb115deb7612815d016aa2ec99c0bbe7c1cab212 100644 (file)
@@ -5,7 +5,7 @@
 
 struct pci_dev;
 struct of_irq;
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
+int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq);
 
 struct device_node;
 struct device_node *of_pci_find_child_device(struct device_node *parent,
index 3d7647536b0304ba40013aa2399d551b6779b4cf..e4c29bc72e70297af00eb276538840e72b163eda 100644 (file)
@@ -43,8 +43,9 @@ enum oom_constraint {
 extern void compare_swap_oom_score_adj(int old_val, int new_val);
 extern int test_set_oom_score_adj(int new_val);
 
-extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
-                       const nodemask_t *nodemask, unsigned long totalpages);
+extern unsigned long oom_badness(struct task_struct *p,
+               struct mem_cgroup *memcg, const nodemask_t *nodemask,
+               unsigned long totalpages);
 extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 
index 051c1b1ede4e668651f5310a88771115087dcdc9..3bdcab30ca4121e5067cb2eac291d039c6eee07e 100644 (file)
@@ -3,7 +3,7 @@
 
 /*
  * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
- * If specified range includes migrate types other than MOVABLE,
+ * If specified range includes migrate types other than MOVABLE or CMA,
  * this will fail with -EBUSY.
  *
  * For isolating all pages in the range finally, the caller have to
  * test it.
  */
 extern int
-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
+start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+                        unsigned migratetype);
 
 /*
  * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
  * target range is [start_pfn, end_pfn)
  */
 extern int
-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
+undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+                       unsigned migratetype);
 
 /*
- * test all pages in [start_pfn, end_pfn)are isolated or not.
+ * Test all pages in [start_pfn, end_pfn) are isolated or not.
  */
-extern int
-test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
+int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
 
 /*
- * Internal funcs.Changes pageblock's migrate type.
- * Please use make_pagetype_isolated()/make_pagetype_movable().
+ * Internal functions. Changes pageblock's migrate type.
  */
 extern int set_migratetype_isolate(struct page *page);
-extern void unset_migratetype_isolate(struct page *page);
+extern void unset_migratetype_isolate(struct page *page, unsigned migratetype);
 
 
 #endif
index efa26b4da8d2b9d2479284bc9cbaeff61913d634..7cfad3bbb0cc214d37c6312a86a0a317fde12edd 100644 (file)
@@ -460,11 +460,11 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
  */
 static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
 {
-       int ret;
+       int ret = 0;
        char __user *end = uaddr + size - 1;
 
        if (unlikely(size == 0))
-               return 0;
+               return ret;
 
        /*
         * Writing zeroes into userspace here is OK, because we know that if
@@ -489,11 +489,11 @@ static inline int fault_in_multipages_readable(const char __user *uaddr,
                                               int size)
 {
        volatile char c;
-       int ret;
+       int ret = 0;
        const char __user *end = uaddr + size - 1;
 
        if (unlikely(size == 0))
-               return 0;
+               return ret;
 
        while (uaddr <= end) {
                ret = __get_user(c, uaddr);
index 17b7b5b01b4ad5feaf5ba91ef554406496b9fd91..d8c379dba6adbb36ae9df6c18adea304e2c6b45c 100644 (file)
@@ -687,7 +687,7 @@ int __must_check pci_bus_add_device(struct pci_dev *dev);
 void pci_read_bridge_bases(struct pci_bus *child);
 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
                                          struct resource *res);
-u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin);
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
 extern struct pci_dev *pci_dev_get(struct pci_dev *dev);
@@ -1692,7 +1692,8 @@ extern void pci_release_bus_of_node(struct pci_bus *bus);
 /* Arch may override this (weak) */
 extern struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus);
 
-static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
+static inline struct device_node *
+pci_device_to_OF_node(const struct pci_dev *pdev)
 {
        return pdev ? pdev->dev.of_node : NULL;
 }
index 3329965ed63f3aa1c2b54e49d82917d8f34a3839..ab741b0d007402daf8feee824e0aae91751bafe6 100644 (file)
 #define PCI_DEVICE_ID_INTEL_MRST_SD2   0x084F
 #define PCI_DEVICE_ID_INTEL_I960       0x0960
 #define PCI_DEVICE_ID_INTEL_I960RM     0x0962
+#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB      0x0c60
 #define PCI_DEVICE_ID_INTEL_8257X_SOL  0x1062
 #define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085
 #define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F
index 4f75e531c112c176b7a29146c6581e857442dba2..241065c9ce51832962f0fce4c93696d4b09dcb9d 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/power_supply.h>
 
 enum data_source {
+       CM_BATTERY_PRESENT,
+       CM_NO_BATTERY,
        CM_FUEL_GAUGE,
        CM_CHARGER_STAT,
 };
@@ -29,6 +31,16 @@ enum polling_modes {
        CM_POLL_CHARGING_ONLY,
 };
 
+enum cm_event_types {
+       CM_EVENT_UNKNOWN = 0,
+       CM_EVENT_BATT_FULL,
+       CM_EVENT_BATT_IN,
+       CM_EVENT_BATT_OUT,
+       CM_EVENT_EXT_PWR_IN_OUT,
+       CM_EVENT_CHG_START_STOP,
+       CM_EVENT_OTHERS,
+};
+
 /**
  * struct charger_global_desc
  * @rtc_name: the name of RTC used to wake up the system from suspend.
@@ -38,11 +50,18 @@ enum polling_modes {
  *     rtc_only_wakeup() returning false.
  *     If the RTC given to CM is the only wakeup reason,
  *     rtc_only_wakeup should return true.
+ * @assume_timer_stops_in_suspend:
+ *     Assume that the jiffy timer stops in suspend-to-RAM.
+ *     When enabled, CM does not rely on jiffies value in
+ *     suspend_again and assumes that jiffies value does not
+ *     change during suspend.
  */
 struct charger_global_desc {
        char *rtc_name;
 
        bool (*rtc_only_wakeup)(void);
+
+       bool assume_timer_stops_in_suspend;
 };
 
 /**
@@ -50,6 +69,11 @@ struct charger_global_desc {
  * @psy_name: the name of power-supply-class for charger manager
  * @polling_mode:
  *     Determine which polling mode will be used
+ * @fullbatt_vchkdrop_ms:
+ * @fullbatt_vchkdrop_uV:
+ *     Check voltage drop after the battery is fully charged.
+ *     If it has dropped more than fullbatt_vchkdrop_uV after
+ *     fullbatt_vchkdrop_ms, CM will restart charging.
  * @fullbatt_uV: voltage in microvolt
  *     If it is not being charged and VBATT >= fullbatt_uV,
  *     it is assumed to be full.
@@ -76,6 +100,8 @@ struct charger_desc {
        enum polling_modes polling_mode;
        unsigned int polling_interval_ms;
 
+       unsigned int fullbatt_vchkdrop_ms;
+       unsigned int fullbatt_vchkdrop_uV;
        unsigned int fullbatt_uV;
 
        enum data_source battery_present;
@@ -101,6 +127,11 @@ struct charger_desc {
  * @fuel_gauge: power_supply for fuel gauge
  * @charger_stat: array of power_supply for chargers
  * @charger_enabled: the state of charger
+ * @fullbatt_vchk_jiffies_at:
+ *     jiffies at the time full battery check will occur.
+ * @fullbatt_vchk_uV: voltage in microvolt
+ *     criteria for full battery
+ * @fullbatt_vchk_work: work queue for full battery check
  * @emergency_stop:
  *     When setting true, stop charging
  * @last_temp_mC: the measured temperature in milli-Celsius
@@ -121,6 +152,10 @@ struct charger_manager {
 
        bool charger_enabled;
 
+       unsigned long fullbatt_vchk_jiffies_at;
+       unsigned int fullbatt_vchk_uV;
+       struct delayed_work fullbatt_vchk_work;
+
        int emergency_stop;
        int last_temp_mC;
 
@@ -134,14 +169,13 @@ struct charger_manager {
 #ifdef CONFIG_CHARGER_MANAGER
 extern int setup_charger_manager(struct charger_global_desc *gd);
 extern bool cm_suspend_again(void);
+extern void cm_notify_event(struct power_supply *psy,
+                               enum cm_event_types type, char *msg);
 #else
-static void __maybe_unused setup_charger_manager(struct charger_global_desc *gd)
-{ }
-
-static bool __maybe_unused cm_suspend_again(void)
-{
-       return false;
-}
+static inline int setup_charger_manager(struct charger_global_desc *gd)
+{ return 0; }
+static inline bool cm_suspend_again(void) { return false; }
+static inline void cm_notify_event(struct power_supply *psy,
+                               enum cm_event_types type, char *msg) { }
 #endif
-
 #endif /* _CHARGER_MANAGER_H */
index e01b167e66f068223f86321109b77687ce5ef50c..89dd84f47c6ed6041cde8b9259c8e737072a6ffa 100644 (file)
@@ -116,6 +116,18 @@ enum max17042_register {
        MAX17042_VFSOC          = 0xFF,
 };
 
+/* Registers specific to max17047/50 */
+enum max17047_register {
+       MAX17047_QRTbl00        = 0x12,
+       MAX17047_FullSOCThr     = 0x13,
+       MAX17047_QRTbl10        = 0x22,
+       MAX17047_QRTbl20        = 0x32,
+       MAX17047_V_empty        = 0x3A,
+       MAX17047_QRTbl30        = 0x42,
+};
+
+enum max170xx_chip_type {MAX17042, MAX17047};
+
 /*
  * used for setting a register to a desired value
  * addr : address for a register
@@ -144,6 +156,7 @@ struct max17042_config_data {
        u16     shdntimer;      /* 0x03F */
 
        /* App data */
+       u16     full_soc_thresh;        /* 0x13 */
        u16     design_cap;     /* 0x18 */
        u16     ichgt_term;     /* 0x1E */
 
@@ -162,6 +175,10 @@ struct max17042_config_data {
        u16     lavg_empty;     /* 0x36 */
        u16     dqacc;          /* 0x45 */
        u16     dpacc;          /* 0x46 */
+       u16     qrtbl00;        /* 0x12 */
+       u16     qrtbl10;        /* 0x22 */
+       u16     qrtbl20;        /* 0x32 */
+       u16     qrtbl30;        /* 0x42 */
 
        /* Cell technology from power_supply.h */
        u16     cell_technology;
index c38c13db8832e7b3c15440807e7e7719a5603792..3b912bee28d1693b8c6617f637354ed2869d306f 100644 (file)
@@ -96,6 +96,7 @@ enum power_supply_property {
        POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
        POWER_SUPPLY_PROP_VOLTAGE_NOW,
        POWER_SUPPLY_PROP_VOLTAGE_AVG,
+       POWER_SUPPLY_PROP_VOLTAGE_OCV,
        POWER_SUPPLY_PROP_CURRENT_MAX,
        POWER_SUPPLY_PROP_CURRENT_NOW,
        POWER_SUPPLY_PROP_CURRENT_AVG,
@@ -211,7 +212,7 @@ extern void power_supply_changed(struct power_supply *psy);
 extern int power_supply_am_i_supplied(struct power_supply *psy);
 extern int power_supply_set_battery_charged(struct power_supply *psy);
 
-#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
+#ifdef CONFIG_POWER_SUPPLY
 extern int power_supply_is_system_supplied(void);
 #else
 static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
@@ -261,6 +262,7 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp)
        case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
        case POWER_SUPPLY_PROP_VOLTAGE_NOW:
        case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+       case POWER_SUPPLY_PROP_VOLTAGE_OCV:
        case POWER_SUPPLY_PROP_POWER_NOW:
                return 1;
        default:
index 78b76e24cc7eed661d7696200c4c4f2291425a0d..711e0a30aaccc84b3a1bf5d15794b139433b5309 100644 (file)
 # define PR_SET_MM_START_STACK         5
 # define PR_SET_MM_START_BRK           6
 # define PR_SET_MM_BRK                 7
+# define PR_SET_MM_ARG_START           8
+# define PR_SET_MM_ARG_END             9
+# define PR_SET_MM_ENV_START           10
+# define PR_SET_MM_ENV_END             11
+# define PR_SET_MM_AUXV                        12
+# define PR_SET_MM_EXE_FILE            13
 
 /*
  * Set specific pid that is allowed to ptrace the current task.
index fb201896a8b07136db13bcef486fdb382b7ee3c2..7d7fbe2ef7822089c802c5654b4e0ec243f24a80 100644 (file)
@@ -119,7 +119,7 @@ int __must_check res_counter_charge_locked(struct res_counter *counter,
                                           unsigned long val, bool force);
 int __must_check res_counter_charge(struct res_counter *counter,
                unsigned long val, struct res_counter **limit_fail_at);
-int __must_check res_counter_charge_nofail(struct res_counter *counter,
+int res_counter_charge_nofail(struct res_counter *counter,
                unsigned long val, struct res_counter **limit_fail_at);
 
 /*
@@ -135,6 +135,9 @@ int __must_check res_counter_charge_nofail(struct res_counter *counter,
 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
 void res_counter_uncharge(struct res_counter *counter, unsigned long val);
 
+void res_counter_uncharge_until(struct res_counter *counter,
+                               struct res_counter *top,
+                               unsigned long val);
 /**
  * res_counter_margin - calculate chargeable space of a counter
  * @cnt: the counter
index 4d50611112ba118e69df87888a81f764aee4c346..a90ebadd9da055bb5130782246872a0ef53d8438 100644 (file)
@@ -20,6 +20,9 @@
 #include <linux/errno.h>
 #include <linux/device.h>
 #include <linux/rio_regs.h>
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+#include <linux/dmaengine.h>
+#endif
 
 #define RIO_NO_HOPCOUNT                -1
 #define RIO_INVALID_DESTID     0xffff
@@ -254,6 +257,9 @@ struct rio_mport {
        u32 phys_efptr;
        unsigned char name[40];
        void *priv;             /* Master port private data */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+       struct dma_device       dma;
+#endif
 };
 
 /**
@@ -395,6 +401,47 @@ union rio_pw_msg {
        u32 raw[RIO_PW_MSG_SIZE/sizeof(u32)];
 };
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+/**
+ * enum rio_write_type - RIO write transaction types used in DMA transfers
+ *
+ * Note: RapidIO specification defines write (NWRITE) and
+ * write-with-response (NWRITE_R) data transfer operations.
+ * Existing DMA controllers that service RapidIO may use one of these operations
+ * for entire data transfer or their combination with only the last data packet
+ * requires response.
+ */
+enum rio_write_type {
+       RDW_DEFAULT,            /* default method used by DMA driver */
+       RDW_ALL_NWRITE,         /* all packets use NWRITE */
+       RDW_ALL_NWRITE_R,       /* all packets use NWRITE_R */
+       RDW_LAST_NWRITE_R,      /* last packet uses NWRITE_R, others - NWRITE */
+};
+
+struct rio_dma_ext {
+       u16 destid;
+       u64 rio_addr;   /* low 64-bits of 66-bit RapidIO address */
+       u8  rio_addr_u;  /* upper 2-bits of 66-bit RapidIO address */
+       enum rio_write_type wr_type; /* preferred RIO write operation type */
+};
+
+struct rio_dma_data {
+       /* Local data (as scatterlist) */
+       struct scatterlist      *sg;    /* I/O scatter list */
+       unsigned int            sg_len; /* size of scatter list */
+       /* Remote device address (flat buffer) */
+       u64 rio_addr;   /* low 64-bits of 66-bit RapidIO address */
+       u8  rio_addr_u;  /* upper 2-bits of 66-bit RapidIO address */
+       enum rio_write_type wr_type; /* preferred RIO write operation type */
+};
+
+static inline struct rio_mport *dma_to_mport(struct dma_device *ddev)
+{
+       return container_of(ddev, struct rio_mport, dma);
+}
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
 /* Architecture and hardware-specific functions */
 extern int rio_register_mport(struct rio_mport *);
 extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
index 7f07470e1ed9443e20cbc1cafe0d696e04bdb8c6..31ad146be3168bd127bdd083a0288f940f1b154b 100644 (file)
@@ -377,6 +377,15 @@ void rio_unregister_driver(struct rio_driver *);
 struct rio_dev *rio_dev_get(struct rio_dev *);
 void rio_dev_put(struct rio_dev *);
 
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+extern struct dma_chan *rio_request_dma(struct rio_dev *rdev);
+extern void rio_release_dma(struct dma_chan *dchan);
+extern struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(
+               struct rio_dev *rdev, struct dma_chan *dchan,
+               struct rio_dma_data *data,
+               enum dma_transfer_direction direction, unsigned long flags);
+#endif
+
 /**
  * rio_name - Get the unique RIO device identifier
  * @rdev: RIO device
index fd07c4542cee4f60784081267fb069787919a5bc..3fce545df394c61b3c8a7f4babfdbbee212131a8 100644 (file)
@@ -173,8 +173,6 @@ enum ttu_flags {
 };
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
-bool is_vma_temporary_stack(struct vm_area_struct *vma);
-
 int try_to_unmap(struct page *, enum ttu_flags flags);
 int try_to_unmap_one(struct page *, struct vm_area_struct *,
                        unsigned long address, enum ttu_flags flags);
index fcabfb4873c8dd6e8466233de9d557be4bf2101e..f071b3922c67f7a253c0b5f978b4bec1b7a690df 100644 (file)
@@ -91,6 +91,9 @@ struct rtc_pll_info {
 #define RTC_PLL_GET    _IOR('p', 0x11, struct rtc_pll_info)  /* Get PLL correction */
 #define RTC_PLL_SET    _IOW('p', 0x12, struct rtc_pll_info)  /* Set PLL correction */
 
+#define RTC_VL_READ    _IOR('p', 0x13, int)    /* Voltage low detector */
+#define RTC_VL_CLR     _IO('p', 0x14)          /* Clear voltage low information */
+
 /* interrupt flags */
 #define RTC_IRQF 0x80  /* Any of the following is active */
 #define RTC_PF 0x40    /* Periodic interrupt */
diff --git a/include/linux/rtc/ds1307.h b/include/linux/rtc/ds1307.h
new file mode 100644 (file)
index 0000000..291b1c4
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * ds1307.h - platform_data for the ds1307 (and variants) rtc driver
+ * (C) Copyright 2012 by Wolfram Sang, Pengutronix e.K.
+ * same license as the driver
+ */
+
+#ifndef _LINUX_DS1307_H
+#define _LINUX_DS1307_H
+
+#include <linux/types.h>
+
+#define DS1307_TRICKLE_CHARGER_250_OHM 0x01
+#define DS1307_TRICKLE_CHARGER_2K_OHM  0x02
+#define DS1307_TRICKLE_CHARGER_4K_OHM  0x03
+#define DS1307_TRICKLE_CHARGER_NO_DIODE        0x04
+#define DS1307_TRICKLE_CHARGER_DIODE   0x08
+
+struct ds1307_platform_data {
+       u8 trickle_charger_setup;
+};
+
+#endif /* _LINUX_DS1307_H */
index f45c0b280b5d39873aaca3a3d67b1a01362adba8..f34437e835a7069dfdc4660dbed593ebb371d0e9 100644 (file)
@@ -1301,11 +1301,6 @@ struct task_struct {
        unsigned sched_reset_on_fork:1;
        unsigned sched_contributes_to_load:1;
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-       /* IRQ handler threads */
-       unsigned irq_thread:1;
-#endif
-
        pid_t pid;
        pid_t tgid;
 
@@ -1313,10 +1308,9 @@ struct task_struct {
        /* Canary value for the -fstack-protector gcc feature */
        unsigned long stack_canary;
 #endif
-
-       /* 
+       /*
         * pointers to (original) parent process, youngest child, younger sibling,
-        * older sibling, respectively.  (p->father can be replaced with 
+        * older sibling, respectively.  (p->father can be replaced with
         * p->real_parent->pid)
         */
        struct task_struct __rcu *real_parent; /* real parent process */
@@ -1363,8 +1357,6 @@ struct task_struct {
                                         * credentials (COW) */
        const struct cred __rcu *cred;  /* effective (overridable) subjective task
                                         * credentials (COW) */
-       struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
-
        char comm[TASK_COMM_LEN]; /* executable name excluding path
                                     - access with [gs]et_task_comm (which lock
                                       it with task_lock())
@@ -1400,6 +1392,8 @@ struct task_struct {
        int (*notifier)(void *priv);
        void *notifier_data;
        sigset_t *notifier_mask;
+       struct hlist_head task_works;
+
        struct audit_context *audit_context;
 #ifdef CONFIG_AUDITSYSCALL
        uid_t loginuid;
@@ -2213,6 +2207,20 @@ extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
 extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
 
+static inline void restore_saved_sigmask(void)
+{
+       if (test_and_clear_restore_sigmask())
+               __set_current_blocked(&current->saved_sigmask);
+}
+
+static inline sigset_t *sigmask_to_save(void)
+{
+       sigset_t *res = &current->blocked;
+       if (unlikely(test_restore_sigmask()))
+               res = &current->saved_sigmask;
+       return res;
+}
+
 static inline int kill_cad_pid(int sig, int priv)
 {
        return kill_pid(cad_pid, sig, priv);
index ab0e091ce5facf0047c57191f9e631fd5c4bb791..4e5a73cdbbef18463920022626931d02c0540eb9 100644 (file)
@@ -86,9 +86,9 @@ extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
 extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
 extern int cap_inode_need_killpriv(struct dentry *dentry);
 extern int cap_inode_killpriv(struct dentry *dentry);
-extern int cap_file_mmap(struct file *file, unsigned long reqprot,
-                        unsigned long prot, unsigned long flags,
-                        unsigned long addr, unsigned long addr_only);
+extern int cap_mmap_addr(unsigned long addr);
+extern int cap_mmap_file(struct file *file, unsigned long reqprot,
+                        unsigned long prot, unsigned long flags);
 extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
 extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
                          unsigned long arg4, unsigned long arg5);
@@ -586,15 +586,17 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     simple integer value.  When @arg represents a user space pointer, it
  *     should never be used by the security module.
  *     Return 0 if permission is granted.
- * @file_mmap :
+ * @mmap_addr :
+ *     Check permissions for a mmap operation at @addr.
+ *     @addr contains virtual address that will be used for the operation.
+ *     Return 0 if permission is granted.
+ * @mmap_file :
  *     Check permissions for a mmap operation.  The @file may be NULL, e.g.
  *     if mapping anonymous memory.
  *     @file contains the file structure for file to map (may be NULL).
  *     @reqprot contains the protection requested by the application.
  *     @prot contains the protection that will be applied by the kernel.
  *     @flags contains the operational flags.
- *     @addr contains virtual address that will be used for the operation.
- *     @addr_only contains a boolean: 0 if file-backed VMA, otherwise 1.
  *     Return 0 if permission is granted.
  * @file_mprotect:
  *     Check permissions before changing memory access permissions.
@@ -1481,10 +1483,10 @@ struct security_operations {
        void (*file_free_security) (struct file *file);
        int (*file_ioctl) (struct file *file, unsigned int cmd,
                           unsigned long arg);
-       int (*file_mmap) (struct file *file,
+       int (*mmap_addr) (unsigned long addr);
+       int (*mmap_file) (struct file *file,
                          unsigned long reqprot, unsigned long prot,
-                         unsigned long flags, unsigned long addr,
-                         unsigned long addr_only);
+                         unsigned long flags);
        int (*file_mprotect) (struct vm_area_struct *vma,
                              unsigned long reqprot,
                              unsigned long prot);
@@ -1743,9 +1745,9 @@ int security_file_permission(struct file *file, int mask);
 int security_file_alloc(struct file *file);
 void security_file_free(struct file *file);
 int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-int security_file_mmap(struct file *file, unsigned long reqprot,
-                       unsigned long prot, unsigned long flags,
-                       unsigned long addr, unsigned long addr_only);
+int security_mmap_file(struct file *file, unsigned long prot,
+                       unsigned long flags);
+int security_mmap_addr(unsigned long addr);
 int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
                           unsigned long prot);
 int security_file_lock(struct file *file, unsigned int cmd);
@@ -2181,13 +2183,15 @@ static inline int security_file_ioctl(struct file *file, unsigned int cmd,
        return 0;
 }
 
-static inline int security_file_mmap(struct file *file, unsigned long reqprot,
-                                    unsigned long prot,
-                                    unsigned long flags,
-                                    unsigned long addr,
-                                    unsigned long addr_only)
+static inline int security_mmap_file(struct file *file, unsigned long prot,
+                                    unsigned long flags)
+{
+       return 0;
+}
+
+static inline int security_mmap_addr(unsigned long addr)
 {
-       return cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
+       return cap_mmap_addr(addr);
 }
 
 static inline int security_file_mprotect(struct vm_area_struct *vma,
index 17046cc484bced6426c36b81f43ce43fdf0ffde4..26b424adc84299b6a53c9ad986e4d9350d16b68a 100644 (file)
@@ -250,12 +250,13 @@ extern long do_sigpending(void __user *, unsigned long);
 extern int do_sigtimedwait(const sigset_t *, siginfo_t *,
                                const struct timespec *);
 extern int sigprocmask(int, sigset_t *, sigset_t *);
-extern void set_current_blocked(const sigset_t *);
+extern void set_current_blocked(sigset_t *);
+extern void __set_current_blocked(const sigset_t *);
 extern int show_unhandled_signals;
 extern int sigsuspend(sigset_t *);
 
 extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
-extern void block_sigmask(struct k_sigaction *ka, int signr);
+extern void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka, struct pt_regs *regs, int stepping);
 extern void exit_signals(struct task_struct *tsk);
 
 extern struct kmem_cache *sighand_cachep;
index 0e501714d47fa1a885b79079d7eb82bbbb71d831..b534a1be540a0e254e39adf7443e2fdf3151b7f6 100644 (file)
@@ -1896,8 +1896,6 @@ static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
 {
        int delta = 0;
 
-       if (headroom < NET_SKB_PAD)
-               headroom = NET_SKB_PAD;
        if (headroom > skb_headroom(skb))
                delta = headroom - skb_headroom(skb);
 
index a595dce6b0c7596d1481e2c87a2b55028c66a449..67d5d94b783a4b4ba97b53fc9d0adf9fd885af34 100644 (file)
@@ -242,7 +242,7 @@ size_t ksize(const void *);
  */
 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
 {
-       if (size != 0 && n > ULONG_MAX / size)
+       if (size != 0 && n > SIZE_MAX / size)
                return NULL;
        return __kmalloc(n * size, flags);
 }
diff --git a/include/linux/spi/orion_spi.h b/include/linux/spi/orion_spi.h
deleted file mode 100644 (file)
index b4d9fa6..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * orion_spi.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __LINUX_SPI_ORION_SPI_H
-#define __LINUX_SPI_ORION_SPI_H
-
-struct orion_spi_info {
-       u32     tclk;           /* no <linux/clk.h> support yet */
-};
-
-
-#endif
diff --git a/include/linux/stmp_device.h b/include/linux/stmp_device.h
new file mode 100644 (file)
index 0000000..6cf7ec9
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * basic functions for devices following the "stmp" style register layout
+ *
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __STMP_DEVICE_H__
+#define __STMP_DEVICE_H__
+
+#define STMP_OFFSET_REG_SET    0x4
+#define STMP_OFFSET_REG_CLR    0x8
+#define STMP_OFFSET_REG_TOG    0xc
+
+extern int stmp_reset_block(void __iomem *);
+#endif /* __STMP_DEVICE_H__ */
index 51b29ac45a8e7b26583df0217ab37a0d939ad6da..40e0a273faea3c07470e19fd23673fda89543f9b 100644 (file)
@@ -232,7 +232,6 @@ struct svc_rqst {
        struct svc_pool *       rq_pool;        /* thread pool */
        struct svc_procedure *  rq_procinfo;    /* procedure info */
        struct auth_ops *       rq_authop;      /* authentication flavour */
-       u32                     rq_flavor;      /* pseudoflavor */
        struct svc_cred         rq_cred;        /* auth info */
        void *                  rq_xprt_ctxt;   /* transport specific context ptr */
        struct svc_deferred_req*rq_deferred;    /* deferred request we are replaying */
@@ -416,6 +415,7 @@ struct svc_procedure {
  */
 int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
 void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
+int svc_bind(struct svc_serv *serv, struct net *net);
 struct svc_serv *svc_create(struct svc_program *, unsigned int,
                            void (*shutdown)(struct svc_serv *, struct net *net));
 struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
index 548790e9113b317dbc8de0c46a691df3c0030269..dd74084a9799891309f54db25b8259ae3388f3c8 100644 (file)
 #include <linux/sunrpc/msg_prot.h>
 #include <linux/sunrpc/cache.h>
 #include <linux/hash.h>
+#include <linux/cred.h>
 
-#define SVC_CRED_NGROUPS       32
 struct svc_cred {
        uid_t                   cr_uid;
        gid_t                   cr_gid;
        struct group_info       *cr_group_info;
+       u32                     cr_flavor; /* pseudoflavor */
+       char                    *cr_principal; /* for gss */
 };
 
+static inline void free_svc_cred(struct svc_cred *cred)
+{
+       if (cred->cr_group_info)
+               put_group_info(cred->cr_group_info);
+       kfree(cred->cr_principal);
+}
+
 struct svc_rqst;               /* forward decl */
 struct in6_addr;
 
@@ -131,7 +140,7 @@ extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *ne
 extern struct auth_domain *auth_domain_find(char *name);
 extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr);
 extern int auth_unix_forget_old(struct auth_domain *dom);
-extern void svcauth_unix_purge(void);
+extern void svcauth_unix_purge(struct net *net);
 extern void svcauth_unix_info_release(struct svc_xprt *xpt);
 extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
 
index 7c32daa025eb07b644d8185a27c8ea10d8b7c55f..726aff1a52011fcdfd3ab1e11b8a82ff1dbea703 100644 (file)
@@ -22,7 +22,6 @@ int gss_svc_init_net(struct net *net);
 void gss_svc_shutdown_net(struct net *net);
 int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
 u32 svcauth_gss_flavor(struct auth_domain *dom);
-char *svc_gss_principal(struct svc_rqst *);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
index b1fd5c7925feab91948ee46f0b5140b4c0ae5399..b6661933e252643956cf3e8d389267f535d49653 100644 (file)
@@ -221,8 +221,8 @@ extern unsigned int nr_free_pagecache_pages(void);
 /* linux/mm/swap.c */
 extern void __lru_cache_add(struct page *, enum lru_list lru);
 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
-extern void lru_add_page_tail(struct zone* zone,
-                             struct page *page, struct page *page_tail);
+extern void lru_add_page_tail(struct page *page, struct page *page_tail,
+                             struct lruvec *lruvec);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
@@ -251,7 +251,7 @@ static inline void lru_cache_add_file(struct page *page)
 /* linux/mm/vmscan.c */
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                        gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file);
+extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
                                                  gfp_t gfp_mask, bool noswap);
 extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
@@ -351,31 +351,14 @@ extern int swap_type_of(dev_t, sector_t, struct block_device **);
 extern unsigned int count_swap_pages(int, int);
 extern sector_t map_swap_page(struct page *, struct block_device **);
 extern sector_t swapdev_block(int, pgoff_t);
+extern int page_swapcount(struct page *);
 extern int reuse_swap_page(struct page *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
-/* linux/mm/thrash.c */
-extern struct mm_struct *swap_token_mm;
-extern void grab_swap_token(struct mm_struct *);
-extern void __put_swap_token(struct mm_struct *);
-extern void disable_swap_token(struct mem_cgroup *memcg);
-
-static inline int has_swap_token(struct mm_struct *mm)
-{
-       return (mm == swap_token_mm);
-}
-
-static inline void put_swap_token(struct mm_struct *mm)
-{
-       if (has_swap_token(mm))
-               __put_swap_token(mm);
-}
-
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 extern void
 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
-extern int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep);
 #else
 static inline void
 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
@@ -462,6 +445,11 @@ static inline void delete_from_swap_cache(struct page *page)
 {
 }
 
+static inline int page_swapcount(struct page *page)
+{
+       return 0;
+}
+
 #define reuse_swap_page(page)  (page_mapcount(page) == 1)
 
 static inline int try_to_free_swap(struct page *page)
@@ -476,37 +464,11 @@ static inline swp_entry_t get_swap_page(void)
        return entry;
 }
 
-/* linux/mm/thrash.c */
-static inline void put_swap_token(struct mm_struct *mm)
-{
-}
-
-static inline void grab_swap_token(struct mm_struct *mm)
-{
-}
-
-static inline int has_swap_token(struct mm_struct *mm)
-{
-       return 0;
-}
-
-static inline void disable_swap_token(struct mem_cgroup *memcg)
-{
-}
-
 static inline void
 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
 {
 }
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-static inline int
-mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
-{
-       return 0;
-}
-#endif
-
 #endif /* CONFIG_SWAP */
 #endif /* __KERNEL__*/
 #endif /* _LINUX_SWAP_H */
index 3de3acb84a952ead111b90391756873efc15ebcb..19439c75c5b255751e2467b5405861763f131fd5 100644 (file)
@@ -858,4 +858,6 @@ asmlinkage long sys_process_vm_writev(pid_t pid,
                                      unsigned long riovcnt,
                                      unsigned long flags);
 
+asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type,
+                        unsigned long idx1, unsigned long idx2);
 #endif
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
new file mode 100644 (file)
index 0000000..294d5d5
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef _LINUX_TASK_WORK_H
+#define _LINUX_TASK_WORK_H
+
+#include <linux/list.h>
+#include <linux/sched.h>
+
+struct task_work;
+typedef void (*task_work_func_t)(struct task_work *);
+
+struct task_work {
+       struct hlist_node hlist;
+       task_work_func_t func;
+       void *data;
+};
+
+static inline void
+init_task_work(struct task_work *twork, task_work_func_t func, void *data)
+{
+       twork->func = func;
+       twork->data = data;
+}
+
+int task_work_add(struct task_struct *task, struct task_work *twork, bool);
+struct task_work *task_work_cancel(struct task_struct *, task_work_func_t);
+void task_work_run(void);
+
+static inline void exit_task_work(struct task_struct *task)
+{
+       if (unlikely(!hlist_empty(&task->task_works)))
+               task_work_run();
+}
+
+#endif /* _LINUX_TASK_WORK_H */
index db78775eff3b209b534d157d4cec7d90eec8b175..ccc1899bd62e991e4649b72e7145010f93f948f9 100644 (file)
@@ -8,6 +8,7 @@
 #define _LINUX_THREAD_INFO_H
 
 #include <linux/types.h>
+#include <linux/bug.h>
 
 struct timespec;
 struct compat_timespec;
@@ -125,10 +126,26 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
 static inline void set_restore_sigmask(void)
 {
        set_thread_flag(TIF_RESTORE_SIGMASK);
-       set_thread_flag(TIF_SIGPENDING);
+       WARN_ON(!test_thread_flag(TIF_SIGPENDING));
+}
+static inline void clear_restore_sigmask(void)
+{
+       clear_thread_flag(TIF_RESTORE_SIGMASK);
+}
+static inline bool test_restore_sigmask(void)
+{
+       return test_thread_flag(TIF_RESTORE_SIGMASK);
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+       return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
 }
 #endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */
 
+#ifndef HAVE_SET_RESTORE_SIGMASK
+#error "no set_restore_sigmask() provided and default one won't work"
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
index 51bd91d911c3b3233e90301a4092a84750827776..6a4d82bedb03d4f6e9742069c40324c0441265f0 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/sched.h>
 #include <linux/ptrace.h>
 #include <linux/security.h>
+#include <linux/task_work.h>
 struct linux_binprm;
 
 /*
@@ -153,7 +154,6 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
                ptrace_notify(SIGTRAP);
 }
 
-#ifdef TIF_NOTIFY_RESUME
 /**
  * set_notify_resume - cause tracehook_notify_resume() to be called
  * @task:              task that will call tracehook_notify_resume()
@@ -165,8 +165,10 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
  */
 static inline void set_notify_resume(struct task_struct *task)
 {
+#ifdef TIF_NOTIFY_RESUME
        if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
                kick_process(task);
+#endif
 }
 
 /**
@@ -184,7 +186,14 @@ static inline void set_notify_resume(struct task_struct *task)
  */
 static inline void tracehook_notify_resume(struct pt_regs *regs)
 {
+       /*
+        * The caller just cleared TIF_NOTIFY_RESUME. This barrier
+        * pairs with task_work_add()->set_notify_resume() after
+        * hlist_add_head(task->task_works);
+        */
+       smp_mb__after_clear_bit();
+       if (unlikely(!hlist_empty(&current->task_works)))
+               task_work_run();
 }
-#endif /* TIF_NOTIFY_RESUME */
 
 #endif /* <linux/tracehook.h> */
index 4990ef2b1fb75501a0af41d4199896279de136f1..9f47ab540f65e997b79b0a16c52332c564354234 100644 (file)
@@ -268,7 +268,6 @@ struct tty_struct {
        struct mutex ldisc_mutex;
        struct tty_ldisc *ldisc;
 
-       struct mutex legacy_mutex;
        struct mutex termios_mutex;
        spinlock_t ctrl_lock;
        /* Termios values are protected by the termios mutex */
@@ -606,12 +605,8 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
 
 /* tty_mutex.c */
 /* functions for preparation of BKL removal */
-extern void __lockfunc tty_lock(struct tty_struct *tty);
-extern void __lockfunc tty_unlock(struct tty_struct *tty);
-extern void __lockfunc tty_lock_pair(struct tty_struct *tty,
-                               struct tty_struct *tty2);
-extern void __lockfunc tty_unlock_pair(struct tty_struct *tty,
-                               struct tty_struct *tty2);
+extern void __lockfunc tty_lock(void) __acquires(tty_lock);
+extern void __lockfunc tty_unlock(void) __releases(tty_lock);
 
 /*
  * this shall be called only from where BTM is held (like close)
@@ -626,9 +621,9 @@ extern void __lockfunc tty_unlock_pair(struct tty_struct *tty,
 static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
                long timeout)
 {
-       tty_unlock(tty); /* tty->ops->close holds the BTM, drop it while waiting */
+       tty_unlock(); /* tty->ops->close holds the BTM, drop it while waiting */
        tty_wait_until_sent(tty, timeout);
-       tty_lock(tty);
+       tty_lock();
 }
 
 /*
@@ -643,16 +638,16 @@ static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
  *
  * Do not use in new code.
  */
-#define wait_event_interruptible_tty(tty, wq, condition)               \
+#define wait_event_interruptible_tty(wq, condition)                    \
 ({                                                                     \
        int __ret = 0;                                                  \
        if (!(condition)) {                                             \
-               __wait_event_interruptible_tty(tty, wq, condition, __ret);      \
+               __wait_event_interruptible_tty(wq, condition, __ret);   \
        }                                                               \
        __ret;                                                          \
 })
 
-#define __wait_event_interruptible_tty(tty, wq, condition, ret)                \
+#define __wait_event_interruptible_tty(wq, condition, ret)             \
 do {                                                                   \
        DEFINE_WAIT(__wait);                                            \
                                                                        \
@@ -661,9 +656,9 @@ do {                                                                        \
                if (condition)                                          \
                        break;                                          \
                if (!signal_pending(current)) {                         \
-                       tty_unlock(tty);                                        \
+                       tty_unlock();                                   \
                        schedule();                                     \
-                       tty_lock(tty);                                  \
+                       tty_lock();                                     \
                        continue;                                       \
                }                                                       \
                ret = -ERESTARTSYS;                                     \
index 7f480db60231a714b9e520f3a16856c5d4e4a5e1..9c1bd539ea70e780e0e926b54bfc9320d3ec34a4 100644 (file)
@@ -25,7 +25,7 @@ typedef __kernel_dev_t                dev_t;
 typedef __kernel_ino_t         ino_t;
 typedef __kernel_mode_t                mode_t;
 typedef unsigned short         umode_t;
-typedef __kernel_nlink_t       nlink_t;
+typedef __u32                  nlink_t;
 typedef __kernel_off_t         off_t;
 typedef __kernel_pid_t         pid_t;
 typedef __kernel_daddr_t       daddr_t;
index ac40716b44e9a2a9ee1c13d1e878a195ce7215b0..da70f0facd2b77215e79860e5af8104da30b03a2 100644 (file)
@@ -45,6 +45,8 @@ struct watchdog_info {
 #define        WDIOF_SETTIMEOUT        0x0080  /* Set timeout (in seconds) */
 #define        WDIOF_MAGICCLOSE        0x0100  /* Supports magic close char */
 #define        WDIOF_PRETIMEOUT        0x0200  /* Pretimeout (in seconds), get/set */
+#define        WDIOF_ALARMONLY         0x0400  /* Watchdog triggers a management or
+                                          other external alarm not a reboot */
 #define        WDIOF_KEEPALIVEPING     0x8000  /* Keep alive ping reply */
 
 #define        WDIOS_DISABLECARD       0x0001  /* Turn off the watchdog timer */
@@ -54,6 +56,8 @@ struct watchdog_info {
 #ifdef __KERNEL__
 
 #include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
 
 struct watchdog_ops;
 struct watchdog_device;
@@ -67,6 +71,8 @@ struct watchdog_device;
  * @status:    The routine that shows the status of the watchdog device.
  * @set_timeout:The routine for setting the watchdog devices timeout value.
  * @get_timeleft:The routine that get's the time that's left before a reset.
+ * @ref:       The ref operation for dyn. allocated watchdog_device structs
+ * @unref:     The unref operation for dyn. allocated watchdog_device structs
  * @ioctl:     The routines that handles extra ioctl calls.
  *
  * The watchdog_ops structure contains a list of low-level operations
@@ -84,11 +90,17 @@ struct watchdog_ops {
        unsigned int (*status)(struct watchdog_device *);
        int (*set_timeout)(struct watchdog_device *, unsigned int);
        unsigned int (*get_timeleft)(struct watchdog_device *);
+       void (*ref)(struct watchdog_device *);
+       void (*unref)(struct watchdog_device *);
        long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
 };
 
 /** struct watchdog_device - The structure that defines a watchdog device
  *
+ * @id:                The watchdog's ID. (Allocated by watchdog_register_device)
+ * @cdev:      The watchdog's Character device.
+ * @dev:       The device for our watchdog
+ * @parent:    The parent bus device
  * @info:      Pointer to a watchdog_info structure.
  * @ops:       Pointer to the list of watchdog operations.
  * @bootstatus:        Status of the watchdog device at boot.
@@ -96,6 +108,7 @@ struct watchdog_ops {
  * @min_timeout:The watchdog devices minimum timeout value.
  * @max_timeout:The watchdog devices maximum timeout value.
  * @driver-data:Pointer to the drivers private data.
+ * @lock:      Lock for watchdog core internal use only.
  * @status:    Field that contains the devices internal status bits.
  *
  * The watchdog_device structure contains all information about a
@@ -103,8 +116,15 @@ struct watchdog_ops {
  *
  * The driver-data field may not be accessed directly. It must be accessed
  * via the watchdog_set_drvdata and watchdog_get_drvdata helpers.
+ *
+ * The lock field is for watchdog core internal use only and should not be
+ * touched.
  */
 struct watchdog_device {
+       int id;
+       struct cdev cdev;
+       struct device *dev;
+       struct device *parent;
        const struct watchdog_info *info;
        const struct watchdog_ops *ops;
        unsigned int bootstatus;
@@ -112,12 +132,14 @@ struct watchdog_device {
        unsigned int min_timeout;
        unsigned int max_timeout;
        void *driver_data;
+       struct mutex lock;
        unsigned long status;
 /* Bit numbers for status flags */
 #define WDOG_ACTIVE            0       /* Is the watchdog running/active */
 #define WDOG_DEV_OPEN          1       /* Opened via /dev/watchdog ? */
 #define WDOG_ALLOW_RELEASE     2       /* Did we receive the magic char ? */
 #define WDOG_NO_WAY_OUT                3       /* Is 'nowayout' feature set ? */
+#define WDOG_UNREGISTERED      4       /* Has the device been unregistered */
 };
 
 #ifdef CONFIG_WATCHDOG_NOWAYOUT
@@ -128,6 +150,12 @@ struct watchdog_device {
 #define WATCHDOG_NOWAYOUT_INIT_STATUS  0
 #endif
 
+/* Use the following function to check whether or not the watchdog is active */
+static inline bool watchdog_active(struct watchdog_device *wdd)
+{
+       return test_bit(WDOG_ACTIVE, &wdd->status);
+}
+
 /* Use the following function to set the nowayout feature */
 static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool nowayout)
 {
index a2b84f598e2b1a3068c3c4ec422df05dcf094d6a..6d0a0fcd80e7fcc820042d996221fa7ad3442b10 100644 (file)
@@ -58,7 +58,6 @@ extern const char *wb_reason_name[];
  * in a manner such that unspecified fields are set to zero.
  */
 struct writeback_control {
-       enum writeback_sync_modes sync_mode;
        long nr_to_write;               /* Write this many pages, and decrement
                                           this for each page written */
        long pages_skipped;             /* Pages which were not written */
@@ -71,6 +70,8 @@ struct writeback_control {
        loff_t range_start;
        loff_t range_end;
 
+       enum writeback_sync_modes sync_mode;
+
        unsigned for_kupdate:1;         /* A kupdate writeback */
        unsigned for_background:1;      /* A background writeback */
        unsigned tagged_writepages:1;   /* tag-and-write to avoid livelock */
@@ -94,6 +95,7 @@ long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
                                enum wb_reason reason);
 long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
 void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
+void inode_wait_for_writeback(struct inode *inode);
 
 /* writeback.h requires fs.h; it, too, is not included from here. */
 static inline void wait_on_inode(struct inode *inode)
@@ -101,12 +103,6 @@ static inline void wait_on_inode(struct inode *inode)
        might_sleep();
        wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
 }
-static inline void inode_sync_wait(struct inode *inode)
-{
-       might_sleep();
-       wait_on_bit(&inode->i_state, __I_SYNC, inode_wait,
-                                                       TASK_UNINTERRUPTIBLE);
-}
 
 
 /*
index 9808877c2ab91a609a79494fbd4945dd8def5bd3..a7a683e30b64e6beb2bc87907c85576d85385007 100644 (file)
@@ -42,6 +42,7 @@
 #include <net/netlabel.h>
 #include <net/request_sock.h>
 #include <linux/atomic.h>
+#include <asm/unaligned.h>
 
 /* known doi values */
 #define CIPSO_V4_DOI_UNKNOWN          0x00000000
@@ -285,7 +286,33 @@ static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
 static inline int cipso_v4_validate(const struct sk_buff *skb,
                                    unsigned char **option)
 {
-       return -ENOSYS;
+       unsigned char *opt = *option;
+       unsigned char err_offset = 0;
+       u8 opt_len = opt[1];
+       u8 opt_iter;
+
+       if (opt_len < 8) {
+               err_offset = 1;
+               goto out;
+       }
+
+       if (get_unaligned_be32(&opt[2]) == 0) {
+               err_offset = 2;
+               goto out;
+       }
+
+       for (opt_iter = 6; opt_iter < opt_len;) {
+               if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
+                       err_offset = opt_iter + 1;
+                       goto out;
+               }
+               opt_iter += opt[opt_iter + 1];
+       }
+
+out:
+       *option = opt + err_offset;
+       return err_offset;
+
 }
 #endif /* CONFIG_NETLABEL */
 
index bed833d9796aed86bac5ca7d45ad53cde3447e52..8197eadca819633eb97f3919286a7e6b19bbcddd 100644 (file)
@@ -60,6 +60,7 @@ struct dst_entry {
 #define DST_NOCOUNT            0x0020
 #define DST_NOPEER             0x0040
 #define DST_FAKE_RTABLE                0x0080
+#define DST_XFRM_TUNNEL                0x0100
 
        short                   error;
        short                   obsolete;
index d89f0582b6b6f1a907d108712ddf44eaa13b08d6..4a45216995635cccc4a919b5e5506a87fe90a49c 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/list_nulls.h>
 #include <linux/timer.h>
 #include <linux/cache.h>
+#include <linux/bitops.h>
 #include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>      /* struct sk_buff */
@@ -921,12 +922,23 @@ struct proto {
 #endif
 };
 
+/*
+ * Bits in struct cg_proto.flags
+ */
+enum cg_proto_flags {
+       /* Currently active and new sockets should be assigned to cgroups */
+       MEMCG_SOCK_ACTIVE,
+       /* It was ever activated; we must disarm static keys on destruction */
+       MEMCG_SOCK_ACTIVATED,
+};
+
 struct cg_proto {
        void                    (*enter_memory_pressure)(struct sock *sk);
        struct res_counter      *memory_allocated;      /* Current allocated memory. */
        struct percpu_counter   *sockets_allocated;     /* Current number of sockets. */
        int                     *memory_pressure;
        long                    *sysctl_mem;
+       unsigned long           flags;
        /*
         * memcg field is used to find which memcg we belong directly
         * Each memcg struct can hold more than one cg_proto, so container_of
@@ -942,6 +954,16 @@ struct cg_proto {
 extern int proto_register(struct proto *prot, int alloc_slab);
 extern void proto_unregister(struct proto *prot);
 
+static inline bool memcg_proto_active(struct cg_proto *cg_proto)
+{
+       return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+}
+
+static inline bool memcg_proto_activated(struct cg_proto *cg_proto)
+{
+       return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags);
+}
+
 #ifdef SOCK_REFCNT_DEBUG
 static inline void sk_refcnt_debug_inc(struct sock *sk)
 {
diff --git a/include/scsi/fcoe_sysfs.h b/include/scsi/fcoe_sysfs.h
new file mode 100644 (file)
index 0000000..604cb9b
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2011-2012 Intel Corporation.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef FCOE_SYSFS
+#define FCOE_SYSFS
+
+#include <linux/if_ether.h>
+#include <linux/device.h>
+#include <scsi/fc/fc_fcoe.h>
+
+struct fcoe_ctlr_device;
+struct fcoe_fcf_device;
+
+struct fcoe_sysfs_function_template {
+       void (*get_fcoe_ctlr_link_fail)(struct fcoe_ctlr_device *);
+       void (*get_fcoe_ctlr_vlink_fail)(struct fcoe_ctlr_device *);
+       void (*get_fcoe_ctlr_miss_fka)(struct fcoe_ctlr_device *);
+       void (*get_fcoe_ctlr_symb_err)(struct fcoe_ctlr_device *);
+       void (*get_fcoe_ctlr_err_block)(struct fcoe_ctlr_device *);
+       void (*get_fcoe_ctlr_fcs_error)(struct fcoe_ctlr_device *);
+       void (*get_fcoe_ctlr_mode)(struct fcoe_ctlr_device *);
+       void (*get_fcoe_fcf_selected)(struct fcoe_fcf_device *);
+       void (*get_fcoe_fcf_vlan_id)(struct fcoe_fcf_device *);
+};
+
+#define dev_to_ctlr(d)                                 \
+       container_of((d), struct fcoe_ctlr_device, dev)
+
+enum fip_conn_type {
+       FIP_CONN_TYPE_UNKNOWN,
+       FIP_CONN_TYPE_FABRIC,
+       FIP_CONN_TYPE_VN2VN,
+};
+
+struct fcoe_ctlr_device {
+       u32                             id;
+
+       struct device                   dev;
+       struct fcoe_sysfs_function_template *f;
+
+       struct list_head                fcfs;
+       char                            work_q_name[20];
+       struct workqueue_struct         *work_q;
+       char                            devloss_work_q_name[20];
+       struct workqueue_struct         *devloss_work_q;
+       struct mutex                    lock;
+
+       int                             fcf_dev_loss_tmo;
+       enum fip_conn_type              mode;
+
+       /* expected in host order for displaying */
+       struct fcoe_fc_els_lesb         lesb;
+};
+
+static inline void *fcoe_ctlr_device_priv(const struct fcoe_ctlr_device *ctlr)
+{
+       return (void *)(ctlr + 1);
+}
+
+/* fcf states */
+enum fcf_state {
+       FCOE_FCF_STATE_UNKNOWN,
+       FCOE_FCF_STATE_DISCONNECTED,
+       FCOE_FCF_STATE_CONNECTED,
+       FCOE_FCF_STATE_DELETED,
+};
+
+struct fcoe_fcf_device {
+       u32                 id;
+       struct device       dev;
+       struct list_head    peers;
+       struct work_struct  delete_work;
+       struct delayed_work dev_loss_work;
+       u32                 dev_loss_tmo;
+       void                *priv;
+       enum fcf_state      state;
+
+       u64                 fabric_name;
+       u64                 switch_name;
+       u32                 fc_map;
+       u16                 vfid;
+       u8                  mac[ETH_ALEN];
+       u8                  priority;
+       u32                 fka_period;
+       u8                  selected;
+       u16                 vlan_id;
+};
+
+#define dev_to_fcf(d)                                  \
+       container_of((d), struct fcoe_fcf_device, dev)
+/* parentage should never be missing */
+#define fcoe_fcf_dev_to_ctlr_dev(x)            \
+       dev_to_ctlr((x)->dev.parent)
+#define fcoe_fcf_device_priv(x)                        \
+       ((x)->priv)
+
+struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+                           struct fcoe_sysfs_function_template *f,
+                           int priv_size);
+void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *);
+struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *,
+                                           struct fcoe_fcf_device *);
+void fcoe_fcf_device_delete(struct fcoe_fcf_device *);
+
+int __init fcoe_sysfs_setup(void);
+void __exit fcoe_sysfs_teardown(void);
+
+#endif /* FCOE_SYSFS */
index cfdb55f0937e37002d21be6eaf9f81833683aa4b..22b07cc99808562c86a3de2728ec0557291e56dc 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/random.h>
 #include <scsi/fc/fc_fcoe.h>
 #include <scsi/libfc.h>
+#include <scsi/fcoe_sysfs.h>
 
 #define FCOE_MAX_CMD_LEN       16      /* Supported CDB length */
 
@@ -158,9 +159,25 @@ struct fcoe_ctlr {
        spinlock_t ctlr_lock;
 };
 
+/**
+ * fcoe_ctlr_priv() - Return the private data from a fcoe_ctlr
+ * @ctlr: The fcoe_ctlr whose private data will be returned
+ */
+static inline void *fcoe_ctlr_priv(const struct fcoe_ctlr *ctlr)
+{
+       return (void *)(ctlr + 1);
+}
+
+#define fcoe_ctlr_to_ctlr_dev(x)                                       \
+       (struct fcoe_ctlr_device *)(((struct fcoe_ctlr_device *)(x)) - 1)
+
 /**
  * struct fcoe_fcf - Fibre-Channel Forwarder
  * @list:       list linkage
+ * @event_work:  Work for FC Transport actions queue
+ * @event:       The event to be processed
+ * @fip:         The controller that the FCF was discovered on
+ * @fcf_dev:     The associated fcoe_fcf_device instance
  * @time:       system time (jiffies) when an advertisement was last received
  * @switch_name: WWN of switch from advertisement
  * @fabric_name: WWN of fabric from advertisement
@@ -182,6 +199,9 @@ struct fcoe_ctlr {
  */
 struct fcoe_fcf {
        struct list_head list;
+       struct work_struct event_work;
+       struct fcoe_ctlr *fip;
+       struct fcoe_fcf_device *fcf_dev;
        unsigned long time;
 
        u64 switch_name;
@@ -198,6 +218,9 @@ struct fcoe_fcf {
        u8 fd_flags:1;
 };
 
+#define fcoe_fcf_to_fcf_dev(x)                 \
+       ((x)->fcf_dev)
+
 /**
  * struct fcoe_rport - VN2VN remote port
  * @time:      time of create or last beacon packet received from node
@@ -333,6 +356,10 @@ void fcoe_queue_timer(ulong lport);
 int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
                           struct fcoe_percpu_s *fps);
 
+/* FCoE Sysfs helpers */
+void fcoe_fcf_get_selected(struct fcoe_fcf_device *);
+void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *);
+
 /**
  * struct netdev_list
  * A mapping from netdevice to fcoe_transport
index aff64d82d713b44785240f8bc47697a336132bed..da6f2591c25e1ab11869244410df8c7b6704e339 100644 (file)
@@ -36,19 +36,17 @@ DECLARE_EVENT_CLASS(jbd_commit,
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
-               __field(        char,   sync_commit             )
                __field(        int,    transaction             )
        ),
 
        TP_fast_assign(
                __entry->dev            = journal->j_fs_dev->bd_dev;
-               __entry->sync_commit = commit_transaction->t_synchronous_commit;
                __entry->transaction    = commit_transaction->t_tid;
        ),
 
-       TP_printk("dev %d,%d transaction %d sync %d",
+       TP_printk("dev %d,%d transaction %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 __entry->transaction, __entry->sync_commit)
+                 __entry->transaction)
 );
 
 DEFINE_EVENT(jbd_commit, jbd_start_commit,
@@ -87,19 +85,17 @@ TRACE_EVENT(jbd_drop_transaction,
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
-               __field(        char,   sync_commit             )
                __field(        int,    transaction             )
        ),
 
        TP_fast_assign(
                __entry->dev            = journal->j_fs_dev->bd_dev;
-               __entry->sync_commit = commit_transaction->t_synchronous_commit;
                __entry->transaction    = commit_transaction->t_tid;
        ),
 
-       TP_printk("dev %d,%d transaction %d sync %d",
+       TP_printk("dev %d,%d transaction %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 __entry->transaction, __entry->sync_commit)
+                 __entry->transaction)
 );
 
 TRACE_EVENT(jbd_end_commit,
@@ -109,21 +105,19 @@ TRACE_EVENT(jbd_end_commit,
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
-               __field(        char,   sync_commit             )
                __field(        int,    transaction             )
                __field(        int,    head                    )
        ),
 
        TP_fast_assign(
                __entry->dev            = journal->j_fs_dev->bd_dev;
-               __entry->sync_commit = commit_transaction->t_synchronous_commit;
                __entry->transaction    = commit_transaction->t_tid;
                __entry->head           = journal->j_tail_sequence;
        ),
 
-       TP_printk("dev %d,%d transaction %d sync %d head %d",
+       TP_printk("dev %d,%d transaction %d head %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 __entry->transaction, __entry->sync_commit, __entry->head)
+                 __entry->transaction, __entry->head)
 );
 
 TRACE_EVENT(jbd_do_submit_data,
@@ -133,19 +127,17 @@ TRACE_EVENT(jbd_do_submit_data,
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
-               __field(        char,   sync_commit             )
                __field(        int,    transaction             )
        ),
 
        TP_fast_assign(
                __entry->dev            = journal->j_fs_dev->bd_dev;
-               __entry->sync_commit = commit_transaction->t_synchronous_commit;
                __entry->transaction    = commit_transaction->t_tid;
        ),
 
-       TP_printk("dev %d,%d transaction %d sync %d",
+       TP_printk("dev %d,%d transaction %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                  __entry->transaction, __entry->sync_commit)
+                  __entry->transaction)
 );
 
 TRACE_EVENT(jbd_cleanup_journal_tail,
@@ -177,24 +169,23 @@ TRACE_EVENT(jbd_cleanup_journal_tail,
                  __entry->block_nr, __entry->freed)
 );
 
-TRACE_EVENT(jbd_update_superblock_end,
-       TP_PROTO(journal_t *journal, int wait),
+TRACE_EVENT(journal_write_superblock,
+       TP_PROTO(journal_t *journal, int write_op),
 
-       TP_ARGS(journal, wait),
+       TP_ARGS(journal, write_op),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
-               __field(        int,    wait                    )
+               __field(        int,    write_op                )
        ),
 
        TP_fast_assign(
                __entry->dev            = journal->j_fs_dev->bd_dev;
-               __entry->wait           = wait;
+               __entry->write_op       = write_op;
        ),
 
-       TP_printk("dev %d,%d wait %d",
-                 MAJOR(__entry->dev), MINOR(__entry->dev),
-                  __entry->wait)
+       TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
+                 MINOR(__entry->dev), __entry->write_op)
 );
 
 #endif /* _TRACE_JBD_H */
index f64560e204bc1f84cd6c8ba5e6ba7f1f6938c656..bab3b87e4064e9fd804182474127c2a84e49ef29 100644 (file)
@@ -13,7 +13,7 @@
 #define RECLAIM_WB_ANON                0x0001u
 #define RECLAIM_WB_FILE                0x0002u
 #define RECLAIM_WB_MIXED       0x0010u
-#define RECLAIM_WB_SYNC                0x0004u
+#define RECLAIM_WB_SYNC                0x0004u /* Unused, all reclaim async */
 #define RECLAIM_WB_ASYNC       0x0008u
 
 #define show_reclaim_flags(flags)                              \
                {RECLAIM_WB_ASYNC,      "RECLAIM_WB_ASYNC"}     \
                ) : "RECLAIM_WB_NONE"
 
-#define trace_reclaim_flags(page, sync) ( \
+#define trace_reclaim_flags(page) ( \
        (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
-       (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC)   \
+       (RECLAIM_WB_ASYNC) \
        )
 
-#define trace_shrink_flags(file, sync) ( \
-       (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_MIXED : \
-                       (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) |  \
-       (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+#define trace_shrink_flags(file) \
+       ( \
+               (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
+               (RECLAIM_WB_ASYNC) \
        )
 
 TRACE_EVENT(mm_vmscan_kswapd_sleep,
@@ -263,22 +263,16 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
                unsigned long nr_requested,
                unsigned long nr_scanned,
                unsigned long nr_taken,
-               unsigned long nr_lumpy_taken,
-               unsigned long nr_lumpy_dirty,
-               unsigned long nr_lumpy_failed,
                isolate_mode_t isolate_mode,
                int file),
 
-       TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file),
+       TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file),
 
        TP_STRUCT__entry(
                __field(int, order)
                __field(unsigned long, nr_requested)
                __field(unsigned long, nr_scanned)
                __field(unsigned long, nr_taken)
-               __field(unsigned long, nr_lumpy_taken)
-               __field(unsigned long, nr_lumpy_dirty)
-               __field(unsigned long, nr_lumpy_failed)
                __field(isolate_mode_t, isolate_mode)
                __field(int, file)
        ),
@@ -288,22 +282,16 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
                __entry->nr_requested = nr_requested;
                __entry->nr_scanned = nr_scanned;
                __entry->nr_taken = nr_taken;
-               __entry->nr_lumpy_taken = nr_lumpy_taken;
-               __entry->nr_lumpy_dirty = nr_lumpy_dirty;
-               __entry->nr_lumpy_failed = nr_lumpy_failed;
                __entry->isolate_mode = isolate_mode;
                __entry->file = file;
        ),
 
-       TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu contig_taken=%lu contig_dirty=%lu contig_failed=%lu file=%d",
+       TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu file=%d",
                __entry->isolate_mode,
                __entry->order,
                __entry->nr_requested,
                __entry->nr_scanned,
                __entry->nr_taken,
-               __entry->nr_lumpy_taken,
-               __entry->nr_lumpy_dirty,
-               __entry->nr_lumpy_failed,
                __entry->file)
 );
 
@@ -313,13 +301,10 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,
                unsigned long nr_requested,
                unsigned long nr_scanned,
                unsigned long nr_taken,
-               unsigned long nr_lumpy_taken,
-               unsigned long nr_lumpy_dirty,
-               unsigned long nr_lumpy_failed,
                isolate_mode_t isolate_mode,
                int file),
 
-       TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file)
+       TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
 
 );
 
@@ -329,13 +314,10 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate,
                unsigned long nr_requested,
                unsigned long nr_scanned,
                unsigned long nr_taken,
-               unsigned long nr_lumpy_taken,
-               unsigned long nr_lumpy_dirty,
-               unsigned long nr_lumpy_failed,
                isolate_mode_t isolate_mode,
                int file),
 
-       TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file)
+       TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
 
 );
 
@@ -395,88 +377,6 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
                show_reclaim_flags(__entry->reclaim_flags))
 );
 
-TRACE_EVENT(replace_swap_token,
-       TP_PROTO(struct mm_struct *old_mm,
-                struct mm_struct *new_mm),
-
-       TP_ARGS(old_mm, new_mm),
-
-       TP_STRUCT__entry(
-               __field(struct mm_struct*,      old_mm)
-               __field(unsigned int,           old_prio)
-               __field(struct mm_struct*,      new_mm)
-               __field(unsigned int,           new_prio)
-       ),
-
-       TP_fast_assign(
-               __entry->old_mm   = old_mm;
-               __entry->old_prio = old_mm ? old_mm->token_priority : 0;
-               __entry->new_mm   = new_mm;
-               __entry->new_prio = new_mm->token_priority;
-       ),
-
-       TP_printk("old_token_mm=%p old_prio=%u new_token_mm=%p new_prio=%u",
-                 __entry->old_mm, __entry->old_prio,
-                 __entry->new_mm, __entry->new_prio)
-);
-
-DECLARE_EVENT_CLASS(put_swap_token_template,
-       TP_PROTO(struct mm_struct *swap_token_mm),
-
-       TP_ARGS(swap_token_mm),
-
-       TP_STRUCT__entry(
-               __field(struct mm_struct*, swap_token_mm)
-       ),
-
-       TP_fast_assign(
-               __entry->swap_token_mm = swap_token_mm;
-       ),
-
-       TP_printk("token_mm=%p", __entry->swap_token_mm)
-);
-
-DEFINE_EVENT(put_swap_token_template, put_swap_token,
-       TP_PROTO(struct mm_struct *swap_token_mm),
-       TP_ARGS(swap_token_mm)
-);
-
-DEFINE_EVENT_CONDITION(put_swap_token_template, disable_swap_token,
-       TP_PROTO(struct mm_struct *swap_token_mm),
-       TP_ARGS(swap_token_mm),
-       TP_CONDITION(swap_token_mm != NULL)
-);
-
-TRACE_EVENT_CONDITION(update_swap_token_priority,
-       TP_PROTO(struct mm_struct *mm,
-                unsigned int old_prio,
-                struct mm_struct *swap_token_mm),
-
-       TP_ARGS(mm, old_prio, swap_token_mm),
-
-       TP_CONDITION(mm->token_priority != old_prio),
-
-       TP_STRUCT__entry(
-               __field(struct mm_struct*, mm)
-               __field(unsigned int, old_prio)
-               __field(unsigned int, new_prio)
-               __field(struct mm_struct*, swap_token_mm)
-               __field(unsigned int, swap_token_prio)
-       ),
-
-       TP_fast_assign(
-               __entry->mm             = mm;
-               __entry->old_prio       = old_prio;
-               __entry->new_prio       = mm->token_priority;
-               __entry->swap_token_mm  = swap_token_mm;
-               __entry->swap_token_prio = swap_token_mm ? swap_token_mm->token_priority : 0;
-       ),
-
-       TP_printk("mm=%p old_prio=%u new_prio=%u swap_token_mm=%p token_prio=%u",
-                 __entry->mm, __entry->old_prio, __entry->new_prio,
-                 __entry->swap_token_mm, __entry->swap_token_prio)
-);
-
 #endif /* _TRACE_VMSCAN_H */
 
 /* This part must be outside protection */
index 7b81887b023f9deb9f46994722c03542ad97a6db..b453d92c225347f95605db68e33dd50b628fb0f0 100644 (file)
@@ -372,6 +372,35 @@ TRACE_EVENT(balance_dirty_pages,
          )
 );
 
+TRACE_EVENT(writeback_sb_inodes_requeue,
+
+       TP_PROTO(struct inode *inode),
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __array(char, name, 32)
+               __field(unsigned long, ino)
+               __field(unsigned long, state)
+               __field(unsigned long, dirtied_when)
+       ),
+
+       TP_fast_assign(
+               strncpy(__entry->name,
+                       dev_name(inode_to_bdi(inode)->dev), 32);
+               __entry->ino            = inode->i_ino;
+               __entry->state          = inode->i_state;
+               __entry->dirtied_when   = inode->dirtied_when;
+       ),
+
+       TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
+                 __entry->name,
+                 __entry->ino,
+                 show_inode_state(__entry->state),
+                 __entry->dirtied_when,
+                 (jiffies - __entry->dirtied_when) / HZ
+       )
+);
+
 DECLARE_EVENT_CLASS(writeback_congest_waited_template,
 
        TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
@@ -450,13 +479,6 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
        )
 );
 
-DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_requeue,
-       TP_PROTO(struct inode *inode,
-                struct writeback_control *wbc,
-                unsigned long nr_to_write),
-       TP_ARGS(inode, wbc, nr_to_write)
-);
-
 DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
        TP_PROTO(struct inode *inode,
                 struct writeback_control *wbc,
diff --git a/include/video/auo_k190xfb.h b/include/video/auo_k190xfb.h
new file mode 100644 (file)
index 0000000..609efe8
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * Definitions for AUO-K190X framebuffer drivers
+ *
+ * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_VIDEO_AUO_K190XFB_H_
+#define _LINUX_VIDEO_AUO_K190XFB_H_
+
+/* Controller standby command needs a param */
+#define AUOK190X_QUIRK_STANDBYPARAM    (1 << 0)
+
+/* Controller standby is completely broken */
+#define AUOK190X_QUIRK_STANDBYBROKEN   (1 << 1)
+
+/*
+ * Resolutions for the displays
+ */
+#define AUOK190X_RESOLUTION_800_600            0
+#define AUOK190X_RESOLUTION_1024_768           1
+
+/*
+ * struct used by auok190x. board specific stuff comes from *board
+ */
+struct auok190xfb_par {
+       struct fb_info *info;
+       struct auok190x_board *board;
+
+       struct regulator *regulator;
+
+       struct mutex io_lock;
+       struct delayed_work work;
+       wait_queue_head_t waitq;
+       int resolution;
+       int rotation;
+       int consecutive_threshold;
+       int update_cnt;
+
+       /* panel and controller informations */
+       int epd_type;
+       int panel_size_int;
+       int panel_size_float;
+       int panel_model;
+       int tcon_version;
+       int lut_version;
+
+       /* individual controller callbacks */
+       void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2);
+       void (*update_all)(struct auok190xfb_par *par);
+       bool (*need_refresh)(struct auok190xfb_par *par);
+       void (*init)(struct auok190xfb_par *par);
+       void (*recover)(struct auok190xfb_par *par);
+
+       int update_mode; /* mode to use for updates */
+       int last_mode; /* update mode last used */
+       int flash;
+
+       /* power management */
+       int autosuspend_delay;
+       bool standby;
+       bool manual_standby;
+};
+
+/**
+ * Board specific platform-data
+ * @init:              initialize the controller interface
+ * @cleanup:           cleanup the controller interface
+ * @wait_for_rdy:      wait until the controller is not busy anymore
+ * @set_ctl:           change an interface control
+ * @set_hdb:           write a value to the data register
+ * @get_hdb:           read a value from the data register
+ * @setup_irq:         method to setup the irq handling on the busy gpio
+ * @gpio_nsleep:       sleep gpio
+ * @gpio_nrst:         reset gpio
+ * @gpio_nbusy:                busy gpio
+ * @resolution:                one of the AUOK190X_RESOLUTION constants
+ * @rotation:          rotation of the framebuffer
+ * @quirks:            controller quirks to honor
+ * @fps:               frames per second for defio
+ */
+struct auok190x_board {
+       int (*init)(struct auok190xfb_par *);
+       void (*cleanup)(struct auok190xfb_par *);
+       int (*wait_for_rdy)(struct auok190xfb_par *);
+
+       void (*set_ctl)(struct auok190xfb_par *, unsigned char, u8);
+       void (*set_hdb)(struct auok190xfb_par *, u16);
+       u16 (*get_hdb)(struct auok190xfb_par *);
+
+       int (*setup_irq)(struct fb_info *);
+
+       int gpio_nsleep;
+       int gpio_nrst;
+       int gpio_nbusy;
+
+       int resolution;
+       int rotation;
+       int quirks;
+       int fps;
+};
+
+#endif
index 8847a9d6dd42db8d878a1235eef4e92019bad883..bd8cabd344db7242dc61bb5643dee70c6d349e09 100644 (file)
@@ -14,7 +14,7 @@
 
 #define DP_TIMEOUT_LOOP_COUNT 100
 #define MAX_CR_LOOP 5
-#define MAX_EQ_LOOP 4
+#define MAX_EQ_LOOP 5
 
 enum link_rate_type {
        LINK_RATE_1_62GBPS = 0x06,
index 772c770535f1ee02075b632a4bb1ca91bda078dc..83ce5e667d471077cdeda3ccfd9c4945bd1f5b04 100644 (file)
@@ -315,6 +315,7 @@ struct mipi_dsim_lcd_device {
        int                     id;
        int                     bus_id;
        int                     irq;
+       int                     panel_reverse;
 
        struct mipi_dsim_device *master;
        void                    *platform_data;
index 1c46a14341dd9892816a4398a274a7aa43cb5db1..c8e59b4a3364264df1719090162f19cf0b250dd8 100644 (file)
@@ -51,6 +51,8 @@
 
 struct omap_dss_device;
 struct omap_overlay_manager;
+struct snd_aes_iec958;
+struct snd_cea_861_aud_if;
 
 enum omap_display_type {
        OMAP_DISPLAY_TYPE_NONE          = 0,
@@ -158,6 +160,13 @@ enum omap_dss_display_state {
        OMAP_DSS_DISPLAY_SUSPENDED,
 };
 
+enum omap_dss_audio_state {
+       OMAP_DSS_AUDIO_DISABLED = 0,
+       OMAP_DSS_AUDIO_ENABLED,
+       OMAP_DSS_AUDIO_CONFIGURED,
+       OMAP_DSS_AUDIO_PLAYING,
+};
+
 /* XXX perhaps this should be removed */
 enum omap_dss_overlay_managers {
        OMAP_DSS_OVL_MGR_LCD,
@@ -166,8 +175,9 @@ enum omap_dss_overlay_managers {
 };
 
 enum omap_dss_rotation_type {
-       OMAP_DSS_ROT_DMA = 0,
-       OMAP_DSS_ROT_VRFB = 1,
+       OMAP_DSS_ROT_DMA        = 1 << 0,
+       OMAP_DSS_ROT_VRFB       = 1 << 1,
+       OMAP_DSS_ROT_TILER      = 1 << 2,
 };
 
 /* clockwise rotation angle */
@@ -309,6 +319,7 @@ struct omap_dss_board_info {
        struct omap_dss_device *default_device;
        int (*dsi_enable_pads)(int dsi_id, unsigned lane_mask);
        void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
+       int (*set_min_bus_tput)(struct device *dev, unsigned long r);
 };
 
 /* Init with the board info */
@@ -316,11 +327,6 @@ extern int omap_display_init(struct omap_dss_board_info *board_data);
 /* HDMI mux init*/
 extern int omap_hdmi_init(enum omap_hdmi_flags flags);
 
-struct omap_display_platform_data {
-       struct omap_dss_board_info *board_data;
-       /* TODO: Additional members to be added when PM is considered */
-};
-
 struct omap_video_timings {
        /* Unit: pixels */
        u16 x_res;
@@ -587,6 +593,8 @@ struct omap_dss_device {
 
        enum omap_dss_display_state state;
 
+       enum omap_dss_audio_state audio_state;
+
        /* platform specific  */
        int (*platform_enable)(struct omap_dss_device *dssdev);
        void (*platform_disable)(struct omap_dss_device *dssdev);
@@ -599,6 +607,11 @@ struct omap_dss_hdmi_data
        int hpd_gpio;
 };
 
+struct omap_dss_audio {
+       struct snd_aes_iec958 *iec;
+       struct snd_cea_861_aud_if *cea;
+};
+
 struct omap_dss_driver {
        struct device_driver driver;
 
@@ -646,6 +659,24 @@ struct omap_dss_driver {
 
        int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
        bool (*detect)(struct omap_dss_device *dssdev);
+
+       /*
+        * For display drivers that support audio. This encompasses
+        * HDMI and DisplayPort at the moment.
+        */
+       /*
+        * Note: These functions might sleep. Do not call while
+        * holding a spinlock/readlock.
+        */
+       int (*audio_enable)(struct omap_dss_device *dssdev);
+       void (*audio_disable)(struct omap_dss_device *dssdev);
+       bool (*audio_supported)(struct omap_dss_device *dssdev);
+       int (*audio_config)(struct omap_dss_device *dssdev,
+               struct omap_dss_audio *audio);
+       /* Note: These functions may not sleep */
+       int (*audio_start)(struct omap_dss_device *dssdev);
+       void (*audio_stop)(struct omap_dss_device *dssdev);
+
 };
 
 int omap_dss_register_driver(struct omap_dss_driver *);
@@ -670,6 +701,8 @@ struct omap_overlay *omap_dss_get_overlay(int num);
 void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
                u16 *xres, u16 *yres);
 int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings);
 
 typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
 int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
index 728f9de9c258255d868709fc8ae2eac8987c682a..63d20efa254a045d53993e146e6e2f33adf50c16 100644 (file)
@@ -18,9 +18,11 @@ struct clk;
 /*
  * flags format
  *
- * 0x0000000A
+ * 0x00000CBA
  *
  * A: Audio source select
+ * B: Int output option
+ * C: Chip specific option
  */
 
 /* Audio source select */
@@ -30,6 +32,14 @@ struct clk;
 #define HDMI_SND_SRC_DSD       (2 << 0)
 #define HDMI_SND_SRC_HBR       (3 << 0)
 
+/* Int output option */
+#define HDMI_OUTPUT_PUSH_PULL  (1 << 4) /* System control : output mode */
+#define HDMI_OUTPUT_POLARITY_HI        (1 << 5) /* System control : output polarity */
+
+/* Chip specific option */
+#define HDMI_32BIT_REG         (1 << 8)
+#define HDMI_HAS_HTOP1         (1 << 9)
+
 struct sh_mobile_hdmi_info {
        unsigned int                     flags;
        long (*clk_optimize_parent)(unsigned long target, unsigned long *best_freq,
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
new file mode 100644 (file)
index 0000000..48a9c01
--- /dev/null
@@ -0,0 +1,58 @@
+/******************************************************************************
+ * acpi.h
+ * acpi file for domain 0 kernel
+ *
+ * Copyright (c) 2011 Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ * Copyright (c) 2011 Yu Ke <ke.yu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _XEN_ACPI_H
+#define _XEN_ACPI_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_XEN_DOM0
+#include <asm/xen/hypervisor.h>
+#include <xen/xen.h>
+#include <linux/acpi.h>
+
+int xen_acpi_notify_hypervisor_state(u8 sleep_state,
+                                    u32 pm1a_cnt, u32 pm1b_cnd);
+
+static inline void xen_acpi_sleep_register(void)
+{
+       if (xen_initial_domain())
+               acpi_os_set_prepare_sleep(
+                       &xen_acpi_notify_hypervisor_state);
+}
+#else
+static inline void xen_acpi_sleep_register(void)
+{
+}
+#endif
+
+#endif /* _XEN_ACPI_H */
index 0f773708e02c034f624ca61a3e186bb8e9d68080..04399b28e821bf694a58d706e0b6c9f66e9c7d9f 100644 (file)
@@ -103,6 +103,9 @@ int xen_irq_from_pirq(unsigned pirq);
 /* Return the pirq allocated to the irq. */
 int xen_pirq_from_irq(unsigned irq);
 
+/* Return the irq allocated to the gsi */
+int xen_irq_from_gsi(unsigned gsi);
+
 /* Determine whether to ignore this IRQ if it is passed to a guest. */
 int xen_test_irq_shared(int irq);
 
index 15f8a00ff003953639a4caba0e97109b8db06b36..11e27c3af3cb45c63d9c689442096b4afd66c55f 100644 (file)
@@ -46,6 +46,8 @@
 
 #include <xen/features.h>
 
+#define GNTTAB_RESERVED_XENSTORE 1
+
 /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
 #define NR_GRANT_FRAMES 4
 
index ac5f0fe47ed9262d6aa79680f67ec7cb186c43c3..bbee8c6a349dec65d57c0f683fcb4d6c621111b5 100644 (file)
@@ -38,4 +38,7 @@
 #define IOCTL_XENBUS_BACKEND_EVTCHN                    \
        _IOC(_IOC_NONE, 'B', 0, 0)
 
+#define IOCTL_XENBUS_BACKEND_SETUP                     \
+       _IOC(_IOC_NONE, 'B', 1, 0)
+
 #endif /* __LINUX_XEN_XENBUS_DEV_H__ */
index 81816b82860b51a7cb0a064d939d4702c4daaaed..d07dcf9fc8a9a8f05a570298d106c29e28f7167f 100644 (file)
@@ -167,7 +167,7 @@ config KERNEL_BZIP2
        depends on HAVE_KERNEL_BZIP2
        help
          Its compression ratio and speed is intermediate.
-         Decompression speed is slowest among the three.  The kernel
+         Decompression speed is slowest among the choices.  The kernel
          size is about 10% smaller with bzip2, in comparison to gzip.
          Bzip2 uses a large amount of memory. For modern kernels you
          will need at least 8MB RAM or more for booting.
@@ -176,10 +176,9 @@ config KERNEL_LZMA
        bool "LZMA"
        depends on HAVE_KERNEL_LZMA
        help
-         The most recent compression algorithm.
-         Its ratio is best, decompression speed is between the other
-         two. Compression is slowest.  The kernel size is about 33%
-         smaller with LZMA in comparison to gzip.
+         This compression algorithm's ratio is best.  Decompression speed
+         is between gzip and bzip2.  Compression is slowest.
+         The kernel size is about 33% smaller with LZMA in comparison to gzip.
 
 config KERNEL_XZ
        bool "XZ"
@@ -200,7 +199,7 @@ config KERNEL_LZO
        bool "LZO"
        depends on HAVE_KERNEL_LZO
        help
-         Its compression ratio is the poorest among the 4. The kernel
+         Its compression ratio is the poorest among the choices. The kernel
          size is about 10% bigger than gzip; however its speed
          (both compression and decompression) is the fastest.
 
@@ -803,7 +802,7 @@ config RT_GROUP_SCHED
 endif #CGROUP_SCHED
 
 config BLK_CGROUP
-       tristate "Block IO controller"
+       bool "Block IO controller"
        depends on BLOCK
        default n
        ---help---
index 42b0707c348108b98f6ce05ae1a98ad4f64a0e86..d3f0aeed2d39fe06aa07cb4147f747af8b7597ee 100644 (file)
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers.  To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/ctype.h>
@@ -330,7 +340,7 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
        if (err)
                return err;
 
-       sys_chdir((const char __user __force *)"/root");
+       sys_chdir("/root");
        s = current->fs->pwd.dentry->d_sb;
        ROOT_DEV = s->s_dev;
        printk(KERN_INFO
@@ -556,5 +566,5 @@ void __init prepare_namespace(void)
 out:
        devtmpfs_mount("dev");
        sys_mount(".", "/", NULL, MS_MOVE, NULL);
-       sys_chroot((const char __user __force *)".");
+       sys_chroot(".");
 }
index 9047330c73e9b8fed1098131513d6a8a1bdf0f8b..135959a276bef21628556119247be2e4d0d83610 100644 (file)
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers.  To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
 #include <linux/unistd.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
index 32c4799b8c91bb483f418cd6a39e1e11eda0cd7a..8cb6db54285ba64f81af9ba2b388a7c216ceec61 100644 (file)
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers.  To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
 #include <linux/delay.h>
 #include <linux/raid/md_u.h>
 #include <linux/raid/md_p.h>
@@ -283,7 +293,7 @@ static void __init autodetect_raid(void)
 
        wait_for_device_probe();
 
-       fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
+       fd = sys_open("/dev/md0", 0, 0);
        if (fd >= 0) {
                sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
                sys_close(fd);
index 6212586df29ace81e239b71d01b95826cb636507..6be2879cca66971859b1c51ff80451616ab13975 100644 (file)
@@ -1,3 +1,12 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers.  To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
 
 #include <linux/kernel.h>
 #include <linux/fs.h>
@@ -181,7 +190,7 @@ int __init rd_load_image(char *from)
        char rotator[4] = { '|' , '/' , '-' , '\\' };
 #endif
 
-       out_fd = sys_open((const char __user __force *) "/dev/ram", O_RDWR, 0);
+       out_fd = sys_open("/dev/ram", O_RDWR, 0);
        if (out_fd < 0)
                goto out;
 
@@ -280,7 +289,7 @@ noclose_input:
        sys_close(out_fd);
 out:
        kfree(buf);
-       sys_unlink((const char __user __force *) "/dev/ram");
+       sys_unlink("/dev/ram");
        return res;
 }
 
index 8216c303b0821b15f1a353a2fea7af84abb8f4bf..84c6bf111300878a095a9fb3f8f91678dbe10b65 100644 (file)
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers.  To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
@@ -74,7 +84,7 @@ static void __init free_hash(void)
        }
 }
 
-static long __init do_utime(char __user *filename, time_t mtime)
+static long __init do_utime(char *filename, time_t mtime)
 {
        struct timespec t[2];
 
@@ -529,7 +539,7 @@ static void __init clean_rootfs(void)
        struct linux_dirent64 *dirp;
        int num;
 
-       fd = sys_open((const char __user __force *) "/", O_RDONLY, 0);
+       fd = sys_open("/", O_RDONLY, 0);
        WARN_ON(fd < 0);
        if (fd < 0)
                return;
@@ -589,7 +599,7 @@ static int __init populate_rootfs(void)
                }
                printk(KERN_INFO "rootfs image is not initramfs (%s)"
                                "; looks like an initrd\n", err);
-               fd = sys_open((const char __user __force *) "/initrd.image",
+               fd = sys_open("/initrd.image",
                              O_WRONLY|O_CREAT, 0700);
                if (fd >= 0) {
                        sys_write(fd, (char *)initrd_start,
index 0c09366b96f3a634365c945c1a2d987a5afbf55e..383d638340b8417c8e31f53b935f8e8d833b2707 100644 (file)
 #include <linux/ipc_namespace.h>
 #include <linux/sysctl.h>
 
-/*
- * Define the ranges various user-specified maximum values can
- * be set to.
- */
-#define MIN_MSGMAX     1               /* min value for msg_max */
-#define MAX_MSGMAX     HARD_MSGMAX     /* max value for msg_max */
-#define MIN_MSGSIZEMAX 128             /* min value for msgsize_max */
-#define MAX_MSGSIZEMAX (8192*128)      /* max value for msgsize_max */
-
 #ifdef CONFIG_PROC_SYSCTL
 static void *get_mq(ctl_table *table)
 {
@@ -31,16 +22,6 @@ static void *get_mq(ctl_table *table)
        return which;
 }
 
-static int proc_mq_dointvec(ctl_table *table, int write,
-       void __user *buffer, size_t *lenp, loff_t *ppos)
-{
-       struct ctl_table mq_table;
-       memcpy(&mq_table, table, sizeof(mq_table));
-       mq_table.data = get_mq(table);
-
-       return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
-}
-
 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
        void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -52,15 +33,17 @@ static int proc_mq_dointvec_minmax(ctl_table *table, int write,
                                        lenp, ppos);
 }
 #else
-#define proc_mq_dointvec NULL
 #define proc_mq_dointvec_minmax NULL
 #endif
 
+static int msg_queues_limit_min = MIN_QUEUESMAX;
+static int msg_queues_limit_max = HARD_QUEUESMAX;
+
 static int msg_max_limit_min = MIN_MSGMAX;
-static int msg_max_limit_max = MAX_MSGMAX;
+static int msg_max_limit_max = HARD_MSGMAX;
 
 static int msg_maxsize_limit_min = MIN_MSGSIZEMAX;
-static int msg_maxsize_limit_max = MAX_MSGSIZEMAX;
+static int msg_maxsize_limit_max = HARD_MSGSIZEMAX;
 
 static ctl_table mq_sysctls[] = {
        {
@@ -68,7 +51,9 @@ static ctl_table mq_sysctls[] = {
                .data           = &init_ipc_ns.mq_queues_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_mq_dointvec,
+               .proc_handler   = proc_mq_dointvec_minmax,
+               .extra1         = &msg_queues_limit_min,
+               .extra2         = &msg_queues_limit_max,
        },
        {
                .procname       = "msg_max",
@@ -88,6 +73,24 @@ static ctl_table mq_sysctls[] = {
                .extra1         = &msg_maxsize_limit_min,
                .extra2         = &msg_maxsize_limit_max,
        },
+       {
+               .procname       = "msg_default",
+               .data           = &init_ipc_ns.mq_msg_default,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_mq_dointvec_minmax,
+               .extra1         = &msg_max_limit_min,
+               .extra2         = &msg_max_limit_max,
+       },
+       {
+               .procname       = "msgsize_default",
+               .data           = &init_ipc_ns.mq_msgsize_default,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_mq_dointvec_minmax,
+               .extra1         = &msg_maxsize_limit_min,
+               .extra2         = &msg_maxsize_limit_max,
+       },
        {}
 };
 
index b6a0d46fbad71ea84705a5d8644308398ea9cb9b..8ce57691e7b60994d9cc97620b7550c603df5391 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/mqueue.h>
 #include <linux/msg.h>
 #include <linux/skbuff.h>
+#include <linux/vmalloc.h>
 #include <linux/netlink.h>
 #include <linux/syscalls.h>
 #include <linux/audit.h>
 #define STATE_PENDING  1
 #define STATE_READY    2
 
+struct posix_msg_tree_node {
+       struct rb_node          rb_node;
+       struct list_head        msg_list;
+       int                     priority;
+};
+
 struct ext_wait_queue {                /* queue of sleeping tasks */
        struct task_struct *task;
        struct list_head list;
@@ -61,7 +68,8 @@ struct mqueue_inode_info {
        struct inode vfs_inode;
        wait_queue_head_t wait_q;
 
-       struct msg_msg **messages;
+       struct rb_root msg_tree;
+       struct posix_msg_tree_node *node_cache;
        struct mq_attr attr;
 
        struct sigevent notify;
@@ -109,6 +117,103 @@ static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
        return ns;
 }
 
+/* Auxiliary functions to manipulate messages' list */
+static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
+{
+       struct rb_node **p, *parent = NULL;
+       struct posix_msg_tree_node *leaf;
+
+       p = &info->msg_tree.rb_node;
+       while (*p) {
+               parent = *p;
+               leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
+
+               if (likely(leaf->priority == msg->m_type))
+                       goto insert_msg;
+               else if (msg->m_type < leaf->priority)
+                       p = &(*p)->rb_left;
+               else
+                       p = &(*p)->rb_right;
+       }
+       if (info->node_cache) {
+               leaf = info->node_cache;
+               info->node_cache = NULL;
+       } else {
+               leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
+               if (!leaf)
+                       return -ENOMEM;
+               rb_init_node(&leaf->rb_node);
+               INIT_LIST_HEAD(&leaf->msg_list);
+               info->qsize += sizeof(*leaf);
+       }
+       leaf->priority = msg->m_type;
+       rb_link_node(&leaf->rb_node, parent, p);
+       rb_insert_color(&leaf->rb_node, &info->msg_tree);
+insert_msg:
+       info->attr.mq_curmsgs++;
+       info->qsize += msg->m_ts;
+       list_add_tail(&msg->m_list, &leaf->msg_list);
+       return 0;
+}
+
+static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
+{
+       struct rb_node **p, *parent = NULL;
+       struct posix_msg_tree_node *leaf;
+       struct msg_msg *msg;
+
+try_again:
+       p = &info->msg_tree.rb_node;
+       while (*p) {
+               parent = *p;
+               /*
+                * During insert, low priorities go to the left and high to the
+                * right.  On receive, we want the highest priorities first, so
+                * walk all the way to the right.
+                */
+               p = &(*p)->rb_right;
+       }
+       if (!parent) {
+               if (info->attr.mq_curmsgs) {
+                       pr_warn_once("Inconsistency in POSIX message queue, "
+                                    "no tree element, but supposedly messages "
+                                    "should exist!\n");
+                       info->attr.mq_curmsgs = 0;
+               }
+               return NULL;
+       }
+       leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
+       if (unlikely(list_empty(&leaf->msg_list))) {
+               pr_warn_once("Inconsistency in POSIX message queue, "
+                            "empty leaf node but we haven't implemented "
+                            "lazy leaf delete!\n");
+               rb_erase(&leaf->rb_node, &info->msg_tree);
+               if (info->node_cache) {
+                       info->qsize -= sizeof(*leaf);
+                       kfree(leaf);
+               } else {
+                       info->node_cache = leaf;
+               }
+               goto try_again;
+       } else {
+               msg = list_first_entry(&leaf->msg_list,
+                                      struct msg_msg, m_list);
+               list_del(&msg->m_list);
+               if (list_empty(&leaf->msg_list)) {
+                       rb_erase(&leaf->rb_node, &info->msg_tree);
+                       if (info->node_cache) {
+                               info->qsize -= sizeof(*leaf);
+                               kfree(leaf);
+                       } else {
+                               info->node_cache = leaf;
+                       }
+               }
+       }
+       info->attr.mq_curmsgs--;
+       info->qsize -= msg->m_ts;
+       return msg;
+}
+
 static struct inode *mqueue_get_inode(struct super_block *sb,
                struct ipc_namespace *ipc_ns, umode_t mode,
                struct mq_attr *attr)
@@ -129,7 +234,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
 
        if (S_ISREG(mode)) {
                struct mqueue_inode_info *info;
-               unsigned long mq_bytes, mq_msg_tblsz;
+               unsigned long mq_bytes, mq_treesize;
 
                inode->i_fop = &mqueue_file_operations;
                inode->i_size = FILENT_SIZE;
@@ -143,20 +248,36 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
                info->notify_user_ns = NULL;
                info->qsize = 0;
                info->user = NULL;      /* set when all is ok */
+               info->msg_tree = RB_ROOT;
+               info->node_cache = NULL;
                memset(&info->attr, 0, sizeof(info->attr));
-               info->attr.mq_maxmsg = ipc_ns->mq_msg_max;
-               info->attr.mq_msgsize = ipc_ns->mq_msgsize_max;
+               info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
+                                          ipc_ns->mq_msg_default);
+               info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
+                                           ipc_ns->mq_msgsize_default);
                if (attr) {
                        info->attr.mq_maxmsg = attr->mq_maxmsg;
                        info->attr.mq_msgsize = attr->mq_msgsize;
                }
-               mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
-               info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
-               if (!info->messages)
-                       goto out_inode;
+               /*
+                * We used to allocate a static array of pointers and account
+                * the size of that array as well as one msg_msg struct per
+                * possible message into the queue size. That's no longer
+                * accurate as the queue is now an rbtree and will grow and
+                * shrink depending on usage patterns.  We can, however, still
+                * account one msg_msg struct per message, but the nodes are
+                * allocated depending on priority usage, and most programs
+                * only use one, or a handful, of priorities.  However, since
+                * this is pinned memory, we need to assume worst case, so
+                * that means the min(mq_maxmsg, max_priorities) * struct
+                * posix_msg_tree_node.
+                */
+               mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+                       min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+                       sizeof(struct posix_msg_tree_node);
 
-               mq_bytes = (mq_msg_tblsz +
-                       (info->attr.mq_maxmsg * info->attr.mq_msgsize));
+               mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+                                         info->attr.mq_msgsize);
 
                spin_lock(&mq_lock);
                if (u->mq_bytes + mq_bytes < u->mq_bytes ||
@@ -247,11 +368,11 @@ static void mqueue_evict_inode(struct inode *inode)
 {
        struct mqueue_inode_info *info;
        struct user_struct *user;
-       unsigned long mq_bytes;
-       int i;
+       unsigned long mq_bytes, mq_treesize;
        struct ipc_namespace *ipc_ns;
+       struct msg_msg *msg;
 
-       end_writeback(inode);
+       clear_inode(inode);
 
        if (S_ISDIR(inode->i_mode))
                return;
@@ -259,14 +380,19 @@ static void mqueue_evict_inode(struct inode *inode)
        ipc_ns = get_ns_from_inode(inode);
        info = MQUEUE_I(inode);
        spin_lock(&info->lock);
-       for (i = 0; i < info->attr.mq_curmsgs; i++)
-               free_msg(info->messages[i]);
-       kfree(info->messages);
+       while ((msg = msg_get(info)) != NULL)
+               free_msg(msg);
+       kfree(info->node_cache);
        spin_unlock(&info->lock);
 
        /* Total amount of bytes accounted for the mqueue */
-       mq_bytes = info->attr.mq_maxmsg * (sizeof(struct msg_msg *)
-           + info->attr.mq_msgsize);
+       mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+               min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+               sizeof(struct posix_msg_tree_node);
+
+       mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+                                 info->attr.mq_msgsize);
+
        user = info->user;
        if (user) {
                spin_lock(&mq_lock);
@@ -300,8 +426,9 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry,
                error = -EACCES;
                goto out_unlock;
        }
-       if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
-                       !capable(CAP_SYS_RESOURCE)) {
+       if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
+           (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
+            !capable(CAP_SYS_RESOURCE))) {
                error = -ENOSPC;
                goto out_unlock;
        }
@@ -485,26 +612,6 @@ static struct ext_wait_queue *wq_get_first_waiter(
        return list_entry(ptr, struct ext_wait_queue, list);
 }
 
-/* Auxiliary functions to manipulate messages' list */
-static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
-{
-       int k;
-
-       k = info->attr.mq_curmsgs - 1;
-       while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
-               info->messages[k + 1] = info->messages[k];
-               k--;
-       }
-       info->attr.mq_curmsgs++;
-       info->qsize += ptr->m_ts;
-       info->messages[k + 1] = ptr;
-}
-
-static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
-{
-       info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
-       return info->messages[info->attr.mq_curmsgs];
-}
 
 static inline void set_cookie(struct sk_buff *skb, char code)
 {
@@ -585,24 +692,30 @@ static void remove_notification(struct mqueue_inode_info *info)
 
 static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
 {
+       int mq_treesize;
+       unsigned long total_size;
+
        if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
-               return 0;
+               return -EINVAL;
        if (capable(CAP_SYS_RESOURCE)) {
-               if (attr->mq_maxmsg > HARD_MSGMAX)
-                       return 0;
+               if (attr->mq_maxmsg > HARD_MSGMAX ||
+                   attr->mq_msgsize > HARD_MSGSIZEMAX)
+                       return -EINVAL;
        } else {
                if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
                                attr->mq_msgsize > ipc_ns->mq_msgsize_max)
-                       return 0;
+                       return -EINVAL;
        }
        /* check for overflow */
        if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
-               return 0;
-       if ((unsigned long)(attr->mq_maxmsg * (attr->mq_msgsize
-           + sizeof (struct msg_msg *))) <
-           (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
-               return 0;
-       return 1;
+               return -EOVERFLOW;
+       mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
+               min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
+               sizeof(struct posix_msg_tree_node);
+       total_size = attr->mq_maxmsg * attr->mq_msgsize;
+       if (total_size + mq_treesize < total_size)
+               return -EOVERFLOW;
+       return 0;
 }
 
 /*
@@ -617,12 +730,21 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
        int ret;
 
        if (attr) {
-               if (!mq_attr_ok(ipc_ns, attr)) {
-                       ret = -EINVAL;
+               ret = mq_attr_ok(ipc_ns, attr);
+               if (ret)
                        goto out;
-               }
                /* store for use during create */
                dentry->d_fsdata = attr;
+       } else {
+               struct mq_attr def_attr;
+
+               def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
+                                        ipc_ns->mq_msg_default);
+               def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
+                                         ipc_ns->mq_msgsize_default);
+               ret = mq_attr_ok(ipc_ns, &def_attr);
+               if (ret)
+                       goto out;
        }
 
        mode &= ~current_umask();
@@ -837,7 +959,8 @@ static inline void pipelined_receive(struct mqueue_inode_info *info)
                wake_up_interruptible(&info->wait_q);
                return;
        }
-       msg_insert(sender->msg, info);
+       if (msg_insert(sender->msg, info))
+               return;
        list_del(&sender->list);
        sender->state = STATE_PENDING;
        wake_up_process(sender->task);
@@ -857,7 +980,8 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
        struct mqueue_inode_info *info;
        ktime_t expires, *timeout = NULL;
        struct timespec ts;
-       int ret;
+       struct posix_msg_tree_node *new_leaf = NULL;
+       int ret = 0;
 
        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &expires, &ts);
@@ -905,34 +1029,60 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
        msg_ptr->m_ts = msg_len;
        msg_ptr->m_type = msg_prio;
 
+       /*
+        * msg_insert really wants us to have a valid, spare node struct so
+        * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
+        * fall back to that if necessary.
+        */
+       if (!info->node_cache)
+               new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
+
        spin_lock(&info->lock);
 
+       if (!info->node_cache && new_leaf) {
+               /* Save our speculative allocation into the cache */
+               rb_init_node(&new_leaf->rb_node);
+               INIT_LIST_HEAD(&new_leaf->msg_list);
+               info->node_cache = new_leaf;
+               info->qsize += sizeof(*new_leaf);
+               new_leaf = NULL;
+       } else {
+               kfree(new_leaf);
+       }
+
        if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
                if (filp->f_flags & O_NONBLOCK) {
-                       spin_unlock(&info->lock);
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
                        wait.msg = (void *) msg_ptr;
                        wait.state = STATE_NONE;
                        ret = wq_sleep(info, SEND, timeout, &wait);
+                       /*
+                        * wq_sleep must be called with info->lock held, and
+                        * returns with the lock released
+                        */
+                       goto out_free;
                }
-               if (ret < 0)
-                       free_msg(msg_ptr);
        } else {
                receiver = wq_get_first_waiter(info, RECV);
                if (receiver) {
                        pipelined_send(info, msg_ptr, receiver);
                } else {
                        /* adds message to the queue */
-                       msg_insert(msg_ptr, info);
+                       ret = msg_insert(msg_ptr, info);
+                       if (ret)
+                               goto out_unlock;
                        __do_notify(info);
                }
                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                CURRENT_TIME;
-               spin_unlock(&info->lock);
-               ret = 0;
        }
+out_unlock:
+       spin_unlock(&info->lock);
+out_free:
+       if (ret)
+               free_msg(msg_ptr);
 out_fput:
        fput(filp);
 out:
@@ -951,6 +1101,7 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
        struct ext_wait_queue wait;
        ktime_t expires, *timeout = NULL;
        struct timespec ts;
+       struct posix_msg_tree_node *new_leaf = NULL;
 
        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &expires, &ts);
@@ -986,7 +1137,26 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                goto out_fput;
        }
 
+       /*
+        * msg_insert really wants us to have a valid, spare node struct so
+        * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
+        * fall back to that if necessary.
+        */
+       if (!info->node_cache)
+               new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
+
        spin_lock(&info->lock);
+
+       if (!info->node_cache && new_leaf) {
+               /* Save our speculative allocation into the cache */
+               rb_init_node(&new_leaf->rb_node);
+               INIT_LIST_HEAD(&new_leaf->msg_list);
+               info->node_cache = new_leaf;
+               info->qsize += sizeof(*new_leaf);
+       } else {
+               kfree(new_leaf);
+       }
+
        if (info->attr.mq_curmsgs == 0) {
                if (filp->f_flags & O_NONBLOCK) {
                        spin_unlock(&info->lock);
@@ -1251,6 +1421,8 @@ int mq_init_ns(struct ipc_namespace *ns)
        ns->mq_queues_max    = DFLT_QUEUESMAX;
        ns->mq_msg_max       = DFLT_MSGMAX;
        ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
+       ns->mq_msg_default   = DFLT_MSG;
+       ns->mq_msgsize_default  = DFLT_MSGSIZE;
 
        ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
        if (IS_ERR(ns->mq_mnt)) {
index 406c5b208193373b979ce82bffe6617250ea64ed..5e2cbfdab6fc0d6b96a19c321a9208dda8cd130d 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1036,6 +1036,10 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
        sfd->file = shp->shm_file;
        sfd->vm_ops = NULL;
 
+       err = security_mmap_file(file, prot, flags);
+       if (err)
+               goto out_fput;
+
        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
@@ -1050,7 +1054,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
                        goto invalid;
        }
                
-       user_addr = do_mmap (file, addr, size, prot, flags, 0);
+       user_addr = do_mmap_pgoff(file, addr, size, prot, flags, 0);
        *raddr = user_addr;
        err = 0;
        if (IS_ERR_VALUE(user_addr))
@@ -1058,6 +1062,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
 invalid:
        up_write(&current->mm->mmap_sem);
 
+out_fput:
        fput(file);
 
 out_nattch:
index 6c07f30fa9b7e678e23b2038474a5b508b7fa661..c0cc67ad764ceddbe9f226ee1bfb90c4055f19ff 100644 (file)
@@ -5,12 +5,12 @@
 obj-y     = fork.o exec_domain.o panic.o printk.o \
            cpu.o exit.o itimer.o time.o softirq.o resource.o \
            sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
-           signal.o sys.o kmod.o workqueue.o pid.o \
+           signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
            rcupdate.o extable.o params.o posix-timers.o \
            kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
            hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
            notifier.o ksysfs.o cred.o \
-           async.o range.o groups.o
+           async.o range.o groups.o lglock.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
@@ -25,6 +25,9 @@ endif
 obj-y += sched/
 obj-y += power/
 
+ifeq ($(CONFIG_CHECKPOINT_RESTORE),y)
+obj-$(CONFIG_X86) += kcmp.o
+endif
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
index a0c6af34d50063b31f6bd1fd1fc1de8b299d10ce..0f3527d6184a1597fb81f00a878df15eeab0ecf8 100644 (file)
@@ -5132,7 +5132,7 @@ EXPORT_SYMBOL_GPL(css_depth);
  * @root: the css supporsed to be an ancestor of the child.
  *
  * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
- * this function reads css->id, this use rcu_dereference() and rcu_read_lock().
+ * this function reads css->id, the caller must hold rcu_read_lock().
  * But, considering usual usage, the csses should be valid objects after test.
  * Assuming that the caller will do some action to the child if this returns
  * returns true, the caller must take "child";s reference count.
@@ -5144,18 +5144,18 @@ bool css_is_ancestor(struct cgroup_subsys_state *child,
 {
        struct css_id *child_id;
        struct css_id *root_id;
-       bool ret = true;
 
-       rcu_read_lock();
        child_id  = rcu_dereference(child->id);
+       if (!child_id)
+               return false;
        root_id = rcu_dereference(root->id);
-       if (!child_id
-           || !root_id
-           || (child_id->depth < root_id->depth)
-           || (child_id->stack[root_id->depth] != root_id->id))
-               ret = false;
-       rcu_read_unlock();
-       return ret;
+       if (!root_id)
+               return false;
+       if (child_id->depth < root_id->depth)
+               return false;
+       if (child_id->stack[root_id->depth] != root_id->id)
+               return false;
+       return true;
 }
 
 void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
index 0e6353cf147abf51c4f760054971915063ece928..a4eb5227a19e482eaf2b821c94de845228381bdd 100644 (file)
 #include <linux/sched.h>
 #include <linux/unistd.h>
 #include <linux/cpu.h>
+#include <linux/oom.h>
+#include <linux/rcupdate.h>
 #include <linux/export.h>
+#include <linux/bug.h>
 #include <linux/kthread.h>
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
@@ -173,6 +176,47 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
+/**
+ * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
+ * @cpu: a CPU id
+ *
+ * This function walks all processes, finds a valid mm struct for each one and
+ * then clears a corresponding bit in mm's cpumask.  While this all sounds
+ * trivial, there are various non-obvious corner cases, which this function
+ * tries to solve in a safe manner.
+ *
+ * Also note that the function uses a somewhat relaxed locking scheme, so it may
+ * be called only for an already offlined CPU.
+ */
+void clear_tasks_mm_cpumask(int cpu)
+{
+       struct task_struct *p;
+
+       /*
+        * This function is called after the cpu is taken down and marked
+        * offline, so its not like new tasks will ever get this cpu set in
+        * their mm mask. -- Peter Zijlstra
+        * Thus, we may use rcu_read_lock() here, instead of grabbing
+        * full-fledged tasklist_lock.
+        */
+       WARN_ON(cpu_online(cpu));
+       rcu_read_lock();
+       for_each_process(p) {
+               struct task_struct *t;
+
+               /*
+                * Main thread might exit, but other threads may still have
+                * a valid mm. Find one.
+                */
+               t = find_lock_task_mm(p);
+               if (!t)
+                       continue;
+               cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
+               task_unlock(t);
+       }
+       rcu_read_unlock();
+}
+
 static inline void check_for_tasks(int cpu)
 {
        struct task_struct *p;
index 249152e15308c9d3a36922c2d10989a961e221d9..9656a3c36503dee343813149bbf1153bb6aea05a 100644 (file)
@@ -81,7 +81,7 @@ int cpu_pm_unregister_notifier(struct notifier_block *nb)
 EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
 
 /**
- * cpm_pm_enter - CPU low power entry notifier
+ * cpu_pm_enter - CPU low power entry notifier
  *
  * Notifies listeners that a single CPU is entering a low power state that may
  * cause some blocks in the same power domain as the cpu to reset.
@@ -89,7 +89,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
  * Must be called on the affected CPU with interrupts disabled.  Platform is
  * responsible for ensuring that cpu_pm_enter is not called twice on the same
  * CPU before cpu_pm_exit is called. Notified drivers can include VFP
- * co-processor, interrupt controller and it's PM extensions, local CPU
+ * co-processor, interrupt controller and its PM extensions, local CPU
  * timers context save/restore which shouldn't be interrupted. Hence it
  * must be called with interrupts disabled.
  *
@@ -115,13 +115,13 @@ int cpu_pm_enter(void)
 EXPORT_SYMBOL_GPL(cpu_pm_enter);
 
 /**
- * cpm_pm_exit - CPU low power exit notifier
+ * cpu_pm_exit - CPU low power exit notifier
  *
  * Notifies listeners that a single CPU is exiting a low power state that may
  * have caused some blocks in the same power domain as the cpu to reset.
  *
  * Notified drivers can include VFP co-processor, interrupt controller
- * and it's PM extensions, local CPU timers context save/restore which
+ * and its PM extensions, local CPU timers context save/restore which
  * shouldn't be interrupted. Hence it must be called with interrupts disabled.
  *
  * Return conditions are same as __raw_notifier_call_chain.
@@ -139,7 +139,7 @@ int cpu_pm_exit(void)
 EXPORT_SYMBOL_GPL(cpu_pm_exit);
 
 /**
- * cpm_cluster_pm_enter - CPU cluster low power entry notifier
+ * cpu_cluster_pm_enter - CPU cluster low power entry notifier
  *
  * Notifies listeners that all cpus in a power domain are entering a low power
  * state that may cause some blocks in the same power domain to reset.
@@ -147,7 +147,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit);
  * Must be called after cpu_pm_enter has been called on all cpus in the power
  * domain, and before cpu_pm_exit has been called on any cpu in the power
  * domain. Notified drivers can include VFP co-processor, interrupt controller
- * and it's PM extensions, local CPU timers context save/restore which
+ * and its PM extensions, local CPU timers context save/restore which
  * shouldn't be interrupted. Hence it must be called with interrupts disabled.
  *
  * Must be called with interrupts disabled.
@@ -174,7 +174,7 @@ int cpu_cluster_pm_enter(void)
 EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
 
 /**
- * cpm_cluster_pm_exit - CPU cluster low power exit notifier
+ * cpu_cluster_pm_exit - CPU cluster low power exit notifier
  *
  * Notifies listeners that all cpus in a power domain are exiting form a
  * low power state that may have caused some blocks in the same power domain
@@ -183,7 +183,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
  * Must be called after cpu_pm_exit has been called on all cpus in the power
  * domain, and before cpu_pm_exit has been called on any cpu in the power
  * domain. Notified drivers can include VFP co-processor, interrupt controller
- * and it's PM extensions, local CPU timers context save/restore which
+ * and its PM extensions, local CPU timers context save/restore which
  * shouldn't be interrupted. Hence it must be called with interrupts disabled.
  *
  * Return conditions are same as __raw_notifier_call_chain.
index 430557ea488f3625243455afcdd6b2f9f481ac19..de728ac50d821b9f38340534a4ba6202137d55a2 100644 (file)
@@ -207,13 +207,6 @@ void exit_creds(struct task_struct *tsk)
        validate_creds(cred);
        alter_cred_subscribers(cred, -1);
        put_cred(cred);
-
-       cred = (struct cred *) tsk->replacement_session_keyring;
-       if (cred) {
-               tsk->replacement_session_keyring = NULL;
-               validate_creds(cred);
-               put_cred(cred);
-       }
 }
 
 /**
@@ -396,8 +389,6 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
        struct cred *new;
        int ret;
 
-       p->replacement_session_keyring = NULL;
-
        if (
 #ifdef CONFIG_KEYS
                !p->cred->thread_keyring &&
index 910a0716e17ab4124ddd07f22d502e19ed9f30de..34867cc5b42a77f325c204bb2fd09e1fabe38955 100644 (file)
@@ -884,9 +884,9 @@ static void check_stack_usage(void)
 
        spin_lock(&low_water_lock);
        if (free < lowest_to_date) {
-               printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
-                               "left\n",
-                               current->comm, free);
+               printk(KERN_WARNING "%s (%d) used greatest stack depth: "
+                               "%lu bytes left\n",
+                               current->comm, task_pid_nr(current), free);
                lowest_to_date = free;
        }
        spin_unlock(&low_water_lock);
@@ -946,12 +946,13 @@ void do_exit(long code)
        exit_signals(tsk);  /* sets PF_EXITING */
        /*
         * tsk->flags are checked in the futex code to protect against
-        * an exiting task cleaning up the robust pi futexes.
+        * an exiting task cleaning up the robust pi futexes, and in
+        * task_work_add() to avoid the race with exit_task_work().
         */
        smp_mb();
        raw_spin_unlock_wait(&tsk->pi_lock);
 
-       exit_irq_thread();
+       exit_task_work(tsk);
 
        if (unlikely(in_atomic()))
                printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
@@ -1214,7 +1215,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
        unsigned long state;
        int retval, status, traced;
        pid_t pid = task_pid_vnr(p);
-       uid_t uid = from_kuid_munged(current_user_ns(), __task_cred(p)->uid);
+       uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
        struct siginfo __user *infop;
 
        if (!likely(wo->wo_flags & WEXITED))
index 47b4e4f379f94c2b726aa9babdcbbd26508e8dc1..ab5211b9e622cf94d07b7bfb4ccfd9bac85e7b79 100644 (file)
@@ -386,7 +386,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                }
                charge = 0;
                if (mpnt->vm_flags & VM_ACCOUNT) {
-                       unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
+                       unsigned long len;
+                       len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
                        if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
                                goto fail_nomem;
                        charge = len;
@@ -614,7 +615,6 @@ void mmput(struct mm_struct *mm)
                        list_del(&mm->mmlist);
                        spin_unlock(&mmlist_lock);
                }
-               put_swap_token(mm);
                if (mm->binfmt)
                        module_put(mm->binfmt->module);
                mmdrop(mm);
@@ -787,9 +787,6 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
        /* Get rid of any cached register state */
        deactivate_mm(tsk, mm);
 
-       if (tsk->vfork_done)
-               complete_vfork_done(tsk);
-
        /*
         * If we're exiting normally, clear a user-space tid field if
         * requested.  We leave this alone when dying by signal, to leave
@@ -810,6 +807,13 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
                }
                tsk->clear_child_tid = NULL;
        }
+
+       /*
+        * All done, finally we can wake up parent and return this mm to him.
+        * Also kthread_stop() uses this completion for synchronization.
+        */
+       if (tsk->vfork_done)
+               complete_vfork_done(tsk);
 }
 
 /*
@@ -831,10 +835,6 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
        memcpy(mm, oldmm, sizeof(*mm));
        mm_init_cpumask(mm);
 
-       /* Initializing for Swap token stuff */
-       mm->token_priority = 0;
-       mm->last_interval = 0;
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        mm->pmd_huge_pte = NULL;
 #endif
@@ -913,10 +913,6 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
                goto fail_nomem;
 
 good_mm:
-       /* Initializing for Swap token stuff */
-       mm->token_priority = 0;
-       mm->last_interval = 0;
-
        tsk->mm = mm;
        tsk->active_mm = mm;
        return 0;
@@ -984,9 +980,8 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
         * Share io context with parent, if CLONE_IO is set
         */
        if (clone_flags & CLONE_IO) {
-               tsk->io_context = ioc_task_link(ioc);
-               if (unlikely(!tsk->io_context))
-                       return -ENOMEM;
+               ioc_task_link(ioc);
+               tsk->io_context = ioc;
        } else if (ioprio_valid(ioc->ioprio)) {
                new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
                if (unlikely(!new_ioc))
@@ -1420,6 +1415,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         */
        p->group_leader = p;
        INIT_LIST_HEAD(&p->thread_group);
+       INIT_HLIST_HEAD(&p->task_works);
 
        /* Now that the task is set up, run cgroup callbacks if
         * necessary. We need to run them before the task is visible
index bb32326afe8796be6ebbf240fbd3ef4bb1fc8cbe..ea0c6c2ae6f747d0bd8dff198e5e1f6b6934f050 100644 (file)
@@ -7,6 +7,8 @@
  * This file contains driver APIs to the irq subsystem.
  */
 
+#define pr_fmt(fmt) "genirq: " fmt
+
 #include <linux/irq.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
@@ -14,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/task_work.h>
 
 #include "internals.h"
 
@@ -565,7 +568,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                 * IRQF_TRIGGER_* but the PIC does not support multiple
                 * flow-types?
                 */
-               pr_debug("genirq: No set_type function for IRQ %d (%s)\n", irq,
+               pr_debug("No set_type function for IRQ %d (%s)\n", irq,
                         chip ? (chip->name ? : "unknown") : "unknown");
                return 0;
        }
@@ -600,7 +603,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                ret = 0;
                break;
        default:
-               pr_err("genirq: Setting trigger mode %lu for irq %u failed (%pF)\n",
+               pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
                       flags, irq, chip->irq_set_type);
        }
        if (unmask)
@@ -773,11 +776,39 @@ static void wake_threads_waitq(struct irq_desc *desc)
                wake_up(&desc->wait_for_threads);
 }
 
+static void irq_thread_dtor(struct task_work *unused)
+{
+       struct task_struct *tsk = current;
+       struct irq_desc *desc;
+       struct irqaction *action;
+
+       if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
+               return;
+
+       action = kthread_data(tsk);
+
+       pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+              tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
+
+
+       desc = irq_to_desc(action->irq);
+       /*
+        * If IRQTF_RUNTHREAD is set, we need to decrement
+        * desc->threads_active and wake possible waiters.
+        */
+       if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+               wake_threads_waitq(desc);
+
+       /* Prevent a stale desc->threads_oneshot */
+       irq_finalize_oneshot(desc, action);
+}
+
 /*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
 {
+       struct task_work on_exit_work;
        static const struct sched_param param = {
                .sched_priority = MAX_USER_RT_PRIO/2,
        };
@@ -793,7 +824,9 @@ static int irq_thread(void *data)
                handler_fn = irq_thread_fn;
 
        sched_setscheduler(current, SCHED_FIFO, &param);
-       current->irq_thread = 1;
+
+       init_task_work(&on_exit_work, irq_thread_dtor, NULL);
+       task_work_add(current, &on_exit_work, false);
 
        while (!irq_wait_for_interrupt(action)) {
                irqreturn_t action_ret;
@@ -815,44 +848,11 @@ static int irq_thread(void *data)
         * cannot touch the oneshot mask at this point anymore as
         * __setup_irq() might have given out currents thread_mask
         * again.
-        *
-        * Clear irq_thread. Otherwise exit_irq_thread() would make
-        * fuzz about an active irq thread going into nirvana.
         */
-       current->irq_thread = 0;
+       task_work_cancel(current, irq_thread_dtor);
        return 0;
 }
 
-/*
- * Called from do_exit()
- */
-void exit_irq_thread(void)
-{
-       struct task_struct *tsk = current;
-       struct irq_desc *desc;
-       struct irqaction *action;
-
-       if (!tsk->irq_thread)
-               return;
-
-       action = kthread_data(tsk);
-
-       pr_err("genirq: exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-              tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
-
-       desc = irq_to_desc(action->irq);
-
-       /*
-        * If IRQTF_RUNTHREAD is set, we need to decrement
-        * desc->threads_active and wake possible waiters.
-        */
-       if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
-               wake_threads_waitq(desc);
-
-       /* Prevent a stale desc->threads_oneshot */
-       irq_finalize_oneshot(desc, action);
-}
-
 static void irq_setup_forced_threading(struct irqaction *new)
 {
        if (!force_irqthreads)
@@ -1044,7 +1044,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                 * has. The type flags are unreliable as the
                 * underlying chip implementation can override them.
                 */
-               pr_err("genirq: Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
+               pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
                       irq);
                ret = -EINVAL;
                goto out_mask;
@@ -1095,7 +1095,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
                if (nmsk != omsk)
                        /* hope the handler works with current  trigger mode */
-                       pr_warning("genirq: irq %d uses trigger mode %u; requested %u\n",
+                       pr_warning("irq %d uses trigger mode %u; requested %u\n",
                                   irq, nmsk, omsk);
        }
 
@@ -1133,7 +1133,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 mismatch:
        if (!(new->flags & IRQF_PROBE_SHARED)) {
-               pr_err("genirq: Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
+               pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
                       irq, new->flags, new->name, old->flags, old->name);
 #ifdef CONFIG_DEBUG_SHIRQ
                dump_stack();
index 079f1d39a8b84a9105852864cbc8ceaf8875d5ac..2169feeba529be9843ea844e7281e1cdb31377a2 100644 (file)
@@ -343,7 +343,7 @@ int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
 
 /* Look up a kernel symbol and return it in a text buffer. */
 static int __sprint_symbol(char *buffer, unsigned long address,
-                          int symbol_offset)
+                          int symbol_offset, int add_offset)
 {
        char *modname;
        const char *name;
@@ -358,13 +358,13 @@ static int __sprint_symbol(char *buffer, unsigned long address,
        if (name != buffer)
                strcpy(buffer, name);
        len = strlen(buffer);
-       buffer += len;
        offset -= symbol_offset;
 
+       if (add_offset)
+               len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);
+
        if (modname)
-               len += sprintf(buffer, "+%#lx/%#lx [%s]", offset, size, modname);
-       else
-               len += sprintf(buffer, "+%#lx/%#lx", offset, size);
+               len += sprintf(buffer + len, " [%s]", modname);
 
        return len;
 }
@@ -382,11 +382,27 @@ static int __sprint_symbol(char *buffer, unsigned long address,
  */
 int sprint_symbol(char *buffer, unsigned long address)
 {
-       return __sprint_symbol(buffer, address, 0);
+       return __sprint_symbol(buffer, address, 0, 1);
 }
-
 EXPORT_SYMBOL_GPL(sprint_symbol);
 
+/**
+ * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer
+ * @buffer: buffer to be stored
+ * @address: address to lookup
+ *
+ * This function looks up a kernel symbol with @address and stores its name
+ * and module name to @buffer if possible. If no symbol was found, just saves
+ * its @address as is.
+ *
+ * This function returns the number of bytes stored in @buffer.
+ */
+int sprint_symbol_no_offset(char *buffer, unsigned long address)
+{
+       return __sprint_symbol(buffer, address, 0, 0);
+}
+EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);
+
 /**
  * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer
  * @buffer: buffer to be stored
@@ -403,7 +419,7 @@ EXPORT_SYMBOL_GPL(sprint_symbol);
  */
 int sprint_backtrace(char *buffer, unsigned long address)
 {
-       return __sprint_symbol(buffer, address, -1);
+       return __sprint_symbol(buffer, address, -1, 1);
 }
 
 /* Look up a kernel symbol and print it to the kernel messages. */
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
new file mode 100644 (file)
index 0000000..30b7b22
--- /dev/null
@@ -0,0 +1,196 @@
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+#include <linux/fdtable.h>
+#include <linux/string.h>
+#include <linux/random.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/cache.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/kcmp.h>
+
+#include <asm/unistd.h>
+
+/*
+ * We don't expose the real in-memory order of objects for security reasons.
+ * But still the comparison results should be suitable for sorting. So we
+ * obfuscate kernel pointers values and compare the production instead.
+ *
+ * The obfuscation is done in two steps. First we xor the kernel pointer with
+ * a random value, which puts pointer into a new position in a reordered space.
+ * Secondly we multiply the xor production with a large odd random number to
+ * permute its bits even more (the odd multiplier guarantees that the product
+ * is unique ever after the high bits are truncated, since any odd number is
+ * relative prime to 2^n).
+ *
+ * Note also that the obfuscation itself is invisible to userspace and if needed
+ * it can be changed to an alternate scheme.
+ */
+static unsigned long cookies[KCMP_TYPES][2] __read_mostly;
+
+static long kptr_obfuscate(long v, int type)
+{
+       return (v ^ cookies[type][0]) * cookies[type][1];
+}
+
+/*
+ * 0 - equal, i.e. v1 = v2
+ * 1 - less than, i.e. v1 < v2
+ * 2 - greater than, i.e. v1 > v2
+ * 3 - not equal but ordering unavailable (reserved for future)
+ */
+static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
+{
+       long ret;
+
+       ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
+
+       return (ret < 0) | ((ret > 0) << 1);
+}
+
+/* The caller must have pinned the task */
+static struct file *
+get_file_raw_ptr(struct task_struct *task, unsigned int idx)
+{
+       struct file *file = NULL;
+
+       task_lock(task);
+       rcu_read_lock();
+
+       if (task->files)
+               file = fcheck_files(task->files, idx);
+
+       rcu_read_unlock();
+       task_unlock(task);
+
+       return file;
+}
+
+static void kcmp_unlock(struct mutex *m1, struct mutex *m2)
+{
+       if (likely(m2 != m1))
+               mutex_unlock(m2);
+       mutex_unlock(m1);
+}
+
+static int kcmp_lock(struct mutex *m1, struct mutex *m2)
+{
+       int err;
+
+       if (m2 > m1)
+               swap(m1, m2);
+
+       err = mutex_lock_killable(m1);
+       if (!err && likely(m1 != m2)) {
+               err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
+               if (err)
+                       mutex_unlock(m1);
+       }
+
+       return err;
+}
+
+SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
+               unsigned long, idx1, unsigned long, idx2)
+{
+       struct task_struct *task1, *task2;
+       int ret;
+
+       rcu_read_lock();
+
+       /*
+        * Tasks are looked up in caller's PID namespace only.
+        */
+       task1 = find_task_by_vpid(pid1);
+       task2 = find_task_by_vpid(pid2);
+       if (!task1 || !task2)
+               goto err_no_task;
+
+       get_task_struct(task1);
+       get_task_struct(task2);
+
+       rcu_read_unlock();
+
+       /*
+        * One should have enough rights to inspect task details.
+        */
+       ret = kcmp_lock(&task1->signal->cred_guard_mutex,
+                       &task2->signal->cred_guard_mutex);
+       if (ret)
+               goto err;
+       if (!ptrace_may_access(task1, PTRACE_MODE_READ) ||
+           !ptrace_may_access(task2, PTRACE_MODE_READ)) {
+               ret = -EPERM;
+               goto err_unlock;
+       }
+
+       switch (type) {
+       case KCMP_FILE: {
+               struct file *filp1, *filp2;
+
+               filp1 = get_file_raw_ptr(task1, idx1);
+               filp2 = get_file_raw_ptr(task2, idx2);
+
+               if (filp1 && filp2)
+                       ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
+               else
+                       ret = -EBADF;
+               break;
+       }
+       case KCMP_VM:
+               ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
+               break;
+       case KCMP_FILES:
+               ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
+               break;
+       case KCMP_FS:
+               ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
+               break;
+       case KCMP_SIGHAND:
+               ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
+               break;
+       case KCMP_IO:
+               ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
+               break;
+       case KCMP_SYSVSEM:
+#ifdef CONFIG_SYSVIPC
+               ret = kcmp_ptr(task1->sysvsem.undo_list,
+                              task2->sysvsem.undo_list,
+                              KCMP_SYSVSEM);
+#else
+               ret = -EOPNOTSUPP;
+#endif
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+err_unlock:
+       kcmp_unlock(&task1->signal->cred_guard_mutex,
+                   &task2->signal->cred_guard_mutex);
+err:
+       put_task_struct(task1);
+       put_task_struct(task2);
+
+       return ret;
+
+err_no_task:
+       rcu_read_unlock();
+       return -ESRCH;
+}
+
+static __init int kcmp_cookies_init(void)
+{
+       int i;
+
+       get_random_bytes(cookies, sizeof(cookies));
+
+       for (i = 0; i < KCMP_TYPES; i++)
+               cookies[i][1] |= (~(~0UL >>  1) | 1);
+
+       return 0;
+}
+arch_initcall(kcmp_cookies_init);
index 05698a7415fea66ea604b87959bde93f5b2673a3..ff2c7cb86d770aaf51712e330dc0f1e8a72a26e6 100644 (file)
@@ -221,13 +221,12 @@ fail:
        return 0;
 }
 
-void call_usermodehelper_freeinfo(struct subprocess_info *info)
+static void call_usermodehelper_freeinfo(struct subprocess_info *info)
 {
        if (info->cleanup)
                (*info->cleanup)(info);
        kfree(info);
 }
-EXPORT_SYMBOL(call_usermodehelper_freeinfo);
 
 static void umh_complete(struct subprocess_info *sub_info)
 {
@@ -410,7 +409,7 @@ EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
 
 /**
  * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
- * depth: New value to assign to usermodehelper_disabled.
+ * @depth: New value to assign to usermodehelper_disabled.
  *
  * Change the value of usermodehelper_disabled (under umhelper_sem locked for
  * writing) and wakeup tasks waiting for it to change.
@@ -479,6 +478,7 @@ static void helper_unlock(void)
  * structure.  This should be passed to call_usermodehelper_exec to
  * exec the process and free the structure.
  */
+static
 struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
                                                  char **envp, gfp_t gfp_mask)
 {
@@ -494,7 +494,6 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
   out:
        return sub_info;
 }
-EXPORT_SYMBOL(call_usermodehelper_setup);
 
 /**
  * call_usermodehelper_setfns - set a cleanup/init function
@@ -512,6 +511,7 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
  * Function must be runnable in either a process context or the
  * context in which call_usermodehelper_exec is called.
  */
+static
 void call_usermodehelper_setfns(struct subprocess_info *info,
                    int (*init)(struct subprocess_info *info, struct cred *new),
                    void (*cleanup)(struct subprocess_info *info),
@@ -521,7 +521,6 @@ void call_usermodehelper_setfns(struct subprocess_info *info,
        info->init = init;
        info->data = data;
 }
-EXPORT_SYMBOL(call_usermodehelper_setfns);
 
 /**
  * call_usermodehelper_exec - start a usermode application
@@ -535,6 +534,7 @@ EXPORT_SYMBOL(call_usermodehelper_setfns);
  * asynchronously if wait is not set, and runs as a child of keventd.
  * (ie. it runs with full root capabilities).
  */
+static
 int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 {
        DECLARE_COMPLETION_ONSTACK(done);
@@ -576,7 +576,25 @@ unlock:
        helper_unlock();
        return retval;
 }
-EXPORT_SYMBOL(call_usermodehelper_exec);
+
+int call_usermodehelper_fns(
+       char *path, char **argv, char **envp, int wait,
+       int (*init)(struct subprocess_info *info, struct cred *new),
+       void (*cleanup)(struct subprocess_info *), void *data)
+{
+       struct subprocess_info *info;
+       gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
+
+       info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
+
+       if (info == NULL)
+               return -ENOMEM;
+
+       call_usermodehelper_setfns(info, init, cleanup, data);
+
+       return call_usermodehelper_exec(info, wait);
+}
+EXPORT_SYMBOL(call_usermodehelper_fns);
 
 static int proc_cap_handler(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
diff --git a/kernel/lglock.c b/kernel/lglock.c
new file mode 100644 (file)
index 0000000..6535a66
--- /dev/null
@@ -0,0 +1,89 @@
+/* See include/linux/lglock.h for description */
+#include <linux/module.h>
+#include <linux/lglock.h>
+#include <linux/cpu.h>
+#include <linux/string.h>
+
+/*
+ * Note there is no uninit, so lglocks cannot be defined in
+ * modules (but it's fine to use them from there)
+ * Could be added though, just undo lg_lock_init
+ */
+
+void lg_lock_init(struct lglock *lg, char *name)
+{
+       LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
+}
+EXPORT_SYMBOL(lg_lock_init);
+
+void lg_local_lock(struct lglock *lg)
+{
+       arch_spinlock_t *lock;
+
+       preempt_disable();
+       rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+       lock = this_cpu_ptr(lg->lock);
+       arch_spin_lock(lock);
+}
+EXPORT_SYMBOL(lg_local_lock);
+
+void lg_local_unlock(struct lglock *lg)
+{
+       arch_spinlock_t *lock;
+
+       rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+       lock = this_cpu_ptr(lg->lock);
+       arch_spin_unlock(lock);
+       preempt_enable();
+}
+EXPORT_SYMBOL(lg_local_unlock);
+
+void lg_local_lock_cpu(struct lglock *lg, int cpu)
+{
+       arch_spinlock_t *lock;
+
+       preempt_disable();
+       rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+       lock = per_cpu_ptr(lg->lock, cpu);
+       arch_spin_lock(lock);
+}
+EXPORT_SYMBOL(lg_local_lock_cpu);
+
+void lg_local_unlock_cpu(struct lglock *lg, int cpu)
+{
+       arch_spinlock_t *lock;
+
+       rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+       lock = per_cpu_ptr(lg->lock, cpu);
+       arch_spin_unlock(lock);
+       preempt_enable();
+}
+EXPORT_SYMBOL(lg_local_unlock_cpu);
+
+void lg_global_lock(struct lglock *lg)
+{
+       int i;
+
+       preempt_disable();
+       rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
+       for_each_possible_cpu(i) {
+               arch_spinlock_t *lock;
+               lock = per_cpu_ptr(lg->lock, i);
+               arch_spin_lock(lock);
+       }
+}
+EXPORT_SYMBOL(lg_global_lock);
+
+void lg_global_unlock(struct lglock *lg)
+{
+       int i;
+
+       rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+       for_each_possible_cpu(i) {
+               arch_spinlock_t *lock;
+               lock = per_cpu_ptr(lg->lock, i);
+               arch_spin_unlock(lock);
+       }
+       preempt_enable();
+}
+EXPORT_SYMBOL(lg_global_unlock);
index 57bc1fd35b3cbe6bffdbfe71af5f13fc00648b81..16b20e38c4a1e26e64db477f76bbbfcf1787b75b 100644 (file)
@@ -149,7 +149,12 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
 {
        int nr;
        int rc;
-       struct task_struct *task;
+       struct task_struct *task, *me = current;
+
+       /* Ignore SIGCHLD causing any terminated children to autoreap */
+       spin_lock_irq(&me->sighand->siglock);
+       me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
+       spin_unlock_irq(&me->sighand->siglock);
 
        /*
         * The last thread in the cgroup-init thread group is terminating.
@@ -191,6 +196,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
        return;
 }
 
+#ifdef CONFIG_CHECKPOINT_RESTORE
 static int pid_ns_ctl_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -218,8 +224,8 @@ static struct ctl_table pid_ns_ctl_table[] = {
        },
        { }
 };
-
 static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
+#endif /* CONFIG_CHECKPOINT_RESTORE */
 
 int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
 {
@@ -253,7 +259,10 @@ int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
 static __init int pid_namespaces_init(void)
 {
        pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
+
+#ifdef CONFIG_CHECKPOINT_RESTORE
        register_sysctl_paths(kern_path, pid_ns_ctl_table);
+#endif
        return 0;
 }
 
index bebe2b170d49ffd4c5b96590114f40d4d9fb69f9..ad581aa2369a2ed8f925c395b2b4eadd9d8640f2 100644 (file)
@@ -94,13 +94,15 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
        counter->usage -= val;
 }
 
-void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+void res_counter_uncharge_until(struct res_counter *counter,
+                               struct res_counter *top,
+                               unsigned long val)
 {
        unsigned long flags;
        struct res_counter *c;
 
        local_irq_save(flags);
-       for (c = counter; c != NULL; c = c->parent) {
+       for (c = counter; c != top; c = c->parent) {
                spin_lock(&c->lock);
                res_counter_uncharge_locked(c, val);
                spin_unlock(&c->lock);
@@ -108,6 +110,10 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val)
        local_irq_restore(flags);
 }
 
+void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+{
+       res_counter_uncharge_until(counter, NULL, val);
+}
 
 static inline unsigned long long *
 res_counter_member(struct res_counter *counter, int member)
index 7e8ea66a8c016ffc934997256835f3fdc735f2f8..e1d2b8ee76d5bcd22d552c21cdf2102eaf6c7627 100644 (file)
@@ -515,8 +515,8 @@ out:
  * @root: root resource descriptor
  * @new: resource descriptor desired by caller
  * @size: requested resource region size
- * @min: minimum size to allocate
- * @max: maximum size to allocate
+ * @min: minimum boundary to allocate
+ * @max: maximum boundary to allocate
  * @align: alignment requested, in bytes
  * @alignf: alignment function, optional, called if not NULL
  * @alignf_data: arbitrary data to pass to the @alignf function
index f7b4182176331c2f3c667117fa60d48040d44a9e..677102789cf22d4847936782f6c6f67085421927 100644 (file)
@@ -1656,19 +1656,18 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
        info.si_signo = sig;
        info.si_errno = 0;
        /*
-        * we are under tasklist_lock here so our parent is tied to
-        * us and cannot exit and release its namespace.
+        * We are under tasklist_lock here so our parent is tied to
+        * us and cannot change.
         *
-        * the only it can is to switch its nsproxy with sys_unshare,
-        * bu uncharing pid namespaces is not allowed, so we'll always
-        * see relevant namespace
+        * task_active_pid_ns will always return the same pid namespace
+        * until a task passes through release_task.
         *
         * write_lock() currently calls preempt_disable() which is the
         * same as rcu_read_lock(), but according to Oleg, this is not
         * correct to rely on this
         */
        rcu_read_lock();
-       info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
+       info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
        info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
                                       task_uid(tsk));
        rcu_read_unlock();
@@ -2369,24 +2368,34 @@ relock:
 }
 
 /**
- * block_sigmask - add @ka's signal mask to current->blocked
- * @ka: action for @signr
- * @signr: signal that has been successfully delivered
+ * signal_delivered - 
+ * @sig:               number of signal being delivered
+ * @info:              siginfo_t of signal being delivered
+ * @ka:                        sigaction setting that chose the handler
+ * @regs:              user register state
+ * @stepping:          nonzero if debugger single-step or block-step in use
  *
  * This function should be called when a signal has succesfully been
- * delivered. It adds the mask of signals for @ka to current->blocked
- * so that they are blocked during the execution of the signal
- * handler. In addition, @signr will be blocked unless %SA_NODEFER is
- * set in @ka->sa.sa_flags.
+ * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
+ * is always blocked, and the signal itself is blocked unless %SA_NODEFER
+ * is set in @ka->sa.sa_flags.  Tracing is notified.
  */
-void block_sigmask(struct k_sigaction *ka, int signr)
+void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
+                       struct pt_regs *regs, int stepping)
 {
        sigset_t blocked;
 
+       /* A signal was successfully delivered, and the
+          saved sigmask was stored on the signal frame,
+          and will be restored by sigreturn.  So we can
+          simply clear the restore sigmask flag.  */
+       clear_restore_sigmask();
+
        sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NODEFER))
-               sigaddset(&blocked, signr);
+               sigaddset(&blocked, sig);
        set_current_blocked(&blocked);
+       tracehook_signal_handler(sig, info, ka, regs, stepping);
 }
 
 /*
@@ -2519,7 +2528,16 @@ static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
  * It is wrong to change ->blocked directly, this helper should be used
  * to ensure the process can't miss a shared signal we are going to block.
  */
-void set_current_blocked(const sigset_t *newset)
+void set_current_blocked(sigset_t *newset)
+{
+       struct task_struct *tsk = current;
+       sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
+       spin_lock_irq(&tsk->sighand->siglock);
+       __set_task_blocked(tsk, newset);
+       spin_unlock_irq(&tsk->sighand->siglock);
+}
+
+void __set_current_blocked(const sigset_t *newset)
 {
        struct task_struct *tsk = current;
 
@@ -2559,7 +2577,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
                return -EINVAL;
        }
 
-       set_current_blocked(&newset);
+       __set_current_blocked(&newset);
        return 0;
 }
 
@@ -3133,7 +3151,7 @@ SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
                        return -EINVAL;
                }
 
-               set_current_blocked(&new_blocked);
+               __set_current_blocked(&new_blocked);
        }
 
        if (oset) {
@@ -3197,7 +3215,6 @@ SYSCALL_DEFINE1(ssetmask, int, newmask)
        int old = current->blocked.sig[0];
        sigset_t newset;
 
-       siginitset(&newset, newmask & ~(sigmask(SIGKILL) | sigmask(SIGSTOP)));
        set_current_blocked(&newset);
 
        return old;
@@ -3236,11 +3253,8 @@ SYSCALL_DEFINE0(pause)
 
 #endif
 
-#ifdef HAVE_SET_RESTORE_SIGMASK
 int sigsuspend(sigset_t *set)
 {
-       sigdelsetmask(set, sigmask(SIGKILL)|sigmask(SIGSTOP));
-
        current->saved_sigmask = current->blocked;
        set_current_blocked(set);
 
@@ -3249,7 +3263,6 @@ int sigsuspend(sigset_t *set)
        set_restore_sigmask();
        return -ERESTARTNOHAND;
 }
-#endif
 
 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
 /**
index 6df42624e454aeb236ab1c9413d6fdf5f676365f..9ff89cb9657a681d15714447d7d3c64939fbbe50 100644 (file)
@@ -36,6 +36,8 @@
 #include <linux/personality.h>
 #include <linux/ptrace.h>
 #include <linux/fs_struct.h>
+#include <linux/file.h>
+#include <linux/mount.h>
 #include <linux/gfp.h>
 #include <linux/syscore_ops.h>
 #include <linux/version.h>
@@ -1378,8 +1380,8 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
                memcpy(u->nodename, tmp, len);
                memset(u->nodename + len, 0, sizeof(u->nodename) - len);
                errno = 0;
+               uts_proc_notify(UTS_PROC_HOSTNAME);
        }
-       uts_proc_notify(UTS_PROC_HOSTNAME);
        up_write(&uts_sem);
        return errno;
 }
@@ -1429,8 +1431,8 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
                memcpy(u->domainname, tmp, len);
                memset(u->domainname + len, 0, sizeof(u->domainname) - len);
                errno = 0;
+               uts_proc_notify(UTS_PROC_DOMAINNAME);
        }
-       uts_proc_notify(UTS_PROC_DOMAINNAME);
        up_write(&uts_sem);
        return errno;
 }
@@ -1784,77 +1786,102 @@ SYSCALL_DEFINE1(umask, int, mask)
 }
 
 #ifdef CONFIG_CHECKPOINT_RESTORE
+static bool vma_flags_mismatch(struct vm_area_struct *vma,
+                              unsigned long required,
+                              unsigned long banned)
+{
+       return (vma->vm_flags & required) != required ||
+               (vma->vm_flags & banned);
+}
+
+static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
+{
+       struct file *exe_file;
+       struct dentry *dentry;
+       int err;
+
+       /*
+        * Setting new mm::exe_file is only allowed when no VM_EXECUTABLE vma's
+        * remain. So perform a quick test first.
+        */
+       if (mm->num_exe_file_vmas)
+               return -EBUSY;
+
+       exe_file = fget(fd);
+       if (!exe_file)
+               return -EBADF;
+
+       dentry = exe_file->f_path.dentry;
+
+       /*
+        * Because the original mm->exe_file points to executable file, make
+        * sure that this one is executable as well, to avoid breaking an
+        * overall picture.
+        */
+       err = -EACCES;
+       if (!S_ISREG(dentry->d_inode->i_mode)   ||
+           exe_file->f_path.mnt->mnt_flags & MNT_NOEXEC)
+               goto exit;
+
+       err = inode_permission(dentry->d_inode, MAY_EXEC);
+       if (err)
+               goto exit;
+
+       /*
+        * The symlink can be changed only once, just to disallow arbitrary
+        * transitions malicious software might bring in. This means one
+        * could make a snapshot over all processes running and monitor
+        * /proc/pid/exe changes to notice unusual activity if needed.
+        */
+       down_write(&mm->mmap_sem);
+       if (likely(!mm->exe_file))
+               set_mm_exe_file(mm, exe_file);
+       else
+               err = -EBUSY;
+       up_write(&mm->mmap_sem);
+
+exit:
+       fput(exe_file);
+       return err;
+}
+
 static int prctl_set_mm(int opt, unsigned long addr,
                        unsigned long arg4, unsigned long arg5)
 {
        unsigned long rlim = rlimit(RLIMIT_DATA);
-       unsigned long vm_req_flags;
-       unsigned long vm_bad_flags;
-       struct vm_area_struct *vma;
-       int error = 0;
        struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       int error;
 
-       if (arg4 | arg5)
+       if (arg5 || (arg4 && opt != PR_SET_MM_AUXV))
                return -EINVAL;
 
        if (!capable(CAP_SYS_RESOURCE))
                return -EPERM;
 
+       if (opt == PR_SET_MM_EXE_FILE)
+               return prctl_set_mm_exe_file(mm, (unsigned int)addr);
+
        if (addr >= TASK_SIZE)
                return -EINVAL;
 
+       error = -EINVAL;
+
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, addr);
 
-       if (opt != PR_SET_MM_START_BRK && opt != PR_SET_MM_BRK) {
-               /* It must be existing VMA */
-               if (!vma || vma->vm_start > addr)
-                       goto out;
-       }
-
-       error = -EINVAL;
        switch (opt) {
        case PR_SET_MM_START_CODE:
+               mm->start_code = addr;
+               break;
        case PR_SET_MM_END_CODE:
-               vm_req_flags = VM_READ | VM_EXEC;
-               vm_bad_flags = VM_WRITE | VM_MAYSHARE;
-
-               if ((vma->vm_flags & vm_req_flags) != vm_req_flags ||
-                   (vma->vm_flags & vm_bad_flags))
-                       goto out;
-
-               if (opt == PR_SET_MM_START_CODE)
-                       mm->start_code = addr;
-               else
-                       mm->end_code = addr;
+               mm->end_code = addr;
                break;
-
        case PR_SET_MM_START_DATA:
-       case PR_SET_MM_END_DATA:
-               vm_req_flags = VM_READ | VM_WRITE;
-               vm_bad_flags = VM_EXEC | VM_MAYSHARE;
-
-               if ((vma->vm_flags & vm_req_flags) != vm_req_flags ||
-                   (vma->vm_flags & vm_bad_flags))
-                       goto out;
-
-               if (opt == PR_SET_MM_START_DATA)
-                       mm->start_data = addr;
-               else
-                       mm->end_data = addr;
+               mm->start_data = addr;
                break;
-
-       case PR_SET_MM_START_STACK:
-
-#ifdef CONFIG_STACK_GROWSUP
-               vm_req_flags = VM_READ | VM_WRITE | VM_GROWSUP;
-#else
-               vm_req_flags = VM_READ | VM_WRITE | VM_GROWSDOWN;
-#endif
-               if ((vma->vm_flags & vm_req_flags) != vm_req_flags)
-                       goto out;
-
-               mm->start_stack = addr;
+       case PR_SET_MM_END_DATA:
+               mm->end_data = addr;
                break;
 
        case PR_SET_MM_START_BRK:
@@ -1881,16 +1908,77 @@ static int prctl_set_mm(int opt, unsigned long addr,
                mm->brk = addr;
                break;
 
+       /*
+        * If command line arguments and environment
+        * are placed somewhere else on stack, we can
+        * set them up here, ARG_START/END to setup
+        * command line arguments and ENV_START/END
+        * for environment.
+        */
+       case PR_SET_MM_START_STACK:
+       case PR_SET_MM_ARG_START:
+       case PR_SET_MM_ARG_END:
+       case PR_SET_MM_ENV_START:
+       case PR_SET_MM_ENV_END:
+               if (!vma) {
+                       error = -EFAULT;
+                       goto out;
+               }
+#ifdef CONFIG_STACK_GROWSUP
+               if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSUP, 0))
+#else
+               if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSDOWN, 0))
+#endif
+                       goto out;
+               if (opt == PR_SET_MM_START_STACK)
+                       mm->start_stack = addr;
+               else if (opt == PR_SET_MM_ARG_START)
+                       mm->arg_start = addr;
+               else if (opt == PR_SET_MM_ARG_END)
+                       mm->arg_end = addr;
+               else if (opt == PR_SET_MM_ENV_START)
+                       mm->env_start = addr;
+               else if (opt == PR_SET_MM_ENV_END)
+                       mm->env_end = addr;
+               break;
+
+       /*
+        * This doesn't move auxiliary vector itself
+        * since it's pinned to mm_struct, but allows
+        * filling the vector with new values. It's up
+        * to a caller to provide sane values here
+        * otherwise user space tools which use this
+        * vector might be unhappy.
+        */
+       case PR_SET_MM_AUXV: {
+               unsigned long user_auxv[AT_VECTOR_SIZE];
+
+               if (arg4 > sizeof(user_auxv))
+                       goto out;
+               up_read(&mm->mmap_sem);
+
+               if (copy_from_user(user_auxv, (const void __user *)addr, arg4))
+                       return -EFAULT;
+
+               /* Make sure the last entry is always AT_NULL */
+               user_auxv[AT_VECTOR_SIZE - 2] = 0;
+               user_auxv[AT_VECTOR_SIZE - 1] = 0;
+
+               BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
+
+               task_lock(current);
+               memcpy(mm->saved_auxv, user_auxv, arg4);
+               task_unlock(current);
+
+               return 0;
+       }
        default:
-               error = -EINVAL;
                goto out;
        }
 
        error = 0;
-
 out:
        up_read(&mm->mmap_sem);
-
        return error;
 }
 #else /* CONFIG_CHECKPOINT_RESTORE */
@@ -2114,7 +2202,6 @@ int orderly_poweroff(bool force)
                NULL
        };
        int ret = -ENOMEM;
-       struct subprocess_info *info;
 
        if (argv == NULL) {
                printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
@@ -2122,18 +2209,16 @@ int orderly_poweroff(bool force)
                goto out;
        }
 
-       info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);
-       if (info == NULL) {
-               argv_free(argv);
-               goto out;
-       }
-
-       call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL);
+       ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_NO_WAIT,
+                                     NULL, argv_cleanup, NULL);
+out:
+       if (likely(!ret))
+               return 0;
 
-       ret = call_usermodehelper_exec(info, UMH_NO_WAIT);
+       if (ret == -ENOMEM)
+               argv_free(argv);
 
-  out:
-       if (ret && force) {
+       if (force) {
                printk(KERN_WARNING "Failed to start orderly shutdown: "
                       "forcing the issue\n");
 
index 47bfa16430d7dc764c17a06f4c40dd142ef6a88a..dbff751e408647badd0d7e92b935962bfc3ef8e2 100644 (file)
@@ -203,3 +203,6 @@ cond_syscall(sys_fanotify_mark);
 cond_syscall(sys_name_to_handle_at);
 cond_syscall(sys_open_by_handle_at);
 cond_syscall(compat_sys_open_by_handle_at);
+
+/* compare kernel pointers */
+cond_syscall(sys_kcmp);
diff --git a/kernel/task_work.c b/kernel/task_work.c
new file mode 100644 (file)
index 0000000..82d1c79
--- /dev/null
@@ -0,0 +1,84 @@
+#include <linux/spinlock.h>
+#include <linux/task_work.h>
+#include <linux/tracehook.h>
+
+int
+task_work_add(struct task_struct *task, struct task_work *twork, bool notify)
+{
+       unsigned long flags;
+       int err = -ESRCH;
+
+#ifndef TIF_NOTIFY_RESUME
+       if (notify)
+               return -ENOTSUPP;
+#endif
+       /*
+        * We must not insert the new work if the task has already passed
+        * exit_task_work(). We rely on do_exit()->raw_spin_unlock_wait()
+        * and check PF_EXITING under pi_lock.
+        */
+       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       if (likely(!(task->flags & PF_EXITING))) {
+               hlist_add_head(&twork->hlist, &task->task_works);
+               err = 0;
+       }
+       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+       /* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
+       if (likely(!err) && notify)
+               set_notify_resume(task);
+       return err;
+}
+
+struct task_work *
+task_work_cancel(struct task_struct *task, task_work_func_t func)
+{
+       unsigned long flags;
+       struct task_work *twork;
+       struct hlist_node *pos;
+
+       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       hlist_for_each_entry(twork, pos, &task->task_works, hlist) {
+               if (twork->func == func) {
+                       hlist_del(&twork->hlist);
+                       goto found;
+               }
+       }
+       twork = NULL;
+ found:
+       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+       return twork;
+}
+
+void task_work_run(void)
+{
+       struct task_struct *task = current;
+       struct hlist_head task_works;
+       struct hlist_node *pos;
+
+       raw_spin_lock_irq(&task->pi_lock);
+       hlist_move_list(&task->task_works, &task_works);
+       raw_spin_unlock_irq(&task->pi_lock);
+
+       if (unlikely(hlist_empty(&task_works)))
+               return;
+       /*
+        * We use hlist to save the space in task_struct, but we want fifo.
+        * Find the last entry, the list should be short, then process them
+        * in reverse order.
+        */
+       for (pos = task_works.first; pos->next; pos = pos->next)
+               ;
+
+       for (;;) {
+               struct hlist_node **pprev = pos->pprev;
+               struct task_work *twork = container_of(pos, struct task_work,
+                                                       hlist);
+               twork->func(twork);
+
+               if (pprev == &task_works.first)
+                       break;
+               pos = container_of(pprev, struct hlist_node, next);
+       }
+}
index 6420cda62336c1194d02c86203a91e18766a037b..1d0f6a8a0e5e83680c0df3b28836a5f6a2103a39 100644 (file)
@@ -1486,6 +1486,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
        if (!buffer)
                return size;
 
+       /* Make sure the requested buffer exists */
+       if (cpu_id != RING_BUFFER_ALL_CPUS &&
+           !cpumask_test_cpu(cpu_id, buffer->cpumask))
+               return size;
+
        size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
        size *= BUF_PAGE_SIZE;
 
index df30ee08bdd42161105a9564871cc80491d0b7aa..e5e1d85b8c7c23090ce59b7e5b1e868535c85ef0 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/sysctl.h>
 
 #include <asm/irq_regs.h>
+#include <linux/kvm_para.h>
 #include <linux/perf_event.h>
 
 int watchdog_enabled = 1;
@@ -280,6 +281,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }
+
+               /* Clear the guest paused flag on watchdog reset */
+               kvm_check_and_clear_guest_paused();
                __touch_watchdog();
                return HRTIMER_RESTART;
        }
@@ -292,6 +296,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
+               /*
+                * If a virtual machine is stopped by the host it can look to
+                * the watchdog like a soft lockup, check to see if the host
+                * stopped the vm before we issue the warning
+                */
+               if (kvm_check_and_clear_guest_paused())
+                       return HRTIMER_RESTART;
+
                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;
index 98230ac3db29327544535a5d9500ce07e0fae649..a9e15403434ef9adcec951800048752b8cbb1275 100644 (file)
@@ -19,6 +19,9 @@ config RATIONAL
 config GENERIC_STRNCPY_FROM_USER
        bool
 
+config GENERIC_STRNLEN_USER
+       bool
+
 config GENERIC_FIND_FIRST_BIT
        bool
 
@@ -36,6 +39,9 @@ config GENERIC_IO
        boolean
        default n
 
+config STMP_DEVICE
+       bool
+
 config CRC_CCITT
        tristate "CRC-CCITT functions"
        help
index b98df505f335de2e6562c3196d72f924f57f7f10..8c31a0cb75e97746af1cd02906ac149105250cc2 100644 (file)
@@ -126,6 +126,9 @@ obj-$(CONFIG_CLZ_TAB) += clz_tab.o
 obj-$(CONFIG_DDR) += jedec_ddr_data.o
 
 obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
+obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
+
+obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
 
 hostprogs-y    := gen_crc32table
 clean-files    := crc32table.h
index b5a8b6ad2454e32ec2444509fdb7aff4c80ec2d4..06fdfa1aeba712283c1e1d706424c1279abaf0ca 100644 (file)
@@ -369,7 +369,8 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area);
  * @nmaskbits: size of bitmap, in bits
  *
  * Exactly @nmaskbits bits are displayed.  Hex digits are grouped into
- * comma-separated sets of eight digits per set.
+ * comma-separated sets of eight digits per set.  Returns the number of
+ * characters which were written to *buf, excluding the trailing \0.
  */
 int bitmap_scnprintf(char *buf, unsigned int buflen,
        const unsigned long *maskp, int nmaskbits)
@@ -517,8 +518,8 @@ EXPORT_SYMBOL(bitmap_parse_user);
  *
  * Helper routine for bitmap_scnlistprintf().  Write decimal number
  * or range to buf, suppressing output past buf+buflen, with optional
- * comma-prefix.  Return len of what would be written to buf, if it
- * all fit.
+ * comma-prefix.  Return len of what was written to *buf, excluding the
+ * trailing \0.
  */
 static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
 {
@@ -544,9 +545,8 @@ static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
  * the range.  Output format is compatible with the format
  * accepted as input by bitmap_parselist().
  *
- * The return value is the number of characters which would be
- * generated for the given input, excluding the trailing '\0', as
- * per ISO C99.
+ * The return value is the number of characters which were written to *buf
+ * excluding the trailing '\0', as per ISO C99's scnprintf.
  */
 int bitmap_scnlistprintf(char *buf, unsigned int buflen,
        const unsigned long *maskp, int nmaskbits)
index 13ef2338be4150d1345a80a510d67f410a7c2c73..518aea714d21d9dea1c7d52b0cf6878482ca5d82 100644 (file)
@@ -430,7 +430,7 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
  */
 static struct dma_debug_entry *dma_entry_alloc(void)
 {
-       struct dma_debug_entry *entry = NULL;
+       struct dma_debug_entry *entry;
        unsigned long flags;
 
        spin_lock_irqsave(&free_entries_lock, flags);
@@ -438,11 +438,14 @@ static struct dma_debug_entry *dma_entry_alloc(void)
        if (list_empty(&free_entries)) {
                pr_err("DMA-API: debugging out of memory - disabling\n");
                global_disable = true;
-               goto out;
+               spin_unlock_irqrestore(&free_entries_lock, flags);
+               return NULL;
        }
 
        entry = __dma_entry_alloc();
 
+       spin_unlock_irqrestore(&free_entries_lock, flags);
+
 #ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
@@ -450,9 +453,6 @@ static struct dma_debug_entry *dma_entry_alloc(void)
        save_stack_trace(&entry->stacktrace);
 #endif
 
-out:
-       spin_unlock_irqrestore(&free_entries_lock, flags);
-
        return entry;
 }
 
index 6ab4587d052b5994f3be952b732bfc5a613664f9..0777c5a45fa04f3464100dfdf24dbee15aecfd98 100644 (file)
 #include <linux/jiffies.h>
 #include <linux/dynamic_queue_limits.h>
 
-#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0)
+#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
+#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
 
 /* Records completed count and recalculates the queue limit */
 void dql_completed(struct dql *dql, unsigned int count)
 {
        unsigned int inprogress, prev_inprogress, limit;
-       unsigned int ovlimit, all_prev_completed, completed;
+       unsigned int ovlimit, completed, num_queued;
+       bool all_prev_completed;
+
+       num_queued = ACCESS_ONCE(dql->num_queued);
 
        /* Can't complete more than what's in queue */
-       BUG_ON(count > dql->num_queued - dql->num_completed);
+       BUG_ON(count > num_queued - dql->num_completed);
 
        completed = dql->num_completed + count;
        limit = dql->limit;
-       ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit);
-       inprogress = dql->num_queued - completed;
+       ovlimit = POSDIFF(num_queued - dql->num_completed, limit);
+       inprogress = num_queued - completed;
        prev_inprogress = dql->prev_num_queued - dql->num_completed;
-       all_prev_completed = POSDIFF(completed, dql->prev_num_queued);
+       all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued);
 
        if ((ovlimit && !inprogress) ||
            (dql->prev_ovlimit && all_prev_completed)) {
@@ -104,7 +108,7 @@ void dql_completed(struct dql *dql, unsigned int count)
        dql->prev_ovlimit = ovlimit;
        dql->prev_last_obj_cnt = dql->last_obj_cnt;
        dql->num_completed = completed;
-       dql->prev_num_queued = dql->num_queued;
+       dql->prev_num_queued = num_queued;
 }
 EXPORT_SYMBOL(dql_completed);
 
index 3810b481f940bac6450f3e7342ec5dbfdb385318..23a5e031cd8bc43605655da59bce57eadb3304b8 100644 (file)
@@ -31,6 +31,9 @@ void __list_add(struct list_head *new,
                "list_add corruption. prev->next should be "
                "next (%p), but was %p. (prev=%p).\n",
                next, prev->next, prev);
+       WARN(new == prev || new == next,
+            "list_add double add: new=%p, prev=%p, next=%p.\n",
+            new, prev, next);
        next->prev = new;
        new->next = next;
        new->prev = prev;
index 86516f5588e31782676087fd49fe45fd87538c03..d7c878cc006cf62ac2b0b0abb1a3a69fa62c64e2 100644 (file)
@@ -72,12 +72,25 @@ static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1] __read_mostly;
  */
 static struct kmem_cache *radix_tree_node_cachep;
 
+/*
+ * The radix tree is variable-height, so an insert operation not only has
+ * to build the branch to its corresponding item, it also has to build the
+ * branch to existing items if the size has to be increased (by
+ * radix_tree_extend).
+ *
+ * The worst case is a zero height tree with just a single item at index 0,
+ * and then inserting an item at index ULONG_MAX. This requires 2 new branches
+ * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
+ * Hence:
+ */
+#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
+
 /*
  * Per-cpu pool of preloaded nodes
  */
 struct radix_tree_preload {
        int nr;
-       struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
+       struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
 };
 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
 
index 525d160d44f05c112acca5b2866b566ded334e19..d0ec4f3d1593031b5498dcc120822b41c423a3c5 100644 (file)
@@ -58,7 +58,7 @@ static void spin_dump(raw_spinlock_t *lock, const char *msg)
        printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
                msg, raw_smp_processor_id(),
                current->comm, task_pid_nr(current));
-       printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
+       printk(KERN_EMERG " lock: %ps, .magic: %08x, .owner: %s/%d, "
                        ".owner_cpu: %d\n",
                lock, lock->magic,
                owner ? owner->comm : "<none>",
diff --git a/lib/stmp_device.c b/lib/stmp_device.c
new file mode 100644 (file)
index 0000000..8ac9bcc
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 1999 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ * Copyright 2006-2007,2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ * Copyright 2009 Ilya Yanok, Emcraft Systems Ltd, yanok@emcraft.com
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/stmp_device.h>
+
+#define STMP_MODULE_CLKGATE    (1 << 30)
+#define STMP_MODULE_SFTRST     (1 << 31)
+
+/*
+ * Clear the bit and poll it cleared.  This is usually called with
+ * a reset address and mask being either SFTRST(bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int stmp_clear_poll_bit(void __iomem *addr, u32 mask)
+{
+       int timeout = 0x400;
+
+       writel(mask, addr + STMP_OFFSET_REG_CLR);
+       udelay(1);
+       while ((readl(addr) & mask) && --timeout)
+               /* nothing */;
+
+       return !timeout;
+}
+
+int stmp_reset_block(void __iomem *reset_addr)
+{
+       int ret;
+       int timeout = 0x400;
+
+       /* clear and poll SFTRST */
+       ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST);
+       if (unlikely(ret))
+               goto error;
+
+       /* clear CLKGATE */
+       writel(STMP_MODULE_CLKGATE, reset_addr + STMP_OFFSET_REG_CLR);
+
+       /* set SFTRST to reset the block */
+       writel(STMP_MODULE_SFTRST, reset_addr + STMP_OFFSET_REG_SET);
+       udelay(1);
+
+       /* poll CLKGATE becoming set */
+       while ((!(readl(reset_addr) & STMP_MODULE_CLKGATE)) && --timeout)
+               /* nothing */;
+       if (unlikely(!timeout))
+               goto error;
+
+       /* clear and poll SFTRST */
+       ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST);
+       if (unlikely(ret))
+               goto error;
+
+       /* clear and poll CLKGATE */
+       ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_CLKGATE);
+       if (unlikely(ret))
+               goto error;
+
+       return 0;
+
+error:
+       pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
+       return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(stmp_reset_block);
index dd4ece372699e5b717626ff0a558d919a118d60e..1cffc223bff52b7e153dff043ae9149073050934 100644 (file)
 int string_get_size(u64 size, const enum string_size_units units,
                    char *buf, int len)
 {
-       const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB",
+       static const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB",
                                   "EB", "ZB", "YB", NULL};
-       const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
+       static const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
                                 "EiB", "ZiB", "YiB", NULL };
-       const char **units_str[] = {
+       static const char **units_str[] = {
                [STRING_UNITS_10] =  units_10,
                [STRING_UNITS_2] = units_2,
        };
-       const unsigned int divisor[] = {
+       static const unsigned int divisor[] = {
                [STRING_UNITS_10] = 1000,
                [STRING_UNITS_2] = 1024,
        };
index c4c09b0e96bac19ad91cf6dae91d2785af901801..bb2b201d6ad0397a77df53d1841f148af4465170 100644 (file)
@@ -4,37 +4,7 @@
 #include <linux/errno.h>
 
 #include <asm/byteorder.h>
-
-static inline long find_zero(unsigned long mask)
-{
-       long byte = 0;
-
-#ifdef __BIG_ENDIAN
-#ifdef CONFIG_64BIT
-       if (mask >> 32)
-               mask >>= 32;
-       else
-               byte = 4;
-#endif
-       if (mask >> 16)
-               mask >>= 16;
-       else
-               byte += 2;
-       return (mask >> 8) ? byte : byte + 1;
-#else
-#ifdef CONFIG_64BIT
-       if (!((unsigned int) mask)) {
-               mask >>= 32;
-               byte = 4;
-       }
-#endif
-       if (!(mask & 0xffff)) {
-               mask >>= 16;
-               byte += 2;
-       }
-       return (mask & 0xff) ? byte : byte + 1;
-#endif
-}
+#include <asm/word-at-a-time.h>
 
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 #define IS_UNALIGNED(src, dst) 0
@@ -51,8 +21,7 @@ static inline long find_zero(unsigned long mask)
  */
 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
 {
-       const unsigned long high_bits = REPEAT_BYTE(0xfe) + 1;
-       const unsigned long low_bits = REPEAT_BYTE(0x7f);
+       const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
        long res = 0;
 
        /*
@@ -66,18 +35,16 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
                goto byte_at_a_time;
 
        while (max >= sizeof(unsigned long)) {
-               unsigned long c, v, rhs;
+               unsigned long c, data;
 
                /* Fall back to byte-at-a-time if we get a page fault */
                if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
                        break;
-               rhs = c | low_bits;
-               v = (c + high_bits) & ~rhs;
                *(unsigned long *)(dst+res) = c;
-               if (v) {
-                       v = (c & low_bits) + low_bits;
-                       v = ~(v | rhs);
-                       return res + find_zero(v);
+               if (has_zero(c, &data, &constants)) {
+                       data = prep_zero_mask(c, data, &constants);
+                       data = create_zero_mask(data);
+                       return res + find_zero(data);
                }
                res += sizeof(unsigned long);
                max -= sizeof(unsigned long);
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
new file mode 100644 (file)
index 0000000..a28df52
--- /dev/null
@@ -0,0 +1,138 @@
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/uaccess.h>
+
+#include <asm/word-at-a-time.h>
+
+/* Set bits in the first 'n' bytes when loaded from memory */
+#ifdef __LITTLE_ENDIAN
+#  define aligned_byte_mask(n) ((1ul << 8*(n))-1)
+#else
+#  define aligned_byte_mask(n) (~0xfful << (BITS_PER_LONG - 8 - 8*(n)))
+#endif
+
+/*
+ * Do a strnlen, return length of string *with* final '\0'.
+ * 'count' is the user-supplied count, while 'max' is the
+ * address space maximum.
+ *
+ * Return 0 for exceptions (which includes hitting the address
+ * space maximum), or 'count+1' if hitting the user-supplied
+ * maximum count.
+ *
+ * NOTE! We can sometimes overshoot the user-supplied maximum
+ * if it fits in an aligned 'long'. The caller needs to check
+ * the return value against "> max".
+ */
+static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
+{
+       const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+       long align, res = 0;
+       unsigned long c;
+
+       /*
+        * Truncate 'max' to the user-specified limit, so that
+        * we only have one limit we need to check in the loop
+        */
+       if (max > count)
+               max = count;
+
+       /*
+        * Do everything aligned. But that means that we
+        * need to also expand the maximum..
+        */
+       align = (sizeof(long) - 1) & (unsigned long)src;
+       src -= align;
+       max += align;
+
+       if (unlikely(__get_user(c,(unsigned long __user *)src)))
+               return 0;
+       c |= aligned_byte_mask(align);
+
+       for (;;) {
+               unsigned long data;
+               if (has_zero(c, &data, &constants)) {
+                       data = prep_zero_mask(c, data, &constants);
+                       data = create_zero_mask(data);
+                       return res + find_zero(data) + 1 - align;
+               }
+               res += sizeof(unsigned long);
+               if (unlikely(max < sizeof(unsigned long)))
+                       break;
+               max -= sizeof(unsigned long);
+               if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+                       return 0;
+       }
+       res -= align;
+
+       /*
+        * Uhhuh. We hit 'max'. But was that the user-specified maximum
+        * too? If so, return the marker for "too long".
+        */
+       if (res >= count)
+               return count+1;
+
+       /*
+        * Nope: we hit the address space limit, and we still had more
+        * characters the caller would have wanted. That's 0.
+        */
+       return 0;
+}
+
+/**
+ * strnlen_user: - Get the size of a user string INCLUDING final NUL.
+ * @str: The string to measure.
+ * @count: Maximum count (including NUL character)
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * If the string is too long, returns 'count+1'.
+ * On exception (or invalid count), returns 0.
+ */
+long strnlen_user(const char __user *str, long count)
+{
+       unsigned long max_addr, src_addr;
+
+       if (unlikely(count <= 0))
+               return 0;
+
+       max_addr = user_addr_max();
+       src_addr = (unsigned long)str;
+       if (likely(src_addr < max_addr)) {
+               unsigned long max = max_addr - src_addr;
+               return do_strnlen_user(str, count, max);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(strnlen_user);
+
+/**
+ * strlen_user: - Get the size of a user string INCLUDING final NUL.
+ * @str: The string to measure.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * On exception, returns 0.
+ *
+ * If there is a limit on the length of a valid string, you may wish to
+ * consider using strnlen_user() instead.
+ */
+long strlen_user(const char __user *str)
+{
+       unsigned long max_addr, src_addr;
+
+       max_addr = user_addr_max();
+       src_addr = (unsigned long)str;
+       if (likely(src_addr < max_addr)) {
+               unsigned long max = max_addr - src_addr;
+               return do_strnlen_user(str, ~0ul, max);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(strlen_user);
index 414f46ed1dcda9ae63be846cc6cf6e1b9bb7bb2e..45bc1f83a5ada665297bc0b9dcd3a6ad72e2ec72 100644 (file)
@@ -130,11 +130,9 @@ void swiotlb_print_info(void)
        pstart = virt_to_phys(io_tlb_start);
        pend = virt_to_phys(io_tlb_end);
 
-       printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
-              bytes >> 20, io_tlb_start, io_tlb_end);
-       printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
-              (unsigned long long)pstart,
-              (unsigned long long)pend);
+       printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
+              (unsigned long long)pstart, (unsigned long long)pend - 1,
+              bytes >> 20, io_tlb_start, io_tlb_end - 1);
 }
 
 void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
index d55769d63cb8b3efb8182a0f58a73ed496768b21..bea3f3fa3f02a920fb92b9293c0adf06d72a52a6 100644 (file)
@@ -11,7 +11,7 @@ struct test_fail {
 };
 
 #define DEFINE_TEST_FAIL(test) \
-       const struct test_fail test[] __initdata
+       const struct test_fail test[] __initconst
 
 #define DECLARE_TEST_OK(type, test_type)       \
        test_type {                             \
@@ -21,7 +21,7 @@ struct test_fail {
        }
 
 #define DEFINE_TEST_OK(type, test)     \
-       const type test[] __initdata
+       const type test[] __initconst
 
 #define TEST_FAIL(fn, type, fmt, test)                                 \
 {                                                                      \
index abbabec9720a1947ffa5364ae9aab9ec9696ecfe..c3f36d415bdf43034415c801b8e3f922f5e5b928 100644 (file)
@@ -112,106 +112,199 @@ int skip_atoi(const char **s)
 /* Decimal conversion is by far the most typical, and is used
  * for /proc and /sys data. This directly impacts e.g. top performance
  * with many processes running. We optimize it for speed
- * using code from
- * http://www.cs.uiowa.edu/~jones/bcd/decimal.html
- * (with permission from the author, Douglas W. Jones). */
+ * using ideas described at <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
+ * (with permission from the author, Douglas W. Jones).
+ */
 
-/* Formats correctly any integer in [0,99999].
- * Outputs from one to five digits depending on input.
- * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */
+#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
+/* Formats correctly any integer in [0, 999999999] */
 static noinline_for_stack
-char *put_dec_trunc(char *buf, unsigned q)
+char *put_dec_full9(char *buf, unsigned q)
 {
-       unsigned d3, d2, d1, d0;
-       d1 = (q>>4) & 0xf;
-       d2 = (q>>8) & 0xf;
-       d3 = (q>>12);
-
-       d0 = 6*(d3 + d2 + d1) + (q & 0xf);
-       q = (d0 * 0xcd) >> 11;
-       d0 = d0 - 10*q;
-       *buf++ = d0 + '0'; /* least significant digit */
-       d1 = q + 9*d3 + 5*d2 + d1;
-       if (d1 != 0) {
-               q = (d1 * 0xcd) >> 11;
-               d1 = d1 - 10*q;
-               *buf++ = d1 + '0'; /* next digit */
-
-               d2 = q + 2*d2;
-               if ((d2 != 0) || (d3 != 0)) {
-                       q = (d2 * 0xd) >> 7;
-                       d2 = d2 - 10*q;
-                       *buf++ = d2 + '0'; /* next digit */
-
-                       d3 = q + 4*d3;
-                       if (d3 != 0) {
-                               q = (d3 * 0xcd) >> 11;
-                               d3 = d3 - 10*q;
-                               *buf++ = d3 + '0';  /* next digit */
-                               if (q != 0)
-                                       *buf++ = q + '0'; /* most sign. digit */
-                       }
-               }
-       }
+       unsigned r;
 
+       /*
+        * Possible ways to approx. divide by 10
+        * (x * 0x1999999a) >> 32 x < 1073741829 (multiply must be 64-bit)
+        * (x * 0xcccd) >> 19     x <      81920 (x < 262149 when 64-bit mul)
+        * (x * 0x6667) >> 18     x <      43699
+        * (x * 0x3334) >> 17     x <      16389
+        * (x * 0x199a) >> 16     x <      16389
+        * (x * 0x0ccd) >> 15     x <      16389
+        * (x * 0x0667) >> 14     x <       2739
+        * (x * 0x0334) >> 13     x <       1029
+        * (x * 0x019a) >> 12     x <       1029
+        * (x * 0x00cd) >> 11     x <       1029 shorter code than * 0x67 (on i386)
+        * (x * 0x0067) >> 10     x <        179
+        * (x * 0x0034) >>  9     x <         69 same
+        * (x * 0x001a) >>  8     x <         69 same
+        * (x * 0x000d) >>  7     x <         69 same, shortest code (on i386)
+        * (x * 0x0007) >>  6     x <         19
+        * See <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
+        */
+       r      = (q * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (q - 10 * r) + '0'; /* 1 */
+       q      = (r * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (r - 10 * q) + '0'; /* 2 */
+       r      = (q * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (q - 10 * r) + '0'; /* 3 */
+       q      = (r * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (r - 10 * q) + '0'; /* 4 */
+       r      = (q * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (q - 10 * r) + '0'; /* 5 */
+       /* Now value is under 10000, can avoid 64-bit multiply */
+       q      = (r * 0x199a) >> 16;
+       *buf++ = (r - 10 * q)  + '0'; /* 6 */
+       r      = (q * 0xcd) >> 11;
+       *buf++ = (q - 10 * r)  + '0'; /* 7 */
+       q      = (r * 0xcd) >> 11;
+       *buf++ = (r - 10 * q) + '0'; /* 8 */
+       *buf++ = q + '0'; /* 9 */
        return buf;
 }
-/* Same with if's removed. Always emits five digits */
+#endif
+
+/* Similar to above but do not pad with zeros.
+ * Code can be easily arranged to print 9 digits too, but our callers
+ * always call put_dec_full9() instead when the number has 9 decimal digits.
+ */
 static noinline_for_stack
-char *put_dec_full(char *buf, unsigned q)
+char *put_dec_trunc8(char *buf, unsigned r)
 {
-       /* BTW, if q is in [0,9999], 8-bit ints will be enough, */
-       /* but anyway, gcc produces better code with full-sized ints */
-       unsigned d3, d2, d1, d0;
-       d1 = (q>>4) & 0xf;
-       d2 = (q>>8) & 0xf;
-       d3 = (q>>12);
+       unsigned q;
+
+       /* Copy of previous function's body with added early returns */
+       q      = (r * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (r - 10 * q) + '0'; /* 2 */
+       if (q == 0)
+               return buf;
+       r      = (q * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (q - 10 * r) + '0'; /* 3 */
+       if (r == 0)
+               return buf;
+       q      = (r * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (r - 10 * q) + '0'; /* 4 */
+       if (q == 0)
+               return buf;
+       r      = (q * (uint64_t)0x1999999a) >> 32;
+       *buf++ = (q - 10 * r) + '0'; /* 5 */
+       if (r == 0)
+               return buf;
+       q      = (r * 0x199a) >> 16;
+       *buf++ = (r - 10 * q)  + '0'; /* 6 */
+       if (q == 0)
+               return buf;
+       r      = (q * 0xcd) >> 11;
+       *buf++ = (q - 10 * r)  + '0'; /* 7 */
+       if (r == 0)
+               return buf;
+       q      = (r * 0xcd) >> 11;
+       *buf++ = (r - 10 * q) + '0'; /* 8 */
+       if (q == 0)
+               return buf;
+       *buf++ = q + '0'; /* 9 */
+       return buf;
+}
 
-       /*
-        * Possible ways to approx. divide by 10
-        * gcc -O2 replaces multiply with shifts and adds
-        * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386)
-        * (x * 0x67) >> 10:  1100111
-        * (x * 0x34) >> 9:    110100 - same
-        * (x * 0x1a) >> 8:     11010 - same
-        * (x * 0x0d) >> 7:      1101 - same, shortest code (on i386)
-        */
-       d0 = 6*(d3 + d2 + d1) + (q & 0xf);
-       q = (d0 * 0xcd) >> 11;
-       d0 = d0 - 10*q;
-       *buf++ = d0 + '0';
-       d1 = q + 9*d3 + 5*d2 + d1;
-               q = (d1 * 0xcd) >> 11;
-               d1 = d1 - 10*q;
-               *buf++ = d1 + '0';
-
-               d2 = q + 2*d2;
-                       q = (d2 * 0xd) >> 7;
-                       d2 = d2 - 10*q;
-                       *buf++ = d2 + '0';
-
-                       d3 = q + 4*d3;
-                               q = (d3 * 0xcd) >> 11; /* - shorter code */
-                               /* q = (d3 * 0x67) >> 10; - would also work */
-                               d3 = d3 - 10*q;
-                               *buf++ = d3 + '0';
-                                       *buf++ = q + '0';
+/* There are two algorithms to print larger numbers.
+ * One is generic: divide by 1000000000 and repeatedly print
+ * groups of (up to) 9 digits. It's conceptually simple,
+ * but requires a (unsigned long long) / 1000000000 division.
+ *
+ * Second algorithm splits 64-bit unsigned long long into 16-bit chunks,
+ * manipulates them cleverly and generates groups of 4 decimal digits.
+ * It so happens that it does NOT require long long division.
+ *
+ * If long is > 32 bits, division of 64-bit values is relatively easy,
+ * and we will use the first algorithm.
+ * If long long is > 64 bits (strange architecture with VERY large long long),
+ * second algorithm can't be used, and we again use the first one.
+ *
+ * Else (if long is 32 bits and long long is 64 bits) we use second one.
+ */
 
-       return buf;
+#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
+
+/* First algorithm: generic */
+
+static
+char *put_dec(char *buf, unsigned long long n)
+{
+       if (n >= 100*1000*1000) {
+               while (n >= 1000*1000*1000)
+                       buf = put_dec_full9(buf, do_div(n, 1000*1000*1000));
+               if (n >= 100*1000*1000)
+                       return put_dec_full9(buf, n);
+       }
+       return put_dec_trunc8(buf, n);
 }
-/* No inlining helps gcc to use registers better */
+
+#else
+
+/* Second algorithm: valid only for 64-bit long longs */
+
 static noinline_for_stack
-char *put_dec(char *buf, unsigned long long num)
+char *put_dec_full4(char *buf, unsigned q)
 {
-       while (1) {
-               unsigned rem;
-               if (num < 100000)
-                       return put_dec_trunc(buf, num);
-               rem = do_div(num, 100000);
-               buf = put_dec_full(buf, rem);
-       }
+       unsigned r;
+       r      = (q * 0xcccd) >> 19;
+       *buf++ = (q - 10 * r) + '0';
+       q      = (r * 0x199a) >> 16;
+       *buf++ = (r - 10 * q)  + '0';
+       r      = (q * 0xcd) >> 11;
+       *buf++ = (q - 10 * r)  + '0';
+       *buf++ = r + '0';
+       return buf;
+}
+
+/* Based on code by Douglas W. Jones found at
+ * <http://www.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour>
+ * (with permission from the author).
+ * Performs no 64-bit division and hence should be fast on 32-bit machines.
+ */
+static
+char *put_dec(char *buf, unsigned long long n)
+{
+       uint32_t d3, d2, d1, q, h;
+
+       if (n < 100*1000*1000)
+               return put_dec_trunc8(buf, n);
+
+       d1  = ((uint32_t)n >> 16); /* implicit "& 0xffff" */
+       h   = (n >> 32);
+       d2  = (h      ) & 0xffff;
+       d3  = (h >> 16); /* implicit "& 0xffff" */
+
+       q   = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
+
+       buf = put_dec_full4(buf, q % 10000);
+       q   = q / 10000;
+
+       d1  = q + 7671 * d3 + 9496 * d2 + 6 * d1;
+       buf = put_dec_full4(buf, d1 % 10000);
+       q   = d1 / 10000;
+
+       d2  = q + 4749 * d3 + 42 * d2;
+       buf = put_dec_full4(buf, d2 % 10000);
+       q   = d2 / 10000;
+
+       d3  = q + 281 * d3;
+       if (!d3)
+               goto done;
+       buf = put_dec_full4(buf, d3 % 10000);
+       q   = d3 / 10000;
+       if (!q)
+               goto done;
+       buf = put_dec_full4(buf, q);
+ done:
+       while (buf[-1] == '0')
+               --buf;
+
+       return buf;
 }
 
+#endif
+
 /*
  * Convert passed number to decimal string.
  * Returns the length of string.  On buffer overflow, returns 0.
@@ -220,16 +313,22 @@ char *put_dec(char *buf, unsigned long long num)
  */
 int num_to_str(char *buf, int size, unsigned long long num)
 {
-       char tmp[21];           /* Enough for 2^64 in decimal */
+       char tmp[sizeof(num) * 3];
        int idx, len;
 
-       len = put_dec(tmp, num) - tmp;
+       /* put_dec() may work incorrectly for num = 0 (generate "", not "0") */
+       if (num <= 9) {
+               tmp[0] = '0' + num;
+               len = 1;
+       } else {
+               len = put_dec(tmp, num) - tmp;
+       }
 
        if (len > size)
                return 0;
        for (idx = 0; idx < len; ++idx)
                buf[idx] = tmp[len - idx - 1];
-       return  len;
+       return len;
 }
 
 #define ZEROPAD        1               /* pad with zero */
@@ -284,6 +383,7 @@ char *number(char *buf, char *end, unsigned long long num,
        char locase;
        int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
        int i;
+       bool is_zero = num == 0LL;
 
        /* locase = 0 or 0x20. ORing digits or letters with 'locase'
         * produces same digits or (maybe lowercased) letters */
@@ -305,15 +405,16 @@ char *number(char *buf, char *end, unsigned long long num,
                }
        }
        if (need_pfx) {
-               spec.field_width--;
                if (spec.base == 16)
+                       spec.field_width -= 2;
+               else if (!is_zero)
                        spec.field_width--;
        }
 
        /* generate full string in tmp[], in reverse order */
        i = 0;
-       if (num == 0)
-               tmp[i++] = '0';
+       if (num < spec.base)
+               tmp[i++] = digits[num] | locase;
        /* Generic code, for any base:
        else do {
                tmp[i++] = (digits[do_div(num,base)] | locase);
@@ -353,9 +454,11 @@ char *number(char *buf, char *end, unsigned long long num,
        }
        /* "0x" / "0" prefix */
        if (need_pfx) {
-               if (buf < end)
-                       *buf = '0';
-               ++buf;
+               if (spec.base == 16 || !is_zero) {
+                       if (buf < end)
+                               *buf = '0';
+                       ++buf;
+               }
                if (spec.base == 16) {
                        if (buf < end)
                                *buf = ('X' | locase);
@@ -436,7 +539,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
        else if (ext != 'f' && ext != 's')
                sprint_symbol(sym, value);
        else
-               kallsyms_lookup(value, NULL, NULL, NULL, sym);
+               sprint_symbol_no_offset(sym, value);
 
        return string(buf, end, sym, spec);
 #else
@@ -607,7 +710,7 @@ char *ip4_string(char *p, const u8 *addr, const char *fmt)
        }
        for (i = 0; i < 4; i++) {
                char temp[3];   /* hold each IP quad in reverse order */
-               int digits = put_dec_trunc(temp, addr[index]) - temp;
+               int digits = put_dec_trunc8(temp, addr[index]) - temp;
                if (leading_zeros) {
                        if (digits < 3)
                                *p++ = '0';
@@ -866,13 +969,15 @@ static noinline_for_stack
 char *pointer(const char *fmt, char *buf, char *end, void *ptr,
              struct printf_spec spec)
 {
+       int default_width = 2 * sizeof(void *) + (spec.flags & SPECIAL ? 2 : 0);
+
        if (!ptr && *fmt != 'K') {
                /*
                 * Print (null) with the same width as a pointer so it makes
                 * tabular output look nice.
                 */
                if (spec.field_width == -1)
-                       spec.field_width = 2 * sizeof(void *);
+                       spec.field_width = default_width;
                return string(buf, end, "(null)", spec);
        }
 
@@ -927,7 +1032,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                 */
                if (in_irq() || in_serving_softirq() || in_nmi()) {
                        if (spec.field_width == -1)
-                               spec.field_width = 2 * sizeof(void *);
+                               spec.field_width = default_width;
                        return string(buf, end, "pK-error", spec);
                }
                if (!((kptr_restrict == 0) ||
@@ -944,7 +1049,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
        }
        spec.flags |= SMALL;
        if (spec.field_width == -1) {
-               spec.field_width = 2 * sizeof(void *);
+               spec.field_width = default_width;
                spec.flags |= ZEROPAD;
        }
        spec.base = 16;
index e338407f1225f0873a8eb20761c86fd82db1dfc0..b2176374b98e5e678ec93acda28cc0891d1c3717 100644 (file)
@@ -198,7 +198,7 @@ config COMPACTION
 config MIGRATION
        bool "Page migration"
        def_bool y
-       depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
+       depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA
        help
          Allows the migration of the physical location of pages of processes
          while the virtual addresses are not changed. This is useful in
@@ -349,6 +349,16 @@ choice
          benefit.
 endchoice
 
+config CROSS_MEMORY_ATTACH
+       bool "Cross Memory Support"
+       depends on MMU
+       default y
+       help
+         Enabling this option adds the system calls process_vm_readv and
+         process_vm_writev which allow a process with the correct privileges
+         to directly read from or write to to another process's address space.
+         See the man page for more details.
+
 #
 # UP and nommu archs use km based percpu allocator
 #
index 50ec00ef2a0e85a11ef8202529b405b069702a8d..a156285ce88d9a19e529b54b8836efac559b7af7 100644 (file)
@@ -5,15 +5,18 @@
 mmu-y                  := nommu.o
 mmu-$(CONFIG_MMU)      := fremap.o highmem.o madvise.o memory.o mincore.o \
                           mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
-                          vmalloc.o pagewalk.o pgtable-generic.o \
-                          process_vm_access.o
+                          vmalloc.o pagewalk.o pgtable-generic.o
+
+ifdef CONFIG_CROSS_MEMORY_ATTACH
+mmu-$(CONFIG_MMU)      += process_vm_access.o
+endif
 
 obj-y                  := filemap.o mempool.o oom_kill.o fadvise.o \
                           maccess.o page_alloc.o page-writeback.o \
                           readahead.o swap.o truncate.o vmscan.o shmem.o \
                           prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
                           page_isolation.o mm_init.o mmu_context.o percpu.o \
-                          $(mmu-y)
+                          compaction.o $(mmu-y)
 obj-y += init-mm.o
 
 ifdef CONFIG_NO_BOOTMEM
@@ -25,14 +28,13 @@ endif
 obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
 
 obj-$(CONFIG_BOUNCE)   += bounce.o
-obj-$(CONFIG_SWAP)     += page_io.o swap_state.o swapfile.o thrash.o
+obj-$(CONFIG_SWAP)     += page_io.o swap_state.o swapfile.o
 obj-$(CONFIG_HAS_DMA)  += dmapool.o
 obj-$(CONFIG_HUGETLBFS)        += hugetlb.o
 obj-$(CONFIG_NUMA)     += mempolicy.o
 obj-$(CONFIG_SPARSEMEM)        += sparse.o
 obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
 obj-$(CONFIG_SLOB) += slob.o
-obj-$(CONFIG_COMPACTION) += compaction.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_KSM) += ksm.o
 obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
index 0131170c9d540a572c7b2ba3108ca5c2d9db30b7..ec4fcb7a56c8975492d656940906af6153136e51 100644 (file)
@@ -77,16 +77,16 @@ unsigned long __init bootmem_bootmap_pages(unsigned long pages)
  */
 static void __init link_bootmem(bootmem_data_t *bdata)
 {
-       struct list_head *iter;
+       bootmem_data_t *ent;
 
-       list_for_each(iter, &bdata_list) {
-               bootmem_data_t *ent;
-
-               ent = list_entry(iter, bootmem_data_t, list);
-               if (bdata->node_min_pfn < ent->node_min_pfn)
-                       break;
+       list_for_each_entry(ent, &bdata_list, list) {
+               if (bdata->node_min_pfn < ent->node_min_pfn) {
+                       list_add_tail(&bdata->list, &ent->list);
+                       return;
+               }
        }
-       list_add_tail(&bdata->list, iter);
+
+       list_add_tail(&bdata->list, &bdata_list);
 }
 
 /*
@@ -203,7 +203,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
                } else {
                        unsigned long off = 0;
 
-                       while (vec && off < BITS_PER_LONG) {
+                       vec >>= start & (BITS_PER_LONG - 1);
+                       while (vec) {
                                if (vec & 1) {
                                        page = pfn_to_page(start + off);
                                        __free_pages_bootmem(page, 0);
@@ -467,7 +468,7 @@ static unsigned long __init align_off(struct bootmem_data *bdata,
        return ALIGN(base + off, align) - base;
 }
 
-static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
+static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
                                        unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
 {
@@ -588,14 +589,14 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
                p_bdata = bootmem_arch_preferred_node(bdata, size, align,
                                                        goal, limit);
                if (p_bdata)
-                       return alloc_bootmem_core(p_bdata, size, align,
+                       return alloc_bootmem_bdata(p_bdata, size, align,
                                                        goal, limit);
        }
 #endif
        return NULL;
 }
 
-static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+static void * __init alloc_bootmem_core(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
@@ -603,7 +604,6 @@ static void * __init ___alloc_bootmem_nopanic(unsigned long size,
        bootmem_data_t *bdata;
        void *region;
 
-restart:
        region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
        if (region)
                return region;
@@ -614,11 +614,25 @@ restart:
                if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
                        break;
 
-               region = alloc_bootmem_core(bdata, size, align, goal, limit);
+               region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
                if (region)
                        return region;
        }
 
+       return NULL;
+}
+
+static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+                                             unsigned long align,
+                                             unsigned long goal,
+                                             unsigned long limit)
+{
+       void *ptr;
+
+restart:
+       ptr = alloc_bootmem_core(size, align, goal, limit);
+       if (ptr)
+               return ptr;
        if (goal) {
                goal = 0;
                goto restart;
@@ -684,21 +698,56 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
        return ___alloc_bootmem(size, align, goal, limit);
 }
 
-static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
+static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
 {
        void *ptr;
 
-       ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
+again:
+       ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size,
+                                          align, goal, limit);
        if (ptr)
                return ptr;
 
-       ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
+       ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
        if (ptr)
                return ptr;
 
-       return ___alloc_bootmem(size, align, goal, limit);
+       ptr = alloc_bootmem_core(size, align, goal, limit);
+       if (ptr)
+               return ptr;
+
+       if (goal) {
+               goal = 0;
+               goto again;
+       }
+
+       return NULL;
+}
+
+void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
+                                  unsigned long align, unsigned long goal)
+{
+       if (WARN_ON_ONCE(slab_is_available()))
+               return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+       return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
+}
+
+void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
+                                   unsigned long align, unsigned long goal,
+                                   unsigned long limit)
+{
+       void *ptr;
+
+       ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
+       if (ptr)
+               return ptr;
+
+       printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+       panic("Out of memory");
+       return NULL;
 }
 
 /**
@@ -722,7 +771,7 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-       return  ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
+       return  ___alloc_bootmem_node(pgdat, size, align, goal, 0);
 }
 
 void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
@@ -743,7 +792,7 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
                unsigned long new_goal;
 
                new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
-               ptr = alloc_bootmem_core(pgdat->bdata, size, align,
+               ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
                                                 new_goal, 0);
                if (ptr)
                        return ptr;
@@ -754,47 +803,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
 
 }
 
-#ifdef CONFIG_SPARSEMEM
-/**
- * alloc_bootmem_section - allocate boot memory from a specific section
- * @size: size of the request in bytes
- * @section_nr: sparse map section to allocate from
- *
- * Return NULL on failure.
- */
-void * __init alloc_bootmem_section(unsigned long size,
-                                   unsigned long section_nr)
-{
-       bootmem_data_t *bdata;
-       unsigned long pfn, goal;
-
-       pfn = section_nr_to_pfn(section_nr);
-       goal = pfn << PAGE_SHIFT;
-       bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
-
-       return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, 0);
-}
-#endif
-
-void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
-                                  unsigned long align, unsigned long goal)
-{
-       void *ptr;
-
-       if (WARN_ON_ONCE(slab_is_available()))
-               return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
-       ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
-       if (ptr)
-               return ptr;
-
-       ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
-       if (ptr)
-               return ptr;
-
-       return __alloc_bootmem_nopanic(size, align, goal);
-}
-
 #ifndef ARCH_LOW_ADDRESS_LIMIT
 #define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
 #endif
@@ -839,6 +847,6 @@ void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-       return ___alloc_bootmem_node(pgdat->bdata, size, align,
-                               goal, ARCH_LOW_ADDRESS_LIMIT);
+       return ___alloc_bootmem_node(pgdat, size, align,
+                                    goal, ARCH_LOW_ADDRESS_LIMIT);
 }
index 5646c740f613ed1ec8b34a094f76d7934eed1aac..32e6f4136fa2297e13a6ac51444d50c18b78e9a3 100644 (file)
@@ -80,7 +80,7 @@ EXPORT_SYMBOL(__cleancache_init_shared_fs);
 static int cleancache_get_key(struct inode *inode,
                              struct cleancache_filekey *key)
 {
-       int (*fhfn)(struct dentry *, __u32 *fh, int *, int);
+       int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);
        int len = 0, maxlen = CLEANCACHE_KEY_MAX;
        struct super_block *sb = inode->i_sb;
 
@@ -88,9 +88,7 @@ static int cleancache_get_key(struct inode *inode,
        if (sb->s_export_op != NULL) {
                fhfn = sb->s_export_op->encode_fh;
                if  (fhfn) {
-                       struct dentry d;
-                       d.d_inode = inode;
-                       len = (*fhfn)(&d, &key->u.fh[0], &maxlen, 0);
+                       len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
                        if (len <= 0 || len == 255)
                                return -1;
                        if (maxlen > CLEANCACHE_KEY_MAX)
index 74a8c825ff289a12079701dfc42be4e1787b8d8f..7ea259d82a998c0397e976216ac861576e638100 100644 (file)
 #include <linux/sysfs.h>
 #include "internal.h"
 
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/compaction.h>
 
-/*
- * compact_control is used to track pages being migrated and the free pages
- * they are being migrated to during memory compaction. The free_pfn starts
- * at the end of a zone and migrate_pfn begins at the start. Movable pages
- * are moved to the end of a zone during a compaction run and the run
- * completes when free_pfn <= migrate_pfn
- */
-struct compact_control {
-       struct list_head freepages;     /* List of free pages to migrate to */
-       struct list_head migratepages;  /* List of pages being migrated */
-       unsigned long nr_freepages;     /* Number of isolated free pages */
-       unsigned long nr_migratepages;  /* Number of pages to migrate */
-       unsigned long free_pfn;         /* isolate_freepages search base */
-       unsigned long migrate_pfn;      /* isolate_migratepages search base */
-       bool sync;                      /* Synchronous migration */
-
-       int order;                      /* order a direct compactor needs */
-       int migratetype;                /* MOVABLE, RECLAIMABLE etc */
-       struct zone *zone;
-};
-
 static unsigned long release_freepages(struct list_head *freelist)
 {
        struct page *page, *next;
@@ -54,24 +35,35 @@ static unsigned long release_freepages(struct list_head *freelist)
        return count;
 }
 
-/* Isolate free pages onto a private freelist. Must hold zone->lock */
-static unsigned long isolate_freepages_block(struct zone *zone,
-                               unsigned long blockpfn,
-                               struct list_head *freelist)
+static void map_pages(struct list_head *list)
+{
+       struct page *page;
+
+       list_for_each_entry(page, list, lru) {
+               arch_alloc_page(page, 0);
+               kernel_map_pages(page, 1, 1);
+       }
+}
+
+static inline bool migrate_async_suitable(int migratetype)
+{
+       return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
+}
+
+/*
+ * Isolate free pages onto a private freelist. Caller must hold zone->lock.
+ * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
+ * pages inside of the pageblock (even though it may still end up isolating
+ * some pages).
+ */
+static unsigned long isolate_freepages_block(unsigned long blockpfn,
+                               unsigned long end_pfn,
+                               struct list_head *freelist,
+                               bool strict)
 {
-       unsigned long zone_end_pfn, end_pfn;
        int nr_scanned = 0, total_isolated = 0;
        struct page *cursor;
 
-       /* Get the last PFN we should scan for free pages at */
-       zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-       end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
-
-       /* Find the first usable PFN in the block to initialse page cursor */
-       for (; blockpfn < end_pfn; blockpfn++) {
-               if (pfn_valid_within(blockpfn))
-                       break;
-       }
        cursor = pfn_to_page(blockpfn);
 
        /* Isolate free pages. This assumes the block is valid */
@@ -79,15 +71,23 @@ static unsigned long isolate_freepages_block(struct zone *zone,
                int isolated, i;
                struct page *page = cursor;
 
-               if (!pfn_valid_within(blockpfn))
+               if (!pfn_valid_within(blockpfn)) {
+                       if (strict)
+                               return 0;
                        continue;
+               }
                nr_scanned++;
 
-               if (!PageBuddy(page))
+               if (!PageBuddy(page)) {
+                       if (strict)
+                               return 0;
                        continue;
+               }
 
                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
+               if (!isolated && strict)
+                       return 0;
                total_isolated += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
@@ -105,114 +105,71 @@ static unsigned long isolate_freepages_block(struct zone *zone,
        return total_isolated;
 }
 
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
-{
-
-       int migratetype = get_pageblock_migratetype(page);
-
-       /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
-       if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-               return false;
-
-       /* If the page is a large free page, then allow migration */
-       if (PageBuddy(page) && page_order(page) >= pageblock_order)
-               return true;
-
-       /* If the block is MIGRATE_MOVABLE, allow migration */
-       if (migratetype == MIGRATE_MOVABLE)
-               return true;
-
-       /* Otherwise skip the block */
-       return false;
-}
-
-/*
- * Based on information in the current compact_control, find blocks
- * suitable for isolating free pages from and then isolate them.
+/**
+ * isolate_freepages_range() - isolate free pages.
+ * @start_pfn: The first PFN to start isolating.
+ * @end_pfn:   The one-past-last PFN.
+ *
+ * Non-free pages, invalid PFNs, or zone boundaries within the
+ * [start_pfn, end_pfn) range are considered errors and cause the
+ * function to undo its actions and return zero.
+ *
+ * Otherwise, function returns one-past-the-last PFN of isolated page
+ * (which may be greater than end_pfn if the end fell in the middle of
+ * a free page).
  */
-static void isolate_freepages(struct zone *zone,
-                               struct compact_control *cc)
+unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
 {
-       struct page *page;
-       unsigned long high_pfn, low_pfn, pfn;
-       unsigned long flags;
-       int nr_freepages = cc->nr_freepages;
-       struct list_head *freelist = &cc->freepages;
-
-       /*
-        * Initialise the free scanner. The starting point is where we last
-        * scanned from (or the end of the zone if starting). The low point
-        * is the end of the pageblock the migration scanner is using.
-        */
-       pfn = cc->free_pfn;
-       low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+       unsigned long isolated, pfn, block_end_pfn, flags;
+       struct zone *zone = NULL;
+       LIST_HEAD(freelist);
 
-       /*
-        * Take care that if the migration scanner is at the end of the zone
-        * that the free scanner does not accidentally move to the next zone
-        * in the next isolation cycle.
-        */
-       high_pfn = min(low_pfn, pfn);
-
-       /*
-        * Isolate free pages until enough are available to migrate the
-        * pages on cc->migratepages. We stop searching if the migrate
-        * and free page scanners meet or enough free pages are isolated.
-        */
-       for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
-                                       pfn -= pageblock_nr_pages) {
-               unsigned long isolated;
+       if (pfn_valid(start_pfn))
+               zone = page_zone(pfn_to_page(start_pfn));
 
-               if (!pfn_valid(pfn))
-                       continue;
+       for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
+               if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
+                       break;
 
                /*
-                * Check for overlapping nodes/zones. It's possible on some
-                * configurations to have a setup like
-                * node0 node1 node0
-                * i.e. it's possible that all pages within a zones range of
-                * pages do not belong to a single zone.
+                * On subsequent iterations ALIGN() is actually not needed,
+                * but we keep it so as not to complicate the code.
                 */
-               page = pfn_to_page(pfn);
-               if (page_zone(page) != zone)
-                       continue;
+               block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+               block_end_pfn = min(block_end_pfn, end_pfn);
 
-               /* Check the block is suitable for migration */
-               if (!suitable_migration_target(page))
-                       continue;
+               spin_lock_irqsave(&zone->lock, flags);
+               isolated = isolate_freepages_block(pfn, block_end_pfn,
+                                                  &freelist, true);
+               spin_unlock_irqrestore(&zone->lock, flags);
 
                /*
-                * Found a block suitable for isolating free pages from. Now
-                * we disabled interrupts, double check things are ok and
-                * isolate the pages. This is to minimise the time IRQs
-                * are disabled
+                * In strict mode, isolate_freepages_block() returns 0 if
+                * there are any holes in the block (i.e. invalid PFNs or
+                * non-free pages).
                 */
-               isolated = 0;
-               spin_lock_irqsave(&zone->lock, flags);
-               if (suitable_migration_target(page)) {
-                       isolated = isolate_freepages_block(zone, pfn, freelist);
-                       nr_freepages += isolated;
-               }
-               spin_unlock_irqrestore(&zone->lock, flags);
+               if (!isolated)
+                       break;
 
                /*
-                * Record the highest PFN we isolated pages from. When next
-                * looking for free pages, the search will restart here as
-                * page migration may have returned some pages to the allocator
+                * If we managed to isolate pages, it is always (1 << n) *
+                * pageblock_nr_pages for some non-negative n.  (Max order
+                * page may span two pageblocks).
                 */
-               if (isolated)
-                       high_pfn = max(high_pfn, pfn);
        }
 
        /* split_free_page does not map the pages */
-       list_for_each_entry(page, freelist, lru) {
-               arch_alloc_page(page, 0);
-               kernel_map_pages(page, 1, 1);
+       map_pages(&freelist);
+
+       if (pfn < end_pfn) {
+               /* Loop terminated early, cleanup. */
+               release_freepages(&freelist);
+               return 0;
        }
 
-       cc->free_pfn = high_pfn;
-       cc->nr_freepages = nr_freepages;
+       /* We don't use freelists for anything. */
+       return pfn;
 }
 
 /* Update the number of anon and file isolated pages in the zone */
@@ -243,37 +200,34 @@ static bool too_many_isolated(struct zone *zone)
        return isolated > (inactive + active) / 2;
 }
 
-/* possible outcome of isolate_migratepages */
-typedef enum {
-       ISOLATE_ABORT,          /* Abort compaction now */
-       ISOLATE_NONE,           /* No pages isolated, continue scanning */
-       ISOLATE_SUCCESS,        /* Pages isolated, migrate */
-} isolate_migrate_t;
-
-/*
- * Isolate all pages that can be migrated from the block pointed to by
- * the migrate scanner within compact_control.
+/**
+ * isolate_migratepages_range() - isolate all migrate-able pages in range.
+ * @zone:      Zone pages are in.
+ * @cc:                Compaction control structure.
+ * @low_pfn:   The first PFN of the range.
+ * @end_pfn:   The one-past-the-last PFN of the range.
+ *
+ * Isolate all pages that can be migrated from the range specified by
+ * [low_pfn, end_pfn).  Returns zero if there is a fatal signal
+ * pending, otherwise PFN of the first page that was not scanned
+ * (which may be less than, equal to, or greater than end_pfn).
+ *
+ * Assumes that cc->migratepages is empty and cc->nr_migratepages is
+ * zero.
+ *
+ * Apart from cc->migratepages and cc->nr_migratepages this function
+ * does not modify any cc's fields, in particular it does not modify
+ * (or read for that matter) cc->migrate_pfn.
  */
-static isolate_migrate_t isolate_migratepages(struct zone *zone,
-                                       struct compact_control *cc)
+unsigned long
+isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+                          unsigned long low_pfn, unsigned long end_pfn)
 {
-       unsigned long low_pfn, end_pfn;
        unsigned long last_pageblock_nr = 0, pageblock_nr;
        unsigned long nr_scanned = 0, nr_isolated = 0;
        struct list_head *migratelist = &cc->migratepages;
-       isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;
-
-       /* Do not scan outside zone boundaries */
-       low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
-
-       /* Only scan within a pageblock boundary */
-       end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
-
-       /* Do not cross the free scanner or scan within a memory hole */
-       if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
-               cc->migrate_pfn = end_pfn;
-               return ISOLATE_NONE;
-       }
+       isolate_mode_t mode = 0;
+       struct lruvec *lruvec;
 
        /*
         * Ensure that there are not too many pages isolated from the LRU
@@ -283,12 +237,12 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
        while (unlikely(too_many_isolated(zone))) {
                /* async migration should just abort */
                if (!cc->sync)
-                       return ISOLATE_ABORT;
+                       return 0;
 
                congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                if (fatal_signal_pending(current))
-                       return ISOLATE_ABORT;
+                       return 0;
        }
 
        /* Time to isolate some pages for migration */
@@ -351,7 +305,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                 */
                pageblock_nr = low_pfn >> pageblock_order;
                if (!cc->sync && last_pageblock_nr != pageblock_nr &&
-                               get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
+                   !migrate_async_suitable(get_pageblock_migratetype(page))) {
                        low_pfn += pageblock_nr_pages;
                        low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
                        last_pageblock_nr = pageblock_nr;
@@ -374,14 +328,16 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                if (!cc->sync)
                        mode |= ISOLATE_ASYNC_MIGRATE;
 
+               lruvec = mem_cgroup_page_lruvec(page, zone);
+
                /* Try isolate the page */
-               if (__isolate_lru_page(page, mode, 0) != 0)
+               if (__isolate_lru_page(page, mode) != 0)
                        continue;
 
                VM_BUG_ON(PageTransCompound(page));
 
                /* Successfully isolated */
-               del_page_from_lru_list(zone, page, page_lru(page));
+               del_page_from_lru_list(page, lruvec, page_lru(page));
                list_add(&page->lru, migratelist);
                cc->nr_migratepages++;
                nr_isolated++;
@@ -396,11 +352,124 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
        acct_isolated(zone, cc);
 
        spin_unlock_irq(&zone->lru_lock);
-       cc->migrate_pfn = low_pfn;
 
        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
-       return ISOLATE_SUCCESS;
+       return low_pfn;
+}
+
+#endif /* CONFIG_COMPACTION || CONFIG_CMA */
+#ifdef CONFIG_COMPACTION
+
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
+{
+
+       int migratetype = get_pageblock_migratetype(page);
+
+       /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
+       if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
+               return false;
+
+       /* If the page is a large free page, then allow migration */
+       if (PageBuddy(page) && page_order(page) >= pageblock_order)
+               return true;
+
+       /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
+       if (migrate_async_suitable(migratetype))
+               return true;
+
+       /* Otherwise skip the block */
+       return false;
+}
+
+/*
+ * Based on information in the current compact_control, find blocks
+ * suitable for isolating free pages from and then isolate them.
+ */
+static void isolate_freepages(struct zone *zone,
+                               struct compact_control *cc)
+{
+       struct page *page;
+       unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
+       unsigned long flags;
+       int nr_freepages = cc->nr_freepages;
+       struct list_head *freelist = &cc->freepages;
+
+       /*
+        * Initialise the free scanner. The starting point is where we last
+        * scanned from (or the end of the zone if starting). The low point
+        * is the end of the pageblock the migration scanner is using.
+        */
+       pfn = cc->free_pfn;
+       low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+
+       /*
+        * Take care that if the migration scanner is at the end of the zone
+        * that the free scanner does not accidentally move to the next zone
+        * in the next isolation cycle.
+        */
+       high_pfn = min(low_pfn, pfn);
+
+       zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+
+       /*
+        * Isolate free pages until enough are available to migrate the
+        * pages on cc->migratepages. We stop searching if the migrate
+        * and free page scanners meet or enough free pages are isolated.
+        */
+       for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+                                       pfn -= pageblock_nr_pages) {
+               unsigned long isolated;
+
+               if (!pfn_valid(pfn))
+                       continue;
+
+               /*
+                * Check for overlapping nodes/zones. It's possible on some
+                * configurations to have a setup like
+                * node0 node1 node0
+                * i.e. it's possible that all pages within a zones range of
+                * pages do not belong to a single zone.
+                */
+               page = pfn_to_page(pfn);
+               if (page_zone(page) != zone)
+                       continue;
+
+               /* Check the block is suitable for migration */
+               if (!suitable_migration_target(page))
+                       continue;
+
+               /*
+                * Found a block suitable for isolating free pages from. Now
+                * we disabled interrupts, double check things are ok and
+                * isolate the pages. This is to minimise the time IRQs
+                * are disabled
+                */
+               isolated = 0;
+               spin_lock_irqsave(&zone->lock, flags);
+               if (suitable_migration_target(page)) {
+                       end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
+                       isolated = isolate_freepages_block(pfn, end_pfn,
+                                                          freelist, false);
+                       nr_freepages += isolated;
+               }
+               spin_unlock_irqrestore(&zone->lock, flags);
+
+               /*
+                * Record the highest PFN we isolated pages from. When next
+                * looking for free pages, the search will restart here as
+                * page migration may have returned some pages to the allocator
+                */
+               if (isolated)
+                       high_pfn = max(high_pfn, pfn);
+       }
+
+       /* split_free_page does not map the pages */
+       map_pages(freelist);
+
+       cc->free_pfn = high_pfn;
+       cc->nr_freepages = nr_freepages;
 }
 
 /*
@@ -449,6 +518,44 @@ static void update_nr_listpages(struct compact_control *cc)
        cc->nr_freepages = nr_freepages;
 }
 
+/* possible outcome of isolate_migratepages */
+typedef enum {
+       ISOLATE_ABORT,          /* Abort compaction now */
+       ISOLATE_NONE,           /* No pages isolated, continue scanning */
+       ISOLATE_SUCCESS,        /* Pages isolated, migrate */
+} isolate_migrate_t;
+
+/*
+ * Isolate all pages that can be migrated from the block pointed to by
+ * the migrate scanner within compact_control.
+ */
+static isolate_migrate_t isolate_migratepages(struct zone *zone,
+                                       struct compact_control *cc)
+{
+       unsigned long low_pfn, end_pfn;
+
+       /* Do not scan outside zone boundaries */
+       low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
+
+       /* Only scan within a pageblock boundary */
+       end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
+
+       /* Do not cross the free scanner or scan within a memory hole */
+       if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
+               cc->migrate_pfn = end_pfn;
+               return ISOLATE_NONE;
+       }
+
+       /* Perform the isolation */
+       low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
+       if (!low_pfn)
+               return ISOLATE_ABORT;
+
+       cc->migrate_pfn = low_pfn;
+
+       return ISOLATE_SUCCESS;
+}
+
 static int compact_finished(struct zone *zone,
                            struct compact_control *cc)
 {
@@ -795,3 +902,5 @@ void compaction_unregister_node(struct node *node)
        return device_remove_file(&node->dev, &dev_attr_compact);
 }
 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
+
+#endif /* CONFIG_COMPACTION */
index 79c4b2b0b14eec1d05c93e3493dd02e0fd182829..a4a5260b0279b77b37738540b1e8c24fb446a3e5 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/pagevec.h>
 #include <linux/blkdev.h>
 #include <linux/security.h>
-#include <linux/syscalls.h>
 #include <linux/cpuset.h>
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
@@ -1478,44 +1477,6 @@ out:
 }
 EXPORT_SYMBOL(generic_file_aio_read);
 
-static ssize_t
-do_readahead(struct address_space *mapping, struct file *filp,
-            pgoff_t index, unsigned long nr)
-{
-       if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
-               return -EINVAL;
-
-       force_page_cache_readahead(mapping, filp, index, nr);
-       return 0;
-}
-
-SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
-{
-       ssize_t ret;
-       struct file *file;
-
-       ret = -EBADF;
-       file = fget(fd);
-       if (file) {
-               if (file->f_mode & FMODE_READ) {
-                       struct address_space *mapping = file->f_mapping;
-                       pgoff_t start = offset >> PAGE_CACHE_SHIFT;
-                       pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
-                       unsigned long len = end - start + 1;
-                       ret = do_readahead(mapping, file, start, len);
-               }
-               fput(file);
-       }
-       return ret;
-}
-#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
-asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
-{
-       return SYSC_readahead((int) fd, offset, (size_t) count);
-}
-SYSCALL_ALIAS(sys_readahead, SyS_readahead);
-#endif
-
 #ifdef CONFIG_MMU
 /**
  * page_cache_read - adds requested page to the page cache if not already there
@@ -1938,71 +1899,6 @@ struct page *read_cache_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(read_cache_page);
 
-/*
- * The logic we want is
- *
- *     if suid or (sgid and xgrp)
- *             remove privs
- */
-int should_remove_suid(struct dentry *dentry)
-{
-       umode_t mode = dentry->d_inode->i_mode;
-       int kill = 0;
-
-       /* suid always must be killed */
-       if (unlikely(mode & S_ISUID))
-               kill = ATTR_KILL_SUID;
-
-       /*
-        * sgid without any exec bits is just a mandatory locking mark; leave
-        * it alone.  If some exec bits are set, it's a real sgid; kill it.
-        */
-       if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
-               kill |= ATTR_KILL_SGID;
-
-       if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
-               return kill;
-
-       return 0;
-}
-EXPORT_SYMBOL(should_remove_suid);
-
-static int __remove_suid(struct dentry *dentry, int kill)
-{
-       struct iattr newattrs;
-
-       newattrs.ia_valid = ATTR_FORCE | kill;
-       return notify_change(dentry, &newattrs);
-}
-
-int file_remove_suid(struct file *file)
-{
-       struct dentry *dentry = file->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
-       int killsuid;
-       int killpriv;
-       int error = 0;
-
-       /* Fast path for nothing security related */
-       if (IS_NOSEC(inode))
-               return 0;
-
-       killsuid = should_remove_suid(dentry);
-       killpriv = security_inode_need_killpriv(dentry);
-
-       if (killpriv < 0)
-               return killpriv;
-       if (killpriv)
-               error = security_inode_killpriv(dentry);
-       if (!error && killsuid)
-               error = __remove_suid(dentry, killsuid);
-       if (!error && (inode->i_sb->s_flags & MS_NOSEC))
-               inode->i_flags |= S_NOSEC;
-
-       return error;
-}
-EXPORT_SYMBOL(file_remove_suid);
-
 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                        const struct iovec *iov, size_t base, size_t bytes)
 {
@@ -2528,7 +2424,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        if (err)
                goto out;
 
-       file_update_time(file);
+       err = file_update_time(file);
+       if (err)
+               goto out;
 
        /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
        if (unlikely(file->f_flags & O_DIRECT)) {
index a4eb3113222912c9aada14bd92c6b68d01577b73..213ca1f5340980e1ce6fad8d4f12e50858d61397 100644 (file)
@@ -426,7 +426,9 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
        if (ret)
                goto out_backing;
 
-       file_update_time(filp);
+       ret = file_update_time(filp);
+       if (ret)
+               goto out_backing;
 
        ret = __xip_file_write (filp, buf, count, pos, ppos);
 
index f0e5306eeb55e8e179da3abbe6c033045b6ad073..57c4b93090151f2acbc1271b7b214fe5bc96478c 100644 (file)
@@ -636,16 +636,12 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                                        unsigned long haddr, pmd_t *pmd,
                                        struct page *page)
 {
-       int ret = 0;
        pgtable_t pgtable;
 
        VM_BUG_ON(!PageCompound(page));
        pgtable = pte_alloc_one(mm, haddr);
-       if (unlikely(!pgtable)) {
-               mem_cgroup_uncharge_page(page);
-               put_page(page);
+       if (unlikely(!pgtable))
                return VM_FAULT_OOM;
-       }
 
        clear_huge_page(page, haddr, HPAGE_PMD_NR);
        __SetPageUptodate(page);
@@ -675,7 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                spin_unlock(&mm->page_table_lock);
        }
 
-       return ret;
+       return 0;
 }
 
 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
@@ -724,8 +720,14 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        put_page(page);
                        goto out;
                }
+               if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
+                                                         page))) {
+                       mem_cgroup_uncharge_page(page);
+                       put_page(page);
+                       goto out;
+               }
 
-               return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
+               return 0;
        }
 out:
        /*
@@ -950,6 +952,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                count_vm_event(THP_FAULT_FALLBACK);
                ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
                                                   pmd, orig_pmd, page, haddr);
+               if (ret & VM_FAULT_OOM)
+                       split_huge_page(page);
                put_page(page);
                goto out;
        }
@@ -957,6 +961,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                put_page(new_page);
+               split_huge_page(page);
                put_page(page);
                ret |= VM_FAULT_OOM;
                goto out;
@@ -968,8 +973,10 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        spin_lock(&mm->page_table_lock);
        put_page(page);
        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
+               spin_unlock(&mm->page_table_lock);
                mem_cgroup_uncharge_page(new_page);
                put_page(new_page);
+               goto out;
        } else {
                pmd_t entry;
                VM_BUG_ON(!PageHead(page));
@@ -1224,10 +1231,13 @@ static void __split_huge_page_refcount(struct page *page)
 {
        int i;
        struct zone *zone = page_zone(page);
+       struct lruvec *lruvec;
        int tail_count = 0;
 
        /* prevent PageLRU to go away from under us, and freeze lru stats */
        spin_lock_irq(&zone->lru_lock);
+       lruvec = mem_cgroup_page_lruvec(page, zone);
+
        compound_lock(page);
        /* complete memcg works before add pages to LRU */
        mem_cgroup_split_huge_fixup(page);
@@ -1302,13 +1312,12 @@ static void __split_huge_page_refcount(struct page *page)
                BUG_ON(!PageDirty(page_tail));
                BUG_ON(!PageSwapBacked(page_tail));
 
-
-               lru_add_page_tail(zone, page, page_tail);
+               lru_add_page_tail(page, page_tail, lruvec);
        }
        atomic_sub(tail_count, &page->_count);
        BUG_ON(atomic_read(&page->_count) <= 0);
 
-       __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+       __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
        __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
 
        ClearPageCompound(page);
index ae8f708e3d75acd9c64f495ea74b8f1cb5ecc2c3..e198831276a3eab77b4a89fc0e1457a5a45d025d 100644 (file)
@@ -273,8 +273,8 @@ static long region_count(struct list_head *head, long f, long t)
 
        /* Locate each segment we overlap with, and count that overlap. */
        list_for_each_entry(rg, head, link) {
-               int seg_from;
-               int seg_to;
+               long seg_from;
+               long seg_to;
 
                if (rg->to <= f)
                        continue;
@@ -2157,6 +2157,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
                kref_get(&reservations->refs);
 }
 
+static void resv_map_put(struct vm_area_struct *vma)
+{
+       struct resv_map *reservations = vma_resv_map(vma);
+
+       if (!reservations)
+               return;
+       kref_put(&reservations->refs, resv_map_release);
+}
+
 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 {
        struct hstate *h = hstate_vma(vma);
@@ -2173,7 +2182,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
                reserve = (end - start) -
                        region_count(&reservations->regions, start, end);
 
-               kref_put(&reservations->refs, resv_map_release);
+               resv_map_put(vma);
 
                if (reserve) {
                        hugetlb_acct_memory(h, -reserve);
@@ -2213,6 +2222,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
        }
        entry = pte_mkyoung(entry);
        entry = pte_mkhuge(entry);
+       entry = arch_make_huge_pte(entry, vma, page, writable);
 
        return entry;
 }
@@ -2990,12 +3000,16 @@ int hugetlb_reserve_pages(struct inode *inode,
                set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
        }
 
-       if (chg < 0)
-               return chg;
+       if (chg < 0) {
+               ret = chg;
+               goto out_err;
+       }
 
        /* There must be enough pages in the subpool for the mapping */
-       if (hugepage_subpool_get_pages(spool, chg))
-               return -ENOSPC;
+       if (hugepage_subpool_get_pages(spool, chg)) {
+               ret = -ENOSPC;
+               goto out_err;
+       }
 
        /*
         * Check enough hugepages are available for the reservation.
@@ -3004,7 +3018,7 @@ int hugetlb_reserve_pages(struct inode *inode,
        ret = hugetlb_acct_memory(h, chg);
        if (ret < 0) {
                hugepage_subpool_put_pages(spool, chg);
-               return ret;
+               goto out_err;
        }
 
        /*
@@ -3021,6 +3035,10 @@ int hugetlb_reserve_pages(struct inode *inode,
        if (!vma || vma->vm_flags & VM_MAYSHARE)
                region_add(&inode->i_mapping->private_list, from, to);
        return 0;
+out_err:
+       if (vma)
+               resv_map_put(vma);
+       return ret;
 }
 
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
index 2189af491783f958c1337ebf1f1bb14dc5cd8b5d..2ba87fbfb75b9755e279d39af93359693afe66fd 100644 (file)
@@ -100,6 +100,39 @@ extern void prep_compound_page(struct page *page, unsigned long order);
 extern bool is_free_buddy_page(struct page *page);
 #endif
 
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+
+/*
+ * in mm/compaction.c
+ */
+/*
+ * compact_control is used to track pages being migrated and the free pages
+ * they are being migrated to during memory compaction. The free_pfn starts
+ * at the end of a zone and migrate_pfn begins at the start. Movable pages
+ * are moved to the end of a zone during a compaction run and the run
+ * completes when free_pfn <= migrate_pfn
+ */
+struct compact_control {
+       struct list_head freepages;     /* List of free pages to migrate to */
+       struct list_head migratepages;  /* List of pages being migrated */
+       unsigned long nr_freepages;     /* Number of isolated free pages */
+       unsigned long nr_migratepages;  /* Number of pages to migrate */
+       unsigned long free_pfn;         /* isolate_freepages search base */
+       unsigned long migrate_pfn;      /* isolate_migratepages search base */
+       bool sync;                      /* Synchronous migration */
+
+       int order;                      /* order a direct compactor needs */
+       int migratetype;                /* MOVABLE, RECLAIMABLE etc */
+       struct zone *zone;
+};
+
+unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn);
+unsigned long
+isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+                          unsigned long low_pfn, unsigned long end_pfn);
+
+#endif
 
 /*
  * function for dealing with page's order in buddy system.
@@ -131,7 +164,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
  * to determine if it's being mapped into a LOCKED vma.
  * If so, mark page as mlocked.
  */
-static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
+static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
+                                   struct page *page)
 {
        VM_BUG_ON(PageLRU(page));
 
@@ -189,7 +223,7 @@ extern unsigned long vma_address(struct page *page,
                                 struct vm_area_struct *vma);
 #endif
 #else /* !CONFIG_MMU */
-static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
+static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
 {
        return 0;
 }
@@ -309,3 +343,7 @@ extern u64 hwpoison_filter_flags_mask;
 extern u64 hwpoison_filter_flags_value;
 extern u64 hwpoison_filter_memcg;
 extern u32 hwpoison_filter_enable;
+
+extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
+        unsigned long, unsigned long,
+        unsigned long, unsigned long);
index 1ccbba5b667414e717db987b85da9e0f1691268f..deff1b64a08c36ef4857590e9913717488953014 100644 (file)
 #include <linux/mempolicy.h>
 #include <linux/page-isolation.h>
 #include <linux/hugetlb.h>
+#include <linux/falloc.h>
 #include <linux/sched.h>
 #include <linux/ksm.h>
+#include <linux/fs.h>
 
 /*
  * Any behaviour which results in changes to the vma->vm_flags needs to
@@ -200,8 +202,7 @@ static long madvise_remove(struct vm_area_struct *vma,
                                struct vm_area_struct **prev,
                                unsigned long start, unsigned long end)
 {
-       struct address_space *mapping;
-       loff_t offset, endoff;
+       loff_t offset;
        int error;
 
        *prev = NULL;   /* tell sys_madvise we drop mmap_sem */
@@ -217,16 +218,14 @@ static long madvise_remove(struct vm_area_struct *vma,
        if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
                return -EACCES;
 
-       mapping = vma->vm_file->f_mapping;
-
        offset = (loff_t)(start - vma->vm_start)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
-       endoff = (loff_t)(end - vma->vm_start - 1)
-                       + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 
-       /* vmtruncate_range needs to take i_mutex */
+       /* filesystem's fallocate may need to take i_mutex */
        up_read(&current->mm->mmap_sem);
-       error = vmtruncate_range(mapping->host, offset, endoff);
+       error = do_fallocate(vma->vm_file,
+                               FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+                               offset, end - start);
        down_read(&current->mm->mmap_sem);
        return error;
 }
index a44eab3157f8dc4b25e686b643ccacbc8449e041..952123eba43371a5e6d26ff8a5b7ad934747c027 100644 (file)
@@ -37,6 +37,8 @@ struct memblock memblock __initdata_memblock = {
 
 int memblock_debug __initdata_memblock;
 static int memblock_can_resize __initdata_memblock;
+static int memblock_memory_in_slab __initdata_memblock = 0;
+static int memblock_reserved_in_slab __initdata_memblock = 0;
 
 /* inline so we don't get a warning when pr_debug is compiled out */
 static inline const char *memblock_type_name(struct memblock_type *type)
@@ -187,6 +189,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
        struct memblock_region *new_array, *old_array;
        phys_addr_t old_size, new_size, addr;
        int use_slab = slab_is_available();
+       int *in_slab;
 
        /* We don't allow resizing until we know about the reserved regions
         * of memory that aren't suitable for allocation
@@ -198,6 +201,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;
 
+       /* Retrieve the slab flag */
+       if (type == &memblock.memory)
+               in_slab = &memblock_memory_in_slab;
+       else
+               in_slab = &memblock_reserved_in_slab;
+
        /* Try to find some space for it.
         *
         * WARNING: We assume that either slab_is_available() and we use it or
@@ -212,14 +221,15 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
        if (use_slab) {
                new_array = kmalloc(new_size, GFP_KERNEL);
                addr = new_array ? __pa(new_array) : 0;
-       } else
+       } else {
                addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
+               new_array = addr ? __va(addr) : 0;
+       }
        if (!addr) {
                pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
                       memblock_type_name(type), type->max, type->max * 2);
                return -1;
        }
-       new_array = __va(addr);
 
        memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
                 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);
@@ -234,22 +244,24 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
        type->regions = new_array;
        type->max <<= 1;
 
-       /* If we use SLAB that's it, we are done */
-       if (use_slab)
-               return 0;
-
-       /* Add the new reserved region now. Should not fail ! */
-       BUG_ON(memblock_reserve(addr, new_size));
-
-       /* If the array wasn't our static init one, then free it. We only do
-        * that before SLAB is available as later on, we don't know whether
-        * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
-        * anyways
+       /* Free old array. We needn't free it if the array is the
+        * static one
         */
-       if (old_array != memblock_memory_init_regions &&
-           old_array != memblock_reserved_init_regions)
+       if (*in_slab)
+               kfree(old_array);
+       else if (old_array != memblock_memory_init_regions &&
+                old_array != memblock_reserved_init_regions)
                memblock_free(__pa(old_array), old_size);
 
+       /* Reserve the new array if that comes from the memblock.
+        * Otherwise, we needn't do it
+        */
+       if (!use_slab)
+               BUG_ON(memblock_reserve(addr, new_size));
+
+       /* Update slab flag */
+       *in_slab = use_slab;
+
        return 0;
 }
 
index f342778a0c0a2649b00a3a284036ea17f05023a1..ac35bccadb7b9f53606d445a961e442e891aa94a 100644 (file)
@@ -59,7 +59,7 @@
 
 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
 #define MEM_CGROUP_RECLAIM_RETRIES     5
-struct mem_cgroup *root_mem_cgroup __read_mostly;
+static struct mem_cgroup *root_mem_cgroup __read_mostly;
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
@@ -73,7 +73,7 @@ static int really_do_swap_account __initdata = 0;
 #endif
 
 #else
-#define do_swap_account                (0)
+#define do_swap_account                0
 #endif
 
 
@@ -88,18 +88,31 @@ enum mem_cgroup_stat_index {
        MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
        MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
        MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
-       MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
        MEM_CGROUP_STAT_NSTATS,
 };
 
+static const char * const mem_cgroup_stat_names[] = {
+       "cache",
+       "rss",
+       "mapped_file",
+       "swap",
+};
+
 enum mem_cgroup_events_index {
        MEM_CGROUP_EVENTS_PGPGIN,       /* # of pages paged in */
        MEM_CGROUP_EVENTS_PGPGOUT,      /* # of pages paged out */
-       MEM_CGROUP_EVENTS_COUNT,        /* # of pages paged in/out */
        MEM_CGROUP_EVENTS_PGFAULT,      /* # of page-faults */
        MEM_CGROUP_EVENTS_PGMAJFAULT,   /* # of major page-faults */
        MEM_CGROUP_EVENTS_NSTATS,
 };
+
+static const char * const mem_cgroup_events_names[] = {
+       "pgpgin",
+       "pgpgout",
+       "pgfault",
+       "pgmajfault",
+};
+
 /*
  * Per memcg event counter is incremented at every pagein/pageout. With THP,
  * it will be incremated by the number of pages. This counter is used for
@@ -112,13 +125,14 @@ enum mem_cgroup_events_target {
        MEM_CGROUP_TARGET_NUMAINFO,
        MEM_CGROUP_NTARGETS,
 };
-#define THRESHOLDS_EVENTS_TARGET (128)
-#define SOFTLIMIT_EVENTS_TARGET (1024)
-#define NUMAINFO_EVENTS_TARGET (1024)
+#define THRESHOLDS_EVENTS_TARGET 128
+#define SOFTLIMIT_EVENTS_TARGET 1024
+#define NUMAINFO_EVENTS_TARGET 1024
 
 struct mem_cgroup_stat_cpu {
        long count[MEM_CGROUP_STAT_NSTATS];
        unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
+       unsigned long nr_page_events;
        unsigned long targets[MEM_CGROUP_NTARGETS];
 };
 
@@ -138,7 +152,6 @@ struct mem_cgroup_per_zone {
 
        struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
 
-       struct zone_reclaim_stat reclaim_stat;
        struct rb_node          tree_node;      /* RB tree node */
        unsigned long long      usage_in_excess;/* Set to the value by which */
                                                /* the soft limit is exceeded*/
@@ -182,7 +195,7 @@ struct mem_cgroup_threshold {
 
 /* For threshold */
 struct mem_cgroup_threshold_ary {
-       /* An array index points to threshold just below usage. */
+       /* An array index points to threshold just below or equal to usage. */
        int current_threshold;
        /* Size of entries[] */
        unsigned int size;
@@ -245,8 +258,8 @@ struct mem_cgroup {
                 */
                struct rcu_head rcu_freeing;
                /*
-                * But when using vfree(), that cannot be done at
-                * interrupt time, so we must then queue the work.
+                * We also need some space for a worker in deferred freeing.
+                * By the time we call it, rcu_freeing is no longer in use.
                 */
                struct work_struct work_freeing;
        };
@@ -305,7 +318,7 @@ struct mem_cgroup {
        /*
         * percpu counter.
         */
-       struct mem_cgroup_stat_cpu *stat;
+       struct mem_cgroup_stat_cpu __percpu *stat;
        /*
         * used when a cpu is offlined or other synchronizations
         * See mem_cgroup_read_stat().
@@ -360,8 +373,8 @@ static bool move_file(void)
  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
  * limit reclaim to prevent infinite loops, if they ever occur.
  */
-#define        MEM_CGROUP_MAX_RECLAIM_LOOPS            (100)
-#define        MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
+#define        MEM_CGROUP_MAX_RECLAIM_LOOPS            100
+#define        MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
 
 enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -377,8 +390,8 @@ enum charge_type {
 #define _MEM                   (0)
 #define _MEMSWAP               (1)
 #define _OOM_TYPE              (2)
-#define MEMFILE_PRIVATE(x, val)        (((x) << 16) | (val))
-#define MEMFILE_TYPE(val)      (((val) >> 16) & 0xffff)
+#define MEMFILE_PRIVATE(x, val)        ((x) << 16 | (val))
+#define MEMFILE_TYPE(val)      ((val) >> 16 & 0xffff)
 #define MEMFILE_ATTR(val)      ((val) & 0xffff)
 /* Used for OOM nofiier */
 #define OOM_CONTROL            (0)
@@ -404,6 +417,7 @@ void sock_update_memcg(struct sock *sk)
 {
        if (mem_cgroup_sockets_enabled) {
                struct mem_cgroup *memcg;
+               struct cg_proto *cg_proto;
 
                BUG_ON(!sk->sk_prot->proto_cgroup);
 
@@ -423,9 +437,10 @@ void sock_update_memcg(struct sock *sk)
 
                rcu_read_lock();
                memcg = mem_cgroup_from_task(current);
-               if (!mem_cgroup_is_root(memcg)) {
+               cg_proto = sk->sk_prot->proto_cgroup(memcg);
+               if (!mem_cgroup_is_root(memcg) && memcg_proto_active(cg_proto)) {
                        mem_cgroup_get(memcg);
-                       sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
+                       sk->sk_cgrp = cg_proto;
                }
                rcu_read_unlock();
        }
@@ -454,6 +469,19 @@ EXPORT_SYMBOL(tcp_proto_cgroup);
 #endif /* CONFIG_INET */
 #endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
 
+#if defined(CONFIG_INET) && defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM)
+static void disarm_sock_keys(struct mem_cgroup *memcg)
+{
+       if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
+               return;
+       static_key_slow_dec(&memcg_socket_limit_enabled);
+}
+#else
+static void disarm_sock_keys(struct mem_cgroup *memcg)
+{
+}
+#endif
+
 static void drain_all_stock_async(struct mem_cgroup *memcg);
 
 static struct mem_cgroup_per_zone *
@@ -718,12 +746,21 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
                nr_pages = -nr_pages; /* for event */
        }
 
-       __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
+       __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 
        preempt_enable();
 }
 
 unsigned long
+mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
+{
+       struct mem_cgroup_per_zone *mz;
+
+       mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+       return mz->lru_size[lru];
+}
+
+static unsigned long
 mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
                        unsigned int lru_mask)
 {
@@ -770,7 +807,7 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 {
        unsigned long val, next;
 
-       val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
+       val = __this_cpu_read(memcg->stat->nr_page_events);
        next = __this_cpu_read(memcg->stat->targets[target]);
        /* from time_after() in jiffies.h */
        if ((long)next - (long)val < 0) {
@@ -1013,7 +1050,7 @@ EXPORT_SYMBOL(mem_cgroup_count_vm_event);
 /**
  * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
  * @zone: zone of the wanted lruvec
- * @mem: memcg of the wanted lruvec
+ * @memcg: memcg of the wanted lruvec
  *
  * Returns the lru list vector holding pages for the given @zone and
  * @mem.  This can be the global zone lruvec, if the memory controller
@@ -1046,19 +1083,11 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
  */
 
 /**
- * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
- * @zone: zone of the page
+ * mem_cgroup_page_lruvec - return lruvec for adding an lru page
  * @page: the page
- * @lru: current lru
- *
- * This function accounts for @page being added to @lru, and returns
- * the lruvec for the given @zone and the memcg @page is charged to.
- *
- * The callsite is then responsible for physically linking the page to
- * the returned lruvec->lists[@lru].
+ * @zone: zone of the page
  */
-struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
-                                      enum lru_list lru)
+struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 {
        struct mem_cgroup_per_zone *mz;
        struct mem_cgroup *memcg;
@@ -1071,7 +1100,7 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
        memcg = pc->mem_cgroup;
 
        /*
-        * Surreptitiously switch any uncharged page to root:
+        * Surreptitiously switch any uncharged offlist page to root:
         * an uncharged page off lru does nothing to secure
         * its former mem_cgroup from sudden removal.
         *
@@ -1079,85 +1108,60 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
         * under page_cgroup lock: between them, they make all uses
         * of pc->mem_cgroup safe.
         */
-       if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+       if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
                pc->mem_cgroup = memcg = root_mem_cgroup;
 
        mz = page_cgroup_zoneinfo(memcg, page);
-       /* compound_order() is stabilized through lru_lock */
-       mz->lru_size[lru] += 1 << compound_order(page);
        return &mz->lruvec;
 }
 
 /**
- * mem_cgroup_lru_del_list - account for removing an lru page
- * @page: the page
- * @lru: target lru
- *
- * This function accounts for @page being removed from @lru.
+ * mem_cgroup_update_lru_size - account for adding or removing an lru page
+ * @lruvec: mem_cgroup per zone lru vector
+ * @lru: index of lru list the page is sitting on
+ * @nr_pages: positive when adding or negative when removing
  *
- * The callsite is then responsible for physically unlinking
- * @page->lru.
+ * This function must be called when a page is added to or removed from an
+ * lru list.
  */
-void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
+void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+                               int nr_pages)
 {
        struct mem_cgroup_per_zone *mz;
-       struct mem_cgroup *memcg;
-       struct page_cgroup *pc;
+       unsigned long *lru_size;
 
        if (mem_cgroup_disabled())
                return;
 
-       pc = lookup_page_cgroup(page);
-       memcg = pc->mem_cgroup;
-       VM_BUG_ON(!memcg);
-       mz = page_cgroup_zoneinfo(memcg, page);
-       /* huge page split is done under lru_lock. so, we have no races. */
-       VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
-       mz->lru_size[lru] -= 1 << compound_order(page);
-}
-
-void mem_cgroup_lru_del(struct page *page)
-{
-       mem_cgroup_lru_del_list(page, page_lru(page));
-}
-
-/**
- * mem_cgroup_lru_move_lists - account for moving a page between lrus
- * @zone: zone of the page
- * @page: the page
- * @from: current lru
- * @to: target lru
- *
- * This function accounts for @page being moved between the lrus @from
- * and @to, and returns the lruvec for the given @zone and the memcg
- * @page is charged to.
- *
- * The callsite is then responsible for physically relinking
- * @page->lru to the returned lruvec->lists[@to].
- */
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
-                                        struct page *page,
-                                        enum lru_list from,
-                                        enum lru_list to)
-{
-       /* XXX: Optimize this, especially for @from == @to */
-       mem_cgroup_lru_del_list(page, from);
-       return mem_cgroup_lru_add_list(zone, page, to);
+       mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+       lru_size = mz->lru_size + lru;
+       *lru_size += nr_pages;
+       VM_BUG_ON((long)(*lru_size) < 0);
 }
 
 /*
  * Checks whether given mem is same or in the root_mem_cgroup's
  * hierarchy subtree
  */
+bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
+                                 struct mem_cgroup *memcg)
+{
+       if (root_memcg == memcg)
+               return true;
+       if (!root_memcg->use_hierarchy)
+               return false;
+       return css_is_ancestor(&memcg->css, &root_memcg->css);
+}
+
 static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
-               struct mem_cgroup *memcg)
+                                      struct mem_cgroup *memcg)
 {
-       if (root_memcg != memcg) {
-               return (root_memcg->use_hierarchy &&
-                       css_is_ancestor(&memcg->css, &root_memcg->css));
-       }
+       bool ret;
 
-       return true;
+       rcu_read_lock();
+       ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
+       rcu_read_unlock();
+       return ret;
 }
 
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
@@ -1195,19 +1199,15 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
        return ret;
 }
 
-int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
+int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
 {
        unsigned long inactive_ratio;
-       int nid = zone_to_nid(zone);
-       int zid = zone_idx(zone);
        unsigned long inactive;
        unsigned long active;
        unsigned long gb;
 
-       inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
-                                               BIT(LRU_INACTIVE_ANON));
-       active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
-                                             BIT(LRU_ACTIVE_ANON));
+       inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
+       active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
 
        gb = (inactive + active) >> (30 - PAGE_SHIFT);
        if (gb)
@@ -1218,49 +1218,17 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
        return inactive * inactive_ratio < active;
 }
 
-int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
+int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
 {
        unsigned long active;
        unsigned long inactive;
-       int zid = zone_idx(zone);
-       int nid = zone_to_nid(zone);
 
-       inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
-                                               BIT(LRU_INACTIVE_FILE));
-       active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
-                                             BIT(LRU_ACTIVE_FILE));
+       inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_FILE);
+       active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_FILE);
 
        return (active > inactive);
 }
 
-struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
-                                                     struct zone *zone)
-{
-       int nid = zone_to_nid(zone);
-       int zid = zone_idx(zone);
-       struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
-
-       return &mz->reclaim_stat;
-}
-
-struct zone_reclaim_stat *
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
-{
-       struct page_cgroup *pc;
-       struct mem_cgroup_per_zone *mz;
-
-       if (mem_cgroup_disabled())
-               return NULL;
-
-       pc = lookup_page_cgroup(page);
-       if (!PageCgroupUsed(pc))
-               return NULL;
-       /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
-       smp_rmb();
-       mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-       return &mz->reclaim_stat;
-}
-
 #define mem_cgroup_from_res_counter(counter, member)   \
        container_of(counter, struct mem_cgroup, member)
 
@@ -1634,7 +1602,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
  * unused nodes. But scan_nodes is lazily updated and may not cotain
  * enough new information. We need to do double check.
  */
-bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
 {
        int nid;
 
@@ -1669,7 +1637,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
        return 0;
 }
 
-bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
 {
        return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
 }
@@ -1843,7 +1811,8 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
 /*
  * try to call OOM killer. returns false if we should exit memory-reclaim loop.
  */
-bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
+static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
+                                 int order)
 {
        struct oom_wait_info owait;
        bool locked, need_to_kill;
@@ -1992,7 +1961,7 @@ struct memcg_stock_pcp {
        unsigned int nr_pages;
        struct work_struct work;
        unsigned long flags;
-#define FLUSHING_CACHED_CHARGE (0)
+#define FLUSHING_CACHED_CHARGE 0
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
 static DEFINE_MUTEX(percpu_charge_mutex);
@@ -2139,7 +2108,7 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
        int i;
 
        spin_lock(&memcg->pcp_counter_lock);
-       for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
+       for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
                long x = per_cpu(memcg->stat->count[i], cpu);
 
                per_cpu(memcg->stat->count[i], cpu) = 0;
@@ -2426,6 +2395,24 @@ static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
        }
 }
 
+/*
+ * Cancel chrages in this cgroup....doesn't propagate to parent cgroup.
+ * This is useful when moving usage to parent cgroup.
+ */
+static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
+                                       unsigned int nr_pages)
+{
+       unsigned long bytes = nr_pages * PAGE_SIZE;
+
+       if (mem_cgroup_is_root(memcg))
+               return;
+
+       res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
+       if (do_swap_account)
+               res_counter_uncharge_until(&memcg->memsw,
+                                               memcg->memsw.parent, bytes);
+}
+
 /*
  * A helper function to get mem_cgroup from ID. must be called under
  * rcu_read_lock(). The caller must check css_is_removed() or some if
@@ -2481,6 +2468,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 {
        struct page_cgroup *pc = lookup_page_cgroup(page);
        struct zone *uninitialized_var(zone);
+       struct lruvec *lruvec;
        bool was_on_lru = false;
        bool anon;
 
@@ -2503,8 +2491,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
                zone = page_zone(page);
                spin_lock_irq(&zone->lru_lock);
                if (PageLRU(page)) {
+                       lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
                        ClearPageLRU(page);
-                       del_page_from_lru_list(zone, page, page_lru(page));
+                       del_page_from_lru_list(page, lruvec, page_lru(page));
                        was_on_lru = true;
                }
        }
@@ -2522,9 +2511,10 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 
        if (lrucare) {
                if (was_on_lru) {
+                       lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
                        VM_BUG_ON(PageLRU(page));
                        SetPageLRU(page);
-                       add_page_to_lru_list(zone, page, page_lru(page));
+                       add_page_to_lru_list(page, lruvec, page_lru(page));
                }
                spin_unlock_irq(&zone->lru_lock);
        }
@@ -2547,7 +2537,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
-#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MIGRATION))
+#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
 /*
  * Because tail pages are not marked as "used", set it. We're under
  * zone->lru_lock, 'splitting on pmd' and compound_lock.
@@ -2578,23 +2568,19 @@ void mem_cgroup_split_huge_fixup(struct page *head)
  * @pc:        page_cgroup of the page.
  * @from: mem_cgroup which the page is moved from.
  * @to:        mem_cgroup which the page is moved to. @from != @to.
- * @uncharge: whether we should call uncharge and css_put against @from.
  *
  * The caller must confirm following.
  * - page is not on LRU (isolate_page() is useful.)
  * - compound_lock is held when nr_pages > 1
  *
- * This function doesn't do "charge" nor css_get to new cgroup. It should be
- * done by a caller(__mem_cgroup_try_charge would be useful). If @uncharge is
- * true, this function does "uncharge" from old cgroup, but it doesn't if
- * @uncharge is false, so a caller should do "uncharge".
+ * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
+ * from old cgroup.
  */
 static int mem_cgroup_move_account(struct page *page,
                                   unsigned int nr_pages,
                                   struct page_cgroup *pc,
                                   struct mem_cgroup *from,
-                                  struct mem_cgroup *to,
-                                  bool uncharge)
+                                  struct mem_cgroup *to)
 {
        unsigned long flags;
        int ret;
@@ -2628,9 +2614,6 @@ static int mem_cgroup_move_account(struct page *page,
                preempt_enable();
        }
        mem_cgroup_charge_statistics(from, anon, -nr_pages);
-       if (uncharge)
-               /* This is not "cancel", but cancel_charge does all we need. */
-               __mem_cgroup_cancel_charge(from, nr_pages);
 
        /* caller should have done css_get */
        pc->mem_cgroup = to;
@@ -2664,15 +2647,13 @@ static int mem_cgroup_move_parent(struct page *page,
                                  struct mem_cgroup *child,
                                  gfp_t gfp_mask)
 {
-       struct cgroup *cg = child->css.cgroup;
-       struct cgroup *pcg = cg->parent;
        struct mem_cgroup *parent;
        unsigned int nr_pages;
        unsigned long uninitialized_var(flags);
        int ret;
 
        /* Is ROOT ? */
-       if (!pcg)
+       if (mem_cgroup_is_root(child))
                return -EINVAL;
 
        ret = -EBUSY;
@@ -2683,21 +2664,23 @@ static int mem_cgroup_move_parent(struct page *page,
 
        nr_pages = hpage_nr_pages(page);
 
-       parent = mem_cgroup_from_cont(pcg);
-       ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
-       if (ret)
-               goto put_back;
+       parent = parent_mem_cgroup(child);
+       /*
+        * If no parent, move charges to root cgroup.
+        */
+       if (!parent)
+               parent = root_mem_cgroup;
 
        if (nr_pages > 1)
                flags = compound_lock_irqsave(page);
 
-       ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
-       if (ret)
-               __mem_cgroup_cancel_charge(parent, nr_pages);
+       ret = mem_cgroup_move_account(page, nr_pages,
+                               pc, child, parent);
+       if (!ret)
+               __mem_cgroup_cancel_local_charge(child, nr_pages);
 
        if (nr_pages > 1)
                compound_unlock_irqrestore(page, flags);
-put_back:
        putback_lru_page(page);
 put:
        put_page(page);
@@ -2845,24 +2828,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
         */
        if (do_swap_account && PageSwapCache(page)) {
                swp_entry_t ent = {.val = page_private(page)};
-               struct mem_cgroup *swap_memcg;
-               unsigned short id;
-
-               id = swap_cgroup_record(ent, 0);
-               rcu_read_lock();
-               swap_memcg = mem_cgroup_lookup(id);
-               if (swap_memcg) {
-                       /*
-                        * This recorded memcg can be obsolete one. So, avoid
-                        * calling css_tryget
-                        */
-                       if (!mem_cgroup_is_root(swap_memcg))
-                               res_counter_uncharge(&swap_memcg->memsw,
-                                                    PAGE_SIZE);
-                       mem_cgroup_swap_statistics(swap_memcg, false);
-                       mem_cgroup_put(swap_memcg);
-               }
-               rcu_read_unlock();
+               mem_cgroup_uncharge_swap(ent);
        }
        /*
         * At swapin, we may charge account against cgroup which has no tasks.
@@ -3155,7 +3121,6 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
  * @entry: swap entry to be moved
  * @from:  mem_cgroup which the entry is moved from
  * @to:  mem_cgroup which the entry is moved to
- * @need_fixup: whether we should fixup res_counters and refcounts.
  *
  * It succeeds only when the swap_cgroup's record for this entry is the same
  * as the mem_cgroup's id of @from.
@@ -3166,7 +3131,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
  * both res and memsw, and called css_get().
  */
 static int mem_cgroup_move_swap_account(swp_entry_t entry,
-               struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
+                               struct mem_cgroup *from, struct mem_cgroup *to)
 {
        unsigned short old_id, new_id;
 
@@ -3185,24 +3150,13 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
                 * swap-in, the refcount of @to might be decreased to 0.
                 */
                mem_cgroup_get(to);
-               if (need_fixup) {
-                       if (!mem_cgroup_is_root(from))
-                               res_counter_uncharge(&from->memsw, PAGE_SIZE);
-                       mem_cgroup_put(from);
-                       /*
-                        * we charged both to->res and to->memsw, so we should
-                        * uncharge to->res.
-                        */
-                       if (!mem_cgroup_is_root(to))
-                               res_counter_uncharge(&to->res, PAGE_SIZE);
-               }
                return 0;
        }
        return -EINVAL;
 }
 #else
 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
-               struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
+                               struct mem_cgroup *from, struct mem_cgroup *to)
 {
        return -EINVAL;
 }
@@ -3363,7 +3317,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 void mem_cgroup_replace_page_cache(struct page *oldpage,
                                  struct page *newpage)
 {
-       struct mem_cgroup *memcg;
+       struct mem_cgroup *memcg = NULL;
        struct page_cgroup *pc;
        enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
 
@@ -3373,11 +3327,20 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
        pc = lookup_page_cgroup(oldpage);
        /* fix accounting on old pages */
        lock_page_cgroup(pc);
-       memcg = pc->mem_cgroup;
-       mem_cgroup_charge_statistics(memcg, false, -1);
-       ClearPageCgroupUsed(pc);
+       if (PageCgroupUsed(pc)) {
+               memcg = pc->mem_cgroup;
+               mem_cgroup_charge_statistics(memcg, false, -1);
+               ClearPageCgroupUsed(pc);
+       }
        unlock_page_cgroup(pc);
 
+       /*
+        * When called from shmem_replace_page(), in some cases the
+        * oldpage has already been charged, and in some cases not.
+        */
+       if (!memcg)
+               return;
+
        if (PageSwapBacked(oldpage))
                type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
 
@@ -3793,7 +3756,7 @@ try_to_free:
        goto move_account;
 }
 
-int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
+static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
 {
        return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
 }
@@ -4051,103 +4014,13 @@ static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
 }
 #endif
 
-
-/* For read statistics */
-enum {
-       MCS_CACHE,
-       MCS_RSS,
-       MCS_FILE_MAPPED,
-       MCS_PGPGIN,
-       MCS_PGPGOUT,
-       MCS_SWAP,
-       MCS_PGFAULT,
-       MCS_PGMAJFAULT,
-       MCS_INACTIVE_ANON,
-       MCS_ACTIVE_ANON,
-       MCS_INACTIVE_FILE,
-       MCS_ACTIVE_FILE,
-       MCS_UNEVICTABLE,
-       NR_MCS_STAT,
-};
-
-struct mcs_total_stat {
-       s64 stat[NR_MCS_STAT];
-};
-
-struct {
-       char *local_name;
-       char *total_name;
-} memcg_stat_strings[NR_MCS_STAT] = {
-       {"cache", "total_cache"},
-       {"rss", "total_rss"},
-       {"mapped_file", "total_mapped_file"},
-       {"pgpgin", "total_pgpgin"},
-       {"pgpgout", "total_pgpgout"},
-       {"swap", "total_swap"},
-       {"pgfault", "total_pgfault"},
-       {"pgmajfault", "total_pgmajfault"},
-       {"inactive_anon", "total_inactive_anon"},
-       {"active_anon", "total_active_anon"},
-       {"inactive_file", "total_inactive_file"},
-       {"active_file", "total_active_file"},
-       {"unevictable", "total_unevictable"}
-};
-
-
-static void
-mem_cgroup_get_local_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
-{
-       s64 val;
-
-       /* per cpu stat */
-       val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE);
-       s->stat[MCS_CACHE] += val * PAGE_SIZE;
-       val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS);
-       s->stat[MCS_RSS] += val * PAGE_SIZE;
-       val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
-       s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
-       val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGIN);
-       s->stat[MCS_PGPGIN] += val;
-       val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGOUT);
-       s->stat[MCS_PGPGOUT] += val;
-       if (do_swap_account) {
-               val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
-               s->stat[MCS_SWAP] += val * PAGE_SIZE;
-       }
-       val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGFAULT);
-       s->stat[MCS_PGFAULT] += val;
-       val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT);
-       s->stat[MCS_PGMAJFAULT] += val;
-
-       /* per zone stat */
-       val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
-       s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
-       val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));
-       s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
-       val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
-       s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
-       val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));
-       s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
-       val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
-       s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
-}
-
-static void
-mem_cgroup_get_total_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
-{
-       struct mem_cgroup *iter;
-
-       for_each_mem_cgroup_tree(iter, memcg)
-               mem_cgroup_get_local_stat(iter, s);
-}
-
 #ifdef CONFIG_NUMA
-static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
+static int mem_control_numa_stat_show(struct cgroup *cont, struct cftype *cft,
+                                     struct seq_file *m)
 {
        int nid;
        unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
        unsigned long node_nr;
-       struct cgroup *cont = m->private;
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
        total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
@@ -4188,64 +4061,100 @@ static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
 }
 #endif /* CONFIG_NUMA */
 
+static const char * const mem_cgroup_lru_names[] = {
+       "inactive_anon",
+       "active_anon",
+       "inactive_file",
+       "active_file",
+       "unevictable",
+};
+
+static inline void mem_cgroup_lru_names_not_uptodate(void)
+{
+       BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
+}
+
 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
-                                struct cgroup_map_cb *cb)
+                                struct seq_file *m)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
-       struct mcs_total_stat mystat;
-       int i;
-
-       memset(&mystat, 0, sizeof(mystat));
-       mem_cgroup_get_local_stat(memcg, &mystat);
+       struct mem_cgroup *mi;
+       unsigned int i;
 
-
-       for (i = 0; i < NR_MCS_STAT; i++) {
-               if (i == MCS_SWAP && !do_swap_account)
+       for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
+               if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
                        continue;
-               cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
+               seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
+                          mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
        }
 
+       for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
+               seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
+                          mem_cgroup_read_events(memcg, i));
+
+       for (i = 0; i < NR_LRU_LISTS; i++)
+               seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
+                          mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
+
        /* Hierarchical information */
        {
                unsigned long long limit, memsw_limit;
                memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
-               cb->fill(cb, "hierarchical_memory_limit", limit);
+               seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
                if (do_swap_account)
-                       cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
+                       seq_printf(m, "hierarchical_memsw_limit %llu\n",
+                                  memsw_limit);
        }
 
-       memset(&mystat, 0, sizeof(mystat));
-       mem_cgroup_get_total_stat(memcg, &mystat);
-       for (i = 0; i < NR_MCS_STAT; i++) {
-               if (i == MCS_SWAP && !do_swap_account)
+       for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
+               long long val = 0;
+
+               if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
                        continue;
-               cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
+               for_each_mem_cgroup_tree(mi, memcg)
+                       val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
+               seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
+       }
+
+       for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
+               unsigned long long val = 0;
+
+               for_each_mem_cgroup_tree(mi, memcg)
+                       val += mem_cgroup_read_events(mi, i);
+               seq_printf(m, "total_%s %llu\n",
+                          mem_cgroup_events_names[i], val);
+       }
+
+       for (i = 0; i < NR_LRU_LISTS; i++) {
+               unsigned long long val = 0;
+
+               for_each_mem_cgroup_tree(mi, memcg)
+                       val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
+               seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
        }
 
 #ifdef CONFIG_DEBUG_VM
        {
                int nid, zid;
                struct mem_cgroup_per_zone *mz;
+               struct zone_reclaim_stat *rstat;
                unsigned long recent_rotated[2] = {0, 0};
                unsigned long recent_scanned[2] = {0, 0};
 
                for_each_online_node(nid)
                        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                                mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+                               rstat = &mz->lruvec.reclaim_stat;
 
-                               recent_rotated[0] +=
-                                       mz->reclaim_stat.recent_rotated[0];
-                               recent_rotated[1] +=
-                                       mz->reclaim_stat.recent_rotated[1];
-                               recent_scanned[0] +=
-                                       mz->reclaim_stat.recent_scanned[0];
-                               recent_scanned[1] +=
-                                       mz->reclaim_stat.recent_scanned[1];
+                               recent_rotated[0] += rstat->recent_rotated[0];
+                               recent_rotated[1] += rstat->recent_rotated[1];
+                               recent_scanned[0] += rstat->recent_scanned[0];
+                               recent_scanned[1] += rstat->recent_scanned[1];
                        }
-               cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
-               cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
-               cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
-               cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
+               seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
+               seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
+               seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
+               seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
        }
 #endif
 
@@ -4307,7 +4216,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
        usage = mem_cgroup_usage(memcg, swap);
 
        /*
-        * current_threshold points to threshold just below usage.
+        * current_threshold points to threshold just below or equal to usage.
         * If it's not true, a threshold was crossed after last
         * call of __mem_cgroup_threshold().
         */
@@ -4433,14 +4342,15 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
        /* Find current threshold */
        new->current_threshold = -1;
        for (i = 0; i < size; i++) {
-               if (new->entries[i].threshold < usage) {
+               if (new->entries[i].threshold <= usage) {
                        /*
                         * new->current_threshold will not be used until
                         * rcu_assign_pointer(), so it's safe to increment
                         * it here.
                         */
                        ++new->current_threshold;
-               }
+               } else
+                       break;
        }
 
        /* Free old spare buffer and save old primary buffer as spare */
@@ -4509,7 +4419,7 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
                        continue;
 
                new->entries[j] = thresholds->primary->entries[i];
-               if (new->entries[j].threshold < usage) {
+               if (new->entries[j].threshold <= usage) {
                        /*
                         * new->current_threshold will not be used
                         * until rcu_assign_pointer(), so it's safe to increment
@@ -4623,22 +4533,6 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
        return 0;
 }
 
-#ifdef CONFIG_NUMA
-static const struct file_operations mem_control_numa_stat_file_operations = {
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
-{
-       struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
-
-       file->f_op = &mem_control_numa_stat_file_operations;
-       return single_open(file, mem_control_numa_stat_show, cont);
-}
-#endif /* CONFIG_NUMA */
-
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
@@ -4694,7 +4588,7 @@ static struct cftype mem_cgroup_files[] = {
        },
        {
                .name = "stat",
-               .read_map = mem_control_stat_show,
+               .read_seq_string = mem_control_stat_show,
        },
        {
                .name = "force_empty",
@@ -4726,8 +4620,7 @@ static struct cftype mem_cgroup_files[] = {
 #ifdef CONFIG_NUMA
        {
                .name = "numa_stat",
-               .open = mem_control_numa_stat_open,
-               .mode = S_IRUGO,
+               .read_seq_string = mem_control_numa_stat_show,
        },
 #endif
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -4764,7 +4657,6 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 {
        struct mem_cgroup_per_node *pn;
        struct mem_cgroup_per_zone *mz;
-       enum lru_list lru;
        int zone, tmp = node;
        /*
         * This routine is called against possible nodes.
@@ -4782,8 +4674,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 
        for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                mz = &pn->zoneinfo[zone];
-               for_each_lru(lru)
-                       INIT_LIST_HEAD(&mz->lruvec.lists[lru]);
+               lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
                mz->usage_in_excess = 0;
                mz->on_tree = false;
                mz->memcg = memcg;
@@ -4826,23 +4717,40 @@ out_free:
 }
 
 /*
- * Helpers for freeing a vzalloc()ed mem_cgroup by RCU,
+ * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
  * but in process context.  The work_freeing structure is overlaid
  * on the rcu_freeing structure, which itself is overlaid on memsw.
  */
-static void vfree_work(struct work_struct *work)
+static void free_work(struct work_struct *work)
 {
        struct mem_cgroup *memcg;
+       int size = sizeof(struct mem_cgroup);
 
        memcg = container_of(work, struct mem_cgroup, work_freeing);
-       vfree(memcg);
+       /*
+        * We need to make sure that (at least for now), the jump label
+        * destruction code runs outside of the cgroup lock. This is because
+        * get_online_cpus(), which is called from the static_branch update,
+        * can't be called inside the cgroup_lock. cpusets are the ones
+        * enforcing this dependency, so if they ever change, we might as well.
+        *
+        * schedule_work() will guarantee this happens. Be careful if you need
+        * to move this code around, and make sure it is outside
+        * the cgroup_lock.
+        */
+       disarm_sock_keys(memcg);
+       if (size < PAGE_SIZE)
+               kfree(memcg);
+       else
+               vfree(memcg);
 }
-static void vfree_rcu(struct rcu_head *rcu_head)
+
+static void free_rcu(struct rcu_head *rcu_head)
 {
        struct mem_cgroup *memcg;
 
        memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
-       INIT_WORK(&memcg->work_freeing, vfree_work);
+       INIT_WORK(&memcg->work_freeing, free_work);
        schedule_work(&memcg->work_freeing);
 }
 
@@ -4868,10 +4776,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
                free_mem_cgroup_per_zone_info(memcg, node);
 
        free_percpu(memcg->stat);
-       if (sizeof(struct mem_cgroup) < PAGE_SIZE)
-               kfree_rcu(memcg, rcu_freeing);
-       else
-               call_rcu(&memcg->rcu_freeing, vfree_rcu);
+       call_rcu(&memcg->rcu_freeing, free_rcu);
 }
 
 static void mem_cgroup_get(struct mem_cgroup *memcg)
@@ -5135,7 +5040,7 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
                return NULL;
        if (PageAnon(page)) {
                /* we don't move shared anon */
-               if (!move_anon() || page_mapcount(page) > 2)
+               if (!move_anon())
                        return NULL;
        } else if (!move_file())
                /* we ignore mapcount for file pages */
@@ -5146,32 +5051,37 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
        return page;
 }
 
+#ifdef CONFIG_SWAP
 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
                        unsigned long addr, pte_t ptent, swp_entry_t *entry)
 {
-       int usage_count;
        struct page *page = NULL;
        swp_entry_t ent = pte_to_swp_entry(ptent);
 
        if (!move_anon() || non_swap_entry(ent))
                return NULL;
-       usage_count = mem_cgroup_count_swap_user(ent, &page);
-       if (usage_count > 1) { /* we don't move shared anon */
-               if (page)
-                       put_page(page);
-               return NULL;
-       }
+       /*
+        * Because lookup_swap_cache() updates some statistics counter,
+        * we call find_get_page() with swapper_space directly.
+        */
+       page = find_get_page(&swapper_space, ent.val);
        if (do_swap_account)
                entry->val = ent.val;
 
        return page;
 }
+#else
+static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
+                       unsigned long addr, pte_t ptent, swp_entry_t *entry)
+{
+       return NULL;
+}
+#endif
 
 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
                        unsigned long addr, pte_t ptent, swp_entry_t *entry)
 {
        struct page *page = NULL;
-       struct inode *inode;
        struct address_space *mapping;
        pgoff_t pgoff;
 
@@ -5180,7 +5090,6 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
        if (!move_file())
                return NULL;
 
-       inode = vma->vm_file->f_path.dentry->d_inode;
        mapping = vma->vm_file->f_mapping;
        if (pte_none(ptent))
                pgoff = linear_page_index(vma, addr);
@@ -5479,8 +5388,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
                        if (!isolate_lru_page(page)) {
                                pc = lookup_page_cgroup(page);
                                if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
-                                                            pc, mc.from, mc.to,
-                                                            false)) {
+                                                       pc, mc.from, mc.to)) {
                                        mc.precharge -= HPAGE_PMD_NR;
                                        mc.moved_charge += HPAGE_PMD_NR;
                                }
@@ -5510,7 +5418,7 @@ retry:
                                goto put;
                        pc = lookup_page_cgroup(page);
                        if (!mem_cgroup_move_account(page, 1, pc,
-                                                    mc.from, mc.to, false)) {
+                                                    mc.from, mc.to)) {
                                mc.precharge--;
                                /* we uncharge from mc.from later. */
                                mc.moved_charge++;
@@ -5521,8 +5429,7 @@ put:                      /* get_mctgt_type() gets the page */
                        break;
                case MC_TARGET_SWAP:
                        ent = target.ent;
-                       if (!mem_cgroup_move_swap_account(ent,
-                                               mc.from, mc.to, false)) {
+                       if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
                                mc.precharge--;
                                /* we fixup refcnts and charges later. */
                                mc.moved_swap++;
@@ -5598,7 +5505,6 @@ static void mem_cgroup_move_task(struct cgroup *cont,
        if (mm) {
                if (mc.to)
                        mem_cgroup_move_charge(mm);
-               put_swap_token(mm);
                mmput(mm);
        }
        if (mc.to)
index 97cc2733551ad29bfed762a448c65676be21c415..ab1e7145e2909c8e1a02359fb1e148118c42e1b2 100644 (file)
@@ -1388,23 +1388,23 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
         */
        if (!get_page_unless_zero(compound_head(p))) {
                if (PageHuge(p)) {
-                       pr_info("get_any_page: %#lx free huge page\n", pfn);
+                       pr_info("%s: %#lx free huge page\n", __func__, pfn);
                        ret = dequeue_hwpoisoned_huge_page(compound_head(p));
                } else if (is_free_buddy_page(p)) {
-                       pr_info("get_any_page: %#lx free buddy page\n", pfn);
+                       pr_info("%s: %#lx free buddy page\n", __func__, pfn);
                        /* Set hwpoison bit while page is still isolated */
                        SetPageHWPoison(p);
                        ret = 0;
                } else {
-                       pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n",
-                               pfn, p->flags);
+                       pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
+                               __func__, pfn, p->flags);
                        ret = -EIO;
                }
        } else {
                /* Not a free page */
                ret = 1;
        }
-       unset_migratetype_isolate(p);
+       unset_migratetype_isolate(p, MIGRATE_MOVABLE);
        unlock_memory_hotplug();
        return ret;
 }
index e40f6759ba98b9ebb0ad7af82e6cef6a13382942..1b7dc662bf9f229063cb3e7b97e8e4c22147b92b 100644 (file)
@@ -2908,7 +2908,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        delayacct_set_flag(DELAYACCT_PF_SWAPIN);
        page = lookup_swap_cache(entry);
        if (!page) {
-               grab_swap_token(mm); /* Contend for token _before_ read-in */
                page = swapin_readahead(entry,
                                        GFP_HIGHUSER_MOVABLE, vma, address);
                if (!page) {
@@ -2938,6 +2937,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        locked = lock_page_or_retry(page, mm, flags);
+
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
        if (!locked) {
                ret |= VM_FAULT_RETRY;
@@ -3486,6 +3486,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (unlikely(is_vm_hugetlb_page(vma)))
                return hugetlb_fault(mm, vma, address, flags);
 
+retry:
        pgd = pgd_offset(mm, address);
        pud = pud_alloc(mm, pgd, address);
        if (!pud)
@@ -3499,13 +3500,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                          pmd, flags);
        } else {
                pmd_t orig_pmd = *pmd;
+               int ret;
+
                barrier();
                if (pmd_trans_huge(orig_pmd)) {
                        if (flags & FAULT_FLAG_WRITE &&
                            !pmd_write(orig_pmd) &&
-                           !pmd_trans_splitting(orig_pmd))
-                               return do_huge_pmd_wp_page(mm, vma, address,
-                                                          pmd, orig_pmd);
+                           !pmd_trans_splitting(orig_pmd)) {
+                               ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
+                                                         orig_pmd);
+                               /*
+                                * If COW results in an oom, the huge pmd will
+                                * have been split, so retry the fault on the
+                                * pte for a smaller charge.
+                                */
+                               if (unlikely(ret & VM_FAULT_OOM))
+                                       goto retry;
+                               return ret;
+                       }
                        return 0;
                }
        }
index 6629fafd6ce4a65eae9421dd9d68b25805bca993..0d7e3ec8e0f3cc997b5fa0b422f5fa39caffe413 100644 (file)
@@ -74,8 +74,7 @@ static struct resource *register_memory_resource(u64 start, u64 size)
        res->end = start + size - 1;
        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        if (request_resource(&iomem_resource, res) < 0) {
-               printk("System RAM resource %llx - %llx cannot be added\n",
-               (unsigned long long)res->start, (unsigned long long)res->end);
+               printk("System RAM resource %pR cannot be added\n", res);
                kfree(res);
                res = NULL;
        }
@@ -502,8 +501,10 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
                online_pages_range);
        if (ret) {
                mutex_unlock(&zonelists_mutex);
-               printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
-                       nr_pages, pfn);
+               printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
+                      (unsigned long long) pfn << PAGE_SHIFT,
+                      (((unsigned long long) pfn + nr_pages)
+                           << PAGE_SHIFT) - 1);
                memory_notify(MEM_CANCEL_ONLINE, &arg);
                unlock_memory_hotplug();
                return ret;
@@ -891,7 +892,7 @@ static int __ref offline_pages(unsigned long start_pfn,
        nr_pages = end_pfn - start_pfn;
 
        /* set above range as isolated */
-       ret = start_isolate_page_range(start_pfn, end_pfn);
+       ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        if (ret)
                goto out;
 
@@ -956,7 +957,7 @@ repeat:
           We cannot do rollback at this point. */
        offline_isolated_pages(start_pfn, end_pfn);
        /* reset pagetype flags and makes migrate type to be MOVABLE */
-       undo_isolate_page_range(start_pfn, end_pfn);
+       undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        /* removal success */
        zone->present_pages -= offlined_pages;
        zone->zone_pgdat->node_present_pages -= offlined_pages;
@@ -977,11 +978,12 @@ repeat:
        return 0;
 
 failed_removal:
-       printk(KERN_INFO "memory offlining %lx to %lx failed\n",
-               start_pfn, end_pfn);
+       printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
+              (unsigned long long) start_pfn << PAGE_SHIFT,
+              ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
        /* pushback to free area */
-       undo_isolate_page_range(start_pfn, end_pfn);
+       undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 
 out:
        unlock_memory_hotplug();
index 88f9422b92e73afc2be25161f20a7957d4a54160..f15c1b24ca1822c7808b1ea825ca932a9e063b23 100644 (file)
@@ -390,7 +390,7 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
 {
        if (!pol)
                return;
-       if (!mpol_store_user_nodemask(pol) && step == 0 &&
+       if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
                return;
 
@@ -950,8 +950,8 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
  *
  * Returns the number of page that could not be moved.
  */
-int do_migrate_pages(struct mm_struct *mm,
-       const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
+int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+                    const nodemask_t *to, int flags)
 {
        int busy = 0;
        int err;
@@ -963,7 +963,7 @@ int do_migrate_pages(struct mm_struct *mm,
 
        down_read(&mm->mmap_sem);
 
-       err = migrate_vmas(mm, from_nodes, to_nodes, flags);
+       err = migrate_vmas(mm, from, to, flags);
        if (err)
                goto out;
 
@@ -998,14 +998,34 @@ int do_migrate_pages(struct mm_struct *mm,
         * moved to an empty node, then there is nothing left worth migrating.
         */
 
-       tmp = *from_nodes;
+       tmp = *from;
        while (!nodes_empty(tmp)) {
                int s,d;
                int source = -1;
                int dest = 0;
 
                for_each_node_mask(s, tmp) {
-                       d = node_remap(s, *from_nodes, *to_nodes);
+
+                       /*
+                        * do_migrate_pages() tries to maintain the relative
+                        * node relationship of the pages established between
+                        * threads and memory areas.
+                         *
+                        * However if the number of source nodes is not equal to
+                        * the number of destination nodes we can not preserve
+                        * this node relative relationship.  In that case, skip
+                        * copying memory from a node that is in the destination
+                        * mask.
+                        *
+                        * Example: [2,3,4] -> [3,4,5] moves everything.
+                        *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
+                        */
+
+                       if ((nodes_weight(*from) != nodes_weight(*to)) &&
+                                               (node_isset(s, *to)))
+                               continue;
+
+                       d = node_remap(s, *from, *to);
                        if (s == d)
                                continue;
 
@@ -1065,8 +1085,8 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 {
 }
 
-int do_migrate_pages(struct mm_struct *mm,
-       const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
+int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+                    const nodemask_t *to, int flags)
 {
        return -ENOSYS;
 }
index ab81d482ae6f1cac508fce796c8902432e32c3ad..be26d5cbe56b34d63f8c8ac8b799782f9ef6424b 100644 (file)
@@ -436,7 +436,10 @@ void migrate_page_copy(struct page *newpage, struct page *page)
                 * is actually a signal that all of the page has become dirty.
                 * Whereas only part of our page may be dirty.
                 */
-               __set_page_dirty_nobuffers(newpage);
+               if (PageSwapBacked(page))
+                       SetPageDirty(newpage);
+               else
+                       __set_page_dirty_nobuffers(newpage);
        }
 
        mlock_migrate_page(newpage, page);
index e8dcfc7de866e2b7c1a1dccd90eb4a9aff89161d..3edfcdfa42d9f27a5238780065220ec3b4fc702a 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -971,15 +971,13 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
 
-static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
                        unsigned long flags, unsigned long pgoff)
 {
        struct mm_struct * mm = current->mm;
        struct inode *inode;
        vm_flags_t vm_flags;
-       int error;
-       unsigned long reqprot = prot;
 
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
@@ -1101,39 +1099,9 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                }
        }
 
-       error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
-       if (error)
-               return error;
-
        return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
 
-unsigned long do_mmap(struct file *file, unsigned long addr,
-       unsigned long len, unsigned long prot,
-       unsigned long flag, unsigned long offset)
-{
-       if (unlikely(offset + PAGE_ALIGN(len) < offset))
-               return -EINVAL;
-       if (unlikely(offset & ~PAGE_MASK))
-               return -EINVAL;
-       return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(do_mmap);
-
-unsigned long vm_mmap(struct file *file, unsigned long addr,
-       unsigned long len, unsigned long prot,
-       unsigned long flag, unsigned long offset)
-{
-       unsigned long ret;
-       struct mm_struct *mm = current->mm;
-
-       down_write(&mm->mmap_sem);
-       ret = do_mmap(file, addr, len, prot, flag, offset);
-       up_write(&mm->mmap_sem);
-       return ret;
-}
-EXPORT_SYMBOL(vm_mmap);
-
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, pgoff)
@@ -1165,10 +1133,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-       down_write(&current->mm->mmap_sem);
-       retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-       up_write(&current->mm->mmap_sem);
-
+       retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        if (file)
                fput(file);
 out:
@@ -1629,7 +1594,9 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        if (addr & ~PAGE_MASK)
                return -EINVAL;
 
-       return arch_rebalance_pgtables(addr, len);
+       addr = arch_rebalance_pgtables(addr, len);
+       error = security_mmap_addr(addr);
+       return error ? error : addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
@@ -1639,33 +1606,34 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
        struct vm_area_struct *vma = NULL;
 
-       if (mm) {
-               /* Check the cache first. */
-               /* (Cache hit rate is typically around 35%.) */
-               vma = mm->mmap_cache;
-               if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
-                       struct rb_node * rb_node;
-
-                       rb_node = mm->mm_rb.rb_node;
-                       vma = NULL;
-
-                       while (rb_node) {
-                               struct vm_area_struct * vma_tmp;
-
-                               vma_tmp = rb_entry(rb_node,
-                                               struct vm_area_struct, vm_rb);
-
-                               if (vma_tmp->vm_end > addr) {
-                                       vma = vma_tmp;
-                                       if (vma_tmp->vm_start <= addr)
-                                               break;
-                                       rb_node = rb_node->rb_left;
-                               } else
-                                       rb_node = rb_node->rb_right;
-                       }
-                       if (vma)
-                               mm->mmap_cache = vma;
+       if (WARN_ON_ONCE(!mm))          /* Remove this in linux-3.6 */
+               return NULL;
+
+       /* Check the cache first. */
+       /* (Cache hit rate is typically around 35%.) */
+       vma = mm->mmap_cache;
+       if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
+               struct rb_node *rb_node;
+
+               rb_node = mm->mm_rb.rb_node;
+               vma = NULL;
+
+               while (rb_node) {
+                       struct vm_area_struct *vma_tmp;
+
+                       vma_tmp = rb_entry(rb_node,
+                                          struct vm_area_struct, vm_rb);
+
+                       if (vma_tmp->vm_end > addr) {
+                               vma = vma_tmp;
+                               if (vma_tmp->vm_start <= addr)
+                                       break;
+                               rb_node = rb_node->rb_left;
+                       } else
+                               rb_node = rb_node->rb_right;
                }
+               if (vma)
+                       mm->mmap_cache = vma;
        }
        return vma;
 }
@@ -1818,7 +1786,7 @@ int expand_downwards(struct vm_area_struct *vma,
                return -ENOMEM;
 
        address &= PAGE_MASK;
-       error = security_file_mmap(NULL, 0, 0, 0, address, 1);
+       error = security_mmap_addr(address);
        if (error)
                return error;
 
@@ -2158,7 +2126,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 
        return 0;
 }
-EXPORT_SYMBOL(do_munmap);
 
 int vm_munmap(unsigned long start, size_t len)
 {
@@ -2206,10 +2173,6 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
        if (!len)
                return addr;
 
-       error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
-       if (error)
-               return error;
-
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
        error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
@@ -2562,10 +2525,6 @@ int install_special_mapping(struct mm_struct *mm,
        vma->vm_ops = &special_mapping_vmops;
        vma->vm_private_data = pages;
 
-       ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
-       if (ret)
-               goto out;
-
        ret = insert_vm_struct(mm, vma);
        if (ret)
                goto out;
index 7cf7b7ddc7c552d4d514a696b30d6cd20a649d4d..6830eab5bf09c5c5432801ed7ad23d4bad054779 100644 (file)
@@ -86,3 +86,17 @@ int memmap_valid_within(unsigned long pfn,
        return 1;
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
+void lruvec_init(struct lruvec *lruvec, struct zone *zone)
+{
+       enum lru_list lru;
+
+       memset(lruvec, 0, sizeof(struct lruvec));
+
+       for_each_lru(lru)
+               INIT_LIST_HEAD(&lruvec->lists[lru]);
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+       lruvec->zone = zone;
+#endif
+}
index db8d983b5a7d7a2d6746ccbf74471d2ced3cdd96..21fed202ddad865bb3ee70d07d8ebce17fd37493 100644 (file)
@@ -371,10 +371,6 @@ static unsigned long mremap_to(unsigned long addr,
        if ((addr <= new_addr) && (addr+old_len) > new_addr)
                goto out;
 
-       ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
-       if (ret)
-               goto out;
-
        ret = do_munmap(mm, new_addr, new_len);
        if (ret)
                goto out;
@@ -432,15 +428,17 @@ static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
  * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
  * This option implies MREMAP_MAYMOVE.
  */
-unsigned long do_mremap(unsigned long addr,
-       unsigned long old_len, unsigned long new_len,
-       unsigned long flags, unsigned long new_addr)
+SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+               unsigned long, new_len, unsigned long, flags,
+               unsigned long, new_addr)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;
 
+       down_write(&current->mm->mmap_sem);
+
        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                goto out;
 
@@ -530,25 +528,11 @@ unsigned long do_mremap(unsigned long addr,
                        goto out;
                }
 
-               ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
-               if (ret)
-                       goto out;
                ret = move_vma(vma, addr, old_len, new_len, new_addr);
        }
 out:
        if (ret & ~PAGE_MASK)
                vm_unacct_memory(charged);
-       return ret;
-}
-
-SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
-               unsigned long, new_len, unsigned long, flags,
-               unsigned long, new_addr)
-{
-       unsigned long ret;
-
-       down_write(&current->mm->mmap_sem);
-       ret = do_mremap(addr, old_len, new_len, flags, new_addr);
        up_write(&current->mm->mmap_sem);
        return ret;
 }
index 1983fb1c7026c0ef4c1e705298876d0ab2449c55..d23415c001bc4c5847986c7659261ca335888382 100644 (file)
@@ -274,86 +274,85 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
        return ___alloc_bootmem(size, align, goal, limit);
 }
 
-/**
- * __alloc_bootmem_node - allocate boot memory from a specific node
- * @pgdat: node to allocate from
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may fall back to any node in the system if the specified node
- * can not hold the requested memory.
- *
- * The function panics if the request can not be satisfied.
- */
-void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
-                                  unsigned long align, unsigned long goal)
+static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+                                                  unsigned long size,
+                                                  unsigned long align,
+                                                  unsigned long goal,
+                                                  unsigned long limit)
 {
        void *ptr;
 
-       if (WARN_ON_ONCE(slab_is_available()))
-               return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
 again:
        ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
-                                        goal, -1ULL);
+                                       goal, limit);
        if (ptr)
                return ptr;
 
        ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
-                                       goal, -1ULL);
-       if (!ptr && goal) {
+                                       goal, limit);
+       if (ptr)
+               return ptr;
+
+       if (goal) {
                goal = 0;
                goto again;
        }
-       return ptr;
+
+       return NULL;
 }
 
-void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
+void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
 {
-       return __alloc_bootmem_node(pgdat, size, align, goal);
+       if (WARN_ON_ONCE(slab_is_available()))
+               return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+       return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
 }
 
-#ifdef CONFIG_SPARSEMEM
-/**
- * alloc_bootmem_section - allocate boot memory from a specific section
- * @size: size of the request in bytes
- * @section_nr: sparse map section to allocate from
- *
- * Return NULL on failure.
- */
-void * __init alloc_bootmem_section(unsigned long size,
-                                   unsigned long section_nr)
+void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
+                                   unsigned long align, unsigned long goal,
+                                   unsigned long limit)
 {
-       unsigned long pfn, goal, limit;
+       void *ptr;
 
-       pfn = section_nr_to_pfn(section_nr);
-       goal = pfn << PAGE_SHIFT;
-       limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
+       ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
+       if (ptr)
+               return ptr;
 
-       return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
-                                        SMP_CACHE_BYTES, goal, limit);
+       printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+       panic("Out of memory");
+       return NULL;
 }
-#endif
 
-void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
+/**
+ * __alloc_bootmem_node - allocate boot memory from a specific node
+ * @pgdat: node to allocate from
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may fall back to any node in the system if the specified node
+ * can not hold the requested memory.
+ *
+ * The function panics if the request can not be satisfied.
+ */
+void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
 {
-       void *ptr;
-
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-       ptr =  __alloc_memory_core_early(pgdat->node_id, size, align,
-                                                goal, -1ULL);
-       if (ptr)
-               return ptr;
+       return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
+}
 
-       return __alloc_bootmem_nopanic(size, align, goal);
+void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
+                                  unsigned long align, unsigned long goal)
+{
+       return __alloc_bootmem_node(pgdat, size, align, goal);
 }
 
 #ifndef ARCH_LOW_ADDRESS_LIMIT
@@ -397,16 +396,9 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
 void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
 {
-       void *ptr;
-
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
-       ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
-                               goal, ARCH_LOW_ADDRESS_LIMIT);
-       if (ptr)
-               return ptr;
-
-       return  __alloc_memory_core_early(MAX_NUMNODES, size, align,
-                               goal, ARCH_LOW_ADDRESS_LIMIT);
+       return ___alloc_bootmem_node(pgdat, size, align, goal,
+                                    ARCH_LOW_ADDRESS_LIMIT);
 }
index bb8f4f004a82ce57abb0653a9a8ed72d533f5c45..c4acfbc099727b3f5151ed5917961ff59efb7481 100644 (file)
@@ -889,7 +889,6 @@ static int validate_mmap_request(struct file *file,
                                 unsigned long *_capabilities)
 {
        unsigned long capabilities, rlen;
-       unsigned long reqprot = prot;
        int ret;
 
        /* do the simple checks first */
@@ -1047,7 +1046,7 @@ static int validate_mmap_request(struct file *file,
        }
 
        /* allow the security API to have its say */
-       ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
+       ret = security_mmap_addr(addr);
        if (ret < 0)
                return ret;
 
@@ -1233,7 +1232,7 @@ enomem:
 /*
  * handle mapping creation for uClinux
  */
-static unsigned long do_mmap_pgoff(struct file *file,
+unsigned long do_mmap_pgoff(struct file *file,
                            unsigned long addr,
                            unsigned long len,
                            unsigned long prot,
@@ -1471,32 +1470,6 @@ error_getting_region:
        return -ENOMEM;
 }
 
-unsigned long do_mmap(struct file *file, unsigned long addr,
-       unsigned long len, unsigned long prot,
-       unsigned long flag, unsigned long offset)
-{
-       if (unlikely(offset + PAGE_ALIGN(len) < offset))
-               return -EINVAL;
-       if (unlikely(offset & ~PAGE_MASK))
-               return -EINVAL;
-       return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(do_mmap);
-
-unsigned long vm_mmap(struct file *file, unsigned long addr,
-       unsigned long len, unsigned long prot,
-       unsigned long flag, unsigned long offset)
-{
-       unsigned long ret;
-       struct mm_struct *mm = current->mm;
-
-       down_write(&mm->mmap_sem);
-       ret = do_mmap(file, addr, len, prot, flag, offset);
-       up_write(&mm->mmap_sem);
-       return ret;
-}
-EXPORT_SYMBOL(vm_mmap);
-
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, pgoff)
@@ -1513,9 +1486,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-       down_write(&current->mm->mmap_sem);
-       retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-       up_write(&current->mm->mmap_sem);
+       ret = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 
        if (file)
                fput(file);
index 9f09a1fde9f9954616473e1452c9e7e67c0778b8..ed0e19677360fa55f62e3944208e82cab7eeacc8 100644 (file)
@@ -180,10 +180,10 @@ static bool oom_unkillable_task(struct task_struct *p,
  * predictable as possible.  The goal is to return the highest value for the
  * task consuming the most memory to avoid subsequent oom failures.
  */
-unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
-                     const nodemask_t *nodemask, unsigned long totalpages)
+unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
+                         const nodemask_t *nodemask, unsigned long totalpages)
 {
-       long points;
+       unsigned long points;
 
        if (oom_unkillable_task(p, memcg, nodemask))
                return 0;
@@ -197,22 +197,12 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
                return 0;
        }
 
-       /*
-        * The memory controller may have a limit of 0 bytes, so avoid a divide
-        * by zero, if necessary.
-        */
-       if (!totalpages)
-               totalpages = 1;
-
        /*
         * The baseline for the badness score is the proportion of RAM that each
         * task's rss, pagetable and swap space use.
         */
-       points = get_mm_rss(p->mm) + p->mm->nr_ptes;
-       points += get_mm_counter(p->mm, MM_SWAPENTS);
-
-       points *= 1000;
-       points /= totalpages;
+       points = get_mm_rss(p->mm) + p->mm->nr_ptes +
+                get_mm_counter(p->mm, MM_SWAPENTS);
        task_unlock(p);
 
        /*
@@ -220,23 +210,20 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
         * implementation used by LSMs.
         */
        if (has_capability_noaudit(p, CAP_SYS_ADMIN))
-               points -= 30;
+               points -= 30 * totalpages / 1000;
 
        /*
         * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
         * either completely disable oom killing or always prefer a certain
         * task.
         */
-       points += p->signal->oom_score_adj;
+       points += p->signal->oom_score_adj * totalpages / 1000;
 
        /*
-        * Never return 0 for an eligible task that may be killed since it's
-        * possible that no single user task uses more than 0.1% of memory and
-        * no single admin tasks uses more than 3.0%.
+        * Never return 0 for an eligible task regardless of the root bonus and
+        * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
         */
-       if (points <= 0)
-               return 1;
-       return (points < 1000) ? points : 1000;
+       return points ? points : 1;
 }
 
 /*
@@ -314,7 +301,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 {
        struct task_struct *g, *p;
        struct task_struct *chosen = NULL;
-       *ppoints = 0;
+       unsigned long chosen_points = 0;
 
        do_each_thread(g, p) {
                unsigned int points;
@@ -354,7 +341,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
                         */
                        if (p == current) {
                                chosen = p;
-                               *ppoints = 1000;
+                               chosen_points = ULONG_MAX;
                        } else if (!force_kill) {
                                /*
                                 * If this task is not being ptraced on exit,
@@ -367,12 +354,13 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
                }
 
                points = oom_badness(p, memcg, nodemask, totalpages);
-               if (points > *ppoints) {
+               if (points > chosen_points) {
                        chosen = p;
-                       *ppoints = points;
+                       chosen_points = points;
                }
        } while_each_thread(g, p);
 
+       *ppoints = chosen_points * 1000 / totalpages;
        return chosen;
 }
 
@@ -572,7 +560,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
        }
 
        check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
-       limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT;
+       limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
        read_lock(&tasklist_lock);
        p = select_bad_process(&points, limit, memcg, NULL, false);
        if (p && PTR_ERR(p) != -1UL)
index 26adea8ca2e7dd9ebdbc9c0938498a1a8a56324d..93d8d2f7108ccb8b3c5a73de42361f23d98049c1 100644 (file)
@@ -204,7 +204,7 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
  * Returns the global number of pages potentially available for dirty
  * page cache.  This is the base value for the global dirty limits.
  */
-unsigned long global_dirtyable_memory(void)
+static unsigned long global_dirtyable_memory(void)
 {
        unsigned long x;
 
@@ -1568,6 +1568,7 @@ void writeback_set_ratelimit(void)
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        global_dirty_limits(&background_thresh, &dirty_thresh);
+       global_dirty_limit = dirty_thresh;
        ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
        if (ratelimit_pages < 16)
                ratelimit_pages = 16;
index 1851df600438ab260bc419fe04b7e6d432073f7b..44030096da631b5f49b4ee1a81d0b9ace59f5c7e 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/memcontrol.h>
 #include <linux/prefetch.h>
+#include <linux/migrate.h>
 #include <linux/page-debug-flags.h>
 
 #include <asm/tlbflush.h>
@@ -513,10 +514,10 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  * free pages of length of (1 << order) and marked with _mapcount -2. Page's
  * order is recorded in page_private(page) field.
  * So when we are allocating or freeing one, we can derive the state of the
- * other.  That is, if we allocate a small block, and both were   
- * free, the remainder of the region must be split into blocks.   
+ * other.  That is, if we allocate a small block, and both were
+ * free, the remainder of the region must be split into blocks.
  * If a block is freed, and its buddy is also free, then this
- * triggers coalescing into a block of larger size.            
+ * triggers coalescing into a block of larger size.
  *
  * -- wli
  */
@@ -749,6 +750,24 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
        __free_pages(page, order);
 }
 
+#ifdef CONFIG_CMA
+/* Free whole pageblock and set it's migration type to MIGRATE_CMA. */
+void __init init_cma_reserved_pageblock(struct page *page)
+{
+       unsigned i = pageblock_nr_pages;
+       struct page *p = page;
+
+       do {
+               __ClearPageReserved(p);
+               set_page_count(p, 0);
+       } while (++p, --i);
+
+       set_page_refcounted(page);
+       set_pageblock_migratetype(page, MIGRATE_CMA);
+       __free_pages(page, pageblock_order);
+       totalram_pages += pageblock_nr_pages;
+}
+#endif
 
 /*
  * The order of subdivision here is critical for the IO subsystem.
@@ -874,11 +893,17 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
  */
-static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-       [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
-       [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
-       [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
-       [MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
+static int fallbacks[MIGRATE_TYPES][4] = {
+       [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
+       [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
+#ifdef CONFIG_CMA
+       [MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+       [MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
+#else
+       [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
+#endif
+       [MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
+       [MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
 };
 
 /*
@@ -973,12 +998,12 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
        /* Find the largest possible block of pages in the other list */
        for (current_order = MAX_ORDER-1; current_order >= order;
                                                --current_order) {
-               for (i = 0; i < MIGRATE_TYPES - 1; i++) {
+               for (i = 0;; i++) {
                        migratetype = fallbacks[start_migratetype][i];
 
                        /* MIGRATE_RESERVE handled later if necessary */
                        if (migratetype == MIGRATE_RESERVE)
-                               continue;
+                               break;
 
                        area = &(zone->free_area[current_order]);
                        if (list_empty(&area->free_list[migratetype]))
@@ -993,11 +1018,18 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                         * pages to the preferred allocation list. If falling
                         * back for a reclaimable kernel allocation, be more
                         * aggressive about taking ownership of free pages
+                        *
+                        * On the other hand, never change migration
+                        * type of MIGRATE_CMA pageblocks nor move CMA
+                        * pages on different free lists. We don't
+                        * want unmovable pages to be allocated from
+                        * MIGRATE_CMA areas.
                         */
-                       if (unlikely(current_order >= (pageblock_order >> 1)) ||
-                                       start_migratetype == MIGRATE_RECLAIMABLE ||
-                                       page_group_by_mobility_disabled) {
-                               unsigned long pages;
+                       if (!is_migrate_cma(migratetype) &&
+                           (unlikely(current_order >= pageblock_order / 2) ||
+                            start_migratetype == MIGRATE_RECLAIMABLE ||
+                            page_group_by_mobility_disabled)) {
+                               int pages;
                                pages = move_freepages_block(zone, page,
                                                                start_migratetype);
 
@@ -1015,11 +1047,14 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                        rmv_page_order(page);
 
                        /* Take ownership for orders >= pageblock_order */
-                       if (current_order >= pageblock_order)
+                       if (current_order >= pageblock_order &&
+                           !is_migrate_cma(migratetype))
                                change_pageblock_range(page, current_order,
                                                        start_migratetype);
 
-                       expand(zone, page, order, current_order, area, migratetype);
+                       expand(zone, page, order, current_order, area,
+                              is_migrate_cma(migratetype)
+                            ? migratetype : start_migratetype);
 
                        trace_mm_page_alloc_extfrag(page, order, current_order,
                                start_migratetype, migratetype);
@@ -1061,17 +1096,17 @@ retry_reserve:
        return page;
 }
 
-/* 
+/*
  * Obtain a specified number of elements from the buddy allocator, all under
  * a single hold of the lock, for efficiency.  Add them to the supplied list.
  * Returns the number of new pages which were placed at *list.
  */
-static int rmqueue_bulk(struct zone *zone, unsigned int order, 
+static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list,
                        int migratetype, int cold)
 {
-       int i;
-       
+       int mt = migratetype, i;
+
        spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
                struct page *page = __rmqueue(zone, order, migratetype);
@@ -1091,7 +1126,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        list_add(&page->lru, list);
                else
                        list_add_tail(&page->lru, list);
-               set_page_private(page, migratetype);
+               if (IS_ENABLED(CONFIG_CMA)) {
+                       mt = get_pageblock_migratetype(page);
+                       if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
+                               mt = migratetype;
+               }
+               set_page_private(page, mt);
                list = &page->lru;
        }
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
@@ -1371,8 +1411,12 @@ int split_free_page(struct page *page)
 
        if (order >= pageblock_order - 1) {
                struct page *endpage = page + (1 << order) - 1;
-               for (; page < endpage; page += pageblock_nr_pages)
-                       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+               for (; page < endpage; page += pageblock_nr_pages) {
+                       int mt = get_pageblock_migratetype(page);
+                       if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
+                               set_pageblock_migratetype(page,
+                                                         MIGRATE_MOVABLE);
+               }
        }
 
        return 1 << order;
@@ -2086,16 +2130,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 }
 #endif /* CONFIG_COMPACTION */
 
-/* The really slow allocator path where we enter direct reclaim */
-static inline struct page *
-__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
-       struct zonelist *zonelist, enum zone_type high_zoneidx,
-       nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-       int migratetype, unsigned long *did_some_progress)
+/* Perform direct synchronous page reclaim */
+static int
+__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
+                 nodemask_t *nodemask)
 {
-       struct page *page = NULL;
        struct reclaim_state reclaim_state;
-       bool drained = false;
+       int progress;
 
        cond_resched();
 
@@ -2106,7 +2147,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
        reclaim_state.reclaimed_slab = 0;
        current->reclaim_state = &reclaim_state;
 
-       *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
+       progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 
        current->reclaim_state = NULL;
        lockdep_clear_current_reclaim_state();
@@ -2114,6 +2155,21 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
        cond_resched();
 
+       return progress;
+}
+
+/* The really slow allocator path where we enter direct reclaim */
+static inline struct page *
+__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+       int migratetype, unsigned long *did_some_progress)
+{
+       struct page *page = NULL;
+       bool drained = false;
+
+       *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+                                              nodemask);
        if (unlikely(!(*did_some_progress)))
                return NULL;
 
@@ -4244,25 +4300,24 @@ static inline void setup_usemap(struct pglist_data *pgdat,
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 
-/* Return a sensible default order for the pageblock size. */
-static inline int pageblock_default_order(void)
-{
-       if (HPAGE_SHIFT > PAGE_SHIFT)
-               return HUGETLB_PAGE_ORDER;
-
-       return MAX_ORDER-1;
-}
-
 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
-static inline void __init set_pageblock_order(unsigned int order)
+static inline void __init set_pageblock_order(void)
 {
+       unsigned int order;
+
        /* Check that pageblock_nr_pages has not already been setup */
        if (pageblock_order)
                return;
 
+       if (HPAGE_SHIFT > PAGE_SHIFT)
+               order = HUGETLB_PAGE_ORDER;
+       else
+               order = MAX_ORDER - 1;
+
        /*
         * Assume the largest contiguous order of interest is a huge page.
-        * This value may be variable depending on boot parameters on IA64
+        * This value may be variable depending on boot parameters on IA64 and
+        * powerpc.
         */
        pageblock_order = order;
 }
@@ -4270,15 +4325,13 @@ static inline void __init set_pageblock_order(unsigned int order)
 
 /*
  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
- * and pageblock_default_order() are unused as pageblock_order is set
- * at compile-time. See include/linux/pageblock-flags.h for the values of
- * pageblock_order based on the kernel config
+ * is unused as pageblock_order is set at compile-time. See
+ * include/linux/pageblock-flags.h for the values of pageblock_order based on
+ * the kernel config
  */
-static inline int pageblock_default_order(unsigned int order)
+static inline void set_pageblock_order(void)
 {
-       return MAX_ORDER-1;
 }
-#define set_pageblock_order(x) do {} while (0)
 
 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
 
@@ -4301,11 +4354,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
        init_waitqueue_head(&pgdat->kswapd_wait);
        pgdat->kswapd_max_order = 0;
        pgdat_page_cgroup_init(pgdat);
-       
+
        for (j = 0; j < MAX_NR_ZONES; j++) {
                struct zone *zone = pgdat->node_zones + j;
                unsigned long size, realsize, memmap_pages;
-               enum lru_list lru;
 
                size = zone_spanned_pages_in_node(nid, j, zones_size);
                realsize = size - zone_absent_pages_in_node(nid, j,
@@ -4355,18 +4407,13 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                zone->zone_pgdat = pgdat;
 
                zone_pcp_init(zone);
-               for_each_lru(lru)
-                       INIT_LIST_HEAD(&zone->lruvec.lists[lru]);
-               zone->reclaim_stat.recent_rotated[0] = 0;
-               zone->reclaim_stat.recent_rotated[1] = 0;
-               zone->reclaim_stat.recent_scanned[0] = 0;
-               zone->reclaim_stat.recent_scanned[1] = 0;
+               lruvec_init(&zone->lruvec, zone);
                zap_zone_vm_stats(zone);
                zone->flags = 0;
                if (!size)
                        continue;
 
-               set_pageblock_order(pageblock_default_order());
+               set_pageblock_order();
                setup_usemap(pgdat, zone, size);
                ret = init_currently_empty_zone(zone, zone_start_pfn,
                                                size, MEMMAP_EARLY);
@@ -4759,7 +4806,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        find_zone_movable_pfns_for_nodes();
 
        /* Print out the zone ranges */
-       printk("Zone PFN ranges:\n");
+       printk("Zone ranges:\n");
        for (i = 0; i < MAX_NR_ZONES; i++) {
                if (i == ZONE_MOVABLE)
                        continue;
@@ -4768,22 +4815,25 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
                                arch_zone_highest_possible_pfn[i])
                        printk(KERN_CONT "empty\n");
                else
-                       printk(KERN_CONT "%0#10lx -> %0#10lx\n",
-                               arch_zone_lowest_possible_pfn[i],
-                               arch_zone_highest_possible_pfn[i]);
+                       printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
+                               arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
+                               (arch_zone_highest_possible_pfn[i]
+                                       << PAGE_SHIFT) - 1);
        }
 
        /* Print out the PFNs ZONE_MOVABLE begins at in each node */
-       printk("Movable zone start PFN for each node\n");
+       printk("Movable zone start for each node\n");
        for (i = 0; i < MAX_NUMNODES; i++) {
                if (zone_movable_pfn[i])
-                       printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
+                       printk("  Node %d: %#010lx\n", i,
+                              zone_movable_pfn[i] << PAGE_SHIFT);
        }
 
        /* Print out the early_node_map[] */
-       printk("Early memory PFN ranges\n");
+       printk("Early memory node ranges\n");
        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
-               printk("  %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
+               printk("  node %3d: [mem %#010lx-%#010lx]\n", nid,
+                      start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
 
        /* Initialise every node */
        mminit_verify_pageflags_layout();
@@ -4976,14 +5026,7 @@ static void setup_per_zone_lowmem_reserve(void)
        calculate_totalreserve_pages();
 }
 
-/**
- * setup_per_zone_wmarks - called when min_free_kbytes changes
- * or when memory is hot-{added|removed}
- *
- * Ensures that the watermark[min,low,high] values for each zone are set
- * correctly with respect to min_free_kbytes.
- */
-void setup_per_zone_wmarks(void)
+static void __setup_per_zone_wmarks(void)
 {
        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
        unsigned long lowmem_pages = 0;
@@ -5030,6 +5073,11 @@ void setup_per_zone_wmarks(void)
 
                zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
                zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+
+               zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
+               zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
+               zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
+
                setup_zone_migrate_reserve(zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
@@ -5038,6 +5086,20 @@ void setup_per_zone_wmarks(void)
        calculate_totalreserve_pages();
 }
 
+/**
+ * setup_per_zone_wmarks - called when min_free_kbytes changes
+ * or when memory is hot-{added|removed}
+ *
+ * Ensures that the watermark[min,low,high] values for each zone are set
+ * correctly with respect to min_free_kbytes.
+ */
+void setup_per_zone_wmarks(void)
+{
+       mutex_lock(&zonelists_mutex);
+       __setup_per_zone_wmarks();
+       mutex_unlock(&zonelists_mutex);
+}
+
 /*
  * The inactive anon list should be small enough that the VM never has to
  * do too much work, but large enough that each inactive page has a chance
@@ -5415,14 +5477,16 @@ static int
 __count_immobile_pages(struct zone *zone, struct page *page, int count)
 {
        unsigned long pfn, iter, found;
+       int mt;
+
        /*
         * For avoiding noise data, lru_add_drain_all() should be called
         * If ZONE_MOVABLE, the zone never contains immobile pages
         */
        if (zone_idx(zone) == ZONE_MOVABLE)
                return true;
-
-       if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
+       mt = get_pageblock_migratetype(page);
+       if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
                return true;
 
        pfn = page_to_pfn(page);
@@ -5539,7 +5603,7 @@ out:
        return ret;
 }
 
-void unset_migratetype_isolate(struct page *page)
+void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 {
        struct zone *zone;
        unsigned long flags;
@@ -5547,12 +5611,259 @@ void unset_migratetype_isolate(struct page *page)
        spin_lock_irqsave(&zone->lock, flags);
        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                goto out;
-       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-       move_freepages_block(zone, page, MIGRATE_MOVABLE);
+       set_pageblock_migratetype(page, migratetype);
+       move_freepages_block(zone, page, migratetype);
 out:
        spin_unlock_irqrestore(&zone->lock, flags);
 }
 
+#ifdef CONFIG_CMA
+
+static unsigned long pfn_max_align_down(unsigned long pfn)
+{
+       return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
+                            pageblock_nr_pages) - 1);
+}
+
+static unsigned long pfn_max_align_up(unsigned long pfn)
+{
+       return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
+                               pageblock_nr_pages));
+}
+
+static struct page *
+__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
+                            int **resultp)
+{
+       return alloc_page(GFP_HIGHUSER_MOVABLE);
+}
+
+/* [start, end) must belong to a single zone. */
+static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
+{
+       /* This function is based on compact_zone() from compaction.c. */
+
+       unsigned long pfn = start;
+       unsigned int tries = 0;
+       int ret = 0;
+
+       struct compact_control cc = {
+               .nr_migratepages = 0,
+               .order = -1,
+               .zone = page_zone(pfn_to_page(start)),
+               .sync = true,
+       };
+       INIT_LIST_HEAD(&cc.migratepages);
+
+       migrate_prep_local();
+
+       while (pfn < end || !list_empty(&cc.migratepages)) {
+               if (fatal_signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+
+               if (list_empty(&cc.migratepages)) {
+                       cc.nr_migratepages = 0;
+                       pfn = isolate_migratepages_range(cc.zone, &cc,
+                                                        pfn, end);
+                       if (!pfn) {
+                               ret = -EINTR;
+                               break;
+                       }
+                       tries = 0;
+               } else if (++tries == 5) {
+                       ret = ret < 0 ? ret : -EBUSY;
+                       break;
+               }
+
+               ret = migrate_pages(&cc.migratepages,
+                                   __alloc_contig_migrate_alloc,
+                                   0, false, MIGRATE_SYNC);
+       }
+
+       putback_lru_pages(&cc.migratepages);
+       return ret > 0 ? 0 : ret;
+}
+
+/*
+ * Update zone's cma pages counter used for watermark level calculation.
+ */
+static inline void __update_cma_watermarks(struct zone *zone, int count)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&zone->lock, flags);
+       zone->min_cma_pages += count;
+       spin_unlock_irqrestore(&zone->lock, flags);
+       setup_per_zone_wmarks();
+}
+
+/*
+ * Trigger memory pressure bump to reclaim some pages in order to be able to
+ * allocate 'count' pages in single page units. Does similar work as
+ *__alloc_pages_slowpath() function.
+ */
+static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
+{
+       enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+       struct zonelist *zonelist = node_zonelist(0, gfp_mask);
+       int did_some_progress = 0;
+       int order = 1;
+
+       /*
+        * Increase level of watermarks to force kswapd do his job
+        * to stabilise at new watermark level.
+        */
+       __update_cma_watermarks(zone, count);
+
+       /* Obey watermarks as if the page was being allocated */
+       while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
+               wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
+
+               did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+                                                     NULL);
+               if (!did_some_progress) {
+                       /* Exhausted what can be done so it's blamo time */
+                       out_of_memory(zonelist, gfp_mask, order, NULL, false);
+               }
+       }
+
+       /* Restore original watermark levels. */
+       __update_cma_watermarks(zone, -count);
+
+       return count;
+}
+
+/**
+ * alloc_contig_range() -- tries to allocate given range of pages
+ * @start:     start PFN to allocate
+ * @end:       one-past-the-last PFN to allocate
+ * @migratetype:       migratetype of the underlaying pageblocks (either
+ *                     #MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
+ *                     in range must have the same migratetype and it must
+ *                     be either of the two.
+ *
+ * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
+ * aligned, however it's the caller's responsibility to guarantee that
+ * we are the only thread that changes migrate type of pageblocks the
+ * pages fall in.
+ *
+ * The PFN range must belong to a single zone.
+ *
+ * Returns zero on success or negative error code.  On success all
+ * pages which PFN is in [start, end) are allocated for the caller and
+ * need to be freed with free_contig_range().
+ */
+int alloc_contig_range(unsigned long start, unsigned long end,
+                      unsigned migratetype)
+{
+       struct zone *zone = page_zone(pfn_to_page(start));
+       unsigned long outer_start, outer_end;
+       int ret = 0, order;
+
+       /*
+        * What we do here is we mark all pageblocks in range as
+        * MIGRATE_ISOLATE.  Because pageblock and max order pages may
+        * have different sizes, and due to the way page allocator
+        * work, we align the range to biggest of the two pages so
+        * that page allocator won't try to merge buddies from
+        * different pageblocks and change MIGRATE_ISOLATE to some
+        * other migration type.
+        *
+        * Once the pageblocks are marked as MIGRATE_ISOLATE, we
+        * migrate the pages from an unaligned range (ie. pages that
+        * we are interested in).  This will put all the pages in
+        * range back to page allocator as MIGRATE_ISOLATE.
+        *
+        * When this is done, we take the pages in range from page
+        * allocator removing them from the buddy system.  This way
+        * page allocator will never consider using them.
+        *
+        * This lets us mark the pageblocks back as
+        * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
+        * aligned range but not in the unaligned, original range are
+        * put back to page allocator so that buddy can use them.
+        */
+
+       ret = start_isolate_page_range(pfn_max_align_down(start),
+                                      pfn_max_align_up(end), migratetype);
+       if (ret)
+               goto done;
+
+       ret = __alloc_contig_migrate_range(start, end);
+       if (ret)
+               goto done;
+
+       /*
+        * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
+        * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
+        * more, all pages in [start, end) are free in page allocator.
+        * What we are going to do is to allocate all pages from
+        * [start, end) (that is remove them from page allocator).
+        *
+        * The only problem is that pages at the beginning and at the
+        * end of interesting range may be not aligned with pages that
+        * page allocator holds, ie. they can be part of higher order
+        * pages.  Because of this, we reserve the bigger range and
+        * once this is done free the pages we are not interested in.
+        *
+        * We don't have to hold zone->lock here because the pages are
+        * isolated thus they won't get removed from buddy.
+        */
+
+       lru_add_drain_all();
+       drain_all_pages();
+
+       order = 0;
+       outer_start = start;
+       while (!PageBuddy(pfn_to_page(outer_start))) {
+               if (++order >= MAX_ORDER) {
+                       ret = -EBUSY;
+                       goto done;
+               }
+               outer_start &= ~0UL << order;
+       }
+
+       /* Make sure the range is really isolated. */
+       if (test_pages_isolated(outer_start, end)) {
+               pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
+                      outer_start, end);
+               ret = -EBUSY;
+               goto done;
+       }
+
+       /*
+        * Reclaim enough pages to make sure that contiguous allocation
+        * will not starve the system.
+        */
+       __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
+
+       /* Grab isolated pages from freelists. */
+       outer_end = isolate_freepages_range(outer_start, end);
+       if (!outer_end) {
+               ret = -EBUSY;
+               goto done;
+       }
+
+       /* Free head and tail (if any) */
+       if (start != outer_start)
+               free_contig_range(outer_start, start - outer_start);
+       if (end != outer_end)
+               free_contig_range(end, outer_end - end);
+
+done:
+       undo_isolate_page_range(pfn_max_align_down(start),
+                               pfn_max_align_up(end), migratetype);
+       return ret;
+}
+
+void free_contig_range(unsigned long pfn, unsigned nr_pages)
+{
+       for (; nr_pages--; ++pfn)
+               __free_page(pfn_to_page(pfn));
+}
+#endif
+
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /*
  * All pages in the range must be isolated before calling this.
@@ -5621,7 +5932,7 @@ bool is_free_buddy_page(struct page *page)
 }
 #endif
 
-static struct trace_print_flags pageflag_names[] = {
+static const struct trace_print_flags pageflag_names[] = {
        {1UL << PG_locked,              "locked"        },
        {1UL << PG_error,               "error"         },
        {1UL << PG_referenced,          "referenced"    },
@@ -5656,7 +5967,9 @@ static struct trace_print_flags pageflag_names[] = {
 #ifdef CONFIG_MEMORY_FAILURE
        {1UL << PG_hwpoison,            "hwpoison"      },
 #endif
-       {-1UL,                          NULL            },
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       {1UL << PG_compound_lock,       "compound_lock" },
+#endif
 };
 
 static void dump_page_flags(unsigned long flags)
@@ -5665,12 +5978,14 @@ static void dump_page_flags(unsigned long flags)
        unsigned long mask;
        int i;
 
+       BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
+
        printk(KERN_ALERT "page flags: %#lx(", flags);
 
        /* remove zone id */
        flags &= (1UL << NR_PAGEFLAGS) - 1;
 
-       for (i = 0; pageflag_names[i].name && flags; i++) {
+       for (i = 0; i < ARRAY_SIZE(pageflag_names) && flags; i++) {
 
                mask = pageflag_names[i].mask;
                if ((flags & mask) != mask)
index 4ae42bb4089241d146d8a62429cebc600fd7d14e..c9f04774f2b8117354bc6f38230bf94e645f701b 100644 (file)
@@ -24,6 +24,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  * to be MIGRATE_ISOLATE.
  * @start_pfn: The lower PFN of the range to be isolated.
  * @end_pfn: The upper PFN of the range to be isolated.
+ * @migratetype: migrate type to set in error recovery.
  *
  * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
  * the range will never be allocated. Any free pages and pages freed in the
@@ -32,8 +33,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  * start_pfn/end_pfn must be aligned to pageblock_order.
  * Returns 0 on success and -EBUSY if any part of range cannot be isolated.
  */
-int
-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
+int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+                            unsigned migratetype)
 {
        unsigned long pfn;
        unsigned long undo_pfn;
@@ -56,7 +57,7 @@ undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages)
-               unset_migratetype_isolate(pfn_to_page(pfn));
+               unset_migratetype_isolate(pfn_to_page(pfn), migratetype);
 
        return -EBUSY;
 }
@@ -64,8 +65,8 @@ undo:
 /*
  * Make isolated pages available again.
  */
-int
-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
+int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+                           unsigned migratetype)
 {
        unsigned long pfn;
        struct page *page;
@@ -77,7 +78,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        continue;
-               unset_migratetype_isolate(page);
+               unset_migratetype_isolate(page, migratetype);
        }
        return 0;
 }
@@ -86,7 +87,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
  * all pages in [start_pfn...end_pfn) must be in the same zone.
  * zone->lock must be held before call this.
  *
- * Returns 1 if all pages in the range is isolated.
+ * Returns 1 if all pages in the range are isolated.
  */
 static int
 __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
index 5a74fea182f152bd2fee6182e6b06f1dbaa4f922..74c0ddaa6fa0df019c590994fa79b136dd1b4abd 100644 (file)
@@ -109,8 +109,8 @@ pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
 
 #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
-                          pmd_t *pmdp)
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+                         pmd_t *pmdp)
 {
        pmd_t pmd = pmd_mksplitting(*pmdp);
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
index c20ff48994c29050953c79fcdb0633e690bb653e..926b466497492f3f8463ebc623adc4fbddf9547a 100644 (file)
@@ -371,15 +371,15 @@ static ssize_t process_vm_rw(pid_t pid,
        /* Check iovecs */
        if (vm_write)
                rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
-                                          iovstack_l, &iov_l, 1);
+                                          iovstack_l, &iov_l);
        else
                rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
-                                          iovstack_l, &iov_l, 1);
+                                          iovstack_l, &iov_l);
        if (rc <= 0)
                goto free_iovecs;
 
-       rc = rw_copy_check_uvector(READ, rvec, riovcnt, UIO_FASTIOV,
-                                  iovstack_r, &iov_r, 0);
+       rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
+                                  iovstack_r, &iov_r);
        if (rc <= 0)
                goto free_iovecs;
 
@@ -438,16 +438,16 @@ compat_process_vm_rw(compat_pid_t pid,
        if (vm_write)
                rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
                                                  UIO_FASTIOV, iovstack_l,
-                                                 &iov_l, 1);
+                                                 &iov_l);
        else
                rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
                                                  UIO_FASTIOV, iovstack_l,
-                                                 &iov_l, 1);
+                                                 &iov_l);
        if (rc <= 0)
                goto free_iovecs;
-       rc = compat_rw_copy_check_uvector(READ, rvec, riovcnt,
+       rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
                                          UIO_FASTIOV, iovstack_r,
-                                         &iov_r, 0);
+                                         &iov_r);
        if (rc <= 0)
                goto free_iovecs;
 
index cbcbb02f3e28ab9667d196e6316588856206df16..ea8f8fa21649d7069543e19e0e48ae38d63c175e 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/pagevec.h>
 #include <linux/pagemap.h>
+#include <linux/syscalls.h>
+#include <linux/file.h>
 
 /*
  * Initialise a struct file's readahead state.  Assumes that the caller has
@@ -562,3 +564,41 @@ page_cache_async_readahead(struct address_space *mapping,
        ondemand_readahead(mapping, ra, filp, true, offset, req_size);
 }
 EXPORT_SYMBOL_GPL(page_cache_async_readahead);
+
+static ssize_t
+do_readahead(struct address_space *mapping, struct file *filp,
+            pgoff_t index, unsigned long nr)
+{
+       if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
+               return -EINVAL;
+
+       force_page_cache_readahead(mapping, filp, index, nr);
+       return 0;
+}
+
+SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
+{
+       ssize_t ret;
+       struct file *file;
+
+       ret = -EBADF;
+       file = fget(fd);
+       if (file) {
+               if (file->f_mode & FMODE_READ) {
+                       struct address_space *mapping = file->f_mapping;
+                       pgoff_t start = offset >> PAGE_CACHE_SHIFT;
+                       pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
+                       unsigned long len = end - start + 1;
+                       ret = do_readahead(mapping, file, start, len);
+               }
+               fput(file);
+       }
+       return ret;
+}
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
+{
+       return SYSC_readahead((int) fd, offset, (size_t) count);
+}
+SYSCALL_ALIAS(sys_readahead, SyS_readahead);
+#endif
index 5b5ad584ffb7dd7c9e00a885a018aaadba22f371..0f3b7cda2a24c5705ea4ad6e7ef127f53fdf3633 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -755,12 +755,6 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                pte_unmap_unlock(pte, ptl);
        }
 
-       /* Pretend the page is referenced if the task has the
-          swap token and is in the middle of a page fault. */
-       if (mm != current->mm && has_swap_token(mm) &&
-                       rwsem_is_locked(&mm->mmap_sem))
-               referenced++;
-
        (*mapcount)--;
 
        if (referenced)
index d7b433a1ef5e2537dd6c36699d9635d45999e38d..585bd220a21ee4e5eefaec2b9c46dc9ae68fde98 100644 (file)
@@ -53,6 +53,7 @@ static struct vfsmount *shm_mnt;
 #include <linux/blkdev.h>
 #include <linux/pagevec.h>
 #include <linux/percpu_counter.h>
+#include <linux/falloc.h>
 #include <linux/splice.h>
 #include <linux/security.h>
 #include <linux/swapops.h>
@@ -83,12 +84,25 @@ struct shmem_xattr {
        char value[0];
 };
 
+/*
+ * shmem_fallocate and shmem_writepage communicate via inode->i_private
+ * (with i_mutex making sure that it has only one user at a time):
+ * we would prefer not to enlarge the shmem inode just for that.
+ */
+struct shmem_falloc {
+       pgoff_t start;          /* start of range currently being fallocated */
+       pgoff_t next;           /* the next page offset to be fallocated */
+       pgoff_t nr_falloced;    /* how many new pages have been fallocated */
+       pgoff_t nr_unswapped;   /* how often writepage refused to swap out */
+};
+
 /* Flag allocation requirements to shmem_getpage */
 enum sgp_type {
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_DIRTY,      /* like SGP_CACHE, but set new page dirty */
-       SGP_WRITE,      /* may exceed i_size, may allocate page */
+       SGP_WRITE,      /* may exceed i_size, may allocate !Uptodate page */
+       SGP_FALLOC,     /* like SGP_WRITE, but make existing page Uptodate */
 };
 
 #ifdef CONFIG_TMPFS
@@ -103,6 +117,9 @@ static unsigned long shmem_default_max_inodes(void)
 }
 #endif
 
+static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
+static int shmem_replace_page(struct page **pagep, gfp_t gfp,
+                               struct shmem_inode_info *info, pgoff_t index);
 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
        struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
 
@@ -423,27 +440,31 @@ void shmem_unlock_mapping(struct address_space *mapping)
 
 /*
  * Remove range of pages and swap entries from radix tree, and free them.
+ * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
  */
-void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+                                                                bool unfalloc)
 {
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
-       pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
+       pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
+       unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
+       unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        long nr_swaps_freed = 0;
        pgoff_t index;
        int i;
 
-       BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
+       if (lend == -1)
+               end = -1;       /* unsigned, so actually very big */
 
        pagevec_init(&pvec, 0);
        index = start;
-       while (index <= end) {
+       while (index < end) {
                pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+                               min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                                        pvec.pages, indices);
                if (!pvec.nr)
                        break;
@@ -452,10 +473,12 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                        struct page *page = pvec.pages[i];
 
                        index = indices[i];
-                       if (index > end)
+                       if (index >= end)
                                break;
 
                        if (radix_tree_exceptional_entry(page)) {
+                               if (unfalloc)
+                                       continue;
                                nr_swaps_freed += !shmem_free_swap(mapping,
                                                                index, page);
                                continue;
@@ -463,9 +486,11 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 
                        if (!trylock_page(page))
                                continue;
-                       if (page->mapping == mapping) {
-                               VM_BUG_ON(PageWriteback(page));
-                               truncate_inode_page(mapping, page);
+                       if (!unfalloc || !PageUptodate(page)) {
+                               if (page->mapping == mapping) {
+                                       VM_BUG_ON(PageWriteback(page));
+                                       truncate_inode_page(mapping, page);
+                               }
                        }
                        unlock_page(page);
                }
@@ -476,30 +501,47 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                index++;
        }
 
-       if (partial) {
+       if (partial_start) {
                struct page *page = NULL;
                shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
                if (page) {
-                       zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+                       unsigned int top = PAGE_CACHE_SIZE;
+                       if (start > end) {
+                               top = partial_end;
+                               partial_end = 0;
+                       }
+                       zero_user_segment(page, partial_start, top);
+                       set_page_dirty(page);
+                       unlock_page(page);
+                       page_cache_release(page);
+               }
+       }
+       if (partial_end) {
+               struct page *page = NULL;
+               shmem_getpage(inode, end, &page, SGP_READ, NULL);
+               if (page) {
+                       zero_user_segment(page, 0, partial_end);
                        set_page_dirty(page);
                        unlock_page(page);
                        page_cache_release(page);
                }
        }
+       if (start >= end)
+               return;
 
        index = start;
        for ( ; ; ) {
                cond_resched();
                pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+                               min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                                        pvec.pages, indices);
                if (!pvec.nr) {
-                       if (index == start)
+                       if (index == start || unfalloc)
                                break;
                        index = start;
                        continue;
                }
-               if (index == start && indices[0] > end) {
+               if ((index == start || unfalloc) && indices[0] >= end) {
                        shmem_deswap_pagevec(&pvec);
                        pagevec_release(&pvec);
                        break;
@@ -509,19 +551,23 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                        struct page *page = pvec.pages[i];
 
                        index = indices[i];
-                       if (index > end)
+                       if (index >= end)
                                break;
 
                        if (radix_tree_exceptional_entry(page)) {
+                               if (unfalloc)
+                                       continue;
                                nr_swaps_freed += !shmem_free_swap(mapping,
                                                                index, page);
                                continue;
                        }
 
                        lock_page(page);
-                       if (page->mapping == mapping) {
-                               VM_BUG_ON(PageWriteback(page));
-                               truncate_inode_page(mapping, page);
+                       if (!unfalloc || !PageUptodate(page)) {
+                               if (page->mapping == mapping) {
+                                       VM_BUG_ON(PageWriteback(page));
+                                       truncate_inode_page(mapping, page);
+                               }
                        }
                        unlock_page(page);
                }
@@ -535,7 +581,11 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
        info->swapped -= nr_swaps_freed;
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);
+}
 
+void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+{
+       shmem_undo_range(inode, lstart, lend, false);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
 }
 EXPORT_SYMBOL_GPL(shmem_truncate_range);
@@ -597,19 +647,20 @@ static void shmem_evict_inode(struct inode *inode)
        }
        BUG_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
-       end_writeback(inode);
+       clear_inode(inode);
 }
 
 /*
  * If swap found in inode, free it and move page from swapcache to filecache.
  */
 static int shmem_unuse_inode(struct shmem_inode_info *info,
-                            swp_entry_t swap, struct page *page)
+                            swp_entry_t swap, struct page **pagep)
 {
        struct address_space *mapping = info->vfs_inode.i_mapping;
        void *radswap;
        pgoff_t index;
-       int error;
+       gfp_t gfp;
+       int error = 0;
 
        radswap = swp_to_radix_entry(swap);
        index = radix_tree_locate_item(&mapping->page_tree, radswap);
@@ -625,22 +676,37 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
        if (shmem_swaplist.next != &info->swaplist)
                list_move_tail(&shmem_swaplist, &info->swaplist);
 
+       gfp = mapping_gfp_mask(mapping);
+       if (shmem_should_replace_page(*pagep, gfp)) {
+               mutex_unlock(&shmem_swaplist_mutex);
+               error = shmem_replace_page(pagep, gfp, info, index);
+               mutex_lock(&shmem_swaplist_mutex);
+               /*
+                * We needed to drop mutex to make that restrictive page
+                * allocation; but the inode might already be freed by now,
+                * and we cannot refer to inode or mapping or info to check.
+                * However, we do hold page lock on the PageSwapCache page,
+                * so can check if that still has our reference remaining.
+                */
+               if (!page_swapcount(*pagep))
+                       error = -ENOENT;
+       }
+
        /*
         * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
         * but also to hold up shmem_evict_inode(): so inode cannot be freed
         * beneath us (pagelock doesn't help until the page is in pagecache).
         */
-       error = shmem_add_to_page_cache(page, mapping, index,
+       if (!error)
+               error = shmem_add_to_page_cache(*pagep, mapping, index,
                                                GFP_NOWAIT, radswap);
-       /* which does mem_cgroup_uncharge_cache_page on error */
-
        if (error != -ENOMEM) {
                /*
                 * Truncation and eviction use free_swap_and_cache(), which
                 * only does trylock page: if we raced, best clean up here.
                 */
-               delete_from_swap_cache(page);
-               set_page_dirty(page);
+               delete_from_swap_cache(*pagep);
+               set_page_dirty(*pagep);
                if (!error) {
                        spin_lock(&info->lock);
                        info->swapped--;
@@ -660,7 +726,14 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
        struct list_head *this, *next;
        struct shmem_inode_info *info;
        int found = 0;
-       int error;
+       int error = 0;
+
+       /*
+        * There's a faint possibility that swap page was replaced before
+        * caller locked it: it will come back later with the right page.
+        */
+       if (unlikely(!PageSwapCache(page)))
+               goto out;
 
        /*
         * Charge page using GFP_KERNEL while we can wait, before taking
@@ -676,7 +749,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
        list_for_each_safe(this, next, &shmem_swaplist) {
                info = list_entry(this, struct shmem_inode_info, swaplist);
                if (info->swapped)
-                       found = shmem_unuse_inode(info, swap, page);
+                       found = shmem_unuse_inode(info, swap, &page);
                else
                        list_del_init(&info->swaplist);
                cond_resched();
@@ -685,8 +758,6 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
        }
        mutex_unlock(&shmem_swaplist_mutex);
 
-       if (!found)
-               mem_cgroup_uncharge_cache_page(page);
        if (found < 0)
                error = found;
 out:
@@ -727,6 +798,38 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
                goto redirty;
        }
+
+       /*
+        * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
+        * value into swapfile.c, the only way we can correctly account for a
+        * fallocated page arriving here is now to initialize it and write it.
+        *
+        * That's okay for a page already fallocated earlier, but if we have
+        * not yet completed the fallocation, then (a) we want to keep track
+        * of this page in case we have to undo it, and (b) it may not be a
+        * good idea to continue anyway, once we're pushing into swap.  So
+        * reactivate the page, and let shmem_fallocate() quit when too many.
+        */
+       if (!PageUptodate(page)) {
+               if (inode->i_private) {
+                       struct shmem_falloc *shmem_falloc;
+                       spin_lock(&inode->i_lock);
+                       shmem_falloc = inode->i_private;
+                       if (shmem_falloc &&
+                           index >= shmem_falloc->start &&
+                           index < shmem_falloc->next)
+                               shmem_falloc->nr_unswapped++;
+                       else
+                               shmem_falloc = NULL;
+                       spin_unlock(&inode->i_lock);
+                       if (shmem_falloc)
+                               goto redirty;
+               }
+               clear_highpage(page);
+               flush_dcache_page(page);
+               SetPageUptodate(page);
+       }
+
        swap = get_swap_page();
        if (!swap.val)
                goto redirty;
@@ -855,6 +958,84 @@ static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
 }
 #endif
 
+/*
+ * When a page is moved from swapcache to shmem filecache (either by the
+ * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
+ * shmem_unuse_inode()), it may have been read in earlier from swap, in
+ * ignorance of the mapping it belongs to.  If that mapping has special
+ * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
+ * we may need to copy to a suitable page before moving to filecache.
+ *
+ * In a future release, this may well be extended to respect cpuset and
+ * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
+ * but for now it is a simple matter of zone.
+ */
+static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
+{
+       return page_zonenum(page) > gfp_zone(gfp);
+}
+
+static int shmem_replace_page(struct page **pagep, gfp_t gfp,
+                               struct shmem_inode_info *info, pgoff_t index)
+{
+       struct page *oldpage, *newpage;
+       struct address_space *swap_mapping;
+       pgoff_t swap_index;
+       int error;
+
+       oldpage = *pagep;
+       swap_index = page_private(oldpage);
+       swap_mapping = page_mapping(oldpage);
+
+       /*
+        * We have arrived here because our zones are constrained, so don't
+        * limit chance of success by further cpuset and node constraints.
+        */
+       gfp &= ~GFP_CONSTRAINT_MASK;
+       newpage = shmem_alloc_page(gfp, info, index);
+       if (!newpage)
+               return -ENOMEM;
+       VM_BUG_ON(shmem_should_replace_page(newpage, gfp));
+
+       *pagep = newpage;
+       page_cache_get(newpage);
+       copy_highpage(newpage, oldpage);
+
+       VM_BUG_ON(!PageLocked(oldpage));
+       __set_page_locked(newpage);
+       VM_BUG_ON(!PageUptodate(oldpage));
+       SetPageUptodate(newpage);
+       VM_BUG_ON(!PageSwapBacked(oldpage));
+       SetPageSwapBacked(newpage);
+       VM_BUG_ON(!swap_index);
+       set_page_private(newpage, swap_index);
+       VM_BUG_ON(!PageSwapCache(oldpage));
+       SetPageSwapCache(newpage);
+
+       /*
+        * Our caller will very soon move newpage out of swapcache, but it's
+        * a nice clean interface for us to replace oldpage by newpage there.
+        */
+       spin_lock_irq(&swap_mapping->tree_lock);
+       error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
+                                                                  newpage);
+       __inc_zone_page_state(newpage, NR_FILE_PAGES);
+       __dec_zone_page_state(oldpage, NR_FILE_PAGES);
+       spin_unlock_irq(&swap_mapping->tree_lock);
+       BUG_ON(error);
+
+       mem_cgroup_replace_page_cache(oldpage, newpage);
+       lru_cache_add_anon(newpage);
+
+       ClearPageSwapCache(oldpage);
+       set_page_private(oldpage, 0);
+
+       unlock_page(oldpage);
+       page_cache_release(oldpage);
+       page_cache_release(oldpage);
+       return 0;
+}
+
 /*
  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
  *
@@ -872,6 +1053,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
        swp_entry_t swap;
        int error;
        int once = 0;
+       int alloced = 0;
 
        if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
                return -EFBIG;
@@ -883,19 +1065,21 @@ repeat:
                page = NULL;
        }
 
-       if (sgp != SGP_WRITE &&
+       if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
            ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
                error = -EINVAL;
                goto failed;
        }
 
+       /* fallocated page? */
+       if (page && !PageUptodate(page)) {
+               if (sgp != SGP_READ)
+                       goto clear;
+               unlock_page(page);
+               page_cache_release(page);
+               page = NULL;
+       }
        if (page || (sgp == SGP_READ && !swap.val)) {
-               /*
-                * Once we can get the page lock, it must be uptodate:
-                * if there were an error in reading back from swap,
-                * the page would not be inserted into the filecache.
-                */
-               BUG_ON(page && !PageUptodate(page));
                *pagep = page;
                return 0;
        }
@@ -923,19 +1107,20 @@ repeat:
 
                /* We have to do this with page locked to prevent races */
                lock_page(page);
+               if (!PageSwapCache(page) || page->mapping) {
+                       error = -EEXIST;        /* try again */
+                       goto failed;
+               }
                if (!PageUptodate(page)) {
                        error = -EIO;
                        goto failed;
                }
                wait_on_page_writeback(page);
 
-               /* Someone may have already done it for us */
-               if (page->mapping) {
-                       if (page->mapping == mapping &&
-                           page->index == index)
-                               goto done;
-                       error = -EEXIST;
-                       goto failed;
+               if (shmem_should_replace_page(page, gfp)) {
+                       error = shmem_replace_page(&page, gfp, info, index);
+                       if (error)
+                               goto failed;
                }
 
                error = mem_cgroup_cache_charge(page, current->mm,
@@ -991,19 +1176,36 @@ repeat:
                inode->i_blocks += BLOCKS_PER_PAGE;
                shmem_recalc_inode(inode);
                spin_unlock(&info->lock);
+               alloced = true;
 
-               clear_highpage(page);
-               flush_dcache_page(page);
-               SetPageUptodate(page);
+               /*
+                * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
+                */
+               if (sgp == SGP_FALLOC)
+                       sgp = SGP_WRITE;
+clear:
+               /*
+                * Let SGP_WRITE caller clear ends if write does not fill page;
+                * but SGP_FALLOC on a page fallocated earlier must initialize
+                * it now, lest undo on failure cancel our earlier guarantee.
+                */
+               if (sgp != SGP_WRITE) {
+                       clear_highpage(page);
+                       flush_dcache_page(page);
+                       SetPageUptodate(page);
+               }
                if (sgp == SGP_DIRTY)
                        set_page_dirty(page);
        }
-done:
+
        /* Perhaps the file has been truncated since we checked */
-       if (sgp != SGP_WRITE &&
+       if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
            ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
                error = -EINVAL;
-               goto trunc;
+               if (alloced)
+                       goto trunc;
+               else
+                       goto failed;
        }
        *pagep = page;
        return 0;
@@ -1012,6 +1214,7 @@ done:
         * Error recovery.
         */
 trunc:
+       info = SHMEM_I(inode);
        ClearPageDirty(page);
        delete_from_page_cache(page);
        spin_lock(&info->lock);
@@ -1019,6 +1222,7 @@ trunc:
        inode->i_blocks -= BLOCKS_PER_PAGE;
        spin_unlock(&info->lock);
 decused:
+       sbinfo = SHMEM_SB(inode->i_sb);
        if (sbinfo->max_blocks)
                percpu_counter_add(&sbinfo->used_blocks, -1);
 unacct:
@@ -1204,6 +1408,14 @@ shmem_write_end(struct file *file, struct address_space *mapping,
        if (pos + copied > inode->i_size)
                i_size_write(inode, pos + copied);
 
+       if (!PageUptodate(page)) {
+               if (copied < PAGE_CACHE_SIZE) {
+                       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+                       zero_user_segments(page, 0, from,
+                                       from + copied, PAGE_CACHE_SIZE);
+               }
+               SetPageUptodate(page);
+       }
        set_page_dirty(page);
        unlock_page(page);
        page_cache_release(page);
@@ -1462,6 +1674,199 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        return error;
 }
 
+/*
+ * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
+ */
+static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
+                                   pgoff_t index, pgoff_t end, int origin)
+{
+       struct page *page;
+       struct pagevec pvec;
+       pgoff_t indices[PAGEVEC_SIZE];
+       bool done = false;
+       int i;
+
+       pagevec_init(&pvec, 0);
+       pvec.nr = 1;            /* start small: we may be there already */
+       while (!done) {
+               pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+                                       pvec.nr, pvec.pages, indices);
+               if (!pvec.nr) {
+                       if (origin == SEEK_DATA)
+                               index = end;
+                       break;
+               }
+               for (i = 0; i < pvec.nr; i++, index++) {
+                       if (index < indices[i]) {
+                               if (origin == SEEK_HOLE) {
+                                       done = true;
+                                       break;
+                               }
+                               index = indices[i];
+                       }
+                       page = pvec.pages[i];
+                       if (page && !radix_tree_exceptional_entry(page)) {
+                               if (!PageUptodate(page))
+                                       page = NULL;
+                       }
+                       if (index >= end ||
+                           (page && origin == SEEK_DATA) ||
+                           (!page && origin == SEEK_HOLE)) {
+                               done = true;
+                               break;
+                       }
+               }
+               shmem_deswap_pagevec(&pvec);
+               pagevec_release(&pvec);
+               pvec.nr = PAGEVEC_SIZE;
+               cond_resched();
+       }
+       return index;
+}
+
+static loff_t shmem_file_llseek(struct file *file, loff_t offset, int origin)
+{
+       struct address_space *mapping;
+       struct inode *inode;
+       pgoff_t start, end;
+       loff_t new_offset;
+
+       if (origin != SEEK_DATA && origin != SEEK_HOLE)
+               return generic_file_llseek_size(file, offset, origin,
+                                                       MAX_LFS_FILESIZE);
+       mapping = file->f_mapping;
+       inode = mapping->host;
+       mutex_lock(&inode->i_mutex);
+       /* We're holding i_mutex so we can access i_size directly */
+
+       if (offset < 0)
+               offset = -EINVAL;
+       else if (offset >= inode->i_size)
+               offset = -ENXIO;
+       else {
+               start = offset >> PAGE_CACHE_SHIFT;
+               end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+               new_offset = shmem_seek_hole_data(mapping, start, end, origin);
+               new_offset <<= PAGE_CACHE_SHIFT;
+               if (new_offset > offset) {
+                       if (new_offset < inode->i_size)
+                               offset = new_offset;
+                       else if (origin == SEEK_DATA)
+                               offset = -ENXIO;
+                       else
+                               offset = inode->i_size;
+               }
+       }
+
+       if (offset >= 0 && offset != file->f_pos) {
+               file->f_pos = offset;
+               file->f_version = 0;
+       }
+       mutex_unlock(&inode->i_mutex);
+       return offset;
+}
+
+static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+                                                        loff_t len)
+{
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+       struct shmem_falloc shmem_falloc;
+       pgoff_t start, index, end;
+       int error;
+
+       mutex_lock(&inode->i_mutex);
+
+       if (mode & FALLOC_FL_PUNCH_HOLE) {
+               struct address_space *mapping = file->f_mapping;
+               loff_t unmap_start = round_up(offset, PAGE_SIZE);
+               loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+
+               if ((u64)unmap_end > (u64)unmap_start)
+                       unmap_mapping_range(mapping, unmap_start,
+                                           1 + unmap_end - unmap_start, 0);
+               shmem_truncate_range(inode, offset, offset + len - 1);
+               /* No need to unmap again: hole-punching leaves COWed pages */
+               error = 0;
+               goto out;
+       }
+
+       /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
+       error = inode_newsize_ok(inode, offset + len);
+       if (error)
+               goto out;
+
+       start = offset >> PAGE_CACHE_SHIFT;
+       end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       /* Try to avoid a swapstorm if len is impossible to satisfy */
+       if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
+               error = -ENOSPC;
+               goto out;
+       }
+
+       shmem_falloc.start = start;
+       shmem_falloc.next  = start;
+       shmem_falloc.nr_falloced = 0;
+       shmem_falloc.nr_unswapped = 0;
+       spin_lock(&inode->i_lock);
+       inode->i_private = &shmem_falloc;
+       spin_unlock(&inode->i_lock);
+
+       for (index = start; index < end; index++) {
+               struct page *page;
+
+               /*
+                * Good, the fallocate(2) manpage permits EINTR: we may have
+                * been interrupted because we are using up too much memory.
+                */
+               if (signal_pending(current))
+                       error = -EINTR;
+               else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
+                       error = -ENOMEM;
+               else
+                       error = shmem_getpage(inode, index, &page, SGP_FALLOC,
+                                                                       NULL);
+               if (error) {
+                       /* Remove the !PageUptodate pages we added */
+                       shmem_undo_range(inode,
+                               (loff_t)start << PAGE_CACHE_SHIFT,
+                               (loff_t)index << PAGE_CACHE_SHIFT, true);
+                       goto undone;
+               }
+
+               /*
+                * Inform shmem_writepage() how far we have reached.
+                * No need for lock or barrier: we have the page lock.
+                */
+               shmem_falloc.next++;
+               if (!PageUptodate(page))
+                       shmem_falloc.nr_falloced++;
+
+               /*
+                * If !PageUptodate, leave it that way so that freeable pages
+                * can be recognized if we need to rollback on error later.
+                * But set_page_dirty so that memory pressure will swap rather
+                * than free the pages we are allocating (and SGP_CACHE pages
+                * might still be clean: we now need to mark those dirty too).
+                */
+               set_page_dirty(page);
+               unlock_page(page);
+               page_cache_release(page);
+               cond_resched();
+       }
+
+       if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
+               i_size_write(inode, offset + len);
+       inode->i_ctime = CURRENT_TIME;
+undone:
+       spin_lock(&inode->i_lock);
+       inode->i_private = NULL;
+       spin_unlock(&inode->i_lock);
+out:
+       mutex_unlock(&inode->i_mutex);
+       return error;
+}
+
 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
@@ -1665,6 +2070,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
                kaddr = kmap_atomic(page);
                memcpy(kaddr, symname, len);
                kunmap_atomic(kaddr);
+               SetPageUptodate(page);
                set_page_dirty(page);
                unlock_page(page);
                page_cache_release(page);
@@ -2033,11 +2439,9 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
        return dentry;
 }
 
-static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
-                               int connectable)
+static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
+                               struct inode *parent)
 {
-       struct inode *inode = dentry->d_inode;
-
        if (*len < 3) {
                *len = 3;
                return 255;
@@ -2270,6 +2674,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
                }
        }
        sb->s_export_op = &shmem_export_ops;
+       sb->s_flags |= MS_NOSEC;
 #else
        sb->s_flags |= MS_NOUSER;
 #endif
@@ -2364,7 +2769,7 @@ static const struct address_space_operations shmem_aops = {
 static const struct file_operations shmem_file_operations = {
        .mmap           = shmem_mmap,
 #ifdef CONFIG_TMPFS
-       .llseek         = generic_file_llseek,
+       .llseek         = shmem_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = shmem_file_aio_read,
@@ -2372,12 +2777,12 @@ static const struct file_operations shmem_file_operations = {
        .fsync          = noop_fsync,
        .splice_read    = shmem_file_splice_read,
        .splice_write   = generic_file_splice_write,
+       .fallocate      = shmem_fallocate,
 #endif
 };
 
 static const struct inode_operations shmem_inode_operations = {
        .setattr        = shmem_setattr,
-       .truncate_range = shmem_truncate_range,
 #ifdef CONFIG_TMPFS_XATTR
        .setxattr       = shmem_setxattr,
        .getxattr       = shmem_getxattr,
index 80848cd3901cc8ab052f43d52e2415fb3b7f4829..8c691fa1cf3c78a91fa301bcd0cd323df1b28a1e 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1369,7 +1369,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 
        inc_slabs_node(s, page_to_nid(page), page->objects);
        page->slab = s;
-       page->flags |= 1 << PG_slab;
+       __SetPageSlab(page);
 
        start = page_address(page);
 
@@ -1514,15 +1514,19 @@ static inline void *acquire_slab(struct kmem_cache *s,
                freelist = page->freelist;
                counters = page->counters;
                new.counters = counters;
-               if (mode)
+               if (mode) {
                        new.inuse = page->objects;
+                       new.freelist = NULL;
+               } else {
+                       new.freelist = freelist;
+               }
 
                VM_BUG_ON(new.frozen);
                new.frozen = 1;
 
        } while (!__cmpxchg_double_slab(s, page,
                        freelist, counters,
-                       NULL, new.counters,
+                       new.freelist, new.counters,
                        "lock and freeze"));
 
        remove_partial(n, page);
@@ -1564,7 +1568,6 @@ static void *get_partial_node(struct kmem_cache *s,
                        object = t;
                        available =  page->objects - page->inuse;
                } else {
-                       page->freelist = t;
                        available = put_cpu_partial(s, page, 0);
                        stat(s, CPU_PARTIAL_NODE);
                }
@@ -1579,7 +1582,7 @@ static void *get_partial_node(struct kmem_cache *s,
 /*
  * Get a page from somewhere. Search in increasing NUMA distances.
  */
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
                struct kmem_cache_cpu *c)
 {
 #ifdef CONFIG_NUMA
@@ -2766,7 +2769,7 @@ static unsigned long calculate_alignment(unsigned long flags,
 }
 
 static void
-init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
+init_kmem_cache_node(struct kmem_cache_node *n)
 {
        n->nr_partial = 0;
        spin_lock_init(&n->list_lock);
@@ -2836,7 +2839,7 @@ static void early_kmem_cache_node_alloc(int node)
        init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
        init_tracking(kmem_cache_node, n);
 #endif
-       init_kmem_cache_node(n, kmem_cache_node);
+       init_kmem_cache_node(n);
        inc_slabs_node(kmem_cache_node, node, page->objects);
 
        add_partial(n, page, DEACTIVATE_TO_HEAD);
@@ -2876,7 +2879,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
                }
 
                s->node[node] = n;
-               init_kmem_cache_node(n, s);
+               init_kmem_cache_node(n);
        }
        return 1;
 }
@@ -3625,7 +3628,7 @@ static int slab_mem_going_online_callback(void *arg)
                        ret = -ENOMEM;
                        goto out;
                }
-               init_kmem_cache_node(n, s);
+               init_kmem_cache_node(n);
                s->node[nid] = n;
        }
 out:
@@ -3968,9 +3971,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                        }
                        return s;
                }
-               kfree(n);
                kfree(s);
        }
+       kfree(n);
 err:
        up_write(&slub_lock);
 
index a8bc7d364deb0a764cbd28956f1853fbb3ce421c..6a4bf9160e855ae1e2d61fefb4922918f710bb24 100644 (file)
@@ -273,10 +273,10 @@ static unsigned long *__kmalloc_section_usemap(void)
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
-                                        unsigned long count)
+                                        unsigned long size)
 {
-       unsigned long section_nr;
-
+       pg_data_t *host_pgdat;
+       unsigned long goal;
        /*
         * A page may contain usemaps for other sections preventing the
         * page being freed and making a section unremovable while
@@ -287,8 +287,10 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
-       section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
-       return alloc_bootmem_section(usemap_size() * count, section_nr);
+       goal = __pa(pgdat) & PAGE_SECTION_MASK;
+       host_pgdat = NODE_DATA(early_pfn_to_nid(goal >> PAGE_SHIFT));
+       return __alloc_bootmem_node_nopanic(host_pgdat, size,
+                                           SMP_CACHE_BYTES, goal);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -332,9 +334,9 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
 #else
 static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
-                                        unsigned long count)
+                                        unsigned long size)
 {
-       return NULL;
+       return alloc_bootmem_node_nopanic(pgdat, size);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -352,13 +354,10 @@ static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
        int size = usemap_size();
 
        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
-                                                                usemap_count);
+                                                         size * usemap_count);
        if (!usemap) {
-               usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
-               if (!usemap) {
-                       printk(KERN_WARNING "%s: allocation failed\n", __func__);
-                       return;
-               }
+               printk(KERN_WARNING "%s: allocation failed\n", __func__);
+               return;
        }
 
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
index 5c13f13389721fe60756ffb4dabe66d0c1e86e47..4e7e2ec67078f783750eb43e8dc45db5600b1b94 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -47,13 +47,15 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 static void __page_cache_release(struct page *page)
 {
        if (PageLRU(page)) {
-               unsigned long flags;
                struct zone *zone = page_zone(page);
+               struct lruvec *lruvec;
+               unsigned long flags;
 
                spin_lock_irqsave(&zone->lru_lock, flags);
+               lruvec = mem_cgroup_page_lruvec(page, zone);
                VM_BUG_ON(!PageLRU(page));
                __ClearPageLRU(page);
-               del_page_from_lru_list(zone, page, page_off_lru(page));
+               del_page_from_lru_list(page, lruvec, page_off_lru(page));
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        }
 }
@@ -82,6 +84,25 @@ static void put_compound_page(struct page *page)
                if (likely(page != page_head &&
                           get_page_unless_zero(page_head))) {
                        unsigned long flags;
+
+                       /*
+                        * THP can not break up slab pages so avoid taking
+                        * compound_lock().  Slab performs non-atomic bit ops
+                        * on page->flags for better performance.  In particular
+                        * slab_unlock() in slub used to be a hot path.  It is
+                        * still hot on arches that do not support
+                        * this_cpu_cmpxchg_double().
+                        */
+                       if (PageSlab(page_head)) {
+                               if (PageTail(page)) {
+                                       if (put_page_testzero(page_head))
+                                               VM_BUG_ON(1);
+
+                                       atomic_dec(&page->_mapcount);
+                                       goto skip_lock_tail;
+                               } else
+                                       goto skip_lock;
+                       }
                        /*
                         * page_head wasn't a dangling pointer but it
                         * may not be a head page anymore by the time
@@ -92,10 +113,10 @@ static void put_compound_page(struct page *page)
                        if (unlikely(!PageTail(page))) {
                                /* __split_huge_page_refcount run before us */
                                compound_unlock_irqrestore(page_head, flags);
-                               VM_BUG_ON(PageHead(page_head));
+skip_lock:
                                if (put_page_testzero(page_head))
                                        __put_single_page(page_head);
-                       out_put_single:
+out_put_single:
                                if (put_page_testzero(page))
                                        __put_single_page(page);
                                return;
@@ -115,6 +136,8 @@ static void put_compound_page(struct page *page)
                        VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
                        VM_BUG_ON(atomic_read(&page->_count) != 0);
                        compound_unlock_irqrestore(page_head, flags);
+
+skip_lock_tail:
                        if (put_page_testzero(page_head)) {
                                if (PageHead(page_head))
                                        __put_compound_page(page_head);
@@ -162,6 +185,18 @@ bool __get_page_tail(struct page *page)
        struct page *page_head = compound_trans_head(page);
 
        if (likely(page != page_head && get_page_unless_zero(page_head))) {
+
+               /* Ref to put_compound_page() comment. */
+               if (PageSlab(page_head)) {
+                       if (likely(PageTail(page))) {
+                               __get_page_tail_foll(page, false);
+                               return true;
+                       } else {
+                               put_page(page_head);
+                               return false;
+                       }
+               }
+
                /*
                 * page_head wasn't a dangling pointer but it
                 * may not be a head page anymore by the time
@@ -202,11 +237,12 @@ void put_pages_list(struct list_head *pages)
 EXPORT_SYMBOL(put_pages_list);
 
 static void pagevec_lru_move_fn(struct pagevec *pvec,
-                               void (*move_fn)(struct page *page, void *arg),
-                               void *arg)
+       void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
+       void *arg)
 {
        int i;
        struct zone *zone = NULL;
+       struct lruvec *lruvec;
        unsigned long flags = 0;
 
        for (i = 0; i < pagevec_count(pvec); i++) {
@@ -220,7 +256,8 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
                        spin_lock_irqsave(&zone->lru_lock, flags);
                }
 
-               (*move_fn)(page, arg);
+               lruvec = mem_cgroup_page_lruvec(page, zone);
+               (*move_fn)(page, lruvec, arg);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -228,16 +265,13 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
        pagevec_reinit(pvec);
 }
 
-static void pagevec_move_tail_fn(struct page *page, void *arg)
+static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
+                                void *arg)
 {
        int *pgmoved = arg;
 
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                enum lru_list lru = page_lru_base_type(page);
-               struct lruvec *lruvec;
-
-               lruvec = mem_cgroup_lru_move_lists(page_zone(page),
-                                                  page, lru, lru);
                list_move_tail(&page->lru, &lruvec->lists[lru]);
                (*pgmoved)++;
        }
@@ -276,41 +310,30 @@ void rotate_reclaimable_page(struct page *page)
        }
 }
 
-static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+static void update_page_reclaim_stat(struct lruvec *lruvec,
                                     int file, int rotated)
 {
-       struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
-       struct zone_reclaim_stat *memcg_reclaim_stat;
-
-       memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
+       struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 
        reclaim_stat->recent_scanned[file]++;
        if (rotated)
                reclaim_stat->recent_rotated[file]++;
-
-       if (!memcg_reclaim_stat)
-               return;
-
-       memcg_reclaim_stat->recent_scanned[file]++;
-       if (rotated)
-               memcg_reclaim_stat->recent_rotated[file]++;
 }
 
-static void __activate_page(struct page *page, void *arg)
+static void __activate_page(struct page *page, struct lruvec *lruvec,
+                           void *arg)
 {
-       struct zone *zone = page_zone(page);
-
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int file = page_is_file_cache(page);
                int lru = page_lru_base_type(page);
-               del_page_from_lru_list(zone, page, lru);
 
+               del_page_from_lru_list(page, lruvec, lru);
                SetPageActive(page);
                lru += LRU_ACTIVE;
-               add_page_to_lru_list(zone, page, lru);
-               __count_vm_event(PGACTIVATE);
+               add_page_to_lru_list(page, lruvec, lru);
 
-               update_page_reclaim_stat(zone, page, file, 1);
+               __count_vm_event(PGACTIVATE);
+               update_page_reclaim_stat(lruvec, file, 1);
        }
 }
 
@@ -347,7 +370,7 @@ void activate_page(struct page *page)
        struct zone *zone = page_zone(page);
 
        spin_lock_irq(&zone->lru_lock);
-       __activate_page(page, NULL);
+       __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
        spin_unlock_irq(&zone->lru_lock);
 }
 #endif
@@ -414,11 +437,13 @@ void lru_cache_add_lru(struct page *page, enum lru_list lru)
 void add_page_to_unevictable_list(struct page *page)
 {
        struct zone *zone = page_zone(page);
+       struct lruvec *lruvec;
 
        spin_lock_irq(&zone->lru_lock);
+       lruvec = mem_cgroup_page_lruvec(page, zone);
        SetPageUnevictable(page);
        SetPageLRU(page);
-       add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
+       add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
        spin_unlock_irq(&zone->lru_lock);
 }
 
@@ -443,11 +468,11 @@ void add_page_to_unevictable_list(struct page *page)
  * be write it out by flusher threads as this is much more effective
  * than the single-page writeout from reclaim.
  */
-static void lru_deactivate_fn(struct page *page, void *arg)
+static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+                             void *arg)
 {
        int lru, file;
        bool active;
-       struct zone *zone = page_zone(page);
 
        if (!PageLRU(page))
                return;
@@ -460,13 +485,13 @@ static void lru_deactivate_fn(struct page *page, void *arg)
                return;
 
        active = PageActive(page);
-
        file = page_is_file_cache(page);
        lru = page_lru_base_type(page);
-       del_page_from_lru_list(zone, page, lru + active);
+
+       del_page_from_lru_list(page, lruvec, lru + active);
        ClearPageActive(page);
        ClearPageReferenced(page);
-       add_page_to_lru_list(zone, page, lru);
+       add_page_to_lru_list(page, lruvec, lru);
 
        if (PageWriteback(page) || PageDirty(page)) {
                /*
@@ -476,19 +501,17 @@ static void lru_deactivate_fn(struct page *page, void *arg)
                 */
                SetPageReclaim(page);
        } else {
-               struct lruvec *lruvec;
                /*
                 * The page's writeback ends up during pagevec
                 * We moves tha page into tail of inactive.
                 */
-               lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
                list_move_tail(&page->lru, &lruvec->lists[lru]);
                __count_vm_event(PGROTATED);
        }
 
        if (active)
                __count_vm_event(PGDEACTIVATE);
-       update_page_reclaim_stat(zone, page, file, 0);
+       update_page_reclaim_stat(lruvec, file, 0);
 }
 
 /*
@@ -588,6 +611,7 @@ void release_pages(struct page **pages, int nr, int cold)
        int i;
        LIST_HEAD(pages_to_free);
        struct zone *zone = NULL;
+       struct lruvec *lruvec;
        unsigned long uninitialized_var(flags);
 
        for (i = 0; i < nr; i++) {
@@ -615,9 +639,11 @@ void release_pages(struct page **pages, int nr, int cold)
                                zone = pagezone;
                                spin_lock_irqsave(&zone->lru_lock, flags);
                        }
+
+                       lruvec = mem_cgroup_page_lruvec(page, zone);
                        VM_BUG_ON(!PageLRU(page));
                        __ClearPageLRU(page);
-                       del_page_from_lru_list(zone, page, page_off_lru(page));
+                       del_page_from_lru_list(page, lruvec, page_off_lru(page));
                }
 
                list_add(&page->lru, &pages_to_free);
@@ -649,8 +675,8 @@ EXPORT_SYMBOL(__pagevec_release);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* used by __split_huge_page_refcount() */
-void lru_add_page_tail(struct zone* zone,
-                      struct page *page, struct page *page_tail)
+void lru_add_page_tail(struct page *page, struct page *page_tail,
+                      struct lruvec *lruvec)
 {
        int uninitialized_var(active);
        enum lru_list lru;
@@ -659,7 +685,8 @@ void lru_add_page_tail(struct zone* zone,
        VM_BUG_ON(!PageHead(page));
        VM_BUG_ON(PageCompound(page_tail));
        VM_BUG_ON(PageLRU(page_tail));
-       VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
+       VM_BUG_ON(NR_CPUS != 1 &&
+                 !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
 
        SetPageLRU(page_tail);
 
@@ -688,20 +715,20 @@ void lru_add_page_tail(struct zone* zone,
                 * Use the standard add function to put page_tail on the list,
                 * but then correct its position so they all end up in order.
                 */
-               add_page_to_lru_list(zone, page_tail, lru);
+               add_page_to_lru_list(page_tail, lruvec, lru);
                list_head = page_tail->lru.prev;
                list_move_tail(&page_tail->lru, list_head);
        }
 
        if (!PageUnevictable(page))
-               update_page_reclaim_stat(zone, page_tail, file, active);
+               update_page_reclaim_stat(lruvec, file, active);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static void __pagevec_lru_add_fn(struct page *page, void *arg)
+static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
+                                void *arg)
 {
        enum lru_list lru = (enum lru_list)arg;
-       struct zone *zone = page_zone(page);
        int file = is_file_lru(lru);
        int active = is_active_lru(lru);
 
@@ -712,8 +739,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
        SetPageLRU(page);
        if (active)
                SetPageActive(page);
-       add_page_to_lru_list(zone, page, lru);
-       update_page_reclaim_stat(zone, page, file, active);
+       add_page_to_lru_list(page, lruvec, lru);
+       update_page_reclaim_stat(lruvec, file, active);
 }
 
 /*
index fafc26d1b1dc885d2541eda3bdc5705a4fe56012..457b10baef59414f591fb0bfab2b54619c5209b0 100644 (file)
@@ -601,7 +601,7 @@ void swapcache_free(swp_entry_t entry, struct page *page)
  * This does not give an exact answer when swap count is continued,
  * but does include the high COUNT_CONTINUED flag to allow for that.
  */
-static inline int page_swapcount(struct page *page)
+int page_swapcount(struct page *page)
 {
        int count = 0;
        struct swap_info_struct *p;
@@ -717,37 +717,6 @@ int free_swap_and_cache(swp_entry_t entry)
        return p != NULL;
 }
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-/**
- * mem_cgroup_count_swap_user - count the user of a swap entry
- * @ent: the swap entry to be checked
- * @pagep: the pointer for the swap cache page of the entry to be stored
- *
- * Returns the number of the user of the swap entry. The number is valid only
- * for swaps of anonymous pages.
- * If the entry is found on swap cache, the page is stored to pagep with
- * refcount of it being incremented.
- */
-int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
-{
-       struct page *page;
-       struct swap_info_struct *p;
-       int count = 0;
-
-       page = find_get_page(&swapper_space, ent.val);
-       if (page)
-               count += page_mapcount(page);
-       p = swap_info_get(ent);
-       if (p) {
-               count += swap_count(p->swap_map[swp_offset(ent)]);
-               spin_unlock(&swap_lock);
-       }
-
-       *pagep = page;
-       return count;
-}
-#endif
-
 #ifdef CONFIG_HIBERNATION
 /*
  * Find the swap type that corresponds to given device (if any).
diff --git a/mm/thrash.c b/mm/thrash.c
deleted file mode 100644 (file)
index 57ad495..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * mm/thrash.c
- *
- * Copyright (C) 2004, Red Hat, Inc.
- * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
- * Released under the GPL, see the file COPYING for details.
- *
- * Simple token based thrashing protection, using the algorithm
- * described in: http://www.cse.ohio-state.edu/hpcs/WWW/HTML/publications/abs05-1.html
- *
- * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
- * Improved algorithm to pass token:
- * Each task has a priority which is incremented if it contended
- * for the token in an interval less than its previous attempt.
- * If the token is acquired, that task's priority is boosted to prevent
- * the token from bouncing around too often and to let the task make
- * some progress in its execution.
- */
-
-#include <linux/jiffies.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/swap.h>
-#include <linux/memcontrol.h>
-
-#include <trace/events/vmscan.h>
-
-#define TOKEN_AGING_INTERVAL   (0xFF)
-
-static DEFINE_SPINLOCK(swap_token_lock);
-struct mm_struct *swap_token_mm;
-static struct mem_cgroup *swap_token_memcg;
-
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
-{
-       struct mem_cgroup *memcg;
-
-       memcg = try_get_mem_cgroup_from_mm(mm);
-       if (memcg)
-               css_put(mem_cgroup_css(memcg));
-
-       return memcg;
-}
-#else
-static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
-{
-       return NULL;
-}
-#endif
-
-void grab_swap_token(struct mm_struct *mm)
-{
-       int current_interval;
-       unsigned int old_prio = mm->token_priority;
-       static unsigned int global_faults;
-       static unsigned int last_aging;
-
-       global_faults++;
-
-       current_interval = global_faults - mm->faultstamp;
-
-       if (!spin_trylock(&swap_token_lock))
-               return;
-
-       /* First come first served */
-       if (!swap_token_mm)
-               goto replace_token;
-
-       /*
-        * Usually, we don't need priority aging because long interval faults
-        * makes priority decrease quickly. But there is one exception. If the
-        * token owner task is sleeping, it never make long interval faults.
-        * Thus, we need a priority aging mechanism instead. The requirements
-        * of priority aging are
-        *  1) An aging interval is reasonable enough long. Too short aging
-        *     interval makes quick swap token lost and decrease performance.
-        *  2) The swap token owner task have to get priority aging even if
-        *     it's under sleep.
-        */
-       if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
-               swap_token_mm->token_priority /= 2;
-               last_aging = global_faults;
-       }
-
-       if (mm == swap_token_mm) {
-               mm->token_priority += 2;
-               goto update_priority;
-       }
-
-       if (current_interval < mm->last_interval)
-               mm->token_priority++;
-       else {
-               if (likely(mm->token_priority > 0))
-                       mm->token_priority--;
-       }
-
-       /* Check if we deserve the token */
-       if (mm->token_priority > swap_token_mm->token_priority)
-               goto replace_token;
-
-update_priority:
-       trace_update_swap_token_priority(mm, old_prio, swap_token_mm);
-
-out:
-       mm->faultstamp = global_faults;
-       mm->last_interval = current_interval;
-       spin_unlock(&swap_token_lock);
-       return;
-
-replace_token:
-       mm->token_priority += 2;
-       trace_replace_swap_token(swap_token_mm, mm);
-       swap_token_mm = mm;
-       swap_token_memcg = swap_token_memcg_from_mm(mm);
-       last_aging = global_faults;
-       goto out;
-}
-
-/* Called on process exit. */
-void __put_swap_token(struct mm_struct *mm)
-{
-       spin_lock(&swap_token_lock);
-       if (likely(mm == swap_token_mm)) {
-               trace_put_swap_token(swap_token_mm);
-               swap_token_mm = NULL;
-               swap_token_memcg = NULL;
-       }
-       spin_unlock(&swap_token_lock);
-}
-
-static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
-{
-       if (!a)
-               return true;
-       if (!b)
-               return true;
-       if (a == b)
-               return true;
-       return false;
-}
-
-void disable_swap_token(struct mem_cgroup *memcg)
-{
-       /* memcg reclaim don't disable unrelated mm token. */
-       if (match_memcg(memcg, swap_token_memcg)) {
-               spin_lock(&swap_token_lock);
-               if (match_memcg(memcg, swap_token_memcg)) {
-                       trace_disable_swap_token(swap_token_mm);
-                       swap_token_mm = NULL;
-                       swap_token_memcg = NULL;
-               }
-               spin_unlock(&swap_token_lock);
-       }
-}
index 61a183b89df6d15c358e7afc2b418411abe5e728..75801acdaac77449a15750c4b11b5e5f3739cb75 100644 (file)
@@ -602,31 +602,6 @@ int vmtruncate(struct inode *inode, loff_t newsize)
 }
 EXPORT_SYMBOL(vmtruncate);
 
-int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
-{
-       struct address_space *mapping = inode->i_mapping;
-       loff_t holebegin = round_up(lstart, PAGE_SIZE);
-       loff_t holelen = 1 + lend - holebegin;
-
-       /*
-        * If the underlying filesystem is not going to provide
-        * a way to truncate a range of blocks (punch a hole) -
-        * we should return failure right now.
-        */
-       if (!inode->i_op->truncate_range)
-               return -ENOSYS;
-
-       mutex_lock(&inode->i_mutex);
-       inode_dio_wait(inode);
-       unmap_mapping_range(mapping, holebegin, holelen, 1);
-       inode->i_op->truncate_range(inode, lstart, lend);
-       /* unmap again to remove racily COWed private pages */
-       unmap_mapping_range(mapping, holebegin, holelen, 1);
-       mutex_unlock(&inode->i_mutex);
-
-       return 0;
-}
-
 /**
  * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
  * @inode: inode
index ae962b31de888a55990769aae948bac3ef0db338..8c7265afa29f2109b884907daa050b79f0b25f8b 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -4,6 +4,7 @@
 #include <linux/export.h>
 #include <linux/err.h>
 #include <linux/sched.h>
+#include <linux/security.h>
 #include <asm/uaccess.h>
 
 #include "internal.h"
@@ -341,6 +342,35 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start,
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
+unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
+       unsigned long len, unsigned long prot,
+       unsigned long flag, unsigned long pgoff)
+{
+       unsigned long ret;
+       struct mm_struct *mm = current->mm;
+
+       ret = security_mmap_file(file, prot, flag);
+       if (!ret) {
+               down_write(&mm->mmap_sem);
+               ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
+               up_write(&mm->mmap_sem);
+       }
+       return ret;
+}
+
+unsigned long vm_mmap(struct file *file, unsigned long addr,
+       unsigned long len, unsigned long prot,
+       unsigned long flag, unsigned long offset)
+{
+       if (unlikely(offset + PAGE_ALIGN(len) < offset))
+               return -EINVAL;
+       if (unlikely(offset & ~PAGE_MASK))
+               return -EINVAL;
+
+       return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(vm_mmap);
+
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
index 94dff883b449e8c1ed3aba28c1b7fd98f41c08db..2aad49981b5740ba496f30dad6eb5d5a0d862edf 100644 (file)
@@ -1185,9 +1185,10 @@ void __init vmalloc_init(void)
        /* Import existing vmlist entries. */
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
-               va->flags = tmp->flags | VM_VM_AREA;
+               va->flags = VM_VM_AREA;
                va->va_start = (unsigned long)tmp->addr;
                va->va_end = va->va_start + tmp->size;
+               va->vm = tmp;
                __insert_vmap_area(va);
        }
 
@@ -2375,8 +2376,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
                return NULL;
        }
 
-       vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
-       vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
+       vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
+       vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
        if (!vas || !vms)
                goto err_free2;
 
index 33dc256033b5020c3679a4578af854c640fc826a..eeb3bc9d1d361b6f20821073485f1b8e7c4931d3 100644 (file)
 #define CREATE_TRACE_POINTS
 #include <trace/events/vmscan.h>
 
-/*
- * reclaim_mode determines how the inactive list is shrunk
- * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
- * RECLAIM_MODE_ASYNC:  Do not block
- * RECLAIM_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
- * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
- *                     page from the LRU and reclaim all pages within a
- *                     naturally aligned range
- * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
- *                     order-0 pages and then compact the zone
- */
-typedef unsigned __bitwise__ reclaim_mode_t;
-#define RECLAIM_MODE_SINGLE            ((__force reclaim_mode_t)0x01u)
-#define RECLAIM_MODE_ASYNC             ((__force reclaim_mode_t)0x02u)
-#define RECLAIM_MODE_SYNC              ((__force reclaim_mode_t)0x04u)
-#define RECLAIM_MODE_LUMPYRECLAIM      ((__force reclaim_mode_t)0x08u)
-#define RECLAIM_MODE_COMPACTION                ((__force reclaim_mode_t)0x10u)
-
 struct scan_control {
        /* Incremented by the number of inactive pages that were scanned */
        unsigned long nr_scanned;
@@ -96,11 +78,8 @@ struct scan_control {
 
        int order;
 
-       /*
-        * Intend to reclaim enough continuous memory rather than reclaim
-        * enough amount of memory. i.e, mode for high order allocation.
-        */
-       reclaim_mode_t reclaim_mode;
+       /* Scan (total_size >> priority) pages at once */
+       int priority;
 
        /*
         * The memory cgroup that hit its limit and as a result is the
@@ -115,11 +94,6 @@ struct scan_control {
        nodemask_t      *nodemask;
 };
 
-struct mem_cgroup_zone {
-       struct mem_cgroup *mem_cgroup;
-       struct zone *zone;
-};
-
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
 
 #ifdef ARCH_HAS_PREFETCH
@@ -164,44 +138,21 @@ static bool global_reclaim(struct scan_control *sc)
 {
        return !sc->target_mem_cgroup;
 }
-
-static bool scanning_global_lru(struct mem_cgroup_zone *mz)
-{
-       return !mz->mem_cgroup;
-}
 #else
 static bool global_reclaim(struct scan_control *sc)
 {
        return true;
 }
-
-static bool scanning_global_lru(struct mem_cgroup_zone *mz)
-{
-       return true;
-}
 #endif
 
-static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz)
-{
-       if (!scanning_global_lru(mz))
-               return mem_cgroup_get_reclaim_stat(mz->mem_cgroup, mz->zone);
-
-       return &mz->zone->reclaim_stat;
-}
-
-static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
-                                      enum lru_list lru)
+static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
-       if (!scanning_global_lru(mz))
-               return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup,
-                                                   zone_to_nid(mz->zone),
-                                                   zone_idx(mz->zone),
-                                                   BIT(lru));
+       if (!mem_cgroup_disabled())
+               return mem_cgroup_get_lru_size(lruvec, lru);
 
-       return zone_page_state(mz->zone, NR_LRU_BASE + lru);
+       return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
 }
 
-
 /*
  * Add a shrinker callback to be called from the vm
  */
@@ -364,39 +315,6 @@ out:
        return ret;
 }
 
-static void set_reclaim_mode(int priority, struct scan_control *sc,
-                                  bool sync)
-{
-       reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
-
-       /*
-        * Initially assume we are entering either lumpy reclaim or
-        * reclaim/compaction.Depending on the order, we will either set the
-        * sync mode or just reclaim order-0 pages later.
-        */
-       if (COMPACTION_BUILD)
-               sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
-       else
-               sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;
-
-       /*
-        * Avoid using lumpy reclaim or reclaim/compaction if possible by
-        * restricting when its set to either costly allocations or when
-        * under memory pressure
-        */
-       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-               sc->reclaim_mode |= syncmode;
-       else if (sc->order && priority < DEF_PRIORITY - 2)
-               sc->reclaim_mode |= syncmode;
-       else
-               sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
-}
-
-static void reset_reclaim_mode(struct scan_control *sc)
-{
-       sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
-}
-
 static inline int is_page_cache_freeable(struct page *page)
 {
        /*
@@ -416,10 +334,6 @@ static int may_write_to_queue(struct backing_dev_info *bdi,
                return 1;
        if (bdi == current->backing_dev_info)
                return 1;
-
-       /* lumpy reclaim for hugepage often need a lot of write */
-       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-               return 1;
        return 0;
 }
 
@@ -523,8 +437,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                        /* synchronous write or broken a_ops? */
                        ClearPageReclaim(page);
                }
-               trace_mm_vmscan_writepage(page,
-                       trace_reclaim_flags(page, sc->reclaim_mode));
+               trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
                inc_zone_page_state(page, NR_VMSCAN_WRITE);
                return PAGE_SUCCESS;
        }
@@ -701,19 +614,15 @@ enum page_references {
 };
 
 static enum page_references page_check_references(struct page *page,
-                                                 struct mem_cgroup_zone *mz,
                                                  struct scan_control *sc)
 {
        int referenced_ptes, referenced_page;
        unsigned long vm_flags;
 
-       referenced_ptes = page_referenced(page, 1, mz->mem_cgroup, &vm_flags);
+       referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
+                                         &vm_flags);
        referenced_page = TestClearPageReferenced(page);
 
-       /* Lumpy reclaim - ignore references */
-       if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
-               return PAGEREF_RECLAIM;
-
        /*
         * Mlock lost the isolation race with us.  Let try_to_unmap()
         * move the page to the unevictable list.
@@ -722,7 +631,7 @@ static enum page_references page_check_references(struct page *page,
                return PAGEREF_RECLAIM;
 
        if (referenced_ptes) {
-               if (PageAnon(page))
+               if (PageSwapBacked(page))
                        return PAGEREF_ACTIVATE;
                /*
                 * All mapped pages start out with page table
@@ -763,9 +672,8 @@ static enum page_references page_check_references(struct page *page,
  * shrink_page_list() returns the number of reclaimed pages
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
-                                     struct mem_cgroup_zone *mz,
+                                     struct zone *zone,
                                      struct scan_control *sc,
-                                     int priority,
                                      unsigned long *ret_nr_dirty,
                                      unsigned long *ret_nr_writeback)
 {
@@ -794,7 +702,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        goto keep;
 
                VM_BUG_ON(PageActive(page));
-               VM_BUG_ON(page_zone(page) != mz->zone);
+               VM_BUG_ON(page_zone(page) != zone);
 
                sc->nr_scanned++;
 
@@ -813,22 +721,11 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                if (PageWriteback(page)) {
                        nr_writeback++;
-                       /*
-                        * Synchronous reclaim cannot queue pages for
-                        * writeback due to the possibility of stack overflow
-                        * but if it encounters a page under writeback, wait
-                        * for the IO to complete.
-                        */
-                       if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
-                           may_enter_fs)
-                               wait_on_page_writeback(page);
-                       else {
-                               unlock_page(page);
-                               goto keep_lumpy;
-                       }
+                       unlock_page(page);
+                       goto keep;
                }
 
-               references = page_check_references(page, mz, sc);
+               references = page_check_references(page, sc);
                switch (references) {
                case PAGEREF_ACTIVATE:
                        goto activate_locked;
@@ -879,7 +776,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                         * unless under significant pressure.
                         */
                        if (page_is_file_cache(page) &&
-                                       (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
+                                       (!current_is_kswapd() ||
+                                        sc->priority >= DEF_PRIORITY - 2)) {
                                /*
                                 * Immediately reclaim when written back.
                                 * Similar in principal to deactivate_page()
@@ -908,7 +806,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto activate_locked;
                        case PAGE_SUCCESS:
                                if (PageWriteback(page))
-                                       goto keep_lumpy;
+                                       goto keep;
                                if (PageDirty(page))
                                        goto keep;
 
@@ -994,7 +892,6 @@ cull_mlocked:
                        try_to_free_swap(page);
                unlock_page(page);
                putback_lru_page(page);
-               reset_reclaim_mode(sc);
                continue;
 
 activate_locked:
@@ -1007,8 +904,6 @@ activate_locked:
 keep_locked:
                unlock_page(page);
 keep:
-               reset_reclaim_mode(sc);
-keep_lumpy:
                list_add(&page->lru, &ret_pages);
                VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
        }
@@ -1020,7 +915,7 @@ keep_lumpy:
         * will encounter the same problem
         */
        if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
-               zone_set_flag(mz->zone, ZONE_CONGESTED);
+               zone_set_flag(zone, ZONE_CONGESTED);
 
        free_hot_cold_page_list(&free_pages, 1);
 
@@ -1041,34 +936,15 @@ keep_lumpy:
  *
  * returns 0 on success, -ve errno on failure.
  */
-int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
+int __isolate_lru_page(struct page *page, isolate_mode_t mode)
 {
-       bool all_lru_mode;
        int ret = -EINVAL;
 
        /* Only take pages on the LRU. */
        if (!PageLRU(page))
                return ret;
 
-       all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
-               (ISOLATE_ACTIVE|ISOLATE_INACTIVE);
-
-       /*
-        * When checking the active state, we need to be sure we are
-        * dealing with comparible boolean values.  Take the logical not
-        * of each.
-        */
-       if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
-               return ret;
-
-       if (!all_lru_mode && !!page_is_file_cache(page) != file)
-               return ret;
-
-       /*
-        * When this function is being called for lumpy reclaim, we
-        * initially look into all LRU pages, active, inactive and
-        * unevictable; only give shrink_page_list evictable pages.
-        */
+       /* Do not give back unevictable pages for compaction */
        if (PageUnevictable(page))
                return ret;
 
@@ -1135,54 +1011,39 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
  * Appropriate locks must be held before calling this function.
  *
  * @nr_to_scan:        The number of pages to look through on the list.
- * @mz:                The mem_cgroup_zone to pull pages from.
+ * @lruvec:    The LRU vector to pull pages from.
  * @dst:       The temp list to put pages on to.
  * @nr_scanned:        The number of pages that were scanned.
  * @sc:                The scan_control struct for this reclaim session
  * @mode:      One of the LRU isolation modes
- * @active:    True [1] if isolating active pages
- * @file:      True [1] if isolating file [!anon] pages
+ * @lru:       LRU list id for isolating
  *
  * returns how many pages were moved onto *@dst.
  */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
-               struct mem_cgroup_zone *mz, struct list_head *dst,
+               struct lruvec *lruvec, struct list_head *dst,
                unsigned long *nr_scanned, struct scan_control *sc,
-               isolate_mode_t mode, int active, int file)
+               isolate_mode_t mode, enum lru_list lru)
 {
-       struct lruvec *lruvec;
-       struct list_head *src;
+       struct list_head *src = &lruvec->lists[lru];
        unsigned long nr_taken = 0;
-       unsigned long nr_lumpy_taken = 0;
-       unsigned long nr_lumpy_dirty = 0;
-       unsigned long nr_lumpy_failed = 0;
        unsigned long scan;
-       int lru = LRU_BASE;
-
-       lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
-       if (active)
-               lru += LRU_ACTIVE;
-       if (file)
-               lru += LRU_FILE;
-       src = &lruvec->lists[lru];
 
        for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
                struct page *page;
-               unsigned long pfn;
-               unsigned long end_pfn;
-               unsigned long page_pfn;
-               int zone_id;
+               int nr_pages;
 
                page = lru_to_page(src);
                prefetchw_prev_lru_page(page, src, flags);
 
                VM_BUG_ON(!PageLRU(page));
 
-               switch (__isolate_lru_page(page, mode, file)) {
+               switch (__isolate_lru_page(page, mode)) {
                case 0:
-                       mem_cgroup_lru_del(page);
+                       nr_pages = hpage_nr_pages(page);
+                       mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
                        list_move(&page->lru, dst);
-                       nr_taken += hpage_nr_pages(page);
+                       nr_taken += nr_pages;
                        break;
 
                case -EBUSY:
@@ -1193,93 +1054,11 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                default:
                        BUG();
                }
-
-               if (!sc->order || !(sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM))
-                       continue;
-
-               /*
-                * Attempt to take all pages in the order aligned region
-                * surrounding the tag page.  Only take those pages of
-                * the same active state as that tag page.  We may safely
-                * round the target page pfn down to the requested order
-                * as the mem_map is guaranteed valid out to MAX_ORDER,
-                * where that page is in a different zone we will detect
-                * it from its zone id and abort this block scan.
-                */
-               zone_id = page_zone_id(page);
-               page_pfn = page_to_pfn(page);
-               pfn = page_pfn & ~((1 << sc->order) - 1);
-               end_pfn = pfn + (1 << sc->order);
-               for (; pfn < end_pfn; pfn++) {
-                       struct page *cursor_page;
-
-                       /* The target page is in the block, ignore it. */
-                       if (unlikely(pfn == page_pfn))
-                               continue;
-
-                       /* Avoid holes within the zone. */
-                       if (unlikely(!pfn_valid_within(pfn)))
-                               break;
-
-                       cursor_page = pfn_to_page(pfn);
-
-                       /* Check that we have not crossed a zone boundary. */
-                       if (unlikely(page_zone_id(cursor_page) != zone_id))
-                               break;
-
-                       /*
-                        * If we don't have enough swap space, reclaiming of
-                        * anon page which don't already have a swap slot is
-                        * pointless.
-                        */
-                       if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) &&
-                           !PageSwapCache(cursor_page))
-                               break;
-
-                       if (__isolate_lru_page(cursor_page, mode, file) == 0) {
-                               unsigned int isolated_pages;
-
-                               mem_cgroup_lru_del(cursor_page);
-                               list_move(&cursor_page->lru, dst);
-                               isolated_pages = hpage_nr_pages(cursor_page);
-                               nr_taken += isolated_pages;
-                               nr_lumpy_taken += isolated_pages;
-                               if (PageDirty(cursor_page))
-                                       nr_lumpy_dirty += isolated_pages;
-                               scan++;
-                               pfn += isolated_pages - 1;
-                       } else {
-                               /*
-                                * Check if the page is freed already.
-                                *
-                                * We can't use page_count() as that
-                                * requires compound_head and we don't
-                                * have a pin on the page here. If a
-                                * page is tail, we may or may not
-                                * have isolated the head, so assume
-                                * it's not free, it'd be tricky to
-                                * track the head status without a
-                                * page pin.
-                                */
-                               if (!PageTail(cursor_page) &&
-                                   !atomic_read(&cursor_page->_count))
-                                       continue;
-                               break;
-                       }
-               }
-
-               /* If we break out of the loop above, lumpy reclaim failed */
-               if (pfn < end_pfn)
-                       nr_lumpy_failed++;
        }
 
        *nr_scanned = scan;
-
-       trace_mm_vmscan_lru_isolate(sc->order,
-                       nr_to_scan, scan,
-                       nr_taken,
-                       nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
-                       mode, file);
+       trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
+                                   nr_taken, mode, is_file_lru(lru));
        return nr_taken;
 }
 
@@ -1316,15 +1095,16 @@ int isolate_lru_page(struct page *page)
 
        if (PageLRU(page)) {
                struct zone *zone = page_zone(page);
+               struct lruvec *lruvec;
 
                spin_lock_irq(&zone->lru_lock);
+               lruvec = mem_cgroup_page_lruvec(page, zone);
                if (PageLRU(page)) {
                        int lru = page_lru(page);
-                       ret = 0;
                        get_page(page);
                        ClearPageLRU(page);
-
-                       del_page_from_lru_list(zone, page, lru);
+                       del_page_from_lru_list(page, lruvec, lru);
+                       ret = 0;
                }
                spin_unlock_irq(&zone->lru_lock);
        }
@@ -1357,11 +1137,10 @@ static int too_many_isolated(struct zone *zone, int file,
 }
 
 static noinline_for_stack void
-putback_inactive_pages(struct mem_cgroup_zone *mz,
-                      struct list_head *page_list)
+putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 {
-       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
-       struct zone *zone = mz->zone;
+       struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
+       struct zone *zone = lruvec_zone(lruvec);
        LIST_HEAD(pages_to_free);
 
        /*
@@ -1379,9 +1158,13 @@ putback_inactive_pages(struct mem_cgroup_zone *mz,
                        spin_lock_irq(&zone->lru_lock);
                        continue;
                }
+
+               lruvec = mem_cgroup_page_lruvec(page, zone);
+
                SetPageLRU(page);
                lru = page_lru(page);
-               add_page_to_lru_list(zone, page, lru);
+               add_page_to_lru_list(page, lruvec, lru);
+
                if (is_active_lru(lru)) {
                        int file = is_file_lru(lru);
                        int numpages = hpage_nr_pages(page);
@@ -1390,7 +1173,7 @@ putback_inactive_pages(struct mem_cgroup_zone *mz,
                if (put_page_testzero(page)) {
                        __ClearPageLRU(page);
                        __ClearPageActive(page);
-                       del_page_from_lru_list(zone, page, lru);
+                       del_page_from_lru_list(page, lruvec, lru);
 
                        if (unlikely(PageCompound(page))) {
                                spin_unlock_irq(&zone->lru_lock);
@@ -1407,112 +1190,24 @@ putback_inactive_pages(struct mem_cgroup_zone *mz,
        list_splice(&pages_to_free, page_list);
 }
 
-static noinline_for_stack void
-update_isolated_counts(struct mem_cgroup_zone *mz,
-                      struct list_head *page_list,
-                      unsigned long *nr_anon,
-                      unsigned long *nr_file)
-{
-       struct zone *zone = mz->zone;
-       unsigned int count[NR_LRU_LISTS] = { 0, };
-       unsigned long nr_active = 0;
-       struct page *page;
-       int lru;
-
-       /*
-        * Count pages and clear active flags
-        */
-       list_for_each_entry(page, page_list, lru) {
-               int numpages = hpage_nr_pages(page);
-               lru = page_lru_base_type(page);
-               if (PageActive(page)) {
-                       lru += LRU_ACTIVE;
-                       ClearPageActive(page);
-                       nr_active += numpages;
-               }
-               count[lru] += numpages;
-       }
-
-       preempt_disable();
-       __count_vm_events(PGDEACTIVATE, nr_active);
-
-       __mod_zone_page_state(zone, NR_ACTIVE_FILE,
-                             -count[LRU_ACTIVE_FILE]);
-       __mod_zone_page_state(zone, NR_INACTIVE_FILE,
-                             -count[LRU_INACTIVE_FILE]);
-       __mod_zone_page_state(zone, NR_ACTIVE_ANON,
-                             -count[LRU_ACTIVE_ANON]);
-       __mod_zone_page_state(zone, NR_INACTIVE_ANON,
-                             -count[LRU_INACTIVE_ANON]);
-
-       *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
-       *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
-
-       __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
-       __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
-       preempt_enable();
-}
-
-/*
- * Returns true if a direct reclaim should wait on pages under writeback.
- *
- * If we are direct reclaiming for contiguous pages and we do not reclaim
- * everything in the list, try again and wait for writeback IO to complete.
- * This will stall high-order allocations noticeably. Only do that when really
- * need to free the pages under high memory pressure.
- */
-static inline bool should_reclaim_stall(unsigned long nr_taken,
-                                       unsigned long nr_freed,
-                                       int priority,
-                                       struct scan_control *sc)
-{
-       int lumpy_stall_priority;
-
-       /* kswapd should not stall on sync IO */
-       if (current_is_kswapd())
-               return false;
-
-       /* Only stall on lumpy reclaim */
-       if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
-               return false;
-
-       /* If we have reclaimed everything on the isolated list, no stall */
-       if (nr_freed == nr_taken)
-               return false;
-
-       /*
-        * For high-order allocations, there are two stall thresholds.
-        * High-cost allocations stall immediately where as lower
-        * order allocations such as stacks require the scanning
-        * priority to be much higher before stalling.
-        */
-       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-               lumpy_stall_priority = DEF_PRIORITY;
-       else
-               lumpy_stall_priority = DEF_PRIORITY / 3;
-
-       return priority <= lumpy_stall_priority;
-}
-
 /*
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
  */
 static noinline_for_stack unsigned long
-shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
-                    struct scan_control *sc, int priority, int file)
+shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
+                    struct scan_control *sc, enum lru_list lru)
 {
        LIST_HEAD(page_list);
        unsigned long nr_scanned;
        unsigned long nr_reclaimed = 0;
        unsigned long nr_taken;
-       unsigned long nr_anon;
-       unsigned long nr_file;
        unsigned long nr_dirty = 0;
        unsigned long nr_writeback = 0;
-       isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
-       struct zone *zone = mz->zone;
-       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+       isolate_mode_t isolate_mode = 0;
+       int file = is_file_lru(lru);
+       struct zone *zone = lruvec_zone(lruvec);
+       struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 
        while (unlikely(too_many_isolated(zone, file, sc))) {
                congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1522,10 +1217,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
                        return SWAP_CLUSTER_MAX;
        }
 
-       set_reclaim_mode(priority, sc, false);
-       if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
-               isolate_mode |= ISOLATE_ACTIVE;
-
        lru_add_drain();
 
        if (!sc->may_unmap)
@@ -1535,38 +1226,30 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
        spin_lock_irq(&zone->lru_lock);
 
-       nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, &nr_scanned,
-                                    sc, isolate_mode, 0, file);
+       nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
+                                    &nr_scanned, sc, isolate_mode, lru);
+
+       __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
+       __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
+
        if (global_reclaim(sc)) {
                zone->pages_scanned += nr_scanned;
                if (current_is_kswapd())
-                       __count_zone_vm_events(PGSCAN_KSWAPD, zone,
-                                              nr_scanned);
+                       __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
                else
-                       __count_zone_vm_events(PGSCAN_DIRECT, zone,
-                                              nr_scanned);
+                       __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
        }
        spin_unlock_irq(&zone->lru_lock);
 
        if (nr_taken == 0)
                return 0;
 
-       update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
-
-       nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
+       nr_reclaimed = shrink_page_list(&page_list, zone, sc,
                                                &nr_dirty, &nr_writeback);
 
-       /* Check if we should syncronously wait for writeback */
-       if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
-               set_reclaim_mode(priority, sc, true);
-               nr_reclaimed += shrink_page_list(&page_list, mz, sc,
-                                       priority, &nr_dirty, &nr_writeback);
-       }
-
        spin_lock_irq(&zone->lru_lock);
 
-       reclaim_stat->recent_scanned[0] += nr_anon;
-       reclaim_stat->recent_scanned[1] += nr_file;
+       reclaim_stat->recent_scanned[file] += nr_taken;
 
        if (global_reclaim(sc)) {
                if (current_is_kswapd())
@@ -1577,10 +1260,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
                                               nr_reclaimed);
        }
 
-       putback_inactive_pages(mz, &page_list);
+       putback_inactive_pages(lruvec, &page_list);
 
-       __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
-       __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+       __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
 
        spin_unlock_irq(&zone->lru_lock);
 
@@ -1609,14 +1291,15 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
         * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
         *                     isolated page is PageWriteback
         */
-       if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
+       if (nr_writeback && nr_writeback >=
+                       (nr_taken >> (DEF_PRIORITY - sc->priority)))
                wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
 
        trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
                zone_idx(zone),
                nr_scanned, nr_reclaimed,
-               priority,
-               trace_shrink_flags(file, sc->reclaim_mode));
+               sc->priority,
+               trace_shrink_flags(file));
        return nr_reclaimed;
 }
 
@@ -1638,30 +1321,32 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
  * But we had to alter page->flags anyway.
  */
 
-static void move_active_pages_to_lru(struct zone *zone,
+static void move_active_pages_to_lru(struct lruvec *lruvec,
                                     struct list_head *list,
                                     struct list_head *pages_to_free,
                                     enum lru_list lru)
 {
+       struct zone *zone = lruvec_zone(lruvec);
        unsigned long pgmoved = 0;
        struct page *page;
+       int nr_pages;
 
        while (!list_empty(list)) {
-               struct lruvec *lruvec;
-
                page = lru_to_page(list);
+               lruvec = mem_cgroup_page_lruvec(page, zone);
 
                VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
 
-               lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+               nr_pages = hpage_nr_pages(page);
+               mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
                list_move(&page->lru, &lruvec->lists[lru]);
-               pgmoved += hpage_nr_pages(page);
+               pgmoved += nr_pages;
 
                if (put_page_testzero(page)) {
                        __ClearPageLRU(page);
                        __ClearPageActive(page);
-                       del_page_from_lru_list(zone, page, lru);
+                       del_page_from_lru_list(page, lruvec, lru);
 
                        if (unlikely(PageCompound(page))) {
                                spin_unlock_irq(&zone->lru_lock);
@@ -1677,9 +1362,9 @@ static void move_active_pages_to_lru(struct zone *zone,
 }
 
 static void shrink_active_list(unsigned long nr_to_scan,
-                              struct mem_cgroup_zone *mz,
+                              struct lruvec *lruvec,
                               struct scan_control *sc,
-                              int priority, int file)
+                              enum lru_list lru)
 {
        unsigned long nr_taken;
        unsigned long nr_scanned;
@@ -1688,15 +1373,14 @@ static void shrink_active_list(unsigned long nr_to_scan,
        LIST_HEAD(l_active);
        LIST_HEAD(l_inactive);
        struct page *page;
-       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+       struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
        unsigned long nr_rotated = 0;
-       isolate_mode_t isolate_mode = ISOLATE_ACTIVE;
-       struct zone *zone = mz->zone;
+       isolate_mode_t isolate_mode = 0;
+       int file = is_file_lru(lru);
+       struct zone *zone = lruvec_zone(lruvec);
 
        lru_add_drain();
 
-       reset_reclaim_mode(sc);
-
        if (!sc->may_unmap)
                isolate_mode |= ISOLATE_UNMAPPED;
        if (!sc->may_writepage)
@@ -1704,18 +1388,15 @@ static void shrink_active_list(unsigned long nr_to_scan,
 
        spin_lock_irq(&zone->lru_lock);
 
-       nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, &nr_scanned, sc,
-                                    isolate_mode, 1, file);
+       nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
+                                    &nr_scanned, sc, isolate_mode, lru);
        if (global_reclaim(sc))
                zone->pages_scanned += nr_scanned;
 
        reclaim_stat->recent_scanned[file] += nr_taken;
 
        __count_zone_vm_events(PGREFILL, zone, nr_scanned);
-       if (file)
-               __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
-       else
-               __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
+       __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
        spin_unlock_irq(&zone->lru_lock);
 
@@ -1737,7 +1418,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
                        }
                }
 
-               if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) {
+               if (page_referenced(page, 0, sc->target_mem_cgroup,
+                                   &vm_flags)) {
                        nr_rotated += hpage_nr_pages(page);
                        /*
                         * Identify referenced, file-backed active pages and
@@ -1770,10 +1452,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
         */
        reclaim_stat->recent_rotated[file] += nr_rotated;
 
-       move_active_pages_to_lru(zone, &l_active, &l_hold,
-                                               LRU_ACTIVE + file * LRU_FILE);
-       move_active_pages_to_lru(zone, &l_inactive, &l_hold,
-                                               LRU_BASE   + file * LRU_FILE);
+       move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
+       move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
        spin_unlock_irq(&zone->lru_lock);
 
@@ -1796,13 +1476,12 @@ static int inactive_anon_is_low_global(struct zone *zone)
 
 /**
  * inactive_anon_is_low - check if anonymous pages need to be deactivated
- * @zone: zone to check
- * @sc:   scan control of this context
+ * @lruvec: LRU vector to check
  *
  * Returns true if the zone does not have enough inactive anon pages,
  * meaning some active anon pages need to be deactivated.
  */
-static int inactive_anon_is_low(struct mem_cgroup_zone *mz)
+static int inactive_anon_is_low(struct lruvec *lruvec)
 {
        /*
         * If we don't have swap space, anonymous page deactivation
@@ -1811,14 +1490,13 @@ static int inactive_anon_is_low(struct mem_cgroup_zone *mz)
        if (!total_swap_pages)
                return 0;
 
-       if (!scanning_global_lru(mz))
-               return mem_cgroup_inactive_anon_is_low(mz->mem_cgroup,
-                                                      mz->zone);
+       if (!mem_cgroup_disabled())
+               return mem_cgroup_inactive_anon_is_low(lruvec);
 
-       return inactive_anon_is_low_global(mz->zone);
+       return inactive_anon_is_low_global(lruvec_zone(lruvec));
 }
 #else
-static inline int inactive_anon_is_low(struct mem_cgroup_zone *mz)
+static inline int inactive_anon_is_low(struct lruvec *lruvec)
 {
        return 0;
 }
@@ -1836,7 +1514,7 @@ static int inactive_file_is_low_global(struct zone *zone)
 
 /**
  * inactive_file_is_low - check if file pages need to be deactivated
- * @mz: memory cgroup and zone to check
+ * @lruvec: LRU vector to check
  *
  * When the system is doing streaming IO, memory pressure here
  * ensures that active file pages get deactivated, until more
@@ -1848,44 +1526,39 @@ static int inactive_file_is_low_global(struct zone *zone)
  * This uses a different ratio than the anonymous pages, because
  * the page cache uses a use-once replacement algorithm.
  */
-static int inactive_file_is_low(struct mem_cgroup_zone *mz)
+static int inactive_file_is_low(struct lruvec *lruvec)
 {
-       if (!scanning_global_lru(mz))
-               return mem_cgroup_inactive_file_is_low(mz->mem_cgroup,
-                                                      mz->zone);
+       if (!mem_cgroup_disabled())
+               return mem_cgroup_inactive_file_is_low(lruvec);
 
-       return inactive_file_is_low_global(mz->zone);
+       return inactive_file_is_low_global(lruvec_zone(lruvec));
 }
 
-static int inactive_list_is_low(struct mem_cgroup_zone *mz, int file)
+static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
 {
-       if (file)
-               return inactive_file_is_low(mz);
+       if (is_file_lru(lru))
+               return inactive_file_is_low(lruvec);
        else
-               return inactive_anon_is_low(mz);
+               return inactive_anon_is_low(lruvec);
 }
 
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
-                                struct mem_cgroup_zone *mz,
-                                struct scan_control *sc, int priority)
+                                struct lruvec *lruvec, struct scan_control *sc)
 {
-       int file = is_file_lru(lru);
-
        if (is_active_lru(lru)) {
-               if (inactive_list_is_low(mz, file))
-                       shrink_active_list(nr_to_scan, mz, sc, priority, file);
+               if (inactive_list_is_low(lruvec, lru))
+                       shrink_active_list(nr_to_scan, lruvec, sc, lru);
                return 0;
        }
 
-       return shrink_inactive_list(nr_to_scan, mz, sc, priority, file);
+       return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
 }
 
-static int vmscan_swappiness(struct mem_cgroup_zone *mz,
-                            struct scan_control *sc)
+static int vmscan_swappiness(struct scan_control *sc)
 {
        if (global_reclaim(sc))
                return vm_swappiness;
-       return mem_cgroup_swappiness(mz->mem_cgroup);
+       return mem_cgroup_swappiness(sc->target_mem_cgroup);
 }
 
 /*
@@ -1896,17 +1569,18 @@ static int vmscan_swappiness(struct mem_cgroup_zone *mz,
  *
  * nr[0] = anon pages to scan; nr[1] = file pages to scan
  */
-static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
-                          unsigned long *nr, int priority)
+static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
+                          unsigned long *nr)
 {
        unsigned long anon, file, free;
        unsigned long anon_prio, file_prio;
        unsigned long ap, fp;
-       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+       struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
        u64 fraction[2], denominator;
        enum lru_list lru;
        int noswap = 0;
        bool force_scan = false;
+       struct zone *zone = lruvec_zone(lruvec);
 
        /*
         * If the zone or memcg is small, nr[l] can be 0.  This
@@ -1918,7 +1592,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
         * latencies, so it's better to scan a minimum amount there as
         * well.
         */
-       if (current_is_kswapd() && mz->zone->all_unreclaimable)
+       if (current_is_kswapd() && zone->all_unreclaimable)
                force_scan = true;
        if (!global_reclaim(sc))
                force_scan = true;
@@ -1932,16 +1606,16 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
                goto out;
        }
 
-       anon  = zone_nr_lru_pages(mz, LRU_ACTIVE_ANON) +
-               zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
-       file  = zone_nr_lru_pages(mz, LRU_ACTIVE_FILE) +
-               zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+       anon  = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
+               get_lru_size(lruvec, LRU_INACTIVE_ANON);
+       file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
+               get_lru_size(lruvec, LRU_INACTIVE_FILE);
 
        if (global_reclaim(sc)) {
-               free  = zone_page_state(mz->zone, NR_FREE_PAGES);
+               free  = zone_page_state(zone, NR_FREE_PAGES);
                /* If we have very few page cache pages,
                   force-scan anon pages. */
-               if (unlikely(file + free <= high_wmark_pages(mz->zone))) {
+               if (unlikely(file + free <= high_wmark_pages(zone))) {
                        fraction[0] = 1;
                        fraction[1] = 0;
                        denominator = 1;
@@ -1953,8 +1627,8 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
         * With swappiness at 100, anonymous and file have the same priority.
         * This scanning priority is essentially the inverse of IO cost.
         */
-       anon_prio = vmscan_swappiness(mz, sc);
-       file_prio = 200 - vmscan_swappiness(mz, sc);
+       anon_prio = vmscan_swappiness(sc);
+       file_prio = 200 - anon_prio;
 
        /*
         * OK, so we have swap space and a fair amount of page cache
@@ -1967,7 +1641,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
         *
         * anon in [0], file in [1]
         */
-       spin_lock_irq(&mz->zone->lru_lock);
+       spin_lock_irq(&zone->lru_lock);
        if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
                reclaim_stat->recent_scanned[0] /= 2;
                reclaim_stat->recent_rotated[0] /= 2;
@@ -1983,12 +1657,12 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
         * proportional to the fraction of recently scanned pages on
         * each list that were recently referenced and in active use.
         */
-       ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
+       ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
        ap /= reclaim_stat->recent_rotated[0] + 1;
 
-       fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
+       fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
        fp /= reclaim_stat->recent_rotated[1] + 1;
-       spin_unlock_irq(&mz->zone->lru_lock);
+       spin_unlock_irq(&zone->lru_lock);
 
        fraction[0] = ap;
        fraction[1] = fp;
@@ -1998,9 +1672,9 @@ out:
                int file = is_file_lru(lru);
                unsigned long scan;
 
-               scan = zone_nr_lru_pages(mz, lru);
-               if (priority || noswap) {
-                       scan >>= priority;
+               scan = get_lru_size(lruvec, lru);
+               if (sc->priority || noswap || !vmscan_swappiness(sc)) {
+                       scan >>= sc->priority;
                        if (!scan && force_scan)
                                scan = SWAP_CLUSTER_MAX;
                        scan = div64_u64(scan * fraction[file], denominator);
@@ -2009,14 +1683,25 @@ out:
        }
 }
 
+/* Use reclaim/compaction for costly allocs or under memory pressure */
+static bool in_reclaim_compaction(struct scan_control *sc)
+{
+       if (COMPACTION_BUILD && sc->order &&
+                       (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
+                        sc->priority < DEF_PRIORITY - 2))
+               return true;
+
+       return false;
+}
+
 /*
- * Reclaim/compaction depends on a number of pages being freed. To avoid
- * disruption to the system, a small number of order-0 pages continue to be
- * rotated and reclaimed in the normal fashion. However, by the time we get
- * back to the allocator and call try_to_compact_zone(), we ensure that
- * there are enough free pages for it to be likely successful
+ * Reclaim/compaction is used for high-order allocation requests. It reclaims
+ * order-0 pages before compacting the zone. should_continue_reclaim() returns
+ * true if more pages should be reclaimed such that when the page allocator
+ * calls try_to_compact_zone() that it will have enough free pages to succeed.
+ * It will give up earlier than that if there is difficulty reclaiming pages.
  */
-static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
+static inline bool should_continue_reclaim(struct lruvec *lruvec,
                                        unsigned long nr_reclaimed,
                                        unsigned long nr_scanned,
                                        struct scan_control *sc)
@@ -2025,7 +1710,7 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
        unsigned long inactive_lru_pages;
 
        /* If not in reclaim/compaction mode, stop */
-       if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
+       if (!in_reclaim_compaction(sc))
                return false;
 
        /* Consider stopping depending on scan and reclaim activity */
@@ -2056,15 +1741,15 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
         * inactive lists are large enough, continue reclaiming
         */
        pages_for_compaction = (2UL << sc->order);
-       inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+       inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
        if (nr_swap_pages > 0)
-               inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
+               inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
        if (sc->nr_reclaimed < pages_for_compaction &&
                        inactive_lru_pages > pages_for_compaction)
                return true;
 
        /* If compaction would go ahead or the allocation would succeed, stop */
-       switch (compaction_suitable(mz->zone, sc->order)) {
+       switch (compaction_suitable(lruvec_zone(lruvec), sc->order)) {
        case COMPACT_PARTIAL:
        case COMPACT_CONTINUE:
                return false;
@@ -2076,8 +1761,7 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
-static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
-                                  struct scan_control *sc)
+static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
        unsigned long nr[NR_LRU_LISTS];
        unsigned long nr_to_scan;
@@ -2089,7 +1773,7 @@ static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
 restart:
        nr_reclaimed = 0;
        nr_scanned = sc->nr_scanned;
-       get_scan_count(mz, sc, nr, priority);
+       get_scan_count(lruvec, sc, nr);
 
        blk_start_plug(&plug);
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -2101,7 +1785,7 @@ restart:
                                nr[lru] -= nr_to_scan;
 
                                nr_reclaimed += shrink_list(lru, nr_to_scan,
-                                                           mz, sc, priority);
+                                                           lruvec, sc);
                        }
                }
                /*
@@ -2112,7 +1796,8 @@ restart:
                 * with multiple processes reclaiming pages, the total
                 * freeing target can get unreasonably large.
                 */
-               if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
+               if (nr_reclaimed >= nr_to_reclaim &&
+                   sc->priority < DEF_PRIORITY)
                        break;
        }
        blk_finish_plug(&plug);
@@ -2122,35 +1807,33 @@ restart:
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
-       if (inactive_anon_is_low(mz))
-               shrink_active_list(SWAP_CLUSTER_MAX, mz, sc, priority, 0);
+       if (inactive_anon_is_low(lruvec))
+               shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
+                                  sc, LRU_ACTIVE_ANON);
 
        /* reclaim/compaction might need reclaim to continue */
-       if (should_continue_reclaim(mz, nr_reclaimed,
-                                       sc->nr_scanned - nr_scanned, sc))
+       if (should_continue_reclaim(lruvec, nr_reclaimed,
+                                   sc->nr_scanned - nr_scanned, sc))
                goto restart;
 
        throttle_vm_writeout(sc->gfp_mask);
 }
 
-static void shrink_zone(int priority, struct zone *zone,
-                       struct scan_control *sc)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
 {
        struct mem_cgroup *root = sc->target_mem_cgroup;
        struct mem_cgroup_reclaim_cookie reclaim = {
                .zone = zone,
-               .priority = priority,
+               .priority = sc->priority,
        };
        struct mem_cgroup *memcg;
 
        memcg = mem_cgroup_iter(root, NULL, &reclaim);
        do {
-               struct mem_cgroup_zone mz = {
-                       .mem_cgroup = memcg,
-                       .zone = zone,
-               };
+               struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+
+               shrink_lruvec(lruvec, sc);
 
-               shrink_mem_cgroup_zone(priority, &mz, sc);
                /*
                 * Limit reclaim has historically picked one memcg and
                 * scanned it with decreasing priority levels until
@@ -2226,8 +1909,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
  * the caller that it should consider retrying the allocation instead of
  * further reclaim.
  */
-static bool shrink_zones(int priority, struct zonelist *zonelist,
-                                       struct scan_control *sc)
+static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
@@ -2254,7 +1936,8 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
                if (global_reclaim(sc)) {
                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                continue;
-                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                       if (zone->all_unreclaimable &&
+                                       sc->priority != DEF_PRIORITY)
                                continue;       /* Let kswapd poll it */
                        if (COMPACTION_BUILD) {
                                /*
@@ -2286,7 +1969,7 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
                        /* need some check for avoid more shrink_zone() */
                }
 
-               shrink_zone(priority, zone, sc);
+               shrink_zone(zone, sc);
        }
 
        return aborted_reclaim;
@@ -2337,7 +2020,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                                        struct scan_control *sc,
                                        struct shrink_control *shrink)
 {
-       int priority;
        unsigned long total_scanned = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct zoneref *z;
@@ -2350,11 +2032,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
        if (global_reclaim(sc))
                count_vm_event(ALLOCSTALL);
 
-       for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+       do {
                sc->nr_scanned = 0;
-               if (!priority)
-                       disable_swap_token(sc->target_mem_cgroup);
-               aborted_reclaim = shrink_zones(priority, zonelist, sc);
+               aborted_reclaim = shrink_zones(zonelist, sc);
 
                /*
                 * Don't shrink slabs when reclaiming memory from
@@ -2396,7 +2076,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 
                /* Take a nap, wait for some writeback to complete */
                if (!sc->hibernation_mode && sc->nr_scanned &&
-                   priority < DEF_PRIORITY - 2) {
+                   sc->priority < DEF_PRIORITY - 2) {
                        struct zone *preferred_zone;
 
                        first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
@@ -2404,7 +2084,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                                                &preferred_zone);
                        wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
                }
-       }
+       } while (--sc->priority >= 0);
 
 out:
        delayacct_freepages_end();
@@ -2442,6 +2122,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                .may_unmap = 1,
                .may_swap = 1,
                .order = order,
+               .priority = DEF_PRIORITY,
                .target_mem_cgroup = NULL,
                .nodemask = nodemask,
        };
@@ -2474,17 +2155,15 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
                .may_unmap = 1,
                .may_swap = !noswap,
                .order = 0,
+               .priority = 0,
                .target_mem_cgroup = memcg,
        };
-       struct mem_cgroup_zone mz = {
-               .mem_cgroup = memcg,
-               .zone = zone,
-       };
+       struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 
-       trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
+       trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
                                                      sc.may_writepage,
                                                      sc.gfp_mask);
 
@@ -2495,7 +2174,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
         * will pick up pages from other mem cgroup's as well. We hack
         * the priority and make it zero.
         */
-       shrink_mem_cgroup_zone(0, &mz, &sc);
+       shrink_lruvec(lruvec, &sc);
 
        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
@@ -2516,6 +2195,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                .may_swap = !noswap,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .order = 0,
+               .priority = DEF_PRIORITY,
                .target_mem_cgroup = memcg,
                .nodemask = NULL, /* we don't care the placement */
                .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
@@ -2546,8 +2226,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 }
 #endif
 
-static void age_active_anon(struct zone *zone, struct scan_control *sc,
-                           int priority)
+static void age_active_anon(struct zone *zone, struct scan_control *sc)
 {
        struct mem_cgroup *memcg;
 
@@ -2556,14 +2235,11 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc,
 
        memcg = mem_cgroup_iter(NULL, NULL, NULL);
        do {
-               struct mem_cgroup_zone mz = {
-                       .mem_cgroup = memcg,
-                       .zone = zone,
-               };
+               struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
-               if (inactive_anon_is_low(&mz))
-                       shrink_active_list(SWAP_CLUSTER_MAX, &mz,
-                                          sc, priority, 0);
+               if (inactive_anon_is_low(lruvec))
+                       shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
+                                          sc, LRU_ACTIVE_ANON);
 
                memcg = mem_cgroup_iter(NULL, memcg, NULL);
        } while (memcg);
@@ -2672,7 +2348,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 {
        int all_zones_ok;
        unsigned long balanced;
-       int priority;
        int i;
        int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
        unsigned long total_scanned;
@@ -2696,18 +2371,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
        };
 loop_again:
        total_scanned = 0;
+       sc.priority = DEF_PRIORITY;
        sc.nr_reclaimed = 0;
        sc.may_writepage = !laptop_mode;
        count_vm_event(PAGEOUTRUN);
 
-       for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+       do {
                unsigned long lru_pages = 0;
                int has_under_min_watermark_zone = 0;
 
-               /* The swap token gets in the way of swapout... */
-               if (!priority)
-                       disable_swap_token(NULL);
-
                all_zones_ok = 1;
                balanced = 0;
 
@@ -2721,14 +2393,15 @@ loop_again:
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                       if (zone->all_unreclaimable &&
+                           sc.priority != DEF_PRIORITY)
                                continue;
 
                        /*
                         * Do some background aging of the anon list, to give
                         * pages a chance to be referenced before reclaiming.
                         */
-                       age_active_anon(zone, &sc, priority);
+                       age_active_anon(zone, &sc);
 
                        /*
                         * If the number of buffer_heads in the machine
@@ -2776,7 +2449,8 @@ loop_again:
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                       if (zone->all_unreclaimable &&
+                           sc.priority != DEF_PRIORITY)
                                continue;
 
                        sc.nr_scanned = 0;
@@ -2820,7 +2494,7 @@ loop_again:
                                    !zone_watermark_ok_safe(zone, testorder,
                                        high_wmark_pages(zone) + balance_gap,
                                        end_zone, 0)) {
-                               shrink_zone(priority, zone, &sc);
+                               shrink_zone(zone, &sc);
 
                                reclaim_state->reclaimed_slab = 0;
                                nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
@@ -2877,7 +2551,7 @@ loop_again:
                 * OK, kswapd is getting into trouble.  Take a nap, then take
                 * another pass across the zones.
                 */
-               if (total_scanned && (priority < DEF_PRIORITY - 2)) {
+               if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
                        if (has_under_min_watermark_zone)
                                count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
                        else
@@ -2892,7 +2566,7 @@ loop_again:
                 */
                if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
                        break;
-       }
+       } while (--sc.priority >= 0);
 out:
 
        /*
@@ -2942,7 +2616,8 @@ out:
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                       if (zone->all_unreclaimable &&
+                           sc.priority != DEF_PRIORITY)
                                continue;
 
                        /* Would compaction fail due to lack of free memory? */
@@ -3209,6 +2884,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
                .nr_to_reclaim = nr_to_reclaim,
                .hibernation_mode = 1,
                .order = 0,
+               .priority = DEF_PRIORITY,
        };
        struct shrink_control shrink = {
                .gfp_mask = sc.gfp_mask,
@@ -3386,7 +3062,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        const unsigned long nr_pages = 1 << order;
        struct task_struct *p = current;
        struct reclaim_state reclaim_state;
-       int priority;
        struct scan_control sc = {
                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
                .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
@@ -3395,6 +3070,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                                       SWAP_CLUSTER_MAX),
                .gfp_mask = gfp_mask,
                .order = order,
+               .priority = ZONE_RECLAIM_PRIORITY,
        };
        struct shrink_control shrink = {
                .gfp_mask = sc.gfp_mask,
@@ -3417,11 +3093,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                 * Free memory by calling shrink zone with increasing
                 * priorities until we have enough memory freed.
                 */
-               priority = ZONE_RECLAIM_PRIORITY;
                do {
-                       shrink_zone(priority, zone, &sc);
-                       priority--;
-               } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
+                       shrink_zone(zone, &sc);
+               } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
        }
 
        nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
@@ -3536,7 +3210,7 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
        if (mapping_unevictable(page_mapping(page)))
                return 0;
 
-       if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
+       if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
                return 0;
 
        return 1;
@@ -3572,6 +3246,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
+               lruvec = mem_cgroup_page_lruvec(page, zone);
 
                if (!PageLRU(page) || !PageUnevictable(page))
                        continue;
@@ -3581,11 +3256,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 
                        VM_BUG_ON(PageActive(page));
                        ClearPageUnevictable(page);
-                       __dec_zone_state(zone, NR_UNEVICTABLE);
-                       lruvec = mem_cgroup_lru_move_lists(zone, page,
-                                               LRU_UNEVICTABLE, lru);
-                       list_move(&page->lru, &lruvec->lists[lru]);
-                       __inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+                       del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
+                       add_page_to_lru_list(page, lruvec, lru);
                        pgrescued++;
                }
        }
index 7db1b9bab4929d13b3b23dbe7b08c2c782e66ab1..1bbbbd9776ade1962a277c4e9c6d2161a2963c97 100644 (file)
@@ -613,6 +613,9 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
        "Reclaimable",
        "Movable",
        "Reserve",
+#ifdef CONFIG_CMA
+       "CMA",
+#endif
        "Isolate",
 };
 
@@ -1220,7 +1223,6 @@ module_init(setup_vmstat)
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
 #include <linux/debugfs.h>
 
-static struct dentry *extfrag_debug_root;
 
 /*
  * Return an index indicating how much of the available free memory is
@@ -1358,19 +1360,24 @@ static const struct file_operations extfrag_file_ops = {
 
 static int __init extfrag_debug_init(void)
 {
+       struct dentry *extfrag_debug_root;
+
        extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
        if (!extfrag_debug_root)
                return -ENOMEM;
 
        if (!debugfs_create_file("unusable_index", 0444,
                        extfrag_debug_root, NULL, &unusable_file_ops))
-               return -ENOMEM;
+               goto fail;
 
        if (!debugfs_create_file("extfrag_index", 0444,
                        extfrag_debug_root, NULL, &extfrag_file_ops))
-               return -ENOMEM;
+               goto fail;
 
        return 0;
+fail:
+       debugfs_remove_recursive(extfrag_debug_root);
+       return -ENOMEM;
 }
 
 module_init(extfrag_debug_init);
index aa5d73b786aca23793f7691bbab7f86ceb2e96d1..d1820ff14aee46cfc55cd1c169495004c3130818 100644 (file)
@@ -710,9 +710,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
                        break;
                }
 
-               tty_unlock(tty);
+               tty_unlock();
                schedule();
-               tty_lock(tty);
+               tty_lock();
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&dev->wait, &wait);
index 214c2bb43d6252056a7a77dcd2d8eb5877c719d1..925ca583c09c8eae2fbaebbd73603e9194132f97 100644 (file)
@@ -59,9 +59,7 @@ static int handle_reply(struct ceph_auth_client *ac, int result,
  */
 static int ceph_auth_none_create_authorizer(
        struct ceph_auth_client *ac, int peer_type,
-       struct ceph_authorizer **a,
-       void **buf, size_t *len,
-       void **reply_buf, size_t *reply_len)
+       struct ceph_auth_handshake *auth)
 {
        struct ceph_auth_none_info *ai = ac->private;
        struct ceph_none_authorizer *au = &ai->au;
@@ -82,11 +80,12 @@ static int ceph_auth_none_create_authorizer(
                dout("built authorizer len %d\n", au->buf_len);
        }
 
-       *a = (struct ceph_authorizer *)au;
-       *buf = au->buf;
-       *len = au->buf_len;
-       *reply_buf = au->reply_buf;
-       *reply_len = sizeof(au->reply_buf);
+       auth->authorizer = (struct ceph_authorizer *) au;
+       auth->authorizer_buf = au->buf;
+       auth->authorizer_buf_len = au->buf_len;
+       auth->authorizer_reply_buf = au->reply_buf;
+       auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+
        return 0;
 
 bad2:
index 1587dc6010c6276fd7c6e997bb7af6fe08d426be..a16bf14eb027cd4e765320f98b0fb378b5a5e7c4 100644 (file)
@@ -526,9 +526,7 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
 
 static int ceph_x_create_authorizer(
        struct ceph_auth_client *ac, int peer_type,
-       struct ceph_authorizer **a,
-       void **buf, size_t *len,
-       void **reply_buf, size_t *reply_len)
+       struct ceph_auth_handshake *auth)
 {
        struct ceph_x_authorizer *au;
        struct ceph_x_ticket_handler *th;
@@ -548,11 +546,12 @@ static int ceph_x_create_authorizer(
                return ret;
        }
 
-       *a = (struct ceph_authorizer *)au;
-       *buf = au->buf->vec.iov_base;
-       *len = au->buf->vec.iov_len;
-       *reply_buf = au->reply_buf;
-       *reply_len = sizeof(au->reply_buf);
+       auth->authorizer = (struct ceph_authorizer *) au;
+       auth->authorizer_buf = au->buf->vec.iov_base;
+       auth->authorizer_buf_len = au->buf->vec.iov_len;
+       auth->authorizer_reply_buf = au->reply_buf;
+       auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+
        return 0;
 }
 
index d6ebb13a18a4bc787eb249ad3e4b62ec1c174f97..089613234f032610c05f25a239c1d2053e768b45 100644 (file)
@@ -26,9 +26,9 @@ const char *crush_bucket_alg_name(int alg)
  * @b: bucket pointer
  * @p: item index in bucket
  */
-int crush_get_bucket_item_weight(struct crush_bucket *b, int p)
+int crush_get_bucket_item_weight(const struct crush_bucket *b, int p)
 {
-       if (p >= b->size)
+       if ((__u32)p >= b->size)
                return 0;
 
        switch (b->alg) {
@@ -37,38 +37,13 @@ int crush_get_bucket_item_weight(struct crush_bucket *b, int p)
        case CRUSH_BUCKET_LIST:
                return ((struct crush_bucket_list *)b)->item_weights[p];
        case CRUSH_BUCKET_TREE:
-               if (p & 1)
-                       return ((struct crush_bucket_tree *)b)->node_weights[p];
-               return 0;
+               return ((struct crush_bucket_tree *)b)->node_weights[crush_calc_tree_node(p)];
        case CRUSH_BUCKET_STRAW:
                return ((struct crush_bucket_straw *)b)->item_weights[p];
        }
        return 0;
 }
 
-/**
- * crush_calc_parents - Calculate parent vectors for the given crush map.
- * @map: crush_map pointer
- */
-void crush_calc_parents(struct crush_map *map)
-{
-       int i, b, c;
-
-       for (b = 0; b < map->max_buckets; b++) {
-               if (map->buckets[b] == NULL)
-                       continue;
-               for (i = 0; i < map->buckets[b]->size; i++) {
-                       c = map->buckets[b]->items[i];
-                       BUG_ON(c >= map->max_devices ||
-                              c < -map->max_buckets);
-                       if (c >= 0)
-                               map->device_parents[c] = map->buckets[b]->id;
-                       else
-                               map->bucket_parents[-1-c] = map->buckets[b]->id;
-               }
-       }
-}
-
 void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b)
 {
        kfree(b->h.perm);
@@ -87,6 +62,8 @@ void crush_destroy_bucket_list(struct crush_bucket_list *b)
 
 void crush_destroy_bucket_tree(struct crush_bucket_tree *b)
 {
+       kfree(b->h.perm);
+       kfree(b->h.items);
        kfree(b->node_weights);
        kfree(b);
 }
@@ -124,10 +101,9 @@ void crush_destroy_bucket(struct crush_bucket *b)
  */
 void crush_destroy(struct crush_map *map)
 {
-       int b;
-
        /* buckets */
        if (map->buckets) {
+               __s32 b;
                for (b = 0; b < map->max_buckets; b++) {
                        if (map->buckets[b] == NULL)
                                continue;
@@ -138,13 +114,12 @@ void crush_destroy(struct crush_map *map)
 
        /* rules */
        if (map->rules) {
+               __u32 b;
                for (b = 0; b < map->max_rules; b++)
                        kfree(map->rules[b]);
                kfree(map->rules);
        }
 
-       kfree(map->bucket_parents);
-       kfree(map->device_parents);
        kfree(map);
 }
 
index 363f8f7e6c3caa15fa03d0bcea1967b731f2ae1d..d7edc24333b84d5aab17da2d983878ff5044b2bb 100644 (file)
@@ -33,9 +33,9 @@
  * @type: storage ruleset type (user defined)
  * @size: output set size
  */
-int crush_find_rule(struct crush_map *map, int ruleset, int type, int size)
+int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size)
 {
-       int i;
+       __u32 i;
 
        for (i = 0; i < map->max_rules; i++) {
                if (map->rules[i] &&
@@ -73,7 +73,7 @@ static int bucket_perm_choose(struct crush_bucket *bucket,
        unsigned int i, s;
 
        /* start a new permutation if @x has changed */
-       if (bucket->perm_x != x || bucket->perm_n == 0) {
+       if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) {
                dprintk("bucket %d new x=%d\n", bucket->id, x);
                bucket->perm_x = x;
 
@@ -153,8 +153,8 @@ static int bucket_list_choose(struct crush_bucket_list *bucket,
                        return bucket->h.items[i];
        }
 
-       BUG_ON(1);
-       return 0;
+       dprintk("bad list sums for bucket %d\n", bucket->h.id);
+       return bucket->h.items[0];
 }
 
 
@@ -220,7 +220,7 @@ static int bucket_tree_choose(struct crush_bucket_tree *bucket,
 static int bucket_straw_choose(struct crush_bucket_straw *bucket,
                               int x, int r)
 {
-       int i;
+       __u32 i;
        int high = 0;
        __u64 high_draw = 0;
        __u64 draw;
@@ -240,6 +240,7 @@ static int bucket_straw_choose(struct crush_bucket_straw *bucket,
 static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
 {
        dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
+       BUG_ON(in->size == 0);
        switch (in->alg) {
        case CRUSH_BUCKET_UNIFORM:
                return bucket_uniform_choose((struct crush_bucket_uniform *)in,
@@ -254,7 +255,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
                return bucket_straw_choose((struct crush_bucket_straw *)in,
                                           x, r);
        default:
-               BUG_ON(1);
+               dprintk("unknown bucket %d alg %d\n", in->id, in->alg);
                return in->items[0];
        }
 }
@@ -263,7 +264,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
  * true if device is marked "out" (failed, fully offloaded)
  * of the cluster
  */
-static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
+static int is_out(const struct crush_map *map, const __u32 *weight, int item, int x)
 {
        if (weight[item] >= 0x10000)
                return 0;
@@ -288,16 +289,16 @@ static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
  * @recurse_to_leaf: true if we want one device under each item of given type
  * @out2: second output vector for leaf items (if @recurse_to_leaf)
  */
-static int crush_choose(struct crush_map *map,
+static int crush_choose(const struct crush_map *map,
                        struct crush_bucket *bucket,
-                       __u32 *weight,
+                       const __u32 *weight,
                        int x, int numrep, int type,
                        int *out, int outpos,
                        int firstn, int recurse_to_leaf,
                        int *out2)
 {
        int rep;
-       int ftotal, flocal;
+       unsigned int ftotal, flocal;
        int retry_descent, retry_bucket, skip_rep;
        struct crush_bucket *in = bucket;
        int r;
@@ -305,7 +306,7 @@ static int crush_choose(struct crush_map *map,
        int item = 0;
        int itemtype;
        int collide, reject;
-       const int orig_tries = 5; /* attempts before we fall back to search */
+       const unsigned int orig_tries = 5; /* attempts before we fall back to search */
 
        dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
                bucket->id, x, outpos, numrep);
@@ -326,7 +327,7 @@ static int crush_choose(struct crush_map *map,
                                r = rep;
                                if (in->alg == CRUSH_BUCKET_UNIFORM) {
                                        /* be careful */
-                                       if (firstn || numrep >= in->size)
+                                       if (firstn || (__u32)numrep >= in->size)
                                                /* r' = r + f_total */
                                                r += ftotal;
                                        else if (in->size % numrep == 0)
@@ -355,7 +356,11 @@ static int crush_choose(struct crush_map *map,
                                        item = bucket_perm_choose(in, x, r);
                                else
                                        item = crush_bucket_choose(in, x, r);
-                               BUG_ON(item >= map->max_devices);
+                               if (item >= map->max_devices) {
+                                       dprintk("   bad item %d\n", item);
+                                       skip_rep = 1;
+                                       break;
+                               }
 
                                /* desired type? */
                                if (item < 0)
@@ -366,8 +371,12 @@ static int crush_choose(struct crush_map *map,
 
                                /* keep going? */
                                if (itemtype != type) {
-                                       BUG_ON(item >= 0 ||
-                                              (-1-item) >= map->max_buckets);
+                                       if (item >= 0 ||
+                                           (-1-item) >= map->max_buckets) {
+                                               dprintk("   bad item type %d\n", type);
+                                               skip_rep = 1;
+                                               break;
+                                       }
                                        in = map->buckets[-1-item];
                                        retry_bucket = 1;
                                        continue;
@@ -416,7 +425,7 @@ reject:
                                        if (collide && flocal < 3)
                                                /* retry locally a few times */
                                                retry_bucket = 1;
-                                       else if (flocal < in->size + orig_tries)
+                                       else if (flocal <= in->size + orig_tries)
                                                /* exhaustive bucket search */
                                                retry_bucket = 1;
                                        else if (ftotal < 20)
@@ -426,7 +435,7 @@ reject:
                                                /* else give up */
                                                skip_rep = 1;
                                        dprintk("  reject %d  collide %d  "
-                                               "ftotal %d  flocal %d\n",
+                                               "ftotal %u  flocal %u\n",
                                                reject, collide, ftotal,
                                                flocal);
                                }
@@ -455,15 +464,12 @@ reject:
  * @x: hash input
  * @result: pointer to result vector
  * @result_max: maximum result size
- * @force: force initial replica choice; -1 for none
  */
-int crush_do_rule(struct crush_map *map,
+int crush_do_rule(const struct crush_map *map,
                  int ruleno, int x, int *result, int result_max,
-                 int force, __u32 *weight)
+                 const __u32 *weight)
 {
        int result_len;
-       int force_context[CRUSH_MAX_DEPTH];
-       int force_pos = -1;
        int a[CRUSH_MAX_SET];
        int b[CRUSH_MAX_SET];
        int c[CRUSH_MAX_SET];
@@ -474,66 +480,44 @@ int crush_do_rule(struct crush_map *map,
        int osize;
        int *tmp;
        struct crush_rule *rule;
-       int step;
+       __u32 step;
        int i, j;
        int numrep;
        int firstn;
 
-       BUG_ON(ruleno >= map->max_rules);
+       if ((__u32)ruleno >= map->max_rules) {
+               dprintk(" bad ruleno %d\n", ruleno);
+               return 0;
+       }
 
        rule = map->rules[ruleno];
        result_len = 0;
        w = a;
        o = b;
 
-       /*
-        * determine hierarchical context of force, if any.  note
-        * that this may or may not correspond to the specific types
-        * referenced by the crush rule.
-        */
-       if (force >= 0 &&
-           force < map->max_devices &&
-           map->device_parents[force] != 0 &&
-           !is_out(map, weight, force, x)) {
-               while (1) {
-                       force_context[++force_pos] = force;
-                       if (force >= 0)
-                               force = map->device_parents[force];
-                       else
-                               force = map->bucket_parents[-1-force];
-                       if (force == 0)
-                               break;
-               }
-       }
-
        for (step = 0; step < rule->len; step++) {
+               struct crush_rule_step *curstep = &rule->steps[step];
+
                firstn = 0;
-               switch (rule->steps[step].op) {
+               switch (curstep->op) {
                case CRUSH_RULE_TAKE:
-                       w[0] = rule->steps[step].arg1;
-
-                       /* find position in force_context/hierarchy */
-                       while (force_pos >= 0 &&
-                              force_context[force_pos] != w[0])
-                               force_pos--;
-                       /* and move past it */
-                       if (force_pos >= 0)
-                               force_pos--;
-
+                       w[0] = curstep->arg1;
                        wsize = 1;
                        break;
 
                case CRUSH_RULE_CHOOSE_LEAF_FIRSTN:
                case CRUSH_RULE_CHOOSE_FIRSTN:
                        firstn = 1;
+                       /* fall through */
                case CRUSH_RULE_CHOOSE_LEAF_INDEP:
                case CRUSH_RULE_CHOOSE_INDEP:
-                       BUG_ON(wsize == 0);
+                       if (wsize == 0)
+                               break;
 
                        recurse_to_leaf =
-                               rule->steps[step].op ==
+                               curstep->op ==
                                 CRUSH_RULE_CHOOSE_LEAF_FIRSTN ||
-                               rule->steps[step].op ==
+                               curstep->op ==
                                CRUSH_RULE_CHOOSE_LEAF_INDEP;
 
                        /* reset output */
@@ -545,32 +529,18 @@ int crush_do_rule(struct crush_map *map,
                                 * basically, numrep <= 0 means relative to
                                 * the provided result_max
                                 */
-                               numrep = rule->steps[step].arg1;
+                               numrep = curstep->arg1;
                                if (numrep <= 0) {
                                        numrep += result_max;
                                        if (numrep <= 0)
                                                continue;
                                }
                                j = 0;
-                               if (osize == 0 && force_pos >= 0) {
-                                       /* skip any intermediate types */
-                                       while (force_pos &&
-                                              force_context[force_pos] < 0 &&
-                                              rule->steps[step].arg2 !=
-                                              map->buckets[-1 -
-                                              force_context[force_pos]]->type)
-                                               force_pos--;
-                                       o[osize] = force_context[force_pos];
-                                       if (recurse_to_leaf)
-                                               c[osize] = force_context[0];
-                                       j++;
-                                       force_pos--;
-                               }
                                osize += crush_choose(map,
                                                      map->buckets[-1-w[i]],
                                                      weight,
                                                      x, numrep,
-                                                     rule->steps[step].arg2,
+                                                     curstep->arg2,
                                                      o+osize, j,
                                                      firstn,
                                                      recurse_to_leaf, c+osize);
@@ -597,7 +567,9 @@ int crush_do_rule(struct crush_map *map,
                        break;
 
                default:
-                       BUG_ON(1);
+                       dprintk(" unknown op %d at step %d\n",
+                               curstep->op, step);
+                       break;
                }
        }
        return result_len;
index 36fa6bf684981688ff95c22788acb8db721271de..524f4e4f598b845a7242c0243efb1a4e6a843955 100644 (file)
@@ -653,54 +653,57 @@ static void prepare_write_keepalive(struct ceph_connection *con)
  * Connection negotiation.
  */
 
-static int prepare_connect_authorizer(struct ceph_connection *con)
+static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
+                                               int *auth_proto)
 {
-       void *auth_buf;
-       int auth_len = 0;
-       int auth_protocol = 0;
+       struct ceph_auth_handshake *auth;
+
+       if (!con->ops->get_authorizer) {
+               con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
+               con->out_connect.authorizer_len = 0;
+
+               return NULL;
+       }
+
+       /* Can't hold the mutex while getting authorizer */
 
        mutex_unlock(&con->mutex);
-       if (con->ops->get_authorizer)
-               con->ops->get_authorizer(con, &auth_buf, &auth_len,
-                                        &auth_protocol, &con->auth_reply_buf,
-                                        &con->auth_reply_buf_len,
-                                        con->auth_retry);
+
+       auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
+
        mutex_lock(&con->mutex);
 
-       if (test_bit(CLOSED, &con->state) ||
-           test_bit(OPENING, &con->state))
-               return -EAGAIN;
+       if (IS_ERR(auth))
+               return auth;
+       if (test_bit(CLOSED, &con->state) || test_bit(OPENING, &con->state))
+               return ERR_PTR(-EAGAIN);
 
-       con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
-       con->out_connect.authorizer_len = cpu_to_le32(auth_len);
+       con->auth_reply_buf = auth->authorizer_reply_buf;
+       con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
 
-       if (auth_len)
-               ceph_con_out_kvec_add(con, auth_len, auth_buf);
 
-       return 0;
+       return auth;
 }
 
 /*
  * We connected to a peer and are saying hello.
  */
-static void prepare_write_banner(struct ceph_messenger *msgr,
-                                struct ceph_connection *con)
+static void prepare_write_banner(struct ceph_connection *con)
 {
-       ceph_con_out_kvec_reset(con);
        ceph_con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
-       ceph_con_out_kvec_add(con, sizeof (msgr->my_enc_addr),
-                                       &msgr->my_enc_addr);
+       ceph_con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
+                                       &con->msgr->my_enc_addr);
 
        con->out_more = 0;
        set_bit(WRITE_PENDING, &con->state);
 }
 
-static int prepare_write_connect(struct ceph_messenger *msgr,
-                                struct ceph_connection *con,
-                                int include_banner)
+static int prepare_write_connect(struct ceph_connection *con)
 {
        unsigned int global_seq = get_global_seq(con->msgr, 0);
        int proto;
+       int auth_proto;
+       struct ceph_auth_handshake *auth;
 
        switch (con->peer_name.type) {
        case CEPH_ENTITY_TYPE_MON:
@@ -719,23 +722,32 @@ static int prepare_write_connect(struct ceph_messenger *msgr,
        dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
             con->connect_seq, global_seq, proto);
 
-       con->out_connect.features = cpu_to_le64(msgr->supported_features);
+       con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
        con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
        con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
        con->out_connect.global_seq = cpu_to_le32(global_seq);
        con->out_connect.protocol_version = cpu_to_le32(proto);
        con->out_connect.flags = 0;
 
-       if (include_banner)
-               prepare_write_banner(msgr, con);
-       else
-               ceph_con_out_kvec_reset(con);
-       ceph_con_out_kvec_add(con, sizeof (con->out_connect), &con->out_connect);
+       auth_proto = CEPH_AUTH_UNKNOWN;
+       auth = get_connect_authorizer(con, &auth_proto);
+       if (IS_ERR(auth))
+               return PTR_ERR(auth);
+
+       con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
+       con->out_connect.authorizer_len = auth ?
+               cpu_to_le32(auth->authorizer_buf_len) : 0;
+
+       ceph_con_out_kvec_add(con, sizeof (con->out_connect),
+                                       &con->out_connect);
+       if (auth && auth->authorizer_buf_len)
+               ceph_con_out_kvec_add(con, auth->authorizer_buf_len,
+                                       auth->authorizer_buf);
 
        con->out_more = 0;
        set_bit(WRITE_PENDING, &con->state);
 
-       return prepare_connect_authorizer(con);
+       return 0;
 }
 
 /*
@@ -992,11 +1004,10 @@ static int prepare_read_message(struct ceph_connection *con)
 
 
 static int read_partial(struct ceph_connection *con,
-                       int *to, int size, void *object)
+                       int end, int size, void *object)
 {
-       *to += size;
-       while (con->in_base_pos < *to) {
-               int left = *to - con->in_base_pos;
+       while (con->in_base_pos < end) {
+               int left = end - con->in_base_pos;
                int have = size - left;
                int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
                if (ret <= 0)
@@ -1012,37 +1023,52 @@ static int read_partial(struct ceph_connection *con,
  */
 static int read_partial_banner(struct ceph_connection *con)
 {
-       int ret, to = 0;
+       int size;
+       int end;
+       int ret;
 
        dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
 
        /* peer's banner */
-       ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
+       size = strlen(CEPH_BANNER);
+       end = size;
+       ret = read_partial(con, end, size, con->in_banner);
        if (ret <= 0)
                goto out;
-       ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
-                          &con->actual_peer_addr);
+
+       size = sizeof (con->actual_peer_addr);
+       end += size;
+       ret = read_partial(con, end, size, &con->actual_peer_addr);
        if (ret <= 0)
                goto out;
-       ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
-                          &con->peer_addr_for_me);
+
+       size = sizeof (con->peer_addr_for_me);
+       end += size;
+       ret = read_partial(con, end, size, &con->peer_addr_for_me);
        if (ret <= 0)
                goto out;
+
 out:
        return ret;
 }
 
 static int read_partial_connect(struct ceph_connection *con)
 {
-       int ret, to = 0;
+       int size;
+       int end;
+       int ret;
 
        dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
 
-       ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
+       size = sizeof (con->in_reply);
+       end = size;
+       ret = read_partial(con, end, size, &con->in_reply);
        if (ret <= 0)
                goto out;
-       ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
-                          con->auth_reply_buf);
+
+       size = le32_to_cpu(con->in_reply.authorizer_len);
+       end += size;
+       ret = read_partial(con, end, size, con->auth_reply_buf);
        if (ret <= 0)
                goto out;
 
@@ -1377,7 +1403,8 @@ static int process_connect(struct ceph_connection *con)
                        return -1;
                }
                con->auth_retry = 1;
-               ret = prepare_write_connect(con->msgr, con, 0);
+               ceph_con_out_kvec_reset(con);
+               ret = prepare_write_connect(con);
                if (ret < 0)
                        return ret;
                prepare_read_connect(con);
@@ -1397,7 +1424,10 @@ static int process_connect(struct ceph_connection *con)
                       ENTITY_NAME(con->peer_name),
                       ceph_pr_addr(&con->peer_addr.in_addr));
                reset_connection(con);
-               prepare_write_connect(con->msgr, con, 0);
+               ceph_con_out_kvec_reset(con);
+               ret = prepare_write_connect(con);
+               if (ret < 0)
+                       return ret;
                prepare_read_connect(con);
 
                /* Tell ceph about it. */
@@ -1420,7 +1450,10 @@ static int process_connect(struct ceph_connection *con)
                     le32_to_cpu(con->out_connect.connect_seq),
                     le32_to_cpu(con->in_connect.connect_seq));
                con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
-               prepare_write_connect(con->msgr, con, 0);
+               ceph_con_out_kvec_reset(con);
+               ret = prepare_write_connect(con);
+               if (ret < 0)
+                       return ret;
                prepare_read_connect(con);
                break;
 
@@ -1434,7 +1467,10 @@ static int process_connect(struct ceph_connection *con)
                     le32_to_cpu(con->in_connect.global_seq));
                get_global_seq(con->msgr,
                               le32_to_cpu(con->in_connect.global_seq));
-               prepare_write_connect(con->msgr, con, 0);
+               ceph_con_out_kvec_reset(con);
+               ret = prepare_write_connect(con);
+               if (ret < 0)
+                       return ret;
                prepare_read_connect(con);
                break;
 
@@ -1491,10 +1527,10 @@ static int process_connect(struct ceph_connection *con)
  */
 static int read_partial_ack(struct ceph_connection *con)
 {
-       int to = 0;
+       int size = sizeof (con->in_temp_ack);
+       int end = size;
 
-       return read_partial(con, &to, sizeof(con->in_temp_ack),
-                           &con->in_temp_ack);
+       return read_partial(con, end, size, &con->in_temp_ack);
 }
 
 
@@ -1627,8 +1663,9 @@ static int read_partial_message_bio(struct ceph_connection *con,
 static int read_partial_message(struct ceph_connection *con)
 {
        struct ceph_msg *m = con->in_msg;
+       int size;
+       int end;
        int ret;
-       int to, left;
        unsigned int front_len, middle_len, data_len;
        bool do_datacrc = !con->msgr->nocrc;
        int skip;
@@ -1638,15 +1675,11 @@ static int read_partial_message(struct ceph_connection *con)
        dout("read_partial_message con %p msg %p\n", con, m);
 
        /* header */
-       while (con->in_base_pos < sizeof(con->in_hdr)) {
-               left = sizeof(con->in_hdr) - con->in_base_pos;
-               ret = ceph_tcp_recvmsg(con->sock,
-                                      (char *)&con->in_hdr + con->in_base_pos,
-                                      left);
-               if (ret <= 0)
-                       return ret;
-               con->in_base_pos += ret;
-       }
+       size = sizeof (con->in_hdr);
+       end = size;
+       ret = read_partial(con, end, size, &con->in_hdr);
+       if (ret <= 0)
+               return ret;
 
        crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
        if (cpu_to_le32(crc) != con->in_hdr.crc) {
@@ -1759,16 +1792,12 @@ static int read_partial_message(struct ceph_connection *con)
        }
 
        /* footer */
-       to = sizeof(m->hdr) + sizeof(m->footer);
-       while (con->in_base_pos < to) {
-               left = to - con->in_base_pos;
-               ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
-                                      (con->in_base_pos - sizeof(m->hdr)),
-                                      left);
-               if (ret <= 0)
-                       return ret;
-               con->in_base_pos += ret;
-       }
+       size = sizeof (m->footer);
+       end += size;
+       ret = read_partial(con, end, size, &m->footer);
+       if (ret <= 0)
+               return ret;
+
        dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
             m, front_len, m->footer.front_crc, middle_len,
             m->footer.middle_crc, data_len, m->footer.data_crc);
@@ -1835,7 +1864,6 @@ static void process_message(struct ceph_connection *con)
  */
 static int try_write(struct ceph_connection *con)
 {
-       struct ceph_messenger *msgr = con->msgr;
        int ret = 1;
 
        dout("try_write start %p state %lu nref %d\n", con, con->state,
@@ -1846,7 +1874,11 @@ more:
 
        /* open the socket first? */
        if (con->sock == NULL) {
-               prepare_write_connect(msgr, con, 1);
+               ceph_con_out_kvec_reset(con);
+               prepare_write_banner(con);
+               ret = prepare_write_connect(con);
+               if (ret < 0)
+                       goto out;
                prepare_read_banner(con);
                set_bit(CONNECTING, &con->state);
                clear_bit(NEGOTIATING, &con->state);
index 1b0ef3c4d393c5221d30c15eb24b935ee292e3fc..1ffebed5ce0f9a629ad2733349b8e33c326850d5 100644 (file)
@@ -278,7 +278,7 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
 {
        dst->op = cpu_to_le16(src->op);
 
-       switch (dst->op) {
+       switch (src->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
                dst->extent.offset =
@@ -664,11 +664,11 @@ static void put_osd(struct ceph_osd *osd)
 {
        dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
             atomic_read(&osd->o_ref) - 1);
-       if (atomic_dec_and_test(&osd->o_ref)) {
+       if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
                struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
 
-               if (osd->o_authorizer)
-                       ac->ops->destroy_authorizer(ac, osd->o_authorizer);
+               if (ac->ops && ac->ops->destroy_authorizer)
+                       ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer);
                kfree(osd);
        }
 }
@@ -841,6 +841,12 @@ static void register_request(struct ceph_osd_client *osdc,
 static void __unregister_request(struct ceph_osd_client *osdc,
                                 struct ceph_osd_request *req)
 {
+       if (RB_EMPTY_NODE(&req->r_node)) {
+               dout("__unregister_request %p tid %lld not registered\n",
+                       req, req->r_tid);
+               return;
+       }
+
        dout("__unregister_request %p tid %lld\n", req, req->r_tid);
        rb_erase(&req->r_node, &osdc->requests);
        osdc->num_requests--;
@@ -2108,37 +2114,32 @@ static void put_osd_con(struct ceph_connection *con)
 /*
  * authentication
  */
-static int get_authorizer(struct ceph_connection *con,
-                         void **buf, int *len, int *proto,
-                         void **reply_buf, int *reply_len, int force_new)
+/*
+ * Note: returned pointer is the address of a structure that's
+ * managed separately.  Caller must *not* attempt to free it.
+ */
+static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
+                                       int *proto, int force_new)
 {
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;
-       int ret = 0;
+       struct ceph_auth_handshake *auth = &o->o_auth;
 
-       if (force_new && o->o_authorizer) {
-               ac->ops->destroy_authorizer(ac, o->o_authorizer);
-               o->o_authorizer = NULL;
-       }
-       if (o->o_authorizer == NULL) {
-               ret = ac->ops->create_authorizer(
-                       ac, CEPH_ENTITY_TYPE_OSD,
-                       &o->o_authorizer,
-                       &o->o_authorizer_buf,
-                       &o->o_authorizer_buf_len,
-                       &o->o_authorizer_reply_buf,
-                       &o->o_authorizer_reply_buf_len);
+       if (force_new && auth->authorizer) {
+               if (ac->ops && ac->ops->destroy_authorizer)
+                       ac->ops->destroy_authorizer(ac, auth->authorizer);
+               auth->authorizer = NULL;
+       }
+       if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
+               int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
+                                                       auth);
                if (ret)
-                       return ret;
+                       return ERR_PTR(ret);
        }
-
        *proto = ac->protocol;
-       *buf = o->o_authorizer_buf;
-       *len = o->o_authorizer_buf_len;
-       *reply_buf = o->o_authorizer_reply_buf;
-       *reply_len = o->o_authorizer_reply_buf_len;
-       return 0;
+
+       return auth;
 }
 
 
@@ -2148,7 +2149,11 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len)
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;
 
-       return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len);
+       /*
+        * XXX If ac->ops or ac->ops->verify_authorizer_reply is null,
+        * XXX which do we do:  succeed or fail?
+        */
+       return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len);
 }
 
 static int invalidate_authorizer(struct ceph_connection *con)
@@ -2157,7 +2162,7 @@ static int invalidate_authorizer(struct ceph_connection *con)
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;
 
-       if (ac->ops->invalidate_authorizer)
+       if (ac->ops && ac->ops->invalidate_authorizer)
                ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
 
        return ceph_monc_validate_auth(&osdc->client->monc);
index 56e561a690044ee88b7fe22d4ed51c09910c8e99..81e3b84a77efdecb6c44603e7784a083fe94b980 100644 (file)
@@ -161,13 +161,6 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
        c->max_rules = ceph_decode_32(p);
        c->max_devices = ceph_decode_32(p);
 
-       c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
-       if (c->device_parents == NULL)
-               goto badmem;
-       c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
-       if (c->bucket_parents == NULL)
-               goto badmem;
-
        c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
        if (c->buckets == NULL)
                goto badmem;
@@ -890,8 +883,12 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                pglen = ceph_decode_32(p);
 
                if (pglen) {
-                       /* insert */
                        ceph_decode_need(p, end, pglen*sizeof(u32), bad);
+
+                       /* removing existing (if any) */
+                       (void) __remove_pg_mapping(&map->pg_temp, pgid);
+
+                       /* insert */
                        pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
                        if (!pg) {
                                err = -ENOMEM;
@@ -1000,7 +997,6 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol,
 {
        unsigned int num, num_mask;
        struct ceph_pg pgid;
-       s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
        int poolid = le32_to_cpu(fl->fl_pg_pool);
        struct ceph_pg_pool_info *pool;
        unsigned int ps;
@@ -1011,23 +1007,13 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol,
        if (!pool)
                return -EIO;
        ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
-       if (preferred >= 0) {
-               ps += preferred;
-               num = le32_to_cpu(pool->v.lpg_num);
-               num_mask = pool->lpg_num_mask;
-       } else {
-               num = le32_to_cpu(pool->v.pg_num);
-               num_mask = pool->pg_num_mask;
-       }
+       num = le32_to_cpu(pool->v.pg_num);
+       num_mask = pool->pg_num_mask;
 
        pgid.ps = cpu_to_le16(ps);
-       pgid.preferred = cpu_to_le16(preferred);
+       pgid.preferred = cpu_to_le16(-1);
        pgid.pool = fl->fl_pg_pool;
-       if (preferred >= 0)
-               dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
-                    (int)preferred);
-       else
-               dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
+       dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
 
        ol->ol_pgid = pgid;
        ol->ol_stripe_unit = fl->fl_object_stripe_unit;
@@ -1045,24 +1031,18 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
        struct ceph_pg_mapping *pg;
        struct ceph_pg_pool_info *pool;
        int ruleno;
-       unsigned int poolid, ps, pps, t;
-       int preferred;
+       unsigned int poolid, ps, pps, t, r;
 
        poolid = le32_to_cpu(pgid.pool);
        ps = le16_to_cpu(pgid.ps);
-       preferred = (s16)le16_to_cpu(pgid.preferred);
 
        pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
        if (!pool)
                return NULL;
 
        /* pg_temp? */
-       if (preferred >= 0)
-               t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num),
-                                   pool->lpgp_num_mask);
-       else
-               t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
-                                   pool->pgp_num_mask);
+       t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
+                           pool->pgp_num_mask);
        pgid.ps = cpu_to_le16(t);
        pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
        if (pg) {
@@ -1080,23 +1060,20 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
                return NULL;
        }
 
-       /* don't forcefeed bad device ids to crush */
-       if (preferred >= osdmap->max_osd ||
-           preferred >= osdmap->crush->max_devices)
-               preferred = -1;
-
-       if (preferred >= 0)
-               pps = ceph_stable_mod(ps,
-                                     le32_to_cpu(pool->v.lpgp_num),
-                                     pool->lpgp_num_mask);
-       else
-               pps = ceph_stable_mod(ps,
-                                     le32_to_cpu(pool->v.pgp_num),
-                                     pool->pgp_num_mask);
+       pps = ceph_stable_mod(ps,
+                             le32_to_cpu(pool->v.pgp_num),
+                             pool->pgp_num_mask);
        pps += poolid;
-       *num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
-                            min_t(int, pool->v.size, *num),
-                            preferred, osdmap->osd_weight);
+       r = crush_do_rule(osdmap->crush, ruleno, pps, osds,
+                         min_t(int, pool->v.size, *num),
+                         osdmap->osd_weight);
+       if (r < 0) {
+               pr_err("error %d from crush rule: pool %d ruleset %d type %d"
+                      " size %d\n", r, poolid, pool->v.crush_ruleset,
+                      pool->v.type, pool->v.size);
+               return NULL;
+       }
+       *num = r;
        return osds;
 }
 
index 3252e7e0a0055ad07fa1c02979500650703f6820..ea5fb9fcc3f5937777db311ea88a75ae3f4b81f4 100644 (file)
@@ -468,3 +468,4 @@ module_exit(exit_net_drop_monitor);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
+MODULE_ALIAS_GENL_FAMILY("NET_DM");
index 653f8c0aedc54aafb08c6f451157f7ca7e432efa..9e5b71fda6ec0d726bd356bce1658f55091ebcf8 100644 (file)
@@ -1592,6 +1592,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
        gfp_t gfp_mask;
        long timeo;
        int err;
+       int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+       err = -EMSGSIZE;
+       if (npages > MAX_SKB_FRAGS)
+               goto failure;
 
        gfp_mask = sk->sk_allocation;
        if (gfp_mask & __GFP_WAIT)
@@ -1610,14 +1615,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
                        skb = alloc_skb(header_len, gfp_mask);
                        if (skb) {
-                               int npages;
                                int i;
 
                                /* No pages, we're done... */
                                if (!data_len)
                                        break;
 
-                               npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                                skb->truesize += data_len;
                                skb_shinfo(skb)->nr_frags = npages;
                                for (i = 0; i < npages; i++) {
index 89a47b35905dcc6e1c3bb94b0db7c6a32a61e8fc..cb982a61536fade811908a18e6119f513914741e 100644 (file)
@@ -459,28 +459,22 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
        struct esp_data *esp = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
        u32 align = max_t(u32, blksize, esp->padlen);
-       u32 rem;
-
-       mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
-       rem = mtu & (align - 1);
-       mtu &= ~(align - 1);
+       unsigned int net_adj;
 
        switch (x->props.mode) {
-       case XFRM_MODE_TUNNEL:
-               break;
-       default:
        case XFRM_MODE_TRANSPORT:
-               /* The worst case */
-               mtu -= blksize - 4;
-               mtu += min_t(u32, blksize - 4, rem);
-               break;
        case XFRM_MODE_BEET:
-               /* The worst case. */
-               mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
+               net_adj = sizeof(struct iphdr);
                break;
+       case XFRM_MODE_TUNNEL:
+               net_adj = 0;
+               break;
+       default:
+               BUG();
        }
 
-       return mtu - 2;
+       return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+                net_adj) & ~(align - 1)) + (net_adj - 2);
 }
 
 static void esp4_err(struct sk_buff *skb, u32 info)
index 95e61596e605db31b3212328781fcbd7ea2f8dae..f9ee7417f6a024b9357e84335c04b2cec80ae1e9 100644 (file)
@@ -377,7 +377,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
 
        flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
-                          sk->sk_protocol, inet_sk_flowi_flags(sk),
+                          sk->sk_protocol,
+                          inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS,
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
                           ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
index a43b87dfe800c043c7fcc65af316b4d193ab3455..c8d28c433b2b0dc958f7bdebaa77f2b899dfd22e 100644 (file)
@@ -824,7 +824,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                              struct request_sock *req,
-                             struct request_values *rvp)
+                             struct request_values *rvp,
+                             u16 queue_mapping)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct flowi4 fl4;
@@ -840,6 +841,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
        if (skb) {
                __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
 
+               skb_set_queue_mapping(skb, queue_mapping);
                err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
                                            ireq->rmt_addr,
                                            ireq->opt);
@@ -854,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
                              struct request_values *rvp)
 {
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-       return tcp_v4_send_synack(sk, NULL, req, rvp);
+       return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
 }
 
 /*
@@ -1422,7 +1424,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
        if (tcp_v4_send_synack(sk, dst, req,
-                              (struct request_values *)&tmp_ext) ||
+                              (struct request_values *)&tmp_ext,
+                              skb_get_queue_mapping(skb)) ||
            want_cookie)
                goto drop_and_free;
 
index 151703791bb0d43818700349fb0a585736c7dc19..b6f3583ddfe83eae73370c9070c567e452518f57 100644 (file)
@@ -74,9 +74,6 @@ void tcp_destroy_cgroup(struct mem_cgroup *memcg)
        percpu_counter_destroy(&tcp->tcp_sockets_allocated);
 
        val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
-
-       if (val != RESOURCE_MAX)
-               static_key_slow_dec(&memcg_socket_limit_enabled);
 }
 EXPORT_SYMBOL(tcp_destroy_cgroup);
 
@@ -107,10 +104,33 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
                tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
                                             net->ipv4.sysctl_tcp_mem[i]);
 
-       if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX)
-               static_key_slow_dec(&memcg_socket_limit_enabled);
-       else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX)
-               static_key_slow_inc(&memcg_socket_limit_enabled);
+       if (val == RESOURCE_MAX)
+               clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+       else if (val != RESOURCE_MAX) {
+               /*
+                * The active bit needs to be written after the static_key
+                * update. This is what guarantees that the socket activation
+                * function is the last one to run. See sock_update_memcg() for
+                * details, and note that we don't mark any socket as belonging
+                * to this memcg until that flag is up.
+                *
+                * We need to do this, because static_keys will span multiple
+                * sites, but we can't control their order. If we mark a socket
+                * as accounted, but the accounting functions are not patched in
+                * yet, we'll lose accounting.
+                *
+                * We never race with the readers in sock_update_memcg(),
+                * because when this value change, the code to process it is not
+                * patched in yet.
+                *
+                * The activated bit is used to guarantee that no two writers
+                * will do the update in the same memcg. Without that, we can't
+                * properly shutdown the static key.
+                */
+               if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
+                       static_key_slow_inc(&memcg_socket_limit_enabled);
+               set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+       }
 
        return 0;
 }
index 1e62b7557b00e1e0897f480d9391d018dfd01dcb..db1521fcda5b3fd182a3068c9b86d5161e5e4d56 100644 (file)
@@ -413,19 +413,15 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
        struct esp_data *esp = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
        u32 align = max_t(u32, blksize, esp->padlen);
-       u32 rem;
+       unsigned int net_adj;
 
-       mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
-       rem = mtu & (align - 1);
-       mtu &= ~(align - 1);
-
-       if (x->props.mode != XFRM_MODE_TUNNEL) {
-               u32 padsize = ((blksize - 1) & 7) + 1;
-               mtu -= blksize - padsize;
-               mtu += min_t(u32, blksize - padsize, rem);
-       }
+       if (x->props.mode != XFRM_MODE_TUNNEL)
+               net_adj = sizeof(struct ipv6hdr);
+       else
+               net_adj = 0;
 
-       return mtu - 2;
+       return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+                net_adj) & ~(align - 1)) + (net_adj - 2);
 }
 
 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
index d99fdc699625ca34252a33e84e21d8179e0fad7a..17b8c67998bb80dc5e7052af210c7b64aa1471ee 100644 (file)
@@ -1187,6 +1187,29 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
        return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
 }
 
+static void ip6_append_data_mtu(int *mtu,
+                               int *maxfraglen,
+                               unsigned int fragheaderlen,
+                               struct sk_buff *skb,
+                               struct rt6_info *rt)
+{
+       if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
+               if (skb == NULL) {
+                       /* first fragment, reserve header_len */
+                       *mtu = *mtu - rt->dst.header_len;
+
+               } else {
+                       /*
+                        * this fragment is not first, the headers
+                        * space is regarded as data space.
+                        */
+                       *mtu = dst_mtu(rt->dst.path);
+               }
+               *maxfraglen = ((*mtu - fragheaderlen) & ~7)
+                             + fragheaderlen - sizeof(struct frag_hdr);
+       }
+}
+
 int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
        int offset, int len, int odd, struct sk_buff *skb),
        void *from, int length, int transhdrlen,
@@ -1196,7 +1219,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct inet_cork *cork;
-       struct sk_buff *skb;
+       struct sk_buff *skb, *skb_prev = NULL;
        unsigned int maxfraglen, fragheaderlen;
        int exthdrlen;
        int dst_exthdrlen;
@@ -1253,8 +1276,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                inet->cork.fl.u.ip6 = *fl6;
                np->cork.hop_limit = hlimit;
                np->cork.tclass = tclass;
-               mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
-                     rt->dst.dev->mtu : dst_mtu(&rt->dst);
+               if (rt->dst.flags & DST_XFRM_TUNNEL)
+                       mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+                             rt->dst.dev->mtu : dst_mtu(&rt->dst);
+               else
+                       mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+                             rt->dst.dev->mtu : dst_mtu(rt->dst.path);
                if (np->frag_size < mtu) {
                        if (np->frag_size)
                                mtu = np->frag_size;
@@ -1350,25 +1377,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
-                       struct sk_buff *skb_prev;
 alloc_new_skb:
-                       skb_prev = skb;
-
                        /* There's no room in the current skb */
-                       if (skb_prev)
-                               fraggap = skb_prev->len - maxfraglen;
+                       if (skb)
+                               fraggap = skb->len - maxfraglen;
                        else
                                fraggap = 0;
+                       /* update mtu and maxfraglen if necessary */
+                       if (skb == NULL || skb_prev == NULL)
+                               ip6_append_data_mtu(&mtu, &maxfraglen,
+                                                   fragheaderlen, skb, rt);
+
+                       skb_prev = skb;
 
                        /*
                         * If remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
-                       if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
-                               datalen = maxfraglen - fragheaderlen;
 
-                       fraglen = datalen + fragheaderlen;
+                       if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
+                               datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
@@ -1377,13 +1406,16 @@ alloc_new_skb:
 
                        alloclen += dst_exthdrlen;
 
-                       /*
-                        * The last fragment gets additional space at tail.
-                        * Note: we overallocate on fragments with MSG_MODE
-                        * because we have no idea if we're the last one.
-                        */
-                       if (datalen == length + fraggap)
-                               alloclen += rt->dst.trailer_len;
+                       if (datalen != length + fraggap) {
+                               /*
+                                * this is not the last fragment, the trailer
+                                * space is regarded as data space.
+                                */
+                               datalen += rt->dst.trailer_len;
+                       }
+
+                       alloclen += rt->dst.trailer_len;
+                       fraglen = datalen + fragheaderlen;
 
                        /*
                         * We just reserve space for fragment header.
index 554d5999abc40534e37c7817f13722b620b821e7..3a9aec29581a14cb88f4948ec32737026bfd22ce 100644 (file)
@@ -476,7 +476,8 @@ out:
 
 
 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
-                             struct request_values *rvp)
+                             struct request_values *rvp,
+                             u16 queue_mapping)
 {
        struct inet6_request_sock *treq = inet6_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
@@ -513,6 +514,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
                __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
                fl6.daddr = treq->rmt_addr;
+               skb_set_queue_mapping(skb, queue_mapping);
                err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
                err = net_xmit_eval(err);
        }
@@ -528,7 +530,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
                             struct request_values *rvp)
 {
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-       return tcp_v6_send_synack(sk, req, rvp);
+       return tcp_v6_send_synack(sk, req, rvp, 0);
 }
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -1213,7 +1215,8 @@ have_isn:
        security_inet_conn_request(sk, skb, req);
 
        if (tcp_v6_send_synack(sk, req,
-                              (struct request_values *)&tmp_ext) ||
+                              (struct request_values *)&tmp_ext,
+                              skb_get_queue_mapping(skb)) ||
            want_cookie)
                goto drop_and_free;
 
index 889f5d13d7ba342b5ea2a2c447b1c6858b553de2..70614e7affabded003aef1bf5ed4c097d4b515fa 100644 (file)
@@ -239,9 +239,16 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
-       int ret = -EINVAL;
+       int ret;
        int chk_addr_ret;
 
+       if (!sock_flag(sk, SOCK_ZAPPED))
+               return -EINVAL;
+       if (addr_len < sizeof(struct sockaddr_l2tpip))
+               return -EINVAL;
+       if (addr->l2tp_family != AF_INET)
+               return -EINVAL;
+
        ret = -EADDRINUSE;
        read_lock_bh(&l2tp_ip_lock);
        if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
@@ -272,6 +279,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip_lock);
        ret = 0;
+       sock_reset_flag(sk, SOCK_ZAPPED);
+
 out:
        release_sock(sk);
 
@@ -288,6 +297,9 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
        struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
        int rc;
 
+       if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
+               return -EINVAL;
+
        if (addr_len < sizeof(*lsa))
                return -EINVAL;
 
@@ -311,6 +323,14 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
        return rc;
 }
 
+static int l2tp_ip_disconnect(struct sock *sk, int flags)
+{
+       if (sock_flag(sk, SOCK_ZAPPED))
+               return 0;
+
+       return udp_disconnect(sk, flags);
+}
+
 static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
                           int *uaddr_len, int peer)
 {
@@ -530,7 +550,7 @@ static struct proto l2tp_ip_prot = {
        .close             = l2tp_ip_close,
        .bind              = l2tp_ip_bind,
        .connect           = l2tp_ip_connect,
-       .disconnect        = udp_disconnect,
+       .disconnect        = l2tp_ip_disconnect,
        .ioctl             = udp_ioctl,
        .destroy           = l2tp_ip_destroy_sock,
        .setsockopt        = ip_setsockopt,
index 0291d8d85f302f3a244a13c0d6a6a6465137882e..35e1e4bde58730d8395e2870d552230bca3a9c3d 100644 (file)
@@ -258,6 +258,10 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        int addr_type;
        int err;
 
+       if (!sock_flag(sk, SOCK_ZAPPED))
+               return -EINVAL;
+       if (addr->l2tp_family != AF_INET6)
+               return -EINVAL;
        if (addr_len < sizeof(*addr))
                return -EINVAL;
 
@@ -331,6 +335,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip6_lock);
 
+       sock_reset_flag(sk, SOCK_ZAPPED);
        release_sock(sk);
        return 0;
 
@@ -354,6 +359,9 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
        int     addr_type;
        int rc;
 
+       if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
+               return -EINVAL;
+
        if (addr_len < sizeof(*lsa))
                return -EINVAL;
 
@@ -383,6 +391,14 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
        return rc;
 }
 
+static int l2tp_ip6_disconnect(struct sock *sk, int flags)
+{
+       if (sock_flag(sk, SOCK_ZAPPED))
+               return 0;
+
+       return udp_disconnect(sk, flags);
+}
+
 static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
                            int *uaddr_len, int peer)
 {
@@ -689,7 +705,7 @@ static struct proto l2tp_ip6_prot = {
        .close             = l2tp_ip6_close,
        .bind              = l2tp_ip6_bind,
        .connect           = l2tp_ip6_connect,
-       .disconnect        = udp_disconnect,
+       .disconnect        = l2tp_ip6_disconnect,
        .ioctl             = udp_ioctl,
        .destroy           = l2tp_ip6_destroy_sock,
        .setsockopt        = ipv6_setsockopt,
index 8577264378fe0a88e6b3aadcdc6d96333782aa95..ddc553e76671bae0eac8dec5ace2e63035c45869 100644 (file)
@@ -923,5 +923,4 @@ MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
 MODULE_DESCRIPTION("L2TP netlink");
 MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0");
-MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \
-            __stringify(NETLINK_GENERIC) "-type-" "l2tp");
+MODULE_ALIAS_GENL_FAMILY("l2tp");
index b3b3c264ff66b970beac9b5090cd086610266419..04c3063089874fa7c8eb5ded233be3e5d7b86502 100644 (file)
@@ -1522,6 +1522,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
         * anymore. The timeout will be reset if the frame is ACKed by
         * the AP.
         */
+       ifmgd->probe_send_count++;
+
        if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
                ifmgd->nullfunc_failed = false;
                ieee80211_send_nullfunc(sdata->local, sdata, 0);
@@ -1538,7 +1540,6 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
                                         0, (u32) -1, true, false);
        }
 
-       ifmgd->probe_send_count++;
        ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
        run_again(ifmgd, ifmgd->probe_timeout);
        if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
index 5f827a6b0d8d1f7c494424be701a95ff1b6bf701..847215bb2a6fc63c1ed010dbfc9bd2d35cb6b7c0 100644 (file)
@@ -153,7 +153,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
 
        /* Don't calculate ACKs for QoS Frames with NoAck Policy set */
        if (ieee80211_is_data_qos(hdr->frame_control) &&
-           *(ieee80211_get_qos_ctl(hdr)) | IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
+           *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
                dur = 0;
        else
                /* Time needed to transmit ACK
index 22f2216b397ea37b5845efc359c7d0e519261aec..a44c6807df01914a04c5675d1422d765260a8c29 100644 (file)
@@ -1371,6 +1371,12 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                }
        }
 
+       /* add back keys */
+       list_for_each_entry(sdata, &local->interfaces, list)
+               if (ieee80211_sdata_running(sdata))
+                       ieee80211_enable_keys(sdata);
+
+ wake_up:
        /*
         * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
         * sessions can be established after a resume.
@@ -1392,12 +1398,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                mutex_unlock(&local->sta_mtx);
        }
 
-       /* add back keys */
-       list_for_each_entry(sdata, &local->interfaces, list)
-               if (ieee80211_sdata_running(sdata))
-                       ieee80211_enable_keys(sdata);
-
- wake_up:
        ieee80211_wake_queues_by_reason(hw,
                        IEEE80211_QUEUE_STOP_REASON_SUSPEND);
 
index 8340ace837f2eb309a707d84a8d71da0fa282bbd..2cc7c1ee769046c1b45ce677caa6f8dac3e4a1ff 100644 (file)
@@ -836,7 +836,7 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
 #ifdef CONFIG_MODULES
                if (res == NULL) {
                        genl_unlock();
-                       request_module("net-pf-%d-proto-%d-type-%s",
+                       request_module("net-pf-%d-proto-%d-family-%s",
                                       PF_NETLINK, NETLINK_GENERIC, name);
                        genl_lock();
                        res = genl_family_find_byname(name);
index edfaaaf164ebfab0eebda8b509d91fed1735daee..8d2b3d5a7c21e5ffb2063621a569e1f6280f52ad 100644 (file)
@@ -186,8 +186,7 @@ struct rds_ib_device {
        struct work_struct      free_work;
 };
 
-#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus)
-#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
+#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
 #define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
 
 /* bits for i_ack_flags */
index 8522a4793374136fa4ab66aa9b325b48019801ac..ca8e0a57d945dabeb51147f338cef363672040d5 100644 (file)
@@ -16,8 +16,6 @@
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 
-extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
-
 /*
  * The ATM queuing discipline provides a framework for invoking classifiers
  * (aka "filters"), which in turn select classes of this queuing discipline.
index 38f388c39dce89a5e6456514771f70ef975af1c0..107c4528654fd5867b8363ccdf66c648e9202a34 100644 (file)
@@ -381,21 +381,53 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 }
 
 /*
- * We cannot currently handle tokens with rotated data.  We need a
- * generalized routine to rotate the data in place.  It is anticipated
- * that we won't encounter rotated data in the general case.
+ * We can shift data by up to LOCAL_BUF_LEN bytes in a pass.  If we need
+ * to do more than that, we shift repeatedly.  Kevin Coffman reports
+ * seeing 28 bytes as the value used by Microsoft clients and servers
+ * with AES, so this constant is chosen to allow handling 28 in one pass
+ * without using too much stack space.
+ *
+ * If that proves to a problem perhaps we could use a more clever
+ * algorithm.
  */
-static u32
-rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc)
+#define LOCAL_BUF_LEN 32u
+
+static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
 {
-       unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN);
+       char head[LOCAL_BUF_LEN];
+       char tmp[LOCAL_BUF_LEN];
+       unsigned int this_len, i;
+
+       BUG_ON(shift > LOCAL_BUF_LEN);
 
-       if (realrrc == 0)
-               return 0;
+       read_bytes_from_xdr_buf(buf, 0, head, shift);
+       for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
+               this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
+               read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len);
+               write_bytes_to_xdr_buf(buf, i, tmp, this_len);
+       }
+       write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
+}
 
-       dprintk("%s: cannot process token with rotated data: "
-               "rrc %u, realrrc %u\n", __func__, rrc, realrrc);
-       return 1;
+static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
+{
+       int shifted = 0;
+       int this_shift;
+
+       shift %= buf->len;
+       while (shifted < shift) {
+               this_shift = min(shift - shifted, LOCAL_BUF_LEN);
+               rotate_buf_a_little(buf, this_shift);
+               shifted += this_shift;
+       }
+}
+
+static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
+{
+       struct xdr_buf subbuf;
+
+       xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
+       _rotate_left(&subbuf, shift);
 }
 
 static u32
@@ -495,11 +527,8 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 
        seqnum = be64_to_cpup((__be64 *)(ptr + 8));
 
-       if (rrc != 0) {
-               err = rotate_left(kctx, offset, buf, rrc);
-               if (err)
-                       return GSS_S_FAILURE;
-       }
+       if (rrc != 0)
+               rotate_left(offset + 16, buf, rrc);
 
        err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
                                        &headskip, &tailskip);
index 28b62dbb6d1e4be36358055a9c231143f0e45d9e..73e95738660042e7a9d4e7cb252143ec74078265 100644 (file)
@@ -336,7 +336,6 @@ struct rsc {
        struct svc_cred         cred;
        struct gss_svc_seq_data seqdata;
        struct gss_ctx          *mechctx;
-       char                    *client_name;
 };
 
 static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old);
@@ -347,9 +346,7 @@ static void rsc_free(struct rsc *rsci)
        kfree(rsci->handle.data);
        if (rsci->mechctx)
                gss_delete_sec_context(&rsci->mechctx);
-       if (rsci->cred.cr_group_info)
-               put_group_info(rsci->cred.cr_group_info);
-       kfree(rsci->client_name);
+       free_svc_cred(&rsci->cred);
 }
 
 static void rsc_put(struct kref *ref)
@@ -387,7 +384,7 @@ rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
        tmp->handle.data = NULL;
        new->mechctx = NULL;
        new->cred.cr_group_info = NULL;
-       new->client_name = NULL;
+       new->cred.cr_principal = NULL;
 }
 
 static void
@@ -402,8 +399,8 @@ update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
        spin_lock_init(&new->seqdata.sd_lock);
        new->cred = tmp->cred;
        tmp->cred.cr_group_info = NULL;
-       new->client_name = tmp->client_name;
-       tmp->client_name = NULL;
+       new->cred.cr_principal = tmp->cred.cr_principal;
+       tmp->cred.cr_principal = NULL;
 }
 
 static struct cache_head *
@@ -501,8 +498,8 @@ static int rsc_parse(struct cache_detail *cd,
                /* get client name */
                len = qword_get(&mesg, buf, mlen);
                if (len > 0) {
-                       rsci.client_name = kstrdup(buf, GFP_KERNEL);
-                       if (!rsci.client_name)
+                       rsci.cred.cr_principal = kstrdup(buf, GFP_KERNEL);
+                       if (!rsci.cred.cr_principal)
                                goto out;
                }
 
@@ -932,16 +929,6 @@ struct gss_svc_data {
        struct rsc                      *rsci;
 };
 
-char *svc_gss_principal(struct svc_rqst *rqstp)
-{
-       struct gss_svc_data *gd = (struct gss_svc_data *)rqstp->rq_auth_data;
-
-       if (gd && gd->rsci)
-               return gd->rsci->client_name;
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(svc_gss_principal);
-
 static int
 svcauth_gss_set_client(struct svc_rqst *rqstp)
 {
@@ -969,16 +956,17 @@ svcauth_gss_set_client(struct svc_rqst *rqstp)
 }
 
 static inline int
-gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp, struct rsi *rsip)
+gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp,
+               struct xdr_netobj *out_handle, int *major_status)
 {
        struct rsc *rsci;
        int        rc;
 
-       if (rsip->major_status != GSS_S_COMPLETE)
+       if (*major_status != GSS_S_COMPLETE)
                return gss_write_null_verf(rqstp);
-       rsci = gss_svc_searchbyctx(cd, &rsip->out_handle);
+       rsci = gss_svc_searchbyctx(cd, out_handle);
        if (rsci == NULL) {
-               rsip->major_status = GSS_S_NO_CONTEXT;
+               *major_status = GSS_S_NO_CONTEXT;
                return gss_write_null_verf(rqstp);
        }
        rc = gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN);
@@ -986,22 +974,13 @@ gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp, struct rsi
        return rc;
 }
 
-/*
- * Having read the cred already and found we're in the context
- * initiation case, read the verifier and initiate (or check the results
- * of) upcalls to userspace for help with context initiation.  If
- * the upcall results are available, write the verifier and result.
- * Otherwise, drop the request pending an answer to the upcall.
- */
-static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
-                       struct rpc_gss_wire_cred *gc, __be32 *authp)
+static inline int
+gss_read_verf(struct rpc_gss_wire_cred *gc,
+             struct kvec *argv, __be32 *authp,
+             struct xdr_netobj *in_handle,
+             struct xdr_netobj *in_token)
 {
-       struct kvec *argv = &rqstp->rq_arg.head[0];
-       struct kvec *resv = &rqstp->rq_res.head[0];
        struct xdr_netobj tmpobj;
-       struct rsi *rsip, rsikey;
-       int ret;
-       struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
 
        /* Read the verifier; should be NULL: */
        *authp = rpc_autherr_badverf;
@@ -1011,24 +990,67 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
                return SVC_DENIED;
        if (svc_getnl(argv) != 0)
                return SVC_DENIED;
-
        /* Martial context handle and token for upcall: */
        *authp = rpc_autherr_badcred;
        if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0)
                return SVC_DENIED;
-       memset(&rsikey, 0, sizeof(rsikey));
-       if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
+       if (dup_netobj(in_handle, &gc->gc_ctx))
                return SVC_CLOSE;
        *authp = rpc_autherr_badverf;
        if (svc_safe_getnetobj(argv, &tmpobj)) {
-               kfree(rsikey.in_handle.data);
+               kfree(in_handle->data);
                return SVC_DENIED;
        }
-       if (dup_netobj(&rsikey.in_token, &tmpobj)) {
-               kfree(rsikey.in_handle.data);
+       if (dup_netobj(in_token, &tmpobj)) {
+               kfree(in_handle->data);
                return SVC_CLOSE;
        }
 
+       return 0;
+}
+
+static inline int
+gss_write_resv(struct kvec *resv, size_t size_limit,
+              struct xdr_netobj *out_handle, struct xdr_netobj *out_token,
+              int major_status, int minor_status)
+{
+       if (resv->iov_len + 4 > size_limit)
+               return -1;
+       svc_putnl(resv, RPC_SUCCESS);
+       if (svc_safe_putnetobj(resv, out_handle))
+               return -1;
+       if (resv->iov_len + 3 * 4 > size_limit)
+               return -1;
+       svc_putnl(resv, major_status);
+       svc_putnl(resv, minor_status);
+       svc_putnl(resv, GSS_SEQ_WIN);
+       if (svc_safe_putnetobj(resv, out_token))
+               return -1;
+       return 0;
+}
+
+/*
+ * Having read the cred already and found we're in the context
+ * initiation case, read the verifier and initiate (or check the results
+ * of) upcalls to userspace for help with context initiation.  If
+ * the upcall results are available, write the verifier and result.
+ * Otherwise, drop the request pending an answer to the upcall.
+ */
+static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
+                       struct rpc_gss_wire_cred *gc, __be32 *authp)
+{
+       struct kvec *argv = &rqstp->rq_arg.head[0];
+       struct kvec *resv = &rqstp->rq_res.head[0];
+       struct rsi *rsip, rsikey;
+       int ret;
+       struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
+
+       memset(&rsikey, 0, sizeof(rsikey));
+       ret = gss_read_verf(gc, argv, authp,
+                           &rsikey.in_handle, &rsikey.in_token);
+       if (ret)
+               return ret;
+
        /* Perform upcall, or find upcall result: */
        rsip = rsi_lookup(sn->rsi_cache, &rsikey);
        rsi_free(&rsikey);
@@ -1040,19 +1062,12 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
 
        ret = SVC_CLOSE;
        /* Got an answer to the upcall; use it: */
-       if (gss_write_init_verf(sn->rsc_cache, rqstp, rsip))
-               goto out;
-       if (resv->iov_len + 4 > PAGE_SIZE)
+       if (gss_write_init_verf(sn->rsc_cache, rqstp,
+                               &rsip->out_handle, &rsip->major_status))
                goto out;
-       svc_putnl(resv, RPC_SUCCESS);
-       if (svc_safe_putnetobj(resv, &rsip->out_handle))
-               goto out;
-       if (resv->iov_len + 3 * 4 > PAGE_SIZE)
-               goto out;
-       svc_putnl(resv, rsip->major_status);
-       svc_putnl(resv, rsip->minor_status);
-       svc_putnl(resv, GSS_SEQ_WIN);
-       if (svc_safe_putnetobj(resv, &rsip->out_token))
+       if (gss_write_resv(resv, PAGE_SIZE,
+                          &rsip->out_handle, &rsip->out_token,
+                          rsip->major_status, rsip->minor_status))
                goto out;
 
        ret = SVC_COMPLETE;
@@ -1192,7 +1207,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
                }
                svcdata->rsci = rsci;
                cache_get(&rsci->h);
-               rqstp->rq_flavor = gss_svc_to_pseudoflavor(
+               rqstp->rq_cred.cr_flavor = gss_svc_to_pseudoflavor(
                                        rsci->mechctx->mech_type, gc->gc_svc);
                ret = SVC_OK;
                goto out;
index 7fee13b331d193e1a46831c257ffd6fcf6a4fde8..f56f045778aedf4a0da1fcf2566eacf69c7c6c8a 100644 (file)
@@ -1286,6 +1286,8 @@ call_reserveresult(struct rpc_task *task)
        }
 
        switch (status) {
+       case -ENOMEM:
+               rpc_delay(task, HZ >> 2);
        case -EAGAIN:   /* woken up; retry */
                task->tk_action = call_reserve;
                return;
index fd2423991c2d4dc473223b128d9a761b3da5beea..04040476082e6efd5ef08f9c7e6444c0fec77929 100644 (file)
@@ -120,7 +120,7 @@ EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
 
 /**
  * rpc_queue_upcall - queue an upcall message to userspace
- * @inode: inode of upcall pipe on which to queue given message
+ * @pipe: upcall pipe on which to queue given message
  * @msg: message to queue
  *
  * Call with an @inode created by rpc_mkpipe() to queue an upcall.
@@ -819,9 +819,7 @@ static int rpc_rmdir_depopulate(struct dentry *dentry,
  * @parent: dentry of directory to create new "pipe" in
  * @name: name of pipe
  * @private: private data to associate with the pipe, for the caller's use
- * @ops: operations defining the behavior of the pipe: upcall, downcall,
- *     release_pipe, open_pipe, and destroy_msg.
- * @flags: rpc_pipe flags
+ * @pipe: &rpc_pipe containing input parameters
  *
  * Data is made available for userspace to read by calls to
  * rpc_queue_upcall().  The actual reads will result in calls to
@@ -943,7 +941,7 @@ struct dentry *rpc_create_client_dir(struct dentry *dentry,
 
 /**
  * rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir()
- * @clnt: rpc client
+ * @dentry: dentry for the pipe
  */
 int rpc_remove_client_dir(struct dentry *dentry)
 {
@@ -1115,7 +1113,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_op = &s_ops;
        sb->s_time_gran = 1;
 
-       inode = rpc_get_inode(sb, S_IFDIR | 0755);
+       inode = rpc_get_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO);
        sb->s_root = root = d_make_root(inode);
        if (!root)
                return -ENOMEM;
index 78ac39fd9fe7556a71dc361406904cf8bb32d7b2..92509ffe15fcacce5de331cbb205a84c4f718a86 100644 (file)
@@ -180,14 +180,16 @@ void rpcb_put_local(struct net *net)
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct rpc_clnt *clnt = sn->rpcb_local_clnt;
        struct rpc_clnt *clnt4 = sn->rpcb_local_clnt4;
-       int shutdown;
+       int shutdown = 0;
 
        spin_lock(&sn->rpcb_clnt_lock);
-       if (--sn->rpcb_users == 0) {
-               sn->rpcb_local_clnt = NULL;
-               sn->rpcb_local_clnt4 = NULL;
+       if (sn->rpcb_users) {
+               if (--sn->rpcb_users == 0) {
+                       sn->rpcb_local_clnt = NULL;
+                       sn->rpcb_local_clnt4 = NULL;
+               }
+               shutdown = !sn->rpcb_users;
        }
-       shutdown = !sn->rpcb_users;
        spin_unlock(&sn->rpcb_clnt_lock);
 
        if (shutdown) {
@@ -394,6 +396,7 @@ static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
 
 /**
  * rpcb_register - set or unset a port registration with the local rpcbind svc
+ * @net: target network namespace
  * @prog: RPC program number to bind
  * @vers: RPC version number to bind
  * @prot: transport protocol to register
@@ -521,6 +524,7 @@ static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn,
 
 /**
  * rpcb_v4_register - set or unset a port registration with the local rpcbind
+ * @net: target network namespace
  * @program: RPC program number of service to (un)register
  * @version: RPC version number of service to (un)register
  * @address: address family, IP address, and port to (un)register
index 017c0117d1543a784dfe5130396c74f80879131a..7e9baaa1e543e55878dcb0d9bd0378a0e51754e0 100644 (file)
@@ -407,6 +407,14 @@ static int svc_uses_rpcbind(struct svc_serv *serv)
        return 0;
 }
 
+int svc_bind(struct svc_serv *serv, struct net *net)
+{
+       if (!svc_uses_rpcbind(serv))
+               return 0;
+       return svc_rpcb_setup(serv, net);
+}
+EXPORT_SYMBOL_GPL(svc_bind);
+
 /*
  * Create an RPC service
  */
@@ -471,15 +479,8 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
                spin_lock_init(&pool->sp_lock);
        }
 
-       if (svc_uses_rpcbind(serv)) {
-               if (svc_rpcb_setup(serv, current->nsproxy->net_ns) < 0) {
-                       kfree(serv->sv_pools);
-                       kfree(serv);
-                       return NULL;
-               }
-               if (!serv->sv_shutdown)
-                       serv->sv_shutdown = svc_rpcb_cleanup;
-       }
+       if (svc_uses_rpcbind(serv) && (!serv->sv_shutdown))
+               serv->sv_shutdown = svc_rpcb_cleanup;
 
        return serv;
 }
@@ -536,8 +537,6 @@ EXPORT_SYMBOL_GPL(svc_shutdown_net);
 void
 svc_destroy(struct svc_serv *serv)
 {
-       struct net *net = current->nsproxy->net_ns;
-
        dprintk("svc: svc_destroy(%s, %d)\n",
                                serv->sv_program->pg_name,
                                serv->sv_nrthreads);
@@ -552,8 +551,6 @@ svc_destroy(struct svc_serv *serv)
 
        del_timer_sync(&serv->sv_temptimer);
 
-       svc_shutdown_net(serv, net);
-
        /*
         * The last user is gone and thus all sockets have to be destroyed to
         * the point. Check this.
index b98ee35149121602b42ace9365bfd5f21e84767a..88f2bf671960d444e73d3d9eba2998f75ac2885b 100644 (file)
@@ -598,6 +598,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 
        /* now allocate needed pages.  If we get a failure, sleep briefly */
        pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
+       BUG_ON(pages >= RPCSVC_MAXPAGES);
        for (i = 0; i < pages ; i++)
                while (rqstp->rq_pages[i] == NULL) {
                        struct page *p = alloc_page(GFP_KERNEL);
@@ -612,7 +613,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
                        rqstp->rq_pages[i] = p;
                }
        rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
-       BUG_ON(pages >= RPCSVC_MAXPAGES);
 
        /* Make arg->head point to first page and arg->pages point to rest */
        arg = &rqstp->rq_arg;
@@ -973,7 +973,7 @@ void svc_close_net(struct svc_serv *serv, struct net *net)
        svc_clear_pools(serv, net);
        /*
         * At this point the sp_sockets lists will stay empty, since
-        * svc_enqueue will not add new entries without taking the
+        * svc_xprt_enqueue will not add new entries without taking the
         * sp_lock and checking XPT_BUSY.
         */
        svc_clear_list(&serv->sv_tempsocks, net);
index 71ec8530ec8cb7ac10abae2fbf6c2571b14c8c98..2777fa896645de3f063aa5ad67cb054bbb75a894 100644 (file)
@@ -347,17 +347,12 @@ static inline int ip_map_update(struct net *net, struct ip_map *ipm,
        return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
 }
 
-
-void svcauth_unix_purge(void)
+void svcauth_unix_purge(struct net *net)
 {
-       struct net *net;
-
-       for_each_net(net) {
-               struct sunrpc_net *sn;
+       struct sunrpc_net *sn;
 
-               sn = net_generic(net, sunrpc_net_id);
-               cache_purge(sn->ip_map_cache);
-       }
+       sn = net_generic(net, sunrpc_net_id);
+       cache_purge(sn->ip_map_cache);
 }
 EXPORT_SYMBOL_GPL(svcauth_unix_purge);
 
@@ -751,6 +746,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
        struct svc_cred *cred = &rqstp->rq_cred;
 
        cred->cr_group_info = NULL;
+       cred->cr_principal = NULL;
        rqstp->rq_client = NULL;
 
        if (argv->iov_len < 3*4)
@@ -778,7 +774,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
        svc_putnl(resv, RPC_AUTH_NULL);
        svc_putnl(resv, 0);
 
-       rqstp->rq_flavor = RPC_AUTH_NULL;
+       rqstp->rq_cred.cr_flavor = RPC_AUTH_NULL;
        return SVC_OK;
 }
 
@@ -816,6 +812,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
        int             len   = argv->iov_len;
 
        cred->cr_group_info = NULL;
+       cred->cr_principal = NULL;
        rqstp->rq_client = NULL;
 
        if ((len -= 3*4) < 0)
@@ -852,7 +849,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
        svc_putnl(resv, RPC_AUTH_NULL);
        svc_putnl(resv, 0);
 
-       rqstp->rq_flavor = RPC_AUTH_UNIX;
+       rqstp->rq_cred.cr_flavor = RPC_AUTH_UNIX;
        return SVC_OK;
 
 badcred:
index 6fe2dcead15027e6a5d4b7086121b319d8e05363..3c83035cdaa9940849fbfb1c729a6a2720b32702 100644 (file)
@@ -979,20 +979,21 @@ static void xprt_alloc_slot(struct rpc_task *task)
                list_del(&req->rq_list);
                goto out_init_req;
        }
-       req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT);
+       req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
        if (!IS_ERR(req))
                goto out_init_req;
        switch (PTR_ERR(req)) {
        case -ENOMEM:
-               rpc_delay(task, HZ >> 2);
                dprintk("RPC:       dynamic allocation of request slot "
                                "failed! Retrying\n");
+               task->tk_status = -ENOMEM;
                break;
        case -EAGAIN:
                rpc_sleep_on(&xprt->backlog, task, NULL);
                dprintk("RPC:       waiting for request slot\n");
+       default:
+               task->tk_status = -EAGAIN;
        }
-       task->tk_status = -EAGAIN;
        return;
 out_init_req:
        task->tk_status = 0;
index 61ceae0b956607cf0b7fa88b61f22bc79b1f673f..a157a2e64e18de17e33ae1c515e5b1501553b256 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 config WAN_ROUTER
-       tristate "WAN router"
+       tristate "WAN router (DEPRECATED)"
        depends on EXPERIMENTAL
        ---help---
          Wide Area Networks (WANs), such as X.25, frame relay and leased
index c53e8f42aa7506b897464a3716e282b0c837a549..ccfbd328a69d7948736157555bfa857236875156 100644 (file)
@@ -1921,6 +1921,9 @@ no_transform:
        }
 ok:
        xfrm_pols_put(pols, drop_pols);
+       if (dst && dst->xfrm &&
+           dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
+               dst->flags |= DST_XFRM_TUNNEL;
        return dst;
 
 nopol:
index faea0ec612bfed2932ca5dc25868fe00888a5afc..e5bd60ff48e3d553ef3921256123519df7b812b8 100755 (executable)
@@ -2382,6 +2382,19 @@ sub process {
                        }
                }
 
+               if ($line =~ /\bprintk\s*\(\s*KERN_([A-Z]+)/) {
+                       my $orig = $1;
+                       my $level = lc($orig);
+                       $level = "warn" if ($level eq "warning");
+                       WARN("PREFER_PR_LEVEL",
+                            "Prefer pr_$level(... to printk(KERN_$1, ...\n" . $herecurr);
+               }
+
+               if ($line =~ /\bpr_warning\s*\(/) {
+                       WARN("PREFER_PR_LEVEL",
+                            "Prefer pr_warn(... to pr_warning(...\n" . $herecurr);
+               }
+
 # function brace can't be on same line, except for #defines of do while,
 # or if closed on same line
                if (($line=~/$Type\s*$Ident\(.*\).*\s{/) and
@@ -2448,6 +2461,13 @@ sub process {
                                     "space prohibited between function name and open parenthesis '('\n" . $herecurr);
                        }
                }
+
+# check for whitespace before a non-naked semicolon
+               if ($line =~ /^\+.*\S\s+;/) {
+                       CHK("SPACING",
+                           "space prohibited before semicolon\n" . $herecurr);
+               }
+
 # Check operator spacing.
                if (!($line=~/\#\s*include/)) {
                        my $ops = qr{
diff --git a/scripts/coccinelle/misc/ifaddr.cocci b/scripts/coccinelle/misc/ifaddr.cocci
new file mode 100644 (file)
index 0000000..3e4089a
--- /dev/null
@@ -0,0 +1,35 @@
+/// the address of a variable or field is non-zero is likely always to bo
+/// non-zero
+///
+// Confidence: High
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual org
+virtual report
+virtual context
+
+@r@
+expression x;
+statement S1,S2;
+position p;
+@@
+
+*if@p (&x)
+ S1 else S2
+
+@script:python depends on org@
+p << r.p;
+@@
+
+cocci.print_main("test of a variable/field address",p)
+
+@script:python depends on report@
+p << r.p;
+@@
+
+msg = "ERROR: test of a variable/field address"
+coccilib.report.print_report(p[0],msg)
diff --git a/scripts/coccinelle/misc/noderef.cocci b/scripts/coccinelle/misc/noderef.cocci
new file mode 100644 (file)
index 0000000..c170721
--- /dev/null
@@ -0,0 +1,65 @@
+/// sizeof when applied to a pointer typed expression gives the size of
+/// the pointer
+///
+// Confidence: High
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual org
+virtual report
+virtual context
+virtual patch
+
+@depends on patch@
+expression *x;
+expression f;
+type T;
+@@
+
+(
+x = <+... sizeof(
+- x
++ *x
+   ) ...+>
+|
+f(...,(T)(x),...,sizeof(
+- x
++ *x
+   ),...)
+|
+f(...,sizeof(x),...,(T)(
+- x
++ *x
+   ),...)
+)
+
+@r depends on !patch@
+expression *x;
+expression f;
+position p;
+type T;
+@@
+
+(
+*x = <+... sizeof@p(x) ...+>
+|
+*f(...,(T)(x),...,sizeof@p(x),...)
+|
+*f(...,sizeof@p(x),...,(T)(x),...)
+)
+
+@script:python depends on org@
+p << r.p;
+@@
+
+cocci.print_main("application of sizeof to pointer",p)
+
+@script:python depends on report@
+p << r.p;
+@@
+
+msg = "ERROR: application of sizeof to pointer"
+coccilib.report.print_report(p[0],msg)
index a7c7c4b8e957311196f9b2eabef414ceaa144555..ed6653ef9702aa5320756537bb942e8a35123cb4 100755 (executable)
@@ -107,7 +107,8 @@ while [ "$1" != "" ] ; do
                ;;
 
        --set-str)
-               set_var "CONFIG_$ARG" "CONFIG_$ARG=\"$1\""
+               # sed swallows one level of escaping, so we need double-escaping
+               set_var "CONFIG_$ARG" "CONFIG_$ARG=\"${1//\"/\\\\\"}\""
                shift
                ;;
 
@@ -124,9 +125,11 @@ while [ "$1" != "" ] ; do
                        if [ $? != 0 ] ; then
                                echo undef
                        else
-                               V="${V/CONFIG_$ARG=/}"
-                               V="${V/\"/}"
-                               echo "$V"
+                               V="${V/#CONFIG_$ARG=/}"
+                               V="${V/#\"/}"
+                               V="${V/%\"/}"
+                               V="${V/\\\"/\"}"
+                               echo "${V}"
                        fi
                fi
                ;;
index f208f900ed3a8747f9bfd949b2221000d5021607..0dc4a2c779b119ec51e2eb5b3f17c5a4d02645fb 100644 (file)
@@ -574,8 +574,15 @@ int main(int ac, char **av)
        case alldefconfig:
        case randconfig:
                name = getenv("KCONFIG_ALLCONFIG");
-               if (name && !stat(name, &tmpstat)) {
-                       conf_read_simple(name, S_DEF_USER);
+               if (!name)
+                       break;
+               if ((strcmp(name, "") != 0) && (strcmp(name, "1") != 0)) {
+                       if (conf_read_simple(name, S_DEF_USER)) {
+                               fprintf(stderr,
+                                       _("*** Can't read seed configuration \"%s\"!\n"),
+                                       name);
+                               exit(1);
+                       }
                        break;
                }
                switch (input_mode) {
@@ -586,10 +593,13 @@ int main(int ac, char **av)
                case randconfig:        name = "allrandom.config"; break;
                default: break;
                }
-               if (!stat(name, &tmpstat))
-                       conf_read_simple(name, S_DEF_USER);
-               else if (!stat("all.config", &tmpstat))
-                       conf_read_simple("all.config", S_DEF_USER);
+               if (conf_read_simple(name, S_DEF_USER) &&
+                   conf_read_simple("all.config", S_DEF_USER)) {
+                       fprintf(stderr,
+                               _("*** KCONFIG_ALLCONFIG set, but no \"%s\" or \"all.config\" file found\n"),
+                               name);
+                       exit(1);
+               }
                break;
        default:
                break;
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
new file mode 100644 (file)
index 0000000..cd9c6c6
--- /dev/null
@@ -0,0 +1,221 @@
+#!/bin/sh
+#
+# link vmlinux
+#
+# vmlinux is linked from the objects selected by $(KBUILD_VMLINUX_INIT) and
+# $(KBUILD_VMLINUX_MAIN). Most are built-in.o files from top-level directories
+# in the kernel tree, others are specified in arch/$(ARCH)/Makefile.
+# Ordering when linking is important, and $(KBUILD_VMLINUX_INIT) must be first.
+#
+# vmlinux
+#   ^
+#   |
+#   +-< $(KBUILD_VMLINUX_INIT)
+#   |   +--< init/version.o + more
+#   |
+#   +--< $(KBUILD_VMLINUX_MAIN)
+#   |    +--< drivers/built-in.o mm/built-in.o + more
+#   |
+#   +-< ${kallsymso} (see description in KALLSYMS section)
+#
+# vmlinux version (uname -v) cannot be updated during normal
+# descending-into-subdirs phase since we do not yet know if we need to
+# update vmlinux.
+# Therefore this step is delayed until just before final link of vmlinux.
+#
+# System.map is generated to document addresses of all kernel symbols
+
+# Error out on error
+set -e
+
+# Nice output in kbuild format
+# Will be supressed by "make -s"
+info()
+{
+       if [ "${quiet}" != "silent_" ]; then
+               printf "  %-7s %s\n" ${1} ${2}
+       fi
+}
+
+# Link of vmlinux.o used for section mismatch analysis
+# ${1} output file
+modpost_link()
+{
+       ${LD} ${LDFLAGS} -r -o ${1} ${KBUILD_VMLINUX_INIT}                   \
+               --start-group ${KBUILD_VMLINUX_MAIN} --end-group
+}
+
+# Link of vmlinux
+# ${1} - optional extra .o files
+# ${2} - output file
+vmlinux_link()
+{
+       local lds="${objtree}/${KBUILD_LDS}"
+
+       if [ "${SRCARCH}" != "um" ]; then
+               ${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2}                  \
+                       -T ${lds} ${KBUILD_VMLINUX_INIT}                     \
+                       --start-group ${KBUILD_VMLINUX_MAIN} --end-group ${1}
+       else
+               ${CC} ${CFLAGS_vmlinux} -o ${2}                              \
+                       -Wl,-T,${lds} ${KBUILD_VMLINUX_INIT}                 \
+                       -Wl,--start-group                                    \
+                                ${KBUILD_VMLINUX_MAIN}                      \
+                       -Wl,--end-group                                      \
+                       -lutil ${1}
+               rm -f linux
+       fi
+}
+
+
+# Create ${2} .o file with all symbols from the ${1} object file
+kallsyms()
+{
+       info KSYM ${2}
+       local kallsymopt;
+
+       if [ -n "${CONFIG_KALLSYMS_ALL}" ]; then
+               kallsymopt=--all-symbols
+       fi
+
+       local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL}               \
+                     ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
+
+       ${NM} -n ${1} | \
+               scripts/kallsyms ${kallsymopt} | \
+               ${CC} ${aflags} -c -o ${2} -x assembler-with-cpp -
+}
+
+# Create map file with all symbols from ${1}
+# See mksymap for additional details
+mksysmap()
+{
+       ${CONFIG_SHELL} "${srctree}/scripts/mksysmap" ${1} ${2}
+}
+
+sortextable()
+{
+       ${objtree}/scripts/sortextable ${1}
+}
+
+# Delete output files in case of error
+trap cleanup SIGHUP SIGINT SIGQUIT SIGTERM ERR
+cleanup()
+{
+       rm -f .old_version
+       rm -f .tmp_System.map
+       rm -f .tmp_kallsyms*
+       rm -f .tmp_version
+       rm -f .tmp_vmlinux*
+       rm -f System.map
+       rm -f vmlinux
+       rm -f vmlinux.o
+}
+
+#
+#
+# Use "make V=1" to debug this script
+case "${KBUILD_VERBOSE}" in
+*1*)
+       set -x
+       ;;
+esac
+
+if [ "$1" = "clean" ]; then
+       cleanup
+       exit 0
+fi
+
+# We need access to CONFIG_ symbols
+. ./.config
+
+#link vmlinux.o
+info LD vmlinux.o
+modpost_link vmlinux.o
+
+# modpost vmlinux.o to check for section mismatches
+${MAKE} -f "${srctree}/scripts/Makefile.modpost" vmlinux.o
+
+# Update version
+info GEN .version
+if [ ! -r .version ]; then
+       rm -f .version;
+       echo 1 >.version;
+else
+       mv .version .old_version;
+       expr 0$(cat .old_version) + 1 >.version;
+fi;
+
+# final build of init/
+${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
+
+kallsymso=""
+kallsyms_vmlinux=""
+if [ -n "${CONFIG_KALLSYMS}" ]; then
+
+       # kallsyms support
+       # Generate section listing all symbols and add it into vmlinux
+       # It's a three step process:
+       # 1)  Link .tmp_vmlinux1 so it has all symbols and sections,
+       #     but __kallsyms is empty.
+       #     Running kallsyms on that gives us .tmp_kallsyms1.o with
+       #     the right size
+       # 2)  Link .tmp_vmlinux2 so it now has a __kallsyms section of
+       #     the right size, but due to the added section, some
+       #     addresses have shifted.
+       #     From here, we generate a correct .tmp_kallsyms2.o
+       # 2a) We may use an extra pass as this has been necessary to
+       #     woraround some alignment related bugs.
+       #     KALLSYMS_EXTRA_PASS=1 is used to trigger this.
+       # 3)  The correct ${kallsymso} is linked into the final vmlinux.
+       #
+       # a)  Verify that the System.map from vmlinux matches the map from
+       #     ${kallsymso}.
+
+       kallsymso=.tmp_kallsyms2.o
+       kallsyms_vmlinux=.tmp_vmlinux2
+
+       # step 1
+       vmlinux_link "" .tmp_vmlinux1
+       kallsyms .tmp_vmlinux1 .tmp_kallsyms1.o
+
+       # step 2
+       vmlinux_link .tmp_kallsyms1.o .tmp_vmlinux2
+       kallsyms .tmp_vmlinux2 .tmp_kallsyms2.o
+
+       # step 2a
+       if [ -n "${KALLSYMS_EXTRA_PASS}" ]; then
+               kallsymso=.tmp_kallsyms3.o
+               kallsyms_vmlinux=.tmp_vmlinux3
+
+               vmlinux_link .tmp_kallsyms2.o .tmp_vmlinux3
+
+               kallsyms .tmp_vmlinux3 .tmp_kallsyms3.o
+       fi
+fi
+
+info LD vmlinux
+vmlinux_link "${kallsymso}" vmlinux
+
+if [ -n "${CONFIG_BUILDTIME_EXTABLE_SORT}" ]; then
+       info SORTEX vmlinux
+       sortextable vmlinux
+fi
+
+info SYSMAP System.map
+mksysmap vmlinux System.map
+
+# step a (see comment above)
+if [ -n "${CONFIG_KALLSYMS}" ]; then
+       mksysmap ${kallsyms_vmlinux} .tmp_System.map
+
+       if ! cmp -s System.map .tmp_System.map; then
+               echo Inconsistent kallsyms data
+               echo echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround
+               cleanup
+               exit 1
+       fi
+fi
+
+# We made a new kernel - delete old version file
+rm -f .old_version
index eee5f8ed2493c7c2278e6f2ce6a9cb1993e17123..c95fdda584147de330a53ef49b165304f6c8625c 100644 (file)
@@ -245,7 +245,7 @@ fi
 # Build header package
 (cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
 (cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
-(cd $objtree; find .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
+(cd $objtree; find arch/$SRCARCH/include .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
 mkdir -p "$destdir"
 (cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
index 032daab449b0bb3007562e795593a15d247a2c58..8ea39aabe94889a224c868757196afc084c578b8 100644 (file)
@@ -490,17 +490,9 @@ static int common_mmap(int op, struct file *file, unsigned long prot,
        return common_file_perm(op, file, mask);
 }
 
-static int apparmor_file_mmap(struct file *file, unsigned long reqprot,
-                             unsigned long prot, unsigned long flags,
-                             unsigned long addr, unsigned long addr_only)
+static int apparmor_mmap_file(struct file *file, unsigned long reqprot,
+                             unsigned long prot, unsigned long flags)
 {
-       int rc = 0;
-
-       /* do DAC check */
-       rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
-       if (rc || addr_only)
-               return rc;
-
        return common_mmap(OP_FMMAP, file, prot, flags);
 }
 
@@ -646,7 +638,8 @@ static struct security_operations apparmor_ops = {
        .file_permission =              apparmor_file_permission,
        .file_alloc_security =          apparmor_file_alloc_security,
        .file_free_security =           apparmor_file_free_security,
-       .file_mmap =                    apparmor_file_mmap,
+       .mmap_file =                    apparmor_mmap_file,
+       .mmap_addr =                    cap_mmap_addr,
        .file_mprotect =                apparmor_file_mprotect,
        .file_lock =                    apparmor_file_lock,
 
index fca889676c5e9e5726f6c3136fb8c26cfe85f63c..61095df8b89ac452d50528144a67dca751d4f992 100644 (file)
@@ -949,7 +949,8 @@ void __init security_fixup_ops(struct security_operations *ops)
        set_to_cap_if_null(ops, file_alloc_security);
        set_to_cap_if_null(ops, file_free_security);
        set_to_cap_if_null(ops, file_ioctl);
-       set_to_cap_if_null(ops, file_mmap);
+       set_to_cap_if_null(ops, mmap_addr);
+       set_to_cap_if_null(ops, mmap_file);
        set_to_cap_if_null(ops, file_mprotect);
        set_to_cap_if_null(ops, file_lock);
        set_to_cap_if_null(ops, file_fcntl);
index e771cb1b2d7947f0c85651b38cc7c9c1d3da11d7..6dbae4650abe20208ff66eb27015e21b964d0344 100644 (file)
@@ -958,22 +958,15 @@ int cap_vm_enough_memory(struct mm_struct *mm, long pages)
 }
 
 /*
- * cap_file_mmap - check if able to map given addr
- * @file: unused
- * @reqprot: unused
- * @prot: unused
- * @flags: unused
+ * cap_mmap_addr - check if able to map given addr
  * @addr: address attempting to be mapped
- * @addr_only: unused
  *
  * If the process is attempting to map memory below dac_mmap_min_addr they need
  * CAP_SYS_RAWIO.  The other parameters to this function are unused by the
  * capability security module.  Returns 0 if this mapping should be allowed
  * -EPERM if not.
  */
-int cap_file_mmap(struct file *file, unsigned long reqprot,
-                 unsigned long prot, unsigned long flags,
-                 unsigned long addr, unsigned long addr_only)
+int cap_mmap_addr(unsigned long addr)
 {
        int ret = 0;
 
@@ -986,3 +979,9 @@ int cap_file_mmap(struct file *file, unsigned long reqprot,
        }
        return ret;
 }
+
+int cap_mmap_file(struct file *file, unsigned long reqprot,
+                 unsigned long prot, unsigned long flags)
+{
+       return 0;
+}
index fab4f8dda6c6fdf6acdebd1385bb39caae4c8b97..c92d42b021aa47c62dea181c6b90129d80ff7e53 100644 (file)
@@ -38,7 +38,7 @@ long compat_keyctl_instantiate_key_iov(
 
        ret = compat_rw_copy_check_uvector(WRITE, _payload_iov, ioc,
                                           ARRAY_SIZE(iovstack),
-                                          iovstack, &iov, 1);
+                                          iovstack, &iov);
        if (ret < 0)
                return ret;
        if (ret == 0)
index f711b094ed412e723207e5d75ceb86a0c81e4439..3dcbf86b0d31b9c7c9c889c80dacf34f9b33adc6 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/sched.h>
 #include <linux/key-type.h>
+#include <linux/task_work.h>
 
 #ifdef __KDEBUG
 #define kenter(FMT, ...) \
@@ -148,6 +149,7 @@ extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
 #define KEY_LOOKUP_FOR_UNLINK  0x04
 
 extern long join_session_keyring(const char *name);
+extern void key_change_session_keyring(struct task_work *twork);
 
 extern struct work_struct key_gc_work;
 extern unsigned key_gc_delay;
index ddb3e05bc5fcd12fae86cf9c60edf6a1f57439c1..0f5b3f0272995dc7057f1306c346468174d3e464 100644 (file)
@@ -84,7 +84,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
        vm = false;
        if (_payload) {
                ret = -ENOMEM;
-               payload = kmalloc(plen, GFP_KERNEL);
+               payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
                if (!payload) {
                        if (plen <= PAGE_SIZE)
                                goto error2;
@@ -1110,7 +1110,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
                goto no_payload;
 
        ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
-                                   ARRAY_SIZE(iovstack), iovstack, &iov, 1);
+                                   ARRAY_SIZE(iovstack), iovstack, &iov);
        if (ret < 0)
                return ret;
        if (ret == 0)
@@ -1454,50 +1454,57 @@ long keyctl_get_security(key_serial_t keyid,
  */
 long keyctl_session_to_parent(void)
 {
-#ifdef TIF_NOTIFY_RESUME
        struct task_struct *me, *parent;
        const struct cred *mycred, *pcred;
-       struct cred *cred, *oldcred;
+       struct task_work *newwork, *oldwork;
        key_ref_t keyring_r;
+       struct cred *cred;
        int ret;
 
        keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_LINK);
        if (IS_ERR(keyring_r))
                return PTR_ERR(keyring_r);
 
+       ret = -ENOMEM;
+       newwork = kmalloc(sizeof(struct task_work), GFP_KERNEL);
+       if (!newwork)
+               goto error_keyring;
+
        /* our parent is going to need a new cred struct, a new tgcred struct
         * and new security data, so we allocate them here to prevent ENOMEM in
         * our parent */
-       ret = -ENOMEM;
        cred = cred_alloc_blank();
        if (!cred)
-               goto error_keyring;
+               goto error_newwork;
 
        cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r);
-       keyring_r = NULL;
+       init_task_work(newwork, key_change_session_keyring, cred);
 
        me = current;
        rcu_read_lock();
        write_lock_irq(&tasklist_lock);
 
-       parent = me->real_parent;
        ret = -EPERM;
+       oldwork = NULL;
+       parent = me->real_parent;
 
        /* the parent mustn't be init and mustn't be a kernel thread */
        if (parent->pid <= 1 || !parent->mm)
-               goto not_permitted;
+               goto unlock;
 
        /* the parent must be single threaded */
        if (!thread_group_empty(parent))
-               goto not_permitted;
+               goto unlock;
 
        /* the parent and the child must have different session keyrings or
         * there's no point */
        mycred = current_cred();
        pcred = __task_cred(parent);
        if (mycred == pcred ||
-           mycred->tgcred->session_keyring == pcred->tgcred->session_keyring)
-               goto already_same;
+           mycred->tgcred->session_keyring == pcred->tgcred->session_keyring) {
+               ret = 0;
+               goto unlock;
+       }
 
        /* the parent must have the same effective ownership and mustn't be
         * SUID/SGID */
@@ -1507,50 +1514,40 @@ long keyctl_session_to_parent(void)
            pcred->gid  != mycred->egid ||
            pcred->egid != mycred->egid ||
            pcred->sgid != mycred->egid)
-               goto not_permitted;
+               goto unlock;
 
        /* the keyrings must have the same UID */
        if ((pcred->tgcred->session_keyring &&
             pcred->tgcred->session_keyring->uid != mycred->euid) ||
            mycred->tgcred->session_keyring->uid != mycred->euid)
-               goto not_permitted;
+               goto unlock;
 
-       /* if there's an already pending keyring replacement, then we replace
-        * that */
-       oldcred = parent->replacement_session_keyring;
+       /* cancel an already pending keyring replacement */
+       oldwork = task_work_cancel(parent, key_change_session_keyring);
 
        /* the replacement session keyring is applied just prior to userspace
         * restarting */
-       parent->replacement_session_keyring = cred;
-       cred = NULL;
-       set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
-
-       write_unlock_irq(&tasklist_lock);
-       rcu_read_unlock();
-       if (oldcred)
-               put_cred(oldcred);
-       return 0;
-
-already_same:
-       ret = 0;
-not_permitted:
+       ret = task_work_add(parent, newwork, true);
+       if (!ret)
+               newwork = NULL;
+unlock:
        write_unlock_irq(&tasklist_lock);
        rcu_read_unlock();
-       put_cred(cred);
+       if (oldwork) {
+               put_cred(oldwork->data);
+               kfree(oldwork);
+       }
+       if (newwork) {
+               put_cred(newwork->data);
+               kfree(newwork);
+       }
        return ret;
 
+error_newwork:
+       kfree(newwork);
 error_keyring:
        key_ref_put(keyring_r);
        return ret;
-
-#else /* !TIF_NOTIFY_RESUME */
-       /*
-        * To be removed when TIF_NOTIFY_RESUME has been implemented on
-        * m68k/xtensa
-        */
-#warning TIF_NOTIFY_RESUME not implemented
-       return -EOPNOTSUPP;
-#endif /* !TIF_NOTIFY_RESUME */
 }
 
 /*
index d71056db7b67501a085fd4a8feda5c841dd83094..4ad54eea1ea45554d5d931497671fdb32a33660b 100644 (file)
@@ -834,23 +834,17 @@ error:
  * Replace a process's session keyring on behalf of one of its children when
  * the target  process is about to resume userspace execution.
  */
-void key_replace_session_keyring(void)
+void key_change_session_keyring(struct task_work *twork)
 {
-       const struct cred *old;
-       struct cred *new;
-
-       if (!current->replacement_session_keyring)
-               return;
+       const struct cred *old = current_cred();
+       struct cred *new = twork->data;
 
-       write_lock_irq(&tasklist_lock);
-       new = current->replacement_session_keyring;
-       current->replacement_session_keyring = NULL;
-       write_unlock_irq(&tasklist_lock);
-
-       if (!new)
+       kfree(twork);
+       if (unlikely(current->flags & PF_EXITING)) {
+               put_cred(new);
                return;
+       }
 
-       old = current_cred();
        new->  uid      = old->  uid;
        new-> euid      = old-> euid;
        new-> suid      = old-> suid;
index cc3790315d2f15778fcc3c8a3ec7f2434282277d..000e7501752022089b82efeb153115498e55da60 100644 (file)
@@ -93,16 +93,9 @@ static void umh_keys_cleanup(struct subprocess_info *info)
 static int call_usermodehelper_keys(char *path, char **argv, char **envp,
                                        struct key *session_keyring, int wait)
 {
-       gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
-       struct subprocess_info *info =
-               call_usermodehelper_setup(path, argv, envp, gfp_mask);
-
-       if (!info)
-               return -ENOMEM;
-
-       call_usermodehelper_setfns(info, umh_keys_init, umh_keys_cleanup,
-                                       key_get(session_keyring));
-       return call_usermodehelper_exec(info, wait);
+       return call_usermodehelper_fns(path, argv, envp, wait,
+                                      umh_keys_init, umh_keys_cleanup,
+                                      key_get(session_keyring));
 }
 
 /*
index 5497a57fba0154a24b1b87835930e6cc685f855b..3efc9b12aef44016201b02eeafcc10140f17a240 100644 (file)
@@ -20,6 +20,9 @@
 #include <linux/ima.h>
 #include <linux/evm.h>
 #include <linux/fsnotify.h>
+#include <linux/mman.h>
+#include <linux/mount.h>
+#include <linux/personality.h>
 #include <net/flow.h>
 
 #define MAX_LSM_EVM_XATTR      2
@@ -657,18 +660,56 @@ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        return security_ops->file_ioctl(file, cmd, arg);
 }
 
-int security_file_mmap(struct file *file, unsigned long reqprot,
-                       unsigned long prot, unsigned long flags,
-                       unsigned long addr, unsigned long addr_only)
+static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
 {
-       int ret;
+       /*
+        * Do we have PROT_READ and does the application expect
+        * it to imply PROT_EXEC?  If not, nothing to talk about...
+        */
+       if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
+               return prot;
+       if (!(current->personality & READ_IMPLIES_EXEC))
+               return prot;
+       /*
+        * if that's an anonymous mapping, let it.
+        */
+       if (!file)
+               return prot | PROT_EXEC;
+       /*
+        * ditto if it's not on noexec mount, except that on !MMU we need
+        * BDI_CAP_EXEC_MMAP (== VM_MAYEXEC) in this case
+        */
+       if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) {
+#ifndef CONFIG_MMU
+               unsigned long caps = 0;
+               struct address_space *mapping = file->f_mapping;
+               if (mapping && mapping->backing_dev_info)
+                       caps = mapping->backing_dev_info->capabilities;
+               if (!(caps & BDI_CAP_EXEC_MAP))
+                       return prot;
+#endif
+               return prot | PROT_EXEC;
+       }
+       /* anything on noexec mount won't get PROT_EXEC */
+       return prot;
+}
 
-       ret = security_ops->file_mmap(file, reqprot, prot, flags, addr, addr_only);
+int security_mmap_file(struct file *file, unsigned long prot,
+                       unsigned long flags)
+{
+       int ret;
+       ret = security_ops->mmap_file(file, prot,
+                                       mmap_prot(file, prot), flags);
        if (ret)
                return ret;
        return ima_file_mmap(file, prot);
 }
 
+int security_mmap_addr(unsigned long addr)
+{
+       return security_ops->mmap_addr(addr);
+}
+
 int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
                            unsigned long prot)
 {
index fa2341b683314b0c5505f905e6712538555300ad..372ec6502aa8752dca83c3c507e2d0ce9cac84d1 100644 (file)
@@ -3083,9 +3083,7 @@ error:
        return rc;
 }
 
-static int selinux_file_mmap(struct file *file, unsigned long reqprot,
-                            unsigned long prot, unsigned long flags,
-                            unsigned long addr, unsigned long addr_only)
+static int selinux_mmap_addr(unsigned long addr)
 {
        int rc = 0;
        u32 sid = current_sid();
@@ -3104,10 +3102,12 @@ static int selinux_file_mmap(struct file *file, unsigned long reqprot,
        }
 
        /* do DAC check on address space usage */
-       rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
-       if (rc || addr_only)
-               return rc;
+       return cap_mmap_addr(addr);
+}
 
+static int selinux_mmap_file(struct file *file, unsigned long reqprot,
+                            unsigned long prot, unsigned long flags)
+{
        if (selinux_checkreqprot)
                prot = reqprot;
 
@@ -5570,7 +5570,8 @@ static struct security_operations selinux_ops = {
        .file_alloc_security =          selinux_file_alloc_security,
        .file_free_security =           selinux_file_free_security,
        .file_ioctl =                   selinux_file_ioctl,
-       .file_mmap =                    selinux_file_mmap,
+       .mmap_file =                    selinux_mmap_file,
+       .mmap_addr =                    selinux_mmap_addr,
        .file_mprotect =                selinux_file_mprotect,
        .file_lock =                    selinux_file_lock,
        .file_fcntl =                   selinux_file_fcntl,
index 4e93f9ef970b25a78bca26ab2a49962024b3cb50..3ad2902512888282e299b64434a7790d9788e060 100644 (file)
@@ -1259,12 +1259,8 @@ static int sel_make_bools(void)
                if (!inode)
                        goto out;
 
-               ret = -EINVAL;
-               len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
-               if (len < 0)
-                       goto out;
-
                ret = -ENAMETOOLONG;
+               len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
                if (len >= PAGE_SIZE)
                        goto out;
 
@@ -1557,19 +1553,10 @@ static inline u32 sel_ino_to_perm(unsigned long ino)
 static ssize_t sel_read_class(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
 {
-       ssize_t rc, len;
-       char *page;
        unsigned long ino = file->f_path.dentry->d_inode->i_ino;
-
-       page = (char *)__get_free_page(GFP_KERNEL);
-       if (!page)
-               return -ENOMEM;
-
-       len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_class(ino));
-       rc = simple_read_from_buffer(buf, count, ppos, page, len);
-       free_page((unsigned long)page);
-
-       return rc;
+       char res[TMPBUFLEN];
+       ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_class(ino));
+       return simple_read_from_buffer(buf, count, ppos, res, len);
 }
 
 static const struct file_operations sel_class_ops = {
@@ -1580,19 +1567,10 @@ static const struct file_operations sel_class_ops = {
 static ssize_t sel_read_perm(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
 {
-       ssize_t rc, len;
-       char *page;
        unsigned long ino = file->f_path.dentry->d_inode->i_ino;
-
-       page = (char *)__get_free_page(GFP_KERNEL);
-       if (!page)
-               return -ENOMEM;
-
-       len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_perm(ino));
-       rc = simple_read_from_buffer(buf, count, ppos, page, len);
-       free_page((unsigned long)page);
-
-       return rc;
+       char res[TMPBUFLEN];
+       ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino));
+       return simple_read_from_buffer(buf, count, ppos, res, len);
 }
 
 static const struct file_operations sel_perm_ops = {
index d583c054580889eff6f4e9080110aad7ae0370d1..ee0bb5735f35c98d6edfa7bb4c9590ef2a234cc4 100644 (file)
@@ -1171,7 +1171,7 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
 }
 
 /**
- * smack_file_mmap :
+ * smack_mmap_file :
  * Check permissions for a mmap operation.  The @file may be NULL, e.g.
  * if mapping anonymous memory.
  * @file contains the file structure for file to map (may be NULL).
@@ -1180,10 +1180,9 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
  * @flags contains the operational flags.
  * Return 0 if permission is granted.
  */
-static int smack_file_mmap(struct file *file,
+static int smack_mmap_file(struct file *file,
                           unsigned long reqprot, unsigned long prot,
-                          unsigned long flags, unsigned long addr,
-                          unsigned long addr_only)
+                          unsigned long flags)
 {
        struct smack_known *skp;
        struct smack_rule *srp;
@@ -1198,11 +1197,6 @@ static int smack_file_mmap(struct file *file,
        int tmay;
        int rc;
 
-       /* do DAC check on address space usage */
-       rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
-       if (rc || addr_only)
-               return rc;
-
        if (file == NULL || file->f_dentry == NULL)
                return 0;
 
@@ -3482,7 +3476,8 @@ struct security_operations smack_ops = {
        .file_ioctl =                   smack_file_ioctl,
        .file_lock =                    smack_file_lock,
        .file_fcntl =                   smack_file_fcntl,
-       .file_mmap =                    smack_file_mmap,
+       .mmap_file =                    smack_mmap_file,
+       .mmap_addr =                    cap_mmap_addr,
        .file_set_fowner =              smack_file_set_fowner,
        .file_send_sigiotask =          smack_file_send_sigiotask,
        .file_receive =                 smack_file_receive,
index faedb1481b240d9195b097f1448f98df3dcb6912..8f312fa6c282c88db2cd0c84389096228fe70fc4 100644 (file)
@@ -313,9 +313,22 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
        snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
        snd_pcm_sframes_t hdelta, delta;
        unsigned long jdelta;
+       unsigned long curr_jiffies;
+       struct timespec curr_tstamp;
 
        old_hw_ptr = runtime->status->hw_ptr;
+
+       /*
+        * group pointer, time and jiffies reads to allow for more
+        * accurate correlations/corrections.
+        * The values are stored at the end of this routine after
+        * corrections for hw_ptr position
+        */
        pos = substream->ops->pointer(substream);
+       curr_jiffies = jiffies;
+       if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
+               snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
+
        if (pos == SNDRV_PCM_POS_XRUN) {
                xrun(substream);
                return -EPIPE;
@@ -343,7 +356,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
                delta = runtime->hw_ptr_interrupt + runtime->period_size;
                if (delta > new_hw_ptr) {
                        /* check for double acknowledged interrupts */
-                       hdelta = jiffies - runtime->hw_ptr_jiffies;
+                       hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
                        if (hdelta > runtime->hw_ptr_buffer_jiffies/2) {
                                hw_base += runtime->buffer_size;
                                if (hw_base >= runtime->boundary)
@@ -388,7 +401,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
                 * Without regular period interrupts, we have to check
                 * the elapsed time to detect xruns.
                 */
-               jdelta = jiffies - runtime->hw_ptr_jiffies;
+               jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
                if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
                        goto no_delta_check;
                hdelta = jdelta - delta * HZ / runtime->rate;
@@ -430,7 +443,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
        if (hdelta < runtime->delay)
                goto no_jiffies_check;
        hdelta -= runtime->delay;
-       jdelta = jiffies - runtime->hw_ptr_jiffies;
+       jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
        if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
                delta = jdelta /
                        (((runtime->period_size * HZ) / runtime->rate)
@@ -492,9 +505,9 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
        }
        runtime->hw_ptr_base = hw_base;
        runtime->status->hw_ptr = new_hw_ptr;
-       runtime->hw_ptr_jiffies = jiffies;
+       runtime->hw_ptr_jiffies = curr_jiffies;
        if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
-               snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
+               runtime->status->tstamp = curr_tstamp;
 
        return snd_pcm_update_state(substream, runtime);
 }
index eb09a3348325358b0a741d3408d48cc4e5e8fffd..41ca803a1fff9d1a4c60baa3889cc3171a990bf2 100644 (file)
@@ -2239,24 +2239,50 @@ void snd_hda_ctls_clear(struct hda_codec *codec)
 /* pseudo device locking
  * toggle card->shutdown to allow/disallow the device access (as a hack)
  */
-static int hda_lock_devices(struct snd_card *card)
+int snd_hda_lock_devices(struct hda_bus *bus)
 {
+       struct snd_card *card = bus->card;
+       struct hda_codec *codec;
+
        spin_lock(&card->files_lock);
-       if (card->shutdown) {
-               spin_unlock(&card->files_lock);
-               return -EINVAL;
-       }
+       if (card->shutdown)
+               goto err_unlock;
        card->shutdown = 1;
+       if (!list_empty(&card->ctl_files))
+               goto err_clear;
+
+       list_for_each_entry(codec, &bus->codec_list, list) {
+               int pcm;
+               for (pcm = 0; pcm < codec->num_pcms; pcm++) {
+                       struct hda_pcm *cpcm = &codec->pcm_info[pcm];
+                       if (!cpcm->pcm)
+                               continue;
+                       if (cpcm->pcm->streams[0].substream_opened ||
+                           cpcm->pcm->streams[1].substream_opened)
+                               goto err_clear;
+               }
+       }
        spin_unlock(&card->files_lock);
        return 0;
+
+ err_clear:
+       card->shutdown = 0;
+ err_unlock:
+       spin_unlock(&card->files_lock);
+       return -EINVAL;
 }
+EXPORT_SYMBOL_HDA(snd_hda_lock_devices);
 
-static void hda_unlock_devices(struct snd_card *card)
+void snd_hda_unlock_devices(struct hda_bus *bus)
 {
+       struct snd_card *card = bus->card;
+
+       card = bus->card;
        spin_lock(&card->files_lock);
        card->shutdown = 0;
        spin_unlock(&card->files_lock);
 }
+EXPORT_SYMBOL_HDA(snd_hda_unlock_devices);
 
 /**
  * snd_hda_codec_reset - Clear all objects assigned to the codec
@@ -2270,26 +2296,12 @@ static void hda_unlock_devices(struct snd_card *card)
  */
 int snd_hda_codec_reset(struct hda_codec *codec)
 {
-       struct snd_card *card = codec->bus->card;
-       int i, pcm;
+       struct hda_bus *bus = codec->bus;
+       struct snd_card *card = bus->card;
+       int i;
 
-       if (hda_lock_devices(card) < 0)
-               return -EBUSY;
-       /* check whether the codec isn't used by any mixer or PCM streams */
-       if (!list_empty(&card->ctl_files)) {
-               hda_unlock_devices(card);
+       if (snd_hda_lock_devices(bus) < 0)
                return -EBUSY;
-       }
-       for (pcm = 0; pcm < codec->num_pcms; pcm++) {
-               struct hda_pcm *cpcm = &codec->pcm_info[pcm];
-               if (!cpcm->pcm)
-                       continue;
-               if (cpcm->pcm->streams[0].substream_opened ||
-                   cpcm->pcm->streams[1].substream_opened) {
-                       hda_unlock_devices(card);
-                       return -EBUSY;
-               }
-       }
 
        /* OK, let it free */
 
@@ -2298,7 +2310,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
        codec->power_on = 0;
        codec->power_transition = 0;
        codec->power_jiffies = jiffies;
-       flush_workqueue(codec->bus->workq);
+       flush_workqueue(bus->workq);
 #endif
        snd_hda_ctls_clear(codec);
        /* relase PCMs */
@@ -2306,7 +2318,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
                if (codec->pcm_info[i].pcm) {
                        snd_device_free(card, codec->pcm_info[i].pcm);
                        clear_bit(codec->pcm_info[i].device,
-                                 codec->bus->pcm_dev_bits);
+                                 bus->pcm_dev_bits);
                }
        }
        if (codec->patch_ops.free)
@@ -2331,7 +2343,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
        codec->owner = NULL;
 
        /* allow device access again */
-       hda_unlock_devices(card);
+       snd_hda_unlock_devices(bus);
        return 0;
 }
 
index 54b52819fb47acf5ef8b2342643a1ece7d9a8c57..4fc3960c85917837508ef32b1d954737061fc926 100644 (file)
@@ -1023,6 +1023,9 @@ void snd_hda_codec_set_power_to_all(struct hda_codec *codec, hda_nid_t fg,
                                    unsigned int power_state,
                                    bool eapd_workaround);
 
+int snd_hda_lock_devices(struct hda_bus *bus);
+void snd_hda_unlock_devices(struct hda_bus *bus);
+
 /*
  * power management
  */
index 4ab8102f87ea63ccb83ccbb6c97df2d17fa13eb2..2b6392be451c688830cf9d42e346d0eee61ac1dc 100644 (file)
@@ -53,6 +53,8 @@
 #endif
 #include <sound/core.h>
 #include <sound/initval.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
 #include "hda_codec.h"
 
 
@@ -175,6 +177,13 @@ MODULE_DESCRIPTION("Intel HDA driver");
 #define SFX    "hda-intel: "
 #endif
 
+#if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO)
+#ifdef CONFIG_SND_HDA_CODEC_HDMI
+#define SUPPORT_VGA_SWITCHEROO
+#endif
+#endif
+
+
 /*
  * registers
  */
@@ -472,6 +481,12 @@ struct azx {
        unsigned int probing :1; /* codec probing phase */
        unsigned int snoop:1;
        unsigned int align_buffer_size:1;
+       unsigned int region_requested:1;
+
+       /* VGA-switcheroo setup */
+       unsigned int use_vga_switcheroo:1;
+       unsigned int init_failed:1; /* delayed init failed */
+       unsigned int disabled:1; /* disabled by VGA-switcher */
 
        /* for debugging */
        unsigned int last_cmd[AZX_MAX_CODECS];
@@ -538,7 +553,20 @@ enum {
 #define AZX_DCAPS_PRESET_CTHDA \
        (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
 
-static char *driver_short_names[] __devinitdata = {
+/*
+ * VGA-switcher support
+ */
+#ifdef SUPPORT_VGA_SWITCHEROO
+#define DELAYED_INIT_MARK
+#define DELAYED_INITDATA_MARK
+#define use_vga_switcheroo(chip)       ((chip)->use_vga_switcheroo)
+#else
+#define DELAYED_INIT_MARK      __devinit
+#define DELAYED_INITDATA_MARK  __devinitdata
+#define use_vga_switcheroo(chip)       0
+#endif
+
+static char *driver_short_names[] DELAYED_INITDATA_MARK = {
        [AZX_DRIVER_ICH] = "HDA Intel",
        [AZX_DRIVER_PCH] = "HDA Intel PCH",
        [AZX_DRIVER_SCH] = "HDA Intel MID",
@@ -959,6 +987,8 @@ static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
 {
        struct azx *chip = bus->private_data;
 
+       if (chip->disabled)
+               return 0;
        chip->last_cmd[azx_command_addr(val)] = val;
        if (chip->single_cmd)
                return azx_single_send_cmd(bus, val);
@@ -971,6 +1001,8 @@ static unsigned int azx_get_response(struct hda_bus *bus,
                                     unsigned int addr)
 {
        struct azx *chip = bus->private_data;
+       if (chip->disabled)
+               return 0;
        if (chip->single_cmd)
                return azx_single_get_response(bus, addr);
        else
@@ -1236,6 +1268,11 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
 
        spin_lock(&chip->reg_lock);
 
+       if (chip->disabled) {
+               spin_unlock(&chip->reg_lock);
+               return IRQ_NONE;
+       }
+
        status = azx_readl(chip, INTSTS);
        if (status == 0) {
                spin_unlock(&chip->reg_lock);
@@ -1521,12 +1558,12 @@ static void azx_bus_reset(struct hda_bus *bus)
  */
 
 /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
-static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] __devinitdata = {
+static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] DELAYED_INITDATA_MARK = {
        [AZX_DRIVER_NVIDIA] = 8,
        [AZX_DRIVER_TERA] = 1,
 };
 
-static int __devinit azx_codec_create(struct azx *chip, const char *model)
+static int DELAYED_INIT_MARK azx_codec_create(struct azx *chip, const char *model)
 {
        struct hda_bus_template bus_temp;
        int c, codecs, err;
@@ -2444,6 +2481,105 @@ static void azx_notifier_unregister(struct azx *chip)
                unregister_reboot_notifier(&chip->reboot_notifier);
 }
 
+static int DELAYED_INIT_MARK azx_first_init(struct azx *chip);
+static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip);
+
+static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci);
+
+#ifdef SUPPORT_VGA_SWITCHEROO
+static void azx_vs_set_state(struct pci_dev *pci,
+                            enum vga_switcheroo_state state)
+{
+       struct snd_card *card = pci_get_drvdata(pci);
+       struct azx *chip = card->private_data;
+       bool disabled;
+
+       if (chip->init_failed)
+               return;
+
+       disabled = (state == VGA_SWITCHEROO_OFF);
+       if (chip->disabled == disabled)
+               return;
+
+       if (!chip->bus) {
+               chip->disabled = disabled;
+               if (!disabled) {
+                       snd_printk(KERN_INFO SFX
+                                  "%s: Start delayed initialization\n",
+                                  pci_name(chip->pci));
+                       if (azx_first_init(chip) < 0 ||
+                           azx_probe_continue(chip) < 0) {
+                               snd_printk(KERN_ERR SFX
+                                          "%s: initialization error\n",
+                                          pci_name(chip->pci));
+                               chip->init_failed = true;
+                       }
+               }
+       } else {
+               snd_printk(KERN_INFO SFX
+                          "%s %s via VGA-switcheroo\n",
+                          disabled ? "Disabling" : "Enabling",
+                          pci_name(chip->pci));
+               if (disabled) {
+                       azx_suspend(pci, PMSG_FREEZE);
+                       chip->disabled = true;
+                       snd_hda_lock_devices(chip->bus);
+               } else {
+                       snd_hda_unlock_devices(chip->bus);
+                       chip->disabled = false;
+                       azx_resume(pci);
+               }
+       }
+}
+
+static bool azx_vs_can_switch(struct pci_dev *pci)
+{
+       struct snd_card *card = pci_get_drvdata(pci);
+       struct azx *chip = card->private_data;
+
+       if (chip->init_failed)
+               return false;
+       if (chip->disabled || !chip->bus)
+               return true;
+       if (snd_hda_lock_devices(chip->bus))
+               return false;
+       snd_hda_unlock_devices(chip->bus);
+       return true;
+}
+
+static void __devinit init_vga_switcheroo(struct azx *chip)
+{
+       struct pci_dev *p = get_bound_vga(chip->pci);
+       if (p) {
+               snd_printk(KERN_INFO SFX
+                          "%s: Handle VGA-switcheroo audio client\n",
+                          pci_name(chip->pci));
+               chip->use_vga_switcheroo = 1;
+               pci_dev_put(p);
+       }
+}
+
+static const struct vga_switcheroo_client_ops azx_vs_ops = {
+       .set_gpu_state = azx_vs_set_state,
+       .can_switch = azx_vs_can_switch,
+};
+
+static int __devinit register_vga_switcheroo(struct azx *chip)
+{
+       if (!chip->use_vga_switcheroo)
+               return 0;
+       /* FIXME: currently only handling DIS controller
+        * is there any machine with two switchable HDMI audio controllers?
+        */
+       return vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
+                                                   VGA_SWITCHEROO_DIS,
+                                                   chip->bus != NULL);
+}
+#else
+#define init_vga_switcheroo(chip)              /* NOP */
+#define register_vga_switcheroo(chip)          0
+#endif /* SUPPORT_VGA_SWITCHEROO */
+
 /*
  * destructor
  */
@@ -2453,6 +2589,12 @@ static int azx_free(struct azx *chip)
 
        azx_notifier_unregister(chip);
 
+       if (use_vga_switcheroo(chip)) {
+               if (chip->disabled && chip->bus)
+                       snd_hda_unlock_devices(chip->bus);
+               vga_switcheroo_unregister_client(chip->pci);
+       }
+
        if (chip->initialized) {
                azx_clear_irq_pending(chip);
                for (i = 0; i < chip->num_streams; i++)
@@ -2482,7 +2624,8 @@ static int azx_free(struct azx *chip)
                mark_pages_wc(chip, &chip->posbuf, false);
                snd_dma_free_pages(&chip->posbuf);
        }
-       pci_release_regions(chip->pci);
+       if (chip->region_requested)
+               pci_release_regions(chip->pci);
        pci_disable_device(chip->pci);
        kfree(chip->azx_dev);
        kfree(chip);
@@ -2495,6 +2638,45 @@ static int azx_dev_free(struct snd_device *device)
        return azx_free(device->device_data);
 }
 
+/*
+ * Check whether the HDMI audio controller has been disabled by vga-switcheroo
+ */
+static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci)
+{
+       struct pci_dev *p;
+
+       /* check only discrete GPU */
+       switch (pci->vendor) {
+       case PCI_VENDOR_ID_ATI:
+       case PCI_VENDOR_ID_AMD:
+       case PCI_VENDOR_ID_NVIDIA:
+               if (pci->devfn == 1) {
+                       p = pci_get_domain_bus_and_slot(pci_domain_nr(pci->bus),
+                                                       pci->bus->number, 0);
+                       if (p) {
+                               if ((p->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+                                       return p;
+                               pci_dev_put(p);
+                       }
+               }
+               break;
+       }
+       return NULL;
+}
+
+static bool __devinit check_hdmi_disabled(struct pci_dev *pci)
+{
+       bool vga_inactive = false;
+       struct pci_dev *p = get_bound_vga(pci);
+
+       if (p) {
+               if (vga_default_device() && p != vga_default_device())
+                       vga_inactive = true;
+               pci_dev_put(p);
+       }
+       return vga_inactive;
+}
+
 /*
  * white/black-listing for position_fix
  */
@@ -2672,12 +2854,11 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
                                int dev, unsigned int driver_caps,
                                struct azx **rchip)
 {
-       struct azx *chip;
-       int i, err;
-       unsigned short gcap;
        static struct snd_device_ops ops = {
                .dev_free = azx_dev_free,
        };
+       struct azx *chip;
+       int err;
 
        *rchip = NULL;
 
@@ -2703,6 +2884,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
        chip->dev_index = dev;
        INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work);
        INIT_LIST_HEAD(&chip->pcm_list);
+       init_vga_switcheroo(chip);
 
        chip->position_fix[0] = chip->position_fix[1] =
                check_position_fix(chip, position_fix[dev]);
@@ -2730,6 +2912,53 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
                }
        }
 
+       if (check_hdmi_disabled(pci)) {
+               snd_printk(KERN_INFO SFX "VGA controller for %s is disabled\n",
+                          pci_name(pci));
+               if (use_vga_switcheroo(chip)) {
+                       snd_printk(KERN_INFO SFX "Delaying initialization\n");
+                       chip->disabled = true;
+                       goto ok;
+               }
+               kfree(chip);
+               pci_disable_device(pci);
+               return -ENXIO;
+       }
+
+       err = azx_first_init(chip);
+       if (err < 0) {
+               azx_free(chip);
+               return err;
+       }
+
+ ok:
+       err = register_vga_switcheroo(chip);
+       if (err < 0) {
+               snd_printk(KERN_ERR SFX
+                          "Error registering VGA-switcheroo client\n");
+               azx_free(chip);
+               return err;
+       }
+
+       err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+       if (err < 0) {
+               snd_printk(KERN_ERR SFX "Error creating device [card]!\n");
+               azx_free(chip);
+               return err;
+       }
+
+       *rchip = chip;
+       return 0;
+}
+
+static int DELAYED_INIT_MARK azx_first_init(struct azx *chip)
+{
+       int dev = chip->dev_index;
+       struct pci_dev *pci = chip->pci;
+       struct snd_card *card = chip->card;
+       int i, err;
+       unsigned short gcap;
+
 #if BITS_PER_LONG != 64
        /* Fix up base address on ULI M5461 */
        if (chip->driver_type == AZX_DRIVER_ULI) {
@@ -2741,28 +2970,23 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
 #endif
 
        err = pci_request_regions(pci, "ICH HD audio");
-       if (err < 0) {
-               kfree(chip);
-               pci_disable_device(pci);
+       if (err < 0)
                return err;
-       }
+       chip->region_requested = 1;
 
        chip->addr = pci_resource_start(pci, 0);
        chip->remap_addr = pci_ioremap_bar(pci, 0);
        if (chip->remap_addr == NULL) {
                snd_printk(KERN_ERR SFX "ioremap error\n");
-               err = -ENXIO;
-               goto errout;
+               return -ENXIO;
        }
 
        if (chip->msi)
                if (pci_enable_msi(pci) < 0)
                        chip->msi = 0;
 
-       if (azx_acquire_irq(chip, 0) < 0) {
-               err = -EBUSY;
-               goto errout;
-       }
+       if (azx_acquire_irq(chip, 0) < 0)
+               return -EBUSY;
 
        pci_set_master(pci);
        synchronize_irq(chip->irq);
@@ -2841,7 +3065,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
                                GFP_KERNEL);
        if (!chip->azx_dev) {
                snd_printk(KERN_ERR SFX "cannot malloc azx_dev\n");
-               goto errout;
+               return -ENOMEM;
        }
 
        for (i = 0; i < chip->num_streams; i++) {
@@ -2851,7 +3075,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
                                          BDL_SIZE, &chip->azx_dev[i].bdl);
                if (err < 0) {
                        snd_printk(KERN_ERR SFX "cannot allocate BDL\n");
-                       goto errout;
+                       return -ENOMEM;
                }
                mark_pages_wc(chip, &chip->azx_dev[i].bdl, true);
        }
@@ -2861,13 +3085,13 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
                                  chip->num_streams * 8, &chip->posbuf);
        if (err < 0) {
                snd_printk(KERN_ERR SFX "cannot allocate posbuf\n");
-               goto errout;
+               return -ENOMEM;
        }
        mark_pages_wc(chip, &chip->posbuf, true);
        /* allocate CORB/RIRB */
        err = azx_alloc_cmd_io(chip);
        if (err < 0)
-               goto errout;
+               return err;
 
        /* initialize streams */
        azx_init_stream(chip);
@@ -2879,14 +3103,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
        /* codec detection */
        if (!chip->codec_mask) {
                snd_printk(KERN_ERR SFX "no codecs found!\n");
-               err = -ENODEV;
-               goto errout;
-       }
-
-       err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
-       if (err <0) {
-               snd_printk(KERN_ERR SFX "Error creating device [card]!\n");
-               goto errout;
+               return -ENODEV;
        }
 
        strcpy(card->driver, "HDA-Intel");
@@ -2896,12 +3113,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
                 "%s at 0x%lx irq %i",
                 card->shortname, chip->addr, chip->irq);
 
-       *rchip = chip;
        return 0;
-
- errout:
-       azx_free(chip);
-       return err;
 }
 
 static void power_down_all_codecs(struct azx *chip)
@@ -2946,6 +3158,27 @@ static int __devinit azx_probe(struct pci_dev *pci,
                goto out_free;
        card->private_data = chip;
 
+       if (!chip->disabled) {
+               err = azx_probe_continue(chip);
+               if (err < 0)
+                       goto out_free;
+       }
+
+       pci_set_drvdata(pci, card);
+
+       dev++;
+       return 0;
+
+out_free:
+       snd_card_free(card);
+       return err;
+}
+
+static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip)
+{
+       int dev = chip->dev_index;
+       int err;
+
 #ifdef CONFIG_SND_HDA_INPUT_BEEP
        chip->beep_mode = beep_mode[dev];
 #endif
@@ -2979,25 +3212,26 @@ static int __devinit azx_probe(struct pci_dev *pci,
        if (err < 0)
                goto out_free;
 
-       err = snd_card_register(card);
+       err = snd_card_register(chip->card);
        if (err < 0)
                goto out_free;
 
-       pci_set_drvdata(pci, card);
        chip->running = 1;
        power_down_all_codecs(chip);
        azx_notifier_register(chip);
 
-       dev++;
-       return err;
+       return 0;
+
 out_free:
-       snd_card_free(card);
+       chip->init_failed = 1;
        return err;
 }
 
 static void __devexit azx_remove(struct pci_dev *pci)
 {
-       snd_card_free(pci_get_drvdata(pci));
+       struct snd_card *card = pci_get_drvdata(pci);
+       if (card)
+               snd_card_free(card);
        pci_set_drvdata(pci, NULL);
 }
 
index ff71dcef08ef1c0d3b2790121fbfda1ebd989758..224410e8e9e7461431063a8d1f23b8650362faca 100644 (file)
@@ -2368,6 +2368,7 @@ static struct alc_codec_rename_table rename_tbl[] = {
        { 0x10ec0269, 0xffff, 0xa023, "ALC259" },
        { 0x10ec0269, 0xffff, 0x6023, "ALC281X" },
        { 0x10ec0269, 0x00f0, 0x0020, "ALC269VC" },
+       { 0x10ec0269, 0x00f0, 0x0030, "ALC269VD" },
        { 0x10ec0887, 0x00f0, 0x0030, "ALC887-VD" },
        { 0x10ec0888, 0x00f0, 0x0030, "ALC888-VD" },
        { 0x10ec0888, 0xf0f0, 0x3020, "ALC886" },
@@ -5614,6 +5615,7 @@ enum {
        ALC269_TYPE_ALC269VA,
        ALC269_TYPE_ALC269VB,
        ALC269_TYPE_ALC269VC,
+       ALC269_TYPE_ALC269VD,
 };
 
 /*
@@ -5625,8 +5627,21 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        static const hda_nid_t alc269_ssids[] = { 0, 0x1b, 0x14, 0x21 };
        static const hda_nid_t alc269va_ssids[] = { 0x15, 0x1b, 0x14, 0 };
        struct alc_spec *spec = codec->spec;
-       const hda_nid_t *ssids = spec->codec_variant == ALC269_TYPE_ALC269VA ?
-               alc269va_ssids : alc269_ssids;
+       const hda_nid_t *ssids;
+
+       switch (spec->codec_variant) {
+       case ALC269_TYPE_ALC269VA:
+       case ALC269_TYPE_ALC269VC:
+               ssids = alc269va_ssids;
+               break;
+       case ALC269_TYPE_ALC269VB:
+       case ALC269_TYPE_ALC269VD:
+               ssids = alc269_ssids;
+               break;
+       default:
+               ssids = alc269_ssids;
+               break;
+       }
 
        return alc_parse_auto_config(codec, alc269_ignore, ssids);
 }
@@ -5643,6 +5658,11 @@ static void alc269_toggle_power_output(struct hda_codec *codec, int power_up)
 
 static void alc269_shutup(struct hda_codec *codec)
 {
+       struct alc_spec *spec = codec->spec;
+
+       if (spec->codec_variant != ALC269_TYPE_ALC269VB)
+               return;
+
        if ((alc_get_coef0(codec) & 0x00ff) == 0x017)
                alc269_toggle_power_output(codec, 0);
        if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
@@ -5654,19 +5674,24 @@ static void alc269_shutup(struct hda_codec *codec)
 #ifdef CONFIG_PM
 static int alc269_resume(struct hda_codec *codec)
 {
-       if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
+       struct alc_spec *spec = codec->spec;
+
+       if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+                       (alc_get_coef0(codec) & 0x00ff) == 0x018) {
                alc269_toggle_power_output(codec, 0);
                msleep(150);
        }
 
        codec->patch_ops.init(codec);
 
-       if ((alc_get_coef0(codec) & 0x00ff) == 0x017) {
+       if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+                       (alc_get_coef0(codec) & 0x00ff) == 0x017) {
                alc269_toggle_power_output(codec, 1);
                msleep(200);
        }
 
-       if ((alc_get_coef0(codec) & 0x00ff) == 0x018)
+       if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+                       (alc_get_coef0(codec) & 0x00ff) == 0x018)
                alc269_toggle_power_output(codec, 1);
 
        snd_hda_codec_resume_amp(codec);
@@ -6081,6 +6106,9 @@ static int patch_alc269(struct hda_codec *codec)
                                err = alc_codec_rename(codec, "ALC3202");
                        spec->codec_variant = ALC269_TYPE_ALC269VC;
                        break;
+               case 0x0030:
+                       spec->codec_variant = ALC269_TYPE_ALC269VD;
+                       break;
                default:
                        alc_fix_pll_init(codec, 0x20, 0x04, 15);
                }
index 0a5027b94714afb9600b94922f6d89140271492d..b8ac8710f47fb200d602bc2917598a153cad6884 100644 (file)
@@ -1988,6 +1988,13 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
        period = hdspm_read(hdspm, HDSPM_RD_PLL_FREQ);
        rate = hdspm_calc_dds_value(hdspm, period);
 
+       if (rate > 207000) {
+               /* Unreasonably high sample rate as seen on PCI MADI cards.
+                * Use the cached value instead.
+                */
+               rate = hdspm->system_sample_rate;
+       }
+
        return rate;
 }
 
index cf3ed0362c9ccf76cbbdc975c8788b33725b70f7..28dd76c7cb1c08ac308ca7730d9abfc8b02b0903 100644 (file)
@@ -543,7 +543,7 @@ static int imx_ssi_probe(struct platform_device *pdev)
                        ret);
                goto failed_clk;
        }
-       clk_enable(ssi->clk);
+       clk_prepare_enable(ssi->clk);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
@@ -641,7 +641,7 @@ failed_ac97:
 failed_ioremap:
        release_mem_region(res->start, resource_size(res));
 failed_get_resource:
-       clk_disable(ssi->clk);
+       clk_disable_unprepare(ssi->clk);
        clk_put(ssi->clk);
 failed_clk:
        kfree(ssi);
@@ -664,7 +664,7 @@ static int __devexit imx_ssi_remove(struct platform_device *pdev)
 
        iounmap(ssi->base);
        release_mem_region(res->start, resource_size(res));
-       clk_disable(ssi->clk);
+       clk_disable_unprepare(ssi->clk);
        clk_put(ssi->clk);
        kfree(ssi);
 
index 3cb9aa4299d38ff3845dfc83b96c7b701f73cea5..fa4556750451ce29f7236174356573b65e52e56c 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/mbus.h>
 #include <linux/delay.h>
+#include <linux/clk.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
@@ -449,6 +450,14 @@ static __devinit int kirkwood_i2s_dev_probe(struct platform_device *pdev)
 
        priv->burst = data->burst;
 
+       priv->clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(priv->clk)) {
+               dev_err(&pdev->dev, "no clock\n");
+               err = PTR_ERR(priv->clk);
+               goto err_ioremap;
+       }
+       clk_prepare_enable(priv->clk);
+
        return snd_soc_register_dai(&pdev->dev, &kirkwood_i2s_dai);
 
 err_ioremap:
@@ -466,6 +475,10 @@ static __devexit int kirkwood_i2s_dev_remove(struct platform_device *pdev)
        struct kirkwood_dma_data *priv = dev_get_drvdata(&pdev->dev);
 
        snd_soc_unregister_dai(&pdev->dev);
+
+       clk_disable_unprepare(priv->clk);
+       clk_put(priv->clk);
+
        iounmap(priv->io);
        release_mem_region(priv->mem->start, SZ_16K);
        kfree(priv);
index 9047436b3937248c91ba8a3630176dd169bd2fd5..f9084d83e6bd2ae178d562c2c243de4bb1fb6fcf 100644 (file)
@@ -123,6 +123,7 @@ struct kirkwood_dma_data {
        void __iomem *io;
        int irq;
        int burst;
+       struct clk *clk;
 };
 
 #endif
index 9ccfa5e1c11b4f236128663b4cfaca8bae197a75..57a2fa751085cc025abe5490deb2e8513b0a7d6b 100644 (file)
@@ -109,11 +109,12 @@ config SND_OMAP_SOC_OMAP_ABE_TWL6040
          - PandaBoard (4430)
          - PandaBoardES (4460)
 
-config SND_OMAP_SOC_OMAP4_HDMI
-       tristate "SoC Audio support for Texas Instruments OMAP4 HDMI"
-       depends on SND_OMAP_SOC && OMAP4_DSS_HDMI && OMAP2_DSS && ARCH_OMAP4
+config SND_OMAP_SOC_OMAP_HDMI
+       tristate "SoC Audio support for Texas Instruments OMAP HDMI"
+       depends on SND_OMAP_SOC && OMAP4_DSS_HDMI && OMAP2_DSS
        select SND_OMAP_SOC_HDMI
        select SND_SOC_OMAP_HDMI_CODEC
+       select OMAP4_DSS_HDMI_AUDIO
        help
          Say Y if you want to add support for SoC HDMI audio on Texas Instruments
          OMAP4 chips
index 1d656bce01d48e599f18e7a1ec74f91cd1fcb573..0e14dd3225650b0e6988f8ad1ec90d70788de3e5 100644 (file)
@@ -25,7 +25,7 @@ snd-soc-omap3pandora-objs := omap3pandora.o
 snd-soc-omap3beagle-objs := omap3beagle.o
 snd-soc-zoom2-objs := zoom2.o
 snd-soc-igep0020-objs := igep0020.o
-snd-soc-omap4-hdmi-objs := omap4-hdmi-card.o
+snd-soc-omap-hdmi-card-objs := omap-hdmi-card.o
 
 obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
 obj-$(CONFIG_SND_OMAP_SOC_RX51) += snd-soc-rx51.o
@@ -41,4 +41,4 @@ obj-$(CONFIG_SND_OMAP_SOC_OMAP3_PANDORA) += snd-soc-omap3pandora.o
 obj-$(CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE) += snd-soc-omap3beagle.o
 obj-$(CONFIG_SND_OMAP_SOC_ZOOM2) += snd-soc-zoom2.o
 obj-$(CONFIG_SND_OMAP_SOC_IGEP0020) += snd-soc-igep0020.o
-obj-$(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) += snd-soc-omap4-hdmi.o
+obj-$(CONFIG_SND_OMAP_SOC_OMAP_HDMI) += snd-soc-omap-hdmi-card.o
index e5f44440d1b948fda1de18196bd1190b5aa707a4..34835e8a9160ce16b1d074a1f8495d1124e5eac8 100644 (file)
@@ -109,6 +109,47 @@ static void omap_mcbsp_dump_reg(struct omap_mcbsp *mcbsp)
        dev_dbg(mcbsp->dev, "***********************\n");
 }
 
+static irqreturn_t omap_mcbsp_irq_handler(int irq, void *dev_id)
+{
+       struct omap_mcbsp *mcbsp = dev_id;
+       u16 irqst;
+
+       irqst = MCBSP_READ(mcbsp, IRQST);
+       dev_dbg(mcbsp->dev, "IRQ callback : 0x%x\n", irqst);
+
+       if (irqst & RSYNCERREN)
+               dev_err(mcbsp->dev, "RX Frame Sync Error!\n");
+       if (irqst & RFSREN)
+               dev_dbg(mcbsp->dev, "RX Frame Sync\n");
+       if (irqst & REOFEN)
+               dev_dbg(mcbsp->dev, "RX End Of Frame\n");
+       if (irqst & RRDYEN)
+               dev_dbg(mcbsp->dev, "RX Buffer Threshold Reached\n");
+       if (irqst & RUNDFLEN)
+               dev_err(mcbsp->dev, "RX Buffer Underflow!\n");
+       if (irqst & ROVFLEN)
+               dev_err(mcbsp->dev, "RX Buffer Overflow!\n");
+
+       if (irqst & XSYNCERREN)
+               dev_err(mcbsp->dev, "TX Frame Sync Error!\n");
+       if (irqst & XFSXEN)
+               dev_dbg(mcbsp->dev, "TX Frame Sync\n");
+       if (irqst & XEOFEN)
+               dev_dbg(mcbsp->dev, "TX End Of Frame\n");
+       if (irqst & XRDYEN)
+               dev_dbg(mcbsp->dev, "TX Buffer threshold Reached\n");
+       if (irqst & XUNDFLEN)
+               dev_err(mcbsp->dev, "TX Buffer Underflow!\n");
+       if (irqst & XOVFLEN)
+               dev_err(mcbsp->dev, "TX Buffer Overflow!\n");
+       if (irqst & XEMPTYEOFEN)
+               dev_dbg(mcbsp->dev, "TX Buffer empty at end of frame\n");
+
+       MCBSP_WRITE(mcbsp, IRQST, irqst);
+
+       return IRQ_HANDLED;
+}
+
 static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *dev_id)
 {
        struct omap_mcbsp *mcbsp_tx = dev_id;
@@ -176,6 +217,10 @@ void omap_mcbsp_config(struct omap_mcbsp *mcbsp,
        /* Enable wakeup behavior */
        if (mcbsp->pdata->has_wakeup)
                MCBSP_WRITE(mcbsp, WAKEUPEN, XRDYEN | RRDYEN);
+
+       /* Enable TX/RX sync error interrupts by default */
+       if (mcbsp->irq)
+               MCBSP_WRITE(mcbsp, IRQEN, RSYNCERREN | XSYNCERREN);
 }
 
 /**
@@ -489,23 +534,25 @@ int omap_mcbsp_request(struct omap_mcbsp *mcbsp)
        MCBSP_WRITE(mcbsp, SPCR1, 0);
        MCBSP_WRITE(mcbsp, SPCR2, 0);
 
-       err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler,
-                               0, "McBSP", (void *)mcbsp);
-       if (err != 0) {
-               dev_err(mcbsp->dev, "Unable to request TX IRQ %d "
-                               "for McBSP%d\n", mcbsp->tx_irq,
-                               mcbsp->id);
-               goto err_clk_disable;
-       }
+       if (mcbsp->irq) {
+               err = request_irq(mcbsp->irq, omap_mcbsp_irq_handler, 0,
+                                 "McBSP", (void *)mcbsp);
+               if (err != 0) {
+                       dev_err(mcbsp->dev, "Unable to request IRQ\n");
+                       goto err_clk_disable;
+               }
+       } else {
+               err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler, 0,
+                                 "McBSP TX", (void *)mcbsp);
+               if (err != 0) {
+                       dev_err(mcbsp->dev, "Unable to request TX IRQ\n");
+                       goto err_clk_disable;
+               }
 
-       if (mcbsp->rx_irq) {
-               err = request_irq(mcbsp->rx_irq,
-                               omap_mcbsp_rx_irq_handler,
-                               0, "McBSP", (void *)mcbsp);
+               err = request_irq(mcbsp->rx_irq, omap_mcbsp_rx_irq_handler, 0,
+                                 "McBSP RX", (void *)mcbsp);
                if (err != 0) {
-                       dev_err(mcbsp->dev, "Unable to request RX IRQ %d "
-                                       "for McBSP%d\n", mcbsp->rx_irq,
-                                       mcbsp->id);
+                       dev_err(mcbsp->dev, "Unable to request RX IRQ\n");
                        goto err_free_irq;
                }
        }
@@ -542,9 +589,16 @@ void omap_mcbsp_free(struct omap_mcbsp *mcbsp)
        if (mcbsp->pdata->has_wakeup)
                MCBSP_WRITE(mcbsp, WAKEUPEN, 0);
 
-       if (mcbsp->rx_irq)
+       /* Disable interrupt requests */
+       if (mcbsp->irq)
+               MCBSP_WRITE(mcbsp, IRQEN, 0);
+
+       if (mcbsp->irq) {
+               free_irq(mcbsp->irq, (void *)mcbsp);
+       } else {
                free_irq(mcbsp->rx_irq, (void *)mcbsp);
-       free_irq(mcbsp->tx_irq, (void *)mcbsp);
+               free_irq(mcbsp->tx_irq, (void *)mcbsp);
+       }
 
        reg_cache = mcbsp->reg_cache;
 
@@ -754,7 +808,7 @@ THRESHOLD_PROP_BUILDER(max_tx_thres);
 THRESHOLD_PROP_BUILDER(max_rx_thres);
 
 static const char *dma_op_modes[] = {
-       "element", "threshold", "frame",
+       "element", "threshold",
 };
 
 static ssize_t dma_op_mode_show(struct device *dev,
@@ -949,13 +1003,24 @@ int __devinit omap_mcbsp_init(struct platform_device *pdev)
        else
                mcbsp->phys_dma_base = res->start;
 
-       mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
-       mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
-
-       /* From OMAP4 there will be a single irq line */
-       if (mcbsp->tx_irq == -ENXIO) {
-               mcbsp->tx_irq = platform_get_irq(pdev, 0);
-               mcbsp->rx_irq = 0;
+       /*
+        * OMAP1, 2 uses two interrupt lines: TX, RX
+        * OMAP2430 and OMAP3 SoCs have a combined IRQ line as well.
+        * OMAP4 and newer SoCs only have the combined IRQ line.
+        * Use the combined IRQ if available since it gives better debugging
+        * possibilities.
+        */
+       mcbsp->irq = platform_get_irq_byname(pdev, "common");
+       if (mcbsp->irq == -ENXIO) {
+               mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
+
+               if (mcbsp->tx_irq == -ENXIO) {
+                       mcbsp->irq = platform_get_irq(pdev, 0);
+                       mcbsp->tx_irq = 0;
+               } else {
+                       mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
+                       mcbsp->irq = 0;
+               }
        }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
index a944fcc9073c619f82667c7c5a2bd0a8861f4f3f..262a6152111fe67badda011198c875974e76d377 100644 (file)
@@ -217,17 +217,20 @@ enum {
 /********************** McBSP DMA operating modes **************************/
 #define MCBSP_DMA_MODE_ELEMENT         0
 #define MCBSP_DMA_MODE_THRESHOLD       1
-#define MCBSP_DMA_MODE_FRAME           2
 
-/********************** McBSP WAKEUPEN bit definitions *********************/
+/********************** McBSP WAKEUPEN/IRQST/IRQEN bit definitions *********/
 #define RSYNCERREN             BIT(0)
 #define RFSREN                 BIT(1)
 #define REOFEN                 BIT(2)
 #define RRDYEN                 BIT(3)
+#define RUNDFLEN               BIT(4)
+#define ROVFLEN                        BIT(5)
 #define XSYNCERREN             BIT(7)
 #define XFSXEN                 BIT(8)
 #define XEOFEN                 BIT(9)
 #define XRDYEN                 BIT(10)
+#define XUNDFLEN               BIT(11)
+#define XOVFLEN                        BIT(12)
 #define XEMPTYEOFEN            BIT(14)
 
 /* Clock signal muxing options */
@@ -295,6 +298,7 @@ struct omap_mcbsp {
        int configured;
        u8 free;
 
+       int irq;
        int rx_irq;
        int tx_irq;
 
index 93bb8eee22b32cdcf00f4d6d2b1fa54510711fb4..9d93793d3077c61d8819760d4bd12b75e3896557 100644 (file)
 #include "omap-pcm.h"
 #include "../codecs/twl6040.h"
 
+struct abe_twl6040 {
+       int     jack_detection; /* board can detect jack events */
+       int     mclk_freq;      /* MCLK frequency speed for twl6040 */
+};
+
 static int omap_abe_hw_params(struct snd_pcm_substream *substream,
        struct snd_pcm_hw_params *params)
 {
@@ -47,13 +52,13 @@ static int omap_abe_hw_params(struct snd_pcm_substream *substream,
        struct snd_soc_dai *codec_dai = rtd->codec_dai;
        struct snd_soc_codec *codec = rtd->codec;
        struct snd_soc_card *card = codec->card;
-       struct omap_abe_twl6040_data *pdata = dev_get_platdata(card->dev);
+       struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
        int clk_id, freq;
        int ret;
 
        clk_id = twl6040_get_clk_id(rtd->codec);
        if (clk_id == TWL6040_SYSCLK_SEL_HPPLL)
-               freq = pdata->mclk_freq;
+               freq = priv->mclk_freq;
        else if (clk_id == TWL6040_SYSCLK_SEL_LPPLL)
                freq = 32768;
        else
@@ -128,6 +133,9 @@ static const struct snd_soc_dapm_widget twl6040_dapm_widgets[] = {
        SND_SOC_DAPM_MIC("Main Handset Mic", NULL),
        SND_SOC_DAPM_MIC("Sub Handset Mic", NULL),
        SND_SOC_DAPM_LINE("Line In", NULL),
+
+       /* Digital microphones */
+       SND_SOC_DAPM_MIC("Digital Mic", NULL),
 };
 
 static const struct snd_soc_dapm_route audio_map[] = {
@@ -173,6 +181,7 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
        struct snd_soc_card *card = codec->card;
        struct snd_soc_dapm_context *dapm = &codec->dapm;
        struct omap_abe_twl6040_data *pdata = dev_get_platdata(card->dev);
+       struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
        int hs_trim;
        int ret = 0;
 
@@ -196,7 +205,7 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
                                        TWL6040_HSF_TRIM_RIGHT(hs_trim));
 
        /* Headset jack detection only if it is supported */
-       if (pdata->jack_detection) {
+       if (priv->jack_detection) {
                ret = snd_soc_jack_new(codec, "Headset Jack",
                                        SND_JACK_HEADSET, &hs_jack);
                if (ret)
@@ -210,10 +219,6 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
        return ret;
 }
 
-static const struct snd_soc_dapm_widget dmic_dapm_widgets[] = {
-       SND_SOC_DAPM_MIC("Digital Mic", NULL),
-};
-
 static const struct snd_soc_dapm_route dmic_audio_map[] = {
        {"DMic", NULL, "Digital Mic"},
        {"Digital Mic", NULL, "Digital Mic1 Bias"},
@@ -223,19 +228,13 @@ static int omap_abe_dmic_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_codec *codec = rtd->codec;
        struct snd_soc_dapm_context *dapm = &codec->dapm;
-       int ret;
-
-       ret = snd_soc_dapm_new_controls(dapm, dmic_dapm_widgets,
-                               ARRAY_SIZE(dmic_dapm_widgets));
-       if (ret)
-               return ret;
 
        return snd_soc_dapm_add_routes(dapm, dmic_audio_map,
                                ARRAY_SIZE(dmic_audio_map));
 }
 
 /* Digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link twl6040_dmic_dai[] = {
+static struct snd_soc_dai_link abe_twl6040_dai_links[] = {
        {
                .name = "TWL6040",
                .stream_name = "TWL6040",
@@ -258,19 +257,6 @@ static struct snd_soc_dai_link twl6040_dmic_dai[] = {
        },
 };
 
-static struct snd_soc_dai_link twl6040_only_dai[] = {
-       {
-               .name = "TWL6040",
-               .stream_name = "TWL6040",
-               .cpu_dai_name = "omap-mcpdm",
-               .codec_dai_name = "twl6040-legacy",
-               .platform_name = "omap-pcm-audio",
-               .codec_name = "twl6040-codec",
-               .init = omap_abe_twl6040_init,
-               .ops = &omap_abe_ops,
-       },
-};
-
 /* Audio machine driver */
 static struct snd_soc_card omap_abe_card = {
        .owner = THIS_MODULE,
@@ -285,6 +271,8 @@ static __devinit int omap_abe_probe(struct platform_device *pdev)
 {
        struct omap_abe_twl6040_data *pdata = dev_get_platdata(&pdev->dev);
        struct snd_soc_card *card = &omap_abe_card;
+       struct abe_twl6040 *priv;
+       int num_links = 0;
        int ret;
 
        card->dev = &pdev->dev;
@@ -294,6 +282,10 @@ static __devinit int omap_abe_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
+       if (priv == NULL)
+               return -ENOMEM;
+
        if (pdata->card_name) {
                card->name = pdata->card_name;
        } else {
@@ -301,18 +293,24 @@ static __devinit int omap_abe_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       if (!pdata->mclk_freq) {
+       priv->jack_detection = pdata->jack_detection;
+       priv->mclk_freq = pdata->mclk_freq;
+
+
+       if (!priv->mclk_freq) {
                dev_err(&pdev->dev, "MCLK frequency missing\n");
                return -ENODEV;
        }
 
-       if (pdata->has_dmic) {
-               card->dai_link = twl6040_dmic_dai;
-               card->num_links = ARRAY_SIZE(twl6040_dmic_dai);
-       } else {
-               card->dai_link = twl6040_only_dai;
-               card->num_links = ARRAY_SIZE(twl6040_only_dai);
-       }
+       if (pdata->has_dmic)
+               num_links = 2;
+       else
+               num_links = 1;
+
+       card->dai_link = abe_twl6040_dai_links;
+       card->num_links = num_links;
+
+       snd_soc_card_set_drvdata(card, priv);
 
        ret = snd_soc_register_card(card);
        if (ret)
index 4dcb5a7e40e874c37440cbcf82073844ebf6d4d7..75f5dca0e8d2c0a1dd4d107ece0cd2023c9ad583 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+#include <linux/of_device.h>
 #include <plat/dma.h>
 
 #include <sound/core.h>
@@ -528,10 +529,17 @@ static int __devexit asoc_dmic_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id omap_dmic_of_match[] = {
+       { .compatible = "ti,omap4-dmic", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, omap_dmic_of_match);
+
 static struct platform_driver asoc_dmic_driver = {
        .driver = {
                .name = "omap-dmic",
                .owner = THIS_MODULE,
+               .of_match_table = omap_dmic_of_match,
        },
        .probe = asoc_dmic_probe,
        .remove = __devexit_p(asoc_dmic_remove),
diff --git a/sound/soc/omap/omap-hdmi-card.c b/sound/soc/omap/omap-hdmi-card.c
new file mode 100644 (file)
index 0000000..eaa2ea0
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * omap-hdmi-card.c
+ *
+ * OMAP ALSA SoC machine driver for TI OMAP HDMI
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Ricardo Neri <ricardo.neri@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <asm/mach-types.h>
+#include <video/omapdss.h>
+
+#define DRV_NAME "omap-hdmi-audio"
+
+static struct snd_soc_dai_link omap_hdmi_dai = {
+       .name = "HDMI",
+       .stream_name = "HDMI",
+       .cpu_dai_name = "omap-hdmi-audio-dai",
+       .platform_name = "omap-pcm-audio",
+       .codec_name = "hdmi-audio-codec",
+       .codec_dai_name = "omap-hdmi-hifi",
+};
+
+static struct snd_soc_card snd_soc_omap_hdmi = {
+       .name = "OMAPHDMI",
+       .owner = THIS_MODULE,
+       .dai_link = &omap_hdmi_dai,
+       .num_links = 1,
+};
+
+static __devinit int omap_hdmi_probe(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = &snd_soc_omap_hdmi;
+       int ret;
+
+       card->dev = &pdev->dev;
+
+       ret = snd_soc_register_card(card);
+       if (ret) {
+               dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+               card->dev = NULL;
+               return ret;
+       }
+       return 0;
+}
+
+static int __devexit omap_hdmi_remove(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+       snd_soc_unregister_card(card);
+       card->dev = NULL;
+       return 0;
+}
+
+static struct platform_driver omap_hdmi_driver = {
+       .driver = {
+               .name = DRV_NAME,
+               .owner = THIS_MODULE,
+       },
+       .probe = omap_hdmi_probe,
+       .remove = __devexit_p(omap_hdmi_remove),
+};
+
+module_platform_driver(omap_hdmi_driver);
+
+MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
+MODULE_DESCRIPTION("OMAP HDMI machine ASoC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
index 38e0defa7078a9cf57f4d9c8fd7c2a6c3336fdd0..a08245d9203cddfd20e38254d02104c25c18f9a8 100644 (file)
 #include <sound/pcm_params.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
+#include <sound/asound.h>
+#include <sound/asoundef.h>
+#include <video/omapdss.h>
 
 #include <plat/dma.h>
 #include "omap-pcm.h"
 #include "omap-hdmi.h"
 
-#define DRV_NAME "hdmi-audio-dai"
+#define DRV_NAME "omap-hdmi-audio-dai"
 
-static struct omap_pcm_dma_data omap_hdmi_dai_dma_params = {
-       .name = "HDMI playback",
-       .sync_mode = OMAP_DMA_SYNC_PACKET,
+struct hdmi_priv {
+       struct omap_pcm_dma_data dma_params;
+       struct omap_dss_audio dss_audio;
+       struct snd_aes_iec958 iec;
+       struct snd_cea_861_aud_if cea;
+       struct omap_dss_device *dssdev;
 };
 
 static int omap_hdmi_dai_startup(struct snd_pcm_substream *substream,
                                  struct snd_soc_dai *dai)
 {
+       struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
        int err;
        /*
         * Make sure that the period bytes are multiple of the DMA packet size.
@@ -52,46 +59,201 @@ static int omap_hdmi_dai_startup(struct snd_pcm_substream *substream,
         */
        err = snd_pcm_hw_constraint_step(substream->runtime, 0,
                                 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
-       if (err < 0)
+       if (err < 0) {
+               dev_err(dai->dev, "could not apply constraint\n");
                return err;
+       }
 
+       if (!priv->dssdev->driver->audio_supported(priv->dssdev)) {
+               dev_err(dai->dev, "audio not supported\n");
+               return -ENODEV;
+       }
        return 0;
 }
 
+static int omap_hdmi_dai_prepare(struct snd_pcm_substream *substream,
+                               struct snd_soc_dai *dai)
+{
+       struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+       return priv->dssdev->driver->audio_enable(priv->dssdev);
+}
+
 static int omap_hdmi_dai_hw_params(struct snd_pcm_substream *substream,
                                    struct snd_pcm_hw_params *params,
                                    struct snd_soc_dai *dai)
 {
+       struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+       struct snd_aes_iec958 *iec = &priv->iec;
+       struct snd_cea_861_aud_if *cea = &priv->cea;
        int err = 0;
 
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S16_LE:
-               omap_hdmi_dai_dma_params.packet_size = 16;
+               priv->dma_params.packet_size = 16;
                break;
        case SNDRV_PCM_FORMAT_S24_LE:
-               omap_hdmi_dai_dma_params.packet_size = 32;
+               priv->dma_params.packet_size = 32;
                break;
        default:
-               err = -EINVAL;
+               dev_err(dai->dev, "format not supported!\n");
+               return -EINVAL;
        }
 
-       omap_hdmi_dai_dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+       priv->dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
 
        snd_soc_dai_set_dma_data(dai, substream,
-                                &omap_hdmi_dai_dma_params);
+                                &priv->dma_params);
+
+       /*
+        * fill the IEC-60958 channel status word
+        */
+
+       /* specify IEC-60958-3 (commercial use) */
+       iec->status[0] &= ~IEC958_AES0_PROFESSIONAL;
+
+       /* specify that the audio is LPCM*/
+       iec->status[0] &= ~IEC958_AES0_NONAUDIO;
+
+       iec->status[0] |= IEC958_AES0_CON_NOT_COPYRIGHT;
+
+       iec->status[0] |= IEC958_AES0_CON_EMPHASIS_NONE;
+
+       iec->status[0] |= IEC958_AES1_PRO_MODE_NOTID;
+
+       iec->status[1] = IEC958_AES1_CON_GENERAL;
+
+       iec->status[2] |= IEC958_AES2_CON_SOURCE_UNSPEC;
+
+       iec->status[2] |= IEC958_AES2_CON_CHANNEL_UNSPEC;
+
+       switch (params_rate(params)) {
+       case 32000:
+               iec->status[3] |= IEC958_AES3_CON_FS_32000;
+               break;
+       case 44100:
+               iec->status[3] |= IEC958_AES3_CON_FS_44100;
+               break;
+       case 48000:
+               iec->status[3] |= IEC958_AES3_CON_FS_48000;
+               break;
+       case 88200:
+               iec->status[3] |= IEC958_AES3_CON_FS_88200;
+               break;
+       case 96000:
+               iec->status[3] |= IEC958_AES3_CON_FS_96000;
+               break;
+       case 176400:
+               iec->status[3] |= IEC958_AES3_CON_FS_176400;
+               break;
+       case 192000:
+               iec->status[3] |= IEC958_AES3_CON_FS_192000;
+               break;
+       default:
+               dev_err(dai->dev, "rate not supported!\n");
+               return -EINVAL;
+       }
+
+       /* specify the clock accuracy */
+       iec->status[3] |= IEC958_AES3_CON_CLOCK_1000PPM;
+
+       /*
+        * specify the word length. The same word length value can mean
+        * two different lengths. Hence, we need to specify the maximum
+        * word length as well.
+        */
+       switch (params_format(params)) {
+       case SNDRV_PCM_FORMAT_S16_LE:
+               iec->status[4] |= IEC958_AES4_CON_WORDLEN_20_16;
+               iec->status[4] &= ~IEC958_AES4_CON_MAX_WORDLEN_24;
+               break;
+       case SNDRV_PCM_FORMAT_S24_LE:
+               iec->status[4] |= IEC958_AES4_CON_WORDLEN_24_20;
+               iec->status[4] |= IEC958_AES4_CON_MAX_WORDLEN_24;
+               break;
+       default:
+               dev_err(dai->dev, "format not supported!\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Fill the CEA-861 audio infoframe (see spec for details)
+        */
+
+       cea->db1_ct_cc = (params_channels(params) - 1)
+               & CEA861_AUDIO_INFOFRAME_DB1CC;
+       cea->db1_ct_cc |= CEA861_AUDIO_INFOFRAME_DB1CT_FROM_STREAM;
+
+       cea->db2_sf_ss = CEA861_AUDIO_INFOFRAME_DB2SF_FROM_STREAM;
+       cea->db2_sf_ss |= CEA861_AUDIO_INFOFRAME_DB2SS_FROM_STREAM;
+
+       cea->db3 = 0; /* not used, all zeros */
+
+       /*
+        * The OMAP HDMI IP requires to use the 8-channel channel code when
+        * transmitting more than two channels.
+        */
+       if (params_channels(params) == 2)
+               cea->db4_ca = 0x0;
+       else
+               cea->db4_ca = 0x13;
+
+       cea->db5_dminh_lsv = CEA861_AUDIO_INFOFRAME_DB5_DM_INH_PROHIBITED;
+       /* the expression is trivial but makes clear what we are doing */
+       cea->db5_dminh_lsv |= (0 & CEA861_AUDIO_INFOFRAME_DB5_LSV);
+
+       priv->dss_audio.iec = iec;
+       priv->dss_audio.cea = cea;
+
+       err = priv->dssdev->driver->audio_config(priv->dssdev,
+                                                &priv->dss_audio);
 
        return err;
 }
 
+static int omap_hdmi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
+                               struct snd_soc_dai *dai)
+{
+       struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+       int err = 0;
+
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+       case SNDRV_PCM_TRIGGER_RESUME:
+       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+               err = priv->dssdev->driver->audio_start(priv->dssdev);
+               break;
+       case SNDRV_PCM_TRIGGER_STOP:
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+               priv->dssdev->driver->audio_stop(priv->dssdev);
+               break;
+       default:
+               err = -EINVAL;
+       }
+       return err;
+}
+
+static void omap_hdmi_dai_shutdown(struct snd_pcm_substream *substream,
+                               struct snd_soc_dai *dai)
+{
+       struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+       priv->dssdev->driver->audio_disable(priv->dssdev);
+}
+
 static const struct snd_soc_dai_ops omap_hdmi_dai_ops = {
        .startup        = omap_hdmi_dai_startup,
        .hw_params      = omap_hdmi_dai_hw_params,
+       .prepare        = omap_hdmi_dai_prepare,
+       .trigger        = omap_hdmi_dai_trigger,
+       .shutdown       = omap_hdmi_dai_shutdown,
 };
 
 static struct snd_soc_dai_driver omap_hdmi_dai = {
        .playback = {
                .channels_min = 2,
-               .channels_max = 2,
+               .channels_max = 8,
                .rates = OMAP_HDMI_RATES,
                .formats = OMAP_HDMI_FORMATS,
        },
@@ -102,31 +264,77 @@ static __devinit int omap_hdmi_probe(struct platform_device *pdev)
 {
        int ret;
        struct resource *hdmi_rsrc;
+       struct hdmi_priv *hdmi_data;
+       bool hdmi_dev_found = false;
+
+       hdmi_data = devm_kzalloc(&pdev->dev, sizeof(*hdmi_data), GFP_KERNEL);
+       if (hdmi_data == NULL) {
+               dev_err(&pdev->dev, "Cannot allocate memory for HDMI data\n");
+               return -ENOMEM;
+       }
 
        hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!hdmi_rsrc) {
                dev_err(&pdev->dev, "Cannot obtain IORESOURCE_MEM HDMI\n");
-               return -EINVAL;
+               return -ENODEV;
        }
 
-       omap_hdmi_dai_dma_params.port_addr =  hdmi_rsrc->start
+       hdmi_data->dma_params.port_addr =  hdmi_rsrc->start
                + OMAP_HDMI_AUDIO_DMA_PORT;
 
        hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (!hdmi_rsrc) {
                dev_err(&pdev->dev, "Cannot obtain IORESOURCE_DMA HDMI\n");
-               return -EINVAL;
+               return -ENODEV;
        }
 
-       omap_hdmi_dai_dma_params.dma_req =  hdmi_rsrc->start;
+       hdmi_data->dma_params.dma_req =  hdmi_rsrc->start;
+       hdmi_data->dma_params.name = "HDMI playback";
+       hdmi_data->dma_params.sync_mode = OMAP_DMA_SYNC_PACKET;
+
+       /*
+        * TODO: We assume that there is only one DSS HDMI device. Future
+        * OMAP implementations may support more than one HDMI devices and
+        * we should provided separate audio support for all of them.
+        */
+       /* Find an HDMI device. */
+       for_each_dss_dev(hdmi_data->dssdev) {
+               omap_dss_get_device(hdmi_data->dssdev);
 
+               if (!hdmi_data->dssdev->driver) {
+                       omap_dss_put_device(hdmi_data->dssdev);
+                       continue;
+               }
+
+               if (hdmi_data->dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
+                       hdmi_dev_found = true;
+                       break;
+               }
+       }
+
+       if (!hdmi_dev_found) {
+               dev_err(&pdev->dev, "no driver for HDMI display found\n");
+               return -ENODEV;
+       }
+
+       dev_set_drvdata(&pdev->dev, hdmi_data);
        ret = snd_soc_register_dai(&pdev->dev, &omap_hdmi_dai);
+
        return ret;
 }
 
 static int __devexit omap_hdmi_remove(struct platform_device *pdev)
 {
+       struct hdmi_priv *hdmi_data = dev_get_drvdata(&pdev->dev);
+
        snd_soc_unregister_dai(&pdev->dev);
+
+       if (hdmi_data == NULL) {
+               dev_err(&pdev->dev, "cannot obtain HDMi data\n");
+               return -ENODEV;
+       }
+
+       omap_dss_put_device(hdmi_data->dssdev);
        return 0;
 }
 
index 34c298d5057e26dac9864a7e184aad7882c4c18a..6ad2bf4f269783e3d95513a19c3b8290368d8127 100644 (file)
@@ -28,7 +28,9 @@
 #define OMAP_HDMI_AUDIO_DMA_PORT 0x8c
 
 #define OMAP_HDMI_RATES        (SNDRV_PCM_RATE_32000 | \
-                               SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+                               SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
+                               SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
+                               SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
 
 #define OMAP_HDMI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
                                SNDRV_PCM_FMTBIT_S24_LE)
index 6912ac7cb6250ae8cf30b886d507e0ffdfe2df9a..1046083e90a079f594b99ef0f88b72bb7e8e5150 100644 (file)
@@ -71,18 +71,17 @@ static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream)
 
        dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
 
-       /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
-       if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)
-               /*
-                * Configure McBSP threshold based on either:
-                * packet_size, when the sDMA is in packet mode, or
-                * based on the period size.
-                */
-               if (dma_data->packet_size)
-                       words = dma_data->packet_size;
-               else
-                       words = snd_pcm_lib_period_bytes(substream) /
-                                                       (mcbsp->wlen / 8);
+       /*
+        * Configure McBSP threshold based on either:
+        * packet_size, when the sDMA is in packet mode, or based on the
+        * period size in THRESHOLD mode, otherwise use McBSP threshold = 1
+        * for mono streams.
+        */
+       if (dma_data->packet_size)
+               words = dma_data->packet_size;
+       else if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)
+               words = snd_pcm_lib_period_bytes(substream) /
+                                               (mcbsp->wlen / 8);
        else
                words = 1;
 
@@ -139,13 +138,15 @@ static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream,
        if (mcbsp->pdata->buffer_size) {
                /*
                * Rule for the buffer size. We should not allow
-               * smaller buffer than the FIFO size to avoid underruns
+               * smaller buffer than the FIFO size to avoid underruns.
+               * This applies only for the playback stream.
                */
-               snd_pcm_hw_rule_add(substream->runtime, 0,
-                                   SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
-                                   omap_mcbsp_hwrule_min_buffersize,
-                                   mcbsp,
-                                   SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+               if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+                       snd_pcm_hw_rule_add(substream->runtime, 0,
+                                           SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+                                           omap_mcbsp_hwrule_min_buffersize,
+                                           mcbsp,
+                                           SNDRV_PCM_HW_PARAM_CHANNELS, -1);
 
                /* Make sure, that the period size is always even */
                snd_pcm_hw_constraint_step(substream->runtime, 0,
@@ -230,6 +231,7 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
        unsigned int format, div, framesize, master;
 
        dma_data = &mcbsp->dma_data[substream->stream];
+       channels = params_channels(params);
 
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S16_LE:
@@ -245,7 +247,6 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
        }
        if (mcbsp->pdata->buffer_size) {
                dma_data->set_threshold = omap_mcbsp_set_threshold;
-               /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
                if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) {
                        int period_words, max_thrsh;
 
@@ -283,6 +284,10 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
                        } else {
                                sync_mode = OMAP_DMA_SYNC_FRAME;
                        }
+               } else if (channels > 1) {
+                       /* Use packet mode for non mono streams */
+                       pkt_size = channels;
+                       sync_mode = OMAP_DMA_SYNC_PACKET;
                }
        }
 
@@ -301,7 +306,7 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
        regs->rcr1      &= ~(RFRLEN1(0x7f) | RWDLEN1(7));
        regs->xcr1      &= ~(XFRLEN1(0x7f) | XWDLEN1(7));
        format = mcbsp->fmt & SND_SOC_DAIFMT_FORMAT_MASK;
-       wpf = channels = params_channels(params);
+       wpf = channels;
        if (channels == 2 && (format == SND_SOC_DAIFMT_I2S ||
                              format == SND_SOC_DAIFMT_LEFT_J)) {
                /* Use dual-phase frames */
index 39705561131a6c10f4a62a486a31208ccbe075a4..59d47ab5b15d72dd996cb13b4a06f274d961d7df 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/irq.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+#include <linux/of_device.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -507,10 +508,17 @@ static int __devexit asoc_mcpdm_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id omap_mcpdm_of_match[] = {
+       { .compatible = "ti,omap4-mcpdm", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, omap_mcpdm_of_match);
+
 static struct platform_driver asoc_mcpdm_driver = {
        .driver = {
                .name   = "omap-mcpdm",
                .owner  = THIS_MODULE,
+               .of_match_table = omap_mcpdm_of_match,
        },
 
        .probe  = asoc_mcpdm_probe,
diff --git a/sound/soc/omap/omap4-hdmi-card.c b/sound/soc/omap/omap4-hdmi-card.c
deleted file mode 100644 (file)
index 28d689b..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * omap4-hdmi-card.c
- *
- * OMAP ALSA SoC machine driver for TI OMAP4 HDMI
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Ricardo Neri <ricardo.neri@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/module.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <asm/mach-types.h>
-#include <video/omapdss.h>
-
-#define DRV_NAME "omap4-hdmi-audio"
-
-static int omap4_hdmi_dai_hw_params(struct snd_pcm_substream *substream,
-               struct snd_pcm_hw_params *params)
-{
-       int i;
-       struct omap_overlay_manager *mgr = NULL;
-       struct device *dev = substream->pcm->card->dev;
-
-       /* Find DSS HDMI device */
-       for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
-               mgr = omap_dss_get_overlay_manager(i);
-               if (mgr && mgr->device
-                       && mgr->device->type == OMAP_DISPLAY_TYPE_HDMI)
-                       break;
-       }
-
-       if (i == omap_dss_get_num_overlay_managers()) {
-               dev_err(dev, "HDMI display device not found!\n");
-               return -ENODEV;
-       }
-
-       /* Make sure HDMI is power-on to avoid L3 interconnect errors */
-       if (mgr->device->state != OMAP_DSS_DISPLAY_ACTIVE) {
-               dev_err(dev, "HDMI display is not active!\n");
-               return -EIO;
-       }
-
-       return 0;
-}
-
-static struct snd_soc_ops omap4_hdmi_dai_ops = {
-       .hw_params = omap4_hdmi_dai_hw_params,
-};
-
-static struct snd_soc_dai_link omap4_hdmi_dai = {
-       .name = "HDMI",
-       .stream_name = "HDMI",
-       .cpu_dai_name = "hdmi-audio-dai",
-       .platform_name = "omap-pcm-audio",
-       .codec_name = "omapdss_hdmi",
-       .codec_dai_name = "hdmi-audio-codec",
-       .ops = &omap4_hdmi_dai_ops,
-};
-
-static struct snd_soc_card snd_soc_omap4_hdmi = {
-       .name = "OMAP4HDMI",
-       .owner = THIS_MODULE,
-       .dai_link = &omap4_hdmi_dai,
-       .num_links = 1,
-};
-
-static __devinit int omap4_hdmi_probe(struct platform_device *pdev)
-{
-       struct snd_soc_card *card = &snd_soc_omap4_hdmi;
-       int ret;
-
-       card->dev = &pdev->dev;
-
-       ret = snd_soc_register_card(card);
-       if (ret) {
-               dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
-               card->dev = NULL;
-               return ret;
-       }
-       return 0;
-}
-
-static int __devexit omap4_hdmi_remove(struct platform_device *pdev)
-{
-       struct snd_soc_card *card = platform_get_drvdata(pdev);
-
-       snd_soc_unregister_card(card);
-       card->dev = NULL;
-       return 0;
-}
-
-static struct platform_driver omap4_hdmi_driver = {
-       .driver = {
-               .name = "omap4-hdmi-audio",
-               .owner = THIS_MODULE,
-       },
-       .probe = omap4_hdmi_probe,
-       .remove = __devexit_p(omap4_hdmi_remove),
-};
-
-module_platform_driver(omap4_hdmi_driver);
-
-MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
-MODULE_DESCRIPTION("OMAP4 HDMI machine ASoC driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
index 7cee22515d9de55c505eae1dd460107b1eacee3a..2ef98536f1da9ecff07ebd3d6903a8bf78345029 100644 (file)
@@ -1052,6 +1052,13 @@ static int fsi_dma_quit(struct fsi_priv *fsi, struct fsi_stream *io)
        return 0;
 }
 
+static dma_addr_t fsi_dma_get_area(struct fsi_stream *io)
+{
+       struct snd_pcm_runtime *runtime = io->substream->runtime;
+
+       return io->dma + samples_to_bytes(runtime, io->buff_sample_pos);
+}
+
 static void fsi_dma_complete(void *data)
 {
        struct fsi_stream *io = (struct fsi_stream *)data;
@@ -1061,7 +1068,7 @@ static void fsi_dma_complete(void *data)
        enum dma_data_direction dir = fsi_stream_is_play(fsi, io) ?
                DMA_TO_DEVICE : DMA_FROM_DEVICE;
 
-       dma_sync_single_for_cpu(dai->dev, io->dma,
+       dma_sync_single_for_cpu(dai->dev, fsi_dma_get_area(io),
                        samples_to_bytes(runtime, io->period_samples), dir);
 
        io->buff_sample_pos += io->period_samples;
@@ -1078,13 +1085,6 @@ static void fsi_dma_complete(void *data)
        snd_pcm_period_elapsed(io->substream);
 }
 
-static dma_addr_t fsi_dma_get_area(struct fsi_stream *io)
-{
-       struct snd_pcm_runtime *runtime = io->substream->runtime;
-
-       return io->dma + samples_to_bytes(runtime, io->buff_sample_pos);
-}
-
 static void fsi_dma_do_tasklet(unsigned long data)
 {
        struct fsi_stream *io = (struct fsi_stream *)data;
@@ -1110,7 +1110,7 @@ static void fsi_dma_do_tasklet(unsigned long data)
        len     = samples_to_bytes(runtime, io->period_samples);
        buf     = fsi_dma_get_area(io);
 
-       dma_sync_single_for_device(dai->dev, io->dma, len, dir);
+       dma_sync_single_for_device(dai->dev, buf, len, dir);
 
        sg_init_table(&sg, 1);
        sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf)),
@@ -1172,9 +1172,16 @@ static int fsi_dma_transfer(struct fsi_priv *fsi, struct fsi_stream *io)
 static void fsi_dma_push_start_stop(struct fsi_priv *fsi, struct fsi_stream *io,
                                 int start)
 {
+       struct fsi_master *master = fsi_get_master(fsi);
+       u32 clk  = fsi_is_port_a(fsi) ? CRA  : CRB;
        u32 enable = start ? DMA_ON : 0;
 
        fsi_reg_mask_set(fsi, OUT_DMAC, DMA_ON, enable);
+
+       dmaengine_terminate_all(io->chan);
+
+       if (fsi_is_clk_master(fsi))
+               fsi_master_mask_set(master, CLK_RST, clk, (enable) ? clk : 0);
 }
 
 static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io)
index 24839d932648c81849ecfe63214a8124d914dd0e..cdf8b7601973406c445c69c6e5cd4afbdbbdf23b 100644 (file)
@@ -788,6 +788,9 @@ static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime,
        int count = 0, needs_knot = 0;
        int err;
 
+       kfree(subs->rate_list.list);
+       subs->rate_list.list = NULL;
+
        list_for_each_entry(fp, &subs->fmt_list, list) {
                if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)
                        return 0;
index 998534992197591bde7328e44cd2725a7af84797..554828219c33cceaf0c5b9d3f57bd69d4a7a109f 100644 (file)
@@ -1434,8 +1434,11 @@ static int event_read_fields(struct event_format *event, struct format_field **f
 fail:
        free_token(token);
 fail_expect:
-       if (field)
+       if (field) {
+               free(field->type);
+               free(field->name);
                free(field);
+       }
        return -1;
 }
 
@@ -1712,6 +1715,8 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
 
                if (set_op_prio(arg) == -1) {
                        event->flags |= EVENT_FL_FAILED;
+                       /* arg->op.op (= token) will be freed at out_free */
+                       arg->op.op = NULL;
                        goto out_free;
                }
 
@@ -2124,6 +2129,13 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char **
 
                free_token(token);
                type = process_arg(event, arg, &token);
+
+               if (type == EVENT_OP)
+                       type = process_op(event, arg, &token);
+
+               if (type == EVENT_ERROR)
+                       goto out_free;
+
                if (test_type_token(type, token, EVENT_DELIM, ","))
                        goto out_free;
 
@@ -2288,17 +2300,18 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char **
        arg = alloc_arg();
        type = process_arg(event, arg, &token);
        if (type == EVENT_ERROR)
-               goto out_free;
+               goto out_free_arg;
 
        if (!test_type_token(type, token, EVENT_OP, "]"))
-               goto out_free;
+               goto out_free_arg;
 
        free_token(token);
        type = read_token_item(tok);
        return type;
 
+ out_free_arg:
+       free_arg(arg);
  out_free:
-       free(arg);
        free_token(token);
        *tok = NULL;
        return EVENT_ERROR;
@@ -3362,6 +3375,7 @@ process_defined_func(struct trace_seq *s, void *data, int size,
                        break;
                }
                farg = farg->next;
+               param = param->next;
        }
 
        ret = (*func_handle->func)(s, args);
index 2d40c5ed81d66a00fde6cc09917ac32dba196525..dfcfe2c131de6e3d8ea6c26cbf452c2834fd90d7 100644 (file)
@@ -325,9 +325,8 @@ static void free_events(struct event_list *events)
 }
 
 static struct filter_arg *
-create_arg_item(struct event_format *event,
-               const char *token, enum filter_arg_type type,
-               char **error_str)
+create_arg_item(struct event_format *event, const char *token,
+               enum event_type type, char **error_str)
 {
        struct format_field *field;
        struct filter_arg *arg;
@@ -1585,7 +1584,7 @@ get_value(struct event_format *event,
                const char *name;
 
                name = get_comm(event, record);
-               return (unsigned long long)name;
+               return (unsigned long)name;
        }
 
        pevent_read_number_field(field, record->data, &val);
index 42c6fd2ae85d19ac31fa5f0c191dfcf776ca60e9..767ea2436e1cd841a762ee8cdb039ba80a7120b3 100644 (file)
 
        # Default, disable using /dev/null
        dir = /root/.debug
+
+[annotate]
+
+       # Defaults
+       hide_src_code = false
+       use_offset = true
+       jump_arrows = true
+       show_nr_jumps = false
index 1d3d513beb9b1083c60531f349d5d0c06af64dfa..0eee64cfe9a0f48ced0ee2397068979763355cd4 100644 (file)
@@ -80,7 +80,7 @@ ifeq ("$(origin DEBUG)", "command line")
   PERF_DEBUG = $(DEBUG)
 endif
 ifndef PERF_DEBUG
-  CFLAGS_OPTIMIZE = -O6
+  CFLAGS_OPTIMIZE = -O6 -D_FORTIFY_SOURCE=2
 endif
 
 ifdef PARSER_DEBUG
@@ -89,7 +89,7 @@ ifdef PARSER_DEBUG
        PARSER_DEBUG_CFLAGS := -DPARSER_DEBUG
 endif
 
-CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS)
+CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS)
 EXTLIBS = -lpthread -lrt -lelf -lm
 ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
 ALL_LDFLAGS = $(LDFLAGS)
index 806e0a286634a6bd7b7a766706c32426730c7727..67522cf874053e24ff87d4fa3fca67e6845981be 100644 (file)
@@ -215,7 +215,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
        }
 
        if (total_nr_samples == 0) {
-               ui__warning("The %s file has no samples!\n", session->filename);
+               ui__error("The %s file has no samples!\n", session->filename);
                goto out_delete;
        }
 out_delete:
index e52d77ec7084e02e4fa318e873ff2cf1679a222d..acd78dc283411692f9e7e48b78085d9b5da63471 100644 (file)
@@ -116,7 +116,7 @@ static const char * const evlist_usage[] = {
 int cmd_evlist(int argc, const char **argv, const char *prefix __used)
 {
        struct perf_attr_details details = { .verbose = false, };
-       const char *input_name;
+       const char *input_name = NULL;
        const struct option options[] = {
                OPT_STRING('i', "input", &input_name, "file",
                            "Input file name"),
index e5cb08427e13f56ed7f9223d520a954ef0fc1cf2..f95840d04e4c7a224821e395600df2bbdc7e4323 100644 (file)
@@ -264,7 +264,7 @@ try_again:
                        }
 
                        if (err == ENOENT) {
-                               ui__warning("The %s event is not supported.\n",
+                               ui__error("The %s event is not supported.\n",
                                            event_name(pos));
                                exit(EXIT_FAILURE);
                        }
@@ -858,8 +858,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
                usage_with_options(record_usage, record_options);
 
        if (rec->force && rec->append_file) {
-               fprintf(stderr, "Can't overwrite and append at the same time."
-                               " You need to choose between -f and -A");
+               ui__error("Can't overwrite and append at the same time."
+                         " You need to choose between -f and -A");
                usage_with_options(record_usage, record_options);
        } else if (rec->append_file) {
                rec->write_mode = WRITE_APPEND;
@@ -868,8 +868,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
        }
 
        if (nr_cgroups && !rec->opts.target.system_wide) {
-               fprintf(stderr, "cgroup monitoring only available in"
-                       " system-wide mode\n");
+               ui__error("cgroup monitoring only available in"
+                         " system-wide mode\n");
                usage_with_options(record_usage, record_options);
        }
 
@@ -905,7 +905,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
                int saved_errno = errno;
 
                perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
-               ui__warning("%s", errbuf);
+               ui__error("%s", errbuf);
 
                err = -saved_errno;
                goto out_free_fd;
@@ -933,7 +933,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
        else if (rec->opts.freq) {
                rec->opts.default_interval = rec->opts.freq;
        } else {
-               fprintf(stderr, "frequency and count are zero, aborting\n");
+               ui__error("frequency and count are zero, aborting\n");
                err = -EINVAL;
                goto out_free_fd;
        }
index d58e41445d0d6dcfb4ca26fbe36eb9b7e02c596b..8c767c6bca91b7490cb1564da230c7c9021422e6 100644 (file)
@@ -251,13 +251,13 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
 
        if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
                if (sort__has_parent) {
-                       ui__warning("Selected --sort parent, but no "
+                       ui__error("Selected --sort parent, but no "
                                    "callchain data. Did you call "
                                    "'perf record' without -g?\n");
                        return -EINVAL;
                }
                if (symbol_conf.use_callchain) {
-                       ui__warning("Selected -g but no callchain data. Did "
+                       ui__error("Selected -g but no callchain data. Did "
                                    "you call 'perf record' without -g?\n");
                        return -1;
                }
@@ -266,17 +266,15 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
                   !symbol_conf.use_callchain) {
                        symbol_conf.use_callchain = true;
                        if (callchain_register_param(&callchain_param) < 0) {
-                               ui__warning("Can't register callchain "
-                                           "params.\n");
+                               ui__error("Can't register callchain params.\n");
                                return -EINVAL;
                        }
        }
 
        if (sort__branch_mode == 1) {
                if (!(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) {
-                       fprintf(stderr, "selected -b but no branch data."
-                                       " Did you call perf record without"
-                                       " -b?\n");
+                       ui__error("Selected -b but no branch data. "
+                                 "Did you call perf record without -b?\n");
                        return -1;
                }
        }
@@ -420,7 +418,7 @@ static int __cmd_report(struct perf_report *rep)
        }
 
        if (nr_samples == 0) {
-               ui__warning("The %s file has no samples!\n", session->filename);
+               ui__error("The %s file has no samples!\n", session->filename);
                goto out_delete;
        }
 
index 6031dce0429f8f93e267d530e9a91fec3ddaf74b..871b540293e132610bb6a50bb384289d89aca75c 100644 (file)
@@ -953,22 +953,22 @@ try_again:
                                attr->config = PERF_COUNT_SW_CPU_CLOCK;
                                if (counter->name) {
                                        free(counter->name);
-                                       counter->name = strdup(event_name(counter));
+                                       counter->name = NULL;
                                }
                                goto try_again;
                        }
 
                        if (err == ENOENT) {
-                               ui__warning("The %s event is not supported.\n",
+                               ui__error("The %s event is not supported.\n",
                                            event_name(counter));
                                goto out_err;
                        } else if (err == EMFILE) {
-                               ui__warning("Too many events are opened.\n"
+                               ui__error("Too many events are opened.\n"
                                            "Try again after reducing the number of events\n");
                                goto out_err;
                        }
 
-                       ui__warning("The sys_perf_event_open() syscall "
+                       ui__error("The sys_perf_event_open() syscall "
                                    "returned with %d (%s).  /bin/dmesg "
                                    "may provide additional information.\n"
                                    "No CONFIG_PERF_EVENTS=y kernel support "
@@ -978,7 +978,7 @@ try_again:
        }
 
        if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) {
-               ui__warning("Failed to mmap with %d (%s)\n",
+               ui__error("Failed to mmap with %d (%s)\n",
                            errno, strerror(errno));
                goto out_err;
        }
@@ -994,12 +994,12 @@ static int perf_top__setup_sample_type(struct perf_top *top)
 {
        if (!top->sort_has_symbols) {
                if (symbol_conf.use_callchain) {
-                       ui__warning("Selected -g but \"sym\" not present in --sort/-s.");
+                       ui__error("Selected -g but \"sym\" not present in --sort/-s.");
                        return -EINVAL;
                }
        } else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
                if (callchain_register_param(&callchain_param) < 0) {
-                       ui__warning("Can't register callchain params.\n");
+                       ui__error("Can't register callchain params.\n");
                        return -EINVAL;
                }
        }
@@ -1041,7 +1041,7 @@ static int __cmd_top(struct perf_top *top)
 
        if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
                                                            display_thread), top)) {
-               printf("Could not create display thread.\n");
+               ui__error("Could not create display thread.\n");
                exit(-1);
        }
 
@@ -1050,7 +1050,7 @@ static int __cmd_top(struct perf_top *top)
 
                param.sched_priority = top->realtime_prio;
                if (sched_setscheduler(0, SCHED_FIFO, &param)) {
-                       printf("Could not set realtime priority.\n");
+                       ui__error("Could not set realtime priority.\n");
                        exit(-1);
                }
        }
@@ -1274,7 +1274,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
                int saved_errno = errno;
 
                perf_target__strerror(&top.target, status, errbuf, BUFSIZ);
-               ui__warning("%s", errbuf);
+               ui__error("%s", errbuf);
 
                status = -saved_errno;
                goto out_delete_evlist;
@@ -1288,7 +1288,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
 
        if (!top.evlist->nr_entries &&
            perf_evlist__add_default(top.evlist) < 0) {
-               pr_err("Not enough memory for event selector list\n");
+               ui__error("Not enough memory for event selector list\n");
                return -ENOMEM;
        }
 
@@ -1305,7 +1305,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
        else if (top.freq) {
                top.default_interval = top.freq;
        } else {
-               fprintf(stderr, "frequency and count are zero, aborting\n");
+               ui__error("frequency and count are zero, aborting\n");
                exit(EXIT_FAILURE);
        }
 
index 14f1034f14f93efbef0b7e5a7ea3f642674fc856..f960ccb2edc6f38f8a7b7a3f0f1b740cbd351c2b 100644 (file)
@@ -227,7 +227,7 @@ struct perf_record_opts {
        unsigned int freq;
        unsigned int mmap_pages;
        unsigned int user_freq;
-       int          branch_stack;
+       u64          branch_stack;
        u64          default_interval;
        u64          user_interval;
 };
index cde4d0f0ddb99c8de13e581d1595ce948a7afe85..1818a531f1d3ea83346e48a131844cb7dc8212a5 100644 (file)
@@ -35,16 +35,16 @@ int ui_browser__set_color(struct ui_browser *browser, int color)
        return ret;
 }
 
-void ui_browser__set_percent_color(struct ui_browser *self,
+void ui_browser__set_percent_color(struct ui_browser *browser,
                                   double percent, bool current)
 {
-        int color = ui_browser__percent_color(self, percent, current);
-        ui_browser__set_color(self, color);
+        int color = ui_browser__percent_color(browser, percent, current);
+        ui_browser__set_color(browser, color);
 }
 
-void ui_browser__gotorc(struct ui_browser *self, int y, int x)
+void ui_browser__gotorc(struct ui_browser *browser, int y, int x)
 {
-       SLsmg_gotorc(self->y + y, self->x + x);
+       SLsmg_gotorc(browser->y + y, browser->x + x);
 }
 
 static struct list_head *
@@ -73,23 +73,23 @@ ui_browser__list_head_filter_prev_entries(struct ui_browser *browser,
        return NULL;
 }
 
-void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence)
+void ui_browser__list_head_seek(struct ui_browser *browser, off_t offset, int whence)
 {
-       struct list_head *head = self->entries;
+       struct list_head *head = browser->entries;
        struct list_head *pos;
 
-       if (self->nr_entries == 0)
+       if (browser->nr_entries == 0)
                return;
 
        switch (whence) {
        case SEEK_SET:
-               pos = ui_browser__list_head_filter_entries(self, head->next);
+               pos = ui_browser__list_head_filter_entries(browser, head->next);
                break;
        case SEEK_CUR:
-               pos = self->top;
+               pos = browser->top;
                break;
        case SEEK_END:
-               pos = ui_browser__list_head_filter_prev_entries(self, head->prev);
+               pos = ui_browser__list_head_filter_prev_entries(browser, head->prev);
                break;
        default:
                return;
@@ -99,18 +99,18 @@ void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whenc
 
        if (offset > 0) {
                while (offset-- != 0)
-                       pos = ui_browser__list_head_filter_entries(self, pos->next);
+                       pos = ui_browser__list_head_filter_entries(browser, pos->next);
        } else {
                while (offset++ != 0)
-                       pos = ui_browser__list_head_filter_prev_entries(self, pos->prev);
+                       pos = ui_browser__list_head_filter_prev_entries(browser, pos->prev);
        }
 
-       self->top = pos;
+       browser->top = pos;
 }
 
-void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence)
+void ui_browser__rb_tree_seek(struct ui_browser *browser, off_t offset, int whence)
 {
-       struct rb_root *root = self->entries;
+       struct rb_root *root = browser->entries;
        struct rb_node *nd;
 
        switch (whence) {
@@ -118,7 +118,7 @@ void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence)
                nd = rb_first(root);
                break;
        case SEEK_CUR:
-               nd = self->top;
+               nd = browser->top;
                break;
        case SEEK_END:
                nd = rb_last(root);
@@ -135,23 +135,23 @@ void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence)
                        nd = rb_prev(nd);
        }
 
-       self->top = nd;
+       browser->top = nd;
 }
 
-unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self)
+unsigned int ui_browser__rb_tree_refresh(struct ui_browser *browser)
 {
        struct rb_node *nd;
        int row = 0;
 
-       if (self->top == NULL)
-                self->top = rb_first(self->entries);
+       if (browser->top == NULL)
+                browser->top = rb_first(browser->entries);
 
-       nd = self->top;
+       nd = browser->top;
 
        while (nd != NULL) {
-               ui_browser__gotorc(self, row, 0);
-               self->write(self, nd, row);
-               if (++row == self->height)
+               ui_browser__gotorc(browser, row, 0);
+               browser->write(browser, nd, row);
+               if (++row == browser->height)
                        break;
                nd = rb_next(nd);
        }
@@ -159,17 +159,17 @@ unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self)
        return row;
 }
 
-bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row)
+bool ui_browser__is_current_entry(struct ui_browser *browser, unsigned row)
 {
-       return self->top_idx + row == self->index;
+       return browser->top_idx + row == browser->index;
 }
 
-void ui_browser__refresh_dimensions(struct ui_browser *self)
+void ui_browser__refresh_dimensions(struct ui_browser *browser)
 {
-       self->width = SLtt_Screen_Cols - 1;
-       self->height = SLtt_Screen_Rows - 2;
-       self->y = 1;
-       self->x = 0;
+       browser->width = SLtt_Screen_Cols - 1;
+       browser->height = SLtt_Screen_Rows - 2;
+       browser->y = 1;
+       browser->x = 0;
 }
 
 void ui_browser__handle_resize(struct ui_browser *browser)
@@ -225,10 +225,10 @@ bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text)
        return key == K_ENTER || toupper(key) == 'Y';
 }
 
-void ui_browser__reset_index(struct ui_browser *self)
+void ui_browser__reset_index(struct ui_browser *browser)
 {
-       self->index = self->top_idx = 0;
-       self->seek(self, 0, SEEK_SET);
+       browser->index = browser->top_idx = 0;
+       browser->seek(browser, 0, SEEK_SET);
 }
 
 void __ui_browser__show_title(struct ui_browser *browser, const char *title)
@@ -245,26 +245,26 @@ void ui_browser__show_title(struct ui_browser *browser, const char *title)
        pthread_mutex_unlock(&ui__lock);
 }
 
-int ui_browser__show(struct ui_browser *self, const char *title,
+int ui_browser__show(struct ui_browser *browser, const char *title,
                     const char *helpline, ...)
 {
        int err;
        va_list ap;
 
-       ui_browser__refresh_dimensions(self);
+       ui_browser__refresh_dimensions(browser);
 
        pthread_mutex_lock(&ui__lock);
-       __ui_browser__show_title(self, title);
+       __ui_browser__show_title(browser, title);
 
-       self->title = title;
-       free(self->helpline);
-       self->helpline = NULL;
+       browser->title = title;
+       free(browser->helpline);
+       browser->helpline = NULL;
 
        va_start(ap, helpline);
-       err = vasprintf(&self->helpline, helpline, ap);
+       err = vasprintf(&browser->helpline, helpline, ap);
        va_end(ap);
        if (err > 0)
-               ui_helpline__push(self->helpline);
+               ui_helpline__push(browser->helpline);
        pthread_mutex_unlock(&ui__lock);
        return err ? 0 : -1;
 }
@@ -350,7 +350,7 @@ void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries)
        browser->seek(browser, browser->top_idx, SEEK_SET);
 }
 
-int ui_browser__run(struct ui_browser *self, int delay_secs)
+int ui_browser__run(struct ui_browser *browser, int delay_secs)
 {
        int err, key;
 
@@ -358,7 +358,7 @@ int ui_browser__run(struct ui_browser *self, int delay_secs)
                off_t offset;
 
                pthread_mutex_lock(&ui__lock);
-               err = __ui_browser__refresh(self);
+               err = __ui_browser__refresh(browser);
                SLsmg_refresh();
                pthread_mutex_unlock(&ui__lock);
                if (err < 0)
@@ -368,18 +368,18 @@ int ui_browser__run(struct ui_browser *self, int delay_secs)
 
                if (key == K_RESIZE) {
                        ui__refresh_dimensions(false);
-                       ui_browser__refresh_dimensions(self);
-                       __ui_browser__show_title(self, self->title);
-                       ui_helpline__puts(self->helpline);
+                       ui_browser__refresh_dimensions(browser);
+                       __ui_browser__show_title(browser, browser->title);
+                       ui_helpline__puts(browser->helpline);
                        continue;
                }
 
-               if (self->use_navkeypressed && !self->navkeypressed) {
+               if (browser->use_navkeypressed && !browser->navkeypressed) {
                        if (key == K_DOWN || key == K_UP ||
                            key == K_PGDN || key == K_PGUP ||
                            key == K_HOME || key == K_END ||
                            key == ' ') {
-                               self->navkeypressed = true;
+                               browser->navkeypressed = true;
                                continue;
                        } else
                                return key;
@@ -387,59 +387,59 @@ int ui_browser__run(struct ui_browser *self, int delay_secs)
 
                switch (key) {
                case K_DOWN:
-                       if (self->index == self->nr_entries - 1)
+                       if (browser->index == browser->nr_entries - 1)
                                break;
-                       ++self->index;
-                       if (self->index == self->top_idx + self->height) {
-                               ++self->top_idx;
-                               self->seek(self, +1, SEEK_CUR);
+                       ++browser->index;
+                       if (browser->index == browser->top_idx + browser->height) {
+                               ++browser->top_idx;
+                               browser->seek(browser, +1, SEEK_CUR);
                        }
                        break;
                case K_UP:
-                       if (self->index == 0)
+                       if (browser->index == 0)
                                break;
-                       --self->index;
-                       if (self->index < self->top_idx) {
-                               --self->top_idx;
-                               self->seek(self, -1, SEEK_CUR);
+                       --browser->index;
+                       if (browser->index < browser->top_idx) {
+                               --browser->top_idx;
+                               browser->seek(browser, -1, SEEK_CUR);
                        }
                        break;
                case K_PGDN:
                case ' ':
-                       if (self->top_idx + self->height > self->nr_entries - 1)
+                       if (browser->top_idx + browser->height > browser->nr_entries - 1)
                                break;
 
-                       offset = self->height;
-                       if (self->index + offset > self->nr_entries - 1)
-                               offset = self->nr_entries - 1 - self->index;
-                       self->index += offset;
-                       self->top_idx += offset;
-                       self->seek(self, +offset, SEEK_CUR);
+                       offset = browser->height;
+                       if (browser->index + offset > browser->nr_entries - 1)
+                               offset = browser->nr_entries - 1 - browser->index;
+                       browser->index += offset;
+                       browser->top_idx += offset;
+                       browser->seek(browser, +offset, SEEK_CUR);
                        break;
                case K_PGUP:
-                       if (self->top_idx == 0)
+                       if (browser->top_idx == 0)
                                break;
 
-                       if (self->top_idx < self->height)
-                               offset = self->top_idx;
+                       if (browser->top_idx < browser->height)
+                               offset = browser->top_idx;
                        else
-                               offset = self->height;
+                               offset = browser->height;
 
-                       self->index -= offset;
-                       self->top_idx -= offset;
-                       self->seek(self, -offset, SEEK_CUR);
+                       browser->index -= offset;
+                       browser->top_idx -= offset;
+                       browser->seek(browser, -offset, SEEK_CUR);
                        break;
                case K_HOME:
-                       ui_browser__reset_index(self);
+                       ui_browser__reset_index(browser);
                        break;
                case K_END:
-                       offset = self->height - 1;
-                       if (offset >= self->nr_entries)
-                               offset = self->nr_entries - 1;
+                       offset = browser->height - 1;
+                       if (offset >= browser->nr_entries)
+                               offset = browser->nr_entries - 1;
 
-                       self->index = self->nr_entries - 1;
-                       self->top_idx = self->index - offset;
-                       self->seek(self, -offset, SEEK_END);
+                       browser->index = browser->nr_entries - 1;
+                       browser->top_idx = browser->index - offset;
+                       browser->seek(browser, -offset, SEEK_END);
                        break;
                default:
                        return key;
@@ -448,22 +448,22 @@ int ui_browser__run(struct ui_browser *self, int delay_secs)
        return -1;
 }
 
-unsigned int ui_browser__list_head_refresh(struct ui_browser *self)
+unsigned int ui_browser__list_head_refresh(struct ui_browser *browser)
 {
        struct list_head *pos;
-       struct list_head *head = self->entries;
+       struct list_head *head = browser->entries;
        int row = 0;
 
-       if (self->top == NULL || self->top == self->entries)
-                self->top = ui_browser__list_head_filter_entries(self, head->next);
+       if (browser->top == NULL || browser->top == browser->entries)
+                browser->top = ui_browser__list_head_filter_entries(browser, head->next);
 
-       pos = self->top;
+       pos = browser->top;
 
        list_for_each_from(pos, head) {
-               if (!self->filter || !self->filter(self, pos)) {
-                       ui_browser__gotorc(self, row, 0);
-                       self->write(self, pos, row);
-                       if (++row == self->height)
+               if (!browser->filter || !browser->filter(browser, pos)) {
+                       ui_browser__gotorc(browser, row, 0);
+                       browser->write(browser, pos, row);
+                       if (++row == browser->height)
                                break;
                }
        }
@@ -708,4 +708,6 @@ void ui_browser__init(void)
                struct ui_browser__colorset *c = &ui_browser__colorsets[i++];
                sltt_set_color(c->colorset, c->name, c->fg, c->bg);
        }
+
+       annotate_browser__init();
 }
index dd96d82299022c0bb67924752177d3e6c2eaa096..af70314605e54e2468fe774822cdeecd2945cc7c 100644 (file)
@@ -69,4 +69,5 @@ void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whenc
 unsigned int ui_browser__list_head_refresh(struct ui_browser *self);
 
 void ui_browser__init(void);
+void annotate_browser__init(void);
 #endif /* _PERF_UI_BROWSER_H_ */
index 6e0ef79be16907a781bcf31be96377671412f440..4deea6aaf9274f65887997fcd175fe9fd3bfc459 100644 (file)
@@ -19,6 +19,16 @@ struct browser_disasm_line {
        int             jump_sources;
 };
 
+static struct annotate_browser_opt {
+       bool hide_src_code,
+            use_offset,
+            jump_arrows,
+            show_nr_jumps;
+} annotate_browser__opts = {
+       .use_offset     = true,
+       .jump_arrows    = true,
+};
+
 struct annotate_browser {
        struct ui_browser b;
        struct rb_root    entries;
@@ -30,10 +40,6 @@ struct annotate_browser {
        int                 nr_entries;
        int                 max_jump_sources;
        int                 nr_jumps;
-       bool                hide_src_code;
-       bool                use_offset;
-       bool                jump_arrows;
-       bool                show_nr_jumps;
        bool                searching_backwards;
        u8                  addr_width;
        u8                  jumps_width;
@@ -48,11 +54,9 @@ static inline struct browser_disasm_line *disasm_line__browser(struct disasm_lin
        return (struct browser_disasm_line *)(dl + 1);
 }
 
-static bool disasm_line__filter(struct ui_browser *browser, void *entry)
+static bool disasm_line__filter(struct ui_browser *browser __used, void *entry)
 {
-       struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
-
-       if (ab->hide_src_code) {
+       if (annotate_browser__opts.hide_src_code) {
                struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
                return dl->offset == -1;
        }
@@ -79,30 +83,30 @@ static int annotate_browser__set_jumps_percent_color(struct annotate_browser *br
         return ui_browser__set_color(&browser->b, color);
 }
 
-static void annotate_browser__write(struct ui_browser *self, void *entry, int row)
+static void annotate_browser__write(struct ui_browser *browser, void *entry, int row)
 {
-       struct annotate_browser *ab = container_of(self, struct annotate_browser, b);
+       struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
        struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
        struct browser_disasm_line *bdl = disasm_line__browser(dl);
-       bool current_entry = ui_browser__is_current_entry(self, row);
-       bool change_color = (!ab->hide_src_code &&
-                            (!current_entry || (self->use_navkeypressed &&
-                                                !self->navkeypressed)));
-       int width = self->width, printed;
+       bool current_entry = ui_browser__is_current_entry(browser, row);
+       bool change_color = (!annotate_browser__opts.hide_src_code &&
+                            (!current_entry || (browser->use_navkeypressed &&
+                                                !browser->navkeypressed)));
+       int width = browser->width, printed;
        char bf[256];
 
        if (dl->offset != -1 && bdl->percent != 0.0) {
-               ui_browser__set_percent_color(self, bdl->percent, current_entry);
+               ui_browser__set_percent_color(browser, bdl->percent, current_entry);
                slsmg_printf("%6.2f ", bdl->percent);
        } else {
-               ui_browser__set_percent_color(self, 0, current_entry);
+               ui_browser__set_percent_color(browser, 0, current_entry);
                slsmg_write_nstring(" ", 7);
        }
 
        SLsmg_write_char(' ');
 
        /* The scroll bar isn't being used */
-       if (!self->navkeypressed)
+       if (!browser->navkeypressed)
                width += 1;
 
        if (!*dl->line)
@@ -116,14 +120,14 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
                u64 addr = dl->offset;
                int color = -1;
 
-               if (!ab->use_offset)
+               if (!annotate_browser__opts.use_offset)
                        addr += ab->start;
 
-               if (!ab->use_offset) {
+               if (!annotate_browser__opts.use_offset) {
                        printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
                } else {
                        if (bdl->jump_sources) {
-                               if (ab->show_nr_jumps) {
+                               if (annotate_browser__opts.show_nr_jumps) {
                                        int prev;
                                        printed = scnprintf(bf, sizeof(bf), "%*d ",
                                                            ab->jumps_width,
@@ -131,7 +135,7 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
                                        prev = annotate_browser__set_jumps_percent_color(ab, bdl->jump_sources,
                                                                                         current_entry);
                                        slsmg_write_nstring(bf, printed);
-                                       ui_browser__set_color(self, prev);
+                                       ui_browser__set_color(browser, prev);
                                }
 
                                printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
@@ -143,19 +147,19 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
                }
 
                if (change_color)
-                       color = ui_browser__set_color(self, HE_COLORSET_ADDR);
+                       color = ui_browser__set_color(browser, HE_COLORSET_ADDR);
                slsmg_write_nstring(bf, printed);
                if (change_color)
-                       ui_browser__set_color(self, color);
+                       ui_browser__set_color(browser, color);
                if (dl->ins && dl->ins->ops->scnprintf) {
                        if (ins__is_jump(dl->ins)) {
                                bool fwd = dl->ops.target.offset > (u64)dl->offset;
 
-                               ui_browser__write_graph(self, fwd ? SLSMG_DARROW_CHAR :
+                               ui_browser__write_graph(browser, fwd ? SLSMG_DARROW_CHAR :
                                                                    SLSMG_UARROW_CHAR);
                                SLsmg_write_char(' ');
                        } else if (ins__is_call(dl->ins)) {
-                               ui_browser__write_graph(self, SLSMG_RARROW_CHAR);
+                               ui_browser__write_graph(browser, SLSMG_RARROW_CHAR);
                                SLsmg_write_char(' ');
                        } else {
                                slsmg_write_nstring(" ", 2);
@@ -164,12 +168,12 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
                        if (strcmp(dl->name, "retq")) {
                                slsmg_write_nstring(" ", 2);
                        } else {
-                               ui_browser__write_graph(self, SLSMG_LARROW_CHAR);
+                               ui_browser__write_graph(browser, SLSMG_LARROW_CHAR);
                                SLsmg_write_char(' ');
                        }
                }
 
-               disasm_line__scnprintf(dl, bf, sizeof(bf), !ab->use_offset);
+               disasm_line__scnprintf(dl, bf, sizeof(bf), !annotate_browser__opts.use_offset);
                slsmg_write_nstring(bf, width - 10 - printed);
        }
 
@@ -184,7 +188,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
        struct browser_disasm_line *btarget, *bcursor;
        unsigned int from, to;
 
-       if (!cursor->ins || !ins__is_jump(cursor->ins) ||
+       if (!cursor || !cursor->ins || !ins__is_jump(cursor->ins) ||
            !disasm_line__has_offset(cursor))
                return;
 
@@ -195,7 +199,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
        bcursor = disasm_line__browser(cursor);
        btarget = disasm_line__browser(target);
 
-       if (ab->hide_src_code) {
+       if (annotate_browser__opts.hide_src_code) {
                from = bcursor->idx_asm;
                to = btarget->idx_asm;
        } else {
@@ -209,10 +213,9 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
 
 static unsigned int annotate_browser__refresh(struct ui_browser *browser)
 {
-       struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
        int ret = ui_browser__list_head_refresh(browser);
 
-       if (ab->jump_arrows)
+       if (annotate_browser__opts.jump_arrows)
                annotate_browser__draw_current_jump(browser);
 
        ui_browser__set_color(browser, HE_COLORSET_NORMAL);
@@ -272,27 +275,27 @@ static void disasm_rb_tree__insert(struct rb_root *root, struct browser_disasm_l
        rb_insert_color(&bdl->rb_node, root);
 }
 
-static void annotate_browser__set_top(struct annotate_browser *self,
+static void annotate_browser__set_top(struct annotate_browser *browser,
                                      struct disasm_line *pos, u32 idx)
 {
        unsigned back;
 
-       ui_browser__refresh_dimensions(&self->b);
-       back = self->b.height / 2;
-       self->b.top_idx = self->b.index = idx;
+       ui_browser__refresh_dimensions(&browser->b);
+       back = browser->b.height / 2;
+       browser->b.top_idx = browser->b.index = idx;
 
-       while (self->b.top_idx != 0 && back != 0) {
+       while (browser->b.top_idx != 0 && back != 0) {
                pos = list_entry(pos->node.prev, struct disasm_line, node);
 
-               if (disasm_line__filter(&self->b, &pos->node))
+               if (disasm_line__filter(&browser->b, &pos->node))
                        continue;
 
-               --self->b.top_idx;
+               --browser->b.top_idx;
                --back;
        }
 
-       self->b.top = pos;
-       self->b.navkeypressed = true;
+       browser->b.top = pos;
+       browser->b.navkeypressed = true;
 }
 
 static void annotate_browser__set_rb_top(struct annotate_browser *browser,
@@ -300,10 +303,14 @@ static void annotate_browser__set_rb_top(struct annotate_browser *browser,
 {
        struct browser_disasm_line *bpos;
        struct disasm_line *pos;
+       u32 idx;
 
        bpos = rb_entry(nd, struct browser_disasm_line, rb_node);
        pos = ((struct disasm_line *)bpos) - 1;
-       annotate_browser__set_top(browser, pos, bpos->idx);
+       idx = bpos->idx;
+       if (annotate_browser__opts.hide_src_code)
+               idx = bpos->idx_asm;
+       annotate_browser__set_top(browser, pos, idx);
        browser->curr_hot = nd;
 }
 
@@ -343,12 +350,12 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
        dl = list_entry(browser->b.top, struct disasm_line, node);
        bdl = disasm_line__browser(dl);
 
-       if (browser->hide_src_code) {
+       if (annotate_browser__opts.hide_src_code) {
                if (bdl->idx_asm < offset)
                        offset = bdl->idx;
 
                browser->b.nr_entries = browser->nr_entries;
-               browser->hide_src_code = false;
+               annotate_browser__opts.hide_src_code = false;
                browser->b.seek(&browser->b, -offset, SEEK_CUR);
                browser->b.top_idx = bdl->idx - offset;
                browser->b.index = bdl->idx;
@@ -363,7 +370,7 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
                        offset = bdl->idx_asm;
 
                browser->b.nr_entries = browser->nr_asm_entries;
-               browser->hide_src_code = true;
+               annotate_browser__opts.hide_src_code = true;
                browser->b.seek(&browser->b, -offset, SEEK_CUR);
                browser->b.top_idx = bdl->idx_asm - offset;
                browser->b.index = bdl->idx_asm;
@@ -372,6 +379,12 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
        return true;
 }
 
+static void annotate_browser__init_asm_mode(struct annotate_browser *browser)
+{
+       ui_browser__reset_index(&browser->b);
+       browser->b.nr_entries = browser->nr_asm_entries;
+}
+
 static bool annotate_browser__callq(struct annotate_browser *browser,
                                    int evidx, void (*timer)(void *arg),
                                    void *arg, int delay_secs)
@@ -574,33 +587,46 @@ bool annotate_browser__continue_search_reverse(struct annotate_browser *browser,
        return __annotate_browser__search_reverse(browser);
 }
 
-static int annotate_browser__run(struct annotate_browser *self, int evidx,
+static void annotate_browser__update_addr_width(struct annotate_browser *browser)
+{
+       if (annotate_browser__opts.use_offset)
+               browser->target_width = browser->min_addr_width;
+       else
+               browser->target_width = browser->max_addr_width;
+
+       browser->addr_width = browser->target_width;
+
+       if (annotate_browser__opts.show_nr_jumps)
+               browser->addr_width += browser->jumps_width + 1;
+}
+
+static int annotate_browser__run(struct annotate_browser *browser, int evidx,
                                 void(*timer)(void *arg),
                                 void *arg, int delay_secs)
 {
        struct rb_node *nd = NULL;
-       struct map_symbol *ms = self->b.priv;
+       struct map_symbol *ms = browser->b.priv;
        struct symbol *sym = ms->sym;
        const char *help = "Press 'h' for help on key bindings";
        int key;
 
-       if (ui_browser__show(&self->b, sym->name, help) < 0)
+       if (ui_browser__show(&browser->b, sym->name, help) < 0)
                return -1;
 
-       annotate_browser__calc_percent(self, evidx);
+       annotate_browser__calc_percent(browser, evidx);
 
-       if (self->curr_hot) {
-               annotate_browser__set_rb_top(self, self->curr_hot);
-               self->b.navkeypressed = false;
+       if (browser->curr_hot) {
+               annotate_browser__set_rb_top(browser, browser->curr_hot);
+               browser->b.navkeypressed = false;
        }
 
-       nd = self->curr_hot;
+       nd = browser->curr_hot;
 
        while (1) {
-               key = ui_browser__run(&self->b, delay_secs);
+               key = ui_browser__run(&browser->b, delay_secs);
 
                if (delay_secs != 0) {
-                       annotate_browser__calc_percent(self, evidx);
+                       annotate_browser__calc_percent(browser, evidx);
                        /*
                         * Current line focus got out of the list of most active
                         * lines, NULL it so that if TAB|UNTAB is pressed, we
@@ -622,21 +648,21 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx,
                        if (nd != NULL) {
                                nd = rb_prev(nd);
                                if (nd == NULL)
-                                       nd = rb_last(&self->entries);
+                                       nd = rb_last(&browser->entries);
                        } else
-                               nd = self->curr_hot;
+                               nd = browser->curr_hot;
                        break;
                case K_UNTAB:
                        if (nd != NULL)
                                nd = rb_next(nd);
                                if (nd == NULL)
-                                       nd = rb_first(&self->entries);
+                                       nd = rb_first(&browser->entries);
                        else
-                               nd = self->curr_hot;
+                               nd = browser->curr_hot;
                        break;
                case K_F1:
                case 'h':
-                       ui_browser__help_window(&self->b,
+                       ui_browser__help_window(&browser->b,
                "UP/DOWN/PGUP\n"
                "PGDN/SPACE    Navigate\n"
                "q/ESC/CTRL+C  Exit\n\n"
@@ -652,57 +678,62 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx,
                "?             Search previous string\n");
                        continue;
                case 'H':
-                       nd = self->curr_hot;
+                       nd = browser->curr_hot;
                        break;
                case 's':
-                       if (annotate_browser__toggle_source(self))
+                       if (annotate_browser__toggle_source(browser))
                                ui_helpline__puts(help);
                        continue;
                case 'o':
-                       self->use_offset = !self->use_offset;
-                       if (self->use_offset)
-                               self->target_width = self->min_addr_width;
-                       else
-                               self->target_width = self->max_addr_width;
-update_addr_width:
-                       self->addr_width = self->target_width;
-                       if (self->show_nr_jumps)
-                               self->addr_width += self->jumps_width + 1;
+                       annotate_browser__opts.use_offset = !annotate_browser__opts.use_offset;
+                       annotate_browser__update_addr_width(browser);
                        continue;
                case 'j':
-                       self->jump_arrows = !self->jump_arrows;
+                       annotate_browser__opts.jump_arrows = !annotate_browser__opts.jump_arrows;
                        continue;
                case 'J':
-                       self->show_nr_jumps = !self->show_nr_jumps;
-                       goto update_addr_width;
+                       annotate_browser__opts.show_nr_jumps = !annotate_browser__opts.show_nr_jumps;
+                       annotate_browser__update_addr_width(browser);
+                       continue;
                case '/':
-                       if (annotate_browser__search(self, delay_secs)) {
+                       if (annotate_browser__search(browser, delay_secs)) {
 show_help:
                                ui_helpline__puts(help);
                        }
                        continue;
                case 'n':
-                       if (self->searching_backwards ?
-                           annotate_browser__continue_search_reverse(self, delay_secs) :
-                           annotate_browser__continue_search(self, delay_secs))
+                       if (browser->searching_backwards ?
+                           annotate_browser__continue_search_reverse(browser, delay_secs) :
+                           annotate_browser__continue_search(browser, delay_secs))
                                goto show_help;
                        continue;
                case '?':
-                       if (annotate_browser__search_reverse(self, delay_secs))
+                       if (annotate_browser__search_reverse(browser, delay_secs))
                                goto show_help;
                        continue;
+               case 'D': {
+                       static int seq;
+                       ui_helpline__pop();
+                       ui_helpline__fpush("%d: nr_ent=%d, height=%d, idx=%d, top_idx=%d, nr_asm_entries=%d",
+                                          seq++, browser->b.nr_entries,
+                                          browser->b.height,
+                                          browser->b.index,
+                                          browser->b.top_idx,
+                                          browser->nr_asm_entries);
+               }
+                       continue;
                case K_ENTER:
                case K_RIGHT:
-                       if (self->selection == NULL)
+                       if (browser->selection == NULL)
                                ui_helpline__puts("Huh? No selection. Report to linux-kernel@vger.kernel.org");
-                       else if (self->selection->offset == -1)
+                       else if (browser->selection->offset == -1)
                                ui_helpline__puts("Actions are only available for assembly lines.");
-                       else if (!self->selection->ins) {
-                               if (strcmp(self->selection->name, "retq"))
+                       else if (!browser->selection->ins) {
+                               if (strcmp(browser->selection->name, "retq"))
                                        goto show_sup_ins;
                                goto out;
-                       } else if (!(annotate_browser__jump(self) ||
-                                    annotate_browser__callq(self, evidx, timer, arg, delay_secs))) {
+                       } else if (!(annotate_browser__jump(browser) ||
+                                    annotate_browser__callq(browser, evidx, timer, arg, delay_secs))) {
 show_sup_ins:
                                ui_helpline__puts("Actions are only available for 'callq', 'retq' & jump instructions.");
                        }
@@ -717,10 +748,10 @@ show_sup_ins:
                }
 
                if (nd != NULL)
-                       annotate_browser__set_rb_top(self, nd);
+                       annotate_browser__set_rb_top(browser, nd);
        }
 out:
-       ui_browser__hide(&self->b);
+       ui_browser__hide(&browser->b);
        return key;
 }
 
@@ -797,8 +828,6 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
                        .priv    = &ms,
                        .use_navkeypressed = true,
                },
-               .use_offset = true,
-               .jump_arrows = true,
        };
        int ret = -1;
 
@@ -855,6 +884,12 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
        browser.b.nr_entries = browser.nr_entries;
        browser.b.entries = &notes->src->source,
        browser.b.width += 18; /* Percentage */
+
+       if (annotate_browser__opts.hide_src_code)
+               annotate_browser__init_asm_mode(&browser);
+
+       annotate_browser__update_addr_width(&browser);
+
        ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs);
        list_for_each_entry_safe(pos, n, &notes->src->source, node) {
                list_del(&pos->node);
@@ -865,3 +900,52 @@ out_free_offsets:
        free(browser.offsets);
        return ret;
 }
+
+#define ANNOTATE_CFG(n) \
+       { .name = #n, .value = &annotate_browser__opts.n, }
+       
+/*
+ * Keep the entries sorted, they are bsearch'ed
+ */
+static struct annotate__config {
+       const char *name;
+       bool *value;
+} annotate__configs[] = {
+       ANNOTATE_CFG(hide_src_code),
+       ANNOTATE_CFG(jump_arrows),
+       ANNOTATE_CFG(show_nr_jumps),
+       ANNOTATE_CFG(use_offset),
+};
+
+#undef ANNOTATE_CFG
+
+static int annotate_config__cmp(const void *name, const void *cfgp)
+{
+       const struct annotate__config *cfg = cfgp;
+
+       return strcmp(name, cfg->name);
+}
+
+static int annotate__config(const char *var, const char *value, void *data __used)
+{
+       struct annotate__config *cfg;
+       const char *name;
+
+       if (prefixcmp(var, "annotate.") != 0)
+               return 0;
+
+       name = var + 9;
+       cfg = bsearch(name, annotate__configs, ARRAY_SIZE(annotate__configs),
+                     sizeof(struct annotate__config), annotate_config__cmp);
+
+       if (cfg == NULL)
+               return -1;
+
+       *cfg->value = perf_config_bool(name, value);
+       return 0;
+}
+
+void annotate_browser__init(void)
+{
+       perf_config(annotate__config, NULL);
+}
index a372a4b026354b4d9f3984bc7fd3b96a5523f95a..53f6697d014e788396b474c6be957893b8fd09ca 100644 (file)
@@ -26,21 +26,21 @@ struct hist_browser {
        bool                 has_symbols;
 };
 
-static int hists__browser_title(struct hists *self, char *bf, size_t size,
+static int hists__browser_title(struct hists *hists, char *bf, size_t size,
                                const char *ev_name);
 
-static void hist_browser__refresh_dimensions(struct hist_browser *self)
+static void hist_browser__refresh_dimensions(struct hist_browser *browser)
 {
        /* 3 == +/- toggle symbol before actual hist_entry rendering */
-       self->b.width = 3 + (hists__sort_list_width(self->hists) +
+       browser->b.width = 3 + (hists__sort_list_width(browser->hists) +
                             sizeof("[k]"));
 }
 
-static void hist_browser__reset(struct hist_browser *self)
+static void hist_browser__reset(struct hist_browser *browser)
 {
-       self->b.nr_entries = self->hists->nr_entries;
-       hist_browser__refresh_dimensions(self);
-       ui_browser__reset_index(&self->b);
+       browser->b.nr_entries = browser->hists->nr_entries;
+       hist_browser__refresh_dimensions(browser);
+       ui_browser__reset_index(&browser->b);
 }
 
/* Sign drawn in front of a foldable tree node: '-' if open, '+' if folded. */
static char tree__folded_sign(bool unfolded)
{
	if (unfolded)
		return '-';
	return '+';
}
 
-static char map_symbol__folded(const struct map_symbol *self)
+static char map_symbol__folded(const struct map_symbol *ms)
 {
-       return self->has_children ? tree__folded_sign(self->unfolded) : ' ';
+       return ms->has_children ? tree__folded_sign(ms->unfolded) : ' ';
 }
 
-static char hist_entry__folded(const struct hist_entry *self)
+static char hist_entry__folded(const struct hist_entry *he)
 {
-       return map_symbol__folded(&self->ms);
+       return map_symbol__folded(&he->ms);
 }
 
-static char callchain_list__folded(const struct callchain_list *self)
+static char callchain_list__folded(const struct callchain_list *cl)
 {
-       return map_symbol__folded(&self->ms);
+       return map_symbol__folded(&cl->ms);
 }
 
-static void map_symbol__set_folding(struct map_symbol *self, bool unfold)
+static void map_symbol__set_folding(struct map_symbol *ms, bool unfold)
 {
-       self->unfolded = unfold ? self->has_children : false;
+       ms->unfolded = unfold ? ms->has_children : false;
 }
 
-static int callchain_node__count_rows_rb_tree(struct callchain_node *self)
+static int callchain_node__count_rows_rb_tree(struct callchain_node *node)
 {
        int n = 0;
        struct rb_node *nd;
 
-       for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
                struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
                struct callchain_list *chain;
                char folded_sign = ' '; /* No children */
@@ -123,23 +123,23 @@ static int callchain__count_rows(struct rb_root *chain)
        return n;
 }
 
-static bool map_symbol__toggle_fold(struct map_symbol *self)
+static bool map_symbol__toggle_fold(struct map_symbol *ms)
 {
-       if (!self)
+       if (!ms)
                return false;
 
-       if (!self->has_children)
+       if (!ms->has_children)
                return false;
 
-       self->unfolded = !self->unfolded;
+       ms->unfolded = !ms->unfolded;
        return true;
 }
 
-static void callchain_node__init_have_children_rb_tree(struct callchain_node *self)
+static void callchain_node__init_have_children_rb_tree(struct callchain_node *node)
 {
-       struct rb_node *nd = rb_first(&self->rb_root);
+       struct rb_node *nd = rb_first(&node->rb_root);
 
-       for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
                struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
                struct callchain_list *chain;
                bool first = true;
@@ -158,49 +158,49 @@ static void callchain_node__init_have_children_rb_tree(struct callchain_node *se
        }
 }
 
-static void callchain_node__init_have_children(struct callchain_node *self)
+static void callchain_node__init_have_children(struct callchain_node *node)
 {
        struct callchain_list *chain;
 
-       list_for_each_entry(chain, &self->val, list)
-               chain->ms.has_children = !RB_EMPTY_ROOT(&self->rb_root);
+       list_for_each_entry(chain, &node->val, list)
+               chain->ms.has_children = !RB_EMPTY_ROOT(&node->rb_root);
 
-       callchain_node__init_have_children_rb_tree(self);
+       callchain_node__init_have_children_rb_tree(node);
 }
 
-static void callchain__init_have_children(struct rb_root *self)
+static void callchain__init_have_children(struct rb_root *root)
 {
        struct rb_node *nd;
 
-       for (nd = rb_first(self); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
                callchain_node__init_have_children(node);
        }
 }
 
-static void hist_entry__init_have_children(struct hist_entry *self)
+static void hist_entry__init_have_children(struct hist_entry *he)
 {
-       if (!self->init_have_children) {
-               self->ms.has_children = !RB_EMPTY_ROOT(&self->sorted_chain);
-               callchain__init_have_children(&self->sorted_chain);
-               self->init_have_children = true;
+       if (!he->init_have_children) {
+               he->ms.has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
+               callchain__init_have_children(&he->sorted_chain);
+               he->init_have_children = true;
        }
 }
 
-static bool hist_browser__toggle_fold(struct hist_browser *self)
+static bool hist_browser__toggle_fold(struct hist_browser *browser)
 {
-       if (map_symbol__toggle_fold(self->selection)) {
-               struct hist_entry *he = self->he_selection;
+       if (map_symbol__toggle_fold(browser->selection)) {
+               struct hist_entry *he = browser->he_selection;
 
                hist_entry__init_have_children(he);
-               self->hists->nr_entries -= he->nr_rows;
+               browser->hists->nr_entries -= he->nr_rows;
 
                if (he->ms.unfolded)
                        he->nr_rows = callchain__count_rows(&he->sorted_chain);
                else
                        he->nr_rows = 0;
-               self->hists->nr_entries += he->nr_rows;
-               self->b.nr_entries = self->hists->nr_entries;
+               browser->hists->nr_entries += he->nr_rows;
+               browser->b.nr_entries = browser->hists->nr_entries;
 
                return true;
        }
@@ -209,12 +209,12 @@ static bool hist_browser__toggle_fold(struct hist_browser *self)
        return false;
 }
 
-static int callchain_node__set_folding_rb_tree(struct callchain_node *self, bool unfold)
+static int callchain_node__set_folding_rb_tree(struct callchain_node *node, bool unfold)
 {
        int n = 0;
        struct rb_node *nd;
 
-       for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
                struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
                struct callchain_list *chain;
                bool has_children = false;
@@ -263,37 +263,37 @@ static int callchain__set_folding(struct rb_root *chain, bool unfold)
        return n;
 }
 
-static void hist_entry__set_folding(struct hist_entry *self, bool unfold)
+static void hist_entry__set_folding(struct hist_entry *he, bool unfold)
 {
-       hist_entry__init_have_children(self);
-       map_symbol__set_folding(&self->ms, unfold);
+       hist_entry__init_have_children(he);
+       map_symbol__set_folding(&he->ms, unfold);
 
-       if (self->ms.has_children) {
-               int n = callchain__set_folding(&self->sorted_chain, unfold);
-               self->nr_rows = unfold ? n : 0;
+       if (he->ms.has_children) {
+               int n = callchain__set_folding(&he->sorted_chain, unfold);
+               he->nr_rows = unfold ? n : 0;
        } else
-               self->nr_rows = 0;
+               he->nr_rows = 0;
 }
 
-static void hists__set_folding(struct hists *self, bool unfold)
+static void hists__set_folding(struct hists *hists, bool unfold)
 {
        struct rb_node *nd;
 
-       self->nr_entries = 0;
+       hists->nr_entries = 0;
 
-       for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
                hist_entry__set_folding(he, unfold);
-               self->nr_entries += 1 + he->nr_rows;
+               hists->nr_entries += 1 + he->nr_rows;
        }
 }
 
-static void hist_browser__set_folding(struct hist_browser *self, bool unfold)
+static void hist_browser__set_folding(struct hist_browser *browser, bool unfold)
 {
-       hists__set_folding(self->hists, unfold);
-       self->b.nr_entries = self->hists->nr_entries;
+       hists__set_folding(browser->hists, unfold);
+       browser->b.nr_entries = browser->hists->nr_entries;
        /* Go to the start, we may be way after valid entries after a collapse */
-       ui_browser__reset_index(&self->b);
+       ui_browser__reset_index(&browser->b);
 }
 
 static void ui_browser__warn_lost_events(struct ui_browser *browser)
@@ -305,64 +305,64 @@ static void ui_browser__warn_lost_events(struct ui_browser *browser)
                "Or reduce the sampling frequency.");
 }
 
-static int hist_browser__run(struct hist_browser *self, const char *ev_name,
+static int hist_browser__run(struct hist_browser *browser, const char *ev_name,
                             void(*timer)(void *arg), void *arg, int delay_secs)
 {
        int key;
        char title[160];
 
-       self->b.entries = &self->hists->entries;
-       self->b.nr_entries = self->hists->nr_entries;
+       browser->b.entries = &browser->hists->entries;
+       browser->b.nr_entries = browser->hists->nr_entries;
 
-       hist_browser__refresh_dimensions(self);
-       hists__browser_title(self->hists, title, sizeof(title), ev_name);
+       hist_browser__refresh_dimensions(browser);
+       hists__browser_title(browser->hists, title, sizeof(title), ev_name);
 
-       if (ui_browser__show(&self->b, title,
+       if (ui_browser__show(&browser->b, title,
                             "Press '?' for help on key bindings") < 0)
                return -1;
 
        while (1) {
-               key = ui_browser__run(&self->b, delay_secs);
+               key = ui_browser__run(&browser->b, delay_secs);
 
                switch (key) {
                case K_TIMER:
                        timer(arg);
-                       ui_browser__update_nr_entries(&self->b, self->hists->nr_entries);
+                       ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
 
-                       if (self->hists->stats.nr_lost_warned !=
-                           self->hists->stats.nr_events[PERF_RECORD_LOST]) {
-                               self->hists->stats.nr_lost_warned =
-                                       self->hists->stats.nr_events[PERF_RECORD_LOST];
-                               ui_browser__warn_lost_events(&self->b);
+                       if (browser->hists->stats.nr_lost_warned !=
+                           browser->hists->stats.nr_events[PERF_RECORD_LOST]) {
+                               browser->hists->stats.nr_lost_warned =
+                                       browser->hists->stats.nr_events[PERF_RECORD_LOST];
+                               ui_browser__warn_lost_events(&browser->b);
                        }
 
-                       hists__browser_title(self->hists, title, sizeof(title), ev_name);
-                       ui_browser__show_title(&self->b, title);
+                       hists__browser_title(browser->hists, title, sizeof(title), ev_name);
+                       ui_browser__show_title(&browser->b, title);
                        continue;
                case 'D': { /* Debug */
                        static int seq;
-                       struct hist_entry *h = rb_entry(self->b.top,
+                       struct hist_entry *h = rb_entry(browser->b.top,
                                                        struct hist_entry, rb_node);
                        ui_helpline__pop();
                        ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
-                                          seq++, self->b.nr_entries,
-                                          self->hists->nr_entries,
-                                          self->b.height,
-                                          self->b.index,
-                                          self->b.top_idx,
+                                          seq++, browser->b.nr_entries,
+                                          browser->hists->nr_entries,
+                                          browser->b.height,
+                                          browser->b.index,
+                                          browser->b.top_idx,
                                           h->row_offset, h->nr_rows);
                }
                        break;
                case 'C':
                        /* Collapse the whole world. */
-                       hist_browser__set_folding(self, false);
+                       hist_browser__set_folding(browser, false);
                        break;
                case 'E':
                        /* Expand the whole world. */
-                       hist_browser__set_folding(self, true);
+                       hist_browser__set_folding(browser, true);
                        break;
                case K_ENTER:
-                       if (hist_browser__toggle_fold(self))
+                       if (hist_browser__toggle_fold(browser))
                                break;
                        /* fall thru */
                default:
@@ -370,23 +370,23 @@ static int hist_browser__run(struct hist_browser *self, const char *ev_name,
                }
        }
 out:
-       ui_browser__hide(&self->b);
+       ui_browser__hide(&browser->b);
        return key;
 }
 
-static char *callchain_list__sym_name(struct callchain_list *self,
+static char *callchain_list__sym_name(struct callchain_list *cl,
                                      char *bf, size_t bfsize)
 {
-       if (self->ms.sym)
-               return self->ms.sym->name;
+       if (cl->ms.sym)
+               return cl->ms.sym->name;
 
-       snprintf(bf, bfsize, "%#" PRIx64, self->ip);
+       snprintf(bf, bfsize, "%#" PRIx64, cl->ip);
        return bf;
 }
 
 #define LEVEL_OFFSET_STEP 3
 
-static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
+static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browser,
                                                     struct callchain_node *chain_node,
                                                     u64 total, int level,
                                                     unsigned short row,
@@ -444,21 +444,21 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
                        }
 
                        color = HE_COLORSET_NORMAL;
-                       width = self->b.width - (offset + extra_offset + 2);
-                       if (ui_browser__is_current_entry(&self->b, row)) {
-                               self->selection = &chain->ms;
+                       width = browser->b.width - (offset + extra_offset + 2);
+                       if (ui_browser__is_current_entry(&browser->b, row)) {
+                               browser->selection = &chain->ms;
                                color = HE_COLORSET_SELECTED;
                                *is_current_entry = true;
                        }
 
-                       ui_browser__set_color(&self->b, color);
-                       ui_browser__gotorc(&self->b, row, 0);
+                       ui_browser__set_color(&browser->b, color);
+                       ui_browser__gotorc(&browser->b, row, 0);
                        slsmg_write_nstring(" ", offset + extra_offset);
                        slsmg_printf("%c ", folded_sign);
                        slsmg_write_nstring(str, width);
                        free(alloc_str);
 
-                       if (++row == self->b.height)
+                       if (++row == browser->b.height)
                                goto out;
 do_next:
                        if (folded_sign == '+')
@@ -467,11 +467,11 @@ do_next:
 
                if (folded_sign == '-') {
                        const int new_level = level + (extra_offset ? 2 : 1);
-                       row += hist_browser__show_callchain_node_rb_tree(self, child, new_total,
+                       row += hist_browser__show_callchain_node_rb_tree(browser, child, new_total,
                                                                         new_level, row, row_offset,
                                                                         is_current_entry);
                }
-               if (row == self->b.height)
+               if (row == browser->b.height)
                        goto out;
                node = next;
        }
@@ -479,7 +479,7 @@ out:
        return row - first_row;
 }
 
-static int hist_browser__show_callchain_node(struct hist_browser *self,
+static int hist_browser__show_callchain_node(struct hist_browser *browser,
                                             struct callchain_node *node,
                                             int level, unsigned short row,
                                             off_t *row_offset,
@@ -488,7 +488,7 @@ static int hist_browser__show_callchain_node(struct hist_browser *self,
        struct callchain_list *chain;
        int first_row = row,
             offset = level * LEVEL_OFFSET_STEP,
-            width = self->b.width - offset;
+            width = browser->b.width - offset;
        char folded_sign = ' ';
 
        list_for_each_entry(chain, &node->val, list) {
@@ -503,26 +503,26 @@ static int hist_browser__show_callchain_node(struct hist_browser *self,
                }
 
                color = HE_COLORSET_NORMAL;
-               if (ui_browser__is_current_entry(&self->b, row)) {
-                       self->selection = &chain->ms;
+               if (ui_browser__is_current_entry(&browser->b, row)) {
+                       browser->selection = &chain->ms;
                        color = HE_COLORSET_SELECTED;
                        *is_current_entry = true;
                }
 
                s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
-               ui_browser__gotorc(&self->b, row, 0);
-               ui_browser__set_color(&self->b, color);
+               ui_browser__gotorc(&browser->b, row, 0);
+               ui_browser__set_color(&browser->b, color);
                slsmg_write_nstring(" ", offset);
                slsmg_printf("%c ", folded_sign);
                slsmg_write_nstring(s, width - 2);
 
-               if (++row == self->b.height)
+               if (++row == browser->b.height)
                        goto out;
        }
 
        if (folded_sign == '-')
-               row += hist_browser__show_callchain_node_rb_tree(self, node,
-                                                                self->hists->stats.total_period,
+               row += hist_browser__show_callchain_node_rb_tree(browser, node,
+                                                                browser->hists->stats.total_period,
                                                                 level + 1, row,
                                                                 row_offset,
                                                                 is_current_entry);
@@ -530,7 +530,7 @@ out:
        return row - first_row;
 }
 
-static int hist_browser__show_callchain(struct hist_browser *self,
+static int hist_browser__show_callchain(struct hist_browser *browser,
                                        struct rb_root *chain,
                                        int level, unsigned short row,
                                        off_t *row_offset,
@@ -542,31 +542,31 @@ static int hist_browser__show_callchain(struct hist_browser *self,
        for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
                struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
 
-               row += hist_browser__show_callchain_node(self, node, level,
+               row += hist_browser__show_callchain_node(browser, node, level,
                                                         row, row_offset,
                                                         is_current_entry);
-               if (row == self->b.height)
+               if (row == browser->b.height)
                        break;
        }
 
        return row - first_row;
 }
 
-static int hist_browser__show_entry(struct hist_browser *self,
+static int hist_browser__show_entry(struct hist_browser *browser,
                                    struct hist_entry *entry,
                                    unsigned short row)
 {
        char s[256];
        double percent;
        int printed = 0;
-       int width = self->b.width - 6; /* The percentage */
+       int width = browser->b.width - 6; /* The percentage */
        char folded_sign = ' ';
-       bool current_entry = ui_browser__is_current_entry(&self->b, row);
+       bool current_entry = ui_browser__is_current_entry(&browser->b, row);
        off_t row_offset = entry->row_offset;
 
        if (current_entry) {
-               self->he_selection = entry;
-               self->selection = &entry->ms;
+               browser->he_selection = entry;
+               browser->selection = &entry->ms;
        }
 
        if (symbol_conf.use_callchain) {
@@ -575,11 +575,11 @@ static int hist_browser__show_entry(struct hist_browser *self,
        }
 
        if (row_offset == 0) {
-               hist_entry__snprintf(entry, s, sizeof(s), self->hists);
-               percent = (entry->period * 100.0) / self->hists->stats.total_period;
+               hist_entry__snprintf(entry, s, sizeof(s), browser->hists);
+               percent = (entry->period * 100.0) / browser->hists->stats.total_period;
 
-               ui_browser__set_percent_color(&self->b, percent, current_entry);
-               ui_browser__gotorc(&self->b, row, 0);
+               ui_browser__set_percent_color(&browser->b, percent, current_entry);
+               ui_browser__gotorc(&browser->b, row, 0);
                if (symbol_conf.use_callchain) {
                        slsmg_printf("%c ", folded_sign);
                        width -= 2;
@@ -588,11 +588,11 @@ static int hist_browser__show_entry(struct hist_browser *self,
                slsmg_printf(" %5.2f%%", percent);
 
                /* The scroll bar isn't being used */
-               if (!self->b.navkeypressed)
+               if (!browser->b.navkeypressed)
                        width += 1;
 
-               if (!current_entry || !self->b.navkeypressed)
-                       ui_browser__set_color(&self->b, HE_COLORSET_NORMAL);
+               if (!current_entry || !browser->b.navkeypressed)
+                       ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
 
                if (symbol_conf.show_nr_samples) {
                        slsmg_printf(" %11u", entry->nr_events);
@@ -610,12 +610,12 @@ static int hist_browser__show_entry(struct hist_browser *self,
        } else
                --row_offset;
 
-       if (folded_sign == '-' && row != self->b.height) {
-               printed += hist_browser__show_callchain(self, &entry->sorted_chain,
+       if (folded_sign == '-' && row != browser->b.height) {
+               printed += hist_browser__show_callchain(browser, &entry->sorted_chain,
                                                        1, row, &row_offset,
                                                        &current_entry);
                if (current_entry)
-                       self->he_selection = entry;
+                       browser->he_selection = entry;
        }
 
        return printed;
@@ -631,22 +631,22 @@ static void ui_browser__hists_init_top(struct ui_browser *browser)
        }
 }
 
-static unsigned int hist_browser__refresh(struct ui_browser *self)
+static unsigned int hist_browser__refresh(struct ui_browser *browser)
 {
        unsigned row = 0;
        struct rb_node *nd;
-       struct hist_browser *hb = container_of(self, struct hist_browser, b);
+       struct hist_browser *hb = container_of(browser, struct hist_browser, b);
 
-       ui_browser__hists_init_top(self);
+       ui_browser__hists_init_top(browser);
 
-       for (nd = self->top; nd; nd = rb_next(nd)) {
+       for (nd = browser->top; nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
 
                if (h->filtered)
                        continue;
 
                row += hist_browser__show_entry(hb, h, row);
-               if (row == self->height)
+               if (row == browser->height)
                        break;
        }
 
@@ -679,27 +679,27 @@ static struct rb_node *hists__filter_prev_entries(struct rb_node *nd)
        return NULL;
 }
 
-static void ui_browser__hists_seek(struct ui_browser *self,
+static void ui_browser__hists_seek(struct ui_browser *browser,
                                   off_t offset, int whence)
 {
        struct hist_entry *h;
        struct rb_node *nd;
        bool first = true;
 
-       if (self->nr_entries == 0)
+       if (browser->nr_entries == 0)
                return;
 
-       ui_browser__hists_init_top(self);
+       ui_browser__hists_init_top(browser);
 
        switch (whence) {
        case SEEK_SET:
-               nd = hists__filter_entries(rb_first(self->entries));
+               nd = hists__filter_entries(rb_first(browser->entries));
                break;
        case SEEK_CUR:
-               nd = self->top;
+               nd = browser->top;
                goto do_offset;
        case SEEK_END:
-               nd = hists__filter_prev_entries(rb_last(self->entries));
+               nd = hists__filter_prev_entries(rb_last(browser->entries));
                first = false;
                break;
        default:
@@ -710,7 +710,7 @@ static void ui_browser__hists_seek(struct ui_browser *self,
         * Moves not relative to the first visible entry invalidates its
         * row_offset:
         */
-       h = rb_entry(self->top, struct hist_entry, rb_node);
+       h = rb_entry(browser->top, struct hist_entry, rb_node);
        h->row_offset = 0;
 
        /*
@@ -738,7 +738,7 @@ do_offset:
                                } else {
                                        h->row_offset += offset;
                                        offset = 0;
-                                       self->top = nd;
+                                       browser->top = nd;
                                        break;
                                }
                        }
@@ -746,7 +746,7 @@ do_offset:
                        if (nd == NULL)
                                break;
                        --offset;
-                       self->top = nd;
+                       browser->top = nd;
                } while (offset != 0);
        } else if (offset < 0) {
                while (1) {
@@ -759,7 +759,7 @@ do_offset:
                                        } else {
                                                h->row_offset += offset;
                                                offset = 0;
-                                               self->top = nd;
+                                               browser->top = nd;
                                                break;
                                        }
                                } else {
@@ -769,7 +769,7 @@ do_offset:
                                        } else {
                                                h->row_offset = h->nr_rows + offset;
                                                offset = 0;
-                                               self->top = nd;
+                                               browser->top = nd;
                                                break;
                                        }
                                }
@@ -779,7 +779,7 @@ do_offset:
                        if (nd == NULL)
                                break;
                        ++offset;
-                       self->top = nd;
+                       browser->top = nd;
                        if (offset == 0) {
                                /*
                                 * Last unfiltered hist_entry, check if it is
@@ -794,7 +794,7 @@ do_offset:
                        first = false;
                }
        } else {
-               self->top = nd;
+               browser->top = nd;
                h = rb_entry(nd, struct hist_entry, rb_node);
                h->row_offset = 0;
        }
@@ -802,46 +802,46 @@ do_offset:
 
 static struct hist_browser *hist_browser__new(struct hists *hists)
 {
-       struct hist_browser *self = zalloc(sizeof(*self));
+       struct hist_browser *browser = zalloc(sizeof(*browser));
 
-       if (self) {
-               self->hists = hists;
-               self->b.refresh = hist_browser__refresh;
-               self->b.seek = ui_browser__hists_seek;
-               self->b.use_navkeypressed = true;
+       if (browser) {
+               browser->hists = hists;
+               browser->b.refresh = hist_browser__refresh;
+               browser->b.seek = ui_browser__hists_seek;
+               browser->b.use_navkeypressed = true;
                if (sort__branch_mode == 1)
-                       self->has_symbols = sort_sym_from.list.next != NULL;
+                       browser->has_symbols = sort_sym_from.list.next != NULL;
                else
-                       self->has_symbols = sort_sym.list.next != NULL;
+                       browser->has_symbols = sort_sym.list.next != NULL;
        }
 
-       return self;
+       return browser;
 }
 
-static void hist_browser__delete(struct hist_browser *self)
+static void hist_browser__delete(struct hist_browser *browser)
 {
-       free(self);
+       free(browser);
 }
 
-static struct hist_entry *hist_browser__selected_entry(struct hist_browser *self)
+static struct hist_entry *hist_browser__selected_entry(struct hist_browser *browser)
 {
-       return self->he_selection;
+       return browser->he_selection;
 }
 
-static struct thread *hist_browser__selected_thread(struct hist_browser *self)
+static struct thread *hist_browser__selected_thread(struct hist_browser *browser)
 {
-       return self->he_selection->thread;
+       return browser->he_selection->thread;
 }
 
-static int hists__browser_title(struct hists *self, char *bf, size_t size,
+static int hists__browser_title(struct hists *hists, char *bf, size_t size,
                                const char *ev_name)
 {
        char unit;
        int printed;
-       const struct dso *dso = self->dso_filter;
-       const struct thread *thread = self->thread_filter;
-       unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE];
-       u64 nr_events = self->stats.total_period;
+       const struct dso *dso = hists->dso_filter;
+       const struct thread *thread = hists->thread_filter;
+       unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+       u64 nr_events = hists->stats.total_period;
 
        nr_samples = convert_unit(nr_samples, &unit);
        printed = scnprintf(bf, size,
@@ -849,9 +849,9 @@ static int hists__browser_title(struct hists *self, char *bf, size_t size,
                           nr_samples, unit, ev_name, nr_events);
 
 
-       if (self->uid_filter_str)
+       if (hists->uid_filter_str)
                printed += snprintf(bf + printed, size - printed,
-                                   ", UID: %s", self->uid_filter_str);
+                                   ", UID: %s", hists->uid_filter_str);
        if (thread)
                printed += scnprintf(bf + printed, size - printed,
                                    ", Thread: %s(%d)",
@@ -879,8 +879,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                    void(*timer)(void *arg), void *arg,
                                    int delay_secs)
 {
-       struct hists *self = &evsel->hists;
-       struct hist_browser *browser = hist_browser__new(self);
+       struct hists *hists = &evsel->hists;
+       struct hist_browser *browser = hist_browser__new(hists);
        struct branch_info *bi;
        struct pstack *fstack;
        char *options[16];
@@ -946,8 +946,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                        "Please enter the name of symbol you want to see",
                                        buf, "ENTER: OK, ESC: Cancel",
                                        delay_secs * 2) == K_ENTER) {
-                               self->symbol_filter_str = *buf ? buf : NULL;
-                               hists__filter_by_symbol(self);
+                               hists->symbol_filter_str = *buf ? buf : NULL;
+                               hists__filter_by_symbol(hists);
                                hist_browser__reset(browser);
                        }
                        continue;
@@ -1128,7 +1128,7 @@ zoom_out_dso:
                                sort_dso.elide = true;
                                pstack__push(fstack, &browser->hists->dso_filter);
                        }
-                       hists__filter_by_dso(self);
+                       hists__filter_by_dso(hists);
                        hist_browser__reset(browser);
                } else if (choice == zoom_thread) {
 zoom_thread:
@@ -1146,7 +1146,7 @@ zoom_out_thread:
                                sort_thread.elide = true;
                                pstack__push(fstack, &browser->hists->thread_filter);
                        }
-                       hists__filter_by_thread(self);
+                       hists__filter_by_thread(hists);
                        hist_browser__reset(browser);
                }
        }
index 9f5f888f73e30723d5f9c0fd8281345f38e0635e..791fb15ce3507c2d42d695be12c1f87a16affa0f 100644 (file)
@@ -22,6 +22,7 @@ void setup_browser(bool fallback_to_pager)
                        break;
                /* fall through */
        default:
+               use_browser = 0;
                if (fallback_to_pager)
                        setup_pager();
                break;
index 0deac6a14b652df87c998b38203b0ab003f541ed..6faa3a18bfbd8514001e2d2dd25a9f9a52ce7910 100644 (file)
@@ -120,7 +120,7 @@ static char *parse_value(void)
 
 static inline int iskeychar(int c)
 {
-       return isalnum(c) || c == '-';
+       return isalnum(c) || c == '-' || c == '_';
 }
 
 static int get_value(config_fn_t fn, void *data, char *name, unsigned int len)
index 57e4ce57bbcc03faf7245f40a0ba3e20b43851d8..91d19138f3ec3891620b755fe0faf470d61d2319 100644 (file)
@@ -15,6 +15,7 @@
 #include "cpumap.h"
 #include "thread_map.h"
 #include "target.h"
+#include "../../include/linux/perf_event.h"
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
@@ -64,6 +65,95 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
        return evsel;
 }
 
+static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
+       "cycles",
+       "instructions",
+       "cache-references",
+       "cache-misses",
+       "branches",
+       "branch-misses",
+       "bus-cycles",
+       "stalled-cycles-frontend",
+       "stalled-cycles-backend",
+       "ref-cycles",
+};
+
+const char *__perf_evsel__hw_name(u64 config)
+{
+       if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
+               return perf_evsel__hw_names[config];
+
+       return "unknown-hardware";
+}
+
+static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+       int colon = 0;
+       struct perf_event_attr *attr = &evsel->attr;
+       int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(attr->config));
+       bool exclude_guest_default = false;
+
+#define MOD_PRINT(context, mod)        do {                                    \
+               if (!attr->exclude_##context) {                         \
+                       if (!colon) colon = r++;                        \
+                       r += scnprintf(bf + r, size - r, "%c", mod);    \
+               } } while(0)
+
+       if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
+               MOD_PRINT(kernel, 'k');
+               MOD_PRINT(user, 'u');
+               MOD_PRINT(hv, 'h');
+               exclude_guest_default = true;
+       }
+
+       if (attr->precise_ip) {
+               if (!colon)
+                       colon = r++;
+               r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
+               exclude_guest_default = true;
+       }
+
+       if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
+               MOD_PRINT(host, 'H');
+               MOD_PRINT(guest, 'G');
+       }
+#undef MOD_PRINT
+       if (colon)
+               bf[colon] = ':';
+       return r;
+}
+
+int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+       int ret;
+
+       switch (evsel->attr.type) {
+       case PERF_TYPE_RAW:
+               ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
+               break;
+
+       case PERF_TYPE_HARDWARE:
+               ret = perf_evsel__hw_name(evsel, bf, size);
+               break;
+       default:
+               /*
+                * FIXME
+                *
+                * This is the minimal perf_evsel__name so that we can
+                * reconstruct event names taking into account event modifiers.
+                *
+                * The old event_name uses it now for raw and hw events, so that
+                * we don't drag all the parsing stuff into the python binding.
+                *
+                * On the next devel cycle the rest of the event naming will be
+                * brought here.
+                */
+               return 0;
+       }
+
+       return ret;
+}
+
 void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
                        struct perf_evsel *first)
 {
index 3d6b3e4cb66bb9bfbb32b75958ef1f23a4880163..4ba8b564e6f47f039652ebac739d9d899d5881f5 100644 (file)
@@ -83,6 +83,9 @@ void perf_evsel__config(struct perf_evsel *evsel,
                        struct perf_record_opts *opts,
                        struct perf_evsel *first);
 
+const char* __perf_evsel__hw_name(u64 config);
+int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size);
+
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
index fac7d59309b83698cf22829936e86e1c511eb31e..05dbc8b3c767217ceb3204c7be46f5aedbb205c4 100644 (file)
@@ -62,19 +62,6 @@ static struct event_symbol event_symbols[] = {
 #define PERF_EVENT_TYPE(config)                __PERF_EVENT_FIELD(config, TYPE)
 #define PERF_EVENT_ID(config)          __PERF_EVENT_FIELD(config, EVENT)
 
-static const char *hw_event_names[PERF_COUNT_HW_MAX] = {
-       "cycles",
-       "instructions",
-       "cache-references",
-       "cache-misses",
-       "branches",
-       "branch-misses",
-       "bus-cycles",
-       "stalled-cycles-frontend",
-       "stalled-cycles-backend",
-       "ref-cycles",
-};
-
 static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
        "cpu-clock",
        "task-clock",
@@ -300,6 +287,16 @@ const char *event_name(struct perf_evsel *evsel)
        u64 config = evsel->attr.config;
        int type = evsel->attr.type;
 
+       if (type == PERF_TYPE_RAW || type == PERF_TYPE_HARDWARE) {
+               /*
+                * XXX minimal fix, see comment on perf_evsel__name, this static buffer
+                * will go away together with event_name in the next devel cycle.
+                */
+               static char bf[128];
+               perf_evsel__name(evsel, bf, sizeof(bf));
+               return bf;
+       }
+
        if (evsel->name)
                return evsel->name;
 
@@ -317,9 +314,7 @@ const char *__event_name(int type, u64 config)
 
        switch (type) {
        case PERF_TYPE_HARDWARE:
-               if (config < PERF_COUNT_HW_MAX && hw_event_names[config])
-                       return hw_event_names[config];
-               return "unknown-hardware";
+               return __perf_evsel__hw_name(config);
 
        case PERF_TYPE_HW_CACHE: {
                u8 cache_type, cache_op, cache_result;
index 84d9bd7820049cdcb641e0efc96f22a28b8f210a..9b5f856cc28096b865d1c34c12073a23e5eb5a9c 100644 (file)
@@ -188,28 +188,27 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
                nt = realloc(threads, (sizeof(*threads) +
                                       sizeof(pid_t) * total_tasks));
                if (nt == NULL)
-                       goto out_free_threads;
+                       goto out_free_namelist;
 
                threads = nt;
 
-               if (threads) {
-                       for (i = 0; i < items; i++)
-                               threads->map[j++] = atoi(namelist[i]->d_name);
-                       threads->nr = total_tasks;
-               }
-
-               for (i = 0; i < items; i++)
+               for (i = 0; i < items; i++) {
+                       threads->map[j++] = atoi(namelist[i]->d_name);
                        free(namelist[i]);
+               }
+               threads->nr = total_tasks;
                free(namelist);
-
-               if (!threads)
-                       break;
        }
 
 out:
        strlist__delete(slist);
        return threads;
 
+out_free_namelist:
+       for (i = 0; i < items; i++)
+               free(namelist[i]);
+       free(namelist);
+
 out_free_threads:
        free(threads);
        threads = NULL;
index 28bc57ee757cf04d7b2166dc3e4b236b5fd19de6..a4162e15c25f89f32862a1f4fb2630c32f8c1c60 100644 (file)
@@ -1,4 +1,4 @@
-TARGETS = breakpoints vm
+TARGETS = breakpoints kcmp mqueue vm
 
 all:
        for TARGET in $(TARGETS); do \
diff --git a/tools/testing/selftests/kcmp/Makefile b/tools/testing/selftests/kcmp/Makefile
new file mode 100644 (file)
index 0000000..dc79b86
--- /dev/null
@@ -0,0 +1,29 @@
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/)
+ifeq ($(ARCH),i386)
+        ARCH := X86
+       CFLAGS := -DCONFIG_X86_32 -D__i386__
+endif
+ifeq ($(ARCH),x86_64)
+       ARCH := X86
+       CFLAGS := -DCONFIG_X86_64 -D__x86_64__
+endif
+
+CFLAGS += -I../../../../arch/x86/include/generated/
+CFLAGS += -I../../../../include/
+CFLAGS += -I../../../../usr/include/
+CFLAGS += -I../../../../arch/x86/include/
+
+all:
+ifeq ($(ARCH),X86)
+       gcc $(CFLAGS) kcmp_test.c -o run_test
+else
+       echo "Not an x86 target, can't build kcmp selftest"
+endif
+
+run-tests: all
+       ./kcmp_test
+
+clean:
+       rm -fr ./run_test
+       rm -fr ./test-file
diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c
new file mode 100644 (file)
index 0000000..358cc6b
--- /dev/null
@@ -0,0 +1,94 @@
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <limits.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include <linux/unistd.h>
+#include <linux/kcmp.h>
+
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+
+static long sys_kcmp(int pid1, int pid2, int type, int fd1, int fd2)
+{
+       return syscall(__NR_kcmp, pid1, pid2, type, fd1, fd2);
+}
+
+int main(int argc, char **argv)
+{
+       const char kpath[] = "kcmp-test-file";
+       int pid1, pid2;
+       int fd1, fd2;
+       int status;
+
+       fd1 = open(kpath, O_RDWR | O_CREAT | O_TRUNC, 0644);
+       pid1 = getpid();
+
+       if (fd1 < 0) {
+               perror("Can't create file");
+               exit(1);
+       }
+
+       pid2 = fork();
+       if (pid2 < 0) {
+               perror("fork failed");
+               exit(1);
+       }
+
+       if (!pid2) {
+               int pid2 = getpid();
+               int ret;
+
+               fd2 = open(kpath, O_RDWR, 0644);
+               if (fd2 < 0) {
+                       perror("Can't open file");
+                       exit(1);
+               }
+
+               /* An example of output and arguments */
+               printf("pid1: %6d pid2: %6d FD: %2ld FILES: %2ld VM: %2ld "
+                      "FS: %2ld SIGHAND: %2ld IO: %2ld SYSVSEM: %2ld "
+                      "INV: %2ld\n",
+                      pid1, pid2,
+                      sys_kcmp(pid1, pid2, KCMP_FILE,          fd1, fd2),
+                      sys_kcmp(pid1, pid2, KCMP_FILES,         0, 0),
+                      sys_kcmp(pid1, pid2, KCMP_VM,            0, 0),
+                      sys_kcmp(pid1, pid2, KCMP_FS,            0, 0),
+                      sys_kcmp(pid1, pid2, KCMP_SIGHAND,       0, 0),
+                      sys_kcmp(pid1, pid2, KCMP_IO,            0, 0),
+                      sys_kcmp(pid1, pid2, KCMP_SYSVSEM,       0, 0),
+
+                       /* This one should fail */
+                      sys_kcmp(pid1, pid2, KCMP_TYPES + 1,     0, 0));
+
+               /* This one should return same fd */
+               ret = sys_kcmp(pid1, pid2, KCMP_FILE, fd1, fd1);
+               if (ret) {
+                       printf("FAIL: 0 expected but %d returned\n", ret);
+                       ret = -1;
+               } else
+                       printf("PASS: 0 returned as expected\n");
+
+               /* Compare with self */
+               ret = sys_kcmp(pid1, pid1, KCMP_VM, 0, 0);
+               if (ret) {
+                       printf("FAIL: 0 expected but %li returned\n", ret);
+                       ret = -1;
+               } else
+                       printf("PASS: 0 returned as expected\n");
+
+               exit(ret);
+       }
+
+       waitpid(pid2, &status, P_ALL);
+
+       return 0;
+}
diff --git a/tools/testing/selftests/mqueue/.gitignore b/tools/testing/selftests/mqueue/.gitignore
new file mode 100644 (file)
index 0000000..d8d4237
--- /dev/null
@@ -0,0 +1,2 @@
+mq_open_tests
+mq_perf_tests
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile
new file mode 100644 (file)
index 0000000..54c0aad
--- /dev/null
@@ -0,0 +1,10 @@
+all:
+       gcc -O2 -lrt mq_open_tests.c -o mq_open_tests
+       gcc -O2 -lrt -lpthread -lpopt -o mq_perf_tests mq_perf_tests.c
+
+run_tests:
+       ./mq_open_tests /test1
+       ./mq_perf_tests
+
+clean:
+       rm -f mq_open_tests mq_perf_tests
diff --git a/tools/testing/selftests/mqueue/mq_open_tests.c b/tools/testing/selftests/mqueue/mq_open_tests.c
new file mode 100644 (file)
index 0000000..711cc29
--- /dev/null
@@ -0,0 +1,492 @@
+/*
+ * This application is Copyright 2012 Red Hat, Inc.
+ *     Doug Ledford <dledford@redhat.com>
+ *
+ * mq_open_tests is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 3.
+ *
+ * mq_open_tests is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For the full text of the license, see <http://www.gnu.org/licenses/>.
+ *
+ * mq_open_tests.c
+ *   Tests the various situations that should either succeed or fail to
+ *   open a posix message queue and then reports whether or not they
+ *   did as they were supposed to.
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <mqueue.h>
+
+static char *usage =
+"Usage:\n"
+"  %s path\n"
+"\n"
+"      path    Path name of the message queue to create\n"
+"\n"
+"      Note: this program must be run as root in order to enable all tests\n"
+"\n";
+
+char *DEF_MSGS = "/proc/sys/fs/mqueue/msg_default";
+char *DEF_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_default";
+char *MAX_MSGS = "/proc/sys/fs/mqueue/msg_max";
+char *MAX_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_max";
+
+int default_settings;
+struct rlimit saved_limits, cur_limits;
+int saved_def_msgs, saved_def_msgsize, saved_max_msgs, saved_max_msgsize;
+int cur_def_msgs, cur_def_msgsize, cur_max_msgs, cur_max_msgsize;
+FILE *def_msgs, *def_msgsize, *max_msgs, *max_msgsize;
+char *queue_path;
+mqd_t queue = -1;
+
+static inline void __set(FILE *stream, int value, char *err_msg);
+void shutdown(int exit_val, char *err_cause, int line_no);
+static inline int get(FILE *stream);
+static inline void set(FILE *stream, int value);
+static inline void getr(int type, struct rlimit *rlim);
+static inline void setr(int type, struct rlimit *rlim);
+void validate_current_settings();
+static inline void test_queue(struct mq_attr *attr, struct mq_attr *result);
+static inline int test_queue_fail(struct mq_attr *attr, struct mq_attr *result);
+
+static inline void __set(FILE *stream, int value, char *err_msg)
+{
+       rewind(stream);
+       if (fprintf(stream, "%d", value) < 0)
+               perror(err_msg);
+}
+
+
+void shutdown(int exit_val, char *err_cause, int line_no)
+{
+       static int in_shutdown = 0;
+
+       /* In case we get called recursively by a set() call below */
+       if (in_shutdown++)
+               return;
+
+       seteuid(0);
+
+       if (queue != -1)
+               if (mq_close(queue))
+                       perror("mq_close() during shutdown");
+       if (queue_path)
+               /*
+                * Be silent if this fails; if we already cleaned up,
+                * it's expected to fail
+                */
+               mq_unlink(queue_path);
+       if (default_settings) {
+               if (saved_def_msgs)
+                       __set(def_msgs, saved_def_msgs,
+                             "failed to restore saved_def_msgs");
+               if (saved_def_msgsize)
+                       __set(def_msgsize, saved_def_msgsize,
+                             "failed to restore saved_def_msgsize");
+       }
+       if (saved_max_msgs)
+               __set(max_msgs, saved_max_msgs,
+                     "failed to restore saved_max_msgs");
+       if (saved_max_msgsize)
+               __set(max_msgsize, saved_max_msgsize,
+                     "failed to restore saved_max_msgsize");
+       if (exit_val)
+               error(exit_val, errno, "%s at %d", err_cause, line_no);
+       exit(0);
+}
+
+static inline int get(FILE *stream)
+{
+       int value;
+       rewind(stream);
+       if (fscanf(stream, "%d", &value) != 1)
+               shutdown(4, "Error reading /proc entry", __LINE__ - 1);
+       return value;
+}
+
+static inline void set(FILE *stream, int value)
+{
+       int new_value;
+
+       rewind(stream);
+       if (fprintf(stream, "%d", value) < 0)
+               return shutdown(5, "Failed writing to /proc file",
+                               __LINE__ - 1);
+       new_value = get(stream);
+       if (new_value != value)
+               return shutdown(5, "We didn't get what we wrote to /proc back",
+                               __LINE__ - 1);
+}
+
+static inline void getr(int type, struct rlimit *rlim)
+{
+       if (getrlimit(type, rlim))
+               shutdown(6, "getrlimit()", __LINE__ - 1);
+}
+
+static inline void setr(int type, struct rlimit *rlim)
+{
+       if (setrlimit(type, rlim))
+               shutdown(7, "setrlimit()", __LINE__ - 1);
+}
+
+void validate_current_settings()
+{
+       int rlim_needed;
+
+       if (cur_limits.rlim_cur < 4096) {
+               printf("Current rlimit value for POSIX message queue bytes is "
+                      "unreasonably low,\nincreasing.\n\n");
+               cur_limits.rlim_cur = 8192;
+               cur_limits.rlim_max = 16384;
+               setr(RLIMIT_MSGQUEUE, &cur_limits);
+       }
+
+       if (default_settings) {
+               rlim_needed = (cur_def_msgs + 1) * (cur_def_msgsize + 1 +
+                                                   2 * sizeof(void *));
+               if (rlim_needed > cur_limits.rlim_cur) {
+                       printf("Temporarily lowering default queue parameters "
+                              "to something that will work\n"
+                              "with the current rlimit values.\n\n");
+                       set(def_msgs, 10);
+                       cur_def_msgs = 10;
+                       set(def_msgsize, 128);
+                       cur_def_msgsize = 128;
+               }
+       } else {
+               rlim_needed = (cur_max_msgs + 1) * (cur_max_msgsize + 1 +
+                                                   2 * sizeof(void *));
+               if (rlim_needed > cur_limits.rlim_cur) {
+                       printf("Temporarily lowering maximum queue parameters "
+                              "to something that will work\n"
+                              "with the current rlimit values in case this is "
+                              "a kernel that ties the default\n"
+                              "queue parameters to the maximum queue "
+                              "parameters.\n\n");
+                       set(max_msgs, 10);
+                       cur_max_msgs = 10;
+                       set(max_msgsize, 128);
+                       cur_max_msgsize = 128;
+               }
+       }
+}
+
+/*
+ * test_queue - Test opening a queue, shutdown if we fail.  This should
+ * only be called in situations that should never fail.  We clean up
+ * after ourselves and return the queue attributes in *result.
+ */
+static inline void test_queue(struct mq_attr *attr, struct mq_attr *result)
+{
+       int flags = O_RDWR | O_EXCL | O_CREAT;
+       int perms = DEFFILEMODE;
+
+       if ((queue = mq_open(queue_path, flags, perms, attr)) == -1)
+               shutdown(1, "mq_open()", __LINE__);
+       if (mq_getattr(queue, result))
+               shutdown(1, "mq_getattr()", __LINE__);
+       if (mq_close(queue))
+               shutdown(1, "mq_close()", __LINE__);
+       queue = -1;
+       if (mq_unlink(queue_path))
+               shutdown(1, "mq_unlink()", __LINE__);
+}
+
+/*
+ * Same as test_queue above, but failure is not fatal.
+ * Returns:
+ * 0 - Failed to create a queue
+ * 1 - Created a queue, attributes in *result
+ */
+static inline int test_queue_fail(struct mq_attr *attr, struct mq_attr *result)
+{
+       int flags = O_RDWR | O_EXCL | O_CREAT;
+       int perms = DEFFILEMODE;
+
+       if ((queue = mq_open(queue_path, flags, perms, attr)) == -1)
+               return 0;
+       if (mq_getattr(queue, result))
+               shutdown(1, "mq_getattr()", __LINE__);
+       if (mq_close(queue))
+               shutdown(1, "mq_close()", __LINE__);
+       queue = -1;
+       if (mq_unlink(queue_path))
+               shutdown(1, "mq_unlink()", __LINE__);
+       return 1;
+}
+
+int main(int argc, char *argv[])
+{
+       struct mq_attr attr, result;
+
+       if (argc != 2) {
+               fprintf(stderr, "Must pass a valid queue name\n\n");
+               fprintf(stderr, usage, argv[0]);
+               exit(1);
+       }
+
+       /*
+        * Although we can create a msg queue with a non-absolute path name,
+        * unlink will fail.  So, if the name doesn't start with a /, add one
+        * when we save it.
+        */
+       if (*argv[1] == '/')
+               queue_path = strdup(argv[1]);
+       else {
+               queue_path = malloc(strlen(argv[1]) + 2);
+               if (!queue_path) {
+                       perror("malloc()");
+                       exit(1);
+               }
+               queue_path[0] = '/';
+               queue_path[1] = 0;
+               strcat(queue_path, argv[1]);
+       }
+
+       if (getuid() != 0) {
+               fprintf(stderr, "Not running as root, but almost all tests "
+                       "require root in order to modify\nsystem settings.  "
+                       "Exiting.\n");
+               exit(1);
+       }
+
+       /* Find out what files there are for us to make tweaks in */
+       def_msgs = fopen(DEF_MSGS, "r+");
+       def_msgsize = fopen(DEF_MSGSIZE, "r+");
+       max_msgs = fopen(MAX_MSGS, "r+");
+       max_msgsize = fopen(MAX_MSGSIZE, "r+");
+
+       if (!max_msgs)
+               shutdown(2, "Failed to open msg_max", __LINE__);
+       if (!max_msgsize)
+               shutdown(2, "Failed to open msgsize_max", __LINE__);
+       if (def_msgs || def_msgsize)
+               default_settings = 1;
+
+       /* Load up the current system values for everything we can */
+       getr(RLIMIT_MSGQUEUE, &saved_limits);
+       cur_limits = saved_limits;
+       if (default_settings) {
+               saved_def_msgs = cur_def_msgs = get(def_msgs);
+               saved_def_msgsize = cur_def_msgsize = get(def_msgsize);
+       }
+       saved_max_msgs = cur_max_msgs = get(max_msgs);
+       saved_max_msgsize = cur_max_msgsize = get(max_msgsize);
+
+       /* Tell the user our initial state */
+       printf("\nInitial system state:\n");
+       printf("\tUsing queue path:\t\t%s\n", queue_path);
+       printf("\tRLIMIT_MSGQUEUE(soft):\t\t%d\n", saved_limits.rlim_cur);
+       printf("\tRLIMIT_MSGQUEUE(hard):\t\t%d\n", saved_limits.rlim_max);
+       printf("\tMaximum Message Size:\t\t%d\n", saved_max_msgsize);
+       printf("\tMaximum Queue Size:\t\t%d\n", saved_max_msgs);
+       if (default_settings) {
+               printf("\tDefault Message Size:\t\t%d\n", saved_def_msgsize);
+               printf("\tDefault Queue Size:\t\t%d\n", saved_def_msgs);
+       } else {
+               printf("\tDefault Message Size:\t\tNot Supported\n");
+               printf("\tDefault Queue Size:\t\tNot Supported\n");
+       }
+       printf("\n");
+
+       validate_current_settings();
+
+       printf("Adjusted system state for testing:\n");
+       printf("\tRLIMIT_MSGQUEUE(soft):\t\t%d\n", cur_limits.rlim_cur);
+       printf("\tRLIMIT_MSGQUEUE(hard):\t\t%d\n", cur_limits.rlim_max);
+       printf("\tMaximum Message Size:\t\t%d\n", cur_max_msgsize);
+       printf("\tMaximum Queue Size:\t\t%d\n", cur_max_msgs);
+       if (default_settings) {
+               printf("\tDefault Message Size:\t\t%d\n", cur_def_msgsize);
+               printf("\tDefault Queue Size:\t\t%d\n", cur_def_msgs);
+       }
+
+       printf("\n\nTest series 1, behavior when no attr struct "
+              "passed to mq_open:\n");
+       if (!default_settings) {
+               test_queue(NULL, &result);
+               printf("Given sane system settings, mq_open without an attr "
+                      "struct succeeds:\tPASS\n");
+               if (result.mq_maxmsg != cur_max_msgs ||
+                   result.mq_msgsize != cur_max_msgsize) {
+                       printf("Kernel does not support setting the default "
+                              "mq attributes,\nbut also doesn't tie the "
+                              "defaults to the maximums:\t\t\tPASS\n");
+               } else {
+                       set(max_msgs, ++cur_max_msgs);
+                       set(max_msgsize, ++cur_max_msgsize);
+                       test_queue(NULL, &result);
+                       if (result.mq_maxmsg == cur_max_msgs &&
+                           result.mq_msgsize == cur_max_msgsize)
+                               printf("Kernel does not support setting the "
+                                      "default mq attributes and\n"
+                                      "also ties system wide defaults to "
+                                      "the system wide maximums:\t\t"
+                                      "FAIL\n");
+                       else
+                               printf("Kernel does not support setting the "
+                                      "default mq attributes,\n"
+                                      "but also doesn't tie the defaults to "
+                                      "the maximums:\t\t\tPASS\n");
+               }
+       } else {
+               printf("Kernel supports setting defaults separately from "
+                      "maximums:\t\tPASS\n");
+               /*
+                * While we are here, go ahead and test that the kernel
+                * properly follows the default settings
+                */
+               test_queue(NULL, &result);
+               printf("Given sane values, mq_open without an attr struct "
+                      "succeeds:\t\tPASS\n");
+               if (result.mq_maxmsg != cur_def_msgs ||
+                   result.mq_msgsize != cur_def_msgsize)
+                       printf("Kernel supports setting defaults, but does "
+                              "not actually honor them:\tFAIL\n\n");
+               else {
+                       set(def_msgs, ++cur_def_msgs);
+                       set(def_msgsize, ++cur_def_msgsize);
+                       /* In case max was the same as the default */
+                       set(max_msgs, ++cur_max_msgs);
+                       set(max_msgsize, ++cur_max_msgsize);
+                       test_queue(NULL, &result);
+                       if (result.mq_maxmsg != cur_def_msgs ||
+                           result.mq_msgsize != cur_def_msgsize)
+                               printf("Kernel supports setting defaults, but "
+                                      "does not actually honor them:\t"
+                                      "FAIL\n");
+                       else
+                               printf("Kernel properly honors default setting "
+                                      "knobs:\t\t\t\tPASS\n");
+               }
+               set(def_msgs, cur_max_msgs + 1);
+               cur_def_msgs = cur_max_msgs + 1;
+               set(def_msgsize, cur_max_msgsize + 1);
+               cur_def_msgsize = cur_max_msgsize + 1;
+               if (cur_def_msgs * (cur_def_msgsize + 2 * sizeof(void *)) >=
+                   cur_limits.rlim_cur) {
+                       cur_limits.rlim_cur = (cur_def_msgs + 2) *
+                               (cur_def_msgsize + 2 * sizeof(void *));
+                       cur_limits.rlim_max = 2 * cur_limits.rlim_cur;
+                       setr(RLIMIT_MSGQUEUE, &cur_limits);
+               }
+               if (test_queue_fail(NULL, &result)) {
+                       if (result.mq_maxmsg == cur_max_msgs &&
+                           result.mq_msgsize == cur_max_msgsize)
+                               printf("Kernel properly limits default values "
+                                      "to lesser of default/max:\t\tPASS\n");
+                       else
+                               printf("Kernel does not properly set default "
+                                      "queue parameters when\ndefaults > "
+                                      "max:\t\t\t\t\t\t\t\tFAIL\n");
+               } else
+                       printf("Kernel fails to open mq because defaults are "
+                              "greater than maximums:\tFAIL\n");
+               set(def_msgs, --cur_def_msgs);
+               set(def_msgsize, --cur_def_msgsize);
+               cur_limits.rlim_cur = cur_limits.rlim_max = cur_def_msgs *
+                       cur_def_msgsize;
+               setr(RLIMIT_MSGQUEUE, &cur_limits);
+               if (test_queue_fail(NULL, &result))
+                       printf("Kernel creates queue even though defaults "
+                              "would exceed\nrlimit setting:"
+                              "\t\t\t\t\t\t\t\tFAIL\n");
+               else
+                       printf("Kernel properly fails to create queue when "
+                              "defaults would\nexceed rlimit:"
+                              "\t\t\t\t\t\t\t\tPASS\n");
+       }
+
+       /*
+        * Test #2 - open with an attr struct that exceeds rlimit
+        */
+       printf("\n\nTest series 2, behavior when attr struct is "
+              "passed to mq_open:\n");
+       cur_max_msgs = 32;
+       cur_max_msgsize = cur_limits.rlim_max >> 4;
+       set(max_msgs, cur_max_msgs);
+       set(max_msgsize, cur_max_msgsize);
+       attr.mq_maxmsg = cur_max_msgs;
+       attr.mq_msgsize = cur_max_msgsize;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open in excess of rlimit max when euid = 0 "
+                      "succeeded:\t\tFAIL\n");
+       else
+               printf("Queue open in excess of rlimit max when euid = 0 "
+                      "failed:\t\tPASS\n");
+       attr.mq_maxmsg = cur_max_msgs + 1;
+       attr.mq_msgsize = 10;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open with mq_maxmsg > limit when euid = 0 "
+                      "succeeded:\t\tPASS\n");
+       else
+               printf("Queue open with mq_maxmsg > limit when euid = 0 "
+                      "failed:\t\tFAIL\n");
+       attr.mq_maxmsg = 1;
+       attr.mq_msgsize = cur_max_msgsize + 1;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open with mq_msgsize > limit when euid = 0 "
+                      "succeeded:\t\tPASS\n");
+       else
+               printf("Queue open with mq_msgsize > limit when euid = 0 "
+                      "failed:\t\tFAIL\n");
+       attr.mq_maxmsg = 65536;
+       attr.mq_msgsize = 65536;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open with total size > 2GB when euid = 0 "
+                      "succeeded:\t\tFAIL\n");
+       else
+               printf("Queue open with total size > 2GB when euid = 0 "
+                      "failed:\t\t\tPASS\n");
+       seteuid(99);
+       attr.mq_maxmsg = cur_max_msgs;
+       attr.mq_msgsize = cur_max_msgsize;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open in excess of rlimit max when euid = 99 "
+                      "succeeded:\t\tFAIL\n");
+       else
+               printf("Queue open in excess of rlimit max when euid = 99 "
+                      "failed:\t\tPASS\n");
+       attr.mq_maxmsg = cur_max_msgs + 1;
+       attr.mq_msgsize = 10;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open with mq_maxmsg > limit when euid = 99 "
+                      "succeeded:\t\tFAIL\n");
+       else
+               printf("Queue open with mq_maxmsg > limit when euid = 99 "
+                      "failed:\t\tPASS\n");
+       attr.mq_maxmsg = 1;
+       attr.mq_msgsize = cur_max_msgsize + 1;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open with mq_msgsize > limit when euid = 99 "
+                      "succeeded:\t\tFAIL\n");
+       else
+               printf("Queue open with mq_msgsize > limit when euid = 99 "
+                      "failed:\t\tPASS\n");
+       attr.mq_maxmsg = 65536;
+       attr.mq_msgsize = 65536;
+       if (test_queue_fail(&attr, &result))
+               printf("Queue open with total size > 2GB when euid = 99 "
+                      "succeeded:\t\tFAIL\n");
+       else
+               printf("Queue open with total size > 2GB when euid = 99 "
+                      "failed:\t\t\tPASS\n");
+
+       shutdown(0,"",0);
+}
diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
new file mode 100644 (file)
index 0000000..2fadd4b
--- /dev/null
@@ -0,0 +1,741 @@
+/*
+ * This application is Copyright 2012 Red Hat, Inc.
+ *     Doug Ledford <dledford@redhat.com>
+ *
+ * mq_perf_tests is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 3.
+ *
+ * mq_perf_tests is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * For the full text of the license, see <http://www.gnu.org/licenses/>.
+ *
+ * mq_perf_tests.c
+ *   Tests various types of message queue workloads, concentrating on those
+ *   situations that involve large message sizes, large message queue depths,
+ *   or both, and reports back useful metrics about kernel message queue
+ *   performance.
+ *
+ */
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <limits.h>
+#include <errno.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <mqueue.h>
+#include <popt.h>
+
+static char *usage =
+"Usage:\n"
+"  %s [-c #[,#..] -f] path\n"
+"\n"
+"      -c #    Skip most tests and go straight to a high queue depth test\n"
+"              and then run that test continuously (useful for running at\n"
+"              the same time as some other workload to see how much the\n"
+"              cache thrashing caused by adding messages to a very deep\n"
+"              queue impacts the performance of other programs).  The number\n"
+"              indicates which CPU core we should bind the process to during\n"
+"              the run.  If you have more than one physical CPU, then you\n"
+"              will need one copy per physical CPU package, and you should\n"
+"              specify the CPU cores to pin ourself to via a comma separated\n"
+"              list of CPU values.\n"
+"      -f      Only usable with continuous mode.  Pin ourself to the CPUs\n"
+"              as requested, then instead of looping doing a high mq\n"
+"              workload, just busy loop.  This will allow us to lock up a\n"
+"              single CPU just like we normally would, but without actually\n"
+"              thrashing the CPU cache.  This is to make it easier to get\n"
+"              comparable numbers from some other workload running on the\n"
+"              other CPUs.  One set of numbers with # CPUs locked up running\n"
+"              an mq workload, and another set of numbers with those same\n"
+"              CPUs locked away from the test workload, but not doing\n"
+"              anything to trash the cache like the mq workload might.\n"
+"      path    Path name of the message queue to create\n"
+"\n"
+"      Note: this program must be run as root in order to enable all tests\n"
+"\n";
+
+char *MAX_MSGS = "/proc/sys/fs/mqueue/msg_max";
+char *MAX_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_max";
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#define MAX_CPUS 64
+char *cpu_option_string;
+int cpus_to_pin[MAX_CPUS];
+int num_cpus_to_pin;
+pthread_t cpu_threads[MAX_CPUS];
+pthread_t main_thread;
+cpu_set_t *cpu_set;
+int cpu_set_size;
+int cpus_online;
+
+#define MSG_SIZE 16
+#define TEST1_LOOPS 10000000
+#define TEST2_LOOPS 100000
+int continuous_mode;
+int continuous_mode_fake;
+
+struct rlimit saved_limits, cur_limits;
+int saved_max_msgs, saved_max_msgsize;
+int cur_max_msgs, cur_max_msgsize;
+FILE *max_msgs, *max_msgsize;
+int cur_nice;
+char *queue_path = "/mq_perf_tests";
+mqd_t queue = -1;
+struct mq_attr result;
+int mq_prio_max;
+
+const struct poptOption options[] = {
+       {
+               .longName = "continuous",
+               .shortName = 'c',
+               .argInfo = POPT_ARG_STRING,
+               .arg = &cpu_option_string,
+               .val = 'c',
+               .descrip = "Run continuous tests at a high queue depth in "
+                       "order to test the effects of cache thrashing on "
+                       "other tasks on the system.  This test is intended "
+                       "to be run on one core of each physical CPU while "
+                       "some other CPU intensive task is run on all the other "
+                       "cores of that same physical CPU and the other task "
+                       "is timed.  It is assumed that the process of adding "
+                       "messages to the message queue in a tight loop will "
+                       "impact that other task to some degree.  Once the "
+                       "tests are performed in this way, you should then "
+                       "re-run the tests using fake mode in order to check "
+                       "the difference in time required to perform the CPU "
+                       "intensive task",
+               .argDescrip = "cpu[,cpu]",
+       },
+       {
+               .longName = "fake",
+               .shortName = 'f',
+               .argInfo = POPT_ARG_NONE,
+               .arg = &continuous_mode_fake,
+               .val = 0,
+               .descrip = "Tie up the CPUs that we would normally tie up in"
+                       "continuous mode, but don't actually do any mq stuff, "
+                       "just keep the CPU busy so it can't be used to process "
+                       "system level tasks as this would free up resources on "
+                       "the other CPU cores and skew the comparison between "
+                       "the no-mqueue work and mqueue work tests",
+               .argDescrip = NULL,
+       },
+       {
+               .longName = "path",
+               .shortName = 'p',
+               .argInfo = POPT_ARG_STRING | POPT_ARGFLAG_SHOW_DEFAULT,
+               .arg = &queue_path,
+               .val = 'p',
+               .descrip = "The name of the path to use in the mqueue "
+                       "filesystem for our tests",
+               .argDescrip = "pathname",
+       },
+       POPT_AUTOHELP
+       POPT_TABLEEND
+};
+
+static inline void __set(FILE *stream, int value, char *err_msg);
+void shutdown(int exit_val, char *err_cause, int line_no);
+void sig_action_SIGUSR1(int signum, siginfo_t *info, void *context);
+void sig_action(int signum, siginfo_t *info, void *context);
+static inline int get(FILE *stream);
+static inline void set(FILE *stream, int value);
+static inline int try_set(FILE *stream, int value);
+static inline void getr(int type, struct rlimit *rlim);
+static inline void setr(int type, struct rlimit *rlim);
+static inline void open_queue(struct mq_attr *attr);
+void increase_limits(void);
+
+static inline void __set(FILE *stream, int value, char *err_msg)
+{
+       rewind(stream);
+       if (fprintf(stream, "%d", value) < 0)
+               perror(err_msg);
+}
+
+
+void shutdown(int exit_val, char *err_cause, int line_no)
+{
+       static int in_shutdown = 0;
+       int errno_at_shutdown = errno;
+       int i;
+
+       /* In case we get called by multiple threads or from an sighandler */
+       if (in_shutdown++)
+               return;
+
+       for (i = 0; i < num_cpus_to_pin; i++)
+               if (cpu_threads[i]) {
+                       pthread_kill(cpu_threads[i], SIGUSR1);
+                       pthread_join(cpu_threads[i], NULL);
+               }
+
+       if (queue != -1)
+               if (mq_close(queue))
+                       perror("mq_close() during shutdown");
+       if (queue_path)
+               /*
+                * Be silent if this fails; if we already cleaned up,
+                * it's expected to fail
+                */
+               mq_unlink(queue_path);
+       if (saved_max_msgs)
+               __set(max_msgs, saved_max_msgs,
+                     "failed to restore saved_max_msgs");
+       if (saved_max_msgsize)
+               __set(max_msgsize, saved_max_msgsize,
+                     "failed to restore saved_max_msgsize");
+       if (exit_val)
+               error(exit_val, errno_at_shutdown, "%s at %d",
+                     err_cause, line_no);
+       exit(0);
+}
+
+void sig_action_SIGUSR1(int signum, siginfo_t *info, void *context)
+{
+       if (pthread_self() != main_thread)
+               pthread_exit(0);
+       else {
+               fprintf(stderr, "Caught signal %d in SIGUSR1 handler, "
+                               "exiting\n", signum);
+               shutdown(0, "", 0);
+               fprintf(stderr, "\n\nReturned from shutdown?!?!\n\n");
+               exit(0);
+       }
+}
+
+void sig_action(int signum, siginfo_t *info, void *context)
+{
+       if (pthread_self() != main_thread)
+               pthread_kill(main_thread, signum);
+       else {
+               fprintf(stderr, "Caught signal %d, exiting\n", signum);
+               shutdown(0, "", 0);
+               fprintf(stderr, "\n\nReturned from shutdown?!?!\n\n");
+               exit(0);
+       }
+}
+
+static inline int get(FILE *stream)
+{
+       int value;
+       rewind(stream);
+       if (fscanf(stream, "%d", &value) != 1)
+               shutdown(4, "Error reading /proc entry", __LINE__);
+       return value;
+}
+
+static inline void set(FILE *stream, int value)
+{
+       int new_value;
+
+       rewind(stream);
+       if (fprintf(stream, "%d", value) < 0)
+               return shutdown(5, "Failed writing to /proc file", __LINE__);
+       new_value = get(stream);
+       if (new_value != value)
+               return shutdown(5, "We didn't get what we wrote to /proc back",
+                               __LINE__);
+}
+
+static inline int try_set(FILE *stream, int value)
+{
+       int new_value;
+
+       rewind(stream);
+       fprintf(stream, "%d", value);
+       new_value = get(stream);
+       return new_value == value;
+}
+
+static inline void getr(int type, struct rlimit *rlim)
+{
+       if (getrlimit(type, rlim))
+               shutdown(6, "getrlimit()", __LINE__);
+}
+
+static inline void setr(int type, struct rlimit *rlim)
+{
+       if (setrlimit(type, rlim))
+               shutdown(7, "setrlimit()", __LINE__);
+}
+
+/**
+ * open_queue - open the global queue for testing
+ * @attr - An attr struct specifying the desired queue traits
+ * Note: the actual traits of the queue are returned in the global "result"
+ *
+ * This open is not allowed to fail, failure will result in an orderly
+ * shutdown of the program.  The global queue_path is used to set what
+ * queue to open, the queue descriptor is saved in the global queue
+ * variable.
+ */
+static inline void open_queue(struct mq_attr *attr)
+{
+       int flags = O_RDWR | O_EXCL | O_CREAT | O_NONBLOCK;
+       int perms = DEFFILEMODE;
+
+       queue = mq_open(queue_path, flags, perms, attr);
+       if (queue == -1)
+               shutdown(1, "mq_open()", __LINE__);
+       if (mq_getattr(queue, &result))
+               shutdown(1, "mq_getattr()", __LINE__);
+       printf("\n\tQueue %s created:\n", queue_path);
+       printf("\t\tmq_flags:\t\t\t%s\n", result.mq_flags & O_NONBLOCK ?
+              "O_NONBLOCK" : "(null)");
+       printf("\t\tmq_maxmsg:\t\t\t%d\n", result.mq_maxmsg);
+       printf("\t\tmq_msgsize:\t\t\t%d\n", result.mq_msgsize);
+       printf("\t\tmq_curmsgs:\t\t\t%d\n", result.mq_curmsgs);
+}
+
+void *fake_cont_thread(void *arg)
+{
+       int i;
+
+       for (i = 0; i < num_cpus_to_pin; i++)
+               if (cpu_threads[i] == pthread_self())
+                       break;
+       printf("\tStarted fake continuous mode thread %d on CPU %d\n", i,
+              cpus_to_pin[i]);
+       while (1)
+               ;
+}
+
+void *cont_thread(void *arg)
+{
+       char buff[MSG_SIZE];
+       int i, priority;
+
+       for (i = 0; i < num_cpus_to_pin; i++)
+               if (cpu_threads[i] == pthread_self())
+                       break;
+       printf("\tStarted continuous mode thread %d on CPU %d\n", i,
+              cpus_to_pin[i]);
+       while (1) {
+               while (mq_send(queue, buff, sizeof(buff), 0) == 0)
+                       ;
+               mq_receive(queue, buff, sizeof(buff), &priority);
+       }
+}
+
/*
 * Drain every message currently on the queue.  Relies on caller locals
 * "buff" and "prio_in"; stops at the first receive that does not return
 * a full MSG_SIZE message (queue empty, since the queue was opened
 * O_NONBLOCK).
 */
#define drain_queue() \
	while (mq_receive(queue, buff, MSG_SIZE, &prio_in) == MSG_SIZE)

/*
 * Send one message without timing it -- used when pre-filling the
 * queue.  Relies on caller locals "buff" and "prio_out"; a failed send
 * aborts the test.
 */
#define do_untimed_send() \
	do { \
		if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
			shutdown(3, "Test send failure", __LINE__); \
	} while (0)
+
/*
 * Timed send+receive of one MSG_SIZE message.  The cost of the send is
 * accumulated into "send_total" and the cost of the receive into
 * "recv_total".  Relies on caller locals: clock, start, middle, end,
 * nsec, buff, prio_out, prio_in, send_total, recv_total.  The single
 * carry from tv_nsec into tv_sec assumes each individual operation
 * takes less than one second.
 */
#define do_send_recv() \
	do { \
		clock_gettime(clock, &start); \
		if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
			shutdown(3, "Test send failure", __LINE__); \
		clock_gettime(clock, &middle); \
		if (mq_receive(queue, buff, MSG_SIZE, &prio_in) != MSG_SIZE) \
			shutdown(3, "Test receive failure", __LINE__); \
		clock_gettime(clock, &end); \
		nsec = ((middle.tv_sec - start.tv_sec) * 1000000000) + \
			(middle.tv_nsec - start.tv_nsec); \
		send_total.tv_nsec += nsec; \
		if (send_total.tv_nsec >= 1000000000) { \
			send_total.tv_sec++; \
			send_total.tv_nsec -= 1000000000; \
		} \
		nsec = ((end.tv_sec - middle.tv_sec) * 1000000000) + \
			(end.tv_nsec - middle.tv_nsec); \
		recv_total.tv_nsec += nsec; \
		if (recv_total.tv_nsec >= 1000000000) { \
			recv_total.tv_sec++; \
			recv_total.tv_nsec -= 1000000000; \
		} \
	} while (0)
+
/*
 * One entry of a test table: a human-readable description and the
 * priority-mutation function applied between send/recv iterations.
 */
struct test {
	char *desc;
	void (*func)(int *);
};
+
/* Priority mutator for test 2a: leave the priority untouched. */
void const_prio(int *prio)
{
	(void)prio;
}
+
+void inc_prio(int *prio)
+{
+       if (++*prio == mq_prio_max)
+               *prio = 0;
+}
+
+void dec_prio(int *prio)
+{
+       if (--*prio < 0)
+               *prio = mq_prio_max - 1;
+}
+
+void random_prio(int *prio)
+{
+       *prio = random() % mq_prio_max;
+}
+
/*
 * Test table for test #2: each entry times send/recv against a nearly
 * full queue while mutating the message priority in a different way.
 * Terminated by a {NULL, NULL} sentinel.
 */
struct test test2[] = {
	{"\n\tTest #2a: Time send/recv message, queue full, constant prio\n",
		const_prio},
	{"\n\tTest #2b: Time send/recv message, queue full, increasing prio\n",
		inc_prio},
	{"\n\tTest #2c: Time send/recv message, queue full, decreasing prio\n",
		dec_prio},
	{"\n\tTest #2d: Time send/recv message, queue full, random prio\n",
		random_prio},
	{NULL, NULL}
};
+
+/**
+ * Tests to perform (all done with MSG_SIZE messages):
+ *
+ * 1) Time to add/remove message with 0 messages on queue
+ * 1a) with constant prio
+ * 2) Time to add/remove message when queue close to capacity:
+ * 2a) with constant prio
+ * 2b) with increasing prio
+ * 2c) with decreasing prio
+ * 2d) with random prio
+ * 3) Test limits of priorities honored (double check _SC_MQ_PRIO_MAX)
+ */
+void *perf_test_thread(void *arg)
+{
+       char buff[MSG_SIZE];
+       int prio_out, prio_in;
+       int i;
+       clockid_t clock;
+       pthread_t *t;
+       struct timespec res, start, middle, end, send_total, recv_total;
+       unsigned long long nsec;
+       struct test *cur_test;
+
+       t = &cpu_threads[0];
+       printf("\n\tStarted mqueue performance test thread on CPU %d\n",
+              cpus_to_pin[0]);
+       mq_prio_max = sysconf(_SC_MQ_PRIO_MAX);
+       if (mq_prio_max == -1)
+               shutdown(2, "sysconf(_SC_MQ_PRIO_MAX)", __LINE__);
+       if (pthread_getcpuclockid(cpu_threads[0], &clock) != 0)
+               shutdown(2, "pthread_getcpuclockid", __LINE__);
+
+       if (clock_getres(clock, &res))
+               shutdown(2, "clock_getres()", __LINE__);
+
+       printf("\t\tMax priorities:\t\t\t%d\n", mq_prio_max);
+       printf("\t\tClock resolution:\t\t%d nsec%s\n", res.tv_nsec,
+              res.tv_nsec > 1 ? "s" : "");
+
+
+
+       printf("\n\tTest #1: Time send/recv message, queue empty\n");
+       printf("\t\t(%d iterations)\n", TEST1_LOOPS);
+       prio_out = 0;
+       send_total.tv_sec = 0;
+       send_total.tv_nsec = 0;
+       recv_total.tv_sec = 0;
+       recv_total.tv_nsec = 0;
+       for (i = 0; i < TEST1_LOOPS; i++)
+               do_send_recv();
+       printf("\t\tSend msg:\t\t\t%d.%ds total time\n",
+              send_total.tv_sec, send_total.tv_nsec);
+       nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
+                send_total.tv_nsec) / TEST1_LOOPS;
+       printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+       printf("\t\tRecv msg:\t\t\t%d.%ds total time\n",
+              recv_total.tv_sec, recv_total.tv_nsec);
+       nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
+               recv_total.tv_nsec) / TEST1_LOOPS;
+       printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+
+
+       for (cur_test = test2; cur_test->desc != NULL; cur_test++) {
+               printf(cur_test->desc);
+               printf("\t\t(%d iterations)\n", TEST2_LOOPS);
+               prio_out = 0;
+               send_total.tv_sec = 0;
+               send_total.tv_nsec = 0;
+               recv_total.tv_sec = 0;
+               recv_total.tv_nsec = 0;
+               printf("\t\tFilling queue...");
+               fflush(stdout);
+               clock_gettime(clock, &start);
+               for (i = 0; i < result.mq_maxmsg - 1; i++) {
+                       do_untimed_send();
+                       cur_test->func(&prio_out);
+               }
+               clock_gettime(clock, &end);
+               nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) *
+                       1000000000) + (end.tv_nsec - start.tv_nsec);
+               printf("done.\t\t%lld.%llds\n", nsec / 1000000000,
+                      nsec % 1000000000);
+               printf("\t\tTesting...");
+               fflush(stdout);
+               for (i = 0; i < TEST2_LOOPS; i++) {
+                       do_send_recv();
+                       cur_test->func(&prio_out);
+               }
+               printf("done.\n");
+               printf("\t\tSend msg:\t\t\t%d.%ds total time\n",
+                      send_total.tv_sec, send_total.tv_nsec);
+               nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
+                        send_total.tv_nsec) / TEST2_LOOPS;
+               printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+               printf("\t\tRecv msg:\t\t\t%d.%ds total time\n",
+                      recv_total.tv_sec, recv_total.tv_nsec);
+               nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
+                       recv_total.tv_nsec) / TEST2_LOOPS;
+               printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+               printf("\t\tDraining queue...");
+               fflush(stdout);
+               clock_gettime(clock, &start);
+               drain_queue();
+               clock_gettime(clock, &end);
+               nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) *
+                       1000000000) + (end.tv_nsec - start.tv_nsec);
+               printf("done.\t\t%lld.%llds\n", nsec / 1000000000,
+                      nsec % 1000000000);
+       }
+       return 0;
+}
+
+void increase_limits(void)
+{
+       cur_limits.rlim_cur = RLIM_INFINITY;
+       cur_limits.rlim_max = RLIM_INFINITY;
+       setr(RLIMIT_MSGQUEUE, &cur_limits);
+       while (try_set(max_msgs, cur_max_msgs += 10))
+               ;
+       cur_max_msgs = get(max_msgs);
+       while (try_set(max_msgsize, cur_max_msgsize += 1024))
+               ;
+       cur_max_msgsize = get(max_msgsize);
+       if (setpriority(PRIO_PROCESS, 0, -20) != 0)
+               shutdown(2, "setpriority()", __LINE__);
+       cur_nice = -20;
+}
+
/*
 * Entry point: parse options, raise system limits, report state, set up
 * signal handling, open the queue, spawn the pinned worker threads and
 * either wait for the perf test to finish or sleep forever (continuous
 * mode) until a signal triggers cleanup.
 */
int main(int argc, char *argv[])
{
	struct mq_attr attr;
	char *option, *next_option;
	int i, cpu;
	struct sigaction sa;
	poptContext popt_context;
	/* NOTE(review): poptGetNextOpt() returns int (negative on error);
	 * storing it in a plain char loses the sign on platforms where
	 * char is unsigned -- consider int.  TODO confirm. */
	char rc;
	void *retval;

	main_thread = pthread_self();
	num_cpus_to_pin = 0;

	if (sysconf(_SC_NPROCESSORS_ONLN) == -1) {
		perror("sysconf(_SC_NPROCESSORS_ONLN)");
		exit(1);
	}
	/* Never track more CPUs than we have array slots for */
	cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN));
	cpu_set = CPU_ALLOC(cpus_online);
	if (cpu_set == NULL) {
		perror("CPU_ALLOC()");
		exit(1);
	}
	cpu_set_size = CPU_ALLOC_SIZE(cpus_online);
	CPU_ZERO_S(cpu_set_size, cpu_set);

	popt_context = poptGetContext(NULL, argc, (const char **)argv,
				      options, 0);

	while ((rc = poptGetNextOpt(popt_context)) > 0) {
		switch (rc) {
		case 'c':
			/* Comma-separated CPU list for continuous mode;
			 * out-of-range CPUs are warned about and dropped */
			continuous_mode = 1;
			option = cpu_option_string;
			do {
				next_option = strchr(option, ',');
				if (next_option)
					*next_option = '\0';
				cpu = atoi(option);
				if (cpu >= cpus_online)
					fprintf(stderr, "CPU %d exceeds "
						"cpus online, ignoring.\n",
						cpu);
				else
					cpus_to_pin[num_cpus_to_pin++] = cpu;
				if (next_option)
					option = ++next_option;
			} while (next_option && num_cpus_to_pin < MAX_CPUS);
			/* Double check that they didn't give us the same CPU
			 * more than once */
			for (cpu = 0; cpu < num_cpus_to_pin; cpu++) {
				if (CPU_ISSET_S(cpus_to_pin[cpu], cpu_set_size,
						cpu_set)) {
					fprintf(stderr, "Any given CPU may "
						"only be given once.\n");
					exit(1);
				} else
					CPU_SET_S(cpus_to_pin[cpu],
						  cpu_set_size, cpu_set);
			}
			break;
		case 'p':
			/*
			 * Although we can create a msg queue with a
			 * non-absolute path name, unlink will fail.  So,
			 * if the name doesn't start with a /, add one
			 * when we save it.
			 */
			option = queue_path;
			if (*option != '/') {
				queue_path = malloc(strlen(option) + 2);
				if (!queue_path) {
					perror("malloc()");
					exit(1);
				}
				queue_path[0] = '/';
				queue_path[1] = 0;
				strcat(queue_path, option);
				/* NOTE(review): assumes the previous
				 * queue_path was heap-allocated (e.g. by
				 * popt) -- confirm, otherwise this free
				 * is invalid. */
				free(option);
			}
			break;
		}
	}

	if (continuous_mode && num_cpus_to_pin == 0) {
		fprintf(stderr, "Must pass at least one CPU to continuous "
			"mode.\n");
		poptPrintUsage(popt_context, stderr, 0);
		exit(1);
	} else if (!continuous_mode) {
		/* Perf-test default: pin to the last online CPU */
		num_cpus_to_pin = 1;
		cpus_to_pin[0] = cpus_online - 1;
	}

	if (getuid() != 0) {
		fprintf(stderr, "Not running as root, but almost all tests "
			"require root in order to modify\nsystem settings.  "
			"Exiting.\n");
		exit(1);
	}

	max_msgs = fopen(MAX_MSGS, "r+");
	max_msgsize = fopen(MAX_MSGSIZE, "r+");
	if (!max_msgs)
		shutdown(2, "Failed to open msg_max", __LINE__);
	if (!max_msgsize)
		shutdown(2, "Failed to open msgsize_max", __LINE__);

	/* Load up the current system values for everything we can */
	getr(RLIMIT_MSGQUEUE, &saved_limits);
	cur_limits = saved_limits;
	saved_max_msgs = cur_max_msgs = get(max_msgs);
	saved_max_msgsize = cur_max_msgsize = get(max_msgsize);
	/* getpriority() can legitimately return -1, so a cleared errno is
	 * the only reliable failure indicator */
	errno = 0;
	cur_nice = getpriority(PRIO_PROCESS, 0);
	if (errno)
		shutdown(2, "getpriority()", __LINE__);

	/* Tell the user our initial state */
	printf("\nInitial system state:\n");
	printf("\tUsing queue path:\t\t\t%s\n", queue_path);
	/* NOTE(review): rlim_cur/rlim_max are rlim_t (wider than int on
	 * LP64) but printed with %d -- format mismatch to fix separately. */
	printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%d\n", saved_limits.rlim_cur);
	printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%d\n", saved_limits.rlim_max);
	printf("\tMaximum Message Size:\t\t\t%d\n", saved_max_msgsize);
	printf("\tMaximum Queue Size:\t\t\t%d\n", saved_max_msgs);
	printf("\tNice value:\t\t\t\t%d\n", cur_nice);
	printf("\n");

	increase_limits();

	printf("Adjusted system state for testing:\n");
	if (cur_limits.rlim_cur == RLIM_INFINITY) {
		printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t(unlimited)\n");
		printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t(unlimited)\n");
	} else {
		printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%d\n",
		       cur_limits.rlim_cur);
		printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%d\n",
		       cur_limits.rlim_max);
	}
	printf("\tMaximum Message Size:\t\t\t%d\n", cur_max_msgsize);
	printf("\tMaximum Queue Size:\t\t\t%d\n", cur_max_msgs);
	printf("\tNice value:\t\t\t\t%d\n", cur_nice);
	printf("\tContinuous mode:\t\t\t(%s)\n", continuous_mode ?
	       (continuous_mode_fake ? "fake mode" : "enabled") :
	       "disabled");
	printf("\tCPUs to pin:\t\t\t\t%d", cpus_to_pin[0]);
	for (cpu = 1; cpu < num_cpus_to_pin; cpu++)
			printf(",%d", cpus_to_pin[cpu]);
	printf("\n");

	/* SIGUSR1 gets its own handler; the fatal signals share
	 * sig_action(), with all four blocked while any one is handled */
	sa.sa_sigaction = sig_action_SIGUSR1;
	sigemptyset(&sa.sa_mask);
	sigaddset(&sa.sa_mask, SIGHUP);
	sigaddset(&sa.sa_mask, SIGINT);
	sigaddset(&sa.sa_mask, SIGQUIT);
	sigaddset(&sa.sa_mask, SIGTERM);
	sa.sa_flags = SA_SIGINFO;
	if (sigaction(SIGUSR1, &sa, NULL) == -1)
		shutdown(1, "sigaction(SIGUSR1)", __LINE__);
	sa.sa_sigaction = sig_action;
	if (sigaction(SIGHUP, &sa, NULL) == -1)
		shutdown(1, "sigaction(SIGHUP)", __LINE__);
	if (sigaction(SIGINT, &sa, NULL) == -1)
		shutdown(1, "sigaction(SIGINT)", __LINE__);
	if (sigaction(SIGQUIT, &sa, NULL) == -1)
		shutdown(1, "sigaction(SIGQUIT)", __LINE__);
	if (sigaction(SIGTERM, &sa, NULL) == -1)
		shutdown(1, "sigaction(SIGTERM)", __LINE__);

	if (!continuous_mode_fake) {
		attr.mq_flags = O_NONBLOCK;
		attr.mq_maxmsg = cur_max_msgs;
		attr.mq_msgsize = MSG_SIZE;
		open_queue(&attr);
	}
	/* Spawn one worker per requested CPU, each pinned through its
	 * pthread attr's affinity mask */
	for (i = 0; i < num_cpus_to_pin; i++) {
		pthread_attr_t thread_attr;
		/* NOTE(review): holding a function pointer in a void * is
		 * not strictly portable C -- works on POSIX/Linux. */
		void *thread_func;

		if (continuous_mode_fake)
			thread_func = &fake_cont_thread;
		else if (continuous_mode)
			thread_func = &cont_thread;
		else
			thread_func = &perf_test_thread;

		CPU_ZERO_S(cpu_set_size, cpu_set);
		CPU_SET_S(cpus_to_pin[i], cpu_set_size, cpu_set);
		pthread_attr_init(&thread_attr);
		pthread_attr_setaffinity_np(&thread_attr, cpu_set_size,
					    cpu_set);
		if (pthread_create(&cpu_threads[i], &thread_attr, thread_func,
				   NULL))
			shutdown(1, "pthread_create()", __LINE__);
		pthread_attr_destroy(&thread_attr);
	}

	if (!continuous_mode) {
		/* Perf test: wait for the single worker, then clean up
		 * using its exit status */
		pthread_join(cpu_threads[0], &retval);
		shutdown((long)retval, "perf_test_thread()", __LINE__);
	} else {
		/* Continuous mode runs until a signal fires
		 * sig_action() -> shutdown() */
		while (1)
			sleep(1);
	}
	shutdown(0, "", 0);
}
index 7dab7b25b5c6175fc536b46ddd87f3f7acd7b81c..f576971f6556f0b08dcd30ef24af4fef61d01e91 100644 (file)
@@ -35,6 +35,7 @@
 #include <sys/mount.h>
 #include <sys/statfs.h>
 #include "../../include/linux/magic.h"
+#include "../../include/linux/kernel-page-flags.h"
 
 
 #ifndef MAX_PATH
 #define KPF_BYTES              8
 #define PROC_KPAGEFLAGS                "/proc/kpageflags"
 
-/* copied from kpageflags_read() */
-#define KPF_LOCKED             0
-#define KPF_ERROR              1
-#define KPF_REFERENCED         2
-#define KPF_UPTODATE           3
-#define KPF_DIRTY              4
-#define KPF_LRU                        5
-#define KPF_ACTIVE             6
-#define KPF_SLAB               7
-#define KPF_WRITEBACK          8
-#define KPF_RECLAIM            9
-#define KPF_BUDDY              10
-
-/* [11-20] new additions in 2.6.31 */
-#define KPF_MMAP               11
-#define KPF_ANON               12
-#define KPF_SWAPCACHE          13
-#define KPF_SWAPBACKED         14
-#define KPF_COMPOUND_HEAD      15
-#define KPF_COMPOUND_TAIL      16
-#define KPF_HUGE               17
-#define KPF_UNEVICTABLE                18
-#define KPF_HWPOISON           19
-#define KPF_NOPAGE             20
-#define KPF_KSM                        21
-#define KPF_THP                        22
-
 /* [32-] kernel hacking assistances */
 #define KPF_RESERVED           32
 #define KPF_MLOCKED            33
@@ -326,7 +300,7 @@ static char *page_flag_name(uint64_t flags)
 {
        static char buf[65];
        int present;
-       int i, j;
+       size_t i, j;
 
        for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) {
                present = (flags >> i) & 1;
@@ -344,7 +318,7 @@ static char *page_flag_name(uint64_t flags)
 static char *page_flag_longname(uint64_t flags)
 {
        static char buf[1024];
-       int i, n;
+       size_t i, n;
 
        for (i = 0, n = 0; i < ARRAY_SIZE(page_flag_names); i++) {
                if (!page_flag_names[i])
@@ -402,7 +376,7 @@ static void show_page(unsigned long voffset,
 
 static void show_summary(void)
 {
-       int i;
+       size_t i;
 
        printf("             flags\tpage-count       MB"
                "  symbolic-flags\t\t\tlong-symbolic-flags\n");
@@ -500,7 +474,7 @@ static int debugfs_valid_mountpoint(const char *debugfs)
 /* find the path to the mounted debugfs */
 static const char *debugfs_find_mountpoint(void)
 {
-       const char **ptr;
+       const char *const *ptr;
        char type[100];
        FILE *fp;
 
@@ -537,7 +511,7 @@ static const char *debugfs_find_mountpoint(void)
 
 static void debugfs_mount(void)
 {
-       const char **ptr;
+       const char *const *ptr;
 
        /* see if it's already mounted */
        if (debugfs_find_mountpoint())
@@ -614,10 +588,10 @@ static int unpoison_page(unsigned long offset)
  * page frame walker
  */
 
-static int hash_slot(uint64_t flags)
+static size_t hash_slot(uint64_t flags)
 {
-       int k = HASH_KEY(flags);
-       int i;
+       size_t k = HASH_KEY(flags);
+       size_t i;
 
        /* Explicitly reserve slot 0 for flags 0: the following logic
         * cannot distinguish an unoccupied slot from slot (flags==0).
@@ -670,7 +644,7 @@ static void walk_pfn(unsigned long voffset,
 {
        uint64_t buf[KPAGEFLAGS_BATCH];
        unsigned long batch;
-       long pages;
+       unsigned long pages;
        unsigned long i;
 
        while (count) {
@@ -779,7 +753,7 @@ static const char *page_flag_type(uint64_t flag)
 
 static void usage(void)
 {
-       int i, j;
+       size_t i, j;
 
        printf(
 "page-types [options]\n"
@@ -938,7 +912,7 @@ static void add_bits_filter(uint64_t mask, uint64_t bits)
 
 static uint64_t parse_flag_name(const char *str, int len)
 {
-       int i;
+       size_t i;
 
        if (!*str || !len)
                return 0;
index 65b845bd4e3e792ca09ce01aa31da92ced51a302..085872bb2bb593502024d020316d2978c2cf9031 100644 (file)
@@ -134,7 +134,7 @@ config INITRAMFS_COMPRESSION_BZIP2
        depends on RD_BZIP2
        help
          Its compression ratio and speed is intermediate.
-         Decompression speed is slowest among the four.  The initramfs
+         Decompression speed is slowest among the choices.  The initramfs
          size is about 10% smaller with bzip2, in comparison to gzip.
          Bzip2 uses a large amount of memory. For modern kernels you
          will need at least 8MB RAM or more for booting.
@@ -143,9 +143,9 @@ config INITRAMFS_COMPRESSION_LZMA
        bool "LZMA"
        depends on RD_LZMA
        help
-         The most recent compression algorithm.
-         Its ratio is best, decompression speed is between the other
-         three. Compression is slowest. The initramfs size is about 33%
+         This algorithm's compression ratio is best.
+         Decompression speed is between the other choices.
+         Compression is slowest. The initramfs size is about 33%
          smaller with LZMA in comparison to gzip.
 
 config INITRAMFS_COMPRESSION_XZ
@@ -161,7 +161,7 @@ config INITRAMFS_COMPRESSION_LZO
        bool "LZO"
        depends on RD_LZO
        help
-         Its compression ratio is the poorest among the four. The kernel
+         Its compression ratio is the poorest among the choices. The kernel
          size is about 10% bigger than gzip; however its speed
          (both compression and decompression) is the fastest.
 
index f63ccb0a5982d15436420a964aa2428ef311f58c..28694f4a91398998f569c234c7faee104a945259 100644 (file)
@@ -18,3 +18,6 @@ config KVM_MMIO
 
 config KVM_ASYNC_PF
        bool
+
+config HAVE_KVM_MSI
+       bool
index dcaf272c26c0e232d01b265e652488642e7cc496..26fd54dc459ec8900d83da38c9132878ee60780d 100644 (file)
@@ -254,13 +254,17 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
        }
 }
 
/*
 * Return true if the in-kernel IOAPIC is configured to handle @vector.
 */
bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	/* NOTE(review): smp_rmb() presumably pairs with a write barrier
	 * where handled_vectors is updated -- confirm against the setter. */
	smp_rmb();
	return test_bit(vector, ioapic->handled_vectors);
}
+
 void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
 {
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;
 
-       smp_rmb();
-       if (!test_bit(vector, ioapic->handled_vectors))
-               return;
        spin_lock(&ioapic->lock);
        __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
        spin_unlock(&ioapic->lock);
index 0b190c34ccc31bd398d4549996c57c3203952a82..32872a09b63f3b1d8147f2e8595ed1f45ea9db65 100644 (file)
@@ -71,6 +71,7 @@ int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                int short_hand, int dest, int dest_mode);
 int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
 void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
+bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
 int kvm_ioapic_init(struct kvm *kvm);
 void kvm_ioapic_destroy(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
index 9f614b4e365f77e38ce4b8dec4eeac15148cd5b7..a6a0365475edafc1935e46c04297931eadb8663c 100644 (file)
@@ -138,6 +138,20 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
        return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
 }
 
/*
 * Deliver an MSI requested by userspace (KVM_SIGNAL_MSI ioctl) by
 * synthesizing a routing entry from @msi and injecting it with the
 * userspace IRQ source id.  Requires the in-kernel irqchip; rejects
 * any flags (none are accepted here).  Returns kvm_set_msi()'s result
 * or -EINVAL.
 */
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	struct kvm_kernel_irq_routing_entry route;

	if (!irqchip_in_kernel(kvm) || msi->flags != 0)
		return -EINVAL;

	route.msi.address_lo = msi->address_lo;
	route.msi.address_hi = msi->address_hi;
	route.msi.data = msi->data;

	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
}
+
 /*
  * Return value:
  *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
index 9739b533ca2e6954f75c98fc1bb307e641aedf94..7e140683ff14d503a9714058cadd9dde7e4ffaf9 100644 (file)
@@ -522,12 +522,11 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
                return;
 
        if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
-               vfree(memslot->dirty_bitmap_head);
+               vfree(memslot->dirty_bitmap);
        else
-               kfree(memslot->dirty_bitmap_head);
+               kfree(memslot->dirty_bitmap);
 
        memslot->dirty_bitmap = NULL;
-       memslot->dirty_bitmap_head = NULL;
 }
 
 /*
@@ -611,8 +610,7 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
 
 /*
  * Allocation size is twice as large as the actual dirty bitmap size.
- * This makes it possible to do double buffering: see x86's
- * kvm_vm_ioctl_get_dirty_log().
+ * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed.
  */
 static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 {
@@ -627,8 +625,6 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
        if (!memslot->dirty_bitmap)
                return -ENOMEM;
 
-       memslot->dirty_bitmap_head = memslot->dirty_bitmap;
-       memslot->nr_dirty_pages = 0;
 #endif /* !CONFIG_S390 */
        return 0;
 }
@@ -1477,8 +1473,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;
 
-               if (!test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap))
-                       memslot->nr_dirty_pages++;
+               /* TODO: introduce set_bit_le() and use it */
+               test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap);
        }
 }
 
@@ -1515,6 +1511,30 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
        finish_wait(&vcpu->wq, &wait);
 }
 
#ifndef CONFIG_S390
/*
 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
 */
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;
	wait_queue_head_t *wqp;

	/* Wake the VCPU if it is blocked on its arch wait queue */
	wqp = kvm_arch_vcpu_wq(vcpu);
	if (waitqueue_active(wqp)) {
		wake_up_interruptible(wqp);
		++vcpu->stat.halt_wakeup;
	}

	/* get_cpu() disables preemption so "me" stays valid through
	 * put_cpu(); the unsigned cast also rejects a negative cpu */
	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (kvm_arch_vcpu_should_kick(vcpu))
			smp_send_reschedule(cpu);
	put_cpu();
}
#endif /* !CONFIG_S390 */
+
 void kvm_resched(struct kvm_vcpu *vcpu)
 {
        if (!need_resched())
@@ -1523,6 +1543,31 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
+bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
+{
+       struct pid *pid;
+       struct task_struct *task = NULL;
+
+       rcu_read_lock();
+       pid = rcu_dereference(target->pid);
+       if (pid)
+               task = get_pid_task(target->pid, PIDTYPE_PID);
+       rcu_read_unlock();
+       if (!task)
+               return false;
+       if (task->flags & PF_VCPU) {
+               put_task_struct(task);
+               return false;
+       }
+       if (yield_to(task, 1)) {
+               put_task_struct(task);
+               return true;
+       }
+       put_task_struct(task);
+       return false;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
        struct kvm *kvm = me->kvm;
@@ -1541,8 +1586,6 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
         */
        for (pass = 0; pass < 2 && !yielded; pass++) {
                kvm_for_each_vcpu(i, vcpu, kvm) {
-                       struct task_struct *task = NULL;
-                       struct pid *pid;
                        if (!pass && i < last_boosted_vcpu) {
                                i = last_boosted_vcpu;
                                continue;
@@ -1552,24 +1595,11 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
                                continue;
                        if (waitqueue_active(&vcpu->wq))
                                continue;
-                       rcu_read_lock();
-                       pid = rcu_dereference(vcpu->pid);
-                       if (pid)
-                               task = get_pid_task(vcpu->pid, PIDTYPE_PID);
-                       rcu_read_unlock();
-                       if (!task)
-                               continue;
-                       if (task->flags & PF_VCPU) {
-                               put_task_struct(task);
-                               continue;
-                       }
-                       if (yield_to(task, 1)) {
-                               put_task_struct(task);
+                       if (kvm_vcpu_yield_to(vcpu)) {
                                kvm->last_boosted_vcpu = i;
                                yielded = 1;
                                break;
                        }
-                       put_task_struct(task);
                }
        }
 }
@@ -2039,6 +2069,17 @@ static long kvm_vm_ioctl(struct file *filp,
                        kvm->bsp_vcpu_id = arg;
                mutex_unlock(&kvm->lock);
                break;
+#endif
+#ifdef CONFIG_HAVE_KVM_MSI
+       case KVM_SIGNAL_MSI: {
+               struct kvm_msi msi;
+
+               r = -EFAULT;
+               if (copy_from_user(&msi, argp, sizeof msi))
+                       goto out;
+               r = kvm_send_userspace_msi(kvm, &msi);
+               break;
+       }
 #endif
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
@@ -2168,6 +2209,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
        case KVM_CAP_SET_BOOT_CPU_ID:
 #endif
        case KVM_CAP_INTERNAL_ERROR_DATA:
+#ifdef CONFIG_HAVE_KVM_MSI
+       case KVM_CAP_SIGNAL_MSI:
+#endif
                return 1;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
        case KVM_CAP_IRQ_ROUTING:
@@ -2394,9 +2438,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
                          gpa_t addr, int len)
 {
-       if (bus->dev_count == NR_IOBUS_DEVS)
-               return -ENOSPC;
-
        bus->range[bus->dev_count++] = (struct kvm_io_range) {
                .addr = addr,
                .len = len,
@@ -2496,12 +2537,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
        struct kvm_io_bus *new_bus, *bus;
 
        bus = kvm->buses[bus_idx];
-       if (bus->dev_count > NR_IOBUS_DEVS-1)
+       if (bus->dev_count > NR_IOBUS_DEVS - 1)
                return -ENOSPC;
 
-       new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
+       new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
+                         sizeof(struct kvm_io_range)), GFP_KERNEL);
        if (!new_bus)
                return -ENOMEM;
+       memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
+              sizeof(struct kvm_io_range)));
        kvm_io_bus_insert_dev(new_bus, dev, addr, len);
        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
        synchronize_srcu_expedited(&kvm->srcu);
@@ -2518,27 +2562,25 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
        struct kvm_io_bus *new_bus, *bus;
 
        bus = kvm->buses[bus_idx];
-
-       new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
-       if (!new_bus)
-               return -ENOMEM;
-
        r = -ENOENT;
-       for (i = 0; i < new_bus->dev_count; i++)
-               if (new_bus->range[i].dev == dev) {
+       for (i = 0; i < bus->dev_count; i++)
+               if (bus->range[i].dev == dev) {
                        r = 0;
-                       new_bus->dev_count--;
-                       new_bus->range[i] = new_bus->range[new_bus->dev_count];
-                       sort(new_bus->range, new_bus->dev_count,
-                            sizeof(struct kvm_io_range),
-                            kvm_io_bus_sort_cmp, NULL);
                        break;
                }
 
-       if (r) {
-               kfree(new_bus);
+       if (r)
                return r;
-       }
+
+       new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
+                         sizeof(struct kvm_io_range)), GFP_KERNEL);
+       if (!new_bus)
+               return -ENOMEM;
+
+       memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+       new_bus->dev_count--;
+       memcpy(new_bus->range + i, bus->range + i + 1,
+              (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
 
        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
        synchronize_srcu_expedited(&kvm->srcu);